diff --git a/.azure-pipelines/README.md b/.azure-pipelines/README.md index 385e70bac5..9e8ad74104 100644 --- a/.azure-pipelines/README.md +++ b/.azure-pipelines/README.md @@ -1,3 +1,9 @@ +<!-- +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +--> + ## Azure Pipelines Configuration Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information. diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 027a2f0932..e9bfa6f8e4 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + trigger: batch: true branches: @@ -24,22 +29,20 @@ schedules: always: true branches: include: - - stable-2 - - stable-3 + - stable-11 + - stable-10 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-1 + - stable-9 variables: - name: checkoutPath value: ansible_collections/community/general - name: coverageBranches value: main - - name: pipelinesCoverage - value: coverage - name: entryPoint value: tests/utils/shippable/shippable.sh - name: fetchDepth @@ -48,7 +51,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:1.9.0 + image: quay.io/ansible/azure-pipelines-test-container:7.0.0 pool: Standard @@ -67,54 +70,40 @@ stages: - test: 2 - test: 3 - test: 4 - - test: extra - - stage: Sanity_2_12 - displayName: Sanity 2.12 + - stage: Sanity_2_20 + displayName: Sanity 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.12/sanity/{0} + testFormat: 2.20/sanity/{0} targets: - test: 1 - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_11 - displayName: Sanity 2.11 + - stage: Sanity_2_19 + displayName: Sanity 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.11/sanity/{0} + testFormat: 2.19/sanity/{0} targets: - test: 1 - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_10 - displayName: Sanity 2.10 + - stage: Sanity_2_18 + displayName: Sanity 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.10/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 - - stage: Sanity_2_9 - displayName: Sanity 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.9/sanity/{0} + testFormat: 2.18/sanity/{0} targets: - test: 1 - test: 2 @@ -130,73 +119,68 @@ stages: nameFormat: Python {0} testFormat: devel/units/{0}/1 targets: - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - test: 3.9 - test: '3.10' - - stage: Units_2_12 - displayName: Units 2.12 + - test: '3.11' + - test: '3.12' + - test: '3.13' + - test: '3.14' + - stage: Units_2_20 + displayName: Units 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.12/units/{0}/1 + testFormat: 2.20/units/{0}/1 targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - - test: '3.10' - - stage: Units_2_11 - displayName: Units 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/units/{0}/1 - targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - 
test: 3.9 - - stage: Units_2_10 - displayName: Units 2.10 + - test: "3.12" + - test: "3.14" + - stage: Units_2_19 + displayName: Units 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.10/units/{0}/1 + testFormat: 2.19/units/{0}/1 targets: - - test: 2.7 - - test: 3.6 - - stage: Units_2_9 - displayName: Units 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.9/units/{0}/1 - targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - test: 3.8 + - test: "3.11" + - test: "3.13" + - stage: Units_2_18 + displayName: Units 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.18/units/{0}/1 + targets: + - test: 3.8 + - test: "3.11" + - test: "3.13" ## Remote + - stage: Remote_devel_extra_vms + displayName: Remote devel extra VMs + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: devel/{0} + targets: + - name: Alpine 3.22 + test: alpine/3.22 + # - name: Fedora 42 + # test: fedora/42 + - name: Ubuntu 22.04 + test: ubuntu/22.04 + - name: Ubuntu 24.04 + test: ubuntu/24.04 + groups: + - vm - stage: Remote_devel displayName: Remote devel dependsOn: [] @@ -205,86 +189,72 @@ stages: parameters: testFormat: devel/{0} targets: - - name: macOS 11.1 - test: macos/11.1 - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.4 - test: rhel/8.4 - - name: FreeBSD 12.2 - test: freebsd/12.2 - - name: FreeBSD 13.0 - test: freebsd/13.0 + - name: macOS 15.3 + test: macos/15.3 + - name: RHEL 10.0 + test: rhel/10.0 + - name: RHEL 9.6 + test: rhel/9.6 + - name: FreeBSD 14.3 + test: freebsd/14.3 + - name: FreeBSD 13.5 + test: freebsd/13.5 groups: - 1 - 2 - 3 - - stage: Remote_2_12 - displayName: Remote 2.12 + - stage: Remote_2_20 + displayName: Remote 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/{0} + testFormat: 2.20/{0} targets: - - name: macOS 11.1 - test: macos/11.1 - - name: RHEL 8.4 - test: rhel/8.4 - - name: FreeBSD 13.0 - test: freebsd/13.0 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.3 + test: freebsd/14.3 groups: - 1 - 2 - - stage: Remote_2_11 - displayName: Remote 2.11 + - 3 + - stage: Remote_2_19 + displayName: Remote 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.11/{0} + testFormat: 2.19/{0} targets: - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.3 - test: rhel/8.3 - - name: FreeBSD 12.2 - test: freebsd/12.2 + - name: RHEL 9.5 + test: rhel/9.5 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.2 + test: freebsd/14.2 groups: - 1 - 2 - - stage: Remote_2_10 - displayName: Remote 2.10 + - 3 + - stage: Remote_2_18 + displayName: Remote 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.10/{0} + testFormat: 2.18/{0} targets: - - name: OS X 10.11 - test: osx/10.11 - - name: macOS 10.15 - test: macos/10.15 - groups: - - 1 - - 2 - - stage: Remote_2_9 - displayName: Remote 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.9/{0} - targets: - - name: RHEL 8.2 - test: rhel/8.2 - - name: RHEL 7.8 - test: rhel/7.8 - - name: FreeBSD 12.0 - test: freebsd/12.0 + - name: macOS 14.3 + test: macos/14.3 + - name: RHEL 9.4 + test: rhel/9.4 + - name: FreeBSD 14.1 + test: freebsd/14.1 groups: - 1 - 2 + - 3 ### Docker - stage: Docker_devel @@ -295,173 +265,164 @@ stages: parameters: testFormat: 
devel/linux/{0} targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 33 - test: fedora33 - - name: Fedora 34 - test: fedora34 - - name: openSUSE 15 py2 - test: opensuse15py2 - - name: openSUSE 15 py3 - test: opensuse15 - - name: Ubuntu 18.04 - test: ubuntu1804 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 + - name: Ubuntu 22.04 + test: ubuntu2204 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 - 3 - - stage: Docker_2_12 - displayName: Docker 2.12 + - stage: Docker_2_20 + displayName: Docker 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/linux/{0} + testFormat: 2.20/linux/{0} targets: - - name: CentOS 6 - test: centos6 - - name: CentOS 8 - test: centos8 - - name: Fedora 34 - test: fedora34 - - name: openSUSE 15 py3 - test: opensuse15 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 groups: - 1 - 2 - 3 - - stage: Docker_2_11 - displayName: Docker 2.11 + - stage: Docker_2_19 + displayName: Docker 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.11/linux/{0} + testFormat: 2.19/linux/{0} targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 33 - test: fedora33 - - name: openSUSE 15 py2 - test: opensuse15py2 + - name: Fedora 41 + test: fedora41 + - name: Alpine 3.21 + test: alpine321 groups: + - 1 - 2 - 3 - - stage: Docker_2_10 - displayName: Docker 2.10 + - stage: Docker_2_18 + displayName: Docker 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.10/linux/{0} + testFormat: 2.18/linux/{0} targets: - - name: Fedora 32 - test: fedora32 - - name: Ubuntu 16.04 - test: ubuntu1604 - groups: - - 2 - - 3 - - stage: Docker_2_9 - displayName: Docker 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.9/linux/{0} - targets: - - name: Fedora 31 - test: fedora31 - - name: openSUSE 15 py3 - test: opensuse15 + - name: Fedora 40 + test: fedora40 + - name: Alpine 3.20 + test: alpine320 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: + - 1 - 2 - 3 -### Cloud - - stage: Cloud_devel - displayName: Cloud devel +### Community Docker + - stage: Docker_community_devel + displayName: Docker (community images) devel dependsOn: [] jobs: - template: templates/matrix.yml parameters: - nameFormat: Python {0} - testFormat: devel/cloud/{0}/1 + testFormat: devel/linux-community/{0} targets: - - test: 2.7 - - test: 3.9 - - stage: Cloud_2_12 - displayName: Cloud 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.12/cloud/{0}/1 - targets: - - test: 3.8 - - stage: Cloud_2_11 - displayName: Cloud 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/cloud/{0}/1 - targets: - - test: 3.6 - - stage: Cloud_2_10 - displayName: Cloud 2.10 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.10/cloud/{0}/1 - targets: - - test: 3.5 - - stage: Cloud_2_9 - displayName: Cloud 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.9/cloud/{0}/1 - targets: - - test: 2.7 + - name: Debian 11 Bullseye + test: debian-bullseye/3.9 + - name: Debian 12 Bookworm + test: debian-bookworm/3.11 + - name: Debian 13 Trixie + test: debian-13-trixie/3.13 + - name: ArchLinux + test: 
archlinux/3.13 + groups: + - 1 + - 2 + - 3 + +### Generic +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. +# - stage: Generic_devel +# displayName: Generic devel +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: devel/generic/{0}/1 +# targets: +# - test: '3.9' +# - test: '3.12' +# - test: '3.14' +# - stage: Generic_2_20 +# displayName: Generic 2.20 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.20/generic/{0}/1 +# targets: +# - test: '3.10' +# - test: '3.14' +# - stage: Generic_2_19 +# displayName: Generic 2.19 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.19/generic/{0}/1 +# targets: +# - test: '3.9' +# - test: '3.13' +# - stage: Generic_2_18 +# displayName: Generic 2.18 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.18/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.13' + - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel - - Sanity_2_9 - - Sanity_2_10 - - Sanity_2_11 - - Sanity_2_12 + - Sanity_2_20 + - Sanity_2_19 + - Sanity_2_18 - Units_devel - - Units_2_9 - - Units_2_10 - - Units_2_11 - - Units_2_12 + - Units_2_20 + - Units_2_19 + - Units_2_18 + - Remote_devel_extra_vms - Remote_devel - - Remote_2_9 - - Remote_2_10 - - Remote_2_11 - - Remote_2_12 + - Remote_2_20 + - Remote_2_19 + - Remote_2_18 - Docker_devel - - Docker_2_9 - - Docker_2_10 - - Docker_2_11 - - Docker_2_12 - - Cloud_devel - - Cloud_2_9 - - Cloud_2_10 - - Cloud_2_11 - - Cloud_2_12 + - Docker_2_20 + - Docker_2_19 + - Docker_2_18 + - Docker_community_devel +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. +# - Generic_devel +# - Generic_2_20 +# - Generic_2_19 +# - Generic_2_18 jobs: - template: templates/coverage.yml diff --git a/.azure-pipelines/scripts/aggregate-coverage.sh b/.azure-pipelines/scripts/aggregate-coverage.sh index f3113dd0a9..ca2b19de97 100755 --- a/.azure-pipelines/scripts/aggregate-coverage.sh +++ b/.azure-pipelines/scripts/aggregate-coverage.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Aggregate code coverage results for later processing. set -o pipefail -eu @@ -11,7 +15,7 @@ mkdir "${agent_temp_directory}/coverage/" options=(--venv --venv-system-site-packages --color -v) -ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}" +ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}" if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then # Only analyze coverage if the installed version of ansible-test supports it. 
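The combine-coverage.py script in the next hunk merges these per-job artifacts, keeping only the most recent attempt from each job based on artifact names of the form "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}". A minimal Python sketch of that selection rule, assuming that naming convention (the helper name and sample artifact names are illustrative, not taken from the script):

    # Keep only the newest attempt per job, given artifact names shaped like
    # "Coverage {attempt} {stable job name}" (sample data is illustrative).
    def pick_latest_attempts(artifact_names):
        latest = {}  # stable job name -> (attempt, artifact name)
        for artifact in artifact_names:
            parts = artifact.split(' ', 2)
            if len(parts) != 3 or parts[0] != 'Coverage':
                continue  # not a coverage artifact
            attempt, job = int(parts[1]), parts[2]
            if job not in latest or attempt > latest[job][0]:
                latest[job] = (attempt, artifact)
        return [name for _, name in latest.values()]

    print(pick_latest_attempts([
        'Coverage 1 Units devel Python 3.12',
        'Coverage 2 Units devel Python 3.12',  # the retry wins
        'Coverage 1 Sanity devel Test 1',
    ]))
    # ['Coverage 2 Units devel Python 3.12', 'Coverage 1 Sanity devel Test 1']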
diff --git a/.azure-pipelines/scripts/combine-coverage.py b/.azure-pipelines/scripts/combine-coverage.py index 506ade6460..3b2fd993db 100755 --- a/.azure-pipelines/scripts/combine-coverage.py +++ b/.azure-pipelines/scripts/combine-coverage.py @@ -1,4 +1,8 @@ #!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + """ Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job. Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}" diff --git a/.azure-pipelines/scripts/process-results.sh b/.azure-pipelines/scripts/process-results.sh index f3f1d1bae8..1f4b8e4f10 100755 --- a/.azure-pipelines/scripts/process-results.sh +++ b/.azure-pipelines/scripts/process-results.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Check the test results and set variables for use in later steps. set -o pipefail -eu diff --git a/.azure-pipelines/scripts/publish-codecov.py b/.azure-pipelines/scripts/publish-codecov.py new file mode 100755 index 0000000000..58e32f6d37 --- /dev/null +++ b/.azure-pipelines/scripts/publish-codecov.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +""" +Upload code coverage reports to codecov.io. +Multiple coverage files from multiple languages are accepted and aggregated after upload. +Python coverage, as well as PowerShell and Python stubs can all be uploaded. 
+""" + +import argparse +import dataclasses +import pathlib +import shutil +import subprocess +import tempfile +import typing as t +import urllib.request + + +@dataclasses.dataclass(frozen=True) +class CoverageFile: + name: str + path: pathlib.Path + flags: t.List[str] + + +@dataclasses.dataclass(frozen=True) +class Args: + dry_run: bool + path: pathlib.Path + + +def parse_args() -> Args: + parser = argparse.ArgumentParser() + parser.add_argument('-n', '--dry-run', action='store_true') + parser.add_argument('path', type=pathlib.Path) + + args = parser.parse_args() + + # Store arguments in a typed dataclass + fields = dataclasses.fields(Args) + kwargs = {field.name: getattr(args, field.name) for field in fields} + + return Args(**kwargs) + + +def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]: + processed = [] + for file in directory.joinpath('reports').glob('coverage*.xml'): + name = file.stem.replace('coverage=', '') + + # Get flags from name + flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix + flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files + + processed.append(CoverageFile(name, file, flags)) + + return tuple(processed) + + +def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None: + for file in files: + cmd = [ + str(codecov_bin), + '--name', file.name, + '--file', str(file.path), + ] + for flag in file.flags: + cmd.extend(['--flags', flag]) + + if dry_run: + print(f'DRY-RUN: Would run command: {cmd}') + continue + + subprocess.run(cmd, check=True) + + +def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None: + if dry_run: + print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}') + return + + with urllib.request.urlopen(url) as resp: + with dest.open('w+b') as f: + # Read data in chunks rather than all at once + shutil.copyfileobj(resp, f, 64 * 1024) + + dest.chmod(flags) + + +def main(): + args = parse_args() + url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov' + with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir: + codecov_bin = pathlib.Path(tmpdir) / 'codecov' + download_file(url, codecov_bin, 0o755, args.dry_run) + + files = process_files(args.path) + upload_files(codecov_bin, files, args.dry_run) + + +if __name__ == '__main__': + main() diff --git a/.azure-pipelines/scripts/publish-codecov.sh b/.azure-pipelines/scripts/publish-codecov.sh deleted file mode 100755 index 6d184f0b8d..0000000000 --- a/.azure-pipelines/scripts/publish-codecov.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -# Upload code coverage reports to codecov.io. -# Multiple coverage files from multiple languages are accepted and aggregated after upload. -# Python coverage, as well as PowerShell and Python stubs can all be uploaded. 
- -set -o pipefail -eu - -output_path="$1" - -curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh - -for file in "${output_path}"/reports/coverage*.xml; do - name="${file}" - name="${name##*/}" # remove path - name="${name##coverage=}" # remove 'coverage=' prefix if present - name="${name%.xml}" # remove '.xml' suffix - - bash codecov.sh \ - -f "${file}" \ - -n "${name}" \ - -X coveragepy \ - -X gcov \ - -X fix \ - -X search \ - -X xcode \ - || echo "Failed to upload code coverage report to codecov.io: ${file}" -done diff --git a/.azure-pipelines/scripts/report-coverage.sh b/.azure-pipelines/scripts/report-coverage.sh index 1bd91bdc99..c08154b6f8 100755 --- a/.azure-pipelines/scripts/report-coverage.sh +++ b/.azure-pipelines/scripts/report-coverage.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Generate code coverage reports for uploading to Azure Pipelines and codecov.io. set -o pipefail -eu @@ -12,4 +16,4 @@ if ! ansible-test --help >/dev/null 2>&1; then pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check fi -ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v +ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v diff --git a/.azure-pipelines/scripts/run-tests.sh b/.azure-pipelines/scripts/run-tests.sh index a947fdf013..2cfdcf61ef 100755 --- a/.azure-pipelines/scripts/run-tests.sh +++ b/.azure-pipelines/scripts/run-tests.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Configure the test environment and run the tests. set -o pipefail -eu diff --git a/.azure-pipelines/scripts/time-command.py b/.azure-pipelines/scripts/time-command.py index 5e8eb8d4c8..85a7c3c171 100755 --- a/.azure-pipelines/scripts/time-command.py +++ b/.azure-pipelines/scripts/time-command.py @@ -1,4 +1,8 @@ #!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + """Prepends a relative timestamp to each input line from stdin and writes it to stdout.""" from __future__ import (absolute_import, division, print_function) diff --git a/.azure-pipelines/templates/coverage.yml b/.azure-pipelines/templates/coverage.yml index 1864e44410..1bf17e053a 100644 --- a/.azure-pipelines/templates/coverage.yml +++ b/.azure-pipelines/templates/coverage.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # This template adds a job for processing code coverage data. # It will upload results to Azure Pipelines and codecov.io. # Use it from a job stage that completes after all other jobs have completed. 
@@ -23,17 +28,7 @@ jobs: - bash: .azure-pipelines/scripts/report-coverage.sh displayName: Generate Coverage Report condition: gt(variables.coverageFileCount, 0) - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - # Azure Pipelines only accepts a single coverage data file. - # That means only Python or PowerShell coverage can be uploaded, but not both. - # Set the "pipelinesCoverage" variable to determine which type is uploaded. - # Use "coverage" for Python and "coverage-powershell" for PowerShell. - summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml" - displayName: Publish to Azure Pipelines - condition: gt(variables.coverageFileCount, 0) - - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)" + - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)" displayName: Publish to codecov.io condition: gt(variables.coverageFileCount, 0) continueOnError: true diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml index 4e9555dd3b..49f5d8595a 100644 --- a/.azure-pipelines/templates/matrix.yml +++ b/.azure-pipelines/templates/matrix.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template. # If this matrix template does not provide the required functionality, consider using the test template directly instead. @@ -45,11 +50,11 @@ jobs: parameters: jobs: - ${{ if eq(length(parameters.groups), 0) }}: - - ${{ each target in parameters.targets }}: - - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} - test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} - - ${{ if not(eq(length(parameters.groups), 0)) }}: - - ${{ each group in parameters.groups }}: - ${{ each target in parameters.targets }}: - - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} - test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} + - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} + test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} + - ${{ if not(eq(length(parameters.groups), 0)) }}: + - ${{ each group in parameters.groups }}: + - ${{ each target in parameters.targets }}: + - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} + test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml index 5250ed8023..b263379c06 100644 --- a/.azure-pipelines/templates/test.yml +++ b/.azure-pipelines/templates/test.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # This template uses the provided list of jobs to create one or more test jobs. # It can be used directly if needed, or through the matrix template. 
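The nested format() calls in the matrix.yml hunk above work in two passes: the first bakes the per-target format string into the group format, and the second fills in the target and the group number. A Python analogue of that expansion, assuming the template's usual defaults of '{0} - {{1}}' for nameGroupFormat and '{0}/{{1}}' for testGroupFormat (the defaults are not shown in this hunk, so treat them as an assumption):

    # Two-pass format expansion analogous to matrix.yml's nested format() calls.
    name_format = '{0}'                # per-stage value; '{0}' is the default
    test_format = '2.20/linux/{0}'     # e.g. from the Docker 2.20 stage
    name_group_format = '{0} - {{1}}'  # assumed default; '{{1}}' survives pass 1 as '{1}'
    test_group_format = '{0}/{{1}}'    # assumed default

    # Pass 1: embed the per-target format inside the group-aware format.
    name_template = name_group_format.format(name_format)  # '{0} - {1}'
    test_template = test_group_format.format(test_format)  # '2.20/linux/{0}/{1}'

    # Pass 2: fill in the target and the group, like the inner each-loops do.
    print(name_template.format('Fedora 42', 1))  # Fedora 42 - 1
    print(test_template.format('fedora42', 1))   # 2.20/linux/fedora42/1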
@@ -9,37 +14,37 @@ parameters: jobs: - ${{ each job in parameters.jobs }}: - - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} - displayName: ${{ job.name }} - container: default - workspace: - clean: all - steps: - - checkout: self - fetchDepth: $(fetchDepth) - path: $(checkoutPath) - - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" - displayName: Run Tests - - bash: .azure-pipelines/scripts/process-results.sh - condition: succeededOrFailed() - displayName: Process Results - - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" - condition: eq(variables.haveCoverageData, 'true') - displayName: Aggregate Coverage Data - - task: PublishTestResults@2 - condition: eq(variables.haveTestResults, 'true') - inputs: - testResultsFiles: "$(outputPath)/junit/*.xml" - displayName: Publish Test Results - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveBotResults, 'true') - displayName: Publish Bot Results - inputs: - targetPath: "$(outputPath)/bot/" - artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveCoverageData, 'true') - displayName: Publish Coverage Data - inputs: - targetPath: "$(Agent.TempDirectory)/coverage/" - artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} + displayName: ${{ job.name }} + container: default + workspace: + clean: all + steps: + - checkout: self + fetchDepth: $(fetchDepth) + path: $(checkoutPath) + - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" + displayName: Run Tests + - bash: .azure-pipelines/scripts/process-results.sh + condition: succeededOrFailed() + displayName: Process Results + - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" + condition: eq(variables.haveCoverageData, 'true') + displayName: Aggregate Coverage Data + - task: PublishTestResults@2 + condition: eq(variables.haveTestResults, 'true') + inputs: + testResultsFiles: "$(outputPath)/junit/*.xml" + displayName: Publish Test Results + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveBotResults, 'true') + displayName: Publish Bot Results + inputs: + targetPath: "$(outputPath)/bot/" + artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveCoverageData, 'true') + displayName: Publish Coverage Data + inputs: + targetPath: "$(Agent.TempDirectory)/coverage/" + artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..cd4bdfee65 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# YAML reformatting +d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0 +e8f965fbf8154ea177c6622da149f2ae8533bd3c +e938ca5f20651abc160ee6aba10014013d04dcc1 +eaa5e07b2866e05b6c7b5628ca92e9cb1142d008 diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index a497c3daa2..d9d291f3b1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,3 
+1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + notifications: true automerge: true files: @@ -8,9 +13,9 @@ files: support: community $actions: labels: action - $actions/system/iptables_state.py: + $actions/iptables_state.py: maintainers: quidame - $actions/system/shutdown.py: + $actions/shutdown.py: maintainers: nitzmahone samdoran aminvakil $becomes/: labels: become @@ -28,6 +33,8 @@ files: maintainers: $team_ansible_core $becomes/pmrun.py: maintainers: $team_ansible_core + $becomes/run0.py: + maintainers: konstruktoid $becomes/sesu.py: maintainers: nekonyuu $becomes/sudosu.py: @@ -45,20 +52,21 @@ files: $callbacks/cgroup_memory_recap.py: {} $callbacks/context_demo.py: {} $callbacks/counter_enabled.py: {} + $callbacks/default_without_diff.py: + maintainers: felixfontein $callbacks/dense.py: maintainers: dagwieers $callbacks/diy.py: maintainers: theque5t $callbacks/elastic.py: - maintainers: v1v keywords: apm observability - $callbacks/hipchat.py: {} + maintainers: v1v $callbacks/jabber.py: {} + $callbacks/log_plays.py: {} $callbacks/loganalytics.py: maintainers: zhcli $callbacks/logdna.py: {} $callbacks/logentries.py: {} - $callbacks/log_plays.py: {} $callbacks/logstash.py: maintainers: ujenmr $callbacks/mail.py: @@ -67,77 +75,155 @@ files: maintainers: rverchere $callbacks/null.py: {} $callbacks/opentelemetry.py: - maintainers: v1v keywords: opentelemetry observability + maintainers: v1v + $callbacks/print_task.py: + maintainers: demonpig $callbacks/say.py: - notify: chris-short - maintainers: $team_macos - labels: macos say keywords: brew cask darwin homebrew macosx macports osx + labels: macos say + maintainers: $team_macos + notify: chris-short $callbacks/selective.py: {} $callbacks/slack.py: {} $callbacks/splunk.py: {} $callbacks/sumologic.py: - maintainers: ryancurrah labels: sumologic + maintainers: ryancurrah $callbacks/syslog_json.py: maintainers: imjoseangel + $callbacks/tasks_only.py: + maintainers: felixfontein + $callbacks/timestamp.py: + maintainers: kurokobo $callbacks/unixy.py: - maintainers: akatch labels: unixy - $callbacks/yaml.py: {} + maintainers: akatch $connections/: labels: connections $connections/chroot.py: {} $connections/funcd.py: maintainers: mscherer $connections/iocage.py: {} + $connections/incus.py: + labels: incus + maintainers: stgraber $connections/jail.py: maintainers: $team_ansible_core $connections/lxc.py: {} $connections/lxd.py: - maintainers: mattclay labels: lxd + maintainers: mattclay $connections/qubes.py: maintainers: kushaldas $connections/saltstack.py: - maintainers: mscherer labels: saltstack + maintainers: mscherer + $connections/wsl.py: + maintainers: rgl $connections/zone.py: maintainers: $team_ansible_core $doc_fragments/: labels: docs_fragments + $doc_fragments/django.py: + maintainers: russoz $doc_fragments/hpe3par.py: - maintainers: farhan7500 gautamphegde labels: hpe3par + maintainers: farhan7500 gautamphegde $doc_fragments/hwc.py: - maintainers: $team_huawei labels: hwc + maintainers: $team_huawei $doc_fragments/nomad.py: - maintainers: chris93111 + maintainers: chris93111 apecnascimento + $doc_fragments/pipx.py: + maintainers: russoz $doc_fragments/xenserver.py: - maintainers: bvitnik labels: xenserver + maintainers: bvitnik + $filters/accumulate.py: + maintainers: VannTen + $filters/counter.py: + maintainers: keilr + $filters/crc32.py: + maintainers: jouir 
$filters/dict.py: maintainers: felixfontein $filters/dict_kv.py: maintainers: giner $filters/from_csv.py: maintainers: Ajpantuso - $filters/groupby: + $filters/from_ini.py: + maintainers: sscheib + $filters/groupby_as_dict.py: maintainers: felixfontein - $filters/hashids: + $filters/hashids.py: + maintainers: Ajpantuso + $filters/hashids_decode.yml: + maintainers: Ajpantuso + $filters/hashids_encode.yml: maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil + $filters/json_diff.yml: + maintainers: numo68 + $filters/json_patch.py: + maintainers: numo68 + $filters/json_patch.yml: + maintainers: numo68 + $filters/json_patch_recipe.yml: + maintainers: numo68 $filters/json_query.py: {} - $filters/list.py: + $filters/keep_keys.py: maintainers: vbotka - $filters/path_join_shim.py: - maintainers: felixfontein + $filters/lists.py: + maintainers: cfiehe + $filters/lists_difference.yml: + maintainers: cfiehe + $filters/lists_intersect.yml: + maintainers: cfiehe + $filters/lists_mergeby.py: + maintainers: vbotka + $filters/lists_symmetric_difference.yml: + maintainers: cfiehe + $filters/lists_union.yml: + maintainers: cfiehe $filters/random_mac.py: {} + $filters/remove_keys.py: + maintainers: vbotka + $filters/replace_keys.py: + maintainers: vbotka + $filters/reveal_ansible_type.py: + maintainers: vbotka $filters/time.py: maintainers: resmo + $filters/to_days.yml: + maintainers: resmo + $filters/to_hours.yml: + maintainers: resmo + $filters/to_ini.py: + maintainers: sscheib + $filters/to_milliseconds.yml: + maintainers: resmo + $filters/to_minutes.yml: + maintainers: resmo + $filters/to_months.yml: + maintainers: resmo + $filters/to_nice_yaml.yml: + maintainers: felixfontein + $filters/to_prettytable.py: + maintainers: tgadiev + $filters/to_seconds.yml: + maintainers: resmo + $filters/to_time_unit.yml: + maintainers: resmo + $filters/to_weeks.yml: + maintainers: resmo + $filters/to_yaml.py: + maintainers: felixfontein + $filters/to_yaml.yml: + maintainers: felixfontein + $filters/to_years.yml: + maintainers: resmo $filters/unicode_normalize.py: maintainers: Ajpantuso $filters/version_sort.py: @@ -148,31 +234,37 @@ files: maintainers: opoplawski $inventories/gitlab_runners.py: maintainers: morph027 + $inventories/iocage.py: + maintainers: vbotka + $inventories/icinga2.py: + maintainers: BongoEADGC6 $inventories/linode.py: - maintainers: $team_linode - labels: cloud linode keywords: linode dynamic inventory script + labels: cloud linode + maintainers: $team_linode $inventories/lxd.py: maintainers: conloos $inventories/nmap.py: {} $inventories/online.py: - maintainers: sieben + maintainers: remyleone $inventories/opennebula.py: - maintainers: feldsam - labels: cloud opennebula keywords: opennebula dynamic inventory script - $inventories/proxmox.py: - maintainers: $team_virt ilijamt - $inventories/icinga2.py: - maintainers: bongoeadgc6 + labels: cloud opennebula + maintainers: feldsam $inventories/scaleway.py: - maintainers: $team_scaleway labels: cloud scaleway - $inventories/stackpath_compute.py: - maintainers: shayrybak + maintainers: $team_scaleway $inventories/virtualbox.py: {} + $inventories/xen_orchestra.py: + maintainers: ddelnano shinuza $lookups/: labels: lookups + $lookups/binary_file.py: + maintainers: felixfontein + $lookups/bitwarden_secrets_manager.py: + maintainers: jantari + $lookups/bitwarden.py: + maintainers: lungj $lookups/cartesian.py: {} $lookups/chef_databag.py: {} $lookups/collection_version.py: @@ -180,40 +272,46 @@ files: $lookups/consul_kv.py: {} 
$lookups/credstash.py: {} $lookups/cyberarkpassword.py: - notify: cyberark-bizdev labels: cyberarkpassword + notify: cyberark-bizdev $lookups/dependent.py: maintainers: felixfontein $lookups/dig.py: - maintainers: jpmens labels: dig + maintainers: jpmens $lookups/dnstxt.py: maintainers: jpmens $lookups/dsv.py: - maintainers: amigus endlesstrax - $lookups/etcd3.py: - maintainers: eric-belhomme + ignore: amigus + maintainers: delineaKrehl tylerezimmerman $lookups/etcd.py: maintainers: jpmens + $lookups/etcd3.py: + maintainers: eric-belhomme $lookups/filetree.py: maintainers: dagwieers $lookups/flattened.py: {} + $lookups/github_app_access_token.py: + maintainers: weisheng-p blavoie $lookups/hiera.py: maintainers: jparrill $lookups/keyring.py: {} $lookups/lastpass.py: {} $lookups/lmdb_kv.py: maintainers: jpmens - $lookups/manifold.py: - maintainers: galanoff - labels: manifold + $lookups/merge_variables.py: + maintainers: rlenferink m-a-r-k-e alpex8 $lookups/onepass: - maintainers: samdoran labels: onepassword + maintainers: samdoran $lookups/onepassword.py: - maintainers: azenk scottsb + ignore: scottsb + maintainers: azenk $lookups/onepassword_raw.py: - maintainers: azenk scottsb + ignore: scottsb + maintainers: azenk + $lookups/onepassword_ssh_key.py: + maintainers: mohammedbabelly20 $lookups/passwordstore.py: {} $lookups/random_pet.py: maintainers: Akasurde @@ -223,974 +321,1258 @@ files: maintainers: konstruktoid $lookups/redis.py: maintainers: $team_ansible_core jpmens + $lookups/revbitspss.py: + maintainers: RevBits $lookups/shelvefile.py: {} $lookups/tss.py: - maintainers: amigus endlesstrax + ignore: amigus + maintainers: delineaKrehl tylerezimmerman $module_utils/: labels: module_utils + $module_utils/android_sdkmanager.py: + maintainers: shamilovstas + $module_utils/btrfs.py: + maintainers: gnfzdz + $module_utils/cmd_runner_fmt.py: + maintainers: russoz + $module_utils/cmd_runner.py: + maintainers: russoz + $module_utils/deps.py: + maintainers: russoz + $module_utils/django.py: + maintainers: russoz + $module_utils/gconftool2.py: + labels: gconftool2 + maintainers: russoz + $module_utils/gio_mime.py: + maintainers: russoz $module_utils/gitlab.py: - notify: jlozadad - maintainers: $team_gitlab - labels: gitlab keywords: gitlab source_control + labels: gitlab + maintainers: $team_gitlab + notify: jlozadad $module_utils/hwc_utils.py: - maintainers: $team_huawei - labels: huawei hwc_utils networking keywords: cloud huawei hwc + labels: huawei hwc_utils networking + maintainers: $team_huawei $module_utils/identity/keycloak/keycloak.py: maintainers: $team_keycloak + $module_utils/identity/keycloak/keycloak_clientsecret.py: + maintainers: $team_keycloak fynncfchen johncant $module_utils/ipa.py: - maintainers: $team_ipa labels: ipa + maintainers: $team_ipa + $module_utils/jenkins.py: + labels: jenkins + maintainers: russoz $module_utils/manageiq.py: - maintainers: $team_manageiq labels: manageiq + maintainers: $team_manageiq $module_utils/memset.py: - maintainers: glitchcrab labels: cloud memset $module_utils/mh/: - maintainers: russoz labels: module_helper + maintainers: russoz $module_utils/module_helper.py: - maintainers: russoz labels: module_helper + maintainers: russoz + $module_utils/net_tools/pritunl/: + maintainers: Lowess $module_utils/oracle/oci_utils.py: - maintainers: $team_oracle labels: cloud - $module_utils/pure.py: - maintainers: $team_purestorage - labels: pure pure_storage + maintainers: $team_oracle + $module_utils/pacemaker.py: + maintainers: munchtoast + 
$module_utils/pipx.py: + labels: pipx + maintainers: russoz + $module_utils/pkg_req.py: + maintainers: russoz + $module_utils/python_runner.py: + maintainers: russoz + $module_utils/puppet.py: + labels: puppet + maintainers: russoz $module_utils/redfish_utils.py: - maintainers: $team_redfish labels: redfish_utils + maintainers: $team_redfish $module_utils/remote_management/lxca/common.py: maintainers: navalkp prabhosa $module_utils/scaleway.py: - maintainers: $team_scaleway labels: cloud scaleway + maintainers: $team_scaleway + $module_utils/snap.py: + labels: snap + maintainers: russoz + $module_utils/ssh.py: + maintainers: russoz + $module_utils/systemd.py: + maintainers: NomakCooper $module_utils/storage/hpe3par/hpe3par.py: maintainers: farhan7500 gautamphegde $module_utils/utm_utils.py: - maintainers: $team_e_spirit labels: utm_utils + maintainers: $team_e_spirit + $module_utils/vardict.py: + labels: vardict + maintainers: russoz + $module_utils/wdc_redfish_utils.py: + labels: wdc_redfish_utils + maintainers: $team_wdc + $module_utils/xdg_mime.py: + maintainers: mhalano $module_utils/xenserver.py: - maintainers: bvitnik labels: xenserver - $modules/cloud/alicloud/: - maintainers: xiaozhu36 - $modules/cloud/atomic/atomic_container.py: - maintainers: giuseppe krsacme - $modules/cloud/atomic/: - maintainers: krsacme - $modules/cloud/centurylink/: - maintainers: clc-runner - $modules/cloud/dimensiondata/dimensiondata_network.py: - maintainers: aimonb tintoy - labels: dimensiondata_network - $modules/cloud/dimensiondata/dimensiondata_vlan.py: - maintainers: tintoy - $modules/cloud/heroku/heroku_collaborator.py: - maintainers: marns93 - $modules/cloud/huawei/: - maintainers: $team_huawei huaweicloud - keywords: cloud huawei hwc - $modules/cloud/linode/: - maintainers: $team_linode - $modules/cloud/linode/linode.py: - maintainers: zbal - $modules/cloud/lxc/lxc_container.py: - maintainers: cloudnull - $modules/cloud/lxd/: - ignore: hnakamur - $modules/cloud/lxd/lxd_profile.py: - maintainers: conloos - $modules/cloud/memset/: - maintainers: glitchcrab - $modules/cloud/misc/cloud_init_data_facts.py: - maintainers: resmo - $modules/cloud/misc/proxmox: - maintainers: $team_virt - labels: proxmox virt - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox.py: - maintainers: UnderGreen - ignore: skvidal - $modules/cloud/misc/proxmox_kvm.py: - maintainers: helldorado - ignore: skvidal - $modules/cloud/misc/proxmox_template.py: - maintainers: UnderGreen - ignore: skvidal - $modules/cloud/misc/rhevm.py: - maintainers: $team_virt TimothyVandenbrande - labels: rhevm virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/: - ignore: ryansb - $modules/cloud/misc/terraform.py: - maintainers: m-yosefpor rainerleber - $modules/cloud/misc/xenserver_facts.py: - maintainers: caphrim007 cheese - labels: xenserver_facts - ignore: andyhky - $modules/cloud/oneandone/: - maintainers: aajdinov edevenport - $modules/cloud/online/: - maintainers: sieben - $modules/cloud/opennebula/: - maintainers: $team_opennebula - $modules/cloud/opennebula/one_host.py: - maintainers: rvalle - $modules/cloud/oracle/oci_vcn.py: - maintainers: $team_oracle rohitChaware - $modules/cloud/ovh/: - maintainers: pascalheraud - $modules/cloud/ovh/ovh_monthly_billing.py: - maintainers: fraff - $modules/cloud/packet/packet_device.py: - maintainers: baldwinSPC t0mk teebes - $modules/cloud/packet/: - maintainers: nurfet-becirevic t0mk - $modules/cloud/packet/packet_sshkey.py: - maintainers: t0mk - 
$modules/cloud/profitbricks/: - maintainers: baldwinSPC - $modules/cloud/pubnub/pubnub_blocks.py: - maintainers: parfeon pubnub - $modules/cloud/rackspace/rax.py: - maintainers: omgjlk sivel - $modules/cloud/rackspace/: - ignore: ryansb sivel - $modules/cloud/rackspace/rax_cbs.py: - maintainers: claco - $modules/cloud/rackspace/rax_cbs_attachments.py: - maintainers: claco - $modules/cloud/rackspace/rax_cdb.py: - maintainers: jails - $modules/cloud/rackspace/rax_cdb_user.py: - maintainers: jails - $modules/cloud/rackspace/rax_cdb_database.py: - maintainers: jails - $modules/cloud/rackspace/rax_clb.py: - maintainers: claco - $modules/cloud/rackspace/rax_clb_nodes.py: - maintainers: neuroid - $modules/cloud/rackspace/rax_clb_ssl.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_files.py: - maintainers: angstwad - $modules/cloud/rackspace/rax_files_objects.py: - maintainers: angstwad - $modules/cloud/rackspace/rax_identity.py: - maintainers: claco - $modules/cloud/rackspace/rax_network.py: - maintainers: claco omgjlk - $modules/cloud/rackspace/rax_mon_alarm.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_check.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_entity.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_notification.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_notification_plan.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_queue.py: - maintainers: claco - $modules/cloud/scaleway/: - maintainers: $team_scaleway - $modules/cloud/scaleway/scaleway_database_backup.py: - maintainers: guillaume_ro_fr - $modules/cloud/scaleway/scaleway_image_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_ip_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_organization_info.py: - maintainers: sieben Spredzy - $modules/cloud/scaleway/scaleway_security_group.py: - maintainers: DenBeke - $modules/cloud/scaleway/scaleway_security_group_info.py: - maintainers: sieben Spredzy - $modules/cloud/scaleway/scaleway_security_group_rule.py: - maintainers: DenBeke - $modules/cloud/scaleway/scaleway_server_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_snapshot_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_volume.py: - labels: scaleway_volume - ignore: hekonsek - $modules/cloud/scaleway/scaleway_volume_info.py: - maintainers: Spredzy - $modules/cloud/smartos/: - maintainers: $team_solaris - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/cloud/smartos/nictagadm.py: - maintainers: SmithX10 - $modules/cloud/softlayer/sl_vm.py: - maintainers: mcltn - $modules/cloud/spotinst/spotinst_aws_elastigroup.py: - maintainers: talzur - $modules/cloud/univention/: - maintainers: keachi - $modules/cloud/webfaction/: - maintainers: quentinsf - $modules/cloud/xenserver/: maintainers: bvitnik - $modules/clustering/consul/: - maintainers: $team_consul - ignore: colin-nolan - $modules/clustering/etcd3.py: - maintainers: evrardjp - ignore: vfauth - $modules/clustering/nomad/: - maintainers: chris93111 - $modules/clustering/pacemaker_cluster.py: - maintainers: matbu - $modules/clustering/znode.py: - maintainers: treyperry - $modules/database/aerospike/aerospike_migrations.py: + $module_utils/xfconf.py: + labels: xfconf + maintainers: russoz + $modules/aerospike_migrations.py: maintainers: Alb0t - $modules/database/influxdb/: - maintainers: kamsz - 
$modules/database/influxdb/influxdb_query.py: - maintainers: resmo - $modules/database/influxdb/influxdb_user.py: - maintainers: zhhuta - $modules/database/influxdb/influxdb_write.py: - maintainers: resmo - $modules/database/misc/elasticsearch_plugin.py: - maintainers: ThePixelDeveloper samdoran - $modules/database/misc/kibana_plugin.py: - maintainers: barryib - $modules/database/misc/odbc.py: - maintainers: john-westcott-iv - $modules/database/misc/redis.py: - maintainers: slok - $modules/database/misc/redis_info.py: - maintainers: levonet - $modules/database/misc/redis_data_info.py: - maintainers: paginabianca - $modules/database/misc/redis_data.py: - maintainers: paginabianca - $modules/database/misc/redis_data_incr.py: - maintainers: paginabianca - $modules/database/misc/riak.py: - maintainers: drewkerrigan jsmartin - $modules/database/mssql/mssql_db.py: - maintainers: vedit Jmainguy kenichi-ogawa-1988 - labels: mssql_db - $modules/database/mssql/mssql_script.py: - maintainers: kbudde - labels: mssql_script - $modules/database/saphana/hana_query.py: - maintainers: rainerleber - $modules/database/vertica/: - maintainers: dareko - $modules/files/archive.py: - maintainers: bendoh - $modules/files/filesize.py: - maintainers: quidame - $modules/files/ini_file.py: - maintainers: jpmens noseka1 - $modules/files/iso_create.py: - maintainers: Tomorrow9 - $modules/files/iso_extract.py: - maintainers: dagwieers jhoekx ribbons - $modules/files/read_csv.py: - maintainers: dagwieers - $modules/files/sapcar_extract.py: - maintainers: RainerLeber - $modules/files/xattr.py: - maintainers: bcoca - labels: xattr - $modules/files/xml.py: - maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 - labels: m:xml xml - ignore: magnus919 - $modules/identity/ipa/: - maintainers: $team_ipa - $modules/identity/ipa/ipa_pwpolicy.py: - maintainers: adralioh - $modules/identity/ipa/ipa_service.py: - maintainers: cprh - $modules/identity/ipa/ipa_vault.py: - maintainers: jparrill - $modules/identity/keycloak/: - maintainers: $team_keycloak - $modules/identity/keycloak/keycloak_authentication.py: - maintainers: elfelip Gaetan2907 - $modules/identity/keycloak/keycloak_clientscope.py: - maintainers: Gaetan2907 - $modules/identity/keycloak/keycloak_client_rolemapping.py: - maintainers: Gaetan2907 - $modules/identity/keycloak/keycloak_group.py: - maintainers: adamgoossens - $modules/identity/keycloak/keycloak_identity_provider.py: - maintainers: laurpaum - $modules/identity/keycloak/keycloak_realm.py: - maintainers: kris2kris - $modules/identity/keycloak/keycloak_role.py: - maintainers: laurpaum - $modules/identity/keycloak/keycloak_user_federation.py: - maintainers: laurpaum - $modules/identity/onepassword_info.py: - maintainers: Rylon - $modules/identity/opendj/opendj_backendprop.py: - maintainers: dj-wasabi - $modules/monitoring/airbrake_deployment.py: - maintainers: phumpal + $modules/airbrake_deployment.py: + ignore: bpennypacker labels: airbrake_deployment - ignore: bpennypacker - $modules/monitoring/bigpanda.py: - maintainers: hkariti - $modules/monitoring/circonus_annotation.py: - maintainers: NickatEpic - $modules/monitoring/datadog/datadog_event.py: - maintainers: n0ts - labels: datadog_event - ignore: arturaz - $modules/monitoring/datadog/datadog_downtime.py: - maintainers: Datadog - $modules/monitoring/datadog/datadog_monitor.py: - maintainers: skornehl - $modules/monitoring/honeybadger_deployment.py: - maintainers: stympy - $modules/monitoring/icinga2_feature.py: - maintainers: nerzhul - 
$modules/monitoring/icinga2_host.py: - maintainers: t794104 - $modules/monitoring/librato_annotation.py: - maintainers: Sedward - $modules/monitoring/logentries.py: - labels: logentries - ignore: ivanvanderbyl - $modules/monitoring/logstash_plugin.py: - maintainers: nerzhul - $modules/monitoring/monit.py: - maintainers: dstoflet brian-brazil snopoke - labels: monit - $modules/monitoring/nagios.py: - maintainers: tbielawa tgoetheyn - $modules/monitoring/newrelic_deployment.py: - maintainers: mcodd - $modules/monitoring/pagerduty.py: - maintainers: suprememoocow thaumos - labels: pagerduty - ignore: bpennypacker - $modules/monitoring/pagerduty_alert.py: - maintainers: ApsOps - $modules/monitoring/pagerduty_change.py: - maintainers: adamvaughan - $modules/monitoring/pagerduty_user.py: - maintainers: zanssa - $modules/monitoring/pingdom.py: - maintainers: thaumos - $modules/monitoring/rollbar_deployment.py: - maintainers: kavu - $modules/monitoring/sensu/sensu_check.py: - maintainers: andsens - $modules/monitoring/sensu/: - maintainers: dmsimard - $modules/monitoring/sensu/sensu_silence.py: - maintainers: smbambling - $modules/monitoring/sensu/sensu_subscription.py: - maintainers: andsens - $modules/monitoring/spectrum_device.py: - maintainers: orgito - $modules/monitoring/spectrum_model_attrs.py: - maintainers: tgates81 - $modules/monitoring/stackdriver.py: - maintainers: bwhaley - $modules/monitoring/statsd.py: - maintainers: mamercad - $modules/monitoring/statusio_maintenance.py: - maintainers: bhcopeland - $modules/monitoring/uptimerobot.py: - maintainers: nate-kingsley - $modules/net_tools/cloudflare_dns.py: - maintainers: mgruener - labels: cloudflare_dns - $modules/net_tools/dnsimple.py: - maintainers: drcapulet - $modules/net_tools/dnsmadeeasy.py: - maintainers: briceburg - $modules/net_tools/gandi_livedns.py: - maintainers: gthiemonge - $modules/net_tools/haproxy.py: - maintainers: ravibhure Normo - $modules/net_tools/infinity/infinity.py: - maintainers: MeganLiu - $modules/net_tools/ip_netns.py: - maintainers: bregman-arie - $modules/net_tools/ipify_facts.py: - maintainers: resmo - $modules/net_tools/ipinfoio_facts.py: - maintainers: akostyuk - $modules/net_tools/ipwcli_dns.py: - maintainers: cwollinger - $modules/net_tools/ldap/ldap_attrs.py: - maintainers: drybjed jtyr noles - $modules/net_tools/ldap/ldap_entry.py: - maintainers: jtyr - $modules/net_tools/ldap/ldap_passwd.py: - maintainers: KellerFuchs jtyr - $modules/net_tools/ldap/ldap_search.py: - maintainers: eryx12o45 jtyr - $modules/net_tools/lldp.py: - labels: lldp - ignore: andyhky - $modules/net_tools/netcup_dns.py: - maintainers: nbuchwitz - $modules/net_tools/nsupdate.py: - maintainers: nerzhul - $modules/net_tools/omapi_host.py: - maintainers: amasolov nerzhul - $modules/net_tools/pritunl/: - maintainers: Lowess - $modules/net_tools/nmcli.py: - maintainers: alcamie101 - $modules/net_tools/snmp_facts.py: - maintainers: ogenstad ujwalkomarla - $modules/notification/bearychat.py: - maintainers: tonyseek - $modules/notification/campfire.py: - maintainers: fabulops - $modules/notification/catapult.py: - maintainers: Jmainguy - $modules/notification/cisco_webex.py: - maintainers: drew-russell - $modules/notification/discord.py: - maintainers: cwollinger - $modules/notification/flowdock.py: - maintainers: mcodd - $modules/notification/grove.py: - maintainers: zimbatm - $modules/notification/hipchat.py: - maintainers: pb8226 shirou - $modules/notification/irc.py: - maintainers: jpmens sivel - $modules/notification/jabber.py: - 
maintainers: bcoca - $modules/notification/logentries_msg.py: - maintainers: jcftang - $modules/notification/mail.py: - maintainers: dagwieers - $modules/notification/matrix.py: - maintainers: jcgruenhage - $modules/notification/mattermost.py: - maintainers: bjolivot - $modules/notification/mqtt.py: - maintainers: jpmens - $modules/notification/nexmo.py: - maintainers: sivel - $modules/notification/office_365_connector_card.py: - maintainers: marc-sensenich - $modules/notification/pushbullet.py: - maintainers: willybarro - $modules/notification/pushover.py: - maintainers: weaselkeeper wopfel - $modules/notification/rocketchat.py: - maintainers: Deepakkothandan - labels: rocketchat - ignore: ramondelafuente - $modules/notification/say.py: - maintainers: $team_ansible_core mpdehaan - $modules/notification/sendgrid.py: - maintainers: makaimc - $modules/notification/slack.py: - maintainers: ramondelafuente - $modules/notification/syslogger.py: - maintainers: garbled1 - $modules/notification/telegram.py: - maintainers: tyouxa loms lomserman - $modules/notification/twilio.py: - maintainers: makaimc - $modules/notification/typetalk.py: - maintainers: tksmd - $modules/packaging/language/ansible_galaxy_install.py: - maintainers: russoz - $modules/packaging/language/bower.py: - maintainers: mwarkentin - $modules/packaging/language/bundler.py: - maintainers: thoiberg - $modules/packaging/language/composer.py: - maintainers: dmtrs - ignore: resmo - $modules/packaging/language/cpanm.py: - maintainers: fcuny russoz - $modules/packaging/language/easy_install.py: - maintainers: mattupstate - $modules/packaging/language/gem.py: - maintainers: $team_ansible_core johanwiren - labels: gem - $modules/packaging/language/maven_artifact.py: - maintainers: tumbl3w33d turb - labels: maven_artifact - ignore: chrisisbeef - $modules/packaging/language/npm.py: - maintainers: shane-walker xcambar - labels: npm - ignore: chrishoffman - $modules/packaging/language/pear.py: - labels: pear - ignore: jle64 - $modules/packaging/language/pip_package_info.py: - maintainers: bcoca matburt maxamillion - $modules/packaging/language/pipx.py: - maintainers: russoz - $modules/packaging/language/yarn.py: - maintainers: chrishoffman verkaufer - $modules/packaging/os/apk.py: - maintainers: tdtrask - labels: apk - ignore: kbrebanov - $modules/packaging/os/apt_repo.py: - maintainers: obirvalger - $modules/packaging/os/apt_rpm.py: - maintainers: evgkrsk - $modules/packaging/os/copr.py: - maintainers: schlupov - $modules/packaging/os/dnf_versionlock.py: - maintainers: moreda - $modules/packaging/os/flatpak.py: - maintainers: $team_flatpak - $modules/packaging/os/flatpak_remote.py: - maintainers: $team_flatpak - $modules/packaging/os/pkg5: - maintainers: $team_solaris mavit - labels: pkg5 solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/packaging/os/homebrew.py: - notify: chris-short - maintainers: $team_macos andrew-d - labels: homebrew macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/homebrew_cask.py: - notify: chris-short - maintainers: $team_macos enriclluelles - labels: homebrew_ macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/homebrew_tap.py: - notify: chris-short - maintainers: $team_macos - labels: homebrew_ macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/installp.py: - maintainers: $team_aix 
kairoaraujo - labels: aix installp + maintainers: phumpal + $modules/aix: keywords: aix efix lpar wpar - $modules/packaging/os/layman.py: - maintainers: jirutka - $modules/packaging/os/macports.py: - notify: chris-short - maintainers: $team_macos jcftang - labels: macos macports - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/mas.py: - maintainers: lukasbestle mheap - $modules/packaging/os/openbsd_pkg.py: - maintainers: $team_bsd eest - labels: bsd openbsd_pkg - ignore: ryansb - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/opkg.py: - maintainers: skinp - $modules/packaging/os/pacman.py: - maintainers: elasticdog indrajitr tchernomax - labels: pacman - ignore: elasticdog - $modules/packaging/os/pacman_key.py: - maintainers: grawlinson - labels: pacman - $modules/packaging/os/pkgin.py: - maintainers: $team_solaris L2G jasperla szinck martinm82 - labels: pkgin solaris - $modules/packaging/os/pkgng.py: - maintainers: $team_bsd bleader - labels: bsd pkgng - ignore: bleader - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/pkgutil.py: - maintainers: $team_solaris dermute - labels: pkgutil solaris - $modules/packaging/os/portage.py: - maintainers: Tatsh wltjr - labels: portage - ignore: sayap - $modules/packaging/os/portinstall.py: - maintainers: $team_bsd berenddeboer - labels: bsd portinstall - ignore: ryansb - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/pulp_repo.py: - maintainers: sysadmind - $modules/packaging/os/redhat_subscription.py: - maintainers: barnabycourt alikins kahowell - labels: redhat_subscription - $modules/packaging/os/rhn_channel.py: - maintainers: vincentvdk alikins $team_rhn - labels: rhn_channel - $modules/packaging/os/rhn_register.py: - maintainers: jlaska $team_rhn - labels: rhn_register - $modules/packaging/os/rhsm_release.py: - maintainers: seandst - $modules/packaging/os/rhsm_repository.py: - maintainers: giovannisciortino - $modules/packaging/os/rpm_ostree_pkg.py: - maintainers: dustymabe Akasurde - $modules/packaging/os/slackpkg.py: - maintainers: KimNorgaard - $modules/packaging/os/snap.py: - maintainers: angristan vcarceler - labels: snap - $modules/packaging/os/snap_alias.py: - maintainers: russoz - labels: snap - $modules/packaging/os/sorcery.py: - maintainers: vaygr - $modules/packaging/os/svr4pkg.py: - maintainers: $team_solaris brontitall - labels: solaris svr4pkg - $modules/packaging/os/swdepot.py: - maintainers: $team_hpux melodous - labels: hpux swdepot - keywords: hp-ux - $modules/packaging/os/swupd.py: - maintainers: hnanni albertomurillo - labels: swupd - $modules/packaging/os/urpmi.py: - maintainers: pmakowski - $modules/packaging/os/xbps.py: - maintainers: dinoocch the-maldridge - $modules/packaging/os/yum_versionlock.py: - maintainers: florianpaulhoberg aminvakil - $modules/packaging/os/zypper.py: - maintainers: $team_suse - labels: zypper - ignore: dirtyharrycallahan robinro - $modules/packaging/os/zypper_repository.py: - maintainers: $team_suse - labels: zypper - ignore: matze - $modules/remote_management/cobbler/: - maintainers: dagwieers - $modules/remote_management/hpilo/: - maintainers: haad - ignore: dagwieers - $modules/remote_management/imc/imc_rest.py: - maintainers: dagwieers - labels: cisco - $modules/remote_management/ipmi/: - maintainers: bgaifullin cloudnull - $modules/remote_management/lenovoxcc/: - maintainers: panyy3 
renxulei - $modules/remote_management/lxca/: - maintainers: navalkp prabhosa - $modules/remote_management/manageiq/: - labels: manageiq - maintainers: $team_manageiq - $modules/remote_management/manageiq/manageiq_group.py: - maintainers: evertmulder - $modules/remote_management/manageiq/manageiq_tenant.py: - maintainers: evertmulder - $modules/remote_management/oneview/: - maintainers: adriane-cardozo fgbulsoni tmiotto - $modules/remote_management/oneview/oneview_datacenter_info.py: - maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr - $modules/remote_management/oneview/oneview_fc_network.py: - maintainers: fgbulsoni - $modules/remote_management/oneview/oneview_fcoe_network.py: - maintainers: fgbulsoni - $modules/remote_management/redfish/: - maintainers: $team_redfish - ignore: jose-delarosa - $modules/remote_management/stacki/stacki_host.py: - maintainers: bsanders bbyhuy - labels: stacki_host - $modules/remote_management/wakeonlan.py: - maintainers: dagwieers - $modules/source_control/bitbucket/: - maintainers: catcombo - $modules/source_control/bzr.py: - maintainers: andreparames - $modules/source_control/git_config.py: - maintainers: djmattyg007 mgedmin - $modules/source_control/github/github_deploy_key.py: - maintainers: bincyber - $modules/source_control/github/github_issue.py: - maintainers: Akasurde - $modules/source_control/github/github_key.py: - maintainers: erydo - labels: github_key - ignore: erydo - $modules/source_control/github/github_release.py: - maintainers: adrianmoisey - $modules/source_control/github/github_repo.py: - maintainers: atorrescogollo - $modules/source_control/github/: - maintainers: stpierre - $modules/source_control/gitlab/: - notify: jlozadad - maintainers: $team_gitlab - keywords: gitlab source_control - $modules/source_control/gitlab/gitlab_project_variable.py: - maintainers: markuman - $modules/source_control/gitlab/gitlab_runner.py: - maintainers: SamyCoenen - $modules/source_control/gitlab/gitlab_user.py: - maintainers: LennertMertens stgrace - $modules/source_control/hg.py: - maintainers: yeukhon - $modules/storage/emc/emc_vnx_sg_member.py: - maintainers: remixtj - $modules/storage/hpe3par/ss_3par_cpg.py: - maintainers: farhan7500 gautamphegde - $modules/storage/ibm/: - maintainers: tzure - $modules/storage/vexata/: - maintainers: vexata - $modules/storage/zfs/: - maintainers: $team_solaris - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/storage/zfs/zfs.py: - maintainers: johanwiren - $modules/storage/zfs/zfs_delegate_admin.py: - maintainers: natefoo - $modules/system/aix: - maintainers: $team_aix labels: aix - keywords: aix efix lpar wpar - $modules/system/alternatives.py: - maintainers: mulby - labels: alternatives - ignore: DavidWittman - $modules/system/aix_lvol.py: - maintainers: adejoux - $modules/system/awall.py: - maintainers: tdtrask - $modules/system/beadm.py: - maintainers: $team_solaris - labels: beadm solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/system/capabilities.py: - maintainers: natefoo - $modules/system/cronvar.py: - maintainers: dougluce - $modules/system/crypttab.py: - maintainers: groks - $modules/system/dconf.py: - maintainers: azaghal - $modules/system/dpkg_divert.py: - maintainers: quidame - $modules/system/facter.py: - maintainers: $team_ansible_core gamethis - labels: facter - $modules/system/filesystem.py: - maintainers: pilou- abulimov quidame - 
labels: filesystem - $modules/system/gconftool2.py: - maintainers: Akasurde kevensen - labels: gconftool2 - $modules/system/interfaces_file.py: - maintainers: obourdon hryamzik - labels: interfaces_file - $modules/system/iptables_state.py: - maintainers: quidame - $modules/system/shutdown.py: - maintainers: nitzmahone samdoran aminvakil - $modules/system/java_cert.py: - maintainers: haad absynth76 - $modules/system/java_keystore.py: - maintainers: Mogztter quidame - $modules/system/kernel_blacklist.py: - maintainers: matze - $modules/system/launchd.py: - maintainers: martinm82 - $modules/system/lbu.py: - maintainers: kunkku - $modules/system/listen_ports_facts.py: - maintainers: ndavison - $modules/system/locale_gen.py: - maintainers: AugustusKling - $modules/system/lvg.py: - maintainers: abulimov - $modules/system/lvol.py: - maintainers: abulimov jhoekx zigaSRC unkaputtbar112 - $modules/system/make.py: - maintainers: LinusU - $modules/system/mksysb.py: maintainers: $team_aix - labels: aix mksysb - $modules/system/modprobe.py: - maintainers: jdauphant mattjeffery - labels: modprobe - ignore: stygstra - $modules/system/nosh.py: - maintainers: tacatac - $modules/system/ohai.py: - maintainers: $team_ansible_core mpdehaan - labels: ohai - $modules/system/open_iscsi.py: - maintainers: srvg - $modules/system/openwrt_init.py: - maintainers: agaffney - $modules/system/osx_defaults.py: - notify: chris-short - maintainers: $team_macos notok - labels: macos osx_defaults - keywords: brew cask darwin homebrew macosx macports osx - $modules/system/pam_limits.py: - maintainers: giovannisciortino - labels: pam_limits - ignore: usawa - $modules/system/pamd.py: - maintainers: kevensen - $modules/system/parted.py: - maintainers: ColOfAbRiX rosowiecki jake2184 - $modules/system/pids.py: - maintainers: saranyasridharan - $modules/system/puppet.py: - maintainers: nibalizer emonty - labels: puppet - $modules/system/python_requirements_info.py: - maintainers: willthames - ignore: ryansb - $modules/system/runit.py: - maintainers: jsumners - $modules/system/sap_task_list_execute: - maintainers: rainerleber - $modules/system/sefcontext.py: - maintainers: dagwieers - $modules/system/selinux_permissive.py: - maintainers: mscherer - $modules/system/selogin.py: - maintainers: bachradsusi dankeder jamescassell - $modules/system/seport.py: - maintainers: dankeder - $modules/system/solaris_zone.py: - maintainers: $team_solaris pmarkham - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/system/ssh_config.py: - maintainers: gaqzi Akasurde - $modules/system/svc.py: - maintainers: bcoca - $modules/system/syspatch.py: - maintainers: precurse - $modules/system/sysrc.py: - maintainers: dlundgren - $modules/system/sysupgrade.py: - maintainers: precurse - $modules/system/timezone.py: - maintainers: indrajitr jasperla tmshn - $modules/system/ufw.py: - notify: felixfontein - maintainers: ahtik ovcharenko pyykkis - labels: ufw - $modules/system/vdo.py: - maintainers: rhawalsh bgurney-rh - $modules/system/xfconf.py: - maintainers: russoz jbenden - labels: xfconf - $modules/system/xfconf_info.py: + $modules/aix_lvol.py: + maintainers: adejoux + $modules/alerta_customer.py: + maintainers: cwollinger + $modules/ali_: + maintainers: xiaozhu36 + $modules/alternatives.py: + ignore: DavidWittman jiuka + labels: alternatives + maintainers: mulby + $modules/android_sdk.py: + maintainers: shamilovstas + $modules/ansible_galaxy_install.py: maintainers: russoz - 
labels: xfconf - $modules/system/xfs_quota.py: - maintainers: bushvin - $modules/web_infrastructure/apache2_mod_proxy.py: + $modules/apache2_mod_proxy.py: maintainers: oboukili - $modules/web_infrastructure/apache2_module.py: - maintainers: berendt n0trax + $modules/apache2_module.py: ignore: robinro - $modules/web_infrastructure/deploy_helper.py: + maintainers: berendt n0trax + $modules/apk.py: + ignore: kbrebanov + labels: apk + maintainers: tdtrask + $modules/apt_repo.py: + maintainers: obirvalger + $modules/apt_rpm.py: + maintainers: evgkrsk + $modules/archive.py: + maintainers: bendoh + $modules/atomic_: + maintainers: krsacme + $modules/atomic_container.py: + maintainers: giuseppe krsacme + $modules/awall.py: + maintainers: tdtrask + $modules/beadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: beadm solaris + maintainers: $team_solaris + $modules/bigpanda.py: + ignore: hkariti + $modules/bitbucket_: + maintainers: catcombo + $modules/bootc_manage.py: + maintainers: cooktheryan + $modules/bower.py: + maintainers: mwarkentin + $modules/btrfs_: + maintainers: gnfzdz + $modules/bundler.py: + maintainers: thoiberg + $modules/bzr.py: + maintainers: andreparames + $modules/campfire.py: + maintainers: fabulops + $modules/capabilities.py: + maintainers: natefoo + $modules/cargo.py: + maintainers: radek-sprta + $modules/catapult.py: + maintainers: Jmainguy + $modules/circonus_annotation.py: + maintainers: NickatEpic + $modules/cisco_webex.py: + maintainers: drew-russell + $modules/cloud_init_data_facts.py: + maintainers: resmo + $modules/cloudflare_dns.py: + labels: cloudflare_dns + maintainers: mgruener + $modules/cobbler_: + maintainers: dagwieers + $modules/composer.py: + ignore: resmo + maintainers: dmtrs + $modules/consul: + ignore: colin-nolan Hakon + maintainers: $team_consul + $modules/copr.py: + maintainers: schlupov + $modules/cpanm.py: + maintainers: fcuny russoz + $modules/cronvar.py: + maintainers: dougluce + $modules/crypttab.py: + maintainers: groks + $modules/datadog_downtime.py: + maintainers: Datadog + $modules/datadog_event.py: + ignore: arturaz + labels: datadog_event + maintainers: n0ts + $modules/datadog_monitor.py: + ignore: skornehl + $modules/dconf.py: + maintainers: azaghal + $modules/decompress.py: + maintainers: shamilovstas + $modules/deploy_helper.py: maintainers: ramondelafuente - $modules/web_infrastructure/django_manage.py: + $modules/dimensiondata_network.py: + labels: dimensiondata_network + maintainers: aimonb tintoy + $modules/dimensiondata_vlan.py: + maintainers: tintoy + $modules/discord.py: + maintainers: cwollinger + $modules/django_check.py: maintainers: russoz + $modules/django_command.py: + maintainers: russoz + $modules/django_createcachetable.py: + maintainers: russoz + $modules/django_dumpdata.py: + maintainers: russoz + $modules/django_loaddata.py: + maintainers: russoz + $modules/django_manage.py: ignore: scottanderson42 tastychutney labels: django_manage - $modules/web_infrastructure/ejabberd_user.py: + maintainers: russoz + $modules/dnf_versionlock.py: + maintainers: moreda + $modules/dnf_config_manager.py: + maintainers: ahyattdev + $modules/dnsimple.py: + maintainers: drcapulet + $modules/dnsimple_info.py: + maintainers: edhilgendorf + $modules/dnsmadeeasy.py: + maintainers: briceburg + $modules/dpkg_divert.py: + maintainers: quidame + $modules/easy_install.py: + maintainers: mattupstate + $modules/ejabberd_user.py: maintainers: privateip - 
$modules/web_infrastructure/gunicorn.py: - maintainers: agmezr - $modules/web_infrastructure/htpasswd.py: - maintainers: $team_ansible_core - labels: htpasswd - $modules/web_infrastructure/jboss.py: - maintainers: $team_jboss jhoekx - labels: jboss - $modules/web_infrastructure/jenkins_build.py: - maintainers: brettmilford unnecessary-username - $modules/web_infrastructure/jenkins_job.py: - maintainers: sermilrod - $modules/web_infrastructure/jenkins_job_info.py: + $modules/elasticsearch_plugin.py: + maintainers: ThePixelDeveloper samdoran + $modules/emc_vnx_sg_member.py: + maintainers: remixtj + $modules/etcd3.py: + ignore: vfauth + maintainers: evrardjp + $modules/facter_facts.py: + labels: facter + maintainers: russoz $team_ansible_core gamethis + $modules/filesize.py: + maintainers: quidame + $modules/filesystem.py: + labels: filesystem + maintainers: pilou- abulimov quidame + $modules/flatpak.py: + maintainers: $team_flatpak + $modules/flatpak_remote.py: + maintainers: $team_flatpak + $modules/gandi_livedns.py: + maintainers: gthiemonge + $modules/gconftool2.py: + labels: gconftool2 + maintainers: Akasurde kevensen + $modules/gconftool2_info.py: + labels: gconftool2 + maintainers: russoz + $modules/gem.py: + labels: gem + maintainers: $team_ansible_core johanwiren + $modules/gio_mime.py: + maintainers: russoz + $modules/git_config.py: + maintainers: djmattyg007 mgedmin + $modules/git_config_info.py: + maintainers: guenhter + $modules/github_: maintainers: stpierre - $modules/web_infrastructure/jenkins_plugin.py: - maintainers: jtyr - $modules/web_infrastructure/jenkins_script.py: - maintainers: hogarthj - $modules/web_infrastructure/jira.py: - maintainers: Slezhuk tarka pertoft DWSR - labels: jira - $modules/web_infrastructure/nginx_status_info.py: + $modules/github_deploy_key.py: + maintainers: bincyber + $modules/github_issue.py: + maintainers: Akasurde + $modules/github_key.py: + ignore: erydo + labels: github_key + maintainers: erydo + $modules/github_release.py: + maintainers: adrianmoisey + $modules/github_repo.py: + maintainers: atorrescogollo + $modules/gitlab_: + keywords: gitlab source_control + maintainers: $team_gitlab + notify: jlozadad + ignore: dj-wasabi + $modules/gitlab_branch.py: + maintainers: paytroff + $modules/gitlab_issue.py: + maintainers: zvaraondrej + $modules/gitlab_label.py: + maintainers: gpongelli + $modules/gitlab_merge_request.py: + maintainers: zvaraondrej + $modules/gitlab_milestone.py: + maintainers: gpongelli + $modules/gitlab_project_variable.py: + maintainers: markuman + $modules/gitlab_instance_variable.py: + maintainers: benibr + $modules/gitlab_runner.py: + maintainers: SamyCoenen + $modules/gitlab_user.py: + maintainers: LennertMertens stgrace + $modules/gitlab_group_access_token.py: + maintainers: pixslx + $modules/gitlab_project_access_token.py: + maintainers: pixslx + $modules/grove.py: + maintainers: zimbatm + $modules/gunicorn.py: + maintainers: agmezr + $modules/haproxy.py: + maintainers: ravibhure Normo + $modules/heroku_collaborator.py: + maintainers: marns93 + $modules/hg.py: + maintainers: yeukhon + $modules/homebrew.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: homebrew macos + maintainers: $team_macos andrew-d + notify: chris-short + $modules/homebrew_cask.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos enriclluelles + notify: chris-short + $modules/homebrew_tap.py: + ignore: ryansb + keywords: brew cask 
darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos + notify: chris-short + $modules/homebrew_services.py: + ignore: ryansb + keywords: brew cask services darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos kitizz + $modules/homectl.py: + maintainers: jameslivulpi + $modules/honeybadger_deployment.py: + maintainers: stympy + $modules/hpilo_: + ignore: dagwieers + maintainers: haad + $modules/hponcfg.py: + ignore: dagwieers + maintainers: haad + $modules/htpasswd.py: + labels: htpasswd + maintainers: $team_ansible_core + $modules/hwc_: + keywords: cloud huawei hwc + maintainers: $team_huawei huaweicloud + $modules/ibm_sa_: + maintainers: tzure + $modules/icinga2_feature.py: + maintainers: nerzhul + $modules/icinga2_host.py: + maintainers: t794104 + $modules/idrac_: + ignore: jose-delarosa + maintainers: $team_redfish + $modules/ilo_: + ignore: jose-delarosa varini-hp + maintainers: $team_redfish + $modules/imc_rest.py: + labels: cisco + maintainers: dagwieers + $modules/imgadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/infinity.py: + maintainers: MeganLiu + $modules/influxdb_: + maintainers: kamsz + $modules/influxdb_query.py: maintainers: resmo - $modules/web_infrastructure/rundeck_acl_policy.py: + $modules/influxdb_user.py: + maintainers: zhhuta + $modules/influxdb_write.py: + maintainers: resmo + $modules/ini_file.py: + maintainers: jpmens noseka1 + $modules/installp.py: + keywords: aix efix lpar wpar + labels: aix installp + maintainers: $team_aix kairoaraujo + $modules/interfaces_file.py: + labels: interfaces_file + maintainers: obourdon hryamzik + $modules/ip_netns.py: + maintainers: bregman-arie + $modules/ipa_: + maintainers: $team_ipa + ignore: fxfitz + $modules/ipa_getkeytab.py: + maintainers: abakanovskii + $modules/ipa_dnsrecord.py: + maintainers: $team_ipa jwbernin + $modules/ipbase_info.py: + maintainers: dominikkukacka + $modules/ipa_pwpolicy.py: + maintainers: adralioh + $modules/ipa_service.py: + maintainers: cprh + $modules/ipa_vault.py: + maintainers: jparrill + $modules/ipify_facts.py: + maintainers: resmo + $modules/ipinfoio_facts.py: + maintainers: akostyuk + $modules/ipmi_: + maintainers: bgaifullin cloudnull + $modules/iptables_state.py: + maintainers: quidame + $modules/ipwcli_dns.py: + maintainers: cwollinger + $modules/irc.py: + maintainers: jpmens sivel + $modules/iso_create.py: + maintainers: Tomorrow9 + $modules/iso_customize.py: + maintainers: ZouYuhua + $modules/iso_extract.py: + maintainers: dagwieers jhoekx ribbons + $modules/jabber.py: + maintainers: bcoca + $modules/java_cert.py: + maintainers: haad absynth76 + $modules/java_keystore.py: + maintainers: Mogztter quidame + $modules/jboss.py: + labels: jboss + maintainers: $team_jboss jhoekx + $modules/jenkins_build.py: + maintainers: brettmilford unnecessary-username juanmcasanova + $modules/jenkins_build_info.py: + maintainers: juanmcasanova + $modules/jenkins_credential.py: + maintainers: YoussefKhalidAli + $modules/jenkins_job.py: + maintainers: sermilrod + $modules/jenkins_job_info.py: + maintainers: stpierre + $modules/jenkins_node.py: + maintainers: phyrwork + $modules/jenkins_plugin.py: + maintainers: jtyr + $modules/jenkins_script.py: + maintainers: hogarthj + $modules/jira.py: + ignore: DWSR tarka + labels: jira + maintainers: Slezhuk pertoft + $modules/kdeconfig.py: + maintainers: smeso + 
$modules/kernel_blacklist.py: + maintainers: matze + $modules/keycloak_: + maintainers: $team_keycloak + $modules/keycloak_authentication.py: + maintainers: elfelip Gaetan2907 + $modules/keycloak_authentication_required_actions.py: + maintainers: Skrekulko + $modules/keycloak_authz_authorization_scope.py: + maintainers: mattock + $modules/keycloak_authz_permission.py: + maintainers: mattock + $modules/keycloak_authz_custom_policy.py: + maintainers: mattock + $modules/keycloak_authz_permission_info.py: + maintainers: mattock + $modules/keycloak_client_rolemapping.py: + maintainers: Gaetan2907 + $modules/keycloak_clientscope.py: + maintainers: Gaetan2907 + $modules/keycloak_clientscope_type.py: + maintainers: simonpahl + $modules/keycloak_clientsecret_info.py: + maintainers: fynncfchen johncant + $modules/keycloak_clientsecret_regenerate.py: + maintainers: fynncfchen johncant + $modules/keycloak_component.py: + maintainers: fivetide + $modules/keycloak_group.py: + maintainers: adamgoossens + $modules/keycloak_identity_provider.py: + maintainers: laurpaum + $modules/keycloak_realm.py: + maintainers: kris2kris + $modules/keycloak_realm_info.py: + maintainers: fynncfchen + $modules/keycloak_realm_key.py: + maintainers: mattock + $modules/keycloak_role.py: + maintainers: laurpaum + $modules/keycloak_user.py: + maintainers: elfelip + $modules/keycloak_user_federation.py: + maintainers: laurpaum + $modules/keycloak_userprofile.py: + maintainers: yeoldegrove + $modules/keycloak_component_info.py: + maintainers: desand01 + $modules/keycloak_client_rolescope.py: + maintainers: desand01 + $modules/keycloak_user_rolemapping.py: + maintainers: bratwurzt + $modules/keycloak_realm_rolemapping.py: + maintainers: agross mhuysamen Gaetan2907 + $modules/keyring.py: + maintainers: ahussey-redhat + $modules/keyring_info.py: + maintainers: ahussey-redhat + $modules/kibana_plugin.py: + maintainers: barryib + $modules/krb_ticket.py: + maintainers: abakanovskii + $modules/launchd.py: + maintainers: martinm82 + $modules/layman.py: + maintainers: jirutka + $modules/lbu.py: + maintainers: kunkku + $modules/ldap_attrs.py: + maintainers: drybjed jtyr noles + $modules/ldap_entry.py: + maintainers: jtyr + $modules/ldap_inc.py: + maintainers: pduveau + $modules/ldap_passwd.py: + maintainers: KellerFuchs jtyr + $modules/ldap_search.py: + maintainers: eryx12o45 jtyr + $modules/librato_annotation.py: + maintainers: Sedward + $modules/linode: + maintainers: $team_linode + $modules/linode.py: + maintainers: zbal + $modules/listen_ports_facts.py: + maintainers: ndavison + $modules/lldp.py: + ignore: andyhky + labels: lldp + $modules/locale_gen.py: + maintainers: AugustusKling + $modules/logentries.py: + ignore: ivanvanderbyl + labels: logentries + $modules/logentries_msg.py: + maintainers: jcftang + $modules/logstash_plugin.py: maintainers: nerzhul - $modules/web_infrastructure/rundeck_project.py: + $modules/lvg.py: + maintainers: abulimov + $modules/lvm_pv.py: + maintainers: klention + $modules/lvm_pv_move_data.py: + maintainers: klention + $modules/lvg_rename.py: + maintainers: lszomor + $modules/lvol.py: + maintainers: abulimov jhoekx zigaSRC unkaputtbar112 + $modules/lxc_container.py: + maintainers: cloudnull + $modules/lxca_: + maintainers: navalkp prabhosa + $modules/lxd_: + ignore: hnakamur + $modules/lxd_profile.py: + maintainers: conloos + $modules/lxd_project.py: + maintainers: we10710aa + $modules/macports.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: macos macports + 
maintainers: $team_macos jcftang + notify: chris-short + $modules/mail.py: + maintainers: dagwieers + $modules/make.py: + maintainers: LinusU + $modules/manageiq_: + labels: manageiq + maintainers: $team_manageiq + $modules/manageiq_alert_profiles.py: + maintainers: elad661 + $modules/manageiq_alerts.py: + maintainers: elad661 + $modules/manageiq_group.py: + maintainers: evertmulder + $modules/manageiq_policies_info.py: + maintainers: russoz $team_manageiq + $modules/manageiq_tags_info.py: + maintainers: russoz $team_manageiq + $modules/manageiq_tenant.py: + maintainers: evertmulder + $modules/mas.py: + maintainers: lukasbestle mheap + $modules/matrix.py: + maintainers: jcgruenhage + $modules/mattermost.py: + maintainers: bjolivot + $modules/maven_artifact.py: + ignore: chrisisbeef + labels: maven_artifact + maintainers: tumbl3w33d turb + $modules/memset_: + ignore: glitchcrab + $modules/mksysb.py: + labels: aix mksysb + maintainers: $team_aix + $modules/modprobe.py: + ignore: stygstra + labels: modprobe + maintainers: jdauphant mattjeffery + $modules/monit.py: + labels: monit + maintainers: dstoflet brian-brazil snopoke + $modules/mqtt.py: + maintainers: jpmens + $modules/mssql_db.py: + labels: mssql_db + maintainers: vedit Jmainguy kenichi-ogawa-1988 + $modules/mssql_script.py: + labels: mssql_script + maintainers: kbudde + $modules/nagios.py: + maintainers: tbielawa tgoetheyn + $modules/netcup_dns.py: + maintainers: nbuchwitz + $modules/newrelic_deployment.py: + ignore: mcodd + $modules/nexmo.py: + maintainers: sivel + $modules/nginx_status_info.py: + maintainers: resmo + $modules/nictagadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris SmithX10 + $modules/nmcli.py: + maintainers: alcamie101 + $modules/nomad_: + maintainers: chris93111 apecnascimento + $modules/nosh.py: + maintainers: tacatac + $modules/npm.py: + ignore: chrishoffman + labels: npm + maintainers: shane-walker xcambar + $modules/nsupdate.py: maintainers: nerzhul - $modules/web_infrastructure/rundeck_job_run.py: + $modules/ocapi_command.py: + maintainers: $team_wdc + $modules/ocapi_info.py: + maintainers: $team_wdc + $modules/oci_vcn.py: + maintainers: $team_oracle rohitChaware + $modules/odbc.py: + maintainers: john-westcott-iv + $modules/office_365_connector_card.py: + maintainers: marc-sensenich + $modules/ohai.py: + labels: ohai + maintainers: $team_ansible_core + ignore: mpdehaan + $modules/omapi_host.py: + maintainers: amasolov nerzhul + $modules/one_: + maintainers: $team_opennebula + $modules/one_host.py: + maintainers: rvalle + $modules/one_vnet.py: + maintainers: abakanovskii + $modules/oneandone_: + maintainers: aajdinov edevenport + $modules/onepassword_info.py: + maintainers: Rylon + $modules/oneview_: + maintainers: adriane-cardozo fgbulsoni tmiotto + $modules/oneview_datacenter_info.py: + maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr + $modules/oneview_fc_network.py: + maintainers: fgbulsoni + $modules/oneview_fcoe_network.py: + maintainers: fgbulsoni + $modules/online_: + maintainers: remyleone + $modules/open_iscsi.py: + maintainers: srvg + $modules/openbsd_pkg.py: + ignore: ryansb + keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense + labels: bsd openbsd_pkg + maintainers: $team_bsd eest + $modules/opendj_backendprop.py: + maintainers: dj-wasabi + $modules/openwrt_init.py: + maintainers: agaffney + $modules/opkg.py: + maintainers: skinp + 
$modules/osx_defaults.py: + keywords: brew cask darwin homebrew macosx macports osx + labels: macos osx_defaults + maintainers: $team_macos notok + notify: chris-short + $modules/ovh_: + maintainers: pascalheraud + $modules/ovh_monthly_billing.py: + maintainers: fraff + $modules/pacemaker_cluster.py: + maintainers: matbu munchtoast + $modules/pacemaker_info.py: + maintainers: munchtoast + $modules/pacemaker_resource.py: + maintainers: munchtoast + $modules/pacemaker_stonith.py: + maintainers: munchtoast + $modules/packet_: + maintainers: nurfet-becirevic t0mk + $modules/packet_device.py: + maintainers: baldwinSPC t0mk teebes + $modules/packet_sshkey.py: + maintainers: t0mk + $modules/pacman.py: + ignore: elasticdog + labels: pacman + maintainers: elasticdog indrajitr tchernomax jraby + $modules/pacman_key.py: + labels: pacman + maintainers: grawlinson + $modules/pagerduty.py: + ignore: bpennypacker + labels: pagerduty + maintainers: suprememoocow thaumos + $modules/pagerduty_alert.py: + maintainers: ApsOps xshen1 + $modules/pagerduty_change.py: + maintainers: adamvaughan + $modules/pagerduty_user.py: + maintainers: zanssa + $modules/pam_limits.py: + ignore: usawa + labels: pam_limits + maintainers: giovannisciortino + $modules/pamd.py: + maintainers: kevensen + $modules/parted.py: + maintainers: ColOfAbRiX jake2184 + $modules/pear.py: + ignore: jle64 + labels: pear + $modules/pids.py: + maintainers: saranyasridharan + $modules/pingdom.py: + maintainers: thaumos + $modules/pip_package_info.py: + maintainers: bcoca matburt maxamillion + $modules/pipx.py: + maintainers: russoz + $modules/pipx_info.py: + maintainers: russoz + $modules/pkg5: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: pkg5 solaris + maintainers: $team_solaris mavit + $modules/pkgin.py: + labels: pkgin solaris + maintainers: $team_solaris L2G jasperla szinck martinm82 + $modules/pkgng.py: + ignore: bleader + keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense + labels: bsd pkgng + maintainers: $team_bsd bleader + $modules/pkgutil.py: + labels: pkgutil solaris + maintainers: $team_solaris dermute + $modules/pmem.py: + maintainers: mizumm + $modules/pnpm.py: + ignore: chrishoffman + maintainers: aretrosen + $modules/portage.py: + ignore: sayap + labels: portage + maintainers: Tatsh wltjr + $modules/portinstall.py: + ignore: ryansb + keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense + labels: bsd portinstall + maintainers: $team_bsd berenddeboer + $modules/pritunl_: + maintainers: Lowess + $modules/pubnub_blocks.py: + maintainers: parfeon pubnub + $modules/pulp_repo.py: + maintainers: sysadmind + $modules/puppet.py: + labels: puppet + maintainers: emonty + $modules/pushbullet.py: + maintainers: willybarro + $modules/pushover.py: + maintainers: weaselkeeper wopfel + $modules/python_requirements_info.py: + ignore: ryansb + maintainers: willthames + $modules/read_csv.py: + maintainers: dagwieers + $modules/redfish_: + ignore: jose-delarosa + maintainers: $team_redfish TSKushal + $modules/redhat_subscription.py: + labels: redhat_subscription + maintainers: $team_rhsm + ignore: barnabycourt alikins kahowell + $modules/redis.py: + maintainers: slok + $modules/redis_data.py: + maintainers: paginabianca + $modules/redis_data_incr.py: + maintainers: paginabianca + $modules/redis_data_info.py: + maintainers: paginabianca + $modules/redis_info.py: + maintainers: levonet + $modules/rhevm.py: + ignore: skvidal + keywords: 
kvm libvirt proxmox qemu + labels: rhevm virt + maintainers: $team_virt TimothyVandenbrande + $modules/rhsm_release.py: + maintainers: seandst $team_rhsm + $modules/rhsm_repository.py: + maintainers: giovannisciortino $team_rhsm + $modules/riak.py: + maintainers: drewkerrigan jsmartin + $modules/rocketchat.py: + ignore: ramondelafuente + labels: rocketchat + maintainers: Deepakkothandan + $modules/rollbar_deployment.py: + maintainers: kavu + $modules/rpm_ostree_pkg.py: + maintainers: dustymabe Akasurde + $modules/rundeck_acl_policy.py: + maintainers: nerzhul + $modules/rundeck_job_executions_info.py: maintainers: phsmith - $modules/web_infrastructure/rundeck_job_executions_info.py: + $modules/rundeck_job_run.py: maintainers: phsmith - $modules/web_infrastructure/sophos_utm/: - maintainers: $team_e_spirit - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py: - maintainers: $team_e_spirit stearz - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py: - maintainers: $team_e_spirit RickS-C137 - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py: - maintainers: stearz - $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py: - maintainers: stearz - $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py: - maintainers: steamx - $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py: - maintainers: steamx - $modules/web_infrastructure/supervisorctl.py: + $modules/rundeck_project.py: + maintainers: nerzhul + $modules/runit.py: + maintainers: jsumners + $modules/say.py: + maintainers: $team_ansible_core + ignore: mpdehaan + $modules/scaleway_: + maintainers: $team_scaleway + $modules/scaleway_compute_private_network.py: + maintainers: pastral + $modules/scaleway_container.py: + maintainers: Lunik + $modules/scaleway_container_info.py: + maintainers: Lunik + $modules/scaleway_container_namespace.py: + maintainers: Lunik + $modules/scaleway_container_namespace_info.py: + maintainers: Lunik + $modules/scaleway_container_registry.py: + maintainers: Lunik + $modules/scaleway_container_registry_info.py: + maintainers: Lunik + $modules/scaleway_database_backup.py: + maintainers: guillaume_ro_fr + $modules/scaleway_function.py: + maintainers: Lunik + $modules/scaleway_function_info.py: + maintainers: Lunik + $modules/scaleway_function_namespace.py: + maintainers: Lunik + $modules/scaleway_function_namespace_info.py: + maintainers: Lunik + $modules/scaleway_image_info.py: + maintainers: Spredzy + $modules/scaleway_ip_info.py: + maintainers: Spredzy + $modules/scaleway_organization_info.py: + maintainers: Spredzy + $modules/scaleway_private_network.py: + maintainers: pastral + $modules/scaleway_security_group.py: + maintainers: DenBeke + $modules/scaleway_security_group_info.py: + maintainers: Spredzy + $modules/scaleway_security_group_rule.py: + maintainers: DenBeke + $modules/scaleway_server_info.py: + maintainers: Spredzy + $modules/scaleway_snapshot_info.py: + maintainers: Spredzy + $modules/scaleway_volume.py: + ignore: hekonsek + labels: scaleway_volume + $modules/scaleway_volume_info.py: + maintainers: Spredzy + $modules/sefcontext.py: + maintainers: dagwieers + $modules/selinux_permissive.py: + maintainers: mscherer + $modules/selogin.py: + maintainers: bachradsusi dankeder jamescassell + $modules/sendgrid.py: + maintainers: makaimc + $modules/sensu_: + maintainers: dmsimard + $modules/sensu_check.py: + maintainers: andsens + 
$modules/sensu_silence.py: + maintainers: smbambling + $modules/sensu_subscription.py: + maintainers: andsens + $modules/seport.py: + maintainers: dankeder + $modules/serverless.py: + ignore: ryansb + $modules/shutdown.py: + maintainers: nitzmahone samdoran aminvakil + $modules/simpleinit_msb.py: + maintainers: vaygr + $modules/sl_vm.py: + maintainers: mcltn + $modules/slack.py: + maintainers: ramondelafuente + $modules/slackpkg.py: + maintainers: KimNorgaard + $modules/smartos_image_info.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/snap.py: + labels: snap + maintainers: angristan vcarceler russoz + $modules/snap_alias.py: + labels: snap + maintainers: russoz + $modules/snmp_facts.py: + maintainers: ogenstad ujwalkomarla + $modules/solaris_zone.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris pmarkham + $modules/sorcery.py: + maintainers: vaygr + $modules/spectrum_device.py: + maintainers: orgito + $modules/spectrum_model_attrs.py: + maintainers: tgates81 + $modules/spotinst_aws_elastigroup.py: + maintainers: talzur + $modules/ss_3par_cpg.py: + maintainers: farhan7500 gautamphegde + $modules/ssh_config.py: + maintainers: gaqzi Akasurde + $modules/stacki_host.py: + labels: stacki_host + maintainers: bsanders bbyhuy + $modules/statsd.py: + maintainers: mamercad + $modules/statusio_maintenance.py: + maintainers: bhcopeland + $modules/sudoers.py: + maintainers: JonEllis + $modules/supervisorctl.py: maintainers: inetfuture mattupstate - $modules/web_infrastructure/taiga_issue.py: + $modules/svc.py: + maintainers: bcoca + $modules/svr4pkg.py: + labels: solaris svr4pkg + maintainers: $team_solaris brontitall + $modules/swdepot.py: + keywords: hp-ux + labels: hpux swdepot + maintainers: $team_hpux melodous + $modules/swupd.py: + labels: swupd + maintainers: hnanni albertomurillo + $modules/syslogger.py: + maintainers: garbled1 + $modules/syspatch.py: + maintainers: precurse + $modules/sysrc.py: + maintainers: dlundgren + $modules/systemd_creds_decrypt.py: + maintainers: konstruktoid + $modules/systemd_creds_encrypt.py: + maintainers: konstruktoid + $modules/systemd_info.py: + maintainers: NomakCooper + $modules/sysupgrade.py: + maintainers: precurse + $modules/taiga_issue.py: maintainers: lekum + $modules/telegram.py: + maintainers: tyouxa loms lomserman + $modules/terraform.py: + ignore: ryansb + maintainers: m-yosefpor rainerleber + $modules/timezone.py: + maintainers: indrajitr jasperla tmshn + $modules/twilio.py: + maintainers: makaimc + $modules/typetalk.py: + maintainers: tksmd + $modules/udm_: + maintainers: keachi + $modules/ufw.py: + labels: ufw + maintainers: ahtik ovcharenko pyykkis + notify: felixfontein + $modules/uptimerobot.py: + maintainers: nate-kingsley + $modules/urpmi.py: + maintainers: pmakowski + $modules/usb_facts.py: + maintainers: maxopoly + $modules/utm_: + keywords: sophos utm + maintainers: $team_e_spirit + $modules/utm_ca_host_key_cert.py: + ignore: stearz + maintainers: $team_e_spirit + $modules/utm_ca_host_key_cert_info.py: + ignore: stearz + maintainers: $team_e_spirit + $modules/utm_network_interface_address.py: + maintainers: steamx + $modules/utm_network_interface_address_info.py: + maintainers: steamx + $modules/utm_proxy_auth_profile.py: + keywords: sophos utm + ignore: stearz + maintainers: $team_e_spirit + 
$modules/utm_proxy_exception.py: + keywords: sophos utm + maintainers: $team_e_spirit RickS-C137 + $modules/vdo.py: + maintainers: rhawalsh bgurney-rh + $modules/vertica_: + maintainers: dareko + $modules/vexata_: + maintainers: vexata + $modules/vmadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/wakeonlan.py: + maintainers: dagwieers + $modules/wdc_: + ignore: jose-delarosa + maintainers: $team_redfish + $modules/wdc_redfish_command.py: + maintainers: $team_wdc + $modules/wdc_redfish_info.py: + maintainers: $team_wdc + $modules/xattr.py: + labels: xattr + maintainers: bcoca + $modules/xbps.py: + maintainers: dinoocch the-maldridge + $modules/xcc_: + maintainers: panyy3 renxulei + $modules/xdg_mime.py: + maintainers: mhalano + $modules/xenserver_: + maintainers: bvitnik + $modules/xenserver_facts.py: + ignore: andyhky ryansb + labels: xenserver_facts + maintainers: caphrim007 cheese + $modules/xfconf.py: + labels: xfconf + maintainers: russoz jbenden + $modules/xfconf_info.py: + labels: xfconf + maintainers: russoz + $modules/xfs_quota.py: + maintainers: bushvin + $modules/xml.py: + ignore: magnus919 + labels: m:xml xml + maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 + $modules/yarn.py: + ignore: chrishoffman verkaufer + $modules/yum_versionlock.py: + maintainers: gyptazy aminvakil + $modules/zfs: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/zfs.py: + maintainers: johanwiren + $modules/zfs_delegate_admin.py: + maintainers: natefoo + $modules/znode.py: + maintainers: treyperry + $modules/zpool.py: + maintainers: tomhesse + $modules/zpool_facts: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/zypper.py: + ignore: dirtyharrycallahan robinro + labels: zypper + maintainers: $team_suse + $modules/zypper_repository.py: + ignore: matze + labels: zypper + maintainers: $team_suse + $plugin_utils/ansible_type.py: + maintainers: vbotka + $modules/zypper_repository_info.py: + labels: zypper + maintainers: $team_suse TobiasZeuch181 + $plugin_utils/keys_filter.py: + maintainers: vbotka + $plugin_utils/unsafe.py: + maintainers: felixfontein $tests/a_module.py: maintainers: felixfontein + $tests/ansible_type.py: + maintainers: vbotka + $tests/fqdn_valid.py: + maintainers: vbotka +######################### + docs/docsite/rst/filter_guide.rst: {} + docs/docsite/rst/filter_guide_abstract_informations.rst: {} + docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst: + maintainers: keilr + docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst: + maintainers: felixfontein giner + docs/docsite/rst/filter_guide_abstract_informations_grouping.rst: + maintainers: felixfontein + docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst: + maintainers: cfiehe + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst: + maintainers: vbotka + 
docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide_conversions.rst: + maintainers: Ajpantuso kellyjonbrazil + docs/docsite/rst/filter_guide_creating_identifiers.rst: + maintainers: Ajpantuso + docs/docsite/rst/filter_guide_paths.rst: {} + docs/docsite/rst/filter_guide_selecting_json_data.rst: {} + docs/docsite/rst/filter_guide_working_with_times.rst: + maintainers: resmo + docs/docsite/rst/filter_guide_working_with_unicode.rst: + maintainers: Ajpantuso + docs/docsite/rst/filter_guide_working_with_versions.rst: + maintainers: ericzolf + docs/docsite/rst/guide_alicloud.rst: + maintainers: xiaozhu36 + docs/docsite/rst/guide_cmdrunner.rst: + maintainers: russoz + docs/docsite/rst/guide_deps.rst: + maintainers: russoz + docs/docsite/rst/guide_iocage.rst: + maintainers: russoz felixfontein + docs/docsite/rst/guide_iocage_inventory.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_aliases.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_basics.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_dhcp.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_hooks.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_properties.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_tags.rst: + maintainers: vbotka + docs/docsite/rst/guide_modulehelper.rst: + maintainers: russoz + docs/docsite/rst/guide_online.rst: + maintainers: remyleone + docs/docsite/rst/guide_packet.rst: + maintainers: baldwinSPC nurfet-becirevic t0mk teebes + docs/docsite/rst/guide_scaleway.rst: + maintainers: $team_scaleway + docs/docsite/rst/guide_uthelper.rst: + maintainers: russoz + docs/docsite/rst/guide_vardict.rst: + maintainers: russoz + docs/docsite/rst/test_guide.rst: + maintainers: felixfontein ######################### tests/: labels: tests - tests/unit/: - labels: unit - support: community tests/integration: labels: integration support: community - tests/utils/: - maintainers: gundalow + tests/unit/: labels: unit + support: community + tests/utils/: + labels: unit + maintainers: gundalow macros: actions: plugins/action becomes: plugins/become caches: plugins/cache callbacks: plugins/callback - cliconfs: plugins/cliconf connections: plugins/connection doc_fragments: plugins/doc_fragments filters: plugins/filter @@ -1198,31 +1580,31 @@ macros: lookups: plugins/lookup module_utils: plugins/module_utils modules: plugins/modules - terminals: plugins/terminal + plugin_utils: plugins/plugin_utils tests: plugins/test team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo - team_consul: sgargan + team_consul: sgargan apollo13 Ilgmi team_cyberark_conjur: jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers - team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii + team_gitlab: Lunik Shaps marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit team_hpux: bcoca davx8342 team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 - team_ipa: Akasurde Nosmoht fxfitz justchris1 + team_ipa: Akasurde Nosmoht justchris1 team_jboss: Wolfant jairojunior wbrefvem - team_keycloak: eikef ndclt + team_keycloak: eikef ndclt mattock thomasbach-dev 
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding team_oracle: manojmeda mross22 nalsaber - team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 - team_redfish: mraineri tomasg2012 xmadsen renxulei - team_rhn: FlossWare alikins barnabycourt vritant - team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben + team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt + team_rhsm: cnsnyder ptoscano + team_scaleway: remyleone abarbare team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor - team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso + team_suse: commel evrardjp lrupp AnderEnder alxgu andytom sealor + team_virt: joshainglis karmab Thulium-Drake Ajpantuso + team_wdc: mikemoerk diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index d640b9aae4..4b1c1bfb95 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,149 +1,153 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: Bug report description: Create a report to help us improve body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Explain the problem briefly below. - placeholder: >- - When I try to do X with the collection from the main branch on GitHub, Y - breaks in a way Z under the env E. Here are all the details I know - about this problem... 
- validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Bug Report - validations: - required: true - -- type: textarea - attributes: - # For smaller collections we could use a multi-select and hardcode the list - # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins - # Select from list, filter as you type (`mysql` would only show the 3 mysql components) - # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. target OS versions, - network device firmware, etc. - placeholder: RHEL 8, CentOS Stream etc. - validations: - required: false - - -- type: textarea - attributes: - label: Steps to Reproduce - description: | - Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also pased any playbooks, configs and commands you used. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Expected Results - description: >- - Describe what you expected to happen when running the steps above. - placeholder: >- - I expected X to happen because I assumed Y. - that it did not. - validations: - required: true - -- type: textarea - attributes: - label: Actual Results - description: | - Describe what actually happened. If possible run with extra verbosity (`-vvvv`). - - Paste verbatim command output between quotes. - value: | - ```console (paste below) - - ``` -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. 
+ placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true + + - type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + triple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between triple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible-config dump --only-changed` between quotes. + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + + + - type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y, + but found that it did not. + validations: + required: true + + - type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output between quotes. + value: | + ```console (paste below) + + ``` + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f90bd1ad86..476eed516e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,27 +1,31 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser blank_issues_enabled: false # default: true contact_links: -- name: Security bug report - url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: | - Please learn how to report security vulnerabilities here. + - name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. - For all security related bugs, email security@ansible.com - instead of using this issue tracker and you will receive - a prompt response. + For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. - For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html -- name: Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Be nice to other members of the community. -- name: Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information - about: Please ask and answer usage questions here -- name: Working groups - url: https://github.com/ansible/community/wiki - about: Interested in improving a specific area? Become a part of a working group! -- name: For Enterprise - url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Red Hat offers support for the Ansible Automation Platform + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html + - name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. + - name: Talk to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here + - name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group!
+ - name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index cd88343d06..2ad4bce44a 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -1,125 +1,129 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: Documentation Report description: Ask us about docs # NOTE: issue body is enabled to allow screenshots body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: | - Explain the problem briefly below, add suggestions to wording or structure. + - type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, add suggestions to wording or structure. - **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? - placeholder: >- - I was reading the Collection documentation of version X and I'm having - problems understanding Y. It would be very helpful if that got - rephrased as Z. - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Documentation Report - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the rst file, module, plugin, task or - feature below, *use your best guess if unsure*. - placeholder: mysql_user - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: false - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - Paste verbatim output from `ansible-config dump --only-changed` between quotes. 
-  value: |
-    ```console (paste below)
-    $ ansible-config dump --only-changed
-
-    ```
-  validations:
-    required: false
-
-- type: textarea
-  attributes:
-    label: OS / Environment
-    description: >-
-      Provide all relevant information below, e.g. OS version,
-      browser, etc.
-    placeholder: Fedora 33, Firefox etc.
-  validations:
-    required: false
-
-- type: textarea
-  attributes:
-    label: Additional Information
-    description: |
-      Describe how this improves the documentation, e.g. before/after situation or screenshots.
-
-      **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
-
-      **HINT:** You can paste https://gist.github.com links for larger files.
-    placeholder: >-
-      When the improvement is applied, it makes it more straightforward
-      to understand X.
-  validations:
-    required: false
-
-- type: checkboxes
-  attributes:
-    label: Code of Conduct
-    description: |
-      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
-    options:
-    - label: I agree to follow the Ansible Code of Conduct
+        **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
+      placeholder: >-
+        I was reading the Collection documentation of version X and I'm having
+        problems understanding Y. It would be very helpful if that got
+        rephrased as Z.
+    validations:
+      required: true
+
+  - type: dropdown
+    attributes:
+      label: Issue Type
+      # FIXME: Once GitHub allows defining the default choice, update this
+      options:
+        - Documentation Report
+    validations:
+      required: true
+
+  - type: input
+    attributes:
+      label: Component Name
+      description: >-
+        Write the short name of the file, module, plugin, task or feature below,
+        *use your best guess if unsure*. Do not include `community.general.`!
+      placeholder: mysql_user
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Ansible Version
+      description: >-
+        Paste verbatim output from `ansible --version` between
+        triple backticks.
+      value: |
+        ```console (paste below)
+        $ ansible --version
+
+        ```
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Community.general Version
+      description: >-
+        Paste verbatim output from "ansible-galaxy collection list community.general"
+        between triple backticks.
+      value: |
+        ```console (paste below)
+        $ ansible-galaxy collection list community.general
+
+        ```
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Configuration
+      description: >-
+        Paste verbatim output from `ansible-config dump --only-changed` between triple backticks.
+      value: |
+        ```console (paste below)
+        $ ansible-config dump --only-changed
+
+        ```
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: OS / Environment
+      description: >-
+        Provide all relevant information below, e.g. OS version,
+        browser, etc.
+      placeholder: Fedora 33, Firefox etc.
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Additional Information
+      description: |
+        Describe how this improves the documentation, e.g. before/after situation or screenshots.
+
+        **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
+
+        **HINT:** You can paste https://gist.github.com links for larger files.
+      placeholder: >-
+        When the improvement is applied, it makes it more straightforward
+        to understand X.
+ validations: + required: false + + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index e676ff25ef..dc62f94c5c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,69 +1,73 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: Feature request description: Suggest an idea for this project body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Describe the new feature/improvement briefly below. - placeholder: >- - I am trying to do X with the collection from the main branch on GitHub and - I think that implementing a feature Y would be very helpful for me and - every other user of community.general because of Z. - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Feature Idea - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how the feature would be used, why it is needed and what it would solve. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: false -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Describe the new feature/improvement briefly below. + placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of community.general because of Z. 
+    validations:
+      required: true
+
+  - type: dropdown
+    attributes:
+      label: Issue Type
+      # FIXME: Once GitHub allows defining the default choice, update this
+      options:
+        - Feature Idea
+    validations:
+      required: true
+
+  - type: input
+    attributes:
+      label: Component Name
+      description: >-
+        Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
+        *Use your best guess if unsure*. Do not include `community.general.`!
+      placeholder: dnf, apt, yum, pip, user etc.
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Additional Information
+      description: |
+        Describe how the feature would be used, why it is needed and what it would solve.
+
+        **HINT:** You can paste https://gist.github.com links for larger files.
+      value: |
+
+        ```yaml (paste below)
+
+        ```
+    validations:
+      required: false
+  - type: checkboxes
+    attributes:
+      label: Code of Conduct
+      description: |
+        Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+      options:
+        - label: I agree to follow the Ansible Code of Conduct
+          required: true
...
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 1cd413055f..f71b322d2a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,6 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
-    interval:
-      schedule: "weekly"
+    schedule:
+      interval: "weekly"
+    groups:
+      ci:
+        patterns:
+          - "*"
diff --git a/.github/patchback.yml b/.github/patchback.yml
index 33ad6e84a6..5ee7812edb 100644
--- a/.github/patchback.yml
+++ b/.github/patchback.yml
@@ -1,4 +1,8 @@
 ---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
 backport_branch_prefix: patchback/backports/
 backport_label_prefix: backport-
 target_branch_prefix: stable-
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..29a2d2e36a
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,32 @@
+##### SUMMARY
+
+
+
+
+
+##### ISSUE TYPE
+
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- New Module/Plugin Pull Request
+- Refactoring Pull Request
+- Test Pull Request
+
+##### COMPONENT NAME
+
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+```paste below
+
+```
diff --git a/.github/pull_request_template.md.license b/.github/pull_request_template.md.license
new file mode 100644
index 0000000000..a1390a69ed
--- /dev/null
+++ b/.github/pull_request_template.md.license
@@ -0,0 +1,3 @@
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/.github/settings.yml b/.github/settings.yml
index 8a5b8d32f2..3e8a5f9ad8 100644
--- a/.github/settings.yml
+++ b/.github/settings.yml
@@ -1,3 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
 # DO NOT MODIFY

 # Settings:
https://probot.github.io/apps/settings/ diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml new file mode 100644 index 0000000000..616c7a843c --- /dev/null +++ b/.github/workflows/ansible-test.yml @@ -0,0 +1,176 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see +# https://github.com/marketplace/actions/ansible-test + +name: EOL CI +"on": + # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests + push: + branches: + - main + - stable-* + pull_request: + # Run EOL CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + +concurrency: + # Make sure there is at most one active run per PR, but do not cancel any non-PR runs + group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }} + cancel-in-progress: true + +jobs: + sanity: + name: EOL Sanity (Ⓐ${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - '2.17' + runs-on: ubuntu-latest + steps: + - name: Perform sanity testing + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + pull-request-change-detection: 'true' + testing-type: sanity + pre-test-cmd: >- + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + + units: + runs-on: ubuntu-latest + name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }}) + strategy: + # As soon as the first unit test fails, cancel the others to free up the CI queue + fail-fast: true + matrix: + ansible: + - '' + python: + - '' + exclude: + - ansible: '' + include: + - ansible: '2.17' + python: '3.7' + - ansible: '2.17' + python: '3.10' + - ansible: '2.17' + python: '3.12' + + steps: + - name: >- + Perform unit testing against + Ansible version ${{ matrix.ansible }} + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + pre-test-cmd: >- + mkdir -p ../../ansible + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + pull-request-change-detection: 'true' + target-python-version: ${{ matrix.python }} + testing-type: units + + integration: + runs-on: ubuntu-latest + name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }}) + strategy: + fail-fast: false + matrix: + ansible: + - '' + docker: + - '' + python: + - '' + target: + - '' + exclude: + - ansible: '' + include: + # 2.17 + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/3/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/3/ + - ansible: '2.17' + docker: 
alpine319 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/3/ + # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. + # - ansible: '2.17' + # docker: default + # python: '3.7' + # target: azp/generic/1/ + # - ansible: '2.17' + # docker: default + # python: '3.12' + # target: azp/generic/1/ + + steps: + - name: >- + Perform integration testing against + Ansible version ${{ matrix.ansible }} + under Python ${{ matrix.python }} + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + docker-image: ${{ matrix.docker }} + integration-continue-on-error: 'false' + integration-diff: 'false' + integration-retry-on-error: 'true' + # TODO: remove "--branch stable-2" from community.crypto install once we're only using ansible-core 2.17 or newer! + pre-test-cmd: >- + mkdir -p ../../ansible + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix + ; + git clone --depth=1 --single-branch --branch stable-2 https://github.com/ansible-collections/community.crypto.git ../../community/crypto + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git ../../community/docker + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + pull-request-change-detection: 'true' + target: ${{ matrix.target }} + target-python-version: ${{ matrix.python }} + testing-type: integration diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 81884ac43f..3c6776929d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,49 +1,38 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: "Code scanning - action" -on: +"on": schedule: - cron: '26 19 * * 1' + workflow_dispatch: + +permissions: + contents: read jobs: CodeQL-Build: + permissions: + actions: read # for github/codeql-action/init to get workflow details + contents: read # for actions/checkout to fetch code + security-events: write # for github/codeql-action/autobuild to send a status report runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v2 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. - fetch-depth: 2 + - name: Checkout repository + uses: actions/checkout@v5 + with: + persist-credentials: false - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: python - # Initializes the CodeQL tools for scanning. 
- - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - # Override language selection by uncommenting this and choosing your languages - # with: - # languages: go, javascript, csharp, python, cpp, java - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/nox.yml b/.github/workflows/nox.yml new file mode 100644 index 0000000000..81c6563811 --- /dev/null +++ b/.github/workflows/nox.yml @@ -0,0 +1,28 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +name: nox +'on': + push: + branches: + - main + - stable-* + pull_request: + # Run CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + workflow_dispatch: + +jobs: + nox: + runs-on: ubuntu-latest + name: "Run extra sanity tests" + steps: + - name: Check out collection + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Run nox + uses: ansible-community/antsibull-nox@main diff --git a/.gitignore b/.gitignore index c6c78b42e7..e427699798 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,9 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv -# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Created by https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks +# Edit at https://www.toptal.com/developers/gitignore?templates=vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks ### dotenv ### .env @@ -71,7 +74,19 @@ flycheck_*.el *_LOCAL_*.txt *_REMOTE_*.txt -#!! ERROR: jupyternotebook is undefined. 
Use list command to see defined gitignore types !!# +### JupyterNotebooks ### +# gitignore template for Jupyter Notebooks +# website: http://jupyter.org/ + +.ipynb_checkpoints +*/.ipynb_checkpoints/* + +# IPython +profile_default/ +ipython_config.py + +# Remove previous ipynb_checkpoints +# git rm -r .ipynb_checkpoints/ ### Linux ### @@ -87,6 +102,39 @@ flycheck_*.el # .nfs files are created when an open file is removed but is still being accessed .nfs* +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### macOS Patch ### +# iCloud generated files +*.icloud + ### PyCharm+all ### # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 @@ -151,6 +199,9 @@ atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml +# SonarLint plugin +.idea/sonarlint/ + # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties @@ -164,20 +215,13 @@ fabric.properties .idea/caches/build_file_checksums.ser ### PyCharm+all Patch ### -# Ignores the whole .idea folder and all .iml files -# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 +# Ignore everything but code style settings and run configurations +# that are supposed to be shared within teams. -.idea/ +.idea/* -# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 - -*.iml -modules.xml -.idea/misc.xml -*.ipr - -# Sonarlint plugin -.idea/sonarlint +!.idea/codeStyles +!.idea/runConfigurations ### pydev ### .pydevproject @@ -260,16 +304,13 @@ docs/_build/ target/ # Jupyter Notebook -.ipynb_checkpoints # IPython -profile_default/ -ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: -.python-version +# .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. @@ -278,7 +319,22 @@ ipython_config.py # install all needed dependencies. #Pipfile.lock -# PEP 582; used by e.g. github.com/David-OConnor/pyflow +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
@@ -320,6 +376,23 @@ dmypy.json

# Cython debug symbols
cython_debug/

+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
### Vim ###
# Swap
[._]*.s[a-v][a-z]
@@ -381,6 +454,8 @@ tags

# Cursive Clojure plugin

+# SonarLint plugin
+
# Crashlytics plugin (for Android Studio and IntelliJ)

# Editor-based Rest Client
@@ -417,6 +492,10 @@ tags
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml

+# Azure Toolkit for IntelliJ plugin
+# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
+.idea/**/azureSettings.xml
+
### Windows ###
# Windows thumbnail cache files
Thumbs.db
@@ -443,4 +522,12 @@ $RECYCLE.BIN/
# Windows shortcuts
*.lnk

-# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
+# End of https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks
+
+# Integration tests cloud configs
+tests/integration/cloud-config-*.ini
+
+
+# VSCode specific extensions
+.vscode/settings.json
+.ansible
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000000..c10d86ab19
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+extends: default
+
+ignore: |
+  /changelogs/
+
+rules:
+  line-length:
+    max: 1000
+    level: error
+  document-start: disable
+  document-end: disable
+  truthy:
+    level: error
+    allowed-values:
+      - 'true'
+      - 'false'
+  indentation:
+    spaces: 2
+    indent-sequences: true
+  key-duplicates: enable
+  trailing-spaces: enable
+  new-line-at-end-of-file: disable
+  hyphens:
+    max-spaces-after: 1
+  empty-lines:
+    max: 2
+    max-start: 0
+    max-end: 0
+  commas:
+    max-spaces-before: 0
+    min-spaces-after: 1
+    max-spaces-after: 1
+  colons:
+    max-spaces-before: 0
+    max-spaces-after: 1
+  brackets:
+    min-spaces-inside: 0
+    max-spaces-inside: 0
+  braces:
+    min-spaces-inside: 0
+    max-spaces-inside: 1
+  octal-values:
+    forbid-implicit-octal: true
+    forbid-explicit-octal: true
+  comments:
+    min-spaces-from-content: 1
+  comments-indentation: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..b35c52441b
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Placeholder changelog
+
+This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments
+under `changelogs/fragments`. On release branches, once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
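The fragments this placeholder refers to are small YAML files whose top-level keys are the section names configured further down in `changelogs/config.yaml` (`minor_changes`, `bugfixes`, and so on). As a rough sketch, with a hypothetical file name, module name and PR number, such a fragment could look like:

```yaml
# changelogs/fragments/0000-example-change.yml (hypothetical name and PR number)
minor_changes:
  - some_module - add an ``example`` option (https://github.com/ansible-collections/community.general/pull/0000).
bugfixes:
  - some_module - avoid a crash when the ``example`` option is unset (https://github.com/ansible-collections/community.general/pull/0000).
```

Note how each entry begins with a lower-case letter after `-`, ends with a period, and links to the pull request, matching the conventions requested in CONTRIBUTING.md below.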
diff --git a/CHANGELOG.md.license b/CHANGELOG.md.license
new file mode 100644
index 0000000000..edff8c7685
--- /dev/null
+++ b/CHANGELOG.md.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 7b796ddb34..119e04e170 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,5 +1,6 @@
-===============================
-Community General Release Notes
-===============================
+Placeholder changelog
+=====================

-.. contents:: Topics
+This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments
+under ``changelogs/fragments``. On release branches, once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/CHANGELOG.rst.license b/CHANGELOG.rst.license
new file mode 100644
index 0000000000..edff8c7685
--- /dev/null
+++ b/CHANGELOG.rst.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 970786ff56..94c5299069 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,3 +1,9 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
 # Contributing

 We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.
@@ -24,8 +30,10 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which

 * Try committing your changes with an informative but short commit message.
 * Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
-* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout.
-* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
+* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To keep git from automatically merging during pulls, you can configure it to rebase instead by running `git config pull.rebase true` inside the repository checkout.
+* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/collection_development_process.html#creating-a-changelog-fragment).
+  * You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. (If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
+  * Please always include a link to the pull request itself, and if the PR is about an issue, also a link to the issue. Also make sure the fragment ends with a period, and begins with a lower-case letter after `-`. (Again, if you don't do this, we'll add suggestions to fix it, so don't worry too much :) )
 * Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.

 You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
@@ -36,6 +44,136 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co

 If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.

+## Run sanity or unit tests locally (with antsibull-nox)
+
+The easiest way to run sanity and unit tests locally is to use [antsibull-nox](https://ansible.readthedocs.io/projects/antsibull-nox/).
+(If you have [nox](https://nox.thea.codes/en/stable/) installed, it will automatically install antsibull-nox in a virtual environment for you.)
+
+### Sanity tests
+
+The following commands show how to run ansible-test sanity tests:
+
+```.bash
+# Run basic sanity tests for all files in the collection:
+nox -Re ansible-test-sanity-devel
+
+# Run basic sanity tests for the given files and directories:
+nox -Re ansible-test-sanity-devel -- plugins/modules/system/pids.py tests/integration/targets/pids/
+
+# Run all other sanity tests for all files in the collection:
+nox -R
+```
+
+If you replace `-Re` with `-e`, the virtual environments will be re-created instead of re-used. The `-R` re-uses them (if they already exist).
+
+### Unit tests
+
+The following commands show how to run unit tests:
+
+```.bash
+# Run all unit tests:
+nox -Re ansible-test-units-devel
+
+# Run all unit tests for one Python version (a lot faster):
+nox -Re ansible-test-units-devel -- --python 3.13
+
+# Run a specific unit test (for the nmcli module) for one Python version:
+nox -Re ansible-test-units-devel -- --python 3.13 tests/unit/plugins/modules/net_tools/test_nmcli.py
+```
+
+If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist).
+
+## Run basic sanity, unit or integration tests locally (with ansible-test)
+
+Instead of using antsibull-nox, you can also run sanity and unit tests with ansible-test directly.
+This also allows you to run integration tests.
+
+You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure.
The short version of these instructions is:
+
+```.bash
+mkdir -p ~/dev/ansible_collections/community
+git clone https://github.com/ansible-collections/community.general.git ~/dev/ansible_collections/community/general
+cd ~/dev/ansible_collections/community/general
+```
+
+Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+.
+
+### Basic sanity tests
+
+The following commands show how to run basic sanity tests:
+
+```.bash
+# Run basic sanity tests for all files in the collection:
+ansible-test sanity --docker -v
+
+# Run basic sanity tests for the given files and directories:
+ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/
+```
+
+### Unit tests
+
+Note that for running unit tests, you need to install required collections in the same folder structure that `community.general` is checked out in.
+Right now, you need to install [`community.internal_test_tools`](https://github.com/ansible-collections/community.internal_test_tools).
+If you want to use the latest version from GitHub, you can run:
+```
+git clone https://github.com/ansible-collections/community.internal_test_tools.git ~/dev/ansible_collections/community/internal_test_tools
+```
+
+The following commands show how to run unit tests:
+
+```.bash
+# Run all unit tests:
+ansible-test units --docker -v
+
+# Run all unit tests for one Python version (a lot faster):
+ansible-test units --docker -v --python 3.8
+
+# Run a specific unit test (for the nmcli module) for one Python version:
+ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py
+```
+
+### Integration tests
+
+Note that for running integration tests, you need to install required collections in the same folder structure that `community.general` is checked out in.
+Right now, depending on the test, you need to install [`ansible.posix`](https://github.com/ansible-collections/ansible.posix), [`community.crypto`](https://github.com/ansible-collections/community.crypto), and [`community.docker`](https://github.com/ansible-collections/community.docker).
+If you want to use the latest versions from GitHub, you can run:
+```
+mkdir -p ~/dev/ansible_collections/ansible
+git clone https://github.com/ansible-collections/ansible.posix.git ~/dev/ansible_collections/ansible/posix
+git clone https://github.com/ansible-collections/community.crypto.git ~/dev/ansible_collections/community/crypto
+git clone https://github.com/ansible-collections/community.docker.git ~/dev/ansible_collections/community/docker
+```
+
+The following commands show how to run integration tests:
+
+#### In Docker
+
+Integration tests on Docker have the following parameters:
+- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run
+  `ansible-test integration --help` and look for _target docker images_.
+- `test_name` (optional): The name of the integration test.
+  For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`.
+  For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback.
+```.bash
+# Test all plugins/modules on fedora40
+ansible-test integration -v --docker fedora40
+
+# Template
+ansible-test integration -v --docker image_name test_name
+
+# Example community.general.ini_file module on fedora40 Docker image:
+ansible-test integration -v --docker fedora40 ini_file
+```
+
+#### Without isolation
+
+```.bash
+# Run integration tests for the flattened lookup **without any isolation**:
+ansible-test integration -v lookup_flattened
+```
+
+If you are unsure about the integration test target name for a module or plugin, you can take a look in `tests/integration/targets/`. Tests for plugins have the plugin type prepended.
+
 ## Creating new modules or plugins

 Creating new modules and plugins requires a bit more work than other Pull Requests.
@@ -58,13 +196,9 @@

   - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests which run in CI.

-4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
-   from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use
-   subdirectories.)
-
-   - Action plugins need to be accompanied by a module, even if the module file only contains documentation
-     (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
-     than the action plugin has in `plugins/action/`.
+4. Action plugins need to be accompanied by a module, even if the module file only contains documentation
+   (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
+   as the action plugin has in `plugins/action/`.

 5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in
    the same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
diff --git a/LICENSES/BSD-2-Clause.txt b/LICENSES/BSD-2-Clause.txt
new file mode 100644
index 0000000000..6810e04e32
--- /dev/null
+++ b/LICENSES/BSD-2-Clause.txt
@@ -0,0 +1,8 @@
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ diff --git a/LICENSES/GPL-3.0-or-later.txt b/LICENSES/GPL-3.0-or-later.txt new file mode 120000 index 0000000000..012065c853 --- /dev/null +++ b/LICENSES/GPL-3.0-or-later.txt @@ -0,0 +1 @@ +../COPYING \ No newline at end of file diff --git a/LICENSES/MIT.txt b/LICENSES/MIT.txt new file mode 100644 index 0000000000..2071b23b0e --- /dev/null +++ b/LICENSES/MIT.txt @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSES/PSF-2.0.txt b/LICENSES/PSF-2.0.txt new file mode 100644 index 0000000000..35acd7fb5f --- /dev/null +++ b/LICENSES/PSF-2.0.txt @@ -0,0 +1,48 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. 
This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/README.md b/README.md index 66cd2b6c61..726d9cb872 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,17 @@ + + # Community General Collection +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/community/general/) [![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![EOL CI](https://github.com/ansible-collections/community.general/actions/workflows/ansible-test.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) +[![Nox CI](https://github.com/ansible-collections/community.general/actions/workflows/nox.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) +[![REUSE status](https://api.reuse.software/badge/github.com/ansible-collections/community.general)](https://api.reuse.software/info/github.com/ansible-collections/community.general) This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. @@ -15,9 +25,21 @@ We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/comm If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. +## Communication + +* Join the Ansible forum: + * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. Please add appropriate tags if you start new discussions. + * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins. + * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts. + * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events. + +* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes. + +For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). 
+ ## Tested with Ansible -Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. +Tested with the current ansible-core 2.17, ansible-core 2.18, ansible-core 2.19, ansible-core 2.20 releases and the current development version of ansible-core. Ansible-core versions before 2.17.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements @@ -25,13 +47,13 @@ Some modules and plugins require external libraries. Please check the requiremen ## Included content -Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). +Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). ## Using this collection This collection is shipped with the Ansible package. So if you have it installed, no more action is required. -If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool: +If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool: ansible-galaxy collection install community.general @@ -48,7 +70,7 @@ Note that if you install the collection manually, it will not be upgraded automa ansible-galaxy collection install community.general --upgrade ``` -You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general): +You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/): ```bash ansible-galaxy collection install community.general:==X.Y.Z @@ -90,25 +112,13 @@ It is necessary for maintainers of this collection to be subscribed to: They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). -## Communication - -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. 
-
-Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).
-
-We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
-
-For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
-
-For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
-
 ## Publishing New Version

 See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.

 ## Release notes

-See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).
+See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md).

 ## Roadmap

@@ -125,6 +135,10 @@ See [this issue](https://github.com/ansible-collections/community.general/issues

 ## Licensing

-GNU General Public License v3.0 or later.
+This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.

-See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text.
+See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text.
+
+Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt).
+
+All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).
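To illustrate the REUSE convention the licensing section describes, this is the license header that the patch adds to YAML files throughout the repository; for files that cannot carry comments, equivalent lines go into an accompanying `.license` file instead:

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
```

The changelog fragments, which carry no per-file headers, are instead covered by the `REUSE.toml` annotation introduced below.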
diff --git a/REUSE.toml b/REUSE.toml new file mode 100644 index 0000000000..ff95bb8217 --- /dev/null +++ b/REUSE.toml @@ -0,0 +1,11 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +version = 1 + +[[annotations]] +path = "changelogs/fragments/**" +precedence = "aggregate" +SPDX-FileCopyrightText = "Ansible Project" +SPDX-License-Identifier = "GPL-3.0-or-later" diff --git a/antsibull-nox.toml b/antsibull-nox.toml new file mode 100644 index 0000000000..735d572599 --- /dev/null +++ b/antsibull-nox.toml @@ -0,0 +1,99 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +[collection_sources] +"ansible.posix" = "git+https://github.com/ansible-collections/ansible.posix.git,main" +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,main" +"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main" +"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main" + +[collection_sources_per_ansible.'2.16'] +# community.crypto's main branch needs ansible-core >= 2.17 +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2" + +[vcs] +vcs = "git" +development_branch = "main" +stable_branches = [ "stable-*" ] + +[sessions] + +[sessions.lint] +run_isort = false +run_black = false +run_flake8 = false +run_pylint = false +run_yamllint = true +yamllint_config = ".yamllint" +# yamllint_config_plugins = ".yamllint-docs" +# yamllint_config_plugins_examples = ".yamllint-examples" +run_mypy = false + +[sessions.docs_check] +validate_collection_refs="all" +codeblocks_restrict_types = [ + "ansible-output", + "console", + "ini", + "json", + "python", + "shell", + "yaml", + "yaml+jinja", + "text", +] +codeblocks_restrict_type_exact_case = true +codeblocks_allow_without_type = false +codeblocks_allow_literal_blocks = false + +[sessions.license_check] + +[sessions.extra_checks] +run_no_unwanted_files = true +no_unwanted_files_module_extensions = [".py"] +no_unwanted_files_yaml_extensions = [".yml"] +run_action_groups = true +run_no_trailing_whitespace = true +no_trailing_whitespace_skip_paths = [ + "tests/integration/targets/iso_extract/files/test.iso", + "tests/integration/targets/java_cert/files/testpkcs.p12", + "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz", +] +no_trailing_whitespace_skip_directories = [ + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/", + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/", +] + +[[sessions.extra_checks.action_groups_config]] +name = "consul" +pattern = "^consul_.*$" +exclusions = [ + "consul_acl_bootstrap", + "consul_kv", +] +doc_fragment = "community.general.consul.actiongroup_consul" + +[[sessions.extra_checks.action_groups_config]] +name = "keycloak" +pattern = "^keycloak_.*$" +exclusions = [ + "keycloak_realm_info", +] +doc_fragment = "community.general.keycloak.actiongroup_keycloak" + +[[sessions.extra_checks.action_groups_config]] +name = "scaleway" +pattern 
= "^scaleway_.*$" +doc_fragment = "community.general.scaleway.actiongroup_scaleway" + +[sessions.build_import_check] +run_galaxy_importer = true + +[sessions.ansible_test_sanity] +include_devel = true + +[sessions.ansible_test_units] +include_devel = true diff --git a/changelogs/.gitignore b/changelogs/.gitignore index 6be6b5331d..3d7ad8262c 100644 --- a/changelogs/.gitignore +++ b/changelogs/.gitignore @@ -1 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + /.plugin-cache.yaml diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 114b6d6b29..f8129d5d73 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,3 @@ -ancestor: 3.0.0 +--- +ancestor: 11.0.0 releases: {} diff --git a/changelogs/changelog.yaml.license b/changelogs/changelog.yaml.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/changelogs/changelog.yaml.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/changelogs/config.yaml b/changelogs/config.yaml index fd0b422a5b..578b8c3765 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -1,29 +1,43 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + changelog_filename_template: ../CHANGELOG.rst changelog_filename_version_depth: 0 changes_file: changelog.yaml changes_format: combined +ignore_other_fragment_extensions: true keep_fragments: false mention_ancestor: true -flatmap: true new_plugins_after_name: removed_features notesdir: fragments +output_formats: + - md + - rst prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Community General +trivial_section_name: trivial +use_fqcn: true +add_plugin_period: true +changelog_nice_yaml: true +changelog_sort: version +vcs: auto diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml new file mode 100644 index 0000000000..d1cfee7816 --- /dev/null +++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml @@ -0,0 +1,7 @@ +deprecated_features: + - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227). 
+ +minor_changes: + - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227). diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml new file mode 100644 index 0000000000..eec12e8669 --- /dev/null +++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231). diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml new file mode 100644 index 0000000000..29d71ca393 --- /dev/null +++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267). diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml new file mode 100644 index 0000000000..9f91040d63 --- /dev/null +++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269). diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml new file mode 100644 index 0000000000..d28e2ac833 --- /dev/null +++ b/changelogs/fragments/10271--disable_lookups.yml @@ -0,0 +1,3 @@ +bugfixes: + - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." + - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml new file mode 100644 index 0000000000..6fff590fee --- /dev/null +++ b/changelogs/fragments/10285-fstr-plugins.yml @@ -0,0 +1,7 @@ +minor_changes: + - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). 
+ - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). diff --git a/changelogs/fragments/10299-github_app_access_token-lookup.yml b/changelogs/fragments/10299-github_app_access_token-lookup.yml new file mode 100644 index 0000000000..59233e2a05 --- /dev/null +++ b/changelogs/fragments/10299-github_app_access_token-lookup.yml @@ -0,0 +1,2 @@ +minor_changes: + - github_app_access_token lookup plugin - support both ``jwt`` and ``pyjwt`` to avoid conflict with other modules' requirements (https://github.com/ansible-collections/community.general/issues/10299). diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml new file mode 100644 index 0000000000..9d71bd17d8 --- /dev/null +++ b/changelogs/fragments/10311-xfconf-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311). diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml new file mode 100644 index 0000000000..53436ea7d6 --- /dev/null +++ b/changelogs/fragments/10323-nmcli-improvements.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323). diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml new file mode 100644 index 0000000000..f8f74a336c --- /dev/null +++ b/changelogs/fragments/10328-redundant-brackets.yml @@ -0,0 +1,32 @@ +minor_changes: + - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
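The ``github_app_access_token`` lookup change recorded above (PR 10299) means that either the ``jwt`` or the ``pyjwt`` Python distribution now satisfies the plugin's requirement. A minimal usage sketch, assuming the plugin's documented ``key_path``/``app_id``/``installation_id`` options; all values are placeholders:

```yaml
- name: Obtain a GitHub App installation token (sketch; placeholder values)
  ansible.builtin.set_fact:
    gh_token: >-
      {{ lookup('community.general.github_app_access_token',
                key_path='/path/to/github-app-private-key.pem',
                app_id='12345',
                installation_id='67890') }}
```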
diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml new file mode 100644 index 0000000000..5e5209edda --- /dev/null +++ b/changelogs/fragments/10329-catapult-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329). diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml new file mode 100644 index 0000000000..00cd71f559 --- /dev/null +++ b/changelogs/fragments/10339-github_app_access_token.yml @@ -0,0 +1,2 @@ +bugfixes: + - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339). \ No newline at end of file diff --git a/changelogs/fragments/10346-jenkins-plugins-fixes.yml b/changelogs/fragments/10346-jenkins-plugins-fixes.yml new file mode 100644 index 0000000000..382fe7aa53 --- /dev/null +++ b/changelogs/fragments/10346-jenkins-plugins-fixes.yml @@ -0,0 +1,6 @@ +bugfixes: + - "jenkins_plugin - install latest compatible version instead of latest (https://github.com/ansible-collections/community.general/issues/854, https://github.com/ansible-collections/community.general/pull/10346)." + - "jenkins_plugin - separate Jenkins and external URL credentials (https://github.com/ansible-collections/community.general/issues/4419, https://github.com/ansible-collections/community.general/pull/10346)." + +minor_changes: + - "jenkins_plugin - install dependencies for a specific version (https://github.com/ansible-collections/community.general/issues/4995, https://github.com/ansible-collections/community.general/pull/10346)." diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml new file mode 100644 index 0000000000..b35da354d2 --- /dev/null +++ b/changelogs/fragments/10349-incus_connection-error-handling.yml @@ -0,0 +1,2 @@ +bugfixes: + - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349). diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml new file mode 100644 index 0000000000..e48a6142e8 --- /dev/null +++ b/changelogs/fragments/10359-dependent.yml @@ -0,0 +1,2 @@ +bugfixes: + - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)."
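The ``jenkins_plugin`` entries above (PR 10346) concern version pinning: per the fragment, a pinned ``version`` previously pulled the latest plugin dependencies rather than ones matching that version. A minimal sketch of the affected scenario; the plugin version, credentials, and URL are placeholders, and the new separate handling of Jenkins versus external download credentials is not shown here:

```yaml
- name: Install a Jenkins plugin pinned to a specific version
  community.general.jenkins_plugin:
    name: git
    version: "5.2.0"  # with PR 10346, dependencies are resolved for this version, not for latest
    url: http://localhost:8080
    url_username: admin
    url_password: "{{ jenkins_admin_password }}"
```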
diff --git a/changelogs/fragments/10413-pacemaker-resource-cleanup.yml b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml new file mode 100644 index 0000000000..f4157559cc --- /dev/null +++ b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml @@ -0,0 +1,3 @@ +minor_changes: + - pacemaker_resource - add ``state=cleanup`` for cleaning up pacemaker resources (https://github.com/ansible-collections/community.general/pull/10413). + - pacemaker_resource - the parameter ``name`` is no longer a required parameter in community.general 11.3.0 (https://github.com/ansible-collections/community.general/pull/10413). diff --git a/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml new file mode 100644 index 0000000000..22433b584e --- /dev/null +++ b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for ``brute_force_strategy`` and ``max_temporary_lockouts`` (https://github.com/ansible-collections/community.general/issues/10412, https://github.com/ansible-collections/community.general/pull/10415). diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml new file mode 100644 index 0000000000..b1b5db632b --- /dev/null +++ b/changelogs/fragments/10417-sysrc-refactor.yml @@ -0,0 +1,4 @@ +minor_changes: + - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417). +bugfixes: + - sysrc - fixes parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417). \ No newline at end of file diff --git a/changelogs/fragments/10422-tasks_only-result_format.yml b/changelogs/fragments/10422-tasks_only-result_format.yml new file mode 100644 index 0000000000..13e5e749bf --- /dev/null +++ b/changelogs/fragments/10422-tasks_only-result_format.yml @@ -0,0 +1,2 @@ +minor_changes: + - "tasks_only callback plugin - add ``result_format`` and ``pretty_results`` options similarly to the default callback (https://github.com/ansible-collections/community.general/pull/10422)." diff --git a/changelogs/fragments/10423-apache_module-condition.yml b/changelogs/fragments/10423-apache_module-condition.yml new file mode 100644 index 0000000000..9a30d06b4e --- /dev/null +++ b/changelogs/fragments/10423-apache_module-condition.yml @@ -0,0 +1,2 @@ +bugfixes: + - apache2_module - check the ``cgi`` module restrictions only during activation (https://github.com/ansible-collections/community.general/pull/10423). diff --git a/changelogs/fragments/10424-scaleway-update-zones.yml b/changelogs/fragments/10424-scaleway-update-zones.yml new file mode 100644 index 0000000000..ffa508cd3a --- /dev/null +++ b/changelogs/fragments/10424-scaleway-update-zones.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_* modules, scaleway inventory plugin - update available zones and API URLs (https://github.com/ansible-collections/community.general/issues/10383, https://github.com/ansible-collections/community.general/pull/10424).
\ No newline at end of file diff --git a/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml new file mode 100644 index 0000000000..84b6ecf471 --- /dev/null +++ b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml @@ -0,0 +1,2 @@ +deprecated_features: + - cpanm - deprecate ``mode=compatibility``; use ``mode=new`` instead (https://github.com/ansible-collections/community.general/pull/10434). diff --git a/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml new file mode 100644 index 0000000000..cccb3a4c5f --- /dev/null +++ b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml @@ -0,0 +1,2 @@ +deprecated_features: + - github_repo - deprecate ``force_defaults=true`` (https://github.com/ansible-collections/community.general/pull/10435). diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml new file mode 100644 index 0000000000..24d68b52df --- /dev/null +++ b/changelogs/fragments/10442-apk-fix-empty-names.yml @@ -0,0 +1,3 @@ +bugfixes: + - apk - handle empty name strings properly + (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442). \ No newline at end of file diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml new file mode 100644 index 0000000000..1bf39619cc --- /dev/null +++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)." diff --git a/changelogs/fragments/10455-capabilities-improve-error-detection.yml b/changelogs/fragments/10455-capabilities-improve-error-detection.yml new file mode 100644 index 0000000000..40337a424b --- /dev/null +++ b/changelogs/fragments/10455-capabilities-improve-error-detection.yml @@ -0,0 +1,2 @@ +bugfixes: + - capabilities - fix the unrelated and incoherent error messages that were returned when using an invalid path (symlink/directory/...) (https://github.com/ansible-collections/community.general/issues/5649, https://github.com/ansible-collections/community.general/pull/10455). \ No newline at end of file diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml new file mode 100644 index 0000000000..70af0932b3 --- /dev/null +++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml @@ -0,0 +1,2 @@ +bugfixes: + - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)." \ No newline at end of file diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml new file mode 100644 index 0000000000..4b3f317454 --- /dev/null +++ b/changelogs/fragments/10459-deprecations.yml @@ -0,0 +1,6 @@ +bugfixes: + - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml new file mode 100644 index 0000000000..c4b77299f5 --- /dev/null +++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)." diff --git a/changelogs/fragments/10483-sensu-subscription-quotes.yml b/changelogs/fragments/10483-sensu-subscription-quotes.yml new file mode 100644 index 0000000000..355099684c --- /dev/null +++ b/changelogs/fragments/10483-sensu-subscription-quotes.yml @@ -0,0 +1,2 @@ +minor_changes: + - sensu_subscription - normalize quotes in the module output (https://github.com/ansible-collections/community.general/pull/10483). diff --git a/changelogs/fragments/10490-rocketchat.yml b/changelogs/fragments/10490-rocketchat.yml new file mode 100644 index 0000000000..73657ba67c --- /dev/null +++ b/changelogs/fragments/10490-rocketchat.yml @@ -0,0 +1,3 @@ +deprecated_features: + - "rocketchat - the default value for ``is_pre740``, currently ``true``, is deprecated and will change to ``false`` in community.general 13.0.0 + (https://github.com/ansible-collections/community.general/pull/10490)." diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml new file mode 100644 index 0000000000..74867e71a7 --- /dev/null +++ b/changelogs/fragments/10491-irc.yml @@ -0,0 +1,2 @@ +bugfixes: + - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)." diff --git a/changelogs/fragments/10493-nagios-services.yml b/changelogs/fragments/10493-nagios-services.yml new file mode 100644 index 0000000000..3a04556c68 --- /dev/null +++ b/changelogs/fragments/10493-nagios-services.yml @@ -0,0 +1,2 @@ +minor_changes: + - nagios - make parameter ``services`` a ``list`` instead of a ``str`` (https://github.com/ansible-collections/community.general/pull/10493). diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml new file mode 100644 index 0000000000..09a0c442b0 --- /dev/null +++ b/changelogs/fragments/10494-rfdn-1.yml @@ -0,0 +1,27 @@ +minor_changes: + - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). 
+ - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml new file mode 100644 index 0000000000..89aeab9356 --- /dev/null +++ b/changelogs/fragments/10505-rfdn-2.yml @@ -0,0 +1,39 @@ +minor_changes: + - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml new file mode 100644 index 0000000000..fae9d118bc --- /dev/null +++ b/changelogs/fragments/10507-rfdn-3.yml @@ -0,0 +1,35 @@ +minor_changes: + - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml new file mode 100644 index 0000000000..6d8f9e7d77 --- /dev/null +++ b/changelogs/fragments/10512-rfdn-4.yml @@ -0,0 +1,42 @@ +minor_changes: + - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml new file mode 100644 index 0000000000..d930d7345c --- /dev/null +++ b/changelogs/fragments/10513-rfdn-5.yml @@ -0,0 +1,18 @@ +minor_changes: + - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). 
+ - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). diff --git a/changelogs/fragments/10514-deprecate-bearychat.yml b/changelogs/fragments/10514-deprecate-bearychat.yml new file mode 100644 index 0000000000..202210ac8c --- /dev/null +++ b/changelogs/fragments/10514-deprecate-bearychat.yml @@ -0,0 +1,2 @@ +deprecated_features: + - bearychat - module is deprecated and will be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/issues/10514). diff --git a/changelogs/fragments/10520-arg-runcommand-list.yml b/changelogs/fragments/10520-arg-runcommand-list.yml new file mode 100644 index 0000000000..4479b3a694 --- /dev/null +++ b/changelogs/fragments/10520-arg-runcommand-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - apk - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10520). diff --git a/changelogs/fragments/10523-bzr-cmd-list.yml b/changelogs/fragments/10523-bzr-cmd-list.yml new file mode 100644 index 0000000000..fb6c8a6c47 --- /dev/null +++ b/changelogs/fragments/10523-bzr-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - bzr - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10523). diff --git a/changelogs/fragments/10524-capabilities-cmd-list.yml b/changelogs/fragments/10524-capabilities-cmd-list.yml new file mode 100644 index 0000000000..e6af832b5c --- /dev/null +++ b/changelogs/fragments/10524-capabilities-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - capabilities - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10524). diff --git a/changelogs/fragments/10525-composer-cmd-list.yml b/changelogs/fragments/10525-composer-cmd-list.yml new file mode 100644 index 0000000000..a2aebc8a6d --- /dev/null +++ b/changelogs/fragments/10525-composer-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - composer - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10525). 
diff --git a/changelogs/fragments/10526-easy-install-cmd-list.yml b/changelogs/fragments/10526-easy-install-cmd-list.yml new file mode 100644 index 0000000000..6fa6717adc --- /dev/null +++ b/changelogs/fragments/10526-easy-install-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - easy_install - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10526). diff --git a/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml new file mode 100644 index 0000000000..cc2ae7efa0 --- /dev/null +++ b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_identity_provider - add support for ``fromUrl`` to automatically fetch OIDC endpoints from the well-known discovery URL, simplifying identity provider configuration (https://github.com/ansible-collections/community.general/pull/10527). \ No newline at end of file diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml new file mode 100644 index 0000000000..08257d6c78 --- /dev/null +++ b/changelogs/fragments/10531-wsl-paramiko.yml @@ -0,0 +1,3 @@ +bugfixes: + - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper; import paramiko directly instead + (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)." diff --git a/changelogs/fragments/10532-apk.yml b/changelogs/fragments/10532-apk.yml new file mode 100644 index 0000000000..84c5d985e8 --- /dev/null +++ b/changelogs/fragments/10532-apk.yml @@ -0,0 +1,2 @@ +bugfixes: + - "apk - fix check for empty/whitespace-only package names (https://github.com/ansible-collections/community.general/pull/10532)." diff --git a/changelogs/fragments/10536-imgadm-cmd-list.yml b/changelogs/fragments/10536-imgadm-cmd-list.yml new file mode 100644 index 0000000000..0f22c774d8 --- /dev/null +++ b/changelogs/fragments/10536-imgadm-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - imgadm - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10536). diff --git a/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml new file mode 100644 index 0000000000..66333b01a8 --- /dev/null +++ b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for client-related options and OAuth2 device flow (https://github.com/ansible-collections/community.general/pull/10538). \ No newline at end of file diff --git a/changelogs/fragments/10539-json_query.yml b/changelogs/fragments/10539-json_query.yml new file mode 100644 index 0000000000..7e84b7ecb0 --- /dev/null +++ b/changelogs/fragments/10539-json_query.yml @@ -0,0 +1,2 @@ +bugfixes: + - "json_query filter plugin - make compatible with lazy evaluation list and dictionary types of ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10539)."
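The ``json_query`` compatibility fix above matters for ordinary filter usage like the following, which could trip over ansible-core 2.19's lazily evaluated list/dict types before PR 10539. A minimal sketch with inline sample data; it requires the ``jmespath`` Python library on the controller:

```yaml
- name: Collect the names of running services (sample data inline)
  ansible.builtin.debug:
    msg: "{{ services | community.general.json_query(running_query) }}"
  vars:
    running_query: "[?state=='running'].name"
    services:
      - name: nginx
        state: running
      - name: postgresql
        state: stopped
```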
diff --git a/changelogs/fragments/10566-merge_variables.yml b/changelogs/fragments/10566-merge_variables.yml new file mode 100644 index 0000000000..c0de6dd845 --- /dev/null +++ b/changelogs/fragments/10566-merge_variables.yml @@ -0,0 +1,2 @@ +bugfixes: + - "merge_variables lookup plugin - avoid deprecated functionality from ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10566)." diff --git a/changelogs/fragments/10573-logstash-plugin-cmd-list.yml b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml new file mode 100644 index 0000000000..441c1c49a3 --- /dev/null +++ b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - logstash_plugin - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10573). diff --git a/changelogs/fragments/10574-django-runner.yml b/changelogs/fragments/10574-django-runner.yml new file mode 100644 index 0000000000..a0bf6ec6d4 --- /dev/null +++ b/changelogs/fragments/10574-django-runner.yml @@ -0,0 +1,2 @@ +minor_changes: + - django module utils - remove deprecated parameter from ``_DjangoRunner`` call (https://github.com/ansible-collections/community.general/pull/10574). diff --git a/changelogs/fragments/10599-open-iscsi-cmd-list.yml b/changelogs/fragments/10599-open-iscsi-cmd-list.yml new file mode 100644 index 0000000000..f8ef659ee9 --- /dev/null +++ b/changelogs/fragments/10599-open-iscsi-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - open_iscsi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10599). diff --git a/changelogs/fragments/10601-pear-cmd-list.yml b/changelogs/fragments/10601-pear-cmd-list.yml new file mode 100644 index 0000000000..d5ab2d3d0e --- /dev/null +++ b/changelogs/fragments/10601-pear-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - pear - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10601). diff --git a/changelogs/fragments/10602-portage-cmd-list.yml b/changelogs/fragments/10602-portage-cmd-list.yml new file mode 100644 index 0000000000..36b6711e00 --- /dev/null +++ b/changelogs/fragments/10602-portage-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - portage - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10602). diff --git a/changelogs/fragments/10603-riak-cmd-list.yml b/changelogs/fragments/10603-riak-cmd-list.yml new file mode 100644 index 0000000000..1a29a07c7f --- /dev/null +++ b/changelogs/fragments/10603-riak-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - riak - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10603). diff --git a/changelogs/fragments/10604-solaris-zone-cmd-list.yml b/changelogs/fragments/10604-solaris-zone-cmd-list.yml new file mode 100644 index 0000000000..2fe52cbf31 --- /dev/null +++ b/changelogs/fragments/10604-solaris-zone-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - solaris_zone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10604).
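For the ``merge_variables`` lookup fixed above (PR 10566), a minimal usage sketch. The ``pattern_type=suffix`` option follows the plugin's documented interface; the variable names are placeholders:

```yaml
- name: Merge every variable whose name ends in __app_settings
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.merge_variables', '__app_settings', pattern_type='suffix') }}"
  vars:
    role_a__app_settings:
      retries: 3
    role_b__app_settings:
      timeout: 30
```

With these inputs the lookup returns a single dictionary containing both ``retries`` and ``timeout``.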
diff --git a/changelogs/fragments/10605-swupd-cmd-list.yml b/changelogs/fragments/10605-swupd-cmd-list.yml new file mode 100644 index 0000000000..23669d7974 --- /dev/null +++ b/changelogs/fragments/10605-swupd-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - swupd - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10605). diff --git a/changelogs/fragments/10606-urpmi-cmd-list.yml b/changelogs/fragments/10606-urpmi-cmd-list.yml new file mode 100644 index 0000000000..a7a2e54a1e --- /dev/null +++ b/changelogs/fragments/10606-urpmi-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - urpmi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10606). diff --git a/changelogs/fragments/10608-xbps-cmd-list.yml b/changelogs/fragments/10608-xbps-cmd-list.yml new file mode 100644 index 0000000000..ff951a4520 --- /dev/null +++ b/changelogs/fragments/10608-xbps-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - xbps - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10608). diff --git a/changelogs/fragments/10609-xfs-quota-cmd-list.yml b/changelogs/fragments/10609-xfs-quota-cmd-list.yml new file mode 100644 index 0000000000..74e170ef09 --- /dev/null +++ b/changelogs/fragments/10609-xfs-quota-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfs_quota - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10609). diff --git a/changelogs/fragments/10612-timezone-cmd-list.yml b/changelogs/fragments/10612-timezone-cmd-list.yml new file mode 100644 index 0000000000..601375fbc5 --- /dev/null +++ b/changelogs/fragments/10612-timezone-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - timezone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10612). diff --git a/changelogs/fragments/10642-parted-cmd-list.yml b/changelogs/fragments/10642-parted-cmd-list.yml new file mode 100644 index 0000000000..29025512dd --- /dev/null +++ b/changelogs/fragments/10642-parted-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - parted - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10642). diff --git a/changelogs/fragments/10644-oneview-os.yml b/changelogs/fragments/10644-oneview-os.yml new file mode 100644 index 0000000000..f2789cf5fc --- /dev/null +++ b/changelogs/fragments/10644-oneview-os.yml @@ -0,0 +1,2 @@ +breaking_changes: + - oneview module utils - remove import of standard library ``os`` (https://github.com/ansible-collections/community.general/pull/10644). diff --git a/changelogs/fragments/10646-scaleway_container_cpu_limit.yml b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml new file mode 100644 index 0000000000..f23a1bb96d --- /dev/null +++ b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_container - add a ``cpu_limit`` argument (https://github.com/ansible-collections/community.general/pull/10646). 
diff --git a/changelogs/fragments/10647-scaleway-module-defaults.yml b/changelogs/fragments/10647-scaleway-module-defaults.yml new file mode 100644 index 0000000000..7fca7a171a --- /dev/null +++ b/changelogs/fragments/10647-scaleway-module-defaults.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway modules - add a ``scaleway`` group to use ``module_defaults`` (https://github.com/ansible-collections/community.general/pull/10647). diff --git a/changelogs/fragments/10652-oracle-deprecation.yml b/changelogs/fragments/10652-oracle-deprecation.yml new file mode 100644 index 0000000000..3842e994f8 --- /dev/null +++ b/changelogs/fragments/10652-oracle-deprecation.yml @@ -0,0 +1,4 @@ +deprecated_features: + - oci_utils module utils - utils is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). + - oci_vcn - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). + - oracle* doc fragments - fragments are deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). diff --git a/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml new file mode 100644 index 0000000000..333121902f --- /dev/null +++ b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml @@ -0,0 +1,2 @@ +minor_changes: + - zypper - support the ``--gpg-auto-import-keys`` option in zypper (https://github.com/ansible-collections/community.general/issues/10660, https://github.com/ansible-collections/community.general/pull/10661). diff --git a/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml new file mode 100644 index 0000000000..270488d248 --- /dev/null +++ b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacemaker_resource - fix ``resource_type`` parameter formatting (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10663)." diff --git a/changelogs/fragments/10665-pacemaker-resource-clone.yml b/changelogs/fragments/10665-pacemaker-resource-clone.yml new file mode 100644 index 0000000000..c24420c598 --- /dev/null +++ b/changelogs/fragments/10665-pacemaker-resource-clone.yml @@ -0,0 +1,2 @@ +minor_changes: + - pacemaker_resource - add ``state=cloned`` for cloning pacemaker resources or groups (https://github.com/ansible-collections/community.general/issues/10322, https://github.com/ansible-collections/community.general/pull/10665). diff --git a/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml new file mode 100644 index 0000000000..65aeae2a86 --- /dev/null +++ b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679). 
+ - gitlab_project_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679). diff --git a/changelogs/fragments/10684-django-improvements.yml b/changelogs/fragments/10684-django-improvements.yml new file mode 100644 index 0000000000..a8ca1cfbe9 --- /dev/null +++ b/changelogs/fragments/10684-django-improvements.yml @@ -0,0 +1,4 @@ +minor_changes: + - django module utils - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). + - django_check - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). + - django_createcachetable - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). diff --git a/changelogs/fragments/10687-deprecations.yml b/changelogs/fragments/10687-deprecations.yml new file mode 100644 index 0000000000..62974ab6a0 --- /dev/null +++ b/changelogs/fragments/10687-deprecations.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Avoid deprecated functionality in ansible-core 2.20 (https://github.com/ansible-collections/community.general/pull/10687)." diff --git a/changelogs/fragments/10688-pids.yml b/changelogs/fragments/10688-pids.yml new file mode 100644 index 0000000000..1ed97a6fed --- /dev/null +++ b/changelogs/fragments/10688-pids.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pids - prevent error when an empty string is provided for ``name`` (https://github.com/ansible-collections/community.general/issues/10672, https://github.com/ansible-collections/community.general/pull/10688)." diff --git a/changelogs/fragments/10689-gem-prevent-soundness-issue.yml b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml new file mode 100644 index 0000000000..a55dba1ea1 --- /dev/null +++ b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml @@ -0,0 +1,2 @@ +bugfixes: + - "gem - fix soundness issue when uninstalling default gems on Ubuntu (https://github.com/ansible-collections/community.general/issues/10451, https://github.com/ansible-collections/community.general/pull/10689)." \ No newline at end of file diff --git a/changelogs/fragments/10700-django-check-databases.yml b/changelogs/fragments/10700-django-check-databases.yml new file mode 100644 index 0000000000..cfb8897f6a --- /dev/null +++ b/changelogs/fragments/10700-django-check-databases.yml @@ -0,0 +1,2 @@ +minor_changes: + - django_check - rename parameter ``database`` to ``databases``, add alias for compatibility (https://github.com/ansible-collections/community.general/pull/10700). diff --git a/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml new file mode 100644 index 0000000000..2ceb1352b4 --- /dev/null +++ b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml @@ -0,0 +1,2 @@ +minor_changes: + - openbsd_pkg - add ``autoremove`` parameter to remove unused dependencies (https://github.com/ansible-collections/community.general/pull/10705). 
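The ``django_check`` entry above (rename ``database`` to ``databases`` with a compatibility alias) relies on the standard ``aliases`` field of the ``AnsibleModule`` argument spec. A hedged sketch of what such a rename typically looks like; the surrounding spec is invented for the example:

```python
from ansible.module_utils.basic import AnsibleModule

# "databases" is the new option name; "database" stays accepted as an alias,
# so existing playbooks keep working while docs switch to the plural form.
module = AnsibleModule(
    argument_spec=dict(
        databases=dict(type="list", elements="str", aliases=["database"]),
    ),
    supports_check_mode=True,
)
```

A task that uses either spelling resolves to the same ``module.params["databases"]`` value.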
diff --git a/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml new file mode 100644 index 0000000000..ba5e08edd3 --- /dev/null +++ b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacemaker - use regex for matching ``maintenance-mode`` output to determine cluster maintenance status (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10707)." diff --git a/changelogs/fragments/10711-pytohn-idioms-1.yml b/changelogs/fragments/10711-pytohn-idioms-1.yml new file mode 100644 index 0000000000..18ae9db37b --- /dev/null +++ b/changelogs/fragments/10711-pytohn-idioms-1.yml @@ -0,0 +1,6 @@ +minor_changes: + - gitlab_label - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - gitlab_milestone - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - ipa_host - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - lvg_rename - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - terraform - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). diff --git a/changelogs/fragments/10712-python-idioms-2.yml b/changelogs/fragments/10712-python-idioms-2.yml new file mode 100644 index 0000000000..8d49f1f86f --- /dev/null +++ b/changelogs/fragments/10712-python-idioms-2.yml @@ -0,0 +1,7 @@ +minor_changes: + - iocage inventory plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - manageiq - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - android_sdk - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - elasticsearch_plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - manageiq_alert_profiles - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - one_vm - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). diff --git a/changelogs/fragments/10727-python-idioms-3.yml b/changelogs/fragments/10727-python-idioms-3.yml new file mode 100644 index 0000000000..9b92b8bbef --- /dev/null +++ b/changelogs/fragments/10727-python-idioms-3.yml @@ -0,0 +1,10 @@ +minor_changes: + - filesize - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - iptables_state - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - manageiq_group - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - manageiq_tenant - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - mssql_db - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). 
+ - openbsd_pkg - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - ufw - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - xenserver_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - zfs_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). diff --git a/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml new file mode 100644 index 0000000000..4bb018a9c7 --- /dev/null +++ b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml @@ -0,0 +1,2 @@ +deprecated_features: + - pacemaker_cluster - the state ``cleanup`` will be removed in community.general 14.0.0 (https://github.com/ansible-collections/community.general/pull/10741). diff --git a/changelogs/fragments/10743-monit-handle-unknown-status.yml b/changelogs/fragments/10743-monit-handle-unknown-status.yml new file mode 100644 index 0000000000..1c9fbb1101 --- /dev/null +++ b/changelogs/fragments/10743-monit-handle-unknown-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - monit - fix crash caused by an unknown status value returned from the monit service (https://github.com/ansible-collections/community.general/issues/10742, https://github.com/ansible-collections/community.general/pull/10743). diff --git a/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml new file mode 100644 index 0000000000..716ffa35f1 --- /dev/null +++ b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml @@ -0,0 +1,3 @@ +bugfixes: + - kdeconfig - ``kwriteconfig`` executable could not be discovered automatically on systems with only ``kwriteconfig6`` installed. + ``kwriteconfig6`` can now be discovered by Ansible (https://github.com/ansible-collections/community.general/issues/10746, https://github.com/ansible-collections/community.general/pull/10751). \ No newline at end of file diff --git a/changelogs/fragments/10752-selective-hardcoded-loop-var.yml b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml new file mode 100644 index 0000000000..cfc6bdd9e9 --- /dev/null +++ b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml @@ -0,0 +1,2 @@ +bugfixes: + - selective callback plugin - specify ``ansible_loop_var`` instead of the explicit value ``item`` when printing task result (https://github.com/ansible-collections/community.general/pull/10752). diff --git a/changelogs/fragments/10769-xenserver-rf.yml b/changelogs/fragments/10769-xenserver-rf.yml new file mode 100644 index 0000000000..2c31edf886 --- /dev/null +++ b/changelogs/fragments/10769-xenserver-rf.yml @@ -0,0 +1,2 @@ +minor_changes: + - xenserver module utils - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10769). diff --git a/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml new file mode 100644 index 0000000000..a38d98a444 --- /dev/null +++ b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785).
+ - gitlab_project_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785). diff --git a/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml new file mode 100644 index 0000000000..bbf5b6d9a5 --- /dev/null +++ b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787). + - gitlab_project_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787). diff --git a/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml new file mode 100644 index 0000000000..ed4d4d78e8 --- /dev/null +++ b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_protected_branch - add ``allow_force_push``, ``code_owner_approval_required`` (https://github.com/ansible-collections/community.general/pull/10795, https://github.com/ansible-collections/community.general/issues/6432, https://github.com/ansible-collections/community.general/issues/10289, https://github.com/ansible-collections/community.general/issues/10765). + - gitlab_protected_branch - update protected branches if possible instead of recreating them (https://github.com/ansible-collections/community.general/pull/10795). diff --git a/changelogs/fragments/10796-rocketchat-force-content-type.yml b/changelogs/fragments/10796-rocketchat-force-content-type.yml new file mode 100644 index 0000000000..96ca116e62 --- /dev/null +++ b/changelogs/fragments/10796-rocketchat-force-content-type.yml @@ -0,0 +1,2 @@ +bugfixes: + - rocketchat - fix message delivery in Rocket Chat >= 7.5.3 by forcing ``Content-Type`` header to ``application/json`` instead of the default ``application/x-www-form-urlencoded`` (https://github.com/ansible-collections/community.general/issues/10796, https://github.com/ansible-collections/community.general/pull/10796). diff --git a/changelogs/fragments/10805-homebrew-support-old-names.yml b/changelogs/fragments/10805-homebrew-support-old-names.yml new file mode 100644 index 0000000000..43d5a1c8bf --- /dev/null +++ b/changelogs/fragments/10805-homebrew-support-old-names.yml @@ -0,0 +1,2 @@ +bugfixes: + - homebrew - do not fail when cask or formula name has changed in homebrew repo (https://github.com/ansible-collections/community.general/issues/10804, https://github.com/ansible-collections/community.general/pull/10805). \ No newline at end of file diff --git a/changelogs/fragments/10810-github_app_access_token-jwt.yml b/changelogs/fragments/10810-github_app_access_token-jwt.yml new file mode 100644 index 0000000000..804ab9fbaa --- /dev/null +++ b/changelogs/fragments/10810-github_app_access_token-jwt.yml @@ -0,0 +1,2 @@ +bugfixes: + - "github_app_access_token lookup plugin - fix compatibility imports for using jwt (https://github.com/ansible-collections/community.general/issues/10807, https://github.com/ansible-collections/community.general/pull/10810)." 
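The ``rocketchat`` fix above hinges on a detail of ``fetch_url()``: when it POSTs data without an explicit header, the request goes out form-encoded. A rough sketch of the pattern, assuming a hypothetical ``notify()`` helper and a placeholder webhook URL, not the module's literal code:

```python
import json

from ansible.module_utils.urls import fetch_url


def notify(module, webhook_url, text):
    # Rocket Chat >= 7.5.3 rejects form-encoded webhook payloads, so the
    # Content-Type header is forced to application/json instead of the
    # default application/x-www-form-urlencoded.
    payload = json.dumps({"text": text})
    response, info = fetch_url(
        module,
        webhook_url,
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    if info["status"] != 200:
        module.fail_json(msg="posting to Rocket Chat failed: %s" % info.get("msg", ""))
```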
diff --git a/changelogs/fragments/10812-gitlab-variable-add-description.yml b/changelogs/fragments/10812-gitlab-variable-add-description.yml new file mode 100644 index 0000000000..1de0405aff --- /dev/null +++ b/changelogs/fragments/10812-gitlab-variable-add-description.yml @@ -0,0 +1,4 @@ +minor_changes: + - gitlab_group_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812). + - gitlab_instance_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812). + - gitlab_project_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812, https://github.com/ansible-collections/community.general/issues/8584, https://github.com/ansible-collections/community.general/issues/10809). diff --git a/changelogs/fragments/10823-parted-fail-json-command.yml b/changelogs/fragments/10823-parted-fail-json-command.yml new file mode 100644 index 0000000000..8a52be589e --- /dev/null +++ b/changelogs/fragments/10823-parted-fail-json-command.yml @@ -0,0 +1,2 @@ +bugfixes: + - parted - fix ``fail_json()`` call: the command variable is a list, not text (https://github.com/ansible-collections/community.general/pull/10823, https://github.com/ansible-collections/community.general/issues/10817). diff --git a/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml new file mode 100644 index 0000000000..8fd05ec182 --- /dev/null +++ b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_role - fixes an issue where the module incorrectly returns ``changed=true`` when using the alias ``clientId`` in composite roles (https://github.com/ansible-collections/community.general/pull/10829). \ No newline at end of file diff --git a/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml new file mode 100644 index 0000000000..3b7818ee3e --- /dev/null +++ b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_group - fixes an issue where the module ignores the realm when searching subgroups by name (https://github.com/ansible-collections/community.general/pull/10840). \ No newline at end of file diff --git a/changelogs/fragments/10842-keycloak-client-scope-support.yml b/changelogs/fragments/10842-keycloak-client-scope-support.yml new file mode 100644 index 0000000000..80266fa43b --- /dev/null +++ b/changelogs/fragments/10842-keycloak-client-scope-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_client - add idempotent support for ``default_client_scopes`` and ``optional_client_scopes``, and ensure consistent change detection between check mode and live run (https://github.com/ansible-collections/community.general/issues/5495, https://github.com/ansible-collections/community.general/pull/10842). \ No newline at end of file diff --git a/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml b/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml deleted file mode 100644 index 78db43da7d..0000000000 --- a/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495).
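The ``parted`` fix above (fragment ``10823-parted-fail-json-command.yml``) points at a pitfall that follows directly from the list-based command refactors: error paths that embed the command in a message must join the list first. A speculative sketch of the shape of such a fix, not the module's literal code:

```python
# device and subcommand are illustrative values only.
command = ["parted", "-s", "/dev/sdb", "unit", "KiB", "print"]
rc, out, err = module.run_command(command)  # module: an AnsibleModule in scope
if rc != 0:
    # command is a list, not text: join it before embedding it in the
    # message, otherwise the output contains a Python list repr.
    module.fail_json(msg="Error while running: %s" % " ".join(command), rc=rc, stderr=err)
```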
diff --git a/changelogs/fragments/10852-yaml.yml b/changelogs/fragments/10852-yaml.yml new file mode 100644 index 0000000000..1319b94ab5 --- /dev/null +++ b/changelogs/fragments/10852-yaml.yml @@ -0,0 +1,2 @@ +bugfixes: + - "yaml cache plugin - make compatible with ansible-core 2.19 (https://github.com/ansible-collections/community.general/issues/10849, https://github.com/ansible-collections/community.general/issues/10852)." diff --git a/changelogs/fragments/10857-github_deploy_key-err.yml b/changelogs/fragments/10857-github_deploy_key-err.yml new file mode 100644 index 0000000000..58bac31c5e --- /dev/null +++ b/changelogs/fragments/10857-github_deploy_key-err.yml @@ -0,0 +1,2 @@ +bugfixes: + - "github_deploy_key - fix bug during error handling if no body was present in the result (https://github.com/ansible-collections/community.general/issues/10853, https://github.com/ansible-collections/community.general/pull/10857)." diff --git a/changelogs/fragments/10873-six.yml b/changelogs/fragments/10873-six.yml new file mode 100644 index 0000000000..d9ea201520 --- /dev/null +++ b/changelogs/fragments/10873-six.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Avoid usage of deprecated ``ansible.module_utils.six`` in all code that does not have to support Python 2 (https://github.com/ansible-collections/community.general/pull/10873)." diff --git a/changelogs/fragments/10874-pipx-180.yml b/changelogs/fragments/10874-pipx-180.yml new file mode 100644 index 0000000000..dd776827e8 --- /dev/null +++ b/changelogs/fragments/10874-pipx-180.yml @@ -0,0 +1,2 @@ +minor_changes: + - pipx module_utils - use ``PIPX_USE_EMOJI`` to disable emojis in the output of ``pipx`` 1.8.0 (https://github.com/ansible-collections/community.general/pull/10874). diff --git a/changelogs/fragments/10880-github_app_access_token-lookup.yml b/changelogs/fragments/10880-github_app_access_token-lookup.yml new file mode 100644 index 0000000000..b3c9503d59 --- /dev/null +++ b/changelogs/fragments/10880-github_app_access_token-lookup.yml @@ -0,0 +1,2 @@ +minor_changes: + - "github_app_access_token lookup plugin - add support for GitHub Enterprise Server (https://github.com/ansible-collections/community.general/issues/10879, https://github.com/ansible-collections/community.general/pull/10880)." diff --git a/changelogs/fragments/10888-six.yml b/changelogs/fragments/10888-six.yml new file mode 100644 index 0000000000..b1f09accb3 --- /dev/null +++ b/changelogs/fragments/10888-six.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Remove all usage of ``ansible.module_utils.six`` (https://github.com/ansible-collections/community.general/pull/10888)." diff --git a/changelogs/fragments/10891-dict-refactor.yml b/changelogs/fragments/10891-dict-refactor.yml new file mode 100644 index 0000000000..63d5e585ff --- /dev/null +++ b/changelogs/fragments/10891-dict-refactor.yml @@ -0,0 +1,6 @@ +minor_changes: + - dependent lookup plugin - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - scaleway module_utils - improve code readability, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_cluster.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_resource.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). 
+ - pacemaker_stonith.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). diff --git a/changelogs/fragments/10892-remove-py2.yml b/changelogs/fragments/10892-remove-py2.yml new file mode 100644 index 0000000000..69904d4777 --- /dev/null +++ b/changelogs/fragments/10892-remove-py2.yml @@ -0,0 +1,7 @@ +minor_changes: + - known_hosts module_utils - drop Python 2 support when parsing output of ``urlparse`` (https://github.com/ansible-collections/community.general/pull/10892). + - aix_inittab - drop Python 2 support for function ``zip`` (https://github.com/ansible-collections/community.general/pull/10892). + - copr - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892). + - dconf - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892). + - irc - drop Python 2 support for SSL context creation (https://github.com/ansible-collections/community.general/pull/10892). + - mail - drop Python 2 support for Message-ID domain setting (https://github.com/ansible-collections/community.general/pull/10892). diff --git a/changelogs/fragments/10899-use-f-strings.yml b/changelogs/fragments/10899-use-f-strings.yml new file mode 100644 index 0000000000..9752e5ebf2 --- /dev/null +++ b/changelogs/fragments/10899-use-f-strings.yml @@ -0,0 +1,14 @@ +minor_changes: + - wsl connection plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - accumulate filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - counter filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - crc32 filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - groupby_as_dict filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - hashids filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - json_query filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - lists filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - random_mac filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - time filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - unicode_normalize filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - passwordstore lookup plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - ansible_type plugin_utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). 
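The ``10899-use-f-strings.yml`` entries above are purely cosmetic, but they all follow one pattern; a representative before/after with invented variable names:

```python
plugin = "community.general.counter"
count = 3

# Before: percent formatting or str.format()
msg_old = "%s matched %d items" % (plugin, count)
msg_fmt = "{0} matched {1} items".format(plugin, count)

# After: an f-string inlines the expressions and reads left to right
msg_new = f"{plugin} matched {count} items"

assert msg_old == msg_fmt == msg_new
```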
diff --git a/changelogs/fragments/10903-2to3.yml b/changelogs/fragments/10903-2to3.yml new file mode 100644 index 0000000000..af0b744456 --- /dev/null +++ b/changelogs/fragments/10903-2to3.yml @@ -0,0 +1,8 @@ +minor_changes: + - pickle cache plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - counter_enabled callback plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - wsl connection plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - cobbler inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - linode inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). diff --git a/changelogs/fragments/10904-2to3-mods.yml b/changelogs/fragments/10904-2to3-mods.yml new file mode 100644 index 0000000000..12ca58b250 --- /dev/null +++ b/changelogs/fragments/10904-2to3-mods.yml @@ -0,0 +1,30 @@ +minor_changes: + - bitbucket_access_key - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bitbucket_pipeline_known_host - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bitbucket_pipeline_variable - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bzr - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - capabilities - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - gitlab_milestone - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - haproxy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - homebrew - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - homebrew_cask - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - hwc_network_vpc - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - hwc_smn_topic - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - idrac_redfish_config - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - idrac_redfish_info - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - influxdb_retention_policy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - ini_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - interfaces_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - launchd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - logentries - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - packet_sshkey - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). 
+ - pamd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - taiga_issue - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vdo - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_role - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_schema - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_user - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vexata_eg - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vexata_volume - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - xcc_redfish_command - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - zypper - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). diff --git a/changelogs/fragments/10905-java-keystore-simplify.yml b/changelogs/fragments/10905-java-keystore-simplify.yml new file mode 100644 index 0000000000..7b2a0de53a --- /dev/null +++ b/changelogs/fragments/10905-java-keystore-simplify.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_keystore - remove redundant function (https://github.com/ansible-collections/community.general/pull/10905). diff --git a/changelogs/fragments/10906-linode-modutils.yml b/changelogs/fragments/10906-linode-modutils.yml new file mode 100644 index 0000000000..ced88a7474 --- /dev/null +++ b/changelogs/fragments/10906-linode-modutils.yml @@ -0,0 +1,2 @@ +minor_changes: + - linode module utils - remove redundant code for ancient versions of Ansible (https://github.com/ansible-collections/community.general/pull/10906). diff --git a/changelogs/fragments/10907-2to3-mu.yml b/changelogs/fragments/10907-2to3-mu.yml new file mode 100644 index 0000000000..af19593cf0 --- /dev/null +++ b/changelogs/fragments/10907-2to3-mu.yml @@ -0,0 +1,9 @@ +minor_changes: + - csv module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - gitlab module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - homebrew module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - ilo_redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - saslprep module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). diff --git a/changelogs/fragments/10908-archive-lzma.yml b/changelogs/fragments/10908-archive-lzma.yml new file mode 100644 index 0000000000..bcce681bed --- /dev/null +++ b/changelogs/fragments/10908-archive-lzma.yml @@ -0,0 +1,2 @@ +minor_changes: + - archive - remove conditional code for older Python versions (https://github.com/ansible-collections/community.general/pull/10908). 
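The long runs of "use Python 3 idioms" entries (``10903``, ``10904``, ``10907``) and the earlier ``six`` removals are the usual cleanups once Python 2 support is dropped. A composite sketch of the kinds of edits involved; the class and data are invented for illustration:

```python
# Before (Python 2 compatible):
#   class PkgQuery(object):
#       def __init__(self, module):
#           super(PkgQuery, self).__init__()
#   for name, version in six.iteritems(packages): ...
#   if isinstance(name, six.string_types): ...

# After (Python 3 only):
class PkgQuery:  # the explicit "object" base class is redundant
    def __init__(self, module):
        super().__init__()  # argument-free super()
        self.module = module


packages = {"vim": "9.0", "git": "2.43"}
for name, version in packages.items():  # dict.items() replaces six.iteritems()
    assert isinstance(name, str)  # str replaces six.string_types
```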
diff --git a/changelogs/fragments/10909-launchd-plistlib.yml b/changelogs/fragments/10909-launchd-plistlib.yml new file mode 100644 index 0000000000..fc798c9ddb --- /dev/null +++ b/changelogs/fragments/10909-launchd-plistlib.yml @@ -0,0 +1,2 @@ +minor_changes: + - launchd - remove conditional code supporting Python versions prior to 3.4 (https://github.com/ansible-collections/community.general/pull/10909). diff --git a/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml new file mode 100644 index 0000000000..214487938b --- /dev/null +++ b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml @@ -0,0 +1,2 @@ +bugfixes: + - gitlab_runner - fix exception in check mode when a new runner is created (https://github.com/ansible-collections/community.general/issues/8854). diff --git a/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml new file mode 100644 index 0000000000..df70186ff5 --- /dev/null +++ b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientsecret, keycloak_clientsecret_info - make ``client_auth`` work (https://github.com/ansible-collections/community.general/issues/10932, https://github.com/ansible-collections/community.general/pull/10933). \ No newline at end of file diff --git a/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml new file mode 100644 index 0000000000..eb2b06d2f1 --- /dev/null +++ b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudflare_dns - roll back changes to SRV record validation (https://github.com/ansible-collections/community.general/issues/10934, https://github.com/ansible-collections/community.general/pull/10937). diff --git a/changelogs/fragments/10940-use-f-strings-xenserver.yml b/changelogs/fragments/10940-use-f-strings-xenserver.yml new file mode 100644 index 0000000000..114ac46486 --- /dev/null +++ b/changelogs/fragments/10940-use-f-strings-xenserver.yml @@ -0,0 +1,2 @@ +minor_changes: + - xenserver module utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10940). diff --git a/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml b/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml deleted file mode 100644 index be0a86fa5b..0000000000 --- a/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jenkins_plugin - add fallback url(s) for failure of plugin installation/download (https://github.com/ansible-collections/community.general/pull/1334). diff --git a/changelogs/fragments/1942_timezone.yml b/changelogs/fragments/1942_timezone.yml deleted file mode 100644 index 349c263298..0000000000 --- a/changelogs/fragments/1942_timezone.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- timezone - print error message to debug instead of warning when timedatectl fails (https://github.com/ansible-collections/community.general/issues/1942). 
diff --git a/changelogs/fragments/2045-bitbucket_support_basic_auth.yaml b/changelogs/fragments/2045-bitbucket_support_basic_auth.yaml deleted file mode 100644 index f6e7fa9e48..0000000000 --- a/changelogs/fragments/2045-bitbucket_support_basic_auth.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -minor_changes: - - bitbucket_* modules - add ``user`` and ``password`` options for Basic authentication (https://github.com/ansible-collections/community.general/pull/2045). -deprecated_features: - - bitbucket_* modules - ``username`` options have been deprecated in favor of ``workspace`` and will be removed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/2045). -major_changes: - - "bitbucket_* modules - ``client_id`` is no longer marked as ``no_log=true``. If you relied on its value not showing up in logs and output, please mark the whole tasks with ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/2045)." diff --git a/changelogs/fragments/2126-consul_kv-pass-token.yml b/changelogs/fragments/2126-consul_kv-pass-token.yml deleted file mode 100644 index a60fd2efcd..0000000000 --- a/changelogs/fragments/2126-consul_kv-pass-token.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` and ``token`` as keyword arguments - (https://github.com/ansible-collections/community.general/issues/2124). diff --git a/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml b/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml deleted file mode 100644 index 04c82480c1..0000000000 --- a/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing - (https://github.com/ansible-collections/community.general/pull/2385). diff --git a/changelogs/fragments/2323-groupby_as_dict-filter.yml b/changelogs/fragments/2323-groupby_as_dict-filter.yml deleted file mode 100644 index e72f323a60..0000000000 --- a/changelogs/fragments/2323-groupby_as_dict-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: groupby_as_dict - description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute diff --git a/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml b/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml deleted file mode 100644 index 2e609c43fc..0000000000 --- a/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - redfish_utils module utils - modified set_bios_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output (https://github.com/ansible-collections/community.general/issues/1995). - - idrac_redfish_config - modified set_manager_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output. Modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). - - redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). 
diff --git a/changelogs/fragments/2337-mark-inventory-scripts-executable.yml b/changelogs/fragments/2337-mark-inventory-scripts-executable.yml deleted file mode 100644 index 69aa3fff62..0000000000 --- a/changelogs/fragments/2337-mark-inventory-scripts-executable.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - inventory and vault scripts - change file permissions to make vendored inventory and vault scripts exectuable (https://github.com/ansible-collections/community.general/pull/2337). diff --git a/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml b/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml deleted file mode 100644 index 0728aeb28b..0000000000 --- a/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - composer - use ``no-interaction`` option when discovering available options to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348). diff --git a/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml b/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml deleted file mode 100644 index 876b212690..0000000000 --- a/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355). diff --git a/changelogs/fragments/2364-influxdb_user-first_user.yml b/changelogs/fragments/2364-influxdb_user-first_user.yml deleted file mode 100644 index 905688643b..0000000000 --- a/changelogs/fragments/2364-influxdb_user-first_user.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - influxdb_user - allow creation of admin users when InfluxDB authentication - is enabled but no other user exists on the database. In this scenario, - InfluxDB 1.x allows only ``CREATE USER`` queries and rejects any other query - (https://github.com/ansible-collections/community.general/issues/2364). diff --git a/changelogs/fragments/2369-lvol_size_bug_fixes.yml b/changelogs/fragments/2369-lvol_size_bug_fixes.yml deleted file mode 100644 index fcd2f17b11..0000000000 --- a/changelogs/fragments/2369-lvol_size_bug_fixes.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - lvol - fixed size unit capitalization to match units used between different tools for comparison (https://github.com/ansible-collections/community.general/issues/2360). - - lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370). \ No newline at end of file diff --git a/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml deleted file mode 100644 index d0b3580889..0000000000 --- a/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373). 
diff --git a/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml b/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml deleted file mode 100644 index b8e358848e..0000000000 --- a/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml +++ /dev/null @@ -1,6 +0,0 @@ -minor_changes: - - influxdb_retention_policy - add ``state`` parameter with allowed values - ``present`` and ``absent`` to support deletion of existing retention policies - (https://github.com/ansible-collections/community.general/issues/2383). - - influxdb_retention_policy - simplify duration logic parsing - (https://github.com/ansible-collections/community.general/pull/2385). diff --git a/changelogs/fragments/2393-module_helper-breakdown.yml b/changelogs/fragments/2393-module_helper-breakdown.yml deleted file mode 100644 index 472a1c3569..0000000000 --- a/changelogs/fragments/2393-module_helper-breakdown.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - break down of the long file into smaller pieces (https://github.com/ansible-collections/community.general/pull/2393). diff --git a/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml b/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml deleted file mode 100644 index 697b8e78d7..0000000000 --- a/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - puppet - replace ``console` with ``stdout`` in ``logdest`` option when ``all`` has been chosen (https://github.com/ansible-collections/community.general/issues/1190). diff --git a/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml b/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml deleted file mode 100644 index 8d0b4c1617..0000000000 --- a/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` command (https://github.com/ansible-collections/community.general/issues/2408). diff --git a/changelogs/fragments/2410-linode-improvements.yml b/changelogs/fragments/2410-linode-improvements.yml deleted file mode 100644 index cdf8551b08..0000000000 --- a/changelogs/fragments/2410-linode-improvements.yml +++ /dev/null @@ -1,5 +0,0 @@ -deprecated_features: - - linode - parameter ``backupsenabled`` is deprecated and will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410). -minor_changes: - - linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410). - - linode - parameter ``additional_disks`` is now validated as a list of dictionaries (https://github.com/ansible-collections/community.general/pull/2410). diff --git a/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml b/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml deleted file mode 100644 index a52b377817..0000000000 --- a/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990). 
diff --git a/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml b/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml deleted file mode 100644 index 6694638964..0000000000 --- a/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - compare MAC addresses case insensitively to fix idempotency issue (https://github.com/ansible-collections/community.general/issues/2409). diff --git a/changelogs/fragments/2417-nmcli_remove_dead_code.yml b/changelogs/fragments/2417-nmcli_remove_dead_code.yml deleted file mode 100644 index 9d94c393fa..0000000000 --- a/changelogs/fragments/2417-nmcli_remove_dead_code.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - remove dead code, ``options`` never contains keys from ``param_alias`` (https://github.com/ansible-collections/community.general/pull/2417). diff --git a/changelogs/fragments/2430-linodev4-error-message.yml b/changelogs/fragments/2430-linodev4-error-message.yml deleted file mode 100644 index 3dbfda1b9c..0000000000 --- a/changelogs/fragments/2430-linodev4-error-message.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - linode_v4 - changed the error message to point to the correct bugtracker URL (https://github.com/ansible-collections/community.general/pull/2430). diff --git a/changelogs/fragments/2435-one_vm-fix_missing_keys.yml b/changelogs/fragments/2435-one_vm-fix_missing_keys.yml deleted file mode 100644 index 395c024b26..0000000000 --- a/changelogs/fragments/2435-one_vm-fix_missing_keys.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435). diff --git a/changelogs/fragments/2448-stackpath_compute-fix.yml b/changelogs/fragments/2448-stackpath_compute-fix.yml deleted file mode 100644 index 196db780b1..0000000000 --- a/changelogs/fragments/2448-stackpath_compute-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "stackpath_compute inventory script - fix broken validation checks for client ID and client secret (https://github.com/ansible-collections/community.general/pull/2448)." diff --git a/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml b/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml deleted file mode 100644 index 290e13847a..0000000000 --- a/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325). diff --git a/changelogs/fragments/2454-detect_zfs_changed.yml b/changelogs/fragments/2454-detect_zfs_changed.yml deleted file mode 100644 index 0604278f6b..0000000000 --- a/changelogs/fragments/2454-detect_zfs_changed.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zfs - certain ZFS properties, especially sizes, would lead to a task being falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, https://github.com/ansible-collections/community.general/pull/2454). 
diff --git a/changelogs/fragments/2461-ovirt4-fix-configparser.yml b/changelogs/fragments/2461-ovirt4-fix-configparser.yml deleted file mode 100644 index 6e3845b21a..0000000000 --- a/changelogs/fragments/2461-ovirt4-fix-configparser.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - ovir4 inventory script - improve configparser creation to avoid crashes for options without values (https://github.com/ansible-collections/community.general/issues/674). diff --git a/changelogs/fragments/2472_filesystem_module_revamp.yml b/changelogs/fragments/2472_filesystem_module_revamp.yml deleted file mode 100644 index 691c861078..0000000000 --- a/changelogs/fragments/2472_filesystem_module_revamp.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -minor_changes: - - "filesystem - cleanup and revamp module, tests and doc. Pass all commands to - ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to - ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error - handling. (https://github.com/ansible-collections/community.general/pull/2472)." -bugfixes: - - "filesystem - repair ``reiserfs`` fstype support after adding it to integration - tests (https://github.com/ansible-collections/community.general/pull/2472)." diff --git a/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml b/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml deleted file mode 100644 index b446476f82..0000000000 --- a/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - java_keystore - added ``ssl_backend`` parameter for using the cryptography library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485). diff --git a/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml b/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml deleted file mode 100644 index d4575ea711..0000000000 --- a/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - influxdb_user - fix bug where an influxdb user has no privileges for 2 or more databases (https://github.com/ansible-collections/community.general/pull/2499). diff --git a/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml b/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml deleted file mode 100644 index 6141ac7747..0000000000 --- a/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - passwordstore lookup - add option ``missing`` to choose what to do if the password file is missing - (https://github.com/ansible-collections/community.general/pull/2500). diff --git a/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml b/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml deleted file mode 100644 index b310e27061..0000000000 --- a/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - jenkins_plugin - use POST method for sending request to jenkins API when ``state`` option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` (https://github.com/ansible-collections/community.general/issues/2510). 
diff --git a/changelogs/fragments/2514-mh-improved-changed.yml b/changelogs/fragments/2514-mh-improved-changed.yml deleted file mode 100644 index b540600130..0000000000 --- a/changelogs/fragments/2514-mh-improved-changed.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ModuleHelper module utils - improved mechanism for customizing the calculation of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514). diff --git a/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml b/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml deleted file mode 100644 index 767081dac9..0000000000 --- a/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - "java_keystore - add parameter ``keystore_type`` to control output file format and override ``keytool``'s - default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515)." diff --git a/changelogs/fragments/2517-cmd-params-from-vars.yml b/changelogs/fragments/2517-cmd-params-from-vars.yml deleted file mode 100644 index 95a2f7165d..0000000000 --- a/changelogs/fragments/2517-cmd-params-from-vars.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()`` params from ``self.vars``, as opposed to previously retrieving those from ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517). diff --git a/changelogs/fragments/2518-nmap-fix-cache-disabled.yml b/changelogs/fragments/2518-nmap-fix-cache-disabled.yml deleted file mode 100644 index 8f4680b6a6..0000000000 --- a/changelogs/fragments/2518-nmap-fix-cache-disabled.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512). diff --git a/changelogs/fragments/2520-connection-refactors.yml b/changelogs/fragments/2520-connection-refactors.yml deleted file mode 100644 index 2e5c8273d7..0000000000 --- a/changelogs/fragments/2520-connection-refactors.yml +++ /dev/null @@ -1,9 +0,0 @@ -minor_changes: - - chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). 
diff --git a/changelogs/fragments/2521-flatpak-list.yml b/changelogs/fragments/2521-flatpak-list.yml deleted file mode 100644 index e30607b306..0000000000 --- a/changelogs/fragments/2521-flatpak-list.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521). diff --git a/changelogs/fragments/2524-pacman_add_bin_option.yml b/changelogs/fragments/2524-pacman_add_bin_option.yml deleted file mode 100644 index 1a7c78f7ec..0000000000 --- a/changelogs/fragments/2524-pacman_add_bin_option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pacman - add ``executable`` option to use an alternative pacman binary (https://github.com/ansible-collections/community.general/issues/2524). diff --git a/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml b/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml deleted file mode 100644 index 552c0b26ab..0000000000 --- a/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -bugfixes: - - "iptables_state - fix initialization of iptables from null state when addressing - more than one table (https://github.com/ansible-collections/community.general/issues/2523)." - - "iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean up - (https://github.com/ansible-collections/community.general/pull/2525)." diff --git a/changelogs/fragments/2526-java_keystore-password-via-stdin.yml b/changelogs/fragments/2526-java_keystore-password-via-stdin.yml deleted file mode 100644 index 1e45e306af..0000000000 --- a/changelogs/fragments/2526-java_keystore-password-via-stdin.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - "java_keystore - replace envvar with stdin to pass secret to ``keytool`` - (https://github.com/ansible-collections/community.general/pull/2526)." diff --git a/changelogs/fragments/2540-zfs-delegate-choices.yml b/changelogs/fragments/2540-zfs-delegate-choices.yml deleted file mode 100644 index 8e0138420c..0000000000 --- a/changelogs/fragments/2540-zfs-delegate-choices.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - zfs_delegate_admin - drop choices from permissions, allowing any permission supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540). diff --git a/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml b/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml deleted file mode 100644 index e31fad744a..0000000000 --- a/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556). diff --git a/changelogs/fragments/2557-cloud-misc-refactor.yml b/changelogs/fragments/2557-cloud-misc-refactor.yml deleted file mode 100644 index 82e56dc942..0000000000 --- a/changelogs/fragments/2557-cloud-misc-refactor.yml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). diff --git a/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml b/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml deleted file mode 100644 index 471962d74f..0000000000 --- a/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "java_cert - fix issue with incorrect alias used on PKCS#12 certificate import (https://github.com/ansible-collections/community.general/pull/2560)." diff --git a/changelogs/fragments/2564-mh-cmd-process-output.yml b/changelogs/fragments/2564-mh-cmd-process-output.yml deleted file mode 100644 index 717c0d7fbb..0000000000 --- a/changelogs/fragments/2564-mh-cmd-process-output.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - method ``CmdMixin.run_command()`` now accepts ``process_output`` specifying a function to process the outcome of the underlying ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564). diff --git a/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml b/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml deleted file mode 100644 index 2f3e400e7e..0000000000 --- a/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/). diff --git a/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml b/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml deleted file mode 100644 index 764743303f..0000000000 --- a/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation as invalid releases (https://github.com/ansible-collections/community.general/pull/2571). diff --git a/changelogs/fragments/2573-terraform-overwrite-init.yml b/changelogs/fragments/2573-terraform-overwrite-init.yml deleted file mode 100644 index f2dad6a7ee..0000000000 --- a/changelogs/fragments/2573-terraform-overwrite-init.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - terraform - add option ``overwrite_init`` to skip init if exists (https://github.com/ansible-collections/community.general/pull/2573). diff --git a/changelogs/fragments/2578-ini-file-utf8-bom.yml b/changelogs/fragments/2578-ini-file-utf8-bom.yml deleted file mode 100644 index 00640c0b23..0000000000 --- a/changelogs/fragments/2578-ini-file-utf8-bom.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ini_file - opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189). diff --git a/changelogs/fragments/2579-redis-cache-ipv6.yml b/changelogs/fragments/2579-redis-cache-ipv6.yml deleted file mode 100644 index aaa5e78b34..0000000000 --- a/changelogs/fragments/2579-redis-cache-ipv6.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497). 
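The ``overwrite_init`` option added to ``terraform`` above can be sketched as follows; the project path is a placeholder, and the skip-if-initialized behavior is as the fragment describes:

```yaml
# Sketch only (project_path is a placeholder). With overwrite_init=false the
# module is expected to skip `terraform init` when the project is already
# initialized instead of re-initializing on every run.
- name: Apply a plan without re-running init
  community.general.terraform:
    project_path: /srv/terraform/app
    state: present
    overwrite_init: false
```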
diff --git a/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml b/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml deleted file mode 100644 index 06cac9ad1b..0000000000 --- a/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590). diff --git a/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml b/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml deleted file mode 100644 index dfae3f2bdf..0000000000 --- a/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - influxdb_user - fix bug which removed current privileges instead of appending them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, https://github.com/ansible-collections/community.general/pull/2614). diff --git a/changelogs/fragments/2616-archive-exclusion_patterns-option.yml b/changelogs/fragments/2616-archive-exclusion_patterns-option.yml deleted file mode 100644 index 86ef806b63..0000000000 --- a/changelogs/fragments/2616-archive-exclusion_patterns-option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - archive - added ``exclusion_patterns`` option to exclude files or subdirectories from archives (https://github.com/ansible-collections/community.general/pull/2616). diff --git a/changelogs/fragments/2632-cleanup.yml b/changelogs/fragments/2632-cleanup.yml deleted file mode 100644 index def89de634..0000000000 --- a/changelogs/fragments/2632-cleanup.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632)." diff --git a/changelogs/fragments/2634-terraform-switch-workspace.yml b/changelogs/fragments/2634-terraform-switch-workspace.yml deleted file mode 100644 index 247447b3a8..0000000000 --- a/changelogs/fragments/2634-terraform-switch-workspace.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - terraform - ensure the workspace is set back to its previous value when the apply fails (https://github.com/ansible-collections/community.general/pull/2634). diff --git a/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml b/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml deleted file mode 100644 index e75ceb6a1b..0000000000 --- a/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087). diff --git a/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml b/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml deleted file mode 100644 index 7971fc24eb..0000000000 --- a/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists (https://github.com/ansible-collections/community.general/issues/2648). 
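A short illustration of the new ``exclusion_patterns`` option of ``archive`` from the fragment above; paths and patterns are invented for the example:

```yaml
# Illustrative values only; exclusion_patterns filters files and
# subdirectories out of the resulting archive.
- name: Archive an application tree, excluding logs and caches
  community.general.archive:
    path: /var/www/app
    dest: /tmp/app.tgz
    format: gz
    exclusion_patterns:
      - '*.log'
      - 'cache/*'
```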
diff --git a/changelogs/fragments/2650-composer-add_composer_executable.yml b/changelogs/fragments/2650-composer-add_composer_executable.yml deleted file mode 100644 index b1cccc689c..0000000000 --- a/changelogs/fragments/2650-composer-add_composer_executable.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649). diff --git a/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml b/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml deleted file mode 100644 index 827942200b..0000000000 --- a/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in order to support FIPS systems (https://github.com/ansible-collections/community.general/pull/2662). diff --git a/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml b/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml deleted file mode 100644 index 993caaa323..0000000000 --- a/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -bugfixes: - - "iptables_state - fix a broken query of ``async_status`` result - with current ansible-core development version - (https://github.com/ansible-collections/community.general/issues/2627, - https://github.com/ansible-collections/community.general/pull/2671)." diff --git a/changelogs/fragments/2681-stacki-host-bugfix.yml b/changelogs/fragments/2681-stacki-host-bugfix.yml deleted file mode 100644 index 3403bfbfbe..0000000000 --- a/changelogs/fragments/2681-stacki-host-bugfix.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed, and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681). -minor_changes: - - stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681). diff --git a/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml b/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml deleted file mode 100644 index cb14a08ba0..0000000000 --- a/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - open_iscsi - also consider ``portal`` and ``port`` to check if already logged in or not (https://github.com/ansible-collections/community.general/issues/2683). - - open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup`` setting per portal (https://github.com/ansible-collections/community.general/issues/2685). diff --git a/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml b/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml deleted file mode 100644 index 065b524c86..0000000000 --- a/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - "gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691)." - - "gitlab_user - allow to reset an existing password with the new ``reset_password`` option (https://github.com/ansible-collections/community.general/pull/2691)." 
- - "gitlab_user - add functionality for adding external identity providers to a GitLab user (https://github.com/ansible-collections/community.general/pull/2691)." diff --git a/changelogs/fragments/2692-logstash-callback-plugin-replacing_options.yml b/changelogs/fragments/2692-logstash-callback-plugin-replacing_options.yml deleted file mode 100644 index ccf803598a..0000000000 --- a/changelogs/fragments/2692-logstash-callback-plugin-replacing_options.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - 'logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).' diff --git a/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml b/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml deleted file mode 100644 index 8f94cf5178..0000000000 --- a/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - "iptables_state - call ``async_status`` action plugin rather than its module - (https://github.com/ansible-collections/community.general/issues/2700)." diff --git a/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml b/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml deleted file mode 100644 index faada2e9bf..0000000000 --- a/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - zypper_repository - fix idempotency on adding repository with - ``$releasever`` and ``$basearch`` variables - (https://github.com/ansible-collections/community.general/issues/1985). diff --git a/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml b/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml deleted file mode 100644 index f32dc305b5..0000000000 --- a/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove single ``option=value`` entries without overwriting existing options with the same name but different values (https://github.com/ansible-collections/community.general/pull/3033). - - ini_file - add abbility to define multiple options with the same name but different values (https://github.com/ansible-collections/community.general/issues/273, https://github.com/ansible-collections/community.general/issues/1204). diff --git a/changelogs/fragments/2731-mh-cmd-locale.yml b/changelogs/fragments/2731-mh-cmd-locale.yml deleted file mode 100644 index ea905cce4b..0000000000 --- a/changelogs/fragments/2731-mh-cmd-locale.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). - - xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715). - - cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). - - snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). 
diff --git a/changelogs/fragments/2732-nmcli_add_options.yml b/changelogs/fragments/2732-nmcli_add_options.yml deleted file mode 100644 index 58ed2d2ee4..0000000000 --- a/changelogs/fragments/2732-nmcli_add_options.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730). - - nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730). diff --git a/changelogs/fragments/2735-onepassword-add_domain_option.yml b/changelogs/fragments/2735-onepassword-add_domain_option.yml deleted file mode 100644 index eef74439ce..0000000000 --- a/changelogs/fragments/2735-onepassword-add_domain_option.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734). diff --git a/changelogs/fragments/2751-flatpak-no_dependencies.yml b/changelogs/fragments/2751-flatpak-no_dependencies.yml deleted file mode 100644 index a07ead96da..0000000000 --- a/changelogs/fragments/2751-flatpak-no_dependencies.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452, https://github.com/ansible-collections/community.general/pull/2751)." diff --git a/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml b/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml deleted file mode 100644 index 8a6bfd1603..0000000000 --- a/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with Python 3.5 - (https://github.com/ansible-collections/community.general/issues/2769). diff --git a/changelogs/fragments/2774-datadog_event_api_parameter.yml b/changelogs/fragments/2774-datadog_event_api_parameter.yml deleted file mode 100644 index 6144b89400..0000000000 --- a/changelogs/fragments/2774-datadog_event_api_parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "datadog_event - adding parameter ``api_host`` to allow selecting a datadog API endpoint instead of using the default one (https://github.com/ansible-collections/community.general/issues/2774, https://github.com/ansible-collections/community.general/pull/2775)." diff --git a/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml b/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml deleted file mode 100644 index d484874ee9..0000000000 --- a/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redhat_subscription - add ``server_prefix`` and ``server_port`` parameters (https://github.com/ansible-collections/community.general/pull/2779). diff --git a/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml b/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml deleted file mode 100644 index 9fb569ec42..0000000000 --- a/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name`` option (https://github.com/ansible-collections/community.general/issues/2761). 
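For the ``no_dependencies`` parameter added to ``flatpak`` above, a minimal sketch with an example application ID:

```yaml
# Application ID is an example; no_dependencies skips installing the
# application's dependencies (mirroring `flatpak install --no-deps`).
- name: Install a flatpak without its dependencies
  community.general.flatpak:
    name: org.gnome.Calculator
    state: present
    no_dependencies: true
```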
diff --git a/changelogs/fragments/2790-callback_splunk-batch-option.yml b/changelogs/fragments/2790-callback_splunk-batch-option.yml deleted file mode 100644 index 70ee61ed64..0000000000 --- a/changelogs/fragments/2790-callback_splunk-batch-option.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - splunk callback plugin - add ``batch`` option for user-configurable correlation IDs (https://github.com/ansible-collections/community.general/issues/2790). diff --git a/changelogs/fragments/2808-pids-older-psutil.yml b/changelogs/fragments/2808-pids-older-psutil.yml deleted file mode 100644 index 34015e3f2c..0000000000 --- a/changelogs/fragments/2808-pids-older-psutil.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7 (https://github.com/ansible-collections/community.general/pull/2808)." diff --git a/changelogs/fragments/2816-archive-refactor.yml b/changelogs/fragments/2816-archive-refactor.yml deleted file mode 100644 index 75c30bcdfc..0000000000 --- a/changelogs/fragments/2816-archive-refactor.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816). - - archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816). - - archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818). diff --git a/changelogs/fragments/2821-ipa_sudorule.yml b/changelogs/fragments/2821-ipa_sudorule.yml deleted file mode 100644 index 5e1197da95..0000000000 --- a/changelogs/fragments/2821-ipa_sudorule.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - "ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group`` - (https://github.com/ansible-collections/community.general/issues/2442)." diff --git a/changelogs/fragments/2824-gitlab_project-project-under-user.yml b/changelogs/fragments/2824-gitlab_project-project-under-user.yml deleted file mode 100644 index 7fa18941a0..0000000000 --- a/changelogs/fragments/2824-gitlab_project-project-under-user.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - gitlab_project - projects can be created under other users' namespaces with the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824). diff --git a/changelogs/fragments/2827-nmcli_fix_team_slave.yml b/changelogs/fragments/2827-nmcli_fix_team_slave.yml deleted file mode 100644 index 02f001c4f5..0000000000 --- a/changelogs/fragments/2827-nmcli_fix_team_slave.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766). diff --git a/changelogs/fragments/2830-npm-version-update.yml b/changelogs/fragments/2830-npm-version-update.yml deleted file mode 100644 index ab05258e2c..0000000000 --- a/changelogs/fragments/2830-npm-version-update.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - "npm - when the ``version`` option is used the comparison of installed vs missing will - use name@version instead of just name, allowing version specific updates - (https://github.com/ansible-collections/community.general/issues/2021)."
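The ``npm`` fix above changes how installed packages are compared when ``version`` is set; a sketch of a task that now updates correctly, with a placeholder name, version, and path:

```yaml
# Placeholders throughout. Since the fix, installed packages are compared as
# name@version, so changing `version` triggers an update instead of being
# treated as already installed.
- name: Install an exact package version
  community.general.npm:
    name: lodash
    version: 4.17.21
    path: /srv/app
```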
diff --git a/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml b/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml deleted file mode 100644 index 7b61f175c6..0000000000 --- a/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - "proxmox_kvm - fix parsing of Proxmox VM information with device info not containing - a comma, like disks backed by ZFS zvols - (https://github.com/ansible-collections/community.general/issues/2840)." diff --git a/changelogs/fragments/2843-modprobe-failure-conditions.yml b/changelogs/fragments/2843-modprobe-failure-conditions.yml deleted file mode 100644 index 78ee5ce1e9..0000000000 --- a/changelogs/fragments/2843-modprobe-failure-conditions.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - modprobe - added additional checks to ensure module load/unload is effective (https://github.com/ansible-collections/community.general/issues/1608). diff --git a/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml b/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml deleted file mode 100644 index a37555edcd..0000000000 --- a/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - ali_instance_info - marked removal version of deprecated parameters ``availability_zone`` and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429). diff --git a/changelogs/fragments/2845-serverless-deprecate-functions-param.yml b/changelogs/fragments/2845-serverless-deprecate-functions-param.yml deleted file mode 100644 index 6565b18974..0000000000 --- a/changelogs/fragments/2845-serverless-deprecate-functions-param.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - serverless - deprecating parameter ``functions`` because it was not used in the code (https://github.com/ansible-collections/community.general/pull/2845). diff --git a/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml b/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml deleted file mode 100644 index ad64e58eec..0000000000 --- a/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850). -bugfixes: - - jenkins_build - examine presence of ``build_number`` before deleting a jenkins build (https://github.com/ansible-collections/community.general/pull/2850). \ No newline at end of file diff --git a/changelogs/fragments/2867-redis-terminology.yml b/changelogs/fragments/2867-redis-terminology.yml deleted file mode 100644 index add76c0f91..0000000000 --- a/changelogs/fragments/2867-redis-terminology.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "redis - allow to use the term ``replica`` instead of ``slave``, which has been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867)." diff --git a/changelogs/fragments/2874-terraform-check-destroy.yml b/changelogs/fragments/2874-terraform-check-destroy.yml deleted file mode 100644 index e41d1aebc0..0000000000 --- a/changelogs/fragments/2874-terraform-check-destroy.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - terraform - add ``check_destroy`` optional parameter to check for deletion of resources before it is applied (https://github.com/ansible-collections/community.general/pull/2874). 
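For the new ability of ``jenkins_build`` to stop a running build, a hedged example; the job name, build number, and credentials are placeholders:

```yaml
# Hypothetical job, build number and credentials; state=stopped is the new
# behavior added by the fragment above.
- name: Stop a running Jenkins build
  community.general.jenkins_build:
    name: deploy-app
    build_number: 42
    state: stopped
    user: admin
    token: "{{ jenkins_api_token }}"
```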
diff --git a/changelogs/fragments/2875-ini_file-unicode.yml b/changelogs/fragments/2875-ini_file-unicode.yml deleted file mode 100644 index eaf1ff9ffb..0000000000 --- a/changelogs/fragments/2875-ini_file-unicode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875)." \ No newline at end of file diff --git a/changelogs/fragments/2878-validate-certs-bool.yml b/changelogs/fragments/2878-validate-certs-bool.yml deleted file mode 100644 index e636f4981b..0000000000 --- a/changelogs/fragments/2878-validate-certs-bool.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "nrdp callback plugin - parameters are now converted to strings, except ``validate_certs`` which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878)." diff --git a/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml b/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml deleted file mode 100644 index 0de8368b7f..0000000000 --- a/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - gitlab_project - user projects are created using namespace ID now, instead of user ID (https://github.com/ansible-collections/community.general/pull/2881). diff --git a/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml b/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml deleted file mode 100644 index 35496e1233..0000000000 --- a/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883). diff --git a/changelogs/fragments/2901-nmcli_teaming.yml b/changelogs/fragments/2901-nmcli_teaming.yml deleted file mode 100644 index 4178b9c6f5..0000000000 --- a/changelogs/fragments/2901-nmcli_teaming.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901). diff --git a/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml b/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml deleted file mode 100644 index 1518d0190f..0000000000 --- a/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -minor_changes: - - filesystem - extend support for FreeBSD. Avoid potential data loss by checking - existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign - command) doesn't find one. Add support for character devices and ``ufs`` filesystem - type (https://github.com/ansible-collections/community.general/pull/2902). diff --git a/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml b/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml deleted file mode 100644 index 21fde3eb58..0000000000 --- a/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - keycloak_authentication - fix bug when two identical executions are in the same authentication flow - (https://github.com/ansible-collections/community.general/pull/2904). 
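The FreeBSD support extension of ``filesystem`` above adds ``ufs`` and character devices; a minimal sketch with an assumed device path:

```yaml
# Device path is an assumption; per the fragment, fstyp(8) is consulted on
# FreeBSD before creating a filesystem to avoid clobbering an existing one.
- name: Create a UFS filesystem on a FreeBSD device
  community.general.filesystem:
    fstype: ufs
    dev: /dev/ada0p3
```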
diff --git a/changelogs/fragments/2912-snap-module-helper.yml b/changelogs/fragments/2912-snap-module-helper.yml deleted file mode 100644 index cb9935a5e4..0000000000 --- a/changelogs/fragments/2912-snap-module-helper.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - module_helper module utils - avoid failing when non-zero ``rc`` is present on regular exit (https://github.com/ansible-collections/community.general/pull/2912). - - snap - fix various bugs which prevented the module from working at all, and which resulted in ``state=absent`` fail on absent snaps (https://github.com/ansible-collections/community.general/issues/2835, https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912). diff --git a/changelogs/fragments/2913-archive-dest_state.yml b/changelogs/fragments/2913-archive-dest_state.yml deleted file mode 100644 index 9e9e67434e..0000000000 --- a/changelogs/fragments/2913-archive-dest_state.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - archive - added ``dest_state`` return value to describe final state of ``dest`` after successful task execution - (https://github.com/ansible-collections/community.general/pull/2913). diff --git a/changelogs/fragments/2918-snap-param-order.yml b/changelogs/fragments/2918-snap-param-order.yml deleted file mode 100644 index 85b907f8b6..0000000000 --- a/changelogs/fragments/2918-snap-param-order.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - snap - fixed the order of the ``--classic`` parameter in the command line invocation (https://github.com/ansible-collections/community.general/issues/2916). diff --git a/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml b/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml deleted file mode 100644 index e071e3413b..0000000000 --- a/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - added feature flag parameters to ``CmdMixin`` to control whether ``rc``, ``out`` and ``err`` are automatically added to the module output (https://github.com/ansible-collections/community.general/pull/2922). diff --git a/changelogs/fragments/2923-archive-remove-bugfix.yml b/changelogs/fragments/2923-archive-remove-bugfix.yml deleted file mode 100644 index 4bef5ef459..0000000000 --- a/changelogs/fragments/2923-archive-remove-bugfix.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - archive - fixed task failure when using the ``remove`` option with a ``path`` containing nested files for - ``format``s other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919). diff --git a/changelogs/fragments/2924-npm-fix-package-json.yml b/changelogs/fragments/2924-npm-fix-package-json.yml deleted file mode 100644 index ce4a416cf7..0000000000 --- a/changelogs/fragments/2924-npm-fix-package-json.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - npm - correctly handle cases where a dependency does not have a ``version`` property because it is either missing or invalid - (https://github.com/ansible-collections/community.general/issues/2917). 
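Relating to the ``snap`` fixes above, a task using classic confinement, whose ``--classic`` flag ordering was corrected; the snap name is an example:

```yaml
# Snap name is illustrative; `classic: true` now renders `--classic` in the
# correct position of the generated command line.
- name: Install a classic-confinement snap
  community.general.snap:
    name: code
    classic: true
    state: present
```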
diff --git a/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml b/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml deleted file mode 100644 index 3efbe59860..0000000000 --- a/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934). diff --git a/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml b/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml deleted file mode 100644 index 815ffa4aee..0000000000 --- a/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758). diff --git a/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml b/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml deleted file mode 100644 index 32a6341086..0000000000 --- a/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946). diff --git a/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml b/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml deleted file mode 100644 index 99259d6301..0000000000 --- a/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948). diff --git a/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml b/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml deleted file mode 100644 index cdc0d4ae69..0000000000 --- a/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949). diff --git a/changelogs/fragments/2951-mh-vars-deepcopy.yml b/changelogs/fragments/2951-mh-vars-deepcopy.yml deleted file mode 100644 index 339cca3aa7..0000000000 --- a/changelogs/fragments/2951-mh-vars-deepcopy.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - module_helper module utils - fixed change-tracking for dictionaries and lists (https://github.com/ansible-collections/community.general/pull/2951). diff --git a/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml b/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml deleted file mode 100644 index 9ff6f01f7d..0000000000 --- a/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - rax_mon_notification_plan - fixed validation checks by specifying type ``str`` as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state`` (https://github.com/ansible-collections/community.general/pull/2955). 
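For the rewritten ``dnsimple`` module and its new ``sandbox`` parameter, a hypothetical record task; the token and record data are placeholders:

```yaml
# Token, domain and record data are placeholders; sandbox=true targets the
# DNSimple sandbox environment instead of production.
- name: Create an A record against the DNSimple sandbox
  community.general.dnsimple:
    account_api_token: "{{ dnsimple_token }}"
    domain: example.com
    record: www
    type: A
    value: 192.0.2.10
    sandbox: true
    state: present
```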
diff --git a/changelogs/fragments/2958-datadog_monitor_support_composites.yml b/changelogs/fragments/2958-datadog_monitor_support_composites.yml deleted file mode 100644 index 394a589994..0000000000 --- a/changelogs/fragments/2958-datadog_monitor_support_composites.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - datadog_monitor - allow creation of composite datadog monitors - (https://github.com/ansible-collections/community.general/issues/2956). diff --git a/changelogs/fragments/2960-launchd-validation-check.yaml b/changelogs/fragments/2960-launchd-validation-check.yaml deleted file mode 100644 index 15cb3c3fa5..0000000000 --- a/changelogs/fragments/2960-launchd-validation-check.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960). diff --git a/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml b/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml deleted file mode 100644 index fa5f133d7d..0000000000 --- a/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- keycloak_authentication - enhanced diff mode to also return before and after state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963). diff --git a/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml b/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml deleted file mode 100644 index d52fef4d8a..0000000000 --- a/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory plugin - fixed parsing failures when some cluster nodes are offline (https://github.com/ansible-collections/community.general/issues/2931). diff --git a/changelogs/fragments/2987-archive-stage-idempotency-fix.yml b/changelogs/fragments/2987-archive-stage-idempotency-fix.yml deleted file mode 100644 index 5c9e980935..0000000000 --- a/changelogs/fragments/2987-archive-stage-idempotency-fix.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - archive - refactoring prior to fix for idempotency checks. The fix will be a breaking change and only appear - in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987). diff --git a/changelogs/fragments/2989-pamd-single-line.yaml b/changelogs/fragments/2989-pamd-single-line.yaml deleted file mode 100644 index 359e160785..0000000000 --- a/changelogs/fragments/2989-pamd-single-line.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925). diff --git a/changelogs/fragments/3001-enhance_gitlab_module.yml b/changelogs/fragments/3001-enhance_gitlab_module.yml deleted file mode 100644 index e39985530e..0000000000 --- a/changelogs/fragments/3001-enhance_gitlab_module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``, ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``, ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002). 
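A sketch combining some of the new ``gitlab_project`` options listed above; the URL, token, project name, and chosen values are assumptions:

```yaml
# Hypothetical values; the merge/squash settings shown are among the options
# added by the fragment above.
- name: Create a project with stricter merge settings
  community.general.gitlab_project:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_token }}"
    name: app
    only_allow_merge_if_pipeline_succeeds: true
    squash_option: always
    state: present
```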
diff --git a/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml b/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml deleted file mode 100644 index 680d3dea83..0000000000 --- a/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - redfish_command - fix extraneous error caused by missing ``bootdevice`` argument - when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005). diff --git a/changelogs/fragments/3028-snap-channel.yml b/changelogs/fragments/3028-snap-channel.yml deleted file mode 100644 index c3aea4b5a0..0000000000 --- a/changelogs/fragments/3028-snap-channel.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "snap - fix formatting of ``--channel`` argument when the ``channel`` option is used (https://github.com/ansible-collections/community.general/pull/3028)." diff --git a/changelogs/fragments/3034-promox-kvm-return-new-id.yaml b/changelogs/fragments/3034-promox-kvm-return-new-id.yaml deleted file mode 100644 index 8cbd769a04..0000000000 --- a/changelogs/fragments/3034-promox-kvm-return-new-id.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid`` (https://github.com/ansible-collections/community.general/pull/3034). diff --git a/changelogs/fragments/3036-archive-root-path-fix.yml b/changelogs/fragments/3036-archive-root-path-fix.yml deleted file mode 100644 index fa460f82b9..0000000000 --- a/changelogs/fragments/3036-archive-root-path-fix.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - archive - fixing archive root determination when longest common root is ``/`` - (https://github.com/ansible-collections/community.general/pull/3036). diff --git a/changelogs/fragments/3038-enhance_github_repo_api_url.yml b/changelogs/fragments/3038-enhance_github_repo_api_url.yml deleted file mode 100644 index 19eda0f66d..0000000000 --- a/changelogs/fragments/3038-enhance_github_repo_api_url.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - github_repo - add new option ``api_url`` to allow working with on-premises installations (https://github.com/ansible-collections/community.general/pull/3038). diff --git a/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml b/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml deleted file mode 100644 index d1be8b78d3..0000000000 --- a/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - gitlab_group_members - fixes pagination issue when a gitlab group has more than 20 members (https://github.com/ansible-collections/community.general/issues/3041). - - gitlab_project_members - fixes pagination issue when a gitlab group has more than 20 members (https://github.com/ansible-collections/community.general/issues/3041). diff --git a/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml b/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml deleted file mode 100644 index ce558e1f84..0000000000 --- a/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047).
-- gitlab_group_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3047). diff --git a/changelogs/fragments/3044-proxmox-inventory-snapshots.yml b/changelogs/fragments/3044-proxmox-inventory-snapshots.yml deleted file mode 100644 index d6a324ea30..0000000000 --- a/changelogs/fragments/3044-proxmox-inventory-snapshots.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044). diff --git a/changelogs/fragments/3049-xfconf-deprecate-get.yaml b/changelogs/fragments/3049-xfconf-deprecate-get.yaml deleted file mode 100644 index 359b39301e..0000000000 --- a/changelogs/fragments/3049-xfconf-deprecate-get.yaml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should be used instead (https://github.com/ansible-collections/community.general/pull/3049). diff --git a/changelogs/fragments/3052_proxmox_inventory_plugin.yml b/changelogs/fragments/3052_proxmox_inventory_plugin.yml deleted file mode 100644 index dfd4dddea9..0000000000 --- a/changelogs/fragments/3052_proxmox_inventory_plugin.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052). diff --git a/changelogs/fragments/3067-taiga-bugfix.yaml b/changelogs/fragments/3067-taiga-bugfix.yaml deleted file mode 100644 index dfd3b531b0..0000000000 --- a/changelogs/fragments/3067-taiga-bugfix.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067). diff --git a/changelogs/fragments/3068-supervisorctl-bugfix.yaml b/changelogs/fragments/3068-supervisorctl-bugfix.yaml deleted file mode 100644 index 6571e211b6..0000000000 --- a/changelogs/fragments/3068-supervisorctl-bugfix.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068). -minor_changes: - - supervisorctl - using standard Ansible mechanism to validate ``signalled`` state required parameter (https://github.com/ansible-collections/community.general/pull/3068). diff --git a/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml b/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml deleted file mode 100644 index 7bfe958a12..0000000000 --- a/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - ini_file - fix inconsistency between empty value and no value - (https://github.com/ansible-collections/community.general/issues/3031). diff --git a/changelogs/fragments/3075-archive-idempotency-enhancements.yml b/changelogs/fragments/3075-archive-idempotency-enhancements.yml deleted file mode 100644 index 3d0bf65fb7..0000000000 --- a/changelogs/fragments/3075-archive-idempotency-enhancements.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -breaking_changes: - - archive - adding idempotency checks for changes to file names and content within the ``destination`` file - (https://github.com/ansible-collections/community.general/pull/3075). 
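For the ``supervisorctl`` ``signalled`` state fix above, a minimal example; the program name and signal are placeholders:

```yaml
# Program name and signal are examples; per the fragment, `signal` is now
# validated as required whenever state=signalled.
- name: Send SIGHUP to a supervisor-managed program
  community.general.supervisorctl:
    name: my_app
    state: signalled
    signal: HUP
```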
diff --git a/changelogs/fragments/3079-report-power-state-hpilo.yaml b/changelogs/fragments/3079-report-power-state-hpilo.yaml deleted file mode 100644 index e057e3395f..0000000000 --- a/changelogs/fragments/3079-report-power-state-hpilo.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - hpilo_info - added ``host_power_status`` return value to report power state of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079). diff --git a/changelogs/fragments/3080-java_cert-2460-import_private_key.yml b/changelogs/fragments/3080-java_cert-2460-import_private_key.yml deleted file mode 100644 index 465c484673..0000000000 --- a/changelogs/fragments/3080-java_cert-2460-import_private_key.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - java_cert - import private key as well as public certificate from PKCS#12 - (https://github.com/ansible-collections/community.general/issues/2460). diff --git a/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml b/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml deleted file mode 100644 index 4425d955fc..0000000000 --- a/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden`` or ``mode`` - (https://github.com/ansible-collections/community.general/pull/3081). diff --git a/changelogs/fragments/3084-info-checkmode.yaml b/changelogs/fragments/3084-info-checkmode.yaml deleted file mode 100644 index 4e9fa85075..0000000000 --- a/changelogs/fragments/3084-info-checkmode.yaml +++ /dev/null @@ -1,24 +0,0 @@ -bugfixes: - - ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). 
- - oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). diff --git a/changelogs/fragments/3092-gunicorn-refactor.yaml b/changelogs/fragments/3092-gunicorn-refactor.yaml deleted file mode 100644 index 114e865add..0000000000 --- a/changelogs/fragments/3092-gunicorn-refactor.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gunicorn - search for ``gunicorn`` binary in more paths (https://github.com/ansible-collections/community.general/pull/3092). diff --git a/changelogs/fragments/3093-ejabberd_user-refactor.yaml b/changelogs/fragments/3093-ejabberd_user-refactor.yaml deleted file mode 100644 index 875ef6da71..0000000000 --- a/changelogs/fragments/3093-ejabberd_user-refactor.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()`` for the command, passing args to ``run_command()`` as list instead of string (https://github.com/ansible-collections/community.general/pull/3093). diff --git a/changelogs/fragments/3098-django_manage-cmd-list.yaml b/changelogs/fragments/3098-django_manage-cmd-list.yaml deleted file mode 100644 index 8522059ff6..0000000000 --- a/changelogs/fragments/3098-django_manage-cmd-list.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - django_manage - refactor to call ``run_command()`` passing command as a list instead of string (https://github.com/ansible-collections/community.general/pull/3098). diff --git a/changelogs/fragments/3104-deploy_helper-required_if.yaml b/changelogs/fragments/3104-deploy_helper-required_if.yaml deleted file mode 100644 index ee48461003..0000000000 --- a/changelogs/fragments/3104-deploy_helper-required_if.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - deploy_helper - improved parameter checking by using standard Ansible construct (https://github.com/ansible-collections/community.general/pull/3104). diff --git a/changelogs/fragments/3106-apache2_module-review.yaml b/changelogs/fragments/3106-apache2_module-review.yaml deleted file mode 100644 index d7840b2511..0000000000 --- a/changelogs/fragments/3106-apache2_module-review.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - apache2_module - minor refactoring improving code quality, readability and speed (https://github.com/ansible-collections/community.general/pull/3106). 
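For the new ``wifi`` option of ``nmcli`` above, a hedged sketch of a hidden-network connection; the connection name, interface, and SSID are invented:

```yaml
# Placeholders throughout; per the fragment, keys inside `wifi` map to
# 802-11-wireless settings such as `hidden` or `mode`.
- name: Configure a hidden Wi-Fi connection
  community.general.nmcli:
    conn_name: office-wifi
    type: wifi
    ifname: wlan0
    ssid: OfficeNet
    wifi:
      hidden: true
    state: present
```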
diff --git a/changelogs/fragments/3125-hana-query-userstore.yaml b/changelogs/fragments/3125-hana-query-userstore.yaml deleted file mode 100644 index 0a626fe7f5..0000000000 --- a/changelogs/fragments/3125-hana-query-userstore.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - hana_query - added the ability to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125). diff --git a/changelogs/fragments/3132-nmcli-dummy.yaml b/changelogs/fragments/3132-nmcli-dummy.yaml deleted file mode 100644 index 970bda34e8..0000000000 --- a/changelogs/fragments/3132-nmcli-dummy.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724). diff --git a/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml b/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml deleted file mode 100644 index d1f24d4c24..0000000000 --- a/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - add ``boot_override_mode`` argument to BootSourceOverride commands (https://github.com/ansible-collections/community.general/issues/3134). diff --git a/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml b/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml deleted file mode 100644 index 6cc5e7630d..0000000000 --- a/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - nmcli - add ``wifi-sec`` option change detection to support managing secure Wi-Fi connections - (https://github.com/ansible-collections/community.general/pull/3136). diff --git a/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml b/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml deleted file mode 100644 index f06fa68ce0..0000000000 --- a/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version 1.0.0 - (https://github.com/ansible-collections/community.general/issues/3057, https://github.com/ansible-collections/community.general/pull/3139). diff --git a/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml b/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml deleted file mode 100644 index e6c15c8786..0000000000 --- a/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - nmcli - query ``nmcli`` directly to determine available WiFi options - (https://github.com/ansible-collections/community.general/pull/3141). diff --git a/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml b/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml deleted file mode 100644 index 47e1837a0b..0000000000 --- a/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml +++ /dev/null @@ -1,4 +0,0 @@ -security_fixes: - - nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli con edit`` - instead and pass secrets as ``stdin`` - (https://github.com/ansible-collections/community.general/issues/3145).
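For the ``boot_override_mode`` argument added to ``redfish_command`` above, a sketch of a one-time UEFI PXE boot; the BMC address and credentials are placeholders:

```yaml
# BMC address and credentials are placeholders; boot_override_mode selects
# Legacy or UEFI for the one-time boot override.
- name: Set a one-time PXE boot in UEFI mode
  community.general.redfish_command:
    category: Systems
    command: SetOneTimeBoot
    bootdevice: Pxe
    boot_override_mode: UEFI
    baseuri: "{{ bmc_address }}"
    username: "{{ bmc_user }}"
    password: "{{ bmc_password }}"
```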
diff --git a/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml b/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml
deleted file mode 100644
index bb29542c04..0000000000
--- a/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - openbsd_pkg - fix regexp matching crash. This bug could trigger on package names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161).
diff --git a/changelogs/fragments/3164-zypper-support-transactional-updates.yaml b/changelogs/fragments/3164-zypper-support-transactional-updates.yaml
deleted file mode 100644
index d12ff9a6bf..0000000000
--- a/changelogs/fragments/3164-zypper-support-transactional-updates.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - zypper - prefix zypper commands with ``/sbin/transactional-update --continue --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159).
diff --git a/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml b/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml
deleted file mode 100644
index 9057be911c..0000000000
--- a/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
-  - ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order
-    (https://github.com/ansible-collections/community.general/pull/3178).
diff --git a/changelogs/fragments/3191-vdo-refactor.yml b/changelogs/fragments/3191-vdo-refactor.yml
deleted file mode 100644
index fe3fcfe7b1..0000000000
--- a/changelogs/fragments/3191-vdo-refactor.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
-  - vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191).
-bugfixes:
-  - vdo - boolean arguments now compared with proper ``true`` and ``false`` values instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191).
diff --git a/changelogs/fragments/3194-sanity.yml b/changelogs/fragments/3194-sanity.yml
deleted file mode 100644
index b6961556ce..0000000000
--- a/changelogs/fragments/3194-sanity.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-bugfixes:
-- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "logdna callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194)."
-- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)."
-- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." -- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." -- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." -- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." -- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." diff --git a/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml b/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml deleted file mode 100644 index 3909286487..0000000000 --- a/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` version <=0.0.5 - (https://github.com/ansible-collections/community.general/issues/3192, https://github.com/ansible-collections/community.general/pull/3199). diff --git a/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml b/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml deleted file mode 100644 index fa7581e820..0000000000 --- a/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "linode inventory plugin - adds the ``ip_style`` configuration key. Set to ``api`` to get more detailed network details back from the remote Linode host (https://github.com/ansible-collections/community.general/pull/3203)." diff --git a/changelogs/fragments/3205-slack-minor-refactor.yaml b/changelogs/fragments/3205-slack-minor-refactor.yaml deleted file mode 100644 index 5337350f69..0000000000 --- a/changelogs/fragments/3205-slack-minor-refactor.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205). diff --git a/changelogs/fragments/3206-mh-classmethod.yaml b/changelogs/fragments/3206-mh-classmethod.yaml deleted file mode 100644 index 19cd8a6739..0000000000 --- a/changelogs/fragments/3206-mh-classmethod.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module_utils - added classmethod to trigger the execution of MH modules (https://github.com/ansible-collections/community.general/pull/3206). diff --git a/changelogs/fragments/3211-snap-error-handling.yml b/changelogs/fragments/3211-snap-error-handling.yml deleted file mode 100644 index d361b99f01..0000000000 --- a/changelogs/fragments/3211-snap-error-handling.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - snap - improved module error handling, especially for the case when snap server is down (https://github.com/ansible-collections/community.general/issues/2970). diff --git a/changelogs/fragments/3228-tss-domain-authorization.yml b/changelogs/fragments/3228-tss-domain-authorization.yml deleted file mode 100644 index 0a80b3dd8e..0000000000 --- a/changelogs/fragments/3228-tss-domain-authorization.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - tss lookup plugin - added new parameter for domain authorization - (https://github.com/ansible-collections/community.general/pull/3228). 
diff --git a/changelogs/fragments/3231-fix-keycloak-realm-events.yml b/changelogs/fragments/3231-fix-keycloak-realm-events.yml
deleted file mode 100644
index 9950ed2c59..0000000000
--- a/changelogs/fragments/3231-fix-keycloak-realm-events.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-bugfixes:
-  - keycloak_realm - element type for ``events_listeners`` parameter should be ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231).
-minor_changes:
-  - keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation of login events (https://github.com/ansible-collections/community.general/pull/3231).
-
\ No newline at end of file
diff --git a/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml b/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml
deleted file mode 100644
index baed989fbf..0000000000
--- a/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - redfish_info - include ``Status`` property for Thermal objects when querying Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232).
diff --git a/changelogs/fragments/3237-copr-fix_chroot_naming.yml b/changelogs/fragments/3237-copr-fix_chroot_naming.yml
deleted file mode 100644
index 7a942bc94e..0000000000
--- a/changelogs/fragments/3237-copr-fix_chroot_naming.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-`` (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237).
\ No newline at end of file
diff --git a/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml b/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml
deleted file mode 100644
index 78a172342e..0000000000
--- a/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - "nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239)."
diff --git a/changelogs/fragments/3247-retry_servfail-for-dig.yaml b/changelogs/fragments/3247-retry_servfail-for-dig.yaml
deleted file mode 100644
index 1e4a00384f..0000000000
--- a/changelogs/fragments/3247-retry_servfail-for-dig.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
-  - dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247).
diff --git a/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml b/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml
deleted file mode 100644
index f565fea565..0000000000
--- a/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
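A sketch of the ``retry_servfail`` option mentioned above; the domain and invocation style are illustrative and may need adjusting for the plugin's exact term syntax:

    - name: Resolve a record, retrying on SERVFAIL responses
      ansible.builtin.debug:
        msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}"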
diff --git a/changelogs/fragments/3250-parse-scw-config.yml b/changelogs/fragments/3250-parse-scw-config.yml
deleted file mode 100644
index 8c96c55e47..0000000000
--- a/changelogs/fragments/3250-parse-scw-config.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - scaleway inventory plugin - parse scw-cli config file for ``oauth_token`` (https://github.com/ansible-collections/community.general/pull/3250).
diff --git a/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml b/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml
deleted file mode 100644
index 6e8ccb29f8..0000000000
--- a/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-minor_changes:
-  - tss lookup plugin - refactored to decouple the supporting third-party library (``python-tss-sdk``)
-    (https://github.com/ansible-collections/community.general/pull/3252).
diff --git a/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml b/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml
deleted file mode 100644
index 141a31349f..0000000000
--- a/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - udm_dns_record - fixed managing of PTR records, which could never have worked before (https://github.com/ansible-collections/community.general/pull/3256).
diff --git a/changelogs/fragments/3258-apache2_module.yml b/changelogs/fragments/3258-apache2_module.yml
deleted file mode 100644
index a60f2125a4..0000000000
--- a/changelogs/fragments/3258-apache2_module.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- "apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message when not found (https://github.com/ansible-collections/community.general/issues/3253)."
diff --git a/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml b/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml
deleted file mode 100644
index e3f6bef7bc..0000000000
--- a/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, https://github.com/ansible-collections/community.general/pull/3262)."
diff --git a/changelogs/fragments/3266-vmid-existing-target-clone.yml b/changelogs/fragments/3266-vmid-existing-target-clone.yml
deleted file mode 100644
index 5ff59f5311..0000000000
--- a/changelogs/fragments/3266-vmid-existing-target-clone.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
-  - proxmox_kvm - clone operation should return the VMID of the target VM and not that of the source VM.
-    This was failing when the target VM with the chosen name already existed (https://github.com/ansible-collections/community.general/pull/3266).
\ No newline at end of file
diff --git a/changelogs/fragments/3267-dnsimple1-deprecation.yml b/changelogs/fragments/3267-dnsimple1-deprecation.yml
deleted file mode 100644
index dadc1d2901..0000000000
--- a/changelogs/fragments/3267-dnsimple1-deprecation.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-deprecated_features:
-  - "dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693)."
diff --git a/changelogs/fragments/3280-keycloak-module-cleanup-and-consistency.yml b/changelogs/fragments/3280-keycloak-module-cleanup-and-consistency.yml
deleted file mode 100644
index 4d06070886..0000000000
--- a/changelogs/fragments/3280-keycloak-module-cleanup-and-consistency.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-deprecated_features:
-  - keycloak_authentication - the return value ``flow`` is now deprecated and will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280).
-  - keycloak_group - the return value ``group`` is now deprecated and will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280).
-
-minor_changes:
-  - keycloak_* modules - refactor many of the ``keycloak_*`` modules to have similar structures, comments, and documentation (https://github.com/ansible-collections/community.general/pull/3280).
diff --git a/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml b/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml
deleted file mode 100644
index ba8b4efd69..0000000000
--- a/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - django_manage - argument ``command`` is being split again as it should be (https://github.com/ansible-collections/community.general/issues/3215).
diff --git a/changelogs/fragments/3284-openwrt_init-improvements.yaml b/changelogs/fragments/3284-openwrt_init-improvements.yaml
deleted file mode 100644
index 99a60dfce8..0000000000
--- a/changelogs/fragments/3284-openwrt_init-improvements.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
-  - openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284).
-bugfixes:
-  - openwrt_init - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3284).
diff --git a/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml b/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml
deleted file mode 100644
index 1c176dfdc3..0000000000
--- a/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
-  - pamd - the code for ``state=updated`` made no distinction between ``None`` and an empty list when dealing with the pam module arguments (https://github.com/ansible-collections/community.general/issues/3260).
-minor_changes:
-  - pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285).
diff --git a/changelogs/fragments/3286-open_iscsi-improvements.yaml b/changelogs/fragments/3286-open_iscsi-improvements.yaml
deleted file mode 100644
index 860a5f7811..0000000000
--- a/changelogs/fragments/3286-open_iscsi-improvements.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
-  - open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286).
-bugfixes:
-  - open_iscsi - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3286).
diff --git a/changelogs/fragments/3290-mh-cmd-boolean-not.yaml b/changelogs/fragments/3290-mh-cmd-boolean-not.yaml
deleted file mode 100644
index ab34539f15..0000000000
--- a/changelogs/fragments/3290-mh-cmd-boolean-not.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290).
diff --git a/changelogs/fragments/3296-clean-etag.yaml b/changelogs/fragments/3296-clean-etag.yaml
deleted file mode 100644
index 317772cb15..0000000000
--- a/changelogs/fragments/3296-clean-etag.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "redfish_command and redfish_config and redfish_utils module utils - add parameter to strip etag of quotes before patch, since some vendors do not properly handle ``If-Match`` etags with quotes (https://github.com/ansible-collections/community.general/pull/3296)."
diff --git a/changelogs/fragments/3313-nmcli-add_gsm_support.yml b/changelogs/fragments/3313-nmcli-add_gsm_support.yml
deleted file mode 100644
index 9986bca675..0000000000
--- a/changelogs/fragments/3313-nmcli-add_gsm_support.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313)."
diff --git a/changelogs/fragments/3315-pids-refactor.yml b/changelogs/fragments/3315-pids-refactor.yml
deleted file mode 100644
index 53a36c2cad..0000000000
--- a/changelogs/fragments/3315-pids-refactor.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-minor_changes:
-  - pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option
-    (https://github.com/ansible-collections/community.general/pull/3315).
diff --git a/changelogs/fragments/3319-gitlab_project_members_enhancement.yml b/changelogs/fragments/3319-gitlab_project_members_enhancement.yml
deleted file mode 100644
index 7795cd1f02..0000000000
--- a/changelogs/fragments/3319-gitlab_project_members_enhancement.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
-  - gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
-  - gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319).
diff --git a/changelogs/fragments/3327-tss-token-authorization.yml b/changelogs/fragments/3327-tss-token-authorization.yml
deleted file mode 100644
index 5d9f56cb72..0000000000
--- a/changelogs/fragments/3327-tss-token-authorization.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
-  - tss lookup plugin - added ``token`` parameter for token authorization;
-    ``username`` and ``password`` are optional when ``token`` is provided
-    (https://github.com/ansible-collections/community.general/pull/3327).
diff --git a/changelogs/fragments/3328-interfaces_file-improvements.yaml b/changelogs/fragments/3328-interfaces_file-improvements.yaml
deleted file mode 100644
index 10734af603..0000000000
--- a/changelogs/fragments/3328-interfaces_file-improvements.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
-  - interfaces_file - no longer reports a change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
-minor_changes:
-  - interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
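A sketch of the list-valued ``gitlab_user`` support noted above; the URL, token variable, project path, and user names are placeholders:

    - name: Ensure several users are developers on a project
      community.general.gitlab_project_members:
        api_url: https://gitlab.example.com
        api_token: "{{ gitlab_token }}"
        project: my-group/my-project
        gitlab_user:
          - alice
          - bob
        access_level: developer
        state: present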
diff --git a/changelogs/fragments/3329-kernel_blacklist-improvements.yaml b/changelogs/fragments/3329-kernel_blacklist-improvements.yaml
deleted file mode 100644
index 2c1dd31da5..0000000000
--- a/changelogs/fragments/3329-kernel_blacklist-improvements.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
diff --git a/changelogs/fragments/3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml b/changelogs/fragments/3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml
deleted file mode 100644
index bcd2e594d7..0000000000
--- a/changelogs/fragments/3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
-  - keycloak_authentication - fix a bug where the requirement was always set to ``DISABLED`` when creating a new authentication flow
-    (https://github.com/ansible-collections/community.general/pull/3330).
diff --git a/changelogs/fragments/3331-do_not_ignore_volatile_configs_by_option.yml b/changelogs/fragments/3331-do_not_ignore_volatile_configs_by_option.yml
deleted file mode 100644
index 3e176c9b49..0000000000
--- a/changelogs/fragments/3331-do_not_ignore_volatile_configs_by_option.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "lxd_container - add ``ignore_volatile_options`` option which allows disabling the behavior that the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331)."
\ No newline at end of file
diff --git a/changelogs/fragments/3332-zpool_facts-pythonify.yaml b/changelogs/fragments/3332-zpool_facts-pythonify.yaml
deleted file mode 100644
index ddb29b9efb..0000000000
--- a/changelogs/fragments/3332-zpool_facts-pythonify.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
diff --git a/changelogs/fragments/3334-django_manage-split-params.yaml b/changelogs/fragments/3334-django_manage-split-params.yaml
deleted file mode 100644
index 38ec68a532..0000000000
--- a/changelogs/fragments/3334-django_manage-split-params.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - django_manage - parameters ``apps`` and ``fixtures`` are now split instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
diff --git a/changelogs/fragments/3336-openbsd_pkg-fix-KeyError.yml b/changelogs/fragments/3336-openbsd_pkg-fix-KeyError.yml
deleted file mode 100644
index 7f10c186dd..0000000000
--- a/changelogs/fragments/3336-openbsd_pkg-fix-KeyError.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-bugfixes:
-  - openbsd_pkg - fix crash from ``KeyError`` exception when a package installs,
-    but ``pkg_add`` returns with a non-zero exit code
-    (https://github.com/ansible-collections/community.general/pull/3336).
diff --git a/changelogs/fragments/3337-linode-fix.yml b/changelogs/fragments/3337-linode-fix.yml
deleted file mode 100644
index 06887b1901..0000000000
--- a/changelogs/fragments/3337-linode-fix.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - "linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337)."
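A sketch of pinning the new ``ignore_volatile_options`` option above; the container name and image source are illustrative and may need adjusting for a real LXD remote:

    - name: Start a container without ignoring ``volatile.*`` options
      community.general.lxd_container:
        name: web01
        state: started
        source:
          type: image
          alias: ubuntu/22.04
        ignore_volatile_options: false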
diff --git a/changelogs/fragments/3343-redfish_utils-addUser-userId.yml b/changelogs/fragments/3343-redfish_utils-addUser-userId.yml
deleted file mode 100644
index 7b8aa0b700..0000000000
--- a/changelogs/fragments/3343-redfish_utils-addUser-userId.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).
diff --git a/changelogs/fragments/3359-add-unicode_normalize-filter.yml b/changelogs/fragments/3359-add-unicode_normalize-filter.yml
deleted file mode 100644
index 33aa06dc92..0000000000
--- a/changelogs/fragments/3359-add-unicode_normalize-filter.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-add plugin.filter:
-  - name: unicode_normalize
-    description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
diff --git a/changelogs/fragments/3367-add-require_two_factor_authentication-property-to-gitlab-group.yml b/changelogs/fragments/3367-add-require_two_factor_authentication-property-to-gitlab-group.yml
deleted file mode 100644
index c2f9e7181d..0000000000
--- a/changelogs/fragments/3367-add-require_two_factor_authentication-property-to-gitlab-group.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
diff --git a/changelogs/fragments/3379-gitlab_project-ci_cd_properties.yml b/changelogs/fragments/3379-gitlab_project-ci_cd_properties.yml
deleted file mode 100644
index dc68c27541..0000000000
--- a/changelogs/fragments/3379-gitlab_project-ci_cd_properties.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled`` (https://github.com/ansible-collections/community.general/pull/3379).
diff --git a/changelogs/fragments/3393-pkgng-many_packages_one_command.yml b/changelogs/fragments/3393-pkgng-many_packages_one_command.yml
deleted file mode 100644
index 49b24f0bfc..0000000000
--- a/changelogs/fragments/3393-pkgng-many_packages_one_command.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-minor_changes:
-  - pkgng - packages being installed (or upgraded) are acted on in one command (per action)
-    (https://github.com/ansible-collections/community.general/issues/2265).
-  - pkgng - status message specifies number of packages installed and/or upgraded separately.
-    Previously, all changes were reported as one count of packages "added" (https://github.com/ansible-collections/community.general/pull/3393).
diff --git a/changelogs/fragments/3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml b/changelogs/fragments/3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml
deleted file mode 100644
index ab13b4adba..0000000000
--- a/changelogs/fragments/3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``, ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
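A one-liner sketch of the unicode_normalize filter registered above; the input string and the ``NFC`` form argument are illustrative:

    - name: Normalize a string to NFC before comparison
      ansible.builtin.debug:
        msg: "{{ 'café' | community.general.unicode_normalize('NFC') }}"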
diff --git a/changelogs/fragments/3401-nmcli-needs-type.yml b/changelogs/fragments/3401-nmcli-needs-type.yml
deleted file mode 100644
index 9fe7593ba3..0000000000
--- a/changelogs/fragments/3401-nmcli-needs-type.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
-  - "nmcli - the option ``routing_rules4`` can now be specified as a list of strings, instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401)."
diff --git a/changelogs/fragments/3404-redfish_utils-skip-manager-network-check.yml b/changelogs/fragments/3404-redfish_utils-skip-manager-network-check.yml
deleted file mode 100644
index 9b17761ff2..0000000000
--- a/changelogs/fragments/3404-redfish_utils-skip-manager-network-check.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - redfish_utils module utils - if a manager network property is not specified in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/).
diff --git a/changelogs/fragments/3422-open-iscsi-mutual-authentication-support.yaml b/changelogs/fragments/3422-open-iscsi-mutual-authentication-support.yaml
deleted file mode 100644
index c5fc84d1ae..0000000000
--- a/changelogs/fragments/3422-open-iscsi-mutual-authentication-support.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
-  - open-iscsi - add support for mutual authentication between target and initiator (https://github.com/ansible-collections/community.general/pull/3422).
diff --git a/changelogs/fragments/3425-mail_add_configurable_ehlo_hostname.yml b/changelogs/fragments/3425-mail_add_configurable_ehlo_hostname.yml
deleted file mode 100644
index dbc9cfb276..0000000000
--- a/changelogs/fragments/3425-mail_add_configurable_ehlo_hostname.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - mail - added the ``ehlohost`` parameter which allows for manual override of the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
diff --git a/changelogs/fragments/3426-copy-permissions-along-with-file-for-jboss-module.yml b/changelogs/fragments/3426-copy-permissions-along-with-file-for-jboss-module.yml
deleted file mode 100644
index 7685cce02b..0000000000
--- a/changelogs/fragments/3426-copy-permissions-along-with-file-for-jboss-module.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-bugfixes:
-  - jboss - fix the deployment file permission issue when the JBoss server is running
-    as a non-root user. The deployment file was copied with file content only; the
-    file permission was set to ``440`` and belonged to the root user, so when the
-    JBoss ``WildFly`` server was running as a non-root user, it was unable to read
-    the deployment file (https://github.com/ansible-collections/community.general/pull/3426).
diff --git a/changelogs/fragments/3429-enable_deprecaded_message_for_ignore_volatile_option.yml b/changelogs/fragments/3429-enable_deprecaded_message_for_ignore_volatile_option.yml
deleted file mode 100644
index ce7a56cb10..0000000000
--- a/changelogs/fragments/3429-enable_deprecaded_message_for_ignore_volatile_option.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-deprecated_features:
-  - "lxd_container - the current default value ``true`` of ``ignore_volatile_options`` is deprecated and will change to ``false`` in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/3429)."
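A sketch of the ``ehlohost`` override added to the mail module above; host names and addresses are placeholders:

    - name: Send a notification, overriding the EHLO hostname
      community.general.mail:
        host: smtp.example.com
        port: 25
        ehlohost: ansible.example.com
        from: ansible@example.com
        to: ops@example.com
        subject: Deployment finished
        body: All tasks completed successfully.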
diff --git a/changelogs/fragments/3450-callback_opentelemetry-exception_handling.yml b/changelogs/fragments/3450-callback_opentelemetry-exception_handling.yml
deleted file mode 100644
index 09e0af8bee..0000000000
--- a/changelogs/fragments/3450-callback_opentelemetry-exception_handling.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - opentelemetry callback plugin - validate the task result exception without crashing, and simplify the code a bit (https://github.com/ansible-collections/community.general/pull/3450, https://github.com/ansible/ansible/issues/75726).
diff --git a/changelogs/fragments/3451-gitlab-group-member-deprecate-name-and-path.yml b/changelogs/fragments/3451-gitlab-group-member-deprecate-name-and-path.yml
deleted file mode 100644
index 6b83c18b62..0000000000
--- a/changelogs/fragments/3451-gitlab-group-member-deprecate-name-and-path.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-deprecated_features:
-  - gitlab_group_members - setting ``gitlab_group`` to ``name`` or ``path`` is deprecated. Use ``full_path`` instead (https://github.com/ansible-collections/community.general/pull/3451).
diff --git a/changelogs/fragments/3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml b/changelogs/fragments/3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml
deleted file mode 100644
index 3077b4a45d..0000000000
--- a/changelogs/fragments/3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
diff --git a/changelogs/fragments/3461-remove-deprecations-for-4.0.0.yml b/changelogs/fragments/3461-remove-deprecations-for-4.0.0.yml
deleted file mode 100644
index 4d88c5f936..0000000000
--- a/changelogs/fragments/3461-remove-deprecations-for-4.0.0.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-removed_features:
-  - "ModuleHelper module utils - remove fallback when value could not be determined for a parameter (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "proxmox - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "proxmox_kvm - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "grove - removed the deprecated alias ``message`` of the ``message_content`` option (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "telegram - removed the deprecated ``msg``, ``msg_format`` and ``chat_id`` options (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "cpanm - removed the deprecated ``system_lib`` option. Use Ansible's privilege escalation mechanism instead; the option basically used ``sudo`` (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "runit - removed the deprecated ``dist`` option which was not used by the module (https://github.com/ansible-collections/community.general/pull/3461)."
-  - "xfconf - the default value of ``disable_facts`` changed to ``true``, and the value ``false`` is no longer allowed. Register the module results instead (https://github.com/ansible-collections/community.general/pull/3461)."
diff --git a/changelogs/fragments/3473-gitlab_deploy_key-fix_idempotency.yml b/changelogs/fragments/3473-gitlab_deploy_key-fix_idempotency.yml
deleted file mode 100644
index 45dc8f9641..0000000000
--- a/changelogs/fragments/3473-gitlab_deploy_key-fix_idempotency.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - gitlab_deploy_key - fix idempotency on projects with multiple deploy keys (https://github.com/ansible-collections/community.general/pull/3473).
diff --git a/changelogs/fragments/3474-zypper_repository_improve_repo_file_idempotency.yml b/changelogs/fragments/3474-zypper_repository_improve_repo_file_idempotency.yml
deleted file mode 100644
index 4f3b56329c..0000000000
--- a/changelogs/fragments/3474-zypper_repository_improve_repo_file_idempotency.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-bugfixes:
-  - zypper_repository - when a URL to a .repo file was provided in option
-    ``repo=`` and ``state=present``, only the first run was successful;
-    future runs failed due to missing checks prior to starting zypper.
-    Usage of ``state=absent`` in combination with a .repo file was not
-    working either (https://github.com/ansible-collections/community.general/issues/1791,
-    https://github.com/ansible-collections/community.general/issues/3466).
diff --git a/changelogs/fragments/3478-yaml-callback.yml b/changelogs/fragments/3478-yaml-callback.yml
deleted file mode 100644
index ec1801beaa..0000000000
--- a/changelogs/fragments/3478-yaml-callback.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - "yaml callback plugin - avoid modifying PyYAML so that other plugins using it on the controller, like the ``to_yaml`` filter, do not produce different output (https://github.com/ansible-collections/community.general/issues/3471, https://github.com/ansible-collections/community.general/pull/3478)."
diff --git a/changelogs/fragments/3495-ssh_config_add_forwardagent_option.yml b/changelogs/fragments/3495-ssh_config_add_forwardagent_option.yml
deleted file mode 100644
index 9336921ade..0000000000
--- a/changelogs/fragments/3495-ssh_config_add_forwardagent_option.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
-  - ssh_config - new feature to set ``ForwardAgent`` option to ``yes`` or ``no``
-    (https://github.com/ansible-collections/community.general/issues/2473).
diff --git a/changelogs/fragments/3496-callback_opentelemetry-enrich_stacktraces.yml b/changelogs/fragments/3496-callback_opentelemetry-enrich_stacktraces.yml
deleted file mode 100644
index d273083b08..0000000000
--- a/changelogs/fragments/3496-callback_opentelemetry-enrich_stacktraces.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - opentelemetry callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
diff --git a/changelogs/fragments/3498-callback_opentelemetry-only_in_ci.yml b/changelogs/fragments/3498-callback_opentelemetry-only_in_ci.yml
deleted file mode 100644
index 7187ba3770..0000000000
--- a/changelogs/fragments/3498-callback_opentelemetry-only_in_ci.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - opentelemetry callback plugin - added option ``enable_from_environment`` to support enabling the plugin only if the given environment variable exists and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
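A sketch of the ssh_config ``ForwardAgent`` feature above; the module parameter name ``forward_agent`` and the host values are assumptions based on the fragment, not verified against the module docs:

    - name: Enable agent forwarding for one host entry
      community.general.ssh_config:
        user: deploy
        host: build.example.com
        forward_agent: true
        state: present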
diff --git a/changelogs/fragments/3500-macports-add-stdout-and-stderr-to-status.yaml b/changelogs/fragments/3500-macports-add-stdout-and-stderr-to-status.yaml
deleted file mode 100644
index f39466d876..0000000000
--- a/changelogs/fragments/3500-macports-add-stdout-and-stderr-to-status.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
-  - macports - add ``stdout`` and ``stderr`` to return values
-    (https://github.com/ansible-collections/community.general/issues/3499).
diff --git a/changelogs/fragments/3509-redfish_utils-SetOneTimeBoot-mode-fix.yml b/changelogs/fragments/3509-redfish_utils-SetOneTimeBoot-mode-fix.yml
deleted file mode 100644
index 3c1ce33513..0000000000
--- a/changelogs/fragments/3509-redfish_utils-SetOneTimeBoot-mode-fix.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - redfish_utils module utils - do not attempt to change the boot source override mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/).
diff --git a/changelogs/fragments/3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml b/changelogs/fragments/3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml
deleted file mode 100644
index 93c1bf96d5..0000000000
--- a/changelogs/fragments/3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514)."
diff --git a/changelogs/fragments/3526-pkgng-add-integration-tests.yml b/changelogs/fragments/3526-pkgng-add-integration-tests.yml
deleted file mode 100644
index a676f50476..0000000000
--- a/changelogs/fragments/3526-pkgng-add-integration-tests.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-bugfixes:
-  - 'pkgng - ``name=* state=latest`` check for upgrades did not count "Number of packages to be reinstalled" as a `changed` action, giving incorrect results in both regular and check mode (https://github.com/ansible-collections/community.general/pull/3526).'
-  - 'pkgng - an `earlier PR `_ broke check mode so that the module always reports `not changed`. This is now fixed so that the module reports number of upgrade or install actions that would be performed (https://github.com/ansible-collections/community.general/pull/3526).'
-  - 'pkgng - the ``annotation`` functionality was broken and is now fixed, and now also works with check mode (https://github.com/ansible-collections/community.general/pull/3526).'
-minor_changes:
-  - 'pkgng - ``annotation`` can now also be a YAML list (https://github.com/ansible-collections/community.general/pull/3526).'
diff --git a/changelogs/fragments/3536-quote-role-name-in-url.yml b/changelogs/fragments/3536-quote-role-name-in-url.yml
deleted file mode 100644
index e7acae3247..0000000000
--- a/changelogs/fragments/3536-quote-role-name-in-url.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - keycloak_role - quote role name when used in URL path to avoid errors when role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, https://github.com/ansible-collections/community.general/pull/3536).
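A sketch of the ufw behavior change above, where ``insert`` is now ignored when ``delete=true`` instead of failing; port and position are illustrative:

    - name: Delete a rule; the ``insert`` value is ignored rather than rejected
      community.general.ufw:
        rule: allow
        port: "8080"
        proto: tcp
        delete: true
        insert: 1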
diff --git a/changelogs/fragments/3538-fix-keycloak-idp-mappers-change-detection.yml b/changelogs/fragments/3538-fix-keycloak-idp-mappers-change-detection.yml
deleted file mode 100644
index bd205ceb2a..0000000000
--- a/changelogs/fragments/3538-fix-keycloak-idp-mappers-change-detection.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - keycloak_identity_provider - fix change detection when updating identity provider mappers (https://github.com/ansible-collections/community.general/pull/3538, https://github.com/ansible-collections/community.general/issues/3537).
diff --git a/changelogs/fragments/3540-terraform_add_parallelism_parameter.yml b/changelogs/fragments/3540-terraform_add_parallelism_parameter.yml
deleted file mode 100644
index 45b1b0d0f4..0000000000
--- a/changelogs/fragments/3540-terraform_add_parallelism_parameter.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
-  - terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
diff --git a/changelogs/fragments/3545-ipa_group-add-append-option.yml b/changelogs/fragments/3545-ipa_group-add-append-option.yml
deleted file mode 100644
index 5bf585b010..0000000000
--- a/changelogs/fragments/3545-ipa_group-add-append-option.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "ipa_group - add ``append`` option for adding group and user members, instead of replacing the respective lists (https://github.com/ansible-collections/community.general/pull/3545)."
\ No newline at end of file
diff --git a/changelogs/fragments/3551-supervisor-all.yml b/changelogs/fragments/3551-supervisor-all.yml
deleted file mode 100644
index 09d940b7e9..0000000000
--- a/changelogs/fragments/3551-supervisor-all.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
-- supervisorctl - add the possibility to restart all programs and program groups (https://github.com/ansible-collections/community.general/issues/3551).
diff --git a/changelogs/fragments/3554-opkg-name.yml b/changelogs/fragments/3554-opkg-name.yml
deleted file mode 100644
index a06255f26e..0000000000
--- a/changelogs/fragments/3554-opkg-name.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - "opkg - allow ``name`` to be a YAML list of strings (https://github.com/ansible-collections/community.general/issues/572, https://github.com/ansible-collections/community.general/pull/3554)."
diff --git a/changelogs/fragments/3556-callback_elastic-enrich_stacktraces.yml b/changelogs/fragments/3556-callback_elastic-enrich_stacktraces.yml
deleted file mode 100644
index 3b13e9680a..0000000000
--- a/changelogs/fragments/3556-callback_elastic-enrich_stacktraces.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - elastic callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3556).
diff --git a/changelogs/fragments/3558-callback_opentelemetry-enrich_service_map.yml b/changelogs/fragments/3558-callback_opentelemetry-enrich_service_map.yml
deleted file mode 100644
index f89e8cd3e2..0000000000
--- a/changelogs/fragments/3558-callback_opentelemetry-enrich_service_map.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - opentelemetry callback plugin - enriched the span attributes with HTTP metadata for those Ansible tasks that interact with third party systems (https://github.com/ansible-collections/community.general/pull/3448).
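A sketch of the ipa_group ``append`` option above; the group, users, and connection details are placeholders:

    - name: Add members to an IPA group without replacing existing members
      community.general.ipa_group:
        name: developers
        append: true
        user:
          - alice
          - bob
        ipa_host: ipa.example.com
        ipa_user: admin
        ipa_pass: "{{ ipa_password }}"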
diff --git a/changelogs/fragments/3561-fix-ipa-host-var-detection.yml b/changelogs/fragments/3561-fix-ipa-host-var-detection.yml
deleted file mode 100644
index 4b5f23f174..0000000000
--- a/changelogs/fragments/3561-fix-ipa-host-var-detection.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - ipa_* modules - fix environment fallback for ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560).
diff --git a/changelogs/fragments/3563-nmcli-ipv6_dns.yaml b/changelogs/fragments/3563-nmcli-ipv6_dns.yaml
deleted file mode 100644
index 43f9ac76c5..0000000000
--- a/changelogs/fragments/3563-nmcli-ipv6_dns.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
-  - nmcli - fixed ``dns6`` option handling so that it is treated as a list internally (https://github.com/ansible-collections/community.general/pull/3563).
-  - nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563).
diff --git a/changelogs/fragments/3564-callback_opentelemetry-redacted_user_pass_from_url_args.yml b/changelogs/fragments/3564-callback_opentelemetry-redacted_user_pass_from_url_args.yml
deleted file mode 100644
index 6eb1495457..0000000000
--- a/changelogs/fragments/3564-callback_opentelemetry-redacted_user_pass_from_url_args.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - opentelemetry callback plugin - transformed the args into a list of span attributes; in addition, redacted username and password from any URLs (https://github.com/ansible-collections/community.general/pull/3564).
diff --git a/changelogs/fragments/3583-fix-pkgin-exception.yml b/changelogs/fragments/3583-fix-pkgin-exception.yml
deleted file mode 100644
index cc61b1187d..0000000000
--- a/changelogs/fragments/3583-fix-pkgin-exception.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - pkgin - fix exception encountered when all packages are already installed (https://github.com/ansible-collections/community.general/pull/3583).
diff --git a/changelogs/fragments/3599-callback_opentelemetry-enriched_errors_in_loops.yml b/changelogs/fragments/3599-callback_opentelemetry-enriched_errors_in_loops.yml
deleted file mode 100644
index 922432bfd8..0000000000
--- a/changelogs/fragments/3599-callback_opentelemetry-enriched_errors_in_loops.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - opentelemetry callback plugin - enriched the stacktrace information for loops with the ``message``, ``exception`` and ``stderr`` fields from the failed item in the tasks in addition to the name of the task and failed item (https://github.com/ansible-collections/community.general/pull/3599).
diff --git a/changelogs/fragments/3602-fix-gitlab_project_members-improve-search-method.yml b/changelogs/fragments/3602-fix-gitlab_project_members-improve-search-method.yml
deleted file mode 100644
index 4d22049473..0000000000
--- a/changelogs/fragments/3602-fix-gitlab_project_members-improve-search-method.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - gitlab_project_members - ``get_project_id`` returns the project ID by matching ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602).
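A sketch of the list-valued ``dns6`` handling fixed above; the connection name and servers are illustrative:

    - name: Configure IPv6 DNS servers as a list
      community.general.nmcli:
        conn_name: eth0
        type: ethernet
        dns6:
          - 2001:4860:4860::8888
          - 2001:4860:4860::8844
        state: present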
diff --git a/changelogs/fragments/3606-pacman-speed-up-check-if-package-is-installed.yml b/changelogs/fragments/3606-pacman-speed-up-check-if-package-is-installed.yml
deleted file mode 100644
index 12197516af..0000000000
--- a/changelogs/fragments/3606-pacman-speed-up-check-if-package-is-installed.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-- "pacman - speed up checking if the package is installed, when the latest version check is not needed (https://github.com/ansible-collections/community.general/pull/3606)."
diff --git a/changelogs/fragments/3610-fix-keycloak-client-diff-bugs-when-sorting.yml b/changelogs/fragments/3610-fix-keycloak-client-diff-bugs-when-sorting.yml
deleted file mode 100644
index ebbd6015d4..0000000000
--- a/changelogs/fragments/3610-fix-keycloak-client-diff-bugs-when-sorting.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - keycloak_client - update the check mode to not show differences resulting from sorting and default values relating to the properties ``redirectUris``, ``attributes``, and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/3610).
diff --git a/changelogs/fragments/3611-pipx-fix-inject.yml b/changelogs/fragments/3611-pipx-fix-inject.yml
deleted file mode 100644
index 19433b2cb8..0000000000
--- a/changelogs/fragments/3611-pipx-fix-inject.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
-  - pipx - ``state=inject`` was failing to parse the list of injected packages (https://github.com/ansible-collections/community.general/pull/3611).
-  - pipx - set environment variable ``USE_EMOJI=0`` to prevent errors on platforms that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611).
diff --git a/changelogs/fragments/3622-fix-gitlab-deploy-key-check-mode.yml b/changelogs/fragments/3622-fix-gitlab-deploy-key-check-mode.yml
deleted file mode 100644
index 407ab8d77c..0000000000
--- a/changelogs/fragments/3622-fix-gitlab-deploy-key-check-mode.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while running the task in check mode (https://github.com/ansible-collections/community.general/issues/3621, https://github.com/ansible-collections/community.general/pull/3622).
diff --git a/changelogs/fragments/3626-fix-one_image-error.yml b/changelogs/fragments/3626-fix-one_image-error.yml
deleted file mode 100644
index e1dafab017..0000000000
--- a/changelogs/fragments/3626-fix-one_image-error.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-  - one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626).
diff --git a/changelogs/fragments/3634-pipx-improve-changed.yaml b/changelogs/fragments/3634-pipx-improve-changed.yaml
deleted file mode 100644
index 09c3ceee30..0000000000
--- a/changelogs/fragments/3634-pipx-improve-changed.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - pipx - minor refactor on the ``changed`` logic (https://github.com/ansible-collections/community.general/pull/3647).
diff --git a/changelogs/fragments/3648-mh-cmd-publish-cmd.yaml b/changelogs/fragments/3648-mh-cmd-publish-cmd.yaml
deleted file mode 100644
index 9088ec4c48..0000000000
--- a/changelogs/fragments/3648-mh-cmd-publish-cmd.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-  - module_helper module utils - added feature flag parameter to ``CmdMixin`` to control whether ``cmd_args`` is automatically added to the module output (https://github.com/ansible-collections/community.general/pull/3648).
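A sketch of the ``state=inject`` flow whose list parsing is fixed above; the venv and injected packages are illustrative:

    - name: Inject extra packages into an existing pipx-managed venv
      community.general.pipx:
        name: ansible-lint
        state: inject
        inject_packages:
          - requests
          - rich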
diff --git a/changelogs/fragments/3649-proxmox_group_info_TypeError.yml b/changelogs/fragments/3649-proxmox_group_info_TypeError.yml
deleted file mode 100644
index 9620ea7203..0000000000
--- a/changelogs/fragments/3649-proxmox_group_info_TypeError.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-  - proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649).
diff --git a/changelogs/fragments/3655-use-publish_cmd.yaml b/changelogs/fragments/3655-use-publish_cmd.yaml
deleted file mode 100644
index 6b20efcf33..0000000000
--- a/changelogs/fragments/3655-use-publish_cmd.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
-  - ansible_galaxy_install - the output value ``cmd_args`` was reporting the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655).
-  - pipx - the output value ``cmd_args`` was reporting the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655).
-  - snap_alias - the output value ``cmd_args`` was reporting the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655).
diff --git a/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml b/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml
deleted file mode 100644
index 1ba7727c7c..0000000000
--- a/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
-  - zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502).
-minor_changes:
-  - zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502).
diff --git a/changelogs/fragments/634-gitlab_project_runners.yaml b/changelogs/fragments/634-gitlab_project_runners.yaml
deleted file mode 100644
index 0a3a733624..0000000000
--- a/changelogs/fragments/634-gitlab_project_runners.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-- gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml
new file mode 100644
index 0000000000..8323bbe959
--- /dev/null
+++ b/changelogs/fragments/9499-typetalk-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+  - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499).
diff --git a/changelogs/fragments/a_module-test.yml b/changelogs/fragments/a_module-test.yml
deleted file mode 100644
index 4bcfdc068a..0000000000
--- a/changelogs/fragments/a_module-test.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-add plugin.test:
-  - name: a_module
-    description: Check whether the given string refers to an available module or action plugin
diff --git a/changelogs/fragments/ansible-core-2.16.yml b/changelogs/fragments/ansible-core-2.16.yml
new file mode 100644
index 0000000000..1132d20e3e
--- /dev/null
+++ b/changelogs/fragments/ansible-core-2.16.yml
@@ -0,0 +1,2 @@
+removed_features:
+  - "Ansible-core 2.16 is no longer supported. This also means that the collection now requires Python 3.7+ (https://github.com/ansible-collections/community.general/pull/10884)."
diff --git a/changelogs/fragments/ansible-core-_text.yml b/changelogs/fragments/ansible-core-_text.yml
deleted file mode 100644
index fae6391582..0000000000
--- a/changelogs/fragments/ansible-core-_text.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-- "Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877)."
diff --git a/changelogs/fragments/become-pipelining.yml b/changelogs/fragments/become-pipelining.yml
new file mode 100644
index 0000000000..201d85f71c
--- /dev/null
+++ b/changelogs/fragments/become-pipelining.yml
@@ -0,0 +1,3 @@
+bugfixes:
+  - "doas become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/issues/9977, https://github.com/ansible-collections/community.general/pull/10537)."
+  - "machinectl become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/pull/10537)."
diff --git a/changelogs/fragments/deprecations.yml b/changelogs/fragments/deprecations.yml
new file mode 100644
index 0000000000..424b2d439b
--- /dev/null
+++ b/changelogs/fragments/deprecations.yml
@@ -0,0 +1,16 @@
+removed_features:
+  - "yaml callback plugin - the deprecated plugin has been removed. Use the default callback with ``result_format=yaml`` instead (https://github.com/ansible-collections/community.general/pull/10883)."
+  - "purestorage doc fragment - the modules using this doc fragment have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)."
+  - "pure module utils - the modules using this module utils have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)."
+  - "bearychat - the module has been removed as the chat service is no longer available (https://github.com/ansible-collections/community.general/pull/10883)."
+  - "facter - the module has been replaced by ``community.general.facter_facts`` (https://github.com/ansible-collections/community.general/pull/10883)."
+  - "pacemaker_cluster - the option ``state`` is now required (https://github.com/ansible-collections/community.general/pull/10883)."
+  - >-
+    opkg - the value ``""`` for the option ``force`` is no longer allowed. Omit ``force`` instead (https://github.com/ansible-collections/community.general/pull/10883).
+  - "cmd_runner_fmt module utils - the parameter ``ctx_ignore_none`` to argument formatters has been removed (https://github.com/ansible-collections/community.general/pull/10883)."
+  - "cmd_runner module utils - the parameter ``ignore_value_none`` to ``CmdRunner.__call__()`` has been removed (https://github.com/ansible-collections/community.general/pull/10883)."
+  - >-
+    mh.deco module utils - the parameters ``on_success`` and ``on_failure`` of ``cause()`` have been removed; use ``when="success"`` and ``when="failure"`` instead (https://github.com/ansible-collections/community.general/pull/10883).
+breaking_changes: + - "slack - the default of ``prepend_hash`` changed from ``auto`` to ``never`` (https://github.com/ansible-collections/community.general/pull/10883)." + - "mh.base module utils - ``debug`` will now always be delegated to the underlying ``AnsibleModule`` object (https://github.com/ansible-collections/community.general/pull/10883)." diff --git a/changelogs/fragments/gem_module_add_bindir_option.yml b/changelogs/fragments/gem_module_add_bindir_option.yml deleted file mode 100644 index f47b6deb27..0000000000 --- a/changelogs/fragments/gem_module_add_bindir_option.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - gem - add ``bindir`` option to specify an installation path for executables such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837). - - gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837). diff --git a/changelogs/fragments/hiera.yml b/changelogs/fragments/hiera.yml new file mode 100644 index 0000000000..70c75f059e --- /dev/null +++ b/changelogs/fragments/hiera.yml @@ -0,0 +1,4 @@ +deprecated_features: + - "hiera lookup plugin - retrieving data with Hiera was deprecated a long time ago; because of that, this plugin will be removed from community.general 13.0.0. + If you disagree with this deprecation, please create an issue in the community.general repository + (https://github.com/ansible-collections/community.general/issues/4462, https://github.com/ansible-collections/community.general/pull/10779)." diff --git a/changelogs/fragments/ipaddress.yml b/changelogs/fragments/ipaddress.yml deleted file mode 100644 index 7f6eeb70bb..0000000000 --- a/changelogs/fragments/ipaddress.yml +++ /dev/null @@ -1,5 +0,0 @@ -removed_features: -- "The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress`` from the Python 3 standard library, or `from pypi `_. (https://github.com/ansible-collections/community.general/pull/2441)." -breaking_changes: -- "scaleway_security_group_rule - when used with Python 2, the module now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441)." -- "lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441)." diff --git a/changelogs/fragments/json_query_more_types.yml b/changelogs/fragments/json_query_more_types.yml deleted file mode 100644 index 4ac69b67c0..0000000000 --- a/changelogs/fragments/json_query_more_types.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - json_query filter plugin - avoid 'unknown type' errors for more Ansible internal types (https://github.com/ansible-collections/community.general/pull/2607). diff --git a/changelogs/fragments/keycloak-realm-no-log-password-reset.yml b/changelogs/fragments/keycloak-realm-no-log-password-reset.yml deleted file mode 100644 index 104bf4179b..0000000000 --- a/changelogs/fragments/keycloak-realm-no-log-password-reset.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_realm - remove warning that ``reset_password_allowed`` needs to be marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694).
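The ``prepend_hash`` default change above is a breaking change for playbooks that relied on the old ``auto`` behavior; such playbooks should now set the option explicitly. A minimal sketch (``token``, ``channel``, and ``msg`` are long-standing options of the ``slack`` module; the variable names are placeholders):

.. code-block:: yaml+jinja

   - name: Notify Slack, pinning the hash-prepending behavior explicitly
     community.general.slack:
       token: "{{ slack_token }}"      # placeholder variable
       channel: "{{ slack_channel }}"  # placeholder variable
       msg: Deployment finished
       prepend_hash: always            # set explicitly instead of relying on the old 'auto' default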
diff --git a/changelogs/fragments/keycloak-realm-webauthn-policies.yml b/changelogs/fragments/keycloak-realm-webauthn-policies.yml new file mode 100644 index 0000000000..91b1f67b3a --- /dev/null +++ b/changelogs/fragments/keycloak-realm-webauthn-policies.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for WebAuthn policy configuration options, including both regular and passwordless WebAuthn policies (https://github.com/ansible-collections/community.general/pull/10791). diff --git a/changelogs/fragments/keycloak_realm_ssl_required.yml b/changelogs/fragments/keycloak_realm_ssl_required.yml deleted file mode 100644 index 7476612e2f..0000000000 --- a/changelogs/fragments/keycloak_realm_ssl_required.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - keycloak_realm - ``ssl_required`` changed from a boolean type to accept the strings ``none``, ``external`` or ``all``. This is not a breaking change since the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693). diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml new file mode 100644 index 0000000000..1c7ec89b7d --- /dev/null +++ b/changelogs/fragments/logstash.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345). diff --git a/changelogs/fragments/lvm_pv.yml b/changelogs/fragments/lvm_pv.yml new file mode 100644 index 0000000000..d0198d7ffb --- /dev/null +++ b/changelogs/fragments/lvm_pv.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - lvm_pv - properly detect SCSI or NVMe devices to rescan (https://github.com/ansible-collections/community.general/issues/10444, https://github.com/ansible-collections/community.general/pull/10596). diff --git a/changelogs/fragments/netapp-removal.yml b/changelogs/fragments/netapp-removal.yml deleted file mode 100644 index e515e377cd..0000000000 --- a/changelogs/fragments/netapp-removal.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197)." diff --git a/changelogs/fragments/nios-removal.yml b/changelogs/fragments/nios-removal.yml deleted file mode 100644 index 84cdcb6a1b..0000000000 --- a/changelogs/fragments/nios-removal.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address, nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view, nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup, and nios_network_view module have been removed from community.general 4.0.0 and were replaced by redirects to the `infoblox.nios_modules `_ collection. Please install the ``infoblox.nios_modules`` collection to continue using these plugins and modules, and update your FQCNs (https://github.com/ansible-collections/community.general/pull/3592)." 
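The ``lvm_pv`` bugfix above only touches device rescanning; for orientation, a minimal usage sketch (the ``device`` parameter name is an assumption based on the module documentation and is not confirmed by this diff):

.. code-block:: yaml+jinja

   - name: Ensure the NVMe disk is an LVM physical volume
     community.general.lvm_pv:
       device: /dev/nvme0n1  # parameter name assumed; verify against the module docs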
diff --git a/changelogs/fragments/pkgin-output-after-error.yml b/changelogs/fragments/pkgin-output-after-error.yml deleted file mode 100644 index a2dd2d6a1e..0000000000 --- a/changelogs/fragments/pkgin-output-after-error.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkgin - in case of ``pkgin`` tool failue, display returned standard output ``stdout`` and standard error ``stderr`` to ease debugging (https://github.com/ansible-collections/community.general/issues/3146). diff --git a/changelogs/fragments/random_string_seed.yml b/changelogs/fragments/random_string_seed.yml new file mode 100644 index 0000000000..a90b7d93b5 --- /dev/null +++ b/changelogs/fragments/random_string_seed.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - random_string lookup plugin - allow specifying a seed while generating random strings (https://github.com/ansible-collections/community.general/issues/5362, https://github.com/ansible-collections/community.general/pull/10710). diff --git a/changelogs/fragments/remove-scripts.yml b/changelogs/fragments/remove-scripts.yml deleted file mode 100644 index 72cee7dee5..0000000000 --- a/changelogs/fragments/remove-scripts.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "All inventory and vault scripts contained in community.general were moved to the `contrib-scripts GitHub repository `_ (https://github.com/ansible-collections/community.general/pull/2696)." diff --git a/changelogs/fragments/replace-random-with-secrets.yml b/changelogs/fragments/replace-random-with-secrets.yml new file mode 100644 index 0000000000..b82e59e7e9 --- /dev/null +++ b/changelogs/fragments/replace-random-with-secrets.yml @@ -0,0 +1,4 @@ +bugfixes: + - random_string lookup plugin - replace ``random.SystemRandom()`` with ``secrets.SystemRandom()`` when + generating strings. This has no practical effect, as both are the same + (https://github.com/ansible-collections/community.general/pull/10893).
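The two ``random_string`` fragments above interact: system randomness cannot be seeded, so when a seed is supplied the plugin presumably falls back to a deterministic generator, making the output reproducible across runs. A minimal sketch, assuming the new lookup parameter is simply named ``seed`` as the fragment suggests:

.. code-block:: yaml+jinja

   - name: Generate a random-but-idempotent string per host
     ansible.builtin.debug:
       msg: "{{ lookup('community.general.random_string', seed=inventory_hostname) }}"  # 'seed' name assumed from the fragment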
diff --git a/commit-rights.md b/commit-rights.md index 43836350c5..196565eca7 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -1,3 +1,9 @@ + + Committers Guidelines for community.general =========================================== diff --git a/docs/docsite/config.yml b/docs/docsite/config.yml new file mode 100644 index 0000000000..1d6cf8554a --- /dev/null +++ b/docs/docsite/config.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +changelog: + write_changelog: true diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 83f533ec08..4594ab4c2d 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -1,6 +1,24 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + sections: - title: Guides toctree: - filter_guide - test_guide + - title: Technology Guides + toctree: + - guide_alicloud + - guide_iocage + - guide_online + - guide_packet + - guide_scaleway + - title: Developer Guides + toctree: + - guide_deps + - guide_vardict + - guide_cmdrunner + - guide_modulehelper + - guide_uthelper diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml new file mode 100644 index 0000000000..fe41d1d2fd --- /dev/null +++ b/docs/docsite/links.yml @@ -0,0 +1,33 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +edit_on_github: + repository: ansible-collections/community.general + branch: main + path_prefix: '' + +extra_links: + - description: Ask for help + url: https://forum.ansible.com/c/help/6/none + - description: Submit a bug report + url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml + - description: Request a feature + url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=feature_request.yml + +communication: + matrix_rooms: + - topic: General usage and support questions + room: '#users:ansible.im' + irc_channels: + - topic: General usage and support questions + network: Libera + channel: '#ansible' + forums: + - topic: "Ansible Forum: General usage and support questions" + # The following URL directly points to the "Get Help" section + url: https://forum.ansible.com/c/help/6/none + - topic: "Ansible Forum: Discussions about the collection itself, not for specific modules or plugins" + # The following URL directly points to the "community-general" tag + url: https://forum.ansible.com/tag/community-general diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst new file mode 100644 index 0000000000..3549d29ba7 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst @@ -0,0 +1,151 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the list below in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +.. versionadded:: 9.1.0 + +* The results of examples 1-5 below are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +1. Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by a regex, with the target given as a list. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by a regex, with the target given as a string. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + + +* The results of examples 6-9 below are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0} + - {k0_x0: A1} + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by a regex. + +..
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst new file mode 100644 index 0000000000..4ac87ab79c --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst @@ -0,0 +1,159 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +remove_keys +""""""""""" + +Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys. + +.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the list below in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of examples 1-5 below are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +1. Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by a regex, with the target given as a list. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by a regex, with the target given as a string. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + + +* The results of examples 6-9 below are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +6. Match keys that equal the target. + +..
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by a regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst new file mode 100644 index 0000000000..d0eb202bfe --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the list below in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the ``before`` attributes. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + - {after: a0, before: k0_x0} + - {after: a1, before: k1_x1} + + result: "{{ input | community.general.replace_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of examples 1-3 below are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +1. Replace keys that start with any of the ``before`` attributes. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: a0, before: k0} + - {after: a1, before: k1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +2. Replace keys that end with any of the ``before`` attributes. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: ends_with + target: + - {after: a0, before: x0} + - {after: a1, before: x1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +3. Replace keys that match any of the ``before`` regexes. + +..
code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: regex + target: + - {after: a0, before: ^.*0_x.*$} + - {after: a1, before: ^.*1_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +* The results of examples 4-5 below are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: foo} + - {X: bar} + + +4. If multiple keys match the same ``before`` attribute, the last matching key is used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +5. If there are target items with an equal ``before`` attribute, the first one is used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + - {after: Y, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +6. If a key matches more than one target item, the first match is used. + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: X, before: a} + - {after: Y, before: aa} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} + + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst new file mode 100644 index 0000000000..64a82536d8 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst @@ -0,0 +1,18 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts: + +Lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^ + +Filters to manage keys in a list of dictionaries: + +.. toctree:: + :maxdepth: 1 + + filter_guide-abstract_informations-lists_of_dictionaries-keep_keys + filter_guide-abstract_informations-lists_of_dictionaries-remove_keys + filter_guide-abstract_informations-lists_of_dictionaries-replace_keys diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst index dab8464439..da8a90af3c 100644 --- a/docs/docsite/rst/filter_guide.rst +++ b/docs/docsite/rst/filter_guide.rst @@ -1,784 +1,23 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + .. _ansible_collections.community.general.docsite.filter_guide: community.general Filter Guide ============================== -The :ref:`community.general collection ` offers several useful filter plugins. - -.. contents:: Topics - -Paths ------ - -The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``.
This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9. - -.. code-block:: yaml+jinja - - # ansible-base 2.10 or newer: - path: {{ ('/etc', path, 'subdir', file) | path_join }} - - # Also works with Ansible 2.9: - path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }} - -.. versionadded:: 3.0.0 - -Abstract transformations ------------------------- - -Dictionaries -^^^^^^^^^^^^ - -You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: - -.. code-block:: yaml+jinja - - - name: Create a single-entry dictionary - debug: - msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}" - vars: - myvar: myvalue - - - name: Create a list of dictionaries where the 'server' field is taken from a list - debug: - msg: >- - {{ myservers | map('community.general.dict_kv', 'server') - | map('combine', common_config) }} - vars: - common_config: - type: host - database: all - myservers: - - server1 - - server2 - -This produces: - -.. code-block:: ansible-output - - TASK [Create a single-entry dictionary] ************************************************** - ok: [localhost] => { - "msg": { - "thatsmyvar": "myvalue" - } - } - - TASK [Create a list of dictionaries where the 'server' field is taken from a list] ******* - ok: [localhost] => { - "msg": [ - { - "database": "all", - "server": "server1", - "type": "host" - }, - { - "database": "all", - "server": "server2", - "type": "host" - } - ] - } - -.. versionadded:: 2.0.0 - -If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used: - -.. code-block:: yaml+jinja - - - name: Create a dictionary with the dict function - debug: - msg: "{{ dict([[1, 2], ['a', 'b']]) }}" - - - name: Create a dictionary with the community.general.dict filter - debug: - msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" - - - name: Create a list of dictionaries with map and the community.general.dict filter - debug: - msg: >- - {{ values | map('zip', ['k1', 'k2', 'k3']) - | map('map', 'reverse') - | map('community.general.dict') }} - vars: - values: - - - foo - - 23 - - a - - - bar - - 42 - - b - -This produces: - -.. code-block:: ansible-output - - TASK [Create a dictionary with the dict function] **************************************** - ok: [localhost] => { - "msg": { - "1": 2, - "a": "b" - } - } - - TASK [Create a dictionary with the community.general.dict filter] ************************ - ok: [localhost] => { - "msg": { - "1": 2, - "a": "b" - } - } - - TASK [Create a list of dictionaries with map and the community.general.dict filter] ****** - ok: [localhost] => { - "msg": [ - { - "k1": "foo", - "k2": 23, - "k3": "a" - }, - { - "k1": "bar", - "k2": 42, - "k3": "b" - } - ] - } - -.. versionadded:: 3.0.0 - -Grouping -^^^^^^^^ - -If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. 
- -One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information: - -.. code-block:: yaml+jinja - - - name: Output mount facts grouped by device name - debug: - var: ansible_facts.mounts | community.general.groupby_as_dict('device') - - - name: Output mount facts grouped by mount point - debug: - var: ansible_facts.mounts | community.general.groupby_as_dict('mount') - -This produces: - -.. code-block:: ansible-output - - TASK [Output mount facts grouped by device name] ****************************************** - ok: [localhost] => { - "ansible_facts.mounts | community.general.groupby_as_dict('device')": { - "/dev/sda1": { - "block_available": 2000, - "block_size": 4096, - "block_total": 2345, - "block_used": 345, - "device": "/dev/sda1", - "fstype": "ext4", - "inode_available": 500, - "inode_total": 512, - "inode_used": 12, - "mount": "/boot", - "options": "rw,relatime,data=ordered", - "size_available": 56821, - "size_total": 543210, - "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" - }, - "/dev/sda2": { - "block_available": 1234, - "block_size": 4096, - "block_total": 12345, - "block_used": 11111, - "device": "/dev/sda2", - "fstype": "ext4", - "inode_available": 1111, - "inode_total": 1234, - "inode_used": 123, - "mount": "/", - "options": "rw,relatime", - "size_available": 42143, - "size_total": 543210, - "uuid": "abcdef01-2345-6789-0abc-def012345678" - } - } - } - - TASK [Output mount facts grouped by mount point] ****************************************** - ok: [localhost] => { - "ansible_facts.mounts | community.general.groupby_as_dict('mount')": { - "/": { - "block_available": 1234, - "block_size": 4096, - "block_total": 12345, - "block_used": 11111, - "device": "/dev/sda2", - "fstype": "ext4", - "inode_available": 1111, - "inode_total": 1234, - "inode_used": 123, - "mount": "/", - "options": "rw,relatime", - "size_available": 42143, - "size_total": 543210, - "uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808" - }, - "/boot": { - "block_available": 2000, - "block_size": 4096, - "block_total": 2345, - "block_used": 345, - "device": "/dev/sda1", - "fstype": "ext4", - "inode_available": 500, - "inode_total": 512, - "inode_used": 12, - "mount": "/boot", - "options": "rw,relatime,data=ordered", - "size_available": 56821, - "size_total": 543210, - "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" - } - } - } - -.. versionadded: 3.0.0 - -Merging lists of dictionaries -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter. - -.. code-block:: yaml+jinja - - - name: Merge two lists by common attribute 'name' - debug: - var: list1 | community.general.lists_mergeby(list2, 'name') - vars: - list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true - list2: - - name: foo - path: /foo - - name: baz - path: /bazzz - -This produces: - -.. 
code-block:: ansible-output - - TASK [Merge two lists by common attribute 'name'] **************************************** - ok: [localhost] => { - "list1 | community.general.lists_mergeby(list2, 'name')": [ - { - "extra": false, - "name": "bar" - }, - { - "name": "baz", - "path": "/bazzz" - }, - { - "extra": true, - "name": "foo", - "path": "/foo" - }, - { - "extra": true, - "name": "meh" - } - ] - } - -.. versionadded: 2.0.0 - -Working with times ------------------- - -The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. - -There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used: - -.. list-table:: Units - :widths: 25 25 25 25 - :header-rows: 1 - - * - Unit name - - Unit value in seconds - - Unit strings for filter - - Shorthand filter - * - Millisecond - - 1/1000 second - - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds`` - - ``to_milliseconds`` - * - Second - - 1 second - - ``s``, ``sec``, ``secs``, ``second``, ``seconds`` - - ``to_seconds`` - * - Minute - - 60 seconds - - ``m``, ``min``, ``mins``, ``minute``, ``minutes`` - - ``to_minutes`` - * - Hour - - 60*60 seconds - - ``h``, ``hour``, ``hours`` - - ``to_hours`` - * - Day - - 24*60*60 seconds - - ``d``, ``day``, ``days`` - - ``to_days`` - * - Week - - 7*24*60*60 seconds - - ``w``, ``week``, ``weeks`` - - ``to_weeks`` - * - Month - - 30*24*60*60 seconds - - ``mo``, ``month``, ``months`` - - ``to_months`` - * - Year - - 365*24*60*60 seconds - - ``y``, ``year``, ``years`` - - ``to_years`` - -Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters. - -.. code-block:: yaml+jinja - - - name: Convert string to seconds - debug: - msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}" - - - name: Convert string to hours - debug: - msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}" - - - name: Convert string to years (using 365.25 days == 1 year) - debug: - msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}" - -This produces: - -.. code-block:: ansible-output - - TASK [Convert string to seconds] ********************************************************** - ok: [localhost] => { - "msg": "109210.123" - } - - TASK [Convert string to hours] ************************************************************ - ok: [localhost] => { - "msg": "30.336145277778" - } - - TASK [Convert string to years (using 365.25 days == 1 year)] ****************************** - ok: [localhost] => { - "msg": "1.096851471595" - } - -.. versionadded: 0.2.0 - -Working with versions ---------------------- - -If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter: - -.. 
code-block:: yaml+jinja - - - name: Sort list by version number - debug: - var: ansible_versions | community.general.version_sort - vars: - ansible_versions: - - '2.8.0' - - '2.11.0' - - '2.7.0' - - '2.10.0' - - '2.9.0' - -This produces: - -.. code-block:: ansible-output - - TASK [Sort list by version number] ******************************************************** - ok: [localhost] => { - "ansible_versions | community.general.version_sort": [ - "2.7.0", - "2.8.0", - "2.9.0", - "2.10.0", - "2.11.0" - ] - } - -.. versionadded: 2.2.0 - -Creating identifiers --------------------- - -The following filters allow to create identifiers. - -Hashids -^^^^^^^ - -`Hashids `_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library `_ installed on the controller. - -.. code-block:: yaml+jinja - - - name: "Create hashid" - debug: - msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}" - - - name: "Decode hashid" - debug: - msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}" - -This produces: - -.. code-block:: ansible-output - - TASK [Create hashid] ********************************************************************** - ok: [localhost] => { - "msg": "jm2Cytn" - } - - TASK [Decode hashid] ********************************************************************** - ok: [localhost] => { - "msg": [ - 1234, - 5, - 6 - ] - } - -The hashids filters accept keyword arguments to allow fine-tuning the hashids generated: - -:salt: String to use as salt when hashing. -:alphabet: String of 16 or more unique characters to produce a hash. -:min_length: Minimum length of hash produced. - -.. versionadded: 3.0.0 - -Random MACs -^^^^^^^^^^^ - -You can use the ``random_mac`` filter to complete a partial `MAC address `_ to a random 6-byte MAC address. - -.. code-block:: yaml+jinja - - - name: "Create a random MAC starting with ff:" - debug: - msg: "{{ 'FF' | community.general.random_mac }}" - - - name: "Create a random MAC starting with 00:11:22:" - debug: - msg: "{{ '00:11:22' | community.general.random_mac }}" - -This produces: - -.. code-block:: ansible-output - - TASK [Create a random MAC starting with ff:] ********************************************** - ok: [localhost] => { - "msg": "ff:69:d3:78:7f:b4" - } - - TASK [Create a random MAC starting with 00:11:22:] **************************************** - ok: [localhost] => { - "msg": "00:11:22:71:5d:3b" - } - -You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses: - -.. code-block:: yaml+jinja - - "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" - -Conversions ------------ - -Parsing CSV files -^^^^^^^^^^^^^^^^^ - -Ansible offers the :ref:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists. - -.. code-block:: yaml+jinja - - - name: "Parse CSV from string" - debug: - msg: "{{ csv_string | community.general.from_csv }}" - vars: - csv_string: | - foo,bar,baz - 1,2,3 - you,this,then - -This produces: - -.. code-block:: ansible-output - - TASK [Parse CSV from string] ************************************************************** - ok: [localhost] => { - "msg": [ - { - "bar": "2", - "baz": "3", - "foo": "1" - }, - { - "bar": "this", - "baz": "then", - "foo": "you" - } - ] - } - -The ``from_csv`` filter has several keyword arguments to control its behavior: - -:dialect: Dialect of the CSV file. Default is ``excel``. 
Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored. -:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names. -:delimiter: Sets the delimiter to use. Default depends on the dialect used. -:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``). -:strict: Set to ``true`` to error out on invalid CSV input. - -.. versionadded: 3.0.0 - -Converting to JSON -^^^^^^^^^^^^^^^^^^ - -`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library `_ installed on the controller. - -.. code-block:: yaml+jinja - - - name: Run 'ls' to list files in / - command: ls / - register: result - - - name: Parse the ls output - debug: - msg: "{{ result.stdout | community.general.jc('ls') }}" - -This produces: - -.. code-block:: ansible-output - - TASK [Run 'ls' to list files in /] ******************************************************** - changed: [localhost] - - TASK [Parse the ls output] **************************************************************** - ok: [localhost] => { - "msg": [ - { - "filename": "bin" - }, - { - "filename": "boot" - }, - { - "filename": "dev" - }, - { - "filename": "etc" - }, - { - "filename": "home" - }, - { - "filename": "lib" - }, - { - "filename": "proc" - }, - { - "filename": "root" - }, - { - "filename": "run" - }, - { - "filename": "tmp" - } - ] - } - -.. versionadded: 2.0.0 - -.. _ansible_collections.community.general.docsite.json_query_filter: - -Selecting JSON data: JSON queries ---------------------------------- - -To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure. - -.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples `_. - -Consider this data structure: - -.. code-block:: yaml+jinja - - { - "domain_definition": { - "domain": { - "cluster": [ - { - "name": "cluster1" - }, - { - "name": "cluster2" - } - ], - "server": [ - { - "name": "server11", - "cluster": "cluster1", - "port": "8080" - }, - { - "name": "server12", - "cluster": "cluster1", - "port": "8090" - }, - { - "name": "server21", - "cluster": "cluster2", - "port": "9080" - }, - { - "name": "server22", - "cluster": "cluster2", - "port": "9090" - } - ], - "library": [ - { - "name": "lib1", - "target": "cluster1" - }, - { - "name": "lib2", - "target": "cluster2" - } - ] - } - } - } - -To extract all clusters from this structure, you can use the following query: - -.. code-block:: yaml+jinja - - - name: Display all cluster names - ansible.builtin.debug: - var: item - loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}" - -To extract all server names: - -.. code-block:: yaml+jinja - - - name: Display all server names - ansible.builtin.debug: - var: item - loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}" - -To extract ports from cluster1: - -.. 
code-block:: yaml+jinja - - - name: Display all ports from cluster1 - ansible.builtin.debug: - var: item - loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" - vars: - server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port" - -.. note:: You can use a variable to make the query more readable. - -To print out the ports from cluster1 in a comma separated string: - -.. code-block:: yaml+jinja - - - name: Display all ports from cluster1 as a string - ansible.builtin.debug: - msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}" - -.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability. - -You can use YAML `single quote escaping `_: - -.. code-block:: yaml+jinja - - - name: Display all ports from cluster1 - ansible.builtin.debug: - var: item - loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}" - -.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote. - -To get a hash map with all ports and names of a cluster: - -.. code-block:: yaml+jinja - - - name: Display all server ports and names from cluster1 - ansible.builtin.debug: - var: item - loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" - vars: - server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}" - -To extract ports from all clusters with name starting with 'server1': - -.. code-block:: yaml+jinja - - - name: Display all ports from cluster1 - ansible.builtin.debug: - msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" - vars: - server_name_query: "domain.server[?starts_with(name,'server1')].port" - -To extract ports from all clusters with name containing 'server1': - -.. code-block:: yaml+jinja - - - name: Display all ports from cluster1 - ansible.builtin.debug: - msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" - vars: - server_name_query: "domain.server[?contains(name,'server1')].port" - -.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure. - -Working with Unicode ---------------------- - -`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. - -You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks. - -.. code-block:: yaml+jinja - - - name: Compare Unicode representations - debug: - msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}" - vars: - with_combining_character: "{{ 'Mayagu\u0308ez' }}" - without_combining_character: Mayagüez - -This produces: - -.. code-block:: ansible-output - - TASK [Compare Unicode representations] ******************************************************** - ok: [localhost] => { - "msg": true - } - -The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string. 
- -:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information. - -.. versionadded:: 3.7.0 +The :anscollection:`community.general collection ` offers several useful filter plugins. + +.. toctree:: + :maxdepth: 2 + + filter_guide_paths + filter_guide_abstract_informations + filter_guide_working_with_times + filter_guide_working_with_versions + filter_guide_creating_identifiers + filter_guide_conversions + filter_guide_selecting_json_data + filter_guide_working_with_unicode diff --git a/docs/docsite/rst/filter_guide_abstract_informations.rst b/docs/docsite/rst/filter_guide_abstract_informations.rst new file mode 100644 index 0000000000..818c09f02c --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations.rst @@ -0,0 +1,17 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Abstract transformations +------------------------ + +.. toctree:: + :maxdepth: 1 + + filter_guide_abstract_informations_dictionaries + filter_guide_abstract_informations_grouping + filter_guide-abstract_informations-lists_of_dictionaries + filter_guide_abstract_informations_merging_lists_of_dictionaries + filter_guide_abstract_informations_lists_helper + filter_guide_abstract_informations_counting_elements_in_sequence diff --git a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst new file mode 100644 index 0000000000..98e8eb1c4d --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst @@ -0,0 +1,82 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Counting elements in a sequence +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :ansplugin:`community.general.counter filter plugin ` allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values. + +.. code-block:: yaml+jinja + + - name: Count character occurrences in a string + debug: + msg: "{{ 'abccbaabca' | community.general.counter }}" + + - name: Count items in a list + debug: + msg: "{{ ['car', 'car', 'bike', 'plane', 'bike'] | community.general.counter }}" + +This produces: + +.. code-block:: ansible-output + + TASK [Count character occurrences in a string] ******************************************** + ok: [localhost] => { + "msg": { + "a": 4, + "b": 3, + "c": 3 + } + } + + TASK [Count items in a list] ************************************************************** + ok: [localhost] => { + "msg": { + "bike": 2, + "car": 2, + "plane": 1 + } + } + +This plugin is useful for selecting resources based on current allocation: + +.. 
code-block:: yaml+jinja + + - name: Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks + debug: + msg: >- + {{ + ( disks | dict2items | map(attribute='value.adapter') | list + | community.general.counter | dict2items + | rejectattr('value', '>=', 4) | sort(attribute='value') | first + ).key + }} + vars: + disks: + sda: + adapter: scsi_1 + sdb: + adapter: scsi_1 + sdc: + adapter: scsi_1 + sdd: + adapter: scsi_1 + sde: + adapter: scsi_2 + sdf: + adapter: scsi_3 + sdg: + adapter: scsi_3 + +This produces: + +.. code-block:: ansible-output + + TASK [Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks] + ok: [localhost] => { + "msg": "scsi_2" + } + +.. versionadded:: 4.3.0 diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst new file mode 100644 index 0000000000..e5b5bb7e36 --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst @@ -0,0 +1,124 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Dictionaries +^^^^^^^^^^^^ + +You can use the :ansplugin:`community.general.dict_kv filter ` to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: + +.. code-block:: yaml+jinja + + - name: Create a single-entry dictionary + debug: + msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}" + vars: + myvar: myvalue + + - name: Create a list of dictionaries where the 'server' field is taken from a list + debug: + msg: >- + {{ myservers | map('community.general.dict_kv', 'server') + | map('combine', common_config) }} + vars: + common_config: + type: host + database: all + myservers: + - server1 + - server2 + +This produces: + +.. code-block:: ansible-output + + TASK [Create a single-entry dictionary] ************************************************** + ok: [localhost] => { + "msg": { + "thatsmyvar": "myvalue" + } + } + + TASK [Create a list of dictionaries where the 'server' field is taken from a list] ******* + ok: [localhost] => { + "msg": [ + { + "database": "all", + "server": "server1", + "type": "host" + }, + { + "database": "all", + "server": "server2", + "type": "host" + } + ] + } + +.. versionadded:: 2.0.0 + +If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the :ansplugin:`community.general.dict filter ` can be used: + +.. code-block:: yaml+jinja + + - name: Create a dictionary with the dict function + debug: + msg: "{{ dict([[1, 2], ['a', 'b']]) }}" + + - name: Create a dictionary with the community.general.dict filter + debug: + msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" + + - name: Create a list of dictionaries with map and the community.general.dict filter + debug: + msg: >- + {{ values | map('zip', ['k1', 'k2', 'k3']) + | map('map', 'reverse') + | map('community.general.dict') }} + vars: + values: + - - foo + - 23 + - a + - - bar + - 42 + - b + +This produces: + +.. 
code-block:: ansible-output + + TASK [Create a dictionary with the dict function] **************************************** + ok: [localhost] => { + "msg": { + "1": 2, + "a": "b" + } + } + + TASK [Create a dictionary with the community.general.dict filter] ************************ + ok: [localhost] => { + "msg": { + "1": 2, + "a": "b" + } + } + + TASK [Create a list of dictionaries with map and the community.general.dict filter] ****** + ok: [localhost] => { + "msg": [ + { + "k1": "foo", + "k2": 23, + "k3": "a" + }, + { + "k1": "bar", + "k2": 42, + "k3": "b" + } + ] + } + +.. versionadded:: 3.0.0 diff --git a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst new file mode 100644 index 0000000000..cb15989659 --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst @@ -0,0 +1,103 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Grouping +^^^^^^^^ + +If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows you to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be at most one entry in that list, you can use the :ansplugin:`community.general.groupby_as_dict filter ` to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. + +One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information: + +.. code-block:: yaml+jinja + + - name: Output mount facts grouped by device name + debug: + var: ansible_facts.mounts | community.general.groupby_as_dict('device') + + - name: Output mount facts grouped by mount point + debug: + var: ansible_facts.mounts | community.general.groupby_as_dict('mount') + +This produces: + +..
code-block:: ansible-output + + TASK [Output mount facts grouped by device name] ****************************************** + ok: [localhost] => { + "ansible_facts.mounts | community.general.groupby_as_dict('device')": { + "/dev/sda1": { + "block_available": 2000, + "block_size": 4096, + "block_total": 2345, + "block_used": 345, + "device": "/dev/sda1", + "fstype": "ext4", + "inode_available": 500, + "inode_total": 512, + "inode_used": 12, + "mount": "/boot", + "options": "rw,relatime,data=ordered", + "size_available": 56821, + "size_total": 543210, + "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" + }, + "/dev/sda2": { + "block_available": 1234, + "block_size": 4096, + "block_total": 12345, + "block_used": 11111, + "device": "/dev/sda2", + "fstype": "ext4", + "inode_available": 1111, + "inode_total": 1234, + "inode_used": 123, + "mount": "/", + "options": "rw,relatime", + "size_available": 42143, + "size_total": 543210, + "uuid": "abcdef01-2345-6789-0abc-def012345678" + } + } + } + + TASK [Output mount facts grouped by mount point] ****************************************** + ok: [localhost] => { + "ansible_facts.mounts | community.general.groupby_as_dict('mount')": { + "/": { + "block_available": 1234, + "block_size": 4096, + "block_total": 12345, + "block_used": 11111, + "device": "/dev/sda2", + "fstype": "ext4", + "inode_available": 1111, + "inode_total": 1234, + "inode_used": 123, + "mount": "/", + "options": "rw,relatime", + "size_available": 42143, + "size_total": 543210, + "uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808" + }, + "/boot": { + "block_available": 2000, + "block_size": 4096, + "block_total": 2345, + "block_used": 345, + "device": "/dev/sda1", + "fstype": "ext4", + "inode_available": 500, + "inode_total": 512, + "inode_used": 12, + "mount": "/boot", + "options": "rw,relatime,data=ordered", + "size_available": 56821, + "size_total": 543210, + "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" + } + } + } + +.. versionadded:: 3.0.0 diff --git a/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst new file mode 100644 index 0000000000..505320c79c --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst @@ -0,0 +1,81 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Union, intersection and difference of lists +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting with Ansible Core 2.16, the builtin filters :ansplugin:`ansible.builtin.union#filter`, :ansplugin:`ansible.builtin.intersect#filter`, :ansplugin:`ansible.builtin.difference#filter` and :ansplugin:`ansible.builtin.symmetric_difference#filter` began to behave differently and no longer preserve the item order. Items in the resulting lists are returned in arbitrary order and the order can vary between subsequent runs. + +The Ansible community.general collection provides the following additional list filters: + +- :ansplugin:`community.general.lists_union#filter` +- :ansplugin:`community.general.lists_intersect#filter` +- :ansplugin:`community.general.lists_difference#filter` +- :ansplugin:`community.general.lists_symmetric_difference#filter` + +These filters preserve the item order, eliminate duplicates, and are extended versions of the builtin ones in that they can operate on more than two lists. + +..
note:: Stick to the builtin filters when item order is not important or when you do not need the n-ary operating mode. The builtin filters are faster because they mostly rely on sets as their underlying data structure.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+   A: [9, 5, 7, 1, 9, 4, 10, 5, 9, 7]
+   B: [4, 1, 2, 8, 3, 1, 7]
+   C: [10, 2, 1, 9, 1]
+
+The union of ``A`` and ``B`` can be written as:
+
+.. code-block:: yaml+jinja
+
+   result: "{{ A | community.general.lists_union(B) }}"
+
+This statement produces:
+
+.. code-block:: yaml
+
+   result: [9, 5, 7, 1, 4, 10, 2, 8, 3]
+
+If you want to calculate the intersection of ``A``, ``B`` and ``C``, you can use the following statement:
+
+.. code-block:: yaml+jinja
+
+   result: "{{ A | community.general.lists_intersect(B, C) }}"
+
+Alternatively, you can use a list of lists as input to the filter
+
+.. code-block:: yaml+jinja
+
+   result: "{{ [A, B] | community.general.lists_intersect(C) }}"
+
+or
+
+.. code-block:: yaml+jinja
+
+   result: "{{ [A, B, C] | community.general.lists_intersect(flatten=true) }}"
+
+All three statements are equivalent and give:
+
+.. code-block:: yaml
+
+   result: [1]
+
+.. note:: Be aware that in most cases, filter calls without any argument require ``flatten=true``, otherwise the input is returned unchanged. The reason is that the input is treated as a single variable argument and is wrapped in an additional outer list; ``flatten=true`` ensures that this list is removed before the input is processed by the filter logic.
+
+The filters :ansplugin:`community.general.lists_difference#filter` or :ansplugin:`community.general.lists_symmetric_difference#filter` can be used in the same way as the filters in the examples above. They calculate the difference or the symmetric difference between two or more lists and preserve the item order.
+
+For example, the symmetric difference of ``A``, ``B`` and ``C`` may be written as:
+
+.. code-block:: yaml+jinja
+
+   result: "{{ A | community.general.lists_symmetric_difference(B, C) }}"
+
+This gives:
+
+.. code-block:: yaml
+
+   result: [5, 8, 3, 1]
+
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
new file mode 100644
index 0000000000..cafe04e5c4
--- /dev/null
+++ b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
@@ -0,0 +1,267 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+Merging lists of dictionaries
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter.
+
+.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `.
+
+Let us use the lists below in the following examples:
+
+..
code-block:: yaml
+
+   list1:
+     - {name: foo, extra: true}
+     - {name: bar, extra: false}
+     - {name: meh, extra: true}
+
+   list2:
+     - {name: foo, path: /foo}
+     - {name: baz, path: /baz}
+
+Two lists
+"""""""""
+In the example below the lists are merged by the attribute ``name``:
+
+.. code-block:: yaml+jinja
+
+   list3: "{{ list1 |
+              community.general.lists_mergeby(list2, 'name') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+   list3:
+     - {name: bar, extra: false}
+     - {name: baz, path: /baz}
+     - {name: foo, extra: true, path: /foo}
+     - {name: meh, extra: true}
+
+
+.. versionadded:: 2.0.0
+
+List of two lists
+"""""""""""""""""
+It is possible to use a list of lists as input to the filter:
+
+.. code-block:: yaml+jinja
+
+   list3: "{{ [list1, list2] |
+              community.general.lists_mergeby('name') }}"
+
+This produces the same result as in the previous example:
+
+.. code-block:: yaml
+
+   list3:
+     - {name: bar, extra: false}
+     - {name: baz, path: /baz}
+     - {name: foo, extra: true, path: /foo}
+     - {name: meh, extra: true}
+
+Single list
+"""""""""""
+It is possible to merge a single list:
+
+.. code-block:: yaml+jinja
+
+   list3: "{{ [list1 + list2, []] |
+              community.general.lists_mergeby('name') }}"
+
+This produces the same result as in the previous example:
+
+.. code-block:: yaml
+
+   list3:
+     - {name: bar, extra: false}
+     - {name: baz, path: /baz}
+     - {name: foo, extra: true, path: /foo}
+     - {name: meh, extra: true}
+
+
+The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. These are available since community.general 4.4.0.
+
+**recursive**
+  A boolean, defaults to ``false``. It controls whether the :ansplugin:`community.general.lists_mergeby#filter` filter recursively merges nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
+
+**list_merge**
+  A string, its possible values are :ansval:`replace` (default), :ansval:`keep`, :ansval:`append`, :ansval:`prepend`, :ansval:`append_rp` or :ansval:`prepend_rp`. It modifies the behaviour of :ansplugin:`community.general.lists_mergeby#filter` when the hashes to merge contain arrays/lists.
+
+The examples below set :ansopt:`community.general.lists_mergeby#filter:recursive=true` and display the differences among all six options of :ansopt:`community.general.lists_mergeby#filter:list_merge`. The functionality of these parameters is exactly the same as in the filter :ansplugin:`ansible.builtin.combine#filter`. See :ref:`Combining hashes/dictionaries ` to learn details about these options.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+   list1:
+     - name: myname01
+       param01:
+         x: default_value
+         y: default_value
+         list: [default_value]
+     - name: myname02
+       param01: [1, 1, 2, 3]
+
+   list2:
+     - name: myname01
+       param01:
+         y: patch_value
+         z: patch_value
+         list: [patch_value]
+     - name: myname02
+       param01: [3, 4, 4]
+
+list_merge=replace (default)
+""""""""""""""""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default):
+
+.. code-block:: yaml+jinja
+
+   list3: "{{ [list1, list2] |
+              community.general.lists_mergeby('name',
+                                              recursive=true) }}"
+
+This produces:
+
+..
code-block:: yaml + + list3: + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4] + +list_merge=keep +""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`: + +.. code-block:: yaml+jinja + + list3: "{{ [list1, list2] | + community.general.lists_mergeby('name', + recursive=true, + list_merge='keep') }}" + +This produces: + +.. code-block:: yaml + + list3: + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3] + +list_merge=append +""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`: + +.. code-block:: yaml+jinja + + list3: "{{ [list1, list2] | + community.general.lists_mergeby('name', + recursive=true, + list_merge='append') }}" + +This produces: + +.. code-block:: yaml + + list3: + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 3, 4, 4] + +list_merge=prepend +"""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`: + +.. code-block:: yaml+jinja + + list3: "{{ [list1, list2] | + community.general.lists_mergeby('name', + recursive=true, + list_merge='prepend') }}" + +This produces: + +.. code-block:: yaml + + list3: + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2, 3] + +list_merge=append_rp +"""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`: + +.. code-block:: yaml+jinja + + list3: "{{ [list1, list2] | + community.general.lists_mergeby('name', + recursive=true, + list_merge='append_rp') }}" + +This produces: + +.. code-block:: yaml + + list3: + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 4, 4] + +list_merge=prepend_rp +""""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`: + +.. code-block:: yaml+jinja + + list3: "{{ [list1, list2] | + community.general.lists_mergeby('name', + recursive=true, + list_merge='prepend_rp') }}" + +This produces: + +.. code-block:: yaml + + list3: + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2] + diff --git a/docs/docsite/rst/filter_guide_conversions.rst b/docs/docsite/rst/filter_guide_conversions.rst new file mode 100644 index 0000000000..ca0401762c --- /dev/null +++ b/docs/docsite/rst/filter_guide_conversions.rst @@ -0,0 +1,113 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Conversions +----------- + +Parsing CSV files +^^^^^^^^^^^^^^^^^ + +Ansible offers the :ansplugin:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the :ansplugin:`community.general.from_csv filter ` exists. + +.. 
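code-block:: yaml+jinja
+
+   # A minimal sketch, not taken from the module documentation: strings
+   # with non-default separators can be handled through the keyword
+   # arguments described further below.
+   - name: "Parse semicolon-separated CSV from string"
+     debug:
+       msg: "{{ 'a;b;c' | community.general.from_csv(fieldnames=['x', 'y', 'z'], delimiter=';') }}"
+
+For example, parsing a multi-line CSV string with a header row:
+
+..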
code-block:: yaml+jinja
+
+   - name: "Parse CSV from string"
+     debug:
+       msg: "{{ csv_string | community.general.from_csv }}"
+     vars:
+       csv_string: |
+         foo,bar,baz
+         1,2,3
+         you,this,then
+
+This produces:
+
+.. code-block:: ansible-output
+
+   TASK [Parse CSV from string] **************************************************************
+   ok: [localhost] => {
+       "msg": [
+           {
+               "bar": "2",
+               "baz": "3",
+               "foo": "1"
+           },
+           {
+               "bar": "this",
+               "baz": "then",
+               "foo": "you"
+           }
+       ]
+   }
+
+The :ansplugin:`community.general.from_csv filter ` has several keyword arguments to control its behavior:
+
+:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
+:fieldnames: A list of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
+:delimiter: Sets the delimiter to use. Default depends on the dialect used.
+:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
+:strict: Set to ``true`` to error out on invalid CSV input.
+
+.. versionadded:: 3.0.0
+
+Converting to JSON
+^^^^^^^^^^^^^^^^^^
+
+`JC `_ is a CLI tool and Python library which allows you to interpret the output of various CLI programs as JSON. It is also available as a filter in community.general, called :ansplugin:`community.general.jc#filter`. This filter needs the `jc Python library `_ installed on the controller.
+
+.. code-block:: yaml+jinja
+
+   - name: Run 'ls' to list files in /
+     command: ls /
+     register: result
+
+   - name: Parse the ls output
+     debug:
+       msg: "{{ result.stdout | community.general.jc('ls') }}"
+
+This produces:
+
+.. code-block:: ansible-output
+
+   TASK [Run 'ls' to list files in /] ********************************************************
+   changed: [localhost]
+
+   TASK [Parse the ls output] ****************************************************************
+   ok: [localhost] => {
+       "msg": [
+           {
+               "filename": "bin"
+           },
+           {
+               "filename": "boot"
+           },
+           {
+               "filename": "dev"
+           },
+           {
+               "filename": "etc"
+           },
+           {
+               "filename": "home"
+           },
+           {
+               "filename": "lib"
+           },
+           {
+               "filename": "proc"
+           },
+           {
+               "filename": "root"
+           },
+           {
+               "filename": "run"
+           },
+           {
+               "filename": "tmp"
+           }
+       ]
+   }
+
+.. versionadded:: 2.0.0
diff --git a/docs/docsite/rst/filter_guide_creating_identifiers.rst b/docs/docsite/rst/filter_guide_creating_identifiers.rst
new file mode 100644
index 0000000000..6e0c730c60
--- /dev/null
+++ b/docs/docsite/rst/filter_guide_creating_identifiers.rst
@@ -0,0 +1,85 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+Creating identifiers
+--------------------
+
+The following filters allow you to create identifiers.
+
+Hashids
+^^^^^^^
+
+`Hashids `_ allow you to convert sequences of integers to short unique string identifiers. The :ansplugin:`community.general.hashids_encode#filter` and :ansplugin:`community.general.hashids_decode#filter` filters need the `hashids Python library `_ installed on the controller.
+
+.. code-block:: yaml+jinja
+
+   - name: "Create hashid"
+     debug:
+       msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"
+
+   - name: "Decode hashid"
+     debug:
+       msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"
+
+This produces:
+
+..
code-block:: ansible-output + + TASK [Create hashid] ********************************************************************** + ok: [localhost] => { + "msg": "jm2Cytn" + } + + TASK [Decode hashid] ********************************************************************** + ok: [localhost] => { + "msg": [ + 1234, + 5, + 6 + ] + } + +The hashids filters accept keyword arguments to allow fine-tuning the hashids generated: + +:salt: String to use as salt when hashing. +:alphabet: String of 16 or more unique characters to produce a hash. +:min_length: Minimum length of hash produced. + +.. versionadded: 3.0.0 + +Random MACs +^^^^^^^^^^^ + +You can use the :ansplugin:`community.general.random_mac filter ` to complete a partial `MAC address `_ to a random 6-byte MAC address. + +.. code-block:: yaml+jinja + + - name: "Create a random MAC starting with ff:" + debug: + msg: "{{ 'FF' | community.general.random_mac }}" + + - name: "Create a random MAC starting with 00:11:22:" + debug: + msg: "{{ '00:11:22' | community.general.random_mac }}" + +This produces: + +.. code-block:: ansible-output + + TASK [Create a random MAC starting with ff:] ********************************************** + ok: [localhost] => { + "msg": "ff:69:d3:78:7f:b4" + } + + TASK [Create a random MAC starting with 00:11:22:] **************************************** + ok: [localhost] => { + "msg": "00:11:22:71:5d:3b" + } + +You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses: + +.. code-block:: yaml+jinja + + "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" diff --git a/docs/docsite/rst/filter_guide_paths.rst b/docs/docsite/rst/filter_guide_paths.rst new file mode 100644 index 0000000000..41185832f2 --- /dev/null +++ b/docs/docsite/rst/filter_guide_paths.rst @@ -0,0 +1,9 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Paths +----- + +The :ansplugin:`ansible.builtin.path_join filter ` has been added in ansible-base 2.10. Community.general 3.0.0 and newer contains an alias ``community.general.path_join`` for this filter that could be used on Ansible 2.9 as well. Since community.general no longer supports Ansible 2.9, this is now a simple redirect to :ansplugin:`ansible.builtin.path_join filter `. diff --git a/docs/docsite/rst/filter_guide_selecting_json_data.rst b/docs/docsite/rst/filter_guide_selecting_json_data.rst new file mode 100644 index 0000000000..bdf2624f3c --- /dev/null +++ b/docs/docsite/rst/filter_guide_selecting_json_data.rst @@ -0,0 +1,149 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.json_query_filter: + +Selecting JSON data: JSON queries +--------------------------------- + +To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the :ansplugin:`community.general.json_query filter `. The :ansplugin:`community.general.json_query#filter` filter lets you query a complex JSON structure and iterate over it using a loop structure. + +.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. 
For examples, see `jmespath examples `_. + +Consider this data structure: + +.. code-block:: yaml+jinja + + { + "domain_definition": { + "domain": { + "cluster": [ + { + "name": "cluster1" + }, + { + "name": "cluster2" + } + ], + "server": [ + { + "name": "server11", + "cluster": "cluster1", + "port": "8080" + }, + { + "name": "server12", + "cluster": "cluster1", + "port": "8090" + }, + { + "name": "server21", + "cluster": "cluster2", + "port": "9080" + }, + { + "name": "server22", + "cluster": "cluster2", + "port": "9090" + } + ], + "library": [ + { + "name": "lib1", + "target": "cluster1" + }, + { + "name": "lib2", + "target": "cluster2" + } + ] + } + } + } + +To extract all clusters from this structure, you can use the following query: + +.. code-block:: yaml+jinja + + - name: Display all cluster names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}" + +To extract all server names: + +.. code-block:: yaml+jinja + + - name: Display all server names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}" + +To extract ports from cluster1: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port" + +.. note:: You can use a variable to make the query more readable. + +To print out the ports from cluster1 in a comma separated string: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 as a string + ansible.builtin.debug: + msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}" + +.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability. + +You can use YAML `single quote escaping `_: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}" + +.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote. + +To get a hash map with all ports and names of a cluster: + +.. code-block:: yaml+jinja + + - name: Display all server ports and names from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}" + +To extract ports from all clusters with name starting with 'server1': + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" + vars: + server_name_query: "domain.server[?starts_with(name,'server1')].port" + +To extract ports from all clusters with name containing 'server1': + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" + vars: + server_name_query: "domain.server[?contains(name,'server1')].port" + +.. 
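code-block:: yaml+jinja
+
+   # A related sketch (an assumption, not part of the original examples):
+   # since JMESPath projections return lists, you can combine the filter
+   # with Jinja2's `first` filter to pick out a single value.
+   - name: Display the name of the first cluster
+     ansible.builtin.debug:
+       msg: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') | first }}"
+
+..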
note:: When using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filters for correct parsing of the data structure.
diff --git a/docs/docsite/rst/filter_guide_working_with_times.rst b/docs/docsite/rst/filter_guide_working_with_times.rst
new file mode 100644
index 0000000000..032d44bb57
--- /dev/null
+++ b/docs/docsite/rst/filter_guide_working_with_times.rst
@@ -0,0 +1,89 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+Working with times
+------------------
+
+The :ansplugin:`community.general.to_time_unit filter ` allows you to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
+
+There are shorthands to directly convert to various units, like :ansplugin:`community.general.to_hours#filter`, :ansplugin:`community.general.to_minutes#filter`, :ansplugin:`community.general.to_seconds#filter`, and so on. The following table lists all units that can be used:
+
+.. list-table:: Units
+   :widths: 25 25 25 25
+   :header-rows: 1
+
+   * - Unit name
+     - Unit value in seconds
+     - Unit strings for filter
+     - Shorthand filter
+   * - Millisecond
+     - 1/1000 second
+     - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
+     - :ansplugin:`community.general.to_milliseconds#filter`
+   * - Second
+     - 1 second
+     - ``s``, ``sec``, ``secs``, ``second``, ``seconds``
+     - :ansplugin:`community.general.to_seconds#filter`
+   * - Minute
+     - 60 seconds
+     - ``m``, ``min``, ``mins``, ``minute``, ``minutes``
+     - :ansplugin:`community.general.to_minutes#filter`
+   * - Hour
+     - 60*60 seconds
+     - ``h``, ``hour``, ``hours``
+     - :ansplugin:`community.general.to_hours#filter`
+   * - Day
+     - 24*60*60 seconds
+     - ``d``, ``day``, ``days``
+     - :ansplugin:`community.general.to_days#filter`
+   * - Week
+     - 7*24*60*60 seconds
+     - ``w``, ``week``, ``weeks``
+     - :ansplugin:`community.general.to_weeks#filter`
+   * - Month
+     - 30*24*60*60 seconds
+     - ``mo``, ``month``, ``months``
+     - :ansplugin:`community.general.to_months#filter`
+   * - Year
+     - 365*24*60*60 seconds
+     - ``y``, ``year``, ``years``
+     - :ansplugin:`community.general.to_years#filter`
+
+Note that months and years use a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to :ansplugin:`community.general.to_time_unit#filter` and to all shorthand filters.
+
+.. code-block:: yaml+jinja
+
+   - name: Convert string to seconds
+     debug:
+       msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"
+
+   - name: Convert string to hours
+     debug:
+       msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"
+
+   - name: Convert string to years (using 365.25 days == 1 year)
+     debug:
+       msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"
+
+This produces:
+
+..
code-block:: ansible-output + + TASK [Convert string to seconds] ********************************************************** + ok: [localhost] => { + "msg": "109210.123" + } + + TASK [Convert string to hours] ************************************************************ + ok: [localhost] => { + "msg": "30.336145277778" + } + + TASK [Convert string to years (using 365.25 days == 1 year)] ****************************** + ok: [localhost] => { + "msg": "1.096851471595" + } + +.. versionadded: 0.2.0 diff --git a/docs/docsite/rst/filter_guide_working_with_unicode.rst b/docs/docsite/rst/filter_guide_working_with_unicode.rst new file mode 100644 index 0000000000..e75b0f871b --- /dev/null +++ b/docs/docsite/rst/filter_guide_working_with_unicode.rst @@ -0,0 +1,35 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Working with Unicode +--------------------- + +`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this Unicode defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. + +You can use the :ansplugin:`community.general.unicode_normalize filter ` to normalize Unicode strings within your playbooks. + +.. code-block:: yaml+jinja + + - name: Compare Unicode representations + debug: + msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}" + vars: + with_combining_character: "{{ 'Mayagu\u0308ez' }}" + without_combining_character: Mayagüez + +This produces: + +.. code-block:: ansible-output + + TASK [Compare Unicode representations] ******************************************************** + ok: [localhost] => { + "msg": true + } + +The :ansplugin:`community.general.unicode_normalize filter ` accepts a keyword argument :ansopt:`community.general.unicode_normalize#filter:form` to select the Unicode form used to normalize the input string. + +:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information. + +.. versionadded:: 3.7.0 diff --git a/docs/docsite/rst/filter_guide_working_with_versions.rst b/docs/docsite/rst/filter_guide_working_with_versions.rst new file mode 100644 index 0000000000..055bbcd217 --- /dev/null +++ b/docs/docsite/rst/filter_guide_working_with_versions.rst @@ -0,0 +1,39 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Working with versions +--------------------- + +If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the :ansplugin:`community.general.version_sort filter `: + +.. code-block:: yaml+jinja + + - name: Sort list by version number + debug: + var: ansible_versions | community.general.version_sort + vars: + ansible_versions: + - '2.8.0' + - '2.11.0' + - '2.7.0' + - '2.10.0' + - '2.9.0' + +This produces: + +.. 
code-block:: ansible-output + + TASK [Sort list by version number] ******************************************************** + ok: [localhost] => { + "ansible_versions | community.general.version_sort": [ + "2.7.0", + "2.8.0", + "2.9.0", + "2.10.0", + "2.11.0" + ] + } + +.. versionadded: 2.2.0 diff --git a/docs/docsite/rst/guide_alicloud.rst b/docs/docsite/rst/guide_alicloud.rst new file mode 100644 index 0000000000..b5ce2c063c --- /dev/null +++ b/docs/docsite/rst/guide_alicloud.rst @@ -0,0 +1,96 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_alicloud: + +Alibaba Cloud Compute Services Guide +==================================== + +Introduction +```````````` + +The community.general collection contains several modules for controlling and managing Alibaba Cloud Compute Services (Alicloud). This guide +explains how to use the Alicloud Ansible modules together. + +All Alicloud modules require ``footmark`` - install it on your control machine with ``pip install footmark``. + +Cloud modules, including Alicloud modules, are usually executed on your local machine (the control machine) with ``connection: local``, rather than on remote machines defined in your hosts. + +Normally, you'll use the following pattern for plays that provision Alicloud resources: + +.. code-block:: yaml + + - hosts: localhost + connection: local + vars: + - ... + tasks: + - ... + +Authentication +`````````````` + +You can specify your Alicloud authentication credentials (access key and secret key) by passing them as +environment variables or by storing them in a vars file. + +To pass authentication credentials as environment variables: + +.. code-block:: console + + export ALICLOUD_ACCESS_KEY='Alicloud123' + export ALICLOUD_SECRET_KEY='AlicloudSecret123' + +To store authentication credentials in a vars file, encrypt them with :ref:`Ansible Vault ` to keep them secure, then list them: + +.. code-block:: yaml + + --- + alicloud_access_key: "--REMOVED--" + alicloud_secret_key: "--REMOVED--" + +Note that if you store your credentials in a vars file, you need to refer to them in each Alicloud module. For example: + +.. code-block:: yaml+jinja + + - community.general.ali_instance: + alicloud_access_key: "{{ alicloud_access_key }}" + alicloud_secret_key: "{{ alicloud_secret_key }}" + image_id: "..." + +Provisioning +```````````` + +Alicloud modules create Alicloud ECS instances (:ansplugin:`community.general.ali_instance#module`) and retrieve information on these (:ansplugin:`community.general.ali_instance_info#module`). + +You can use the ``count`` parameter to control the number of resources you create or terminate. For example, if you want exactly 5 instances tagged ``NewECS``, set the ``count`` of instances to 5 and the ``count_tag`` to ``NewECS``, as shown in the last task of the example playbook below. If there are no instances with the tag ``NewECS``, the task creates 5 new instances. If there are 2 instances with that tag, the task creates 3 more. If there are 8 instances with that tag, the task terminates 3 of those instances. + +If you do not specify a ``count_tag``, the task creates the number of instances you specify in ``count`` with the ``instance_name`` you provide. + +.. 
code-block:: yaml+jinja
+
+   # alicloud_setup.yml
+
+   - hosts: localhost
+     connection: local
+
+     tasks:
+       - name: Create a set of instances
+         community.general.ali_instance:
+           instance_type: ecs.n4.small
+           image_id: "{{ ami_id }}"
+           instance_name: "My-new-instance"
+           instance_tags:
+             Name: NewECS
+             Version: 0.0.1
+           count: 5
+           count_tag:
+             Name: NewECS
+           allocate_public_ip: true
+           max_bandwidth_out: 50
+         register: create_instance
+
+In the example playbook above, data about the instances created by this playbook is saved in the variable defined by the ``register`` keyword in the task.
+
+Each Alicloud module offers a variety of parameter options. Not all options are demonstrated in the above example. See each individual module for further details and examples.
diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst
new file mode 100644
index 0000000000..c1514ee340
--- /dev/null
+++ b/docs/docsite/rst/guide_cmdrunner.rst
@@ -0,0 +1,529 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_cmdrunner:
+
+
+Command Runner guide
+====================
+
+
+Introduction
+^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.cmd_runner`` module util provides the
+``CmdRunner`` class to help execute external commands. The class is a wrapper around
+the standard ``AnsibleModule.run_command()`` method, handling command arguments, localization settings,
+output processing, check mode, and other features.
+
+It is even more useful when one command is used in multiple modules, so that you can define all options
+in a module util file, and each module uses the same runner with different arguments.
+
+For the sake of clarity, throughout this guide, unless otherwise specified, we use the term *option* when referring to
+Ansible module options, and the term *argument* when referring to the command line arguments for the external command.
+
+
+Quickstart
+""""""""""
+
+``CmdRunner`` defines a command and a set of coded instructions on how to format
+the command-line arguments, in which specific order, for a particular execution.
+It relies on ``ansible.module_utils.basic.AnsibleModule.run_command()`` to actually execute the command.
+There are other features, see more details throughout this document.
+
+To use ``CmdRunner`` you must start by creating an object. The example below is a simplified
+version of the actual code in :ansplugin:`community.general.ansible_galaxy_install#module`:
+
+.. code-block:: python
+
+   from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+   runner = CmdRunner(
+       module,
+       command="ansible-galaxy",
+       arg_formats=dict(
+           type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]),
+           galaxy_cmd=cmd_runner_fmt.as_list(),
+           upgrade=cmd_runner_fmt.as_bool("--upgrade"),
+           requirements_file=cmd_runner_fmt.as_opt_val('-r'),
+           dest=cmd_runner_fmt.as_opt_val('-p'),
+           force=cmd_runner_fmt.as_bool("--force"),
+           no_deps=cmd_runner_fmt.as_bool("--no-deps"),
+           version=cmd_runner_fmt.as_fixed("--version"),
+           name=cmd_runner_fmt.as_list(),
+       )
+   )
+
+This is meant to be done once, then every time you need to execute the command you create a context and pass values as needed:
+
+..
code-block:: python
+
+   # Run the command with these arguments, when values exist for them
+   with runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx:
+       ctx.run(galaxy_cmd="install", upgrade=upgrade)
+
+   # version is fixed, requires no value
+   with runner("version") as ctx:
+       dummy, stdout, dummy = ctx.run()
+
+   # passes arg 'data' to AnsibleModule.run_command()
+   with runner("type name", data=stdin_data) as ctx:
+       dummy, stdout, dummy = ctx.run()
+
+   # Another way of expressing it
+   dummy, stdout, dummy = runner("version").run()
+
+Note that you can pass values for the arguments when calling ``run()``, otherwise ``CmdRunner``
+uses the module options with the exact same names to provide values for the runner arguments.
+If no value is passed and no module option is found for the name specified, then an exception is raised, unless
+the argument uses ``cmd_runner_fmt.as_fixed`` as its format function, like ``version`` in the example above.
+See more about it below.
+
+In the first example, values of ``type``, ``force``, ``no_deps`` and others
+are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are
+passed explicitly.
+
+.. note::
+
+   It is not possible to automatically retrieve values of suboptions.
+
+That generates a resulting command line similar to (example taken from the
+output of an integration test):
+
+.. code-block:: python
+
+   [
+       "/bin/ansible-galaxy",
+       "collection",
+       "install",
+       "--upgrade",
+       "-p",
+       "",
+       "netbox.netbox",
+   ]
+
+
+Argument formats
+^^^^^^^^^^^^^^^^
+
+As seen in the example, ``CmdRunner`` expects a parameter named ``arg_formats``
+defining how to format each CLI named argument.
+An "argument format" is nothing but a function to transform the value of a variable
+into something formatted for the command line.
+
+
+Argument format function
+""""""""""""""""""""""""
+
+An ``arg_format`` function is defined in a form similar to:
+
+.. code-block:: python
+
+   def func(value):
+       return ["--some-param-name", value]
+
+The parameter ``value`` can be of any type - although there are convenience
+mechanisms to help handling sequence and mapping objects.
+
+The result is expected to be of type ``Sequence[str]`` (most commonly
+``list[str]`` or ``tuple[str]``); otherwise it is considered to be a ``str``
+and is coerced into ``list[str]``.
+This resulting sequence of strings is added to the command line when that
+argument is actually used.
+
+For example, if ``func`` returns:
+
+- ``["nee", 2, "shruberries"]``, the command line adds arguments ``"nee" "2" "shruberries"``.
+- ``2 == 2``, the command line adds argument ``True``.
+- ``None``, the command line adds argument ``None``.
+- ``[]``, the command line adds no command line argument for that particular argument.
+
+
+Convenience format methods
+""""""""""""""""""""""""""
+
+In the same module as ``CmdRunner`` there is a class ``cmd_runner_fmt`` which
+provides a set of convenience methods that return format functions for common cases.
+In the first block of code in the `Quickstart`_ section you can see the importing of
+that class:
+
+.. code-block:: python
+
+   from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+The same example shows how to make use of some of them in the instantiation of the ``CmdRunner`` object.
+A description of each one of the convenience methods available and examples of how to use them is found below.
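+
+Since each convenience method returns an ordinary format function, one way to explore them is to call the generated function directly. The following is a minimal sketch, an assumption based on the Value/Outcome tables below rather than documented API usage, and it reuses the import from the `Quickstart`_ section:
+
+.. code-block:: python
+
+   from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
+
+   # Each line first creates a format function, then applies it to a
+   # sample value, reproducing the Value/Outcome tables that follow.
+   assert cmd_runner_fmt.as_bool("--force")(True) == ["--force"]
+   assert cmd_runner_fmt.as_opt_val("--name")("abc") == ["--name", "abc"]
+   assert cmd_runner_fmt.as_opt_eq_val("--num-cpus")(10) == ["--num-cpus=10"]
+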
+In these descriptions ``value`` refers to the single parameter passed to the formatting function. + +- ``cmd_runner_fmt.as_list()`` + This method does not receive any parameter, function returns ``value`` as-is. + + - Creation: + ``cmd_runner_fmt.as_list()`` + - Examples: + +----------------------+---------------------+ + | Value | Outcome | + +======================+=====================+ + | ``["foo", "bar"]`` | ``["foo", "bar"]`` | + +----------------------+---------------------+ + | ``"foobar"`` | ``["foobar"]`` | + +----------------------+---------------------+ + +- ``cmd_runner_fmt.as_bool()`` + This method receives two different parameters: ``args_true`` and ``args_false``, latter being optional. + If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``. + If the boolean evaluation is ``False``, then the function returns ``args_false`` if it was provided, or ``[]`` otherwise. + + - Creation (one arg): + ``cmd_runner_fmt.as_bool("--force")`` + - Examples: + +------------+--------------------+ + | Value | Outcome | + +============+====================+ + | ``True`` | ``["--force"]`` | + +------------+--------------------+ + | ``False`` | ``[]`` | + +------------+--------------------+ + - Creation (two args, ``None`` treated as ``False``): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``["--dont-do-it"]`` | + +------------+----------------------+ + - Creation (two args, ``None`` is ignored): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it", ignore_none=True)`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``[]`` | + +------------+----------------------+ + +- ``cmd_runner_fmt.as_bool_not()`` + This method receives one parameter, which is returned by the function when the boolean evaluation + of ``value`` is ``False``. + + - Creation: + ``cmd_runner_fmt.as_bool_not("--no-deps")`` + - Examples: + +-------------+---------------------+ + | Value | Outcome | + +=============+=====================+ + | ``True`` | ``[]`` | + +-------------+---------------------+ + | ``False`` | ``["--no-deps"]`` | + +-------------+---------------------+ + +- ``cmd_runner_fmt.as_optval()`` + This method receives one parameter ``arg``, the function returns the string concatenation + of ``arg`` and ``value``. + + - Creation: + ``cmd_runner_fmt.as_optval("-i")`` + - Examples: + +---------------+---------------------+ + | Value | Outcome | + +===============+=====================+ + | ``3`` | ``["-i3"]`` | + +---------------+---------------------+ + | ``foobar`` | ``["-ifoobar"]`` | + +---------------+---------------------+ + +- ``cmd_runner_fmt.as_opt_val()`` + This method receives one parameter ``arg``, the function returns ``[arg, value]``. 
+ + - Creation: + ``cmd_runner_fmt.as_opt_val("--name")`` + - Examples: + +--------------+--------------------------+ + | Value | Outcome | + +==============+==========================+ + | ``abc`` | ``["--name", "abc"]`` | + +--------------+--------------------------+ + +- ``cmd_runner_fmt.as_opt_eq_val()`` + This method receives one parameter ``arg``, the function returns the string of the form + ``{arg}={value}``. + + - Creation: + ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")`` + - Examples: + +------------+-------------------------+ + | Value | Outcome | + +============+=========================+ + | ``10`` | ``["--num-cpus=10"]`` | + +------------+-------------------------+ + +- ``cmd_runner_fmt.as_fixed()`` + This method defines one or more fixed arguments that are returned by the generated function + regardless whether ``value`` is passed to it or not. + + This method accepts these arguments in one of three forms: + + * one scalar parameter ``arg``, which will be returned as ``[arg]`` by the function, or + * one sequence parameter, such as a list, ``arg``, which will be returned by the function as ``arg[0]``, or + * multiple parameters ``args``, which will be returned as ``args`` directly by the function. + + See the examples below for each one of those forms. And, stressing that the generated function expects no ``value`` - if one + is provided then it is ignored. + + - Creation (one scalar argument): + * ``cmd_runner_fmt.as_fixed("--version")`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--version"]`` | + +---------+--------------------------------------+ + | 57 | * ``["--version"]`` | + +---------+--------------------------------------+ + + - Creation (one sequence argument): + * ``cmd_runner_fmt.as_fixed(["--list", "--json"])`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--list", "--json"]`` | + +---------+--------------------------------------+ + | True | * ``["--list", "--json"]`` | + +---------+--------------------------------------+ + + - Creation (multiple arguments): + * ``cmd_runner_fmt.as_fixed("--one", "--two", "--three")`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--one", "--two", "--three"]`` | + +---------+--------------------------------------+ + | False | * ``["--one", "--two", "--three"]`` | + +---------+--------------------------------------+ + + - Note: + This is the only special case in which a value can be missing for the formatting function. + The first example here comes from the code in `Quickstart`_. + In that case, the module has code to determine the command's version so that it can assert compatibility. + There is no *value* to be passed for that CLI argument. + +- ``cmd_runner_fmt.as_map()`` + This method receives one parameter ``arg`` which must be a dictionary, and an optional parameter ``default``. + The function returns the evaluation of ``arg[value]``. + If ``value not in arg``, then it returns ``default`` if defined, otherwise ``[]``. 
+
+  - Creation:
+      ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)``
+  - Examples:
+      +---------------------+---------------+
+      | Value               | Outcome       |
+      +=====================+===============+
+      | ``"b"``             | ``["2"]``     |
+      +---------------------+---------------+
+      | ``"yabadabadoo"``   | ``["42"]``    |
+      +---------------------+---------------+
+
+  - Note:
+      If ``default`` is not specified, invalid values return an empty list, meaning they are silently ignored.
+
+- ``cmd_runner_fmt.as_func()``
+  This method receives one parameter ``arg``, which is itself a format function and must abide by the rules described above.
+
+  - Creation:
+      ``cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)])``
+  - Note:
+      The outcome for that depends entirely on the function provided by the developer.
+
+
+Other features for argument formatting
+""""""""""""""""""""""""""""""""""""""
+
+Some additional features are available as decorators:
+
+- ``cmd_runner_fmt.unpack_args()``
+  This decorator unpacks the incoming ``value`` as a list of elements.
+
+  For example, in ``ansible_collections.community.general.plugins.module_utils.puppet``, it is used as:
+
+  .. code-block:: python
+
+     @cmd_runner_fmt.unpack_args
+     def execute_func(execute, manifest):
+         if execute:
+             return ["--execute", execute]
+         else:
+             return [manifest]
+
+     runner = CmdRunner(
+         module,
+         command=_prepare_base_cmd(),
+         path_prefix=_PUPPET_PATH_PREFIX,
+         arg_formats=dict(
+             # ...
+             _execute=cmd_runner_fmt.as_func(execute_func),
+             # ...
+         ),
+     )
+
+  Then, in :ansplugin:`community.general.puppet#module` it is put to use with:
+
+  .. code-block:: python
+
+     with runner(args_order) as ctx:
+         rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])
+
+- ``cmd_runner_fmt.unpack_kwargs()``
+  Conversely, this decorator unpacks the incoming ``value`` as a ``dict``-like object.
+
+- ``cmd_runner_fmt.stack()``
+  This decorator assumes ``value`` is a sequence and concatenates the output
+  of the wrapped function applied to each element of the sequence.
+
+  For example, in :ansplugin:`community.general.django_check#module`, the argument format for ``database``
+  is defined as:
+
+  .. code-block:: python
+
+     arg_formats = dict(
+         # ...
+         database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"),
+         # ...
+     )
+
+  When receiving a list ``["abc", "def"]``, the output is:
+
+  .. code-block:: python
+
+     ["--database", "abc", "--database", "def"]
+
+
+Command Runner
+^^^^^^^^^^^^^^
+
+Settings that can be passed to the ``CmdRunner`` constructor are:
+
+- ``module: AnsibleModule``
+  Module instance. Mandatory parameter.
+- ``command: str | list[str]``
+  Command to be executed. It can be a single string, the executable name, or a list
+  of strings containing the executable name as the first element and, optionally, fixed parameters.
+  Those parameters are used in all executions of the runner.
+  The *executable* pointed by this parameter (whether itself when ``str`` or its first element when ``list``) is
+  processed using ``AnsibleModule.get_bin_path()`` *unless* it is an absolute path or contains the character ``/``.
+- ``arg_formats: dict``
+  Mapping of argument names to formatting functions.
+- ``default_args_order: str``
+  As the name suggests, a default ordering for the arguments. When
+  this is passed, the context can be created without specifying ``args_order``. Defaults to ``()``.
+- ``check_rc: bool`` + When ``True``, if the return code from the command is not zero, the module exits + with an error. Defaults to ``False``. +- ``path_prefix: list[str]`` + If the command being executed is installed in a non-standard directory path, + additional paths might be provided to search for the executable. Defaults to ``None``. +- ``environ_update: dict`` + Pass additional environment variables to be set during the command execution. + Defaults to ``None``. +- ``force_lang: str`` + It is usually important to force the locale to one specific value, so that responses are consistent and, therefore, parseable. + Please note that using this option (which is enabled by default) overwrites the environment variables ``LANGUAGE`` and ``LC_ALL``. + To disable this mechanism, set this parameter to ``None``. + In community.general 9.1.0 a special value ``auto`` was introduced for this parameter, with the effect + that ``CmdRunner`` then tries to determine the best parseable locale for the runtime. + It should become the default value in the future, but for the time being the default value is ``C``. + +When creating a context, the additional settings that can be passed to the call are: + +- ``args_order: str`` + Establishes the order in which the arguments are rendered in the command line. + This parameter is mandatory unless ``default_args_order`` was provided to the runner instance. +- ``output_process: func`` + Function to transform the output of the executable into different values or formats. + See examples in section below. +- ``check_mode_skip: bool`` + Whether to skip the actual execution of the command when the module is in check mode. + Defaults to ``False``. +- ``check_mode_return: any`` + If ``check_mode_skip=True``, then return this value instead. +- valid named arguments to ``AnsibleModule.run_command()`` + Other than ``args``, any valid argument to ``run_command()`` can be passed when setting up the run context. + For example, ``data`` can be used to send information to the command's standard input. + Or ``cwd`` can be used to run the command inside a specific working directory. + +Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior +might occur if redefining options already present in the runner or its context creation. Use with caution. + + +Processing results +^^^^^^^^^^^^^^^^^^ + +As mentioned, ``CmdRunner`` uses ``AnsibleModule.run_command()`` to execute the external command, +and it passes the return value from that method back to caller. That means that, +by default, the result is going to be a tuple ``(rc, stdout, stderr)``. + +If you need to transform or process that output, you can pass a function to the context, +as the ``output_process`` parameter. It must be a function like: + +.. code-block:: python + + def process(rc, stdout, stderr): + # do some magic + return processed_value # whatever that is + +In that case, the return of ``run()`` is the ``processed_value`` returned by the function. + + +PythonRunner +^^^^^^^^^^^^ + +The ``PythonRunner`` class is a specialized version of ``CmdRunner``, geared towards the execution of +Python scripts. It features two extra and mutually exclusive parameters ``python`` and ``venv`` in its constructor: + +.. 
code-block:: python
+
+   from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner
+   from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
+
+   runner = PythonRunner(
+       module,
+       command=["-m", "django"],
+       arg_formats=dict(...),
+       python="python",
+       venv="/path/to/some/venv",
+   )
+
+The default value for ``python`` is the string ``python``, and for ``venv`` it is ``None``.
+
+The command line produced by such a command with ``python="python3.12"`` is something like:
+
+.. code-block:: shell
+
+   /usr/bin/python3.12 -m django ...
+
+And the command line for ``venv="/work/venv"`` is like:
+
+.. code-block:: shell
+
+   /work/venv/bin/python -m django ...
+
+You may provide the value of the ``command`` argument as a string (in that case the string is used as a script name)
+or as a list, in which case the elements of the list must be valid arguments for the Python interpreter, as in the example above.
+See `Command line and environment `_ for more details.
+
+If the parameter ``python`` is an absolute path, or contains directory separators, such as ``/``, then it is used
+as-is, otherwise the runtime ``PATH`` is searched for that command name.
+
+Other than that, everything else works as in ``CmdRunner``.
+
+.. versionadded:: 4.8.0
diff --git a/docs/docsite/rst/guide_deps.rst b/docs/docsite/rst/guide_deps.rst
new file mode 100644
index 0000000000..1a44051ee4
--- /dev/null
+++ b/docs/docsite/rst/guide_deps.rst
@@ -0,0 +1,75 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_deps:
+
+``deps`` Guide
+==============
+
+
+Using ``deps``
+^^^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.deps`` module util simplifies
+the importing of code as described in :ref:`Importing and using shared code `.
+Please note that ``deps`` is meant to be used specifically with Ansible modules, and not other types of plugins.
+
+The same example from the Developer Guide would become:
+
+.. code-block:: python
+
+   from ansible_collections.community.general.plugins.module_utils import deps
+
+
+   with deps.declare("foo"):
+       import foo
+
+Then in ``main()``, just after the argspec (or anywhere in the code, for that matter), do
+
+.. code-block:: python
+
+   deps.validate(module)  # assuming module is a valid AnsibleModule instance
+
+By default, ``deps`` relies on ``ansible.module_utils.basic.missing_required_lib`` to generate
+a message about a failing import. That function accepts parameters ``reason`` and ``url``,
+and so does ``deps``:
+
+.. code-block:: python
+
+   with deps.declare("foo", reason="foo is needed to properly bar", url="https://foo.bar.io"):
+       import foo
+
+If you would rather write a custom message instead of using ``missing_required_lib`` then do:
+
+.. code-block:: python
+
+   with deps.declare("foo", msg="Custom msg explaining why foo is needed"):
+       import foo
+
+``deps`` allows for multiple dependencies to be declared:
+
+.. code-block:: python
+
+   with deps.declare("foo"):
+       import foo
+
+   with deps.declare("bar"):
+       import bar
+
+   with deps.declare("doe"):
+       import doe
+
+By default, ``deps.validate()`` checks all the declared dependencies, but if so desired,
+they can be validated selectively by doing:
+
+..
code-block:: python + + deps.validate(module, "foo") # only validates the "foo" dependency + + deps.validate(module, "doe:bar") # only validates the "doe" and "bar" dependencies + + deps.validate(module, "-doe:bar") # validates all dependencies except "doe" and "bar" + +.. versionadded:: 6.1.0 diff --git a/docs/docsite/rst/guide_iocage.rst b/docs/docsite/rst/guide_iocage.rst new file mode 100644 index 0000000000..67eb0e8a99 --- /dev/null +++ b/docs/docsite/rst/guide_iocage.rst @@ -0,0 +1,15 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage: + +************ +Iocage Guide +************ + +.. toctree:: + :maxdepth: 1 + + guide_iocage_inventory diff --git a/docs/docsite/rst/guide_iocage_inventory.rst b/docs/docsite/rst/guide_iocage_inventory.rst new file mode 100644 index 0000000000..4a410c35db --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory.rst @@ -0,0 +1,31 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory: + +community.general.iocage inventory plugin +========================================= + +The inventory plugin :ansplugin:`community.general.iocage#inventory` gets the inventory hosts from the iocage jail manager. + +See: + +* `iocage - A FreeBSD Jail Manager `_ +* `man iocage `_ +* `Jails and Containers `_ + +.. note:: + The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`. + +.. toctree:: + :caption: Table of Contents + :maxdepth: 1 + + guide_iocage_inventory_basics + guide_iocage_inventory_dhcp + guide_iocage_inventory_hooks + guide_iocage_inventory_properties + guide_iocage_inventory_tags + guide_iocage_inventory_aliases diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst new file mode 100644 index 0000000000..431403d733 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst @@ -0,0 +1,200 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases: + +Aliases +------- + +Quoting :ref:`inventory_aliases`: + + The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host. + +As root at the iocage host, stop and destroy all jails: + +.. 
code-block:: console + + shell> iocage stop ALL + * Stopping srv_1 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1000 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_2 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1001 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_3 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1002 OK + + Removing jail process OK + + Executing poststop OK + ansible_client is not running! + + shell> iocage destroy -f srv_1 srv_2 srv_3 + Destroying srv_1 + Destroying srv_2 + Destroying srv_3 + +Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``: + +.. code-block:: console + + shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1 + 1c11de2d successfully created! + 9d94cc9e successfully created! + 052b9557 successfully created! + +The names are random. Start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. + * Starting 052b9557 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.137/24 + No default gateway found for ipv6. + * Starting 1c11de2d + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.146/24 + No default gateway found for ipv6. + * Starting 9d94cc9e + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.115/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. code-block:: console + + shell> iocage list -l + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+==========+======+=======+======+=================+====================+=====+================+==========+ + | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Set notes. The tag *alias* will be used to create inventory aliases: + +.. 
code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557 + notes: none -> vmm=iocage_02 project=foo alias=srv_1 + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d + notes: none -> vmm=iocage_02 project=foo alias=srv_2 + shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e + notes: none -> vmm=iocage_02 project=bar alias=srv_3 + +Update the inventory configuration. Set the option +:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. This tag keeps the +value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be +enabled. For example, ``hosts/02_iocage.yml`` contains: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + inventory_hostname_tag: alias + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + alias: srv_1 + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + alias: srv_2 + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + alias: srv_3 + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst new file mode 100644 index 0000000000..f198edc4f4 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst @@ -0,0 +1,128 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics: + +Basics +------ + +As root at the iocage host, create three VNET jails with a DHCP interface from the template +*ansible_client*: + +.. 
code-block:: console + + shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1 + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1 + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1 + srv_3 successfully created! + +See: `Configuring a VNET Jail `_. + +As admin at the controller, list the jails: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+====================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml`` + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + +Display the inventory: + +.. code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + +Optionally, create shared IP jails: + +.. code-block:: console + + shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24" + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24" + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24" + srv_3 successfully created! 
+ shell> iocage list -l + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+===================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + +See: `Configuring a Shared IP Jail `_ + +If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst new file mode 100644 index 0000000000..3c37366ca6 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp: + +DHCP +---- + +As root at the iocage host, start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. + * Starting srv_1 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.183/24 + No default gateway found for ipv6. + * Starting srv_2 + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.204/24 + No default gateway found for ipv6. + * Starting srv_3 + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.169/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. 
code-block:: console + + shell> iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +As admin at the controller, list the jails. The IP4 tab says "... address requires root": + +.. code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + +Use sudo if enabled: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 sudo iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml``. Use the option +:ansopt:`community.general.iocage#inventory:sudo`: + +.. 
code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + sudo: true + +Display the inventory: + +.. code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.183 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.183 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.204 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.204 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.169 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.169 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, also enable :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + sudo: true + sudo_preserve_env: true + +In this case, make sure the sudo tag ``SETENV`` is used: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin + admin ALL=(ALL) NOPASSWD:SETENV: ALL diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst new file mode 100644 index 0000000000..45364fc798 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst @@ -0,0 +1,187 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks: + +Hooks +----- + +The iocage utility internally opens a console to a jail to get the jail's DHCP address. This +requires root. If you run the command ``iocage list -l`` as an unprivileged user, you'll see the +message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use +``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file +``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks`` + +.. code-block:: shell + + case "$reason" in + "BOUND"|"REBIND"|"REBOOT"|"RENEW") + echo $new_ip_address > /var/db/dhclient-hook.address.$interface + ;; + esac + +where ``/zroot/iocage`` is the activated pool. + +.. 
code-block:: console + + shell> zfs list | grep /zroot/iocage + zroot/iocage 4.69G 446G 5.08M /zroot/iocage + zroot/iocage/download 927M 446G 384K /zroot/iocage/download + zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE + zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE + zroot/iocage/images 384K 446G 384K /zroot/iocage/images + zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails + zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1 + zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root + zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2 + zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root + zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3 + zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root + zroot/iocage/log 688K 446G 688K /zroot/iocage/log + zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases + zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE + zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root + zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates + zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client + zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root + +See: `man dhclient-script `_ + +Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`: + +.. code-block:: console + + shell> cat hosts/02_iocage.yml + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + hooks_results: + - /var/db/dhclient-hook.address.epair0b + +.. note:: + + The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you + activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path + /iocage/iocage/jails//root. If you mount the poolname to a different path, the easiest + remedy is to create a symlink. + +As admin at the controller, display the inventory: + +.. code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.183 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.204 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.169 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Compose the variable ``ansible_host``. 
For example, ``hosts/02_iocage.yml`` could look like: + +.. code-block:: yaml+jinja + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + +Test the jails. Create a playbook ``pb-test-uname.yml``: + +.. code-block:: yaml + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - command: uname -a + register: out + + - debug: + var: out.stdout + +See: :ref:`working_with_bsd` + +Run the playbook: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml + + PLAY [all] ********************************************************************************************************** + + TASK [command] ****************************************************************************************************** + changed: [srv_3] + changed: [srv_1] + changed: [srv_2] + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + ok: [srv_3] => + out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + ok: [srv_2] => + out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + +Note: This playbook and the inventory configuration also work for the *Shared IP Jails*. diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst new file mode 100644 index 0000000000..d044f2e7f2 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst @@ -0,0 +1,201 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties: + +Properties +---------- + +Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. Enable +:ansopt:`community.general.iocage#inventory:get_properties`: + +.. code-block:: yaml+jinja + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + +Display the properties. Create the playbook ``pb-test-properties.yml``: + +.. code-block:: yaml + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_properties + +Run the playbook. Limit the inventory to *srv_3*: + +.. 
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_3] => + iocage_properties: + CONFIG_VERSION: '33' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '1' + allow_mount_devfs: '0' + allow_mount_fdescfs: '0' + allow_mount_fusefs: '0' + allow_mount_linprocfs: '0' + allow_mount_linsysfs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_nfsd: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '0' + boot: '0' + bpf: '1' + children_max: '0' + cloned_release: 14.2-RELEASE + comment: none + compression: 'on' + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: auto + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '1' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: srv-3 + host_hostuuid: srv_3 + host_time: '1' + hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: none + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/srv_3/data + jail_zfs_mountpoint: none + last_started: '2025-06-11 04:29:23' + localhost_ip: none + login_flags: -f root + mac_prefix: 02a098 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: none + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 14.2-RELEASE-p3 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + source_template: ansible_client + stacksize: 'off' + state: up + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: 02a0983da05d 02a0983da05e + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + + PLAY RECAP 
********************************************************************************************************** + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst new file mode 100644 index 0000000000..8adf641073 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst @@ -0,0 +1,117 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags: + +Tags +---- + +Quoting `man iocage `_ + +.. code-block:: text + + PROPERTIES + ... + notes="any string" + Custom notes for miscellaneous tagging. + Default: none + Source: local + +We will use the format ``notes="tag1=value1 tag2=value2 ..."``. + +.. note:: + + The iocage tags have nothing to do with the :ref:`tags`. + +As root at the iocage host, set notes. For example, + +.. code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo" srv_1 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=foo" srv_2 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=bar" srv_3 + notes: none -> vmm=iocage_02 project=bar + +Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option +:ansopt:`community.general.iocage#inventory:get_properties` must be enabled. +For example, ``hosts/02_iocage.yml`` could look like: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml``: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. 
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst new file mode 100644 index 0000000000..711cdc7f99 --- /dev/null +++ b/docs/docsite/rst/guide_modulehelper.rst @@ -0,0 +1,559 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_modulehelper: + +Module Helper guide +=================== + + +Introduction +^^^^^^^^^^^^ + +Writing a module for Ansible is largely described in existing documentation. +However, a good part of that is boilerplate code that needs to be repeated every single time. +That is where ``ModuleHelper`` comes in: it takes care of a lot of that boilerplate code for you. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.quickstart: + +Quickstart +"""""""""" + +See the `example from Ansible documentation `_ +written with ``ModuleHelper``. +But bear in mind that it does not showcase all of MH's features: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + new=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + + def __run__(self): + self.vars.original_message = '' + self.vars.message = '' + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + + +Module Helper +^^^^^^^^^^^^^ + +Introduction +"""""""""""" + +``ModuleHelper`` is a wrapper around the standard ``AnsibleModule``, providing extra features and conveniences. +The basic structure of a module using ``ModuleHelper`` is as shown in the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +section above, but there are more elements that will take part in it. + +.. 
code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + # behavior for module parameters ONLY, see below for further information + output_params = () + change_params = () + diff_params = () + facts_params = () + + facts_name = None # used if generating facts, from parameters or otherwise + + module = dict( + argument_spec=dict(...), + # ... + ) + +After importing the ``ModuleHelper`` class, you need to declare your own class extending it. + +.. seealso:: + + There is a variation called ``StateModuleHelper``, which builds on top of the features provided by MH. + See :ref:`ansible_collections.community.general.docsite.guide_modulehelper.statemh` below for more details. + +The easiest way of specifying the module is to create the class variable ``module`` with a dictionary +containing the exact arguments that would be passed as parameters to ``AnsibleModule``. +If you prefer to create the ``AnsibleModule`` object yourself, just assign it to the ``module`` class variable. +MH also accepts a parameter ``module`` in its constructor; if that parameter is used, +it overrides the class variable. This parameter can also be either a ``dict`` or an ``AnsibleModule``. + +Beyond the definition of the module, there are other variables that can be used to control aspects +of MH's behavior. These variables should be set at the very beginning of the class, and their semantics are +explained throughout this document. + +The main logic of MH happens in the ``ModuleHelper.run()`` method, which looks like: + +.. code-block:: python + + @module_fails_on_exception + def run(self): + self.__init_module__() + self.__run__() + self.__quit_module__() + output = self.output + if 'failed' not in output: + output['failed'] = False + self.module.exit_json(changed=self.has_changed(), **output) + +The method ``ModuleHelper.__run__()`` must be implemented by the module, and most +modules will be able to perform their actions by implementing only that MH method. +However, in some cases, you might want to execute actions before or after the main tasks, in which case +you should implement ``ModuleHelper.__init_module__()`` and ``ModuleHelper.__quit_module__()`` respectively. + +Note that the output comes from ``self.output``, which is a ``@property`` method. +By default, that property will collect all the variables that are marked for output and return them in a dictionary with their values. +Moreover, the default ``self.output`` will also handle Ansible ``facts`` and *diff mode*. +Also note the changed status comes from ``self.has_changed()``, which is usually calculated from variables that are marked +to track changes in their content. + +.. seealso:: + + More details in sections + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput` and + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. + +.. seealso:: + + See more about the decorator + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco` below. + + +Another way to write the example from the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +would be: + +.. 
code-block:: python + + def __init_module__(self): + self.vars.original_message = '' + self.vars.message = '' + + def __run__(self): + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + + def __quit_module__(self): + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + +Notice that there are no calls to ``module.exit_json()`` or ``module.fail_json()``: if the module fails, raise an exception. +You can use the convenience method ``self.do_raise()`` or raise the exception as usual in Python to do that. +If no exception is raised, then the module succeeds. + +.. seealso:: + + See more about exceptions in section + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.exceptions` below. + +Ansible modules must have a ``main()`` function and the usual test for ``'__main__'``. When using MH that should look like: + +.. code-block:: python + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + +The class method ``execute()`` is nothing more than a convenience shortcut for: + +.. code-block:: python + + m = MyTest() + m.run() + +Optionally, an ``AnsibleModule`` may be passed as a parameter to ``execute()``. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput: + +Parameters, variables, and output +""""""""""""""""""""""""""""""""" + +All the parameters automatically become variables in the ``self.vars`` attribute, which is of the ``VarDict`` type. +By using ``self.vars``, you get a central mechanism to access the parameters but also to expose variables as return values of the module. +As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated with them. +One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values. + +.. note:: + + The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself. + However, it has been factored out to become a utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, + and the older implementation was removed in community.general 11.0.0. + + Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transition to the new + implementation, but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code. + +Contrary to new variables created in ``VarDict``, module parameters are not set for output by default. +If you want to include some module parameters in the output, list them in the ``output_params`` class variable. + +.. code-block:: python + + class MyTest(ModuleHelper): + output_params = ('state', 'name') + ... + +.. important:: + + The variable names listed in ``output_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +Another neat feature provided by MH through ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``. +Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable. + +.. 
important:: + + The variable names listed in ``change_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +.. seealso:: + + See more about this in + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. + +Similarly, if you want to use Ansible's diff mode, you can set the metadata ``diff=True`` and ``diff_params`` for module parameters. +With that, MH will automatically generate the diff output for variables that have changed. + +.. code-block:: python + + class MyTest(ModuleHelper): + diff_params = ('value', ) + + def __run__(self): + # example from community.general.gio_mime + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + +.. important:: + + The variable names listed in ``diff_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters. +Additionally, you must specify ``facts_name``, as in: + +.. code-block:: python + + class VolumeFacts(ModuleHelper): + facts_name = 'volume_facts' + + def __init_module__(self): + self.vars.set("volume", 123, fact=True) + +That generates an Ansible fact like: + +.. code-block:: yaml+jinja + + - name: Obtain volume facts + some.collection.volume_facts: + # parameters + + - name: Print volume facts + debug: + msg: Volume fact is {{ ansible_facts.volume_facts.volume }} + +.. important:: + + The variable names listed in ``fact_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +.. important:: + + If ``facts_name`` is not set, the module does not generate any facts. + + +.. _ansible_collections.community.general.docsite.guide_modulehelper.changes: + +Handling changes +"""""""""""""""" + +In MH there are many ways to indicate change in the module execution. Here they are: + +Tracking changes in variables +----------------------------- + +As explained above, you can enable change tracking in any number of variables in ``self.vars``. +By the end of the module execution, if any of those variables has a value different from the first value assigned to it, +then that will be picked up by MH and signalled as changed in the module output. +See the example below to learn how you can enable change tracking in variables: + +.. code-block:: python + + # using __init_module__() as example, it works the same in __run__() and __quit_module__() + def __init_module__(self): + # example from community.general.ansible_galaxy_install + self.vars.set("new_roles", {}, change=True) + + # example of "hidden" variable used only to track change in a value from community.general.gconftool2 + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + + # enable change-tracking without assigning value + self.vars.set_meta("new_roles", change=True) + + # if you must forcibly set an initial value to the variable + self.vars.set_meta("new_roles", initial_value=[]) + ... + +If the end value of any variable marked ``change`` is different from its initial value, then MH will return ``changed=True``.
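+ +To make this concrete, here is a minimal sketch of change tracking end to end (the module context and the ``packages`` variable are illustrative only, not taken from any real module): + +.. code-block:: python + + def __run__(self): + # the initial value is recorded when the variable is created with change=True + self.vars.set("packages", ["pkg1"], change=True) + + # ... perform the actual work of the module ... + + # the end value differs from the initial one, so self.has_changed() + # picks it up and the module output reports changed=True + self.vars.packages = ["pkg1", "pkg2"]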
+ +Indicating changes with ``changed`` +----------------------------------- + +If you want to indicate change directly in the code, then use the ``self.changed`` property in MH. +Beware that this is a ``@property`` method in MH, with both a *getter* and a *setter*. +By default, that hidden field is set to ``False``. + +Effective change +---------------- + +The effective outcome for the module is determined in the ``self.has_changed()`` method, and it consists of the logical *OR* operation +between ``self.changed`` and the change calculated from ``self.vars``. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.exceptions: + +Exceptions +"""""""""" + +In MH, instead of calling ``module.fail_json()``, you can just raise an exception. +The output variables are collected the same way they would be for a successful execution. +However, you can set output variables specifically for that exception, if you so choose. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelperException + + def __init_module__(self): + if not complex_validation(): + self.do_raise("Validation failed!") + + # Or passing output variables + awesomeness = calculate_awesomeness() + if awesomeness > 1000: + self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness}) + # which is just a convenience shortcut for + raise ModuleHelperException("...", update_output={...}) + +All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call. +However, if you do want to call ``self.module.fail_json()`` yourself, it will work; +just keep in mind that there will be no automatic handling of output variables in that case. + +Behind the scenes, all ``do_raise()`` does is to raise a ``ModuleHelperException``. +If you want to create specialized error handling for your code, the best way is to extend that class and raise it when needed. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh: + +StateModuleHelper +^^^^^^^^^^^^^^^^^ + +Many modules use a parameter ``state`` that effectively controls the exact action performed by the module, such as +``state=present`` or ``state=absent`` for installing or removing packages. +By using ``StateModuleHelper`` you can make your code like the excerpt from the ``gconftool2`` module below: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + class GConftool(StateModuleHelper): + ... + module = dict( + ... + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + ... + + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + ... 
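+ + # Note (explanatory comment added for this guide, not part of the original module): + # SMH selects which method to run from the value of the ``state`` parameter, so + # ``state=absent`` dispatches to state_absent() and ``state=present`` to state_present().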
+ + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', None, fact=True) + self.vars._value = None + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value + +Note that the method ``__run__()`` is implemented in ``StateModuleHelper``; all you need to implement are the methods ``state_<value>``. +In the example above, :ansplugin:`community.general.gconftool2#module` only has two states, ``present`` and ``absent``, thus, ``state_present()`` and ``state_absent()``. + +If the controlling parameter is not called ``state``, as in the :ansplugin:`community.general.jira#module` module, just let SMH know about it: + +.. code-block:: python + + class JIRA(StateModuleHelper): + state_param = 'operation' + + def operation_create(self): + ... + + def operation_search(self): + ... + +Lastly, if the module is called with ``state=somevalue`` and the method ``state_somevalue`` +is not implemented, SMH will fall back to calling the method ``__state_fallback__()``. +By default, this method will raise a ``ValueError`` indicating the method was not found. +Naturally, you can override that method to write a default implementation, as in :ansplugin:`community.general.locale_gen#module`: + +.. code-block:: python + + def __state_fallback__(self): + if self.vars.state_tracking == self.vars.state: + return + if self.vars.ubuntu_mode: + self.apply_change_ubuntu(self.vars.state, self.vars.name) + else: + self.apply_change(self.vars.state, self.vars.name) + +That module has only the states ``present`` and ``absent``, and the code for both is the one in the fallback method. + +.. note:: + + The name of the fallback method **does not change** if you set a different value of ``state_param``. + + +Other Conveniences +^^^^^^^^^^^^^^^^^^ + +Delegations to AnsibleModule +"""""""""""""""""""""""""""" + +The MH properties and methods below are delegated as-is to the underlying ``AnsibleModule`` instance in ``self.module``: + +- ``check_mode`` +- ``get_bin_path()`` +- ``warn()`` +- ``deprecate()`` + +Additionally, MH will also delegate: + +- ``diff_mode`` to ``self.module._diff`` +- ``verbosity`` to ``self.module._verbosity`` + +Starting in community.general 10.3.0, MH will also delegate the method ``debug`` to ``self.module``. +If any existing module already has a ``debug`` attribute defined, a warning message will be generated, +requesting it to be renamed. Upon the release of community.general 12.0.0, the delegation will be +preemptive and will override any existing method or property in the subclasses. + +Decorators +"""""""""" + +The following decorators should only be used within a ``ModuleHelper`` class. + +@cause_changes +-------------- + +This decorator will control whether the outcome of the method will cause the module to signal change in its output. +If the method completes without raising an exception, it is considered to have succeeded; otherwise, it will have failed. + +The decorator has a parameter ``when`` that accepts three different values: ``success``, ``failure``, and ``always``. +There are also two legacy parameters, ``on_success`` and ``on_failure``, that will be deprecated, so do not use them.
+The value of ``changed`` in the module output will be set to ``True``: + +- ``when="success"`` and the method completes without raising an exception. +- ``when="failure"`` and the method raises an exception. +- ``when="always"``, regardless of the method raising an exception or not. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes + + # adapted excerpt from the community.general.jira module + class JIRA(StateModuleHelper): + @cause_changes(when="success") + def operation_create(self): + ... + +If ``when`` has a different value or no parameters are specified, the decorator will have no effect whatsoever. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco: + +@module_fails_on_exception +-------------------------- + +In a method using this decorator, if an exception is raised, the text message of that exception will be captured +by the decorator and used to call ``self.module.fail_json()``. +In most cases there will be no need to use this decorator, because ``ModuleHelper.run()`` already uses it. + +@check_mode_skip +---------------- + +If the module is running in check mode, this decorator will prevent the method from executing. +The return value in that case is ``None``. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import check_mode_skip + + # adapted excerpt from the community.general.locale_gen module + class LocaleGen(StateModuleHelper): + @check_mode_skip + def __state_fallback__(self): + ... + + +@check_mode_skip_returns +------------------------ + +This decorator is similar to the previous one, but the developer can control the return value for the method when running in check mode. +It is used with one of two parameters. One is ``callable``, and the return value in check mode will be ``callable(self, *args, **kwargs)``, +where ``self`` is the ``ModuleHelper`` instance and the union of ``args`` and ``kwargs`` will contain all the parameters passed to the method. + +The other option is to use the parameter ``value``, in which case the method will return ``value`` when in check mode. + + +References +^^^^^^^^^^ + +- `Ansible Developer Guide `_ +- `Creating a module `_ +- `Returning ansible facts `_ +- :ref:`ansible_collections.community.general.docsite.guide_vardict` + + +.. versionadded:: 3.1.0 diff --git a/docs/docsite/rst/guide_online.rst b/docs/docsite/rst/guide_online.rst new file mode 100644 index 0000000000..c233b403e8 --- /dev/null +++ b/docs/docsite/rst/guide_online.rst @@ -0,0 +1,49 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_online: + +**************** +Online.net Guide +**************** + +Introduction +============ + +Online is a French hosting company mainly known for providing bare-metal servers named Dedibox. +Check it out: `https://www.online.net/en `_ + +Dynamic inventory for Online resources +-------------------------------------- + +Ansible has a dynamic inventory plugin that can list your resources. + +1. Create a YAML configuration such as ``online_inventory.yml`` with this content: + + .. code-block:: yaml + + plugin: community.general.online + +2. Set your ``ONLINE_TOKEN`` environment variable with your token. 
+ + You need to open an account and log into it before you can get a token. + You can find your token at the following page: `https://console.online.net/en/api/access `_ + +3. You can test that your inventory is working by running: + + .. code-block:: console + + $ ansible-inventory -v -i online_inventory.yml --list + + +4. Now you can run your playbook or any other module with this inventory: + + .. code-block:: ansible-output + + $ ansible all -i online_inventory.yml -m ping + sd-96735 | SUCCESS => { + "changed": false, + "ping": "pong" + } diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst new file mode 100644 index 0000000000..95b38dddd0 --- /dev/null +++ b/docs/docsite/rst/guide_packet.rst @@ -0,0 +1,214 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_packet: + +********************************** +Packet.net Guide +********************************** + +Introduction +============ + +`Packet.net `_ is a bare metal infrastructure host that is supported by the community.general collection through six cloud modules. The six modules are: + +- :ansplugin:`community.general.packet_device#module`: manages servers on Packet. You can use this module to create, restart and delete devices. +- :ansplugin:`community.general.packet_ip_subnet#module`: assigns an IP subnet to a bare metal server +- :ansplugin:`community.general.packet_project#module`: creates/deletes a project on the Packet host +- :ansplugin:`community.general.packet_sshkey#module`: adds a public SSH key from file or value to the Packet infrastructure. Every subsequently created device will have this public key installed in ``.ssh/authorized_keys``. +- :ansplugin:`community.general.packet_volume#module`: creates/deletes a volume on the Packet host +- :ansplugin:`community.general.packet_volume_attachment#module`: attaches/detaches a volume to/from a device on the Packet host + +Note that this guide assumes you are familiar with Ansible and how it works. If you are not, have a look at their :ref:`docs ` before getting started. + +Requirements +============ + +The Packet modules connect to the Packet API using the `packet-python package `_. You can install it with pip: + +.. code-block:: console + + $ pip install packet-python + +In order to check the state of devices created by Ansible on Packet, it is a good idea to install one of the `Packet CLI clients `_. Otherwise, you can check them through the `Packet portal `_. + +To use the modules, you will need a Packet API token. You can generate an API token through the Packet portal `here `__. The simplest way to authenticate yourself is to set the Packet API token in an environment variable: + +.. code-block:: console + + $ export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs + +If you are not comfortable exporting your API token, you can pass it as a parameter to the modules. + +On Packet, devices and reserved IP addresses belong to `projects `_. In order to use the ``packet_device`` module, you need to specify the UUID of the project in which you want to create or manage devices. You can find a project's UUID in the Packet portal `here `_ (it is just under the project table) or through one of the available `CLIs `_. + + +If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. 
code-block:: console + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing key pair, just copy the private and public key over to the playbook directory. + + +Device Creation +=============== + +The following code block is a simple playbook that creates one `Type 0 `_ server (the ``plan`` parameter). You have to supply ``plan`` and ``operating_system``. ``location`` defaults to ``ewr1`` (Parsippany, NJ). You can find all the possible values for the parameters through a `CLI client `_. + +.. code-block:: yaml+jinja + + # playbook_create.yml + + - name: Create Ubuntu device + hosts: localhost + tasks: + + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: tutorial key + + - community.general.packet_device: + project_id: + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__. + +If you get an error with the message "failed to set machine state present, error: Error 404: Not Found", please verify your project UUID. + + +Updating Devices +================ + +The two parameters used to uniquely identify Packet devices are: "device_ids" and "hostnames". Both parameters accept either a single string (later converted to a one-element list), or a list of strings. + +The ``device_ids`` and ``hostnames`` parameters are mutually exclusive. The following values are all acceptable: + +- device_ids: ``a27b7a83-fc93-435b-a128-47a5b04f2dcf`` + +- hostnames: ``mydev1`` + +- device_ids: ``[a27b7a83-fc93-435b-a128-47a5b04f2dcf, 4887130f-0ccd-49a0-99b0-323c1ceb527b]`` + +- hostnames: ``[mydev1, mydev2]`` + +In addition, hostnames can contain a special ``%d`` formatter along with a ``count`` parameter that lets you easily expand hostnames that follow a simple name and number pattern; in other words, ``hostnames: "mydev%d", count: 2`` will expand to [mydev1, mydev2]. + +If your playbook acts on existing Packet devices, you can only pass the ``hostname`` and ``device_ids`` parameters. The following playbook shows how you can reboot a specific Packet device by setting the ``hostname`` parameter: + +.. code-block:: yaml+jinja + + # playbook_reboot.yml + + - name: reboot myserver + hosts: localhost + tasks: + + - community.general.packet_device: + project_id: + hostnames: myserver + state: rebooted + +You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field: + +.. code-block:: yaml+jinja + + # playbook_remove.yml + + - name: remove a device + hosts: localhost + tasks: + + - community.general.packet_device: + project_id: + device_ids: + state: absent + + +More Complex Playbooks +====================== + +In this example, we will create a CoreOS cluster with `user data `_. + + +The CoreOS cluster will use `etcd `_ for discovery of other servers in the cluster. Before provisioning your servers, you will need to generate a discovery token for your cluster: + +.. code-block:: console + + $ curl -w "\n" 'https://discovery.etcd.io/new?size=3' + +The following playbook will create an SSH key, 3 Packet servers, and then wait until SSH is ready (or until 5 minutes passed). Make sure to substitute the discovery token URL in ``user_data``, and the ``project_id`` before running ``ansible-playbook``. 
Also, feel free to change ``plan`` and ``facility``. + +.. code-block:: yaml+jinja + + # playbook_coreos.yml + + - name: Start 3 CoreOS nodes in Packet and wait until SSH is ready + hosts: localhost + tasks: + + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: new + + - community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_beta + plan: baremetal_0 + facility: ewr1 + project_id: + wait_for_public_IPv: 4 + user_data: | + # cloud-config + coreos: + etcd2: + discovery: https://discovery.etcd.io/ + advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 + initial-advertise-peer-urls: http://$private_ipv4:2380 + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + listen-peer-urls: http://$private_ipv4:2380 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd2.service + command: start + - name: fleet.service + command: start + register: newhosts + + - name: wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + loop: "{{ newhosts.results[0].devices }}" + + +As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect. + +The second module call provisions 3 Packet Type 0 (specified using the ``plan`` parameter) servers in the project identified by the ``project_id`` parameter. The servers are all provisioned with CoreOS beta (the ``operating_system`` parameter) and are customized with cloud-config user data passed to the ``user_data`` parameter. + +The ``packet_device`` module has a ``wait_for_public_IPv`` that is used to specify the version of the IP address to wait for (valid values are ``4`` or ``6`` for IPv4 or IPv6). If specified, Ansible will wait until the GET API call for a device contains an Internet-routeable IP address of the specified version. When referring to an IP address of a created device in subsequent module calls, it is wise to use the ``wait_for_public_IPv`` parameter, or ``state: active`` in the packet_device module call. + +Run the playbook: + +.. code-block:: console + + $ ansible-playbook playbook_coreos.yml + +Once the playbook quits, your new devices should be reachable through SSH. Try to connect to one and check if etcd has started properly: + +.. code-block:: console + + tomk@work $ ssh -i id_rsa core@$one_of_the_servers_ip + core@coreos-one ~ $ etcdctl cluster-health + +If you have any questions or comments let us know! help@packet.net diff --git a/docs/docsite/rst/guide_scaleway.rst b/docs/docsite/rst/guide_scaleway.rst new file mode 100644 index 0000000000..f3b7b24e0e --- /dev/null +++ b/docs/docsite/rst/guide_scaleway.rst @@ -0,0 +1,320 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_scaleway: + +************** +Scaleway Guide +************** + +Introduction +============ + +`Scaleway `_ is a cloud provider supported by the community.general collection through a set of plugins and modules. +Those modules are: + +- :ansplugin:`community.general.scaleway_compute#module`: manages servers on Scaleway. 
You can use this module to create, restart and delete servers.
+- :ansplugin:`community.general.scaleway_compute_private_network#module`
+- :ansplugin:`community.general.scaleway_container#module`
+- :ansplugin:`community.general.scaleway_container_info#module`
+- :ansplugin:`community.general.scaleway_container_namespace_info#module`
+- :ansplugin:`community.general.scaleway_container_namespace#module`
+- :ansplugin:`community.general.scaleway_container_registry_info#module`
+- :ansplugin:`community.general.scaleway_container_registry#module`
+- :ansplugin:`community.general.scaleway_database_backup#module`
+- :ansplugin:`community.general.scaleway_function#module`
+- :ansplugin:`community.general.scaleway_function_info#module`
+- :ansplugin:`community.general.scaleway_function_namespace_info#module`
+- :ansplugin:`community.general.scaleway_function_namespace#module`
+- :ansplugin:`community.general.scaleway_image_info#module`
+- :ansplugin:`community.general.scaleway_ip#module`
+- :ansplugin:`community.general.scaleway_ip_info#module`
+- :ansplugin:`community.general.scaleway_lb#module`
+- :ansplugin:`community.general.scaleway_organization_info#module`
+- :ansplugin:`community.general.scaleway_private_network#module`
+- :ansplugin:`community.general.scaleway_security_group#module`
+- :ansplugin:`community.general.scaleway_security_group_info#module`
+- :ansplugin:`community.general.scaleway_security_group_rule#module`
+- :ansplugin:`community.general.scaleway_server_info#module`
+- :ansplugin:`community.general.scaleway_snapshot_info#module`
+- :ansplugin:`community.general.scaleway_sshkey#module`: adds a public SSH key from a file or value to the Scaleway infrastructure. Every subsequently created server will have this public key installed in ``.ssh/authorized_keys``.
+- :ansplugin:`community.general.scaleway_user_data#module`
+- :ansplugin:`community.general.scaleway_volume#module`: manages volumes on Scaleway.
+- :ansplugin:`community.general.scaleway_volume_info#module`
+
+The plugins are:
+
+- :ansplugin:`community.general.scaleway#inventory`: inventory plugin
+
+
+.. note::
+   This guide assumes you are familiar with Ansible and how it works.
+   If you are not, have a look at :ref:`ansible_documentation` before getting started.
+
+Requirements
+============
+
+The Scaleway modules and inventory script connect to the Scaleway API using the `Scaleway REST API `_.
+To use the modules and inventory script, you will need a Scaleway API token.
+You can generate an API token through the `Scaleway console's credential page `__.
+The simplest way to authenticate yourself is to set the Scaleway API token in an environment variable:
+
+.. code-block:: console
+
+   $ export SCW_TOKEN=00000000-1111-2222-3333-444444444444
+
+If you are not comfortable exporting your API token, you can pass it as a parameter to the modules using the ``api_token`` argument.
+
+If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as:
+
+.. code-block:: console
+
+   $ ssh-keygen -t rsa -f ./id_rsa
+
+If you want to use an existing key pair, just copy the private and public key over to the playbook directory.
+
+How to add an SSH key?
+======================
+
+Connections to Scaleway Compute nodes use Secure Shell (SSH).
+SSH keys are stored at the account level, which means that you can reuse the same SSH key on multiple nodes.
+The first step to configure Scaleway compute resources is to have at least one SSH key configured.
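+
+If you generated the key pair above, the public key string used in the examples below is simply the contents of the ``.pub`` file (output abbreviated; your key will differ):
+
+.. code-block:: console
+
+   $ cat ./id_rsa.pub
+   ssh-rsa AAAAB...424242 developer@example.com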
+ +:ansplugin:`community.general.scaleway_sshkey#module` is a module that manages SSH keys on your Scaleway account. +You can add an SSH key to your account by including the following task in a playbook: + +.. code-block:: yaml+jinja + + - name: "Add SSH key" + community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + +The ``ssh_pub_key`` parameter contains your ssh public key as a string. Here is an example inside a playbook: + + +.. code-block:: yaml+jinja + + - name: Test SSH key lifecycle on a Scaleway account + hosts: localhost + gather_facts: false + environment: + SCW_API_KEY: "" + + tasks: + + - community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAAB...424242 developer@example.com" + state: present + register: result + + - ansible.builtin.assert: + that: + - result is success and result is changed + +How to create a compute instance? +================================= + +Now that we have an SSH key configured, the next step is to spin up a server! +:ansplugin:`community.general.scaleway_compute#module` is a module that can create, update and delete Scaleway compute instances: + +.. code-block:: yaml+jinja + + - name: Create a server + community.general.scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + +Here are the parameter details for the example shown above: + +- ``name`` is the name of the instance (the one that will show up in your web console). +- ``image`` is the UUID of the system image you would like to use. + A list of all images is available for each availability zone. +- ``organization`` represents the organization that your account is attached to. +- ``region`` represents the Availability Zone which your instance is in (for this example, ``par1`` and ``ams1``). +- ``commercial_type`` represents the name of the commercial offers. + You can check out the Scaleway pricing page to find which instance is right for you. + +Take a look at this short playbook to see a working example using ``scaleway_compute``: + +.. code-block:: yaml+jinja + + - name: Test compute instance lifecycle on a Scaleway account + hosts: localhost + gather_facts: false + environment: + SCW_API_KEY: "" + + tasks: + + - name: Create a server + register: server_creation_task + community.general.scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + + - ansible.builtin.debug: + var: server_creation_task + + - ansible.builtin.assert: + that: + - server_creation_task is success + - server_creation_task is changed + + - name: Run it + community.general.scaleway_compute: + name: foobar + state: running + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + tags: + - web_server + register: server_run_task + + - ansible.builtin.debug: + var: server_run_task + + - ansible.builtin.assert: + that: + - server_run_task is success + - server_run_task is changed + +Dynamic Inventory Plugin +======================== + +Ansible ships with :ansplugin:`community.general.scaleway#inventory`. +You can now get a complete inventory of your Scaleway resources through this plugin and filter it on +different parameters (``regions`` and ``tags`` are currently supported). + +Let us create an example! 
+Suppose that we want to get all hosts that have the tag ``web_server``.
+Create a file named ``scaleway_inventory.yml`` with the following content:
+
+.. code-block:: yaml+jinja
+
+    plugin: community.general.scaleway
+    regions:
+      - ams1
+      - par1
+    tags:
+      - web_server
+
+This inventory means that we want all hosts that have the tag ``web_server`` in the zones ``ams1`` and ``par1``.
+Once you have configured this file, you can get the information using the following command:
+
+.. code-block:: console
+
+    $ ansible-inventory --list -i scaleway_inventory.yml
+
+The output will be:
+
+.. code-block:: json
+
+    {
+        "_meta": {
+            "hostvars": {
+                "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d": {
+                    "ansible_verbosity": 6,
+                    "arch": "x86_64",
+                    "commercial_type": "START1-S",
+                    "hostname": "foobar",
+                    "ipv4": "192.0.2.1",
+                    "organization": "00000000-1111-2222-3333-444444444444",
+                    "state": "running",
+                    "tags": [
+                        "web_server"
+                    ]
+                }
+            }
+        },
+        "all": {
+            "children": [
+                "ams1",
+                "par1",
+                "ungrouped",
+                "web_server"
+            ]
+        },
+        "ams1": {},
+        "par1": {
+            "hosts": [
+                "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d"
+            ]
+        },
+        "ungrouped": {},
+        "web_server": {
+            "hosts": [
+                "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d"
+            ]
+        }
+    }
+
+As you can see, we get different groups of hosts.
+``par1`` and ``ams1`` are groups based on location.
+``web_server`` is a group based on a tag.
+
+If a filter parameter is not defined, the plugin assumes that all possible values are wanted.
+This means that for each tag that exists on your Scaleway compute nodes, a group based on that tag is created.
+
+Scaleway S3 object storage
+==========================
+
+`Object Storage `_ allows you to store any kind of objects (documents, images, videos, and so on).
+As the Scaleway API is S3 compatible, Ansible supports it natively through the amazon.aws modules: :ansplugin:`amazon.aws.s3_bucket#module`, :ansplugin:`amazon.aws.s3_object#module`.
+
+You can find many examples in the `scaleway_s3 integration tests `_.
+
+.. code-block:: yaml+jinja
+
+    - hosts: myserver
+      vars:
+        scaleway_region: nl-ams
+        s3_url: https://s3.nl-ams.scw.cloud
+      environment:
+        # AWS_ACCESS_KEY matches your scaleway organization id available at https://cloud.scaleway.com/#/account
+        AWS_ACCESS_KEY: 00000000-1111-2222-3333-444444444444
+        # AWS_SECRET_KEY matches a secret token that you can retrieve at https://cloud.scaleway.com/#/credentials
+        AWS_SECRET_KEY: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
+      module_defaults:
+        group/amazon.aws.aws:
+          s3_url: '{{ s3_url }}'
+          region: '{{ scaleway_region }}'
+      tasks:
+        # use a fact instead of a variable, otherwise the template is evaluated each time the variable is used
+        - ansible.builtin.set_fact:
+            bucket_name: "{{ 99999999 | random | to_uuid }}"
+
+        # "requester_pays:" is mandatory because Scaleway does not implement the related API
+        # another way is to use amazon.aws.s3_object with "mode: create", as in the next task
+        - amazon.aws.s3_bucket:
+            name: '{{ bucket_name }}'
+            requester_pays:
+
+        - name: Another way to create the bucket
+          amazon.aws.s3_object:
+            bucket: '{{ bucket_name }}'
+            mode: create
+            encrypt: false
+          register: bucket_creation_check
+
+        - name: Add something in the bucket
+          amazon.aws.s3_object:
+            mode: put
+            bucket: '{{ bucket_name }}'
+            src: /tmp/test.txt  # needs to be created before
+            object: test.txt
+            encrypt: false  # server side encryption must be disabled
diff --git a/docs/docsite/rst/guide_uthelper.rst b/docs/docsite/rst/guide_uthelper.rst
new file mode 100644
index 0000000000..c4a4110d70
--- /dev/null
+++ b/docs/docsite/rst/guide_uthelper.rst
@@ -0,0 +1,394 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_uthelper:
+
+UTHelper Guide
+==============
+
+Introduction
+^^^^^^^^^^^^
+
+``UTHelper`` was written to reduce the boilerplate code used in unit tests for modules.
+It was originally written to handle tests of modules that run external commands using ``AnsibleModule.run_command()``.
+At the time of writing (Feb 2025) that remains the only type of test you can use
+``UTHelper`` for, but it aims to provide support for other types of interactions.
+
+To date, there have been many different ways to implement unit tests that validate a module based on the execution of external commands. See some examples:
+
+* `test_apk.py `_ - A very simple one
+* `test_bootc_manage.py `_ -
+  This one has more test cases, but do notice how the code is repeated amongst them.
+* `test_modprobe.py `_ -
+  This one has 15 tests in it, but to achieve that it declares 8 classes, repeating quite a lot of code.
+
+As you can see, there is no consistency in the way these tests are written -
+they all do the same thing eventually, but each one is written in a very distinct way.
+
+``UTHelper`` aims to:
+
+* provide a consistent idiom to define unit tests
+* reduce the code to a bare minimum
+* define tests as data instead, and
+* allow the test case definitions to be expressed not only as a Python data structure but also as YAML content
+
+Quickstart
+""""""""""
+
+To use ``UTHelper``, your test module will need only a bare minimum of code:
+
+.. code-block:: python
+
+    # tests/unit/plugins/modules/test_ansible_module.py
+    from ansible_collections.community.general.plugins.modules import ansible_module
+    from .uthelper import UTHelper, RunCommandMock
+
+
+    UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+Then, in the test specification file, you have:
+
+.. code-block:: yaml
+
+    # tests/unit/plugins/modules/test_ansible_module.yaml
+    test_cases:
+      - id: test_ansible_module
+        flags:
+          diff: true
+        input:
+          state: present
+          name: Roger the Shrubber
+        output:
+          shrubbery:
+            looks: nice
+            price: not too expensive
+          changed: true
+          diff:
+            before:
+              shrubbery: null
+            after:
+              shrubbery:
+                looks: nice
+                price: not too expensive
+        mocks:
+          run_command:
+            - command: [/testbin/shrubber, --version]
+              rc: 0
+              out: "2.80.0\n"
+              err: ''
+            - command: [/testbin/shrubber, --make-shrubbery]
+              rc: 0
+              out: 'Shrubbery created'
+              err: ''
+
+.. note::
+
+   If you prefer to pick a different YAML file for the test cases, or if you prefer to define them in plain Python,
+   you can use the convenience methods ``UTHelper.from_file()`` and ``UTHelper.from_spec()``, respectively.
+   See more details below.
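+
+With these two files in place, the generated test cases run like any other unit tests, for example (the path assumes the quickstart layout above):
+
+.. code-block:: console
+
+   $ pytest -v tests/unit/plugins/modules/test_ansible_module.py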
+
+
+Using ``UTHelper``
+^^^^^^^^^^^^^^^^^^
+
+Test Module
+"""""""""""
+
+``UTHelper`` is **strictly for unit tests**. To use it, you import the ``.uthelper.UTHelper`` class.
+As mentioned in different parts of this guide, there are three different mechanisms to load the test cases.
+
+.. seealso::
+
+   See the UTHelper class reference below for API details on the three different mechanisms.
+
+
+The easiest and most recommended way of using ``UTHelper`` is literally the example shown above.
+See a real-world example at
+`test_gconftool2.py `_.
+
+The ``from_module()`` method will pick up the filename of the test module (in the example above, ``tests/unit/plugins/modules/test_gconftool2.py``)
+and it will search for ``tests/unit/plugins/modules/test_gconftool2.yaml`` (or ``.yml`` if that is not found).
+In that file it will expect to find the test specification expressed in YAML format, conforming to the structure described in the Test Specification section below.
+
+If you prefer to read the test specification from a different file path, use ``from_file()``, passing the file handle of the YAML file.
+
+And, if for any reason you prefer or need to pass the data structure rather than dealing with YAML files, use the ``from_spec()`` method.
+A real-world example for that can be found at
+`test_snap.py `_.
+
+
+Test Specification
+""""""""""""""""""
+
+The structure of the test specification data is described below.
+
+Top level
+---------
+
+At the top level there are two accepted keys:
+
+- ``anchors: dict``
+  Optional. Placeholder for you to define YAML anchors that can be reused in the test cases.
+  Its contents are never accessed directly by UTHelper.
+- ``test_cases: list``
+  Mandatory. List of test cases, see below for their definition.
+
+Test cases
+----------
+
+You write the test cases with five elements:
+
+- ``id: str``
+  Mandatory. Used to identify the test case.
+
+- ``flags: dict``
+  Optional. Flags controlling the behavior of the test case. All flags are optional. Accepted flags:
+
+  * ``check: bool``: set to ``true`` if the module is to be executed in **check mode**.
+  * ``diff: bool``: set to ``true`` if the module is to be executed in **diff mode**.
+  * ``skip: str``: set the test case to be skipped, providing the message for ``pytest.skip()``.
+  * ``xfail: str``: set the test case to expect failure, providing the message for ``pytest.xfail()``.
+
+- ``input: dict``
+  Optional. Parameters for the Ansible module; it can be empty.
+
+- ``output: dict``
+  Optional. Expected return values from the Ansible module.
+  All RV names used here are expected to be found in the module output, but not all RVs in the output must be listed here.
+  It can include special RVs such as ``changed`` and ``diff``.
+  It can be empty.
+
+- ``mocks: dict``
+  Optional. Mocked interactions, ``run_command`` being the only one supported for now.
+  Each key in this dictionary refers to one subclass of ``TestCaseMock`` and its
+  structure is dictated by the ``TestCaseMock`` subclass implementation.
+  All keys are expected to be named using snake case, as in ``run_command``.
+  The ``TestCaseMock`` subclass is responsible for defining the name used in the test specification.
+  The structure for that specification depends on the implementing class.
+  See the ``RunCommandMock`` specification below for more details.
+
+Example using YAML
+------------------
+
+We recommend using ``UTHelper`` with the test specification read from a YAML file.
+See the example below of what one actually looks like (excerpt from ``test_opkg.yaml``):
+
+.. code-block:: yaml
+
+    ---
+    anchors:
+      environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
+    test_cases:
+      - id: install_zlibdev
+        input:
+          name: zlib-dev
+          state: present
+        output:
+          msg: installed 1 package(s)
+        mocks:
+          run_command:
+            - command: [/testbin/opkg, --version]
+              environ: *env-def
+              rc: 0
+              out: ''
+              err: ''
+            - command: [/testbin/opkg, list-installed, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: ''
+              err: ''
+            - command: [/testbin/opkg, install, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: |
+                Installing zlib-dev (1.2.11-6) to root...
+                Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
+                Installing zlib (1.2.11-6) to root...
+                Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk
+                Configuring zlib.
+                Configuring zlib-dev.
+              err: ''
+            - command: [/testbin/opkg, list-installed, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: |
+                zlib-dev - 1.2.11-6
+              err: ''
+      - id: install_zlibdev_present
+        input:
+          name: zlib-dev
+          state: present
+        output:
+          msg: package(s) already present
+        mocks:
+          run_command:
+            - command: [/testbin/opkg, --version]
+              environ: *env-def
+              rc: 0
+              out: ''
+              err: ''
+            - command: [/testbin/opkg, list-installed, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: |
+                zlib-dev - 1.2.11-6
+              err: ''
+
+TestCaseMocks Specifications
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``TestCaseMock`` subclass is free to define the expected data structure.
+
+RunCommandMock Specification
+""""""""""""""""""""""""""""
+
+``RunCommandMock`` mocks are specified with the key ``run_command``, which expects a ``list`` whose elements follow this structure:
+
+- ``command: Union[list, str]``
+  Mandatory. The command that is expected to be executed by the module. It corresponds to the parameter ``args`` of the ``AnsibleModule.run_command()`` call.
+  It can be either a list or a string, though the list form is generally recommended.
+- ``environ: dict``
+  Mandatory. All other parameters passed to the ``AnsibleModule.run_command()`` call.
+  The most commonly used are ``environ_update`` and ``check_rc``.
+  It must include all parameters the Ansible module uses in the ``AnsibleModule.run_command()`` call, otherwise the test fails.
+- ``rc: int``
+  Mandatory. The return code of the command execution.
+  As usual in shell scripting, a value of ``0`` means success, whereas any other number is an error code.
+- ``out: str``
+  Mandatory. The *stdout* result of the command execution, as a single string containing zero or more lines.
+- ``err: str``
+  Mandatory. The *stderr* result of the command execution, as a single string containing zero or more lines.
+
+
+``UTHelper`` Reference
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. py:module:: .uthelper
+
+.. py:class:: UTHelper
+
+   A class to encapsulate unit tests.
+
+   .. py:staticmethod:: from_spec(ansible_module, test_module, test_spec, mocks=None)
+
+      Creates an ``UTHelper`` instance from a given test specification.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: :py:class:`types.ModuleType`
+      :param test_module: The test module.
+      :type test_module: :py:class:`types.ModuleType`
+      :param test_spec: The test specification.
+      :type test_spec: dict
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_spec()``:
+
+      .. code-block:: python
+
+          import sys
+
+          from ansible_collections.community.general.plugins.modules import ansible_module
+          from .uthelper import UTHelper, RunCommandMock
+
+          TEST_SPEC = dict(
+              test_cases=[
+                  ...
+              ]
+          )
+
+          helper = UTHelper.from_spec(ansible_module, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock])
+
+   .. py:staticmethod:: from_file(ansible_module, test_module, test_spec_filehandle, mocks=None)
+
+      Creates an ``UTHelper`` instance from a test specification file.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: :py:class:`types.ModuleType`
+      :param test_module: The test module.
+      :type test_module: :py:class:`types.ModuleType`
+      :param test_spec_filehandle: A file-like object providing the test specification in YAML format.
+      :type test_spec_filehandle: ``file-like object``
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_file()``:
+
+      .. code-block:: python
+
+          import sys
+
+          from ansible_collections.community.general.plugins.modules import ansible_module
+          from .uthelper import UTHelper, RunCommandMock
+
+          with open("test_spec.yaml", "r") as test_spec_filehandle:
+              helper = UTHelper.from_file(ansible_module, sys.modules[__name__], test_spec_filehandle, mocks=[RunCommandMock])
+
+   .. py:staticmethod:: from_module(ansible_module, test_module_name, mocks=None)
+
+      Creates an ``UTHelper`` instance from a given Ansible module and test module.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: :py:class:`types.ModuleType`
+      :param test_module_name: The name of the test module. Passing ``__name__`` works.
+      :type test_module_name: str
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_module()``:
+
+      .. code-block:: python
+
+          from ansible_collections.community.general.plugins.modules import ansible_module
+          from .uthelper import UTHelper, RunCommandMock
+
+          # Example usage
+          helper = UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+
+Creating TestCaseMocks
+^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new ``TestCaseMock`` you must extend that class and implement the relevant parts:
+
+.. code-block:: python
+
+    class ShrubberyMock(TestCaseMock):
+        # this name is mandatory, it is the name used in the test specification
+        name = "shrubbery"
+
+        def setup(self, mocker):
+            # perform setup, commonly using mocker to patch some other piece of code
+            ...
+
+        def check(self, test_case, results):
+            # verify the test execution met the expectations of the test case,
+            # for example, that the function was called as many times as it should
+            ...
+
+        def fixtures(self):
+            # returns a dict mapping names to pytest fixtures that should be used for the test case
+            # for example, in RunCommandMock it creates a fixture that patches AnsibleModule.get_bin_path
+            ...
+
+Caveats
+^^^^^^^
+
+Known issues/opportunities for improvement:
+
+* Only one ``UTHelper`` per test module: UTHelper injects a test function with a fixed name into the module's namespace,
+  so placing a second ``UTHelper`` instance in the same module would overwrite the function created by the first one.
+* The order of elements in a module's namespace is not consistent across executions in Python 3.5, so adding more tests to the test module
+  might make UTHelper add its function before or after the other test functions.
+  In the community.general collection the CI process uses ``pytest-xdist`` to parallelize and distribute the tests,
+  and it requires the order of the tests to be consistent.
+
+.. versionadded:: 7.5.0
diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst
new file mode 100644
index 0000000000..1beef0c57f
--- /dev/null
+++ b/docs/docsite/rst/guide_vardict.rst
@@ -0,0 +1,176 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_vardict:
+
+VarDict Guide
+=============
+
+Introduction
+^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.vardict`` module util provides the
+``VarDict`` class to help manage module variables. That class is a container for module variables,
+especially the ones for which the module must keep track of state changes, and the ones that should
+be published as return values.
+
+Each variable has extra behaviors controlled by associated metadata, simplifying the generation of
+output values from the module.
+
+Quickstart
+""""""""""
+
+The simplest way of using ``VarDict`` is:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.vardict import VarDict
+
+Then in ``main()``, or any other function called from there:
+
+.. code-block:: python
+
+    vars = VarDict()
+
+    # Next 3 statements are equivalent
+    vars.abc = 123
+    vars["abc"] = 123
+    vars.set("abc", 123)
+
+    vars.xyz = "bananas"
+    vars.ghi = False
+
+And by the time the module is about to exit:
+
+.. code-block:: python
+
+    results = vars.output()
+    module.exit_json(**results)
+
+That makes the return value of the module:
+
+.. code-block:: json
+
+    {
+        "abc": 123,
+        "xyz": "bananas",
+        "ghi": false
+    }
+
+Metadata
+""""""""
+
+The metadata values associated with each variable are:
+
+- ``output: bool`` - marks the variable for module output as a module return value.
+- ``fact: bool`` - marks the variable for module output as an Ansible fact.
+- ``verbosity: int`` - sets the minimum level of verbosity for which the variable will be included in the output.
+- ``change: bool`` - controls the detection of changes in the variable value.
+- ``initial_value: any`` - used along with ``change`` when you need to forcefully set an initial value for the variable.
+- ``diff: bool`` - used along with ``change``, this generates an Ansible-style diff ``dict``.
+
+See the sections below for more details on how to use the metadata.
+
+
+Using VarDict
+^^^^^^^^^^^^^
+
+Basic Usage
+"""""""""""
+
+As shown above, variables can be accessed using the ``[]`` operator, as in a ``dict`` object,
+and also as an object attribute, such as ``vars.abc``. The form using the ``set()``
+method is special in the sense that you can use it to set metadata values:
+
+.. code-block:: python
+
+    vars.set("abc", 123, output=False)
+    vars.set("abc", 123, output=True, change=True)
+
+Another way to set metadata after the variables have been created is:
+
+.. code-block:: python
+
+    vars.set_meta("abc", output=False)
+    vars.set_meta("abc", output=True, change=True, diff=True)
+
+You can use either the operator or the attribute form to access the value of a variable. Other ways to
+access its value and its metadata are:
+
+.. code-block:: python
+
+    print("abc value = {0}".format(vars.var("abc")["value"]))        # get the value
+    print("abc output? {0}".format(vars.get_meta("abc")["output"]))  # get the metadata like this
+
+The names of methods, such as ``set``, ``get_meta``, ``output`` amongst others, are reserved and
+cannot be used as variable names. If you try to use a reserved name, a ``ValueError`` exception
+is raised with the message "Name is reserved".
+
+Generating output
+"""""""""""""""""
+
+By default, every variable created is enabled for output with minimum verbosity set to zero; in
+other words, variables are always included in the output by default.
+
+You can control that when creating the variable for the first time or later in the code:
+
+.. code-block:: python
+
+    vars.set("internal", x + 4, output=False)
+    vars.set_meta("internal", output=False)
+
+You can also set the verbosity of a variable, for example:
+
+.. code-block:: python
+
+    vars.set("abc", x + 4)
+    vars.set("debug_x", x, verbosity=3)
+
+    results = vars.output(module._verbosity)
+    module.exit_json(**results)
+
+If the module was invoked with verbosity lower than 3, then the output will only contain
+the variable ``abc``. If running at a higher verbosity, as in ``ansible-playbook -vvv``,
+then the output will also contain ``debug_x``.
+
+Generating facts is very similar to regular output, but variables are not marked as facts by default.
+
+.. code-block:: python
+
+    vars.set("modulefact", x + 4, fact=True)
+    vars.set("debugfact", x, fact=True, verbosity=3)
+
+    results = vars.output(module._verbosity)
+    results["ansible_facts"] = {"module_name": vars.facts(module._verbosity)}
+    module.exit_json(**results)
+
+Handling change
+"""""""""""""""
+
+You can use ``VarDict`` to determine whether variables have had their values changed.
+
+.. code-block:: python
+
+    vars.set("abc", 42, change=True)
+    vars.abc = 90
+
+    results = vars.output()
+    results["changed"] = vars.has_changed
+    module.exit_json(**results)
+
+When tracking changes in variables, you may want to present the difference between their initial and final
+values. For that, use:
+
+.. code-block:: python
+
+    vars.set("abc", 42, change=True, diff=True)
+    vars.abc = 90
+
+    results = vars.output()
+    results["changed"] = vars.has_changed
+    results["diff"] = vars.diff()
+    module.exit_json(**results)
+
+.. versionadded:: 7.1.0
diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst
index 2df0ed04cd..a1f5723df4 100644
--- a/docs/docsite/rst/test_guide.rst
+++ b/docs/docsite/rst/test_guide.rst
@@ -1,16 +1,21 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
 .. _ansible_collections.community.general.docsite.test_guide:
 
 community.general Test (Plugin) Guide
 =====================================
 
-The :ref:`community.general collection ` offers currently one test plugin.
+The :anscollection:`community.general collection ` offers currently one test plugin.
 
 .. contents:: Topics
 
 Feature Tests
 -------------
 
-The ``a_module`` test allows to check whether a given string refers to an existing module or action plugin.
This can be useful in roles, which can use this to ensure that required modules are present ahead of time. +The :ansplugin:`community.general.a_module test ` allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time. .. code-block:: yaml+jinja diff --git a/galaxy.yml b/galaxy.yml index dc4a491bbc..0288625dbb 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,16 +1,21 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + namespace: community name: general -version: 4.0.0 +version: 12.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) -description: null +description: >- + The community.general collection is a part of the Ansible package and includes many modules and + plugins supported by Ansible community which are not part of more specialized community collections. license_file: COPYING -tags: [community] -# NOTE: No dependencies are expected to be added here -# dependencies: +tags: + - community repository: https://github.com/ansible-collections/community.general documentation: https://docs.ansible.com/ansible/latest/collections/community/general/ homepage: https://github.com/ansible-collections/community.general issues: https://github.com/ansible-collections/community.general/issues -#type: flatmap diff --git a/meta/runtime.yml b/meta/runtime.yml index f593166692..d2be5a89c1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,16 +1,134 @@ --- -requires_ansible: '>=2.9.10' +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +requires_ansible: '>=2.17.0' +action_groups: + consul: + - consul_agent_check + - consul_agent_service + - consul_auth_method + - consul_binding_rule + - consul_policy + - consul_role + - consul_session + - consul_token + proxmox: + - metadata: + extend_group: + - community.proxmox.proxmox + keycloak: + - keycloak_authentication + - keycloak_authentication_required_actions + - keycloak_authz_authorization_scope + - keycloak_authz_custom_policy + - keycloak_authz_permission + - keycloak_authz_permission_info + - keycloak_client + - keycloak_client_rolemapping + - keycloak_client_rolescope + - keycloak_clientscope + - keycloak_clientscope_type + - keycloak_clientsecret_info + - keycloak_clientsecret_regenerate + - keycloak_clienttemplate + - keycloak_component + - keycloak_component_info + - keycloak_group + - keycloak_identity_provider + - keycloak_realm + - keycloak_realm_key + - keycloak_realm_keys_metadata_info + - keycloak_realm_rolemapping + - keycloak_role + - keycloak_user + - keycloak_user_federation + - keycloak_user_rolemapping + - keycloak_userprofile + scaleway: + - scaleway_compute + - scaleway_compute_private_network + - scaleway_container + - scaleway_container_info + - scaleway_container_namespace + - scaleway_container_namespace_info + - scaleway_container_registry + - scaleway_container_registry_info + - scaleway_database_backup + - scaleway_function + - scaleway_function_info + - scaleway_function_namespace + - scaleway_function_namespace_info + - scaleway_image_info + - scaleway_ip + - scaleway_ip_info + - scaleway_lb + - scaleway_organization_info + - scaleway_private_network + - scaleway_security_group + - 
scaleway_security_group_info + - scaleway_security_group_rule + - scaleway_server_info + - scaleway_snapshot_info + - scaleway_sshkey + - scaleway_user_data + - scaleway_volume + - scaleway_volume_info + plugin_routing: + callback: + actionable: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' and 'display_ok_hosts = no' options. + full_skip: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' option. + hipchat: + tombstone: + removal_version: 10.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + osx_say: + redirect: community.general.say + stderr: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_failed_stderr + = yes' option. + yaml: + tombstone: + removal_version: 12.0.0 + warning_text: >- + The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards. connection: docker: redirect: community.docker.docker oc: redirect: community.okd.oc + proxmox_pct_remote: + redirect: community.proxmox.proxmox_pct_remote + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. lookup: gcp_storage_file: redirect: community.google.gcp_storage_file hashi_vault: redirect: community.hashi_vault.hashi_vault + hiera: + deprecation: + removal_version: 13.0.0 + warning_text: >- + Hiera has been deprecated a long time ago. + If you disagree with this deprecation, please create an issue in the community.general repository. + manifold: + tombstone: + removal_version: 11.0.0 + warning_text: Company was acquired in 2021 and service was ceased afterwards. nios: redirect: infoblox.nios_modules.nios_lookup nios_next_ip: @@ -22,6 +140,68 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.ali_instance_info instead. + atomic_container: + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + atomic_host: + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + atomic_image: + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + bearychat: + tombstone: + removal_version: 12.0.0 + warning_text: Chat service is no longer available. + catapult: + deprecation: + removal_version: 13.0.0 + warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details. + cisco_spark: + redirect: community.general.cisco_webex + clc_alert_policy: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_blueprint_package: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_firewall_policy: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_group: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_loadbalancer: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. 
+ clc_modify_server: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_publicip: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_server: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_server_snapshot: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + consul_acl: + tombstone: + removal_version: 10.0.0 + warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. docker_compose: redirect: community.docker.docker_compose docker_config: @@ -76,6 +256,15 @@ plugin_routing: redirect: community.docker.docker_volume docker_volume_info: redirect: community.docker.docker_volume_info + facter: + tombstone: + removal_version: 12.0.0 + warning_text: Use community.general.facter_facts instead. + flowdock: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. foreman: tombstone: removal_version: 2.0.0 @@ -154,6 +343,8 @@ plugin_routing: removal_version: 2.0.0 warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead. + hana_query: + redirect: community.sap_libs.sap_hdbsql hetzner_failover_ip: redirect: community.hrobot.failover_ip hetzner_failover_ip_info: @@ -162,6 +353,10 @@ plugin_routing: redirect: community.hrobot.firewall hetzner_firewall_info: redirect: community.hrobot.firewall_info + hipchat: + tombstone: + removal_version: 11.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. hpilo_facts: tombstone: removal_version: 3.0.0 @@ -288,6 +483,10 @@ plugin_routing: redirect: infoblox.nios_modules.nios_txt_record nios_zone: redirect: infoblox.nios_modules.nios_zone + oci_vcn: + deprecation: + removal_version: 13.0.0 + warning_text: Use oracle.oci.oci_network_vcn instead. ome_device_info: redirect: dellemc.openmanage.ome_device_info one_image_facts: @@ -483,6 +682,116 @@ plugin_routing: redirect: community.postgresql.postgresql_user postgresql_user_obj_stat_info: redirect: community.postgresql.postgresql_user_obj_stat_info + profitbricks: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_datacenter: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_nic: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_volume: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_volume_attachments: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup: + redirect: community.proxmox.proxmox_backup + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup_info: + redirect: community.proxmox.proxmox_backup_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_disk: + redirect: community.proxmox.proxmox_disk + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_domain_info: + redirect: community.proxmox.proxmox_domain_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_group_info: + redirect: community.proxmox.proxmox_group_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_kvm: + redirect: community.proxmox.proxmox_kvm + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_nic: + redirect: community.proxmox.proxmox_nic + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_node_info: + redirect: community.proxmox.proxmox_node_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool: + redirect: community.proxmox.proxmox_pool + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool_member: + redirect: community.proxmox.proxmox_pool_member + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_snap: + redirect: community.proxmox.proxmox_snap + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_contents_info: + redirect: community.proxmox.proxmox_storage_contents_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_info: + redirect: community.proxmox.proxmox_storage_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_tasks_info: + redirect: community.proxmox.proxmox_tasks_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_template: + redirect: community.proxmox.proxmox_template + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_user_info: + redirect: community.proxmox.proxmox_user_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_vm_info: + redirect: community.proxmox.proxmox_vm_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. purefa_facts: tombstone: removal_version: 3.0.0 @@ -495,10 +804,126 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.python_requirements_info instead. + rax_cbs_attachments: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cbs: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb_database: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb_user: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. 
+ rax_clb_nodes: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_clb_ssl: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_clb: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_dns_record: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_dns: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_facts: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_files_objects: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_files: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_identity: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_keypair: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_meta: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_alarm: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_check: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_entity: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification_plan: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_network: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_queue: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_scaling_group: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_scaling_policy: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. redfish_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.redfish_info instead. + rhn_channel: + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. + rhn_register: + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. + sapcar_extract: + redirect: community.sap_libs.sapcar_extract + sap_task_list_execute: + redirect: community.sap_libs.sap_task_list_execute scaleway_image_facts: tombstone: removal_version: 3.0.0 @@ -527,6 +952,26 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_volume_info instead. + sensu_check: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_client: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. 
+ sensu_handler: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_silence: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_subscription: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sf_account_manager: tombstone: removal_version: 2.0.0 @@ -551,10 +996,45 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.smartos_image_info instead. + stackdriver: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore, + and any new development in the direction of providing an alternative should + happen in the context of the google.cloud collection. + typetalk: + deprecation: + removal_version: 13.0.0 + warning_text: The typetalk service will be discontinued on Dec 2025. vertica_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.vertica_info instead. + webfaction_app: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_db: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_domain: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_mailbox: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_site: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. xenserver_guest_facts: tombstone: removal_version: 3.0.0 @@ -572,8 +1052,46 @@ plugin_routing: redirect: community.kubevirt.kubevirt_vm_options nios: redirect: infoblox.nios_modules.nios + oracle: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_creatable_resource: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_display_name_option: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_name_option: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_tags: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_wait_options: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. postgresql: redirect: community.postgresql.postgresql + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ purestorage: + tombstone: + removal_version: 12.0.0 + warning_text: The modules for purestorage were removed in community.general 3.0.0; this document fragment was left behind. + rackspace: + tombstone: + removal_version: 9.0.0 + warning_text: This doc fragment was used by rax modules, which relied on the deprecated + package pyrax. module_utils: docker.common: redirect: community.docker.common @@ -591,39 +1109,52 @@ plugin_routing: redirect: community.kubevirt.kubevirt net_tools.nios.api: redirect: infoblox.nios_modules.api + oci_utils: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and an official Oracle collection has been available for a number of years. postgresql: redirect: community.postgresql.postgresql + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + pure: + tombstone: + removal_version: 12.0.0 + warning_text: The modules for purestorage were removed in community.general 3.0.0; this module util was left behind. + rax: + tombstone: + removal_version: 9.0.0 + warning_text: This module util relied on the deprecated package pyrax. remote_management.dellemc.dellemc_idrac: redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome - callback: - actionable: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' and 'display_ok_hosts = no' options. - full_skip: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' option. - stderr: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr - = yes' option. inventory: docker_machine: redirect: community.docker.docker_machine docker_swarm: redirect: community.docker.docker_swarm + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. kubevirt: redirect: community.kubevirt.kubevirt + stackpath_compute: + tombstone: + removal_version: 11.0.0 + warning_text: The company and the service were sunset in June 2024. filter: path_join: # The ansible.builtin.path_join filter has been added in ansible-base 2.10. # Since plugin routing is only available since ansible-base 2.10, this - # redirect will be used for ansible-base 2.10 or later, and the included - # path_join filter will be used for Ansible 2.9 or earlier. + # redirect will be used for ansible-base 2.10 or later. This was mostly + # relevant before community.general 5.0.0, when community.general also + # supported Ansible 2.9. Back then, the included path_join filter was used + # for Ansible 2.9 or earlier. Now we only have the redirect, until we + # eventually deprecate and then remove it.
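+ # (For reference: a "redirect" entry makes ansible-core load the named plugin whenever this one is requested, a "deprecation" entry warns on use but still runs the plugin, and a "tombstone" entry fails immediately with its warning text.)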
redirect: ansible.builtin.path_join diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 0000000000..9b2f92a9e1 --- /dev/null +++ b/noxfile.py @@ -0,0 +1,38 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +# /// script +# dependencies = ["nox>=2025.02.09", "antsibull-nox"] +# /// + +import sys + +import nox + + +try: + import antsibull_nox +except ImportError: + print("You need to install antsibull-nox in the same Python environment as nox.") + sys.exit(1) + + +antsibull_nox.load_antsibull_nox_toml() + + +@nox.session(name="aliases", python=False, default=True) +def aliases(session: nox.Session) -> None: + session.run("python", "tests/sanity/extra/aliases.py") + + +@nox.session(name="botmeta", default=True) +def botmeta(session: nox.Session) -> None: + session.install("PyYAML", "voluptuous") + session.run("python", "tests/sanity/extra/botmeta.py") + + +# Allow running the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar. +# Requires nox >= 2025.02.09 +if __name__ == "__main__": + nox.main() diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py deleted file mode 120000 index 7884b75ae7..0000000000 --- a/plugins/action/iptables_state.py +++ /dev/null @@ -1 +0,0 @@ -./system/iptables_state.py \ No newline at end of file diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py new file mode 100644 index 0000000000..dd6724476f --- /dev/null +++ b/plugins/action/iptables_state.py @@ -0,0 +1,197 @@ +# Copyright (c) 2020, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import time + +from ansible.plugins.action import ActionBase +from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure +from ansible.utils.vars import merge_hash +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionBase): + + # Keep internal params away from user interactions + _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait')) + DEFAULT_SUDOABLE = True + + @staticmethod + def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout): + return ( + "This module doesn't support async>0 and poll>0 when its 'state' param " + "is set to 'restored'. To enable its rollback feature (that needs the " + "module to run asynchronously on the remote), please set task attribute " + f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") + + @staticmethod + def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout): + return ( + "Attempts to restore iptables state without rollback in case of mistake " + "may lead the ansible controller to lose access to the hosts and never " + "regain it before fixing firewall rules through a serial console, or any " + f"other way except SSH. 
Please set task attribute 'poll' (={task_poll}) to 0, and " + f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) " + "(recommended).") + + @staticmethod + def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout): + return ( + "You are attempting to restore iptables state with rollback in case of mistake, " + "but with settings that will cause this rollback to happen AFTER the " + "controller reaches its own timeout. Please set task attribute 'poll' " + f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") + + def _async_result(self, async_status_args, task_vars, timeout): + ''' + Retrieve results of the asynchronous task, and display them in place of + the async wrapper results (those with the ansible_job_id key). + ''' + async_status = self._task.copy() + async_status.args = async_status_args + async_status.action = 'ansible.builtin.async_status' + async_status.async_val = 0 + async_action = self._shared_loader_obj.action_loader.get( + async_status.action, task=async_status, connection=self._connection, + play_context=self._play_context, loader=self._loader, templar=self._templar, + shared_loader_obj=self._shared_loader_obj) + + if async_status.args['mode'] == 'cleanup': + return async_action.run(task_vars=task_vars) + + # At least one iteration is required, even if timeout is 0. + for dummy in range(max(1, timeout)): + async_result = async_action.run(task_vars=task_vars) + if async_result.get('finished', 0) == 1: + break + time.sleep(min(1, timeout)) + + return async_result + + def run(self, tmp=None, task_vars=None): + + self._supports_check_mode = True + self._supports_async = True + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + if not result.get('skipped'): + + # FUTURE: better to let _execute_module calculate this internally? + wrap_async = self._task.async_val and not self._connection.has_native_async + + # Set short names for values we'll have to compare or reuse + task_poll = self._task.poll + task_async = self._task.async_val + check_mode = self._play_context.check_mode + max_timeout = self._connection._play_context.timeout + module_args = self._task.args + + async_status_args = {} + starter_cmd = None + confirm_cmd = None + + if module_args.get('state', None) == 'restored': + if not wrap_async: + if not check_mode: + display.warning(self.msg_warning__no_async_is_no_rollback( + task_poll, + task_async, + max_timeout)) + elif task_poll: + raise AnsibleActionFail(self.msg_error__async_and_poll_not_zero( + task_poll, + task_async, + max_timeout)) + else: + if task_async > max_timeout and not check_mode: + display.warning(self.msg_warning__async_greater_than_timeout( + task_poll, + task_async, + max_timeout)) + + # inject the async directory based on the shell option into the + # module args + async_dir = self.get_shell_option('async_dir', default="~/.ansible_async") + + # Bind the loop max duration to consistent values on both + # remote and local sides (if not the same, make the loop + # longer on the controller); and set a backup file path. + module_args['_timeout'] = task_async + module_args['_back'] = f'{async_dir}/iptables.state' + async_status_args = dict(mode='status') + confirm_cmd = f"rm -f {module_args['_back']}" + starter_cmd = f"touch {module_args['_back']}.starter" + remaining_time = max(task_async, max_timeout) + + # do work!
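+ # A minimal illustrative task for this code path (values are hypothetical, not taken from the module docs): + # - community.general.iptables_state: + # state: restored + # path: /run/iptables.saved + # async: 20 (must be >2 and not greater than ansible_timeout) + # poll: 0 + # With poll=0 the module keeps running on the remote after the task returns, which is what makes the rollback handshake below possible.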
+ result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async)) + + # Then the 3-step "go ahead or rollback": + # 1. Catch early errors of the module (in asynchronous task) if any. + # Touch a file on the target to signal the module to process now. + # 2. Reset connection to ensure a persistent one will not be reused. + # 3. Confirm the restored state by removing the backup on the remote. + # Retrieve the results of the asynchronous task to return them. + if '_back' in module_args: + async_status_args['jid'] = result.get('ansible_job_id', None) + if async_status_args['jid'] is None: + raise AnsibleActionFail("Unable to get 'ansible_job_id'.") + + # Catch early errors due to missing mandatory option, bad + # option type/value, missing required system command, etc. + result = merge_hash(result, self._async_result(async_status_args, task_vars, 0)) + + # The module knows not to process the main iptables-restore + # command before finding (and deleting) the 'starter' cookie on + # the host, so the previous query will not reach the SSH timeout. + dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) + + # As the main command is not yet executed on the target, here + # 'finished' means 'failed before the main command was executed'. + if not result['finished']: + try: + self._connection.reset() + except AttributeError: + pass + + for dummy in range(max_timeout): + time.sleep(1) + remaining_time -= 1 + # - AnsibleConnectionFailure covers rejected requests (i.e. + # by rules with '--jump REJECT') + # - ansible_timeout is able to cover dropped requests (due + # to a rule or policy DROP) if not lower than async_val. + try: + dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) + break + except AnsibleConnectionFailure: + continue + + result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time)) + + # Clean up async-related stuff and internal params + for key in ('ansible_job_id', 'results_file', 'started', 'finished'): + if result.get(key): + del result[key] + + if result.get('invocation', {}).get('module_args'): + for key in ('_back', '_timeout', '_async_dir', 'jid'): + if result['invocation']['module_args'].get(key): + del result['invocation']['module_args'][key] + + async_status_args['mode'] = 'cleanup' + dummy = self._async_result(async_status_args, task_vars, 0) + + if not wrap_async: + # remove a temporary path we created + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result diff --git a/plugins/action/shutdown.py b/plugins/action/shutdown.py deleted file mode 120000 index 260ca8ece4..0000000000 --- a/plugins/action/shutdown.py +++ /dev/null @@ -1 +0,0 @@ -./system/shutdown.py \ No newline at end of file diff --git a/plugins/action/shutdown.py b/plugins/action/shutdown.py new file mode 100644 index 0000000000..d2a9d3c2b7 --- /dev/null +++ b/plugins/action/shutdown.py @@ -0,0 +1,221 @@ +# Copyright (c) 2020, Amin Vakil +# Copyright (c) 2016-2018, Matt Davis +# Copyright (c) 2018, Sam Doran +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible.errors import AnsibleError, AnsibleConnectionFailure +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.collections import is_string +from ansible.plugins.action import 
ActionBase +from ansible.utils.display import Display + +display = Display() + + +def fmt(mapping, key): + return to_native(mapping[key]).strip() + + +class TimedOutException(Exception): + pass + + +class ActionModule(ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset(( + 'msg', + 'delay', + 'search_paths' + )) + + DEFAULT_CONNECT_TIMEOUT = None + DEFAULT_PRE_SHUTDOWN_DELAY = 0 + DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible' + DEFAULT_SHUTDOWN_COMMAND = 'shutdown' + DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"' + DEFAULT_SUDOABLE = True + + SHUTDOWN_COMMANDS = { + 'alpine': 'poweroff', + 'vmkernel': 'halt', + } + + SHUTDOWN_COMMAND_ARGS = { + 'alpine': '', + 'void': '-h +{delay_min} "{message}"', + 'freebsd': '-p +{delay_sec}s "{message}"', + 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS, + 'macosx': '-h +{delay_min} "{message}"', + 'openbsd': '-h +{delay_min} "{message}"', + 'solaris': '-y -g {delay_sec} -i 5 "{message}"', + 'sunos': '-y -g {delay_sec} -i 5 "{message}"', + 'vmkernel': '-d {delay_sec}', + 'aix': '-Fh', + } + + def __init__(self, *args, **kwargs): + super(ActionModule, self).__init__(*args, **kwargs) + + @property + def delay(self): + return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY) + + def _check_delay(self, key, default): + """Ensure that the value is positive or zero""" + value = int(self._task.args.get(key, default)) + if value < 0: + value = 0 + return value + + def _get_value_from_facts(self, variable_name, distribution, default_value): + """Get dist+version specific args first, then distribution, then family, lastly use default""" + attr = getattr(self, variable_name) + value = attr.get( + distribution['name'] + distribution['version'], + attr.get( + distribution['name'], + attr.get( + distribution['family'], + getattr(self, default_value)))) + return value + + def get_distribution(self, task_vars): + # FIXME: only execute the module if we don't already have the facts we need + distribution = {} + display.debug(f'{self._task.action}: running setup module to get distribution') + module_output = self._execute_module( + task_vars=task_vars, + module_name='ansible.legacy.setup', + module_args={'gather_subset': 'min'}) + try: + if module_output.get('failed', False): + raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}") + distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() + distribution['version'] = to_text( + module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) + distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) + display.debug(f"{self._task.action}: distribution: {distribution}") + return distribution + except KeyError as ke: + raise AnsibleError(f'Failed to get distribution information. 
Missing "{ke.args[0]}" in output.') + + def get_shutdown_command(self, task_vars, distribution): + def find_command(command, find_search_paths): + display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"') + find_result = self._execute_module( + task_vars=task_vars, + # prevent collection search by calling with ansible.legacy (still allows library/ override of find) + module_name='ansible.legacy.find', + module_args={ + 'paths': find_search_paths, + 'patterns': [command], + 'file_type': 'any' + } + ) + return [x['path'] for x in find_result['files']] + + shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND') + default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] + search_paths = self._task.args.get('search_paths', default_search_paths) + + # FIXME: switch all this to user arg spec validation methods when they are available + # Convert bare strings to a list + if is_string(search_paths): + search_paths = [search_paths] + + try: + incorrect_type = any(not is_string(x) for x in search_paths) + if not isinstance(search_paths, list) or incorrect_type: + raise TypeError + except TypeError: + # Error if we didn't get a list + err_msg = f"'search_paths' must be a string or flat list of strings, got {search_paths}" + raise AnsibleError(err_msg) + + full_path = find_command(shutdown_bin, search_paths) # find the path to the shutdown command + if not full_path: # if we could not find the shutdown command + + # tell the user we will try with systemd + display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.') + systemctl_search_paths = ['/bin', '/usr/bin'] + full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command + if not full_path: # if we couldn't find systemctl + raise AnsibleError( + f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl' + f' command in search paths: {systemctl_search_paths}, unable to shut down.') # we give up here + else: + return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown + + # systemd case taken care of; here we add args to the command + args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') + # Convert seconds to minutes. If less than 60, set it to 0.
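+ # For example (illustrative values): delay=90 yields delay_sec=90 and delay_min=1, while delay=45 yields delay_min=0.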
+ delay_sec = self.delay + shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) + + af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) + return f'{full_path[0]} {af}' + + def perform_shutdown(self, task_vars, distribution): + result = {} + shutdown_result = {} + shutdown_command_exec = self.get_shutdown_command(task_vars, distribution) + + self.cleanup(force=True) + try: + display.vvv(f"{self._task.action}: shutting down server...") + display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'") + if self._play_context.check_mode: + shutdown_result['rc'] = 0 + else: + shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE) + except AnsibleConnectionFailure as e: + # If the connection is closed too quickly due to the system being shut down, carry on + display.debug( + f'{self._task.action}: AnsibleConnectionFailure caught and handled: {e}') + shutdown_result['rc'] = 0 + + if shutdown_result['rc'] != 0: + result['failed'] = True + result['shutdown'] = False + result['msg'] = f"Shutdown command failed. Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}" + return result + + result['failed'] = False + result['shutdown_command'] = shutdown_command_exec + return result + + def run(self, tmp=None, task_vars=None): + self._supports_check_mode = True + self._supports_async = True + + # If running with local connection, fail so we don't shut down ourselves + if self._connection.transport == 'local' and (not self._play_context.check_mode): + msg = f'Running {self._task.action} with local connection would shut down the control node.' + return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} + + if task_vars is None: + task_vars = {} + + result = super(ActionModule, self).run(tmp, task_vars) + + if result.get('skipped', False) or result.get('failed', False): + return result + + distribution = self.get_distribution(task_vars) + + # Initiate shutdown + shutdown_result = self.perform_shutdown(task_vars, distribution) + + if shutdown_result['failed']: + result = shutdown_result + return result + + result['shutdown'] = True + result['changed'] = True + result['shutdown_command'] = shutdown_result['shutdown_command'] + + return result diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py deleted file mode 100644 index b8ae1a5dea..0000000000 --- a/plugins/action/system/iptables_state.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import time - -from ansible.plugins.action import ActionBase -from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure -from ansible.utils.vars import merge_hash -from ansible.utils.display import Display - -display = Display() - - -class ActionModule(ActionBase): - - # Keep internal params away from user interactions - _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait')) - DEFAULT_SUDOABLE = True - - MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = ( - "This module doesn't support async>0 and poll>0 when its 'state' param " - "is set to 'restored'. 
To enable its rollback feature (that needs the " - "module to run asynchronously on the remote), please set task attribute " - "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = ( - "Attempts to restore iptables state without rollback in case of mistake " - "may lead the ansible controller to loose access to the hosts and never " - "regain it before fixing firewall rules through a serial console, or any " - "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and " - "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) " - "(recommended).") - MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = ( - "You attempt to restore iptables state with rollback in case of mistake, " - "but with settings that will lead this rollback to happen AFTER that the " - "controller will reach its own timeout. Please set task attribute 'poll' " - "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - - def _async_result(self, async_status_args, task_vars, timeout): - ''' - Retrieve results of the asynchonous task, and display them in place of - the async wrapper results (those with the ansible_job_id key). - ''' - async_status = self._task.copy() - async_status.args = async_status_args - async_status.action = 'ansible.builtin.async_status' - async_status.async_val = 0 - async_action = self._shared_loader_obj.action_loader.get( - async_status.action, task=async_status, connection=self._connection, - play_context=self._play_context, loader=self._loader, templar=self._templar, - shared_loader_obj=self._shared_loader_obj) - - if async_status.args['mode'] == 'cleanup': - return async_action.run(task_vars=task_vars) - - # At least one iteration is required, even if timeout is 0. - for dummy in range(max(1, timeout)): - async_result = async_action.run(task_vars=task_vars) - if async_result.get('finished', 0) == 1: - break - time.sleep(min(1, timeout)) - - return async_result - - def run(self, tmp=None, task_vars=None): - - self._supports_check_mode = True - self._supports_async = True - - result = super(ActionModule, self).run(tmp, task_vars) - del tmp # tmp no longer has any effect - - if not result.get('skipped'): - - # FUTURE: better to let _execute_module calculate this internally? 
- wrap_async = self._task.async_val and not self._connection.has_native_async - - # Set short names for values we'll have to compare or reuse - task_poll = self._task.poll - task_async = self._task.async_val - check_mode = self._play_context.check_mode - max_timeout = self._connection._play_context.timeout - module_args = self._task.args - - if module_args.get('state', None) == 'restored': - if not wrap_async: - if not check_mode: - display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % ( - task_poll, - task_async, - max_timeout)) - elif task_poll: - raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % ( - task_poll, - task_async, - max_timeout)) - else: - if task_async > max_timeout and not check_mode: - display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % ( - task_poll, - task_async, - max_timeout)) - - # inject the async directory based on the shell option into the - # module args - async_dir = self.get_shell_option('async_dir', default="~/.ansible_async") - - # Bind the loop max duration to consistent values on both - # remote and local sides (if not the same, make the loop - # longer on the controller); and set a backup file path. - module_args['_timeout'] = task_async - module_args['_back'] = '%s/iptables.state' % async_dir - async_status_args = dict(mode='status') - confirm_cmd = 'rm -f %s' % module_args['_back'] - starter_cmd = 'touch %s.starter' % module_args['_back'] - remaining_time = max(task_async, max_timeout) - - # do work! - result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async)) - - # Then the 3-steps "go ahead or rollback": - # 1. Catch early errors of the module (in asynchronous task) if any. - # Touch a file on the target to signal the module to process now. - # 2. Reset connection to ensure a persistent one will not be reused. - # 3. Confirm the restored state by removing the backup on the remote. - # Retrieve the results of the asynchronous task to return them. - if '_back' in module_args: - async_status_args['jid'] = result.get('ansible_job_id', None) - if async_status_args['jid'] is None: - raise AnsibleActionFail("Unable to get 'ansible_job_id'.") - - # Catch early errors due to missing mandatory option, bad - # option type/value, missing required system command, etc. - result = merge_hash(result, self._async_result(async_status_args, task_vars, 0)) - - # The module is aware to not process the main iptables-restore - # command before finding (and deleting) the 'starter' cookie on - # the host, so the previous query will not reach ssh timeout. - dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) - - # As the main command is not yet executed on the target, here - # 'finished' means 'failed before main command be executed'. - if not result['finished']: - try: - self._connection.reset() - except AttributeError: - pass - - for dummy in range(max_timeout): - time.sleep(1) - remaining_time -= 1 - # - AnsibleConnectionFailure covers rejected requests (i.e. - # by rules with '--jump REJECT') - # - ansible_timeout is able to cover dropped requests (due - # to a rule or policy DROP) if not lower than async_val. 
- try: - dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) - break - except AnsibleConnectionFailure: - continue - - result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time)) - - # Cleanup async related stuff and internal params - for key in ('ansible_job_id', 'results_file', 'started', 'finished'): - if result.get(key): - del result[key] - - if result.get('invocation', {}).get('module_args'): - for key in ('_back', '_timeout', '_async_dir', 'jid'): - if result['invocation']['module_args'].get(key): - del result['invocation']['module_args'][key] - - async_status_args['mode'] = 'cleanup' - dummy = self._async_result(async_status_args, task_vars, 0) - - if not wrap_async: - # remove a temporary path we created - self._remove_tmp_path(self._connection._shell.tmpdir) - - return result diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py deleted file mode 100644 index 19813b0847..0000000000 --- a/plugins/action/system/shutdown.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Amin Vakil -# Copyright: (c) 2016-2018, Matt Davis -# Copyright: (c) 2018, Sam Doran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError, AnsibleConnectionFailure -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.common.collections import is_string -from ansible.plugins.action import ActionBase -from ansible.utils.display import Display - -display = Display() - - -class TimedOutException(Exception): - pass - - -class ActionModule(ActionBase): - TRANSFERS_FILES = False - _VALID_ARGS = frozenset(( - 'msg', - 'delay', - 'search_paths' - )) - - DEFAULT_CONNECT_TIMEOUT = None - DEFAULT_PRE_SHUTDOWN_DELAY = 0 - DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible' - DEFAULT_SHUTDOWN_COMMAND = 'shutdown' - DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"' - DEFAULT_SUDOABLE = True - - SHUTDOWN_COMMANDS = { - 'alpine': 'poweroff', - 'vmkernel': 'halt', - } - - SHUTDOWN_COMMAND_ARGS = { - 'alpine': '', - 'void': '-h +{delay_min} "{message}"', - 'freebsd': '-h +{delay_sec}s "{message}"', - 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS, - 'macosx': '-h +{delay_min} "{message}"', - 'openbsd': '-h +{delay_min} "{message}"', - 'solaris': '-y -g {delay_sec} -i 5 "{message}"', - 'sunos': '-y -g {delay_sec} -i 5 "{message}"', - 'vmkernel': '-d {delay_sec}', - 'aix': '-Fh', - } - - def __init__(self, *args, **kwargs): - super(ActionModule, self).__init__(*args, **kwargs) - - @property - def delay(self): - return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY) - - def _check_delay(self, key, default): - """Ensure that the value is positive or zero""" - value = int(self._task.args.get(key, default)) - if value < 0: - value = 0 - return value - - def _get_value_from_facts(self, variable_name, distribution, default_value): - """Get dist+version specific args first, then distribution, then family, lastly use default""" - attr = getattr(self, variable_name) - value = attr.get( - distribution['name'] + distribution['version'], - attr.get( - distribution['name'], - attr.get( - distribution['family'], - getattr(self, default_value)))) - return value - - def get_shutdown_command_args(self, distribution): - args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', 
distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') - # Convert seconds to minutes. If less that 60, set it to 0. - delay_sec = self.delay - shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) - return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) - - def get_distribution(self, task_vars): - # FIXME: only execute the module if we don't already have the facts we need - distribution = {} - display.debug('{action}: running setup module to get distribution'.format(action=self._task.action)) - module_output = self._execute_module( - task_vars=task_vars, - module_name='ansible.legacy.setup', - module_args={'gather_subset': 'min'}) - try: - if module_output.get('failed', False): - raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format( - to_native(module_output['module_stdout']).strip(), - to_native(module_output['module_stderr']).strip())) - distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() - distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) - distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) - display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) - return distribution - except KeyError as ke: - raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0])) - - def get_shutdown_command(self, task_vars, distribution): - shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND') - default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] - search_paths = self._task.args.get('search_paths', default_search_paths) - - # FIXME: switch all this to user arg spec validation methods when they are available - # Convert bare strings to a list - if is_string(search_paths): - search_paths = [search_paths] - - # Error if we didn't get a list - err_msg = "'search_paths' must be a string or flat list of strings, got {0}" - try: - incorrect_type = any(not is_string(x) for x in search_paths) - if not isinstance(search_paths, list) or incorrect_type: - raise TypeError - except TypeError: - raise AnsibleError(err_msg.format(search_paths)) - - display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format( - action=self._task.action, - command=shutdown_bin, - paths=search_paths)) - find_result = self._execute_module( - task_vars=task_vars, - # prevent collection search by calling with ansible.legacy (still allows library/ override of find) - module_name='ansible.legacy.find', - module_args={ - 'paths': search_paths, - 'patterns': [shutdown_bin], - 'file_type': 'any' - } - ) - - full_path = [x['path'] for x in find_result['files']] - if not full_path: - raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths)) - self._shutdown_command = full_path[0] - return self._shutdown_command - - def perform_shutdown(self, task_vars, distribution): - result = {} - shutdown_result = {} - shutdown_command = self.get_shutdown_command(task_vars, distribution) - shutdown_command_args = self.get_shutdown_command_args(distribution) - shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args) - - self.cleanup(force=True) - try: - display.vvv("{action}: shutting down server...".format(action=self._task.action)) - display.debug("{action}: shutting down server with command 
'{command}'".format(action=self._task.action, command=shutdown_command_exec)) - if self._play_context.check_mode: - shutdown_result['rc'] = 0 - else: - shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE) - except AnsibleConnectionFailure as e: - # If the connection is closed too quickly due to the system being shutdown, carry on - display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e))) - shutdown_result['rc'] = 0 - - if shutdown_result['rc'] != 0: - result['failed'] = True - result['shutdown'] = False - result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format( - stdout=to_native(shutdown_result['stdout'].strip()), - stderr=to_native(shutdown_result['stderr'].strip())) - return result - - result['failed'] = False - result['shutdown_command'] = shutdown_command_exec - return result - - def run(self, tmp=None, task_vars=None): - self._supports_check_mode = True - self._supports_async = True - - # If running with local connection, fail so we don't shutdown ourself - if self._connection.transport == 'local' and (not self._play_context.check_mode): - msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action) - return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} - - if task_vars is None: - task_vars = {} - - result = super(ActionModule, self).run(tmp, task_vars) - - if result.get('skipped', False) or result.get('failed', False): - return result - - distribution = self.get_distribution(task_vars) - - # Initiate shutdown - shutdown_result = self.perform_shutdown(task_vars, distribution) - - if shutdown_result['failed']: - result = shutdown_result - return result - - result['shutdown'] = True - result['changed'] = True - result['shutdown_command'] = shutdown_result['shutdown_command'] - - return result diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 7cf4a79c7b..84efe31ac4 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -1,83 +1,91 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: doas - short_description: Do As user +DOCUMENTATION = r""" +name: doas +short_description: Do As user +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(doas) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: doas_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_doas_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DOAS_USER + become_exe: + description: C(doas) executable. 
+ type: string + default: doas + ini: + - section: privilege_escalation + key: become_exe + - section: doas_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_doas_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DOAS_EXE + become_flags: + description: Options to pass to C(doas). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: doas_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_doas_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DOAS_FLAGS + become_pass: + description: Password for C(doas) prompt. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_doas_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DOAS_PASS + ini: + - section: doas_become_plugin + key: password + prompt_l10n: description: - - This become plugins allows your remote/login user to execute commands as another user via the doas utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: doas_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_doas_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_DOAS_USER - become_exe: - description: Doas executable - default: doas - ini: - - section: privilege_escalation - key: become_exe - - section: doas_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_doas_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_DOAS_EXE - become_flags: - description: Options to pass to doas - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: doas_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_doas_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_DOAS_FLAGS - become_pass: - description: password for doas prompt - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_doas_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_DOAS_PASS - ini: - - section: doas_become_plugin - key: password - prompt_l10n: - description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one - default: [] - ini: - - section: doas_become_plugin - key: localized_prompts - vars: - - name: ansible_doas_prompt_l10n - env: - - name: ANSIBLE_DOAS_PROMPT_L10N -''' + - List of localized strings to match for prompt detection. + - If empty the plugin uses the built-in one. + type: list + elements: string + default: [] + ini: + - section: doas_become_plugin + key: localized_prompts + vars: + - name: ansible_doas_prompt_l10n + env: + - name: ANSIBLE_DOAS_PROMPT_L10N +notes: + - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically + disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user. 
+""" import re @@ -93,6 +101,10 @@ class BecomeModule(BecomeBase): fail = ('Permission denied',) missing = ('Authorization required',) + # See https://github.com/ansible-collections/community.general/issues/9977, + # https://github.com/ansible/ansible/pull/78111 + pipelining = False + def check_password_prompt(self, b_output): ''' checks if the expected password prompt exists in b_output ''' @@ -118,9 +130,9 @@ class BecomeModule(BecomeBase): flags += ' -n' become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' success_cmd = self._build_success_command(cmd, shell, noexe=True) executable = getattr(shell, 'executable', shell.SHELL_FAMILY) - return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd) + return f'{become_exe} {flags} {user} {executable} -c {success_cmd}' diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index 1aef8edb69..dad05eb34e 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -1,70 +1,74 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: dzdo - short_description: Centrify's Direct Authorize - description: - - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: dzdo_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_dzdo_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_DZDO_USER - become_exe: - description: Dzdo executable - default: dzdo - ini: - - section: privilege_escalation - key: become_exe - - section: dzdo_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_dzdo_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_DZDO_EXE - become_flags: - description: Options to pass to dzdo - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: dzdo_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_dzdo_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_DZDO_FLAGS - become_pass: - description: Options to pass to dzdo - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_dzdo_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_DZDO_PASS - ini: - - section: dzdo_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: dzdo +short_description: Centrify's Direct Authorize +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(dzdo) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. 
+ type: string + ini: + - section: privilege_escalation + key: become_user + - section: dzdo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_dzdo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DZDO_USER + become_exe: + description: C(dzdo) executable. + type: string + default: dzdo + ini: + - section: privilege_escalation + key: become_exe + - section: dzdo_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_dzdo_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DZDO_EXE + become_flags: + description: Options to pass to C(dzdo). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: dzdo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_dzdo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DZDO_FLAGS + become_pass: + description: Password for C(dzdo). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_dzdo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DZDO_PASS + ini: + - section: dzdo_become_plugin + key: password +""" from ansible.plugins.become import BecomeBase @@ -86,10 +90,10 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') if self.get_option('become_pass'): - self.prompt = '[dzdo via ansible, key=%s] password:' % self._id - flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt) + self.prompt = f'[dzdo via ansible, key={self._id}] password:' + flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\"" become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' - return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)]) + return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index 1ee47b0fa3..0ffba62385 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -1,84 +1,89 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: ksu - short_description: Kerberos substitute user +DOCUMENTATION = r""" +name: ksu +short_description: Kerberos substitute user +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(ksu) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: ksu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_ksu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_KSU_USER + required: true + become_exe: + description: C(ksu) executable. 
+ type: string + default: ksu + ini: + - section: privilege_escalation + key: become_exe + - section: ksu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_ksu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_KSU_EXE + become_flags: + description: Options to pass to C(ksu). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: ksu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_ksu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_KSU_FLAGS + become_pass: + description: C(ksu) password. + type: string + required: false + vars: + - name: ansible_ksu_pass + - name: ansible_become_pass + - name: ansible_become_password + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_KSU_PASS + ini: + - section: ksu_become_plugin + key: password + prompt_l10n: description: - - This become plugins allows your remote/login user to execute commands as another user via the ksu utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: ksu_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_ksu_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_KSU_USER - required: True - become_exe: - description: Su executable - default: ksu - ini: - - section: privilege_escalation - key: become_exe - - section: ksu_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_ksu_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_KSU_EXE - become_flags: - description: Options to pass to ksu - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: ksu_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_ksu_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_KSU_FLAGS - become_pass: - description: ksu password - required: False - vars: - - name: ansible_ksu_pass - - name: ansible_become_pass - - name: ansible_become_password - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_KSU_PASS - ini: - - section: ksu_become_plugin - key: password - prompt_l10n: - description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one - default: [] - ini: - - section: ksu_become_plugin - key: localized_prompts - vars: - - name: ansible_ksu_prompt_l10n - env: - - name: ANSIBLE_KSU_PROMPT_L10N -''' + - List of localized strings to match for prompt detection. + - If empty the plugin uses the built-in one. 
+ type: list + elements: string + default: [] + ini: + - section: ksu_become_plugin + key: localized_prompts + vars: + - name: ansible_ksu_prompt_l10n + env: + - name: ANSIBLE_KSU_PROMPT_L10N +""" import re @@ -117,4 +122,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell)) + return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} ' diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index aebb0891b0..685f39f5d8 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -1,80 +1,121 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: machinectl - short_description: Systemd's machinectl privilege escalation - description: - - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: machinectl_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_machinectl_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_MACHINECTL_USER - become_exe: - description: Machinectl executable - default: machinectl - ini: - - section: privilege_escalation - key: become_exe - - section: machinectl_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_machinectl_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_MACHINECTL_EXE - become_flags: - description: Options to pass to machinectl - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: machinectl_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_machinectl_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_MACHINECTL_FLAGS - become_pass: - description: Password for machinectl - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_machinectl_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_MACHINECTL_PASS - ini: - - section: machinectl_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: machinectl +short_description: Systemd's machinectl privilege escalation +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(machinectl) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: machinectl_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_machinectl_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_MACHINECTL_USER + become_exe: + description: C(machinectl) executable. 
+ type: string + default: machinectl + ini: + - section: privilege_escalation + key: become_exe + - section: machinectl_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_machinectl_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_MACHINECTL_EXE + become_flags: + description: Options to pass to C(machinectl). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: machinectl_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_machinectl_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_MACHINECTL_FLAGS + become_pass: + description: Password for C(machinectl). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_machinectl_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_MACHINECTL_PASS + ini: + - section: machinectl_become_plugin + key: password +notes: + - When not using this plugin with user V(root), it only works correctly with a polkit rule that alters the behaviour + of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed + to perform the action (take a look at the examples section). If such a rule is not present, the plugin only works when + it is used as the root user, because then C(machinectl) shows no further prompt. + - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically + disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user. +""" + +EXAMPLES = r""" +# A polkit rule needed to use the module with a non-root user. +# See the Notes section for details. 
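+# The rule below matches the org.freedesktop.machine1.host-shell action for members of the "wheel" group; adjust the group name to whatever your system uses for administrators.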
+/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: |- + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.machine1.host-shell" && + subject.isInGroup("wheel")) { + return polkit.Result.AUTH_SELF_KEEP; + } + }); +""" + +from re import compile as re_compile from ansible.plugins.become import BecomeBase +from ansible.module_utils.common.text.converters import to_bytes + + +ansi_color_codes = re_compile(to_bytes(r'\x1B\[[0-9;]+m')) class BecomeModule(BecomeBase): name = 'community.general.machinectl' + prompt = 'Password: ' + fail = ('==== AUTHENTICATION FAILED ====',) + success = ('==== AUTHENTICATION COMPLETE ====',) + require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932 + + # See https://github.com/ansible/ansible/issues/81254, + # https://github.com/ansible/ansible/pull/78111 + pipelining = False + + @staticmethod + def remove_ansi_codes(line): + return ansi_color_codes.sub(b"", line) + def build_become_command(self, cmd, shell): super(BecomeModule, self).build_become_command(cmd, shell) @@ -85,4 +126,16 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s -q shell %s %s@ %s' % (become, flags, user, cmd) + return f'{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}' + + def check_success(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_success(b_output) + + def check_incorrect_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_incorrect_password(b_output) + + def check_missing_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_missing_password(b_output) diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index fe28e61c2b..c9eb975427 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -1,83 +1,86 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: pbrun - short_description: PowerBroker run - description: - - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: pbrun_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_pbrun_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_PBRUN_USER - become_exe: - description: Sudo executable - default: pbrun - ini: - - section: privilege_escalation - key: become_exe - - section: pbrun_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pbrun_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PBRUN_EXE - become_flags: - description: Options to pass to pbrun - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: pbrun_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pbrun_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PBRUN_FLAGS - become_pass: - description: Password for pbrun - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pbrun_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PBRUN_PASS - ini: - - section: pbrun_become_plugin - key: password - wrap_exe: - description: Toggle to wrap the command pbrun calls in 'shell -c' or not - default: False - type: bool - ini: - - section: pbrun_become_plugin - key: wrap_execution - vars: - - name: ansible_pbrun_wrap_execution - env: - - name: ANSIBLE_PBRUN_WRAP_EXECUTION -''' +DOCUMENTATION = r""" +name: pbrun +short_description: PowerBroker run +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(pbrun) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: pbrun_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pbrun_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PBRUN_USER + become_exe: + description: C(pbrun) executable. + type: string + default: pbrun + ini: + - section: privilege_escalation + key: become_exe + - section: pbrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pbrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PBRUN_EXE + become_flags: + description: Options to pass to C(pbrun). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: pbrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pbrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PBRUN_FLAGS + become_pass: + description: Password for C(pbrun). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pbrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PBRUN_PASS + ini: + - section: pbrun_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command C(pbrun) calls in C(shell -c) or not. 
+      default: false
+      type: bool
+      ini:
+        - section: pbrun_become_plugin
+          key: wrap_execution
+      vars:
+        - name: ansible_pbrun_wrap_execution
+      env:
+        - name: ANSIBLE_PBRUN_WRAP_EXECUTION
+"""

 from ansible.plugins.become import BecomeBase

@@ -98,7 +101,7 @@ class BecomeModule(BecomeBase):
         flags = self.get_option('become_flags')

         become_user = self.get_option('become_user')
-        user = '-u %s' % (become_user) if become_user else ''
+        user = f'-u {become_user}' if become_user else ''
         noexe = not self.get_option('wrap_exe')

-        return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)])
+        return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}"
diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py
index 2b37044c93..2e7df0f6c0 100644
--- a/plugins/become/pfexec.py
+++ b/plugins/become/pfexec.py
@@ -1,88 +1,91 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations

-DOCUMENTATION = '''
-    name: pfexec
-    short_description: profile based execution
+DOCUMENTATION = r"""
+name: pfexec
+short_description: Profile based execution
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(pfexec) utility.
+author: Ansible Core Team
+options:
+  become_user:
+    description:
-        - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
-    author: Ansible Core Team
-    options:
-        become_user:
-            description:
-                - User you 'become' to execute the task
-                - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out,
                  but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
- default: root - ini: - - section: privilege_escalation - key: become_user - - section: pfexec_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_pfexec_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_PFEXEC_USER - become_exe: - description: Sudo executable - default: pfexec - ini: - - section: privilege_escalation - key: become_exe - - section: pfexec_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pfexec_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PFEXEC_EXE - become_flags: - description: Options to pass to pfexec - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: pfexec_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pfexec_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PFEXEC_FLAGS - become_pass: - description: pfexec password - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pfexec_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PFEXEC_PASS - ini: - - section: pfexec_become_plugin - key: password - wrap_exe: - description: Toggle to wrap the command pfexec calls in 'shell -c' or not - default: False - type: bool - ini: - - section: pfexec_become_plugin - key: wrap_execution - vars: - - name: ansible_pfexec_wrap_execution - env: - - name: ANSIBLE_PFEXEC_WRAP_EXECUTION - notes: - - This plugin ignores I(become_user) as pfexec uses it's own C(exec_attr) to figure this out. -''' + - User you 'become' to execute the task. + - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here for + Ansible to make decisions needed for the task execution, like file permissions. + type: string + default: root + ini: + - section: privilege_escalation + key: become_user + - section: pfexec_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pfexec_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PFEXEC_USER + become_exe: + description: C(pfexec) executable. + type: string + default: pfexec + ini: + - section: privilege_escalation + key: become_exe + - section: pfexec_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pfexec_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PFEXEC_EXE + become_flags: + description: Options to pass to C(pfexec). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: pfexec_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pfexec_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PFEXEC_FLAGS + become_pass: + description: C(pfexec) password. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pfexec_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PFEXEC_PASS + ini: + - section: pfexec_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command C(pfexec) calls in C(shell -c) or not. + default: false + type: bool + ini: + - section: pfexec_become_plugin + key: wrap_execution + vars: + - name: ansible_pfexec_wrap_execution + env: + - name: ANSIBLE_PFEXEC_WRAP_EXECUTION +notes: + - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out. 
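+# A minimal, illustrative play that selects this become method (the host
+# group name and the task are assumptions, not part of the plugin):
+#
+#   - hosts: solaris
+#     become: true
+#     become_method: community.general.pfexec
+#     tasks:
+#       - ansible.builtin.command: whoami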
+""" from ansible.plugins.become import BecomeBase @@ -101,4 +104,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') noexe = not self.get_option('wrap_exe') - return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe)) + return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}' diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 8cb24fa937..413600cdbf 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -1,63 +1,65 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: pmrun - short_description: Privilege Manager run - description: - - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. - author: Ansible Core Team - options: - become_exe: - description: Sudo executable - default: pmrun - ini: - - section: privilege_escalation - key: become_exe - - section: pmrun_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pmrun_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PMRUN_EXE - become_flags: - description: Options to pass to pmrun - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: pmrun_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pmrun_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PMRUN_FLAGS - become_pass: - description: pmrun password - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pmrun_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PMRUN_PASS - ini: - - section: pmrun_become_plugin - key: password - notes: - - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user. -''' +DOCUMENTATION = r""" +name: pmrun +short_description: Privilege Manager run +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(pmrun) utility. +author: Ansible Core Team +options: + become_exe: + description: C(pmrun) executable. + type: string + default: pmrun + ini: + - section: privilege_escalation + key: become_exe + - section: pmrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pmrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PMRUN_EXE + become_flags: + description: Options to pass to C(pmrun). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: pmrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pmrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PMRUN_FLAGS + become_pass: + description: C(pmrun) password. 
+ type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pmrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PMRUN_PASS + ini: + - section: pmrun_become_plugin + key: password +notes: + - This plugin ignores the C(become_user) supplied and uses C(pmrun)'s own configuration to select the user. +""" +from shlex import quote as shlex_quote from ansible.plugins.become import BecomeBase -from ansible.module_utils.six.moves import shlex_quote class BecomeModule(BecomeBase): @@ -74,4 +76,4 @@ class BecomeModule(BecomeBase): become = self.get_option('become_exe') flags = self.get_option('become_flags') - return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell))) + return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}' diff --git a/plugins/become/run0.py b/plugins/become/run0.py new file mode 100644 index 0000000000..4362d53ebf --- /dev/null +++ b/plugins/become/run0.py @@ -0,0 +1,126 @@ +# Copyright (c) 2024, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +name: run0 +short_description: Systemd's run0 +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(run0) utility. +author: + - Thomas Sjögren (@konstruktoid) +version_added: '9.0.0' +options: + become_user: + description: User you 'become' to execute the task. + default: root + ini: + - section: privilege_escalation + key: become_user + - section: run0_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_run0_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_RUN0_USER + type: string + become_exe: + description: C(run0) executable. + default: run0 + ini: + - section: privilege_escalation + key: become_exe + - section: run0_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_run0_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_RUN0_EXE + type: string + become_flags: + description: Options to pass to C(run0). + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: run0_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_run0_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_RUN0_FLAGS + type: string +notes: + - This plugin only works when a C(polkit) rule is in place. +""" + +EXAMPLES = r""" +# An example polkit rule that allows the user 'ansible' in the 'wheel' group +# to execute commands using run0 without authentication. 
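+#
+# To select this become method in a play (illustrative sketch; any host
+# pattern works):
+#
+#   - hosts: all
+#     become: true
+#     become_method: community.general.run0
+#
+# The polkit rule itself: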
+/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: |- + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.systemd1.manage-units" && + subject.isInGroup("wheel") && + subject.user == "ansible") { + return polkit.Result.YES; + } + }); +""" + +from re import compile as re_compile + +from ansible.plugins.become import BecomeBase +from ansible.module_utils.common.text.converters import to_bytes + +ansi_color_codes = re_compile(to_bytes(r"\x1B\[[0-9;]+m")) + + +class BecomeModule(BecomeBase): + + name = "community.general.run0" + + prompt = "Password: " + fail = ("==== AUTHENTICATION FAILED ====",) + success = ("==== AUTHENTICATION COMPLETE ====",) + require_tty = ( + True # see https://github.com/ansible-collections/community.general/issues/6932 + ) + + @staticmethod + def remove_ansi_codes(line): + return ansi_color_codes.sub(b"", line) + + def build_become_command(self, cmd, shell): + super().build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option("become_exe") + flags = self.get_option("become_flags") + user = self.get_option("become_user") + + return ( + f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}" + ) + + def check_success(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_success(b_output) + + def check_incorrect_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_incorrect_password(b_output) + + def check_missing_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_missing_password(b_output) diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 7113b19442..ecd29c83c5 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -1,72 +1,75 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: sesu - short_description: CA Privileged Access Manager - description: - - This become plugins allows your remote/login user to execute commands as another user via the sesu utility. 
- author: ansible (@nekonyuu) - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: sesu_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_sesu_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_SESU_USER - become_exe: - description: sesu executable - default: sesu - ini: - - section: privilege_escalation - key: become_exe - - section: sesu_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_sesu_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_SESU_EXE - become_flags: - description: Options to pass to sesu - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: sesu_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_sesu_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_SESU_FLAGS - become_pass: - description: Password to pass to sesu - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_sesu_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_SESU_PASS - ini: - - section: sesu_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: sesu +short_description: CA Privileged Access Manager +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(sesu) utility. +author: ansible (@nekonyuu) +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: sesu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sesu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SESU_USER + become_exe: + description: C(sesu) executable. + type: string + default: sesu + ini: + - section: privilege_escalation + key: become_exe + - section: sesu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_sesu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_SESU_EXE + become_flags: + description: Options to pass to C(sesu). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sesu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sesu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SESU_FLAGS + become_pass: + description: Password to pass to C(sesu). 
+ type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sesu_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SESU_PASS + ini: + - section: sesu_become_plugin + key: password +""" from ansible.plugins.become import BecomeBase @@ -88,4 +91,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell)) + return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}' diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 410b881b96..3b5d4d8b7f 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -1,59 +1,77 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = """ - name: sudosu - short_description: Run tasks using sudo su - +DOCUMENTATION = r""" +name: sudosu +short_description: Run tasks using sudo su - +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(sudo) and C(su) utilities + combined. +author: + - Dag Wieers (@dagwieers) +version_added: 2.4.0 +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: root + ini: + - section: privilege_escalation + key: become_user + - section: sudo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sudo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SUDO_USER + become_flags: + description: Options to pass to C(sudo). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sudo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sudo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SUDO_FLAGS + become_pass: + description: Password to pass to C(sudo). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sudo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SUDO_PASS + ini: + - section: sudo_become_plugin + key: password + alt_method: description: - - This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined. - author: - - Dag Wieers (@dagwieers) - version_added: 2.4.0 - options: - become_user: - description: User you 'become' to execute the task. - default: root - ini: - - section: privilege_escalation - key: become_user - - section: sudo_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_sudo_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_SUDO_USER - become_flags: - description: Options to pass to C(sudo). 
- default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: sudo_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_sudo_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_SUDO_FLAGS - become_pass: - description: Password to pass to C(sudo). - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_sudo_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_SUDO_PASS - ini: - - section: sudo_become_plugin - key: password + - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), it + runs C(su -l user -c command). + - Use this when the default one is not working on your system. + required: false + type: boolean + ini: + - section: community.general.sudosu + key: alternative_method + vars: + - name: ansible_sudosu_alt_method + env: + - name: ANSIBLE_SUDOSU_ALT_METHOD + version_added: 9.2.0 """ @@ -79,13 +97,16 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') or '' prompt = '' if self.get_option('become_pass'): - self.prompt = '[sudo via ansible, key=%s] password:' % self._id + self.prompt = f'[sudo via ansible, key={self._id}] password:' if flags: # this could be simplified, but kept as is for now for backwards string matching flags = flags.replace('-n', '') - prompt = '-p "%s"' % (self.prompt) + prompt = f'-p "{self.prompt}"' user = self.get_option('become_user') or '' if user: - user = '%s' % (user) + user = f'{user}' - return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) + if self.get_option('alt_method'): + return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}" + else: + return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index b7d14aa86d..28011e8cab 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,48 +1,50 @@ -# -*- coding: utf-8 -*- -# (c) 2014, Brian Coca, Josh Drake, et al -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Brian Coca, Josh Drake, et al +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: memcached - short_description: Use memcached DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: memcached +short_description: Use memcached DB for cache +description: + - This cache uses JSON formatted, per host records saved in memcached. +requirements: + - memcache (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in memcached. 
- requirements: - - memcache (python lib) - options: - _uri: - description: - - List of connection information for the memcached DBs - default: ['127.0.0.1:11211'] - type: list - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - List of connection information for the memcached DBs. + default: ['127.0.0.1:11211'] + type: list + elements: string + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import collections import os @@ -50,11 +52,9 @@ import time from multiprocessing import Lock from itertools import chain -from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils.common._collections_compat import MutableSet +from collections.abc import MutableSet from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version from ansible.utils.display import Display try: @@ -175,20 +175,11 @@ class CacheModule(BaseCacheModule): def __init__(self, *args, **kwargs): connection = ['127.0.0.1:11211'] - try: - super(CacheModule, self).__init__(*args, **kwargs) - if self.get_option('_uri'): - connection = self.get_option('_uri') - self._timeout = self.get_option('_timeout') - self._prefix = self.get_option('_prefix') - except KeyError: - # TODO: remove once we no longer support Ansible 2.9 - if not ansible_base_version.startswith('2.9.'): - raise AnsibleError("Do not import CacheModules directly. 
Use ansible.plugins.loader.cache_loader instead.") - if C.CACHE_PLUGIN_CONNECTION: - connection = C.CACHE_PLUGIN_CONNECTION.split(',') - self._timeout = C.CACHE_PLUGIN_TIMEOUT - self._prefix = C.CACHE_PLUGIN_PREFIX + super(CacheModule, self).__init__(*args, **kwargs) + if self.get_option('_uri'): + connection = self.get_option('_uri') + self._timeout = self.get_option('_timeout') + self._prefix = self.get_option('_prefix') if not HAS_MEMCACHE: raise AnsibleError("python-memcached is required for the memcached fact cache") @@ -198,7 +189,7 @@ class CacheModule(BaseCacheModule): self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or []) def _make_key(self, key): - return "{0}{1}".format(self._prefix, key) + return f"{self._prefix}{key}" def _expire_keys(self): if self._timeout > 0: diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 1e549d4d66..6c053138c8 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,51 +1,49 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Brian Coca -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Brian Coca +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: pickle - short_description: Pickle formatted files. +DOCUMENTATION = r""" +name: pickle +short_description: Pickle formatted files +description: + - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. - author: Brian Coca (@bcoca) - options: - _uri: - required: True - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults -''' + - Path in which the cache plugin saves the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: path + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. 
+ env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: float +""" -try: - import cPickle as pickle -except ImportError: - import pickle +import pickle -from ansible.module_utils.six import PY3 from ansible.plugins.cache import BaseFileCacheModule @@ -53,14 +51,12 @@ class CacheModule(BaseFileCacheModule): """ A caching module backed by pickle files. """ + _persistent = False # prevent unnecessary JSON serialization and key munging def _load(self, filepath): # Pickle is a binary format with open(filepath, 'rb') as f: - if PY3: - return pickle.load(f, encoding='bytes') - else: - return pickle.load(f) + return pickle.load(f, encoding='bytes') def _dump(self, value, filepath): with open(filepath, 'wb') as f: diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 3c73d8b5be..d7b596bb32 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,77 +1,78 @@ -# -*- coding: utf-8 -*- -# (c) 2014, Brian Coca, Josh Drake, et al -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2014, Brian Coca, Josh Drake, et al +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: redis - short_description: Use Redis DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: redis +short_description: Use Redis DB for cache +description: + - This cache uses JSON formatted, per host records saved in Redis. +requirements: + - redis>=2.4.5 (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in Redis. - requirements: - - redis>=2.4.5 (python lib) - options: - _uri: - description: - - A colon separated string of connection information for Redis. - - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme). - - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme). - - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. - required: True - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _keyset_name: - description: User defined name for cache keyset name. - default: ansible_cache_keys - env: - - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME - ini: - - key: fact_caching_redis_keyset_name - section: defaults - version_added: 1.3.0 - _sentinel_service_name: - description: The redis sentinel service name (or referenced as cluster name). - env: - - name: ANSIBLE_CACHE_REDIS_SENTINEL - ini: - - key: fact_caching_redis_sentinel - section: defaults - version_added: 1.3.0 - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. 
Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - A colon separated string of connection information for Redis. + - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme). + - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme). + - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. + type: string + required: true + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _keyset_name: + description: User defined name for cache keyset name. + type: string + default: ansible_cache_keys + env: + - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME + ini: + - key: fact_caching_redis_keyset_name + section: defaults + version_added: 1.3.0 + _sentinel_service_name: + description: The redis sentinel service name (or referenced as cluster name). + type: string + env: + - name: ANSIBLE_CACHE_REDIS_SENTINEL + ini: + - key: fact_caching_redis_sentinel + section: defaults + version_added: 1.3.0 + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import re import time import json -from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version from ansible.utils.display import Display try: @@ -99,23 +100,13 @@ class CacheModule(BaseCacheModule): def __init__(self, *args, **kwargs): uri = '' - try: - super(CacheModule, self).__init__(*args, **kwargs) - if self.get_option('_uri'): - uri = self.get_option('_uri') - self._timeout = float(self.get_option('_timeout')) - self._prefix = self.get_option('_prefix') - self._keys_set = self.get_option('_keyset_name') - self._sentinel_service_name = self.get_option('_sentinel_service_name') - except KeyError: - # TODO: remove once we no longer support Ansible 2.9 - if not ansible_base_version.startswith('2.9.'): - raise AnsibleError("Do not import CacheModules directly. 
Use ansible.plugins.loader.cache_loader instead.") - if C.CACHE_PLUGIN_CONNECTION: - uri = C.CACHE_PLUGIN_CONNECTION - self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) - self._prefix = C.CACHE_PLUGIN_PREFIX - self._keys_set = 'ansible_cache_keys' + super(CacheModule, self).__init__(*args, **kwargs) + if self.get_option('_uri'): + uri = self.get_option('_uri') + self._timeout = float(self.get_option('_timeout')) + self._prefix = self.get_option('_prefix') + self._keys_set = self.get_option('_keyset_name') + self._sentinel_service_name = self.get_option('_sentinel_service_name') if not HAS_REDIS: raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'") @@ -137,7 +128,7 @@ class CacheModule(BaseCacheModule): connection = self._parse_connection(self.re_url_conn, uri) self._db = StrictRedis(*connection, **kw) - display.vv('Redis connection: %s' % self._db) + display.vv(f'Redis connection: {self._db}') @staticmethod def _parse_connection(re_patt, uri): @@ -161,7 +152,7 @@ class CacheModule(BaseCacheModule): # format: "localhost:26379;localhost2:26379;0:changeme" connections = uri.split(';') connection_args = connections.pop(-1) - if len(connection_args) > 0: # hanle if no db nr is given + if len(connection_args) > 0: # handle if no db nr is given connection_args = connection_args.split(':') kw['db'] = connection_args.pop(0) try: @@ -170,12 +161,12 @@ class CacheModule(BaseCacheModule): pass # password is optional sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections] - display.vv('\nUsing redis sentinels: %s' % sentinels) + display.vv(f'\nUsing redis sentinels: {sentinels}') scon = Sentinel(sentinels, **kw) try: return scon.master_for(self._sentinel_service_name, socket_timeout=0.2) except Exception as exc: - raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc)) + raise AnsibleError(f'Could not connect to redis sentinel: {exc}') def _make_key(self, key): return self._prefix + key @@ -233,7 +224,7 @@ class CacheModule(BaseCacheModule): def copy(self): # TODO: there is probably a better way to do this in redis - ret = dict([(k, self.get(k)) for k in self.keys()]) + ret = {k: self.get(k) for k in self.keys()} return ret def __getstate__(self): diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index e5062b16d1..52cbf887de 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,48 +1,49 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Brian Coca -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Brian Coca +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: yaml - short_description: YAML formatted files. +DOCUMENTATION = r""" +name: yaml +short_description: YAML formatted files +description: + - This cache uses YAML formatted, per host, files saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses YAML formatted, per host, files saved to the filesystem. 
- author: Brian Coca (@bcoca) - options: - _uri: - required: True - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - Path in which the cache plugin saves the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: string + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer + # TODO: determine whether it is OK to change to: type: float +""" - -import codecs +import os import yaml @@ -57,9 +58,9 @@ class CacheModule(BaseFileCacheModule): """ def _load(self, filepath): - with codecs.open(filepath, 'r', encoding='utf-8') as f: + with open(os.path.abspath(filepath), 'r', encoding='utf-8') as f: return AnsibleLoader(f).get_single_data() def _dump(self, value, filepath): - with codecs.open(filepath, 'w', encoding='utf-8') as f: + with open(os.path.abspath(filepath), 'w', encoding='utf-8') as f: yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False) diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index 0334bee664..294ee4b378 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -1,43 +1,45 @@ -# -*- coding: utf-8 -*- -# (c) 2018 Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cgroup_memory_recap - type: aggregate - requirements: - - whitelist in configuration - - cgroups - short_description: Profiles maximum memory usage of tasks and full execution using cgroups - description: - - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups - notes: - - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...) - - This cgroup should only be used by ansible to get accurate results - - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile) - options: - max_mem_file: - required: True - description: Path to cgroups C(memory.max_usage_in_bytes) file. 
Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes) - env: - - name: CGROUP_MAX_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: max_mem_file - cur_mem_file: - required: True - description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes) - env: - - name: CGROUP_CUR_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: cur_mem_file -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cgroup_memory_recap +type: aggregate +requirements: + - whitelist in configuration + - cgroups +short_description: Profiles maximum memory usage of tasks and full execution using cgroups +description: + - This is an Ansible callback plugin that profiles maximum memory usage of Ansible and individual tasks, and displays a + recap at the end using cgroups. +notes: + - Requires ansible to be run from within a C(cgroup), such as with C(cgexec -g memory:ansible_profile ansible-playbook ...). + - This C(cgroup) should only be used by Ansible to get accurate results. + - To create the C(cgroup), first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile). +options: + max_mem_file: + required: true + description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). + type: str + env: + - name: CGROUP_MAX_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: max_mem_file + cur_mem_file: + required: true + description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). + type: str + env: + - name: CGROUP_CUR_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: cur_mem_file +""" import time import threading @@ -111,7 +113,7 @@ class CallbackModule(CallbackBase): max_results = int(f.read().strip()) / 1024 / 1024 self._display.banner('CGROUP MEMORY RECAP') - self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results) + self._display.display(f'Execution Maximum: {max_results:0.2f}MB\n\n') for task, memory in self.task_results: - self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory)) + self._display.display(f'{task.get_name()} ({task._uuid}): {memory:0.2f}MB') diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index c85cc60cda..f390a947a4 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,22 +1,21 @@ -# -*- coding: utf-8 -*- -# (C) 2012, Michael DeHaan, -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (C) 2012, Michael DeHaan, +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: context_demo - type: aggregate - short_description: demo callback that adds play/task context - description: - - Displays some play and task context along with normal output - - This is mostly for demo purposes - requirements: - - whitelist in configuration -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: context_demo +type: aggregate +short_description: Demo callback that adds play/task context +description: + - 
Displays some play and task context along with normal output.
+  - This is mostly for demo purposes.
+requirements:
+  - whitelist in configuration
+"""

 from ansible.plugins.callback import CallbackBase

@@ -37,15 +36,15 @@ class CallbackModule(CallbackBase):
         self.play = None

     def v2_on_any(self, *args, **kwargs):
-        self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task))
+        self._display.display(f"--- play: {getattr(self.play, 'name', None)} task: {self.task} ---")

         self._display.display("   --- ARGS ")
         for i, a in enumerate(args):
-            self._display.display('     %s: %s' % (i, a))
+            self._display.display(f'     {i}: {a}')

         self._display.display("   --- KWARGS ")
         for k in kwargs:
-            self._display.display('     %s: %s' % (k, kwargs[k]))
+            self._display.display(f'     {k}: {kwargs[k]}')

     def v2_playbook_on_play_start(self, play):
         self.play = play
diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py
index 3b6e5e7ad4..d5fe334a49 100644
--- a/plugins/callback/counter_enabled.py
+++ b/plugins/callback/counter_enabled.py
@@ -1,32 +1,30 @@
-# -*- coding: utf-8 -*-
-# (c) 2018, Ivan Aragones Muniesa
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2018, Ivan Aragones Muniesa
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 '''
     Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
 '''
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: counter_enabled
-    type: stdout
-    short_description: adds counters to the output items (tasks and hosts/task)
-    description:
-        - Use this callback when you need a kind of progress bar on a large environments.
-        - You will know how many tasks has the playbook to run, and which one is actually running.
-        - You will know how many hosts may run a task, and which of them is actually running.
-    extends_documentation_fragment:
-        - default_callback
-    requirements:
-        - set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: counter_enabled
+type: stdout
+short_description: Adds counters to the output items (tasks and hosts/task)
+description:
+  - Use this callback when you need a kind of progress bar in large environments.
+  - You can see how many tasks the playbook has to run, and which one is actually running.
+  - You can see how many hosts may run a task, and which of them is actually running.
+extends_documentation_fragment: + - default_callback +requirements: + - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled)) +""" from ansible import constants as C from ansible.plugins.callback import CallbackBase from ansible.utils.color import colorize, hostcolor -from ansible.template import Templar from ansible.playbook.task_include import TaskInclude @@ -45,6 +43,8 @@ class CallbackModule(CallbackBase): _task_total = 0 _host_counter = 1 _host_total = 0 + _current_batch_total = 0 + _previous_batch_total = 0 def __init__(self): super(CallbackModule, self).__init__() @@ -67,17 +67,20 @@ class CallbackModule(CallbackBase): def v2_playbook_on_play_start(self, play): name = play.get_name().strip() if not name: - msg = u"play" + msg = "play" else: - msg = u"PLAY [%s]" % name + msg = f"PLAY [{name}]" self._play = play self._display.banner(msg) self._play = play + self._previous_batch_total = self._current_batch_total + self._current_batch_total = self._previous_batch_total + len(self._all_vars()['vars']['ansible_play_batch']) self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all']) self._task_total = len(self._play.get_tasks()[0]) + self._task_counter = 1 def v2_playbook_on_stats(self, stats): self._display.banner("PLAY RECAP") @@ -86,25 +89,17 @@ class CallbackModule(CallbackBase): for host in hosts: stat = stats.summarize(host) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat), - colorize(u'ok', stat['ok'], C.COLOR_OK), - colorize(u'changed', stat['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', stat['failures'], C.COLOR_ERROR), - colorize(u'rescued', stat['rescued'], C.COLOR_OK), - colorize(u'ignored', stat['ignored'], C.COLOR_WARN)), + self._display.display( + f"{hostcolor(host, stat)} : {colorize('ok', stat['ok'], C.COLOR_OK)} {colorize('changed', stat['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', stat['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', stat['rescued'], C.COLOR_OK)} {colorize('ignored', stat['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat, False), - colorize(u'ok', stat['ok'], None), - colorize(u'changed', stat['changed'], None), - colorize(u'unreachable', stat['unreachable'], None), - colorize(u'failed', stat['failures'], None), - colorize(u'rescued', stat['rescued'], None), - colorize(u'ignored', stat['ignored'], None)), + self._display.display( + f"{hostcolor(host, stat, False)} : {colorize('ok', stat['ok'], None)} {colorize('changed', stat['changed'], None)} " + f"{colorize('unreachable', stat['unreachable'], None)} {colorize('failed', stat['failures'], None)} " + f"{colorize('rescued', stat['rescued'], None)} {colorize('ignored', stat['ignored'], None)}", log_only=True ) @@ -119,12 +114,14 @@ class CallbackModule(CallbackBase): for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {_custom_stats}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + _custom_stats_run = 
self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')
+            self._display.display(f'\tRUN: {_custom_stats_run}')

         self._display.display("", screen_only=True)

     def v2_playbook_on_task_start(self, task, is_conditional):
@@ -138,14 +135,14 @@ class CallbackModule(CallbackBase):
         # that they can secure this if they feel that their stdout is insecure
         # (shoulder surfing, logging stdout straight to a file, etc).
         if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
-            args = ', '.join(('%s=%s' % a for a in task.args.items()))
-            args = ' %s' % args
-            self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+            args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
+            args = f' {args}'
+            self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]")
         if self._display.verbosity >= 2:
             path = task.get_path()
             if path:
-                self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
-            self._host_counter = 0
+                self._display.display(f"task path: {path}", color=C.COLOR_DEBUG)
+            self._host_counter = self._previous_batch_total
         self._task_counter += 1

     def v2_runner_on_ok(self, result):
@@ -161,15 +158,15 @@ class CallbackModule(CallbackBase):
             return
         elif result._result.get('changed', False):
             if delegated_vars:
-                msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+                msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
             else:
-                msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+                msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
             color = C.COLOR_CHANGED
         else:
             if delegated_vars:
-                msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+                msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
             else:
-                msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+                msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
             color = C.COLOR_OK

         self._handle_warnings(result._result)
@@ -180,7 +177,7 @@ class CallbackModule(CallbackBase):
         self._clean_results(result._result, result._task.action)

         if self._run_is_verbose(result):
-            msg += " => %s" % (self._dump_results(result._result),)
+            msg += f" => {self._dump_results(result._result)}"
         self._display.display(msg, color=color)

     def v2_runner_on_failed(self, result, ignore_errors=False):
@@ -201,14 +198,16 @@ class CallbackModule(CallbackBase):

         else:
             if delegated_vars:
-                self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
-                                                                                  result._host.get_name(), delegated_vars['ansible_host'],
-                                                                                  self._dump_results(result._result)),
-                                      color=C.COLOR_ERROR)
+                self._display.display(
+                    f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> "
+                    f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}",
+                    color=C.COLOR_ERROR
+                )
             else:
-                self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
-                                                                            result._host.get_name(), self._dump_results(result._result)),
-                                      color=C.COLOR_ERROR)
+                self._display.display(
+                    f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! 
=> {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) if ignore_errors: self._display.display("...ignoring", color=C.COLOR_SKIP) @@ -226,9 +225,9 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: self._process_items(result) else: - msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" if self._run_is_verbose(result): - msg += " => %s" % self._dump_results(result._result) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): @@ -239,11 +238,13 @@ class CallbackModule(CallbackBase): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) else: - self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py new file mode 100644 index 0000000000..b0315829b5 --- /dev/null +++ b/plugins/callback/default_without_diff.py @@ -0,0 +1,43 @@ + +# Copyright (c) 2024, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: default_without_diff +type: stdout +short_description: The default ansible callback without diff output +version_added: 8.4.0 +description: + - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without showing diff output. + This can be useful when using another callback which sends more detailed information to another service, like the L(ARA, + https://ara.recordsansible.org/) callback, and you want diff output sent to that plugin but not shown on the console output. 
+author: Felix Fontein (@felixfontein) +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + +EXAMPLES = r""" +# Enable callback in ansible.cfg: +ansible_config: | + [defaults] + stdout_callback = community.general.default_without_diff + +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.default_without_diff' + + def v2_on_file_diff(self, result): + pass diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index af8464631c..de50d97ce1 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,24 +1,23 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Dag Wieers -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Dag Wieers +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: dense type: stdout -short_description: minimal stdout output +short_description: Minimal stdout output extends_documentation_fragment: -- default_callback + - default_callback description: -- When in verbose mode it will act the same as the default callback + - When in verbose mode it acts the same as the default callback. 
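
Before the dense plugin's own changes continue below, a note on the new default_without_diff plugin above: it is a compact template for muting any single callback event — inherit from the default stdout callback and override the corresponding handler with a no-op. As a hypothetical sketch (not part of this PR; the plugin name is made up, though `v2_runner_item_on_ok` is a standard callback hook), the same recipe applied to loop-item output:

```python
# Hypothetical sketch: same subclass-and-no-op pattern as
# default_without_diff, applied to a different event.
from ansible.plugins.callback.default import CallbackModule as Default


class CallbackModule(Default):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'local.testing.default_without_items'  # made-up FQCN

    def v2_runner_item_on_ok(self, result):
        pass  # swallow successful loop-item lines; everything else unchanged
```
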
author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- set as stdout in configuration -''' + - set as stdout in configuration +""" HAS_OD = False try: @@ -27,8 +26,7 @@ try: except ImportError: pass -from ansible.module_utils.six import binary_type, text_type -from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence +from collections.abc import MutableMapping, MutableSequence from ansible.plugins.callback.default import CallbackModule as CallbackModule_default from ansible.utils.color import colorize, hostcolor from ansible.utils.display import Display @@ -194,7 +192,7 @@ class CallbackModule(CallbackModule_default): self.disabled = True def __del__(self): - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") def _add_host(self, result, status): name = result._host.get_name() @@ -232,17 +230,17 @@ class CallbackModule(CallbackModule_default): # Remove non-essential attributes for attr in self.removed_attributes: if attr in result: - del(result[attr]) + del result[attr] # Remove empty attributes (list, dict, str) for attr in result.copy(): - if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)): + if isinstance(result[attr], (MutableSequence, MutableMapping, bytes, str)): if not result[attr]: - del(result[attr]) + del result[attr] def _handle_exceptions(self, result): if 'exception' in result: - # Remove the exception from the result so it's not shown every time + # Remove the exception from the result so it is not shown every time del result['exception'] if self._display.verbosity == 1: @@ -251,7 +249,7 @@ class CallbackModule(CallbackModule_default): def _display_progress(self, result=None): # Always rewrite the complete line sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline) - sys.stdout.write('%s %d:' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}:') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -259,22 +257,18 @@ class CallbackModule(CallbackModule_default): for name in self.hosts: sys.stdout.write(' ') if self.hosts[name].get('delegate', None): - sys.stdout.write(self.hosts[name]['delegate'] + '>') + sys.stdout.write(f"{self.hosts[name]['delegate']}>") sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) sys.stdout.flush() -# if result._result.get('diff', False): -# sys.stdout.write('\n' + vt100.linewrap) sys.stdout.write(vt100.linewrap) -# self.keep = True - def _display_task_banner(self): if not self.shown_title: self.shown_title = True sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) - sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}') + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -283,7 +277,7 @@ class CallbackModule(CallbackModule_default): def _display_results(self, result, status): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) 
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False @@ -308,16 +302,16 @@ class CallbackModule(CallbackModule_default): if result._task.loop and 'results' in result._result: self._process_items(result) else: - sys.stdout.write(colors[status] + status + ': ') + sys.stdout.write(f"{colors[status] + status}: ") delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host']) + sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") else: sys.stdout.write(result._host.get_name()) - sys.stdout.write(': ' + dump + '\n') - sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) + sys.stdout.write(f": {dump}\n") + sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}") sys.stdout.flush() if status == 'changed': @@ -326,7 +320,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_play_start(self, play): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.bold}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold) @@ -340,14 +334,14 @@ class CallbackModule(CallbackModule_default): name = play.get_name().strip() if not name: name = 'unnamed' - sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_task_start(self, task, is_conditional): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: # Do not clear line, since we want to retain the previous output sys.stdout.write(vt100.restore + vt100.reset + vt100.underline) @@ -364,14 +358,14 @@ class CallbackModule(CallbackModule_default): self.count['task'] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() def v2_playbook_on_handler_task_start(self, task): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) @@ -387,7 +381,7 @@ class CallbackModule(CallbackModule_default): self.count[self.type] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' 
% (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -450,13 +444,13 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_no_hosts_remaining(self): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False - sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.white + vt100.redbg}NO MORE HOSTS LEFT") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_include(self, included_file): @@ -464,7 +458,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_stats(self, stats): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -475,22 +469,16 @@ class CallbackModule(CallbackModule_default): sys.stdout.write(vt100.bold + vt100.underline) sys.stdout.write('SUMMARY') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() hosts = sorted(stats.processed.keys()) for h in hosts: t = stats.summarize(h) self._display.display( - u"%s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN), - ), + f"{hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index b288ee4b97..c94fe25093 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -1,607 +1,601 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Trevor Highfill -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Trevor Highfill +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: diy - type: stdout - short_description: Customize the output - version_added: 0.2.0 - description: - - Callback plugin that allows you to supply your own custom callback templates to be output. 
- author: Trevor Highfill (@theque5t) - extends_documentation_fragment: - - default_callback - notes: - - Uses the C(default) callback plugin output when a custom callback message(C(msg)) is not provided. - - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options. - The dictionary is only available in the templating context for the options. It is not a variable that is available via the other - various execution contexts, such as playbook, play, task etc. - - Options being set by their respective variable input can only be set using the variable if the variable was set in a context that is available to the - respective callback. - Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output - the top level variable names available to the callback. - - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example, - C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}") - - "**Condition** for all C(msg) options: - if value C(is None or omit), - then the option is not being used. - **Effect**: use of the C(default) callback plugin for output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is not greater than 0), - then the option is being used without output. - **Effect**: suppress output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is greater than 0), - then the option is being used with output. - **Effect**: render value as template and output" - - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan), - C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)" - seealso: - - name: default – default Ansible screen output - description: The official documentation on the B(default) callback plugin. - link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html - requirements: - - set as stdout_callback in configuration - options: - on_any_msg: - description: Output to be used for callback on_any. - ini: - - section: callback_diy - key: on_any_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG - vars: - - name: ansible_callback_diy_on_any_msg - type: str +DOCUMENTATION = r""" +name: diy +type: stdout +short_description: Customize the output +version_added: 0.2.0 +description: + - Callback plugin that allows you to supply your own custom callback templates to be output. +author: Trevor Highfill (@theque5t) +extends_documentation_fragment: + - default_callback +notes: + - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided. + - Makes the callback event data available using the C(ansible_callback_diy) dictionary, which can be used in the templating + context for the options. The dictionary is only available in the templating context for the options. It is not a variable + that is available using the other various execution contexts, such as playbook, play, task, and so on. + - Options being set by their respective variable input can only be set using the variable if the variable was set in a context + that is available to the respective callback. 
Use the C(ansible_callback_diy) dictionary to see what is available to a + callback. Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available + to the callback. + - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For + example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"). + - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of + the C(default) callback plugin for output.' + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the + option is being used without output. B(Effect): suppress output.' + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option + is being used with output. B(Effect): render value as template and output.' + - 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), + V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), + V(bright magenta), V(normal).' +seealso: + - name: default – default Ansible screen output + description: The official documentation on the B(default) callback plugin. + link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html +requirements: + - set as stdout_callback in configuration +options: + on_any_msg: + description: Output to be used for callback on_any. + ini: + - section: callback_diy + key: on_any_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG + vars: + - name: ansible_callback_diy_on_any_msg + type: str - on_any_msg_color: - description: - - Output color to be used for I(on_any_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: on_any_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR - vars: - - name: ansible_callback_diy_on_any_msg_color - type: str + on_any_msg_color: + description: + - Output color to be used for O(on_any_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_any_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR + vars: + - name: ansible_callback_diy_on_any_msg_color + type: str - runner_on_failed_msg: - description: Output to be used for callback runner_on_failed. - ini: - - section: callback_diy - key: runner_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_on_failed_msg - type: str + runner_on_failed_msg: + description: Output to be used for callback runner_on_failed. + ini: + - section: callback_diy + key: runner_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_on_failed_msg + type: str - runner_on_failed_msg_color: - description: - - Output color to be used for I(runner_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_failed_msg_color - type: str + runner_on_failed_msg_color: + description: + - Output color to be used for O(runner_on_failed_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_failed_msg_color + type: str - runner_on_ok_msg: - description: Output to be used for callback runner_on_ok. - ini: - - section: callback_diy - key: runner_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_on_ok_msg - type: str + runner_on_ok_msg: + description: Output to be used for callback runner_on_ok. + ini: + - section: callback_diy + key: runner_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_on_ok_msg + type: str - runner_on_ok_msg_color: - description: - - Output color to be used for I(runner_on_ok_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_ok_msg_color - type: str + runner_on_ok_msg_color: + description: + - Output color to be used for O(runner_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_ok_msg_color + type: str - runner_on_skipped_msg: - description: Output to be used for callback runner_on_skipped. - ini: - - section: callback_diy - key: runner_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_on_skipped_msg - type: str + runner_on_skipped_msg: + description: Output to be used for callback runner_on_skipped. + ini: + - section: callback_diy + key: runner_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_on_skipped_msg + type: str - runner_on_skipped_msg_color: - description: - - Output color to be used for I(runner_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_skipped_msg_color - type: str + runner_on_skipped_msg_color: + description: + - Output color to be used for O(runner_on_skipped_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_skipped_msg_color + type: str - runner_on_unreachable_msg: - description: Output to be used for callback runner_on_unreachable. - ini: - - section: callback_diy - key: runner_on_unreachable_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg - type: str + runner_on_unreachable_msg: + description: Output to be used for callback runner_on_unreachable. + ini: + - section: callback_diy + key: runner_on_unreachable_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg + type: str - runner_on_unreachable_msg_color: - description: - - Output color to be used for I(runner_on_unreachable_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: runner_on_unreachable_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg_color - type: str + runner_on_unreachable_msg_color: + description: + - Output color to be used for O(runner_on_unreachable_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_unreachable_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg_color + type: str - playbook_on_start_msg: - description: Output to be used for callback playbook_on_start. - ini: - - section: callback_diy - key: playbook_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_start_msg - type: str + playbook_on_start_msg: + description: Output to be used for callback playbook_on_start. + ini: + - section: callback_diy + key: playbook_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_start_msg + type: str - playbook_on_start_msg_color: - description: - - Output color to be used for I(playbook_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_start_msg_color - type: str + playbook_on_start_msg_color: + description: + - Output color to be used for O(playbook_on_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_start_msg_color + type: str - playbook_on_notify_msg: - description: Output to be used for callback playbook_on_notify. - ini: - - section: callback_diy - key: playbook_on_notify_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG - vars: - - name: ansible_callback_diy_playbook_on_notify_msg - type: str + playbook_on_notify_msg: + description: Output to be used for callback playbook_on_notify. + ini: + - section: callback_diy + key: playbook_on_notify_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG + vars: + - name: ansible_callback_diy_playbook_on_notify_msg + type: str - playbook_on_notify_msg_color: - description: - - Output color to be used for I(playbook_on_notify_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_notify_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_notify_msg_color - type: str + playbook_on_notify_msg_color: + description: + - Output color to be used for O(playbook_on_notify_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_notify_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_notify_msg_color + type: str - playbook_on_no_hosts_matched_msg: - description: Output to be used for callback playbook_on_no_hosts_matched. 
- ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg - type: str + playbook_on_no_hosts_matched_msg: + description: Output to be used for callback playbook_on_no_hosts_matched. + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg + type: str - playbook_on_no_hosts_matched_msg_color: - description: - - Output color to be used for I(playbook_on_no_hosts_matched_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color - type: str + playbook_on_no_hosts_matched_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_matched_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color + type: str - playbook_on_no_hosts_remaining_msg: - description: Output to be used for callback playbook_on_no_hosts_remaining. - ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg - type: str + playbook_on_no_hosts_remaining_msg: + description: Output to be used for callback playbook_on_no_hosts_remaining. + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg + type: str - playbook_on_no_hosts_remaining_msg_color: - description: - - Output color to be used for I(playbook_on_no_hosts_remaining_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color - type: str + playbook_on_no_hosts_remaining_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_remaining_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color + type: str - playbook_on_task_start_msg: - description: Output to be used for callback playbook_on_task_start. - ini: - - section: callback_diy - key: playbook_on_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg - type: str + playbook_on_task_start_msg: + description: Output to be used for callback playbook_on_task_start. 
+ ini: + - section: callback_diy + key: playbook_on_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg + type: str - playbook_on_task_start_msg_color: - description: - - Output color to be used for I(playbook_on_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg_color - type: str + playbook_on_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg_color + type: str - playbook_on_handler_task_start_msg: - description: Output to be used for callback playbook_on_handler_task_start. - ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg - type: str + playbook_on_handler_task_start_msg: + description: Output to be used for callback playbook_on_handler_task_start. + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg + type: str - playbook_on_handler_task_start_msg_color: - description: - - Output color to be used for I(playbook_on_handler_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color - type: str + playbook_on_handler_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_handler_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color + type: str - playbook_on_vars_prompt_msg: - description: Output to be used for callback playbook_on_vars_prompt. - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg - type: str + playbook_on_vars_prompt_msg: + description: Output to be used for callback playbook_on_vars_prompt. + ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg + type: str - playbook_on_vars_prompt_msg_color: - description: - - Output color to be used for I(playbook_on_vars_prompt_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color - type: str + playbook_on_vars_prompt_msg_color: + description: + - Output color to be used for O(playbook_on_vars_prompt_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color + type: str - playbook_on_play_start_msg: - description: Output to be used for callback playbook_on_play_start. - ini: - - section: callback_diy - key: playbook_on_play_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg - type: str + playbook_on_play_start_msg: + description: Output to be used for callback playbook_on_play_start. + ini: + - section: callback_diy + key: playbook_on_play_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg + type: str - playbook_on_play_start_msg_color: - description: - - Output color to be used for I(playbook_on_play_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_play_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg_color - type: str + playbook_on_play_start_msg_color: + description: + - Output color to be used for O(playbook_on_play_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_play_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg_color + type: str - playbook_on_stats_msg: - description: Output to be used for callback playbook_on_stats. - ini: - - section: callback_diy - key: playbook_on_stats_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG - vars: - - name: ansible_callback_diy_playbook_on_stats_msg - type: str + playbook_on_stats_msg: + description: Output to be used for callback playbook_on_stats. + ini: + - section: callback_diy + key: playbook_on_stats_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG + vars: + - name: ansible_callback_diy_playbook_on_stats_msg + type: str - playbook_on_stats_msg_color: - description: - - Output color to be used for I(playbook_on_stats_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_stats_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_stats_msg_color - type: str + playbook_on_stats_msg_color: + description: + - Output color to be used for O(playbook_on_stats_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_stats_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_stats_msg_color + type: str - on_file_diff_msg: - description: Output to be used for callback on_file_diff. 
- ini: - - section: callback_diy - key: on_file_diff_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG - vars: - - name: ansible_callback_diy_on_file_diff_msg - type: str + on_file_diff_msg: + description: Output to be used for callback on_file_diff. + ini: + - section: callback_diy + key: on_file_diff_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG + vars: + - name: ansible_callback_diy_on_file_diff_msg + type: str - on_file_diff_msg_color: - description: - - Output color to be used for I(on_file_diff_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: on_file_diff_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR - vars: - - name: ansible_callback_diy_on_file_diff_msg_color - type: str + on_file_diff_msg_color: + description: + - Output color to be used for O(on_file_diff_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_file_diff_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR + vars: + - name: ansible_callback_diy_on_file_diff_msg_color + type: str - playbook_on_include_msg: - description: Output to be used for callback playbook_on_include. - ini: - - section: callback_diy - key: playbook_on_include_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG - vars: - - name: ansible_callback_diy_playbook_on_include_msg - type: str + playbook_on_include_msg: + description: Output to be used for callback playbook_on_include. + ini: + - section: callback_diy + key: playbook_on_include_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG + vars: + - name: ansible_callback_diy_playbook_on_include_msg + type: str - playbook_on_include_msg_color: - description: - - Output color to be used for I(playbook_on_include_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_include_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_include_msg_color - type: str + playbook_on_include_msg_color: + description: + - Output color to be used for O(playbook_on_include_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_include_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_include_msg_color + type: str - runner_item_on_ok_msg: - description: Output to be used for callback runner_item_on_ok. - ini: - - section: callback_diy - key: runner_item_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg - type: str + runner_item_on_ok_msg: + description: Output to be used for callback runner_item_on_ok. + ini: + - section: callback_diy + key: runner_item_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg + type: str - runner_item_on_ok_msg_color: - description: - - Output color to be used for I(runner_item_on_ok_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg_color - type: str + runner_item_on_ok_msg_color: + description: + - Output color to be used for O(runner_item_on_ok_msg). 
+ - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg_color + type: str - runner_item_on_failed_msg: - description: Output to be used for callback runner_item_on_failed. - ini: - - section: callback_diy - key: runner_item_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg - type: str + runner_item_on_failed_msg: + description: Output to be used for callback runner_item_on_failed. + ini: + - section: callback_diy + key: runner_item_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg + type: str - runner_item_on_failed_msg_color: - description: - - Output color to be used for I(runner_item_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg_color - type: str + runner_item_on_failed_msg_color: + description: + - Output color to be used for O(runner_item_on_failed_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg_color + type: str - runner_item_on_skipped_msg: - description: Output to be used for callback runner_item_on_skipped. - ini: - - section: callback_diy - key: runner_item_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg - type: str + runner_item_on_skipped_msg: + description: Output to be used for callback runner_item_on_skipped. + ini: + - section: callback_diy + key: runner_item_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg + type: str - runner_item_on_skipped_msg_color: - description: - - Output color to be used for I(runner_item_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg_color - type: str + runner_item_on_skipped_msg_color: + description: + - Output color to be used for O(runner_item_on_skipped_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg_color + type: str - runner_retry_msg: - description: Output to be used for callback runner_retry. - ini: - - section: callback_diy - key: runner_retry_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG - vars: - - name: ansible_callback_diy_runner_retry_msg - type: str + runner_retry_msg: + description: Output to be used for callback runner_retry. 
+ ini: + - section: callback_diy + key: runner_retry_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG + vars: + - name: ansible_callback_diy_runner_retry_msg + type: str - runner_retry_msg_color: - description: - - Output color to be used for I(runner_retry_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_retry_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_retry_msg_color - type: str + runner_retry_msg_color: + description: + - Output color to be used for O(runner_retry_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_retry_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_retry_msg_color + type: str - runner_on_start_msg: - description: Output to be used for callback runner_on_start. - ini: - - section: callback_diy - key: runner_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG - vars: - - name: ansible_callback_diy_runner_on_start_msg - type: str + runner_on_start_msg: + description: Output to be used for callback runner_on_start. + ini: + - section: callback_diy + key: runner_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG + vars: + - name: ansible_callback_diy_runner_on_start_msg + type: str - runner_on_start_msg_color: - description: - - Output color to be used for I(runner_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_start_msg_color - type: str + runner_on_start_msg_color: + description: + - Output color to be used for O(runner_on_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_start_msg_color + type: str - runner_on_no_hosts_msg: - description: Output to be used for callback runner_on_no_hosts. - ini: - - section: callback_diy - key: runner_on_no_hosts_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg - type: str + runner_on_no_hosts_msg: + description: Output to be used for callback runner_on_no_hosts. + ini: + - section: callback_diy + key: runner_on_no_hosts_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg + type: str - runner_on_no_hosts_msg_color: - description: - - Output color to be used for I(runner_on_no_hosts_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_no_hosts_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg_color - type: str + runner_on_no_hosts_msg_color: + description: + - Output color to be used for O(runner_on_no_hosts_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_on_no_hosts_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg_color + type: str - playbook_on_setup_msg: - description: Output to be used for callback playbook_on_setup. - ini: - - section: callback_diy - key: playbook_on_setup_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG - vars: - - name: ansible_callback_diy_playbook_on_setup_msg - type: str + playbook_on_setup_msg: + description: Output to be used for callback playbook_on_setup. + ini: + - section: callback_diy + key: playbook_on_setup_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG + vars: + - name: ansible_callback_diy_playbook_on_setup_msg + type: str - playbook_on_setup_msg_color: - description: - - Output color to be used for I(playbook_on_setup_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_setup_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_setup_msg_color - type: str -''' + playbook_on_setup_msg_color: + description: + - Output color to be used for O(playbook_on_setup_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_setup_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_setup_msg_color + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: > # Enable plugin [defaults] @@ -622,11 +616,11 @@ ansible.cfg: > # Newline after every callback # on_any_msg='{{ " " | join("\n") }}' -playbook.yml: > +playbook.yml: >- --- - name: "Default plugin output: play example" hosts: localhost - gather_facts: no + gather_facts: false tasks: - name: Default plugin output ansible.builtin.debug: @@ -634,7 +628,7 @@ playbook.yml: > - name: Override from play vars hosts: localhost - gather_facts: no + gather_facts: false vars: ansible_connection: local green: "\e[0m\e[38;5;82m" @@ -712,7 +706,7 @@ playbook.yml: > - name: Using alias vars (see ansible.cfg) ansible.builtin.debug: msg: - when: False + when: false vars: ansible_callback_diy_playbook_on_task_start_msg: "" on_skipped_msg: "DIY output(via task vars): skipped example:\n\e[0m\e[38;5;4m\u25b6\u25b6 {{ ansible_callback_diy.result.task.name }}\n" @@ -781,19 +775,21 @@ playbook.yml: > {{ white }}{{ ansible_callback_diy[key] }} {% endfor %} -''' +""" import sys from contextlib import contextmanager -from ansible import constants as C -from ansible.playbook.task_include import TaskInclude -from ansible.plugins.callback import CallbackBase -from ansible.utils.color import colorize, hostcolor from ansible.template import Templar from ansible.vars.manager import VariableManager from ansible.plugins.callback.default import CallbackModule as Default from ansible.module_utils.common.text.converters import to_text +try: + from ansible.template import trust_as_template # noqa: F401, pylint: disable=unused-import + SUPPORTS_DATA_TAGGING = True +except ImportError: + SUPPORTS_DATA_TAGGING = False + class DummyStdout(object): def flush(self): @@ -831,9 +827,9 @@ class CallbackModule(Default): _callback_options = ['msg', 'msg_color'] for option in _callback_options: - _option_name = '%s_%s' % (_callback_type, option) + _option_name = f'{_callback_type}_{option}' _option_template = variables.get( - self.DIY_NS + "_" + _option_name, + 
f"{self.DIY_NS}_{_option_name}", self.get_option(_option_name) ) _ret.update({option: self._template( @@ -847,7 +843,10 @@ class CallbackModule(Default): return _ret def _using_diy(self, spec): - return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit']) + sentinel = object() + omit = spec['vars'].get('omit', sentinel) + # With Data Tagging, omit is sentinel + return (spec['msg'] is not None) and (spec['msg'] != omit or omit is sentinel) def _parent_has_callback(self): return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name) @@ -870,7 +869,7 @@ class CallbackModule(Default): handler=None, result=None, stats=None, remove_attr_ref_loop=True): def _get_value(obj, attr=None, method=None): if attr: - return getattr(obj, attr, getattr(obj, "_" + attr, None)) + return getattr(obj, attr, getattr(obj, f"_{attr}", None)) if method: _method = getattr(obj, method) @@ -903,7 +902,7 @@ class CallbackModule(Default): ) _ret.update(_all) - _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()})) + _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: {} if SUPPORTS_DATA_TAGGING else CallbackDIYDict()})) _ret[self.DIY_NS].update({'playbook': {}}) _playbook_attributes = ['entries', 'file_name', 'basedir'] diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 095c0993ca..82478b9e7d 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -1,72 +1,72 @@ -# (C) 2021, Victor Martinez -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Victor Martinez +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: elastic - type: notification - short_description: Create distributed traces for each Ansible task in Elastic APM - version_added: 3.8.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: elastic +type: notification +short_description: Create distributed traces for each Ansible task in Elastic APM +version_added: 3.8.0 +description: + - This callback creates distributed traces for each Ansible task in Elastic APM. + - You can configure the plugin with environment variables. + - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task in Elastic APM. - - You can configure the plugin with environment variables. - - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - apm_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: ELASTIC_APM_SERVICE_NAME - apm_server_url: - type: str - description: - - Use the APM server and its environment variables. 
- env: - - name: ELASTIC_APM_SERVER_URL - apm_secret_token: - type: str - description: - - Use the APM server token - env: - - name: ELASTIC_APM_SECRET_TOKEN - apm_api_key: - type: str - description: - - Use the APM API key - env: - - name: ELASTIC_APM_API_KEY - apm_verify_server_cert: - default: true - type: bool - description: - - Verifies the SSL certificate if an HTTPS connection. - env: - - name: ELASTIC_APM_VERIFY_SERVER_CERT - traceparent: - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - requirements: - - elastic-apm (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + apm_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: ELASTIC_APM_SERVICE_NAME + apm_server_url: + type: str + description: + - Use the APM server and its environment variables. + env: + - name: ELASTIC_APM_SERVER_URL + apm_secret_token: + type: str + description: + - Use the APM server token. + env: + - name: ELASTIC_APM_SECRET_TOKEN + apm_api_key: + type: str + description: + - Use the APM API key. + env: + - name: ELASTIC_APM_API_KEY + apm_verify_server_cert: + default: true + type: bool + description: + - Verifies the SSL certificate if an HTTPS connection. + env: + - name: ELASTIC_APM_VERIFY_SERVER_CERT + traceparent: + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). + env: + - name: TRACEPARENT +requirements: + - elastic-apm (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.elastic @@ -75,7 +75,7 @@ examples: | export ELASTIC_APM_SERVER_URL= export ELASTIC_APM_SERVICE_NAME=your_service_name export ELASTIC_APM_API_KEY=your_APM_API_KEY -''' +""" import getpass import socket @@ -83,10 +83,11 @@ import time import uuid from collections import OrderedDict +from contextlib import closing from os.path import basename from ansible.errors import AnsibleError, AnsibleRuntimeError -from ansible.module_utils.six import raise_from +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.plugins.callback import CallbackBase try: @@ -116,7 +117,7 @@ class TaskData: if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -139,7 +140,6 @@ class HostData: class ElasticSource(object): def __init__(self, display): self.ansible_playbook = "" - self.ansible_version = None self.session = str(uuid.uuid4()) self.host = socket.gethostname() try: @@ -164,7 +164,7 @@ class ElasticSource(object): args = None if not task.no_log and not hide_task_arguments: - args = ', '.join(('%s=%s' % a for a in task.args.items())) + args = ', '.join((f'{k}={v}' for k, v in task.args.items())) tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) @@ -182,9 +182,6 @@ class ElasticSource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - task.add_host(HostData(host_uuid, host_name, status, 
result)) def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name, @@ -200,29 +197,29 @@ class ElasticSource(object): apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key) if apm_cli: - instrument() # Only call this once, as early as possible. - if traceparent: - parent = trace_parent_from_string(traceparent) - apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time) - else: - apm_cli.begin_transaction("Session", start=parent_start_time) - # Populate trace metadata attributes - if self.ansible_version is not None: - label(ansible_version=self.ansible_version) - label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) - if self.ip_address is not None: - label(ansible_host_ip=self.ip_address) + with closing(apm_cli): + instrument() # Only call this once, as early as possible. + if traceparent: + parent = trace_parent_from_string(traceparent) + apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time) + else: + apm_cli.begin_transaction("Session", start=parent_start_time) + # Populate trace metadata attributes + label(ansible_version=ansible_version) + label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) + if self.ip_address is not None: + label(ansible_host_ip=self.ip_address) - for task_data in tasks: - for host_uuid, host_data in task_data.host_data.items(): - self.create_span_data(apm_cli, task_data, host_data) + for task_data in tasks: + for host_uuid, host_data in task_data.host_data.items(): + self.create_span_data(apm_cli, task_data, host_data) - apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) + apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) def create_span_data(self, apm_cli, task_data, host_data): """ create the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = "success" status = "success" @@ -256,7 +253,7 @@ class ElasticSource(object): "ansible.task.host.status": host_data.status}) as span: span.outcome = status if 'failure' in status: - exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message)) + exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}") apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True) def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key): @@ -285,7 +282,7 @@ class ElasticSource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" class CallbackModule(CallbackBase): @@ -310,9 +307,7 @@ class CallbackModule(CallbackBase): self.disabled = False if ELASTIC_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('The `elastic-apm` must be installed to use this plugin'), - ELASTIC_LIBRARY_IMPORT_ERROR) + raise AnsibleError('The `elastic-apm` must be installed to use this plugin') from ELASTIC_LIBRARY_IMPORT_ERROR 
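# The closing() wrapper introduced in generate_distributed_traces above makes
# sure the APM client is shut down even when span creation raises. A minimal
# sketch with a stand-in client (hypothetical class, not the elastic-apm API):

from contextlib import closing

class FakeClient:
    def close(self):
        print("flushed and closed")

with closing(FakeClient()) as client:
    pass  # begin_transaction / create spans / end_transaction would go here
# close() has run at this point, on normal exit and on exceptions alike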
self.tasks_data = OrderedDict() diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py deleted file mode 100644 index c64b892d9b..0000000000 --- a/plugins/callback/hipchat.py +++ /dev/null @@ -1,228 +0,0 @@ -# -*- coding: utf-8 -*- -# (C) 2014, Matt Martz -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: hipchat - type: notification - requirements: - - whitelist in configuration. - - prettytable (python lib) - short_description: post task events to hipchat - description: - - This callback plugin sends status updates to a HipChat channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin. - options: - token: - description: HipChat API token for v1 or v2 API. - required: True - env: - - name: HIPCHAT_TOKEN - ini: - - section: callback_hipchat - key: token - api_version: - description: HipChat API version, v1 or v2. - required: False - default: v1 - env: - - name: HIPCHAT_API_VERSION - ini: - - section: callback_hipchat - key: api_version - room: - description: HipChat room to post in. - default: ansible - env: - - name: HIPCHAT_ROOM - ini: - - section: callback_hipchat - key: room - from: - description: Name to post as - default: ansible - env: - - name: HIPCHAT_FROM - ini: - - section: callback_hipchat - key: from - notify: - description: Add notify flag to important messages - type: bool - default: True - env: - - name: HIPCHAT_NOTIFY - ini: - - section: callback_hipchat - key: notify - -''' - -import os -import json - -try: - import prettytable - HAS_PRETTYTABLE = True -except ImportError: - HAS_PRETTYTABLE = False - -from ansible.plugins.callback import CallbackBase -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - - -class CallbackModule(CallbackBase): - """This is an example ansible callback plugin that sends status - updates to a HipChat channel during playbook execution. - """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.hipchat' - CALLBACK_NEEDS_WHITELIST = True - - API_V1_URL = 'https://api.hipchat.com/v1/rooms/message' - API_V2_URL = 'https://api.hipchat.com/v2/' - - def __init__(self): - - super(CallbackModule, self).__init__() - - if not HAS_PRETTYTABLE: - self.disabled = True - self._display.warning('The `prettytable` python module is not installed. ' - 'Disabling the HipChat callback plugin.') - self.printed_playbook = False - self.playbook_name = None - self.play = None - - def set_options(self, task_keys=None, var_options=None, direct=None): - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.token = self.get_option('token') - self.api_version = self.get_option('api_version') - self.from_name = self.get_option('from') - self.allow_notify = self.get_option('notify') - self.room = self.get_option('room') - - if self.token is None: - self.disabled = True - self._display.warning('HipChat token could not be loaded. The HipChat ' - 'token can be provided using the `HIPCHAT_TOKEN` ' - 'environment variable.') - - # Pick the request handler. 
- if self.api_version == 'v2': - self.send_msg = self.send_msg_v2 - else: - self.send_msg = self.send_msg_v1 - - def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'} - - body = {} - body['room_id'] = self.room - body['from'] = self.from_name[:15] # max length is 15 - body['message'] = msg - body['message_format'] = msg_format - body['color'] = color - body['notify'] = self.allow_notify and notify - - data = json.dumps(body) - url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room) - try: - response = open_url(url, data=data, headers=headers, method='POST') - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - params = {} - params['room_id'] = self.room - params['from'] = self.from_name[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['notify'] = int(self.allow_notify and notify) - - url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token)) - try: - response = open_url(url, data=urlencode(params)) - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def v2_playbook_on_play_start(self, play): - """Display Playbook and play start messages""" - - self.play = play - name = play.name - # This block sends information about a playbook when it starts - # The playbook object is not immediately available at - # playbook_on_start so we grab it via the play - # - # Displays info about playbook being started by a person on an - # inventory, as well as Tags, Skip Tags and Limits - if not self.printed_playbook: - self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) - host_list = self.play.playbook.inventory.host_list - inventory = os.path.basename(os.path.realpath(host_list)) - self.send_msg("%s: Playbook initiated by %s against %s" % - (self.playbook_name, - self.play.playbook.remote_user, - inventory), notify=True) - self.printed_playbook = True - subset = self.play.playbook.inventory._subset - skip_tags = self.play.playbook.skip_tags - self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % - (self.playbook_name, - ', '.join(self.play.playbook.only_tags), - ', '.join(skip_tags) if skip_tags else None, - ', '.join(subset) if subset else subset)) - - # This is where we actually say we are starting a play - self.send_msg("%s: Starting play: %s" % - (self.playbook_name, name)) - - def playbook_on_stats(self, stats): - """Display info about playbook statistics""" - hosts = sorted(stats.processed.keys()) - - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures']) - - failures = False - unreachable = False - - for h in hosts: - s = stats.summarize(h) - - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures']]) - - self.send_msg("%s: Playbook complete" % self.playbook_name, - notify=True) - - if failures or unreachable: - color = 'red' - self.send_msg("%s: Failures detected" % self.playbook_name, - color=color, notify=True) - else: - color = 'green' - - self.send_msg("/code %s:\n%s" 
% (self.playbook_name, t), color=color) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index b535fa9540..319611d460 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,43 +1,46 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: jabber - type: notification - short_description: post task events to a jabber server - description: - - The chatty part of ChatOps with a Hipchat server as a target - - This callback plugin sends status updates to a HipChat channel during playbook execution. - requirements: - - xmpp (python lib https://github.com/ArchipelProject/xmpppy) - options: - server: - description: connection info to jabber server - required: True - env: - - name: JABBER_SERV - user: - description: Jabber user to authenticate as - required: True - env: - - name: JABBER_USER - password: - description: Password for the user to the jabber server - required: True - env: - - name: JABBER_PASS - to: - description: chat identifier that will receive the message - required: True - env: - - name: JABBER_TO -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: jabber +type: notification +short_description: Post task events to a Jabber server +description: + - The chatty part of ChatOps with a Hipchat server as a target. + - This callback plugin sends status updates to a HipChat channel during playbook execution. +requirements: + - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy)) +options: + server: + description: Connection info to Jabber server. + type: str + required: true + env: + - name: JABBER_SERV + user: + description: Jabber user to authenticate as. + type: str + required: true + env: + - name: JABBER_USER + password: + description: Password for the user to the Jabber server. + type: str + required: true + env: + - name: JABBER_PASS + to: + description: Chat identifier that receives the message. + type: str + required: true + env: + - name: JABBER_TO +""" import os @@ -97,7 +100,7 @@ class CallbackModule(CallbackBase): """Display Playbook and play start messages""" self.play = play name = play.name - self.send_msg("Ansible starting play: %s" % (name)) + self.send_msg(f"Ansible starting play: {name}") def playbook_on_stats(self, stats): name = self.play @@ -113,7 +116,7 @@ class CallbackModule(CallbackBase): if failures or unreachable: out = self.debug - self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out)) + self.send_msg(f"{name}: Failures detected \n{self.task} \nHost: {h}\n Failed at:\n{out}") else: out = self.debug - self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out)) + self.send_msg(f"Great! 
\n Playbook {name} completed:\n{s} \n Last task debug:\n {out}") diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 2539bd9ade..89ec8cbff3 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,31 +1,31 @@ -# -*- coding: utf-8 -*- -# (C) 2012, Michael DeHaan, -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Michael DeHaan, +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: log_plays - type: notification - short_description: write playbook output to log file - description: - - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory - requirements: - - Whitelist in configuration - - A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller - options: - log_folder: - default: /var/log/ansible/hosts - description: The folder where log files will be created. - env: - - name: ANSIBLE_LOG_FOLDER - ini: - - section: callback_log_plays - key: log_folder -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: log_plays +type: notification +short_description: Write playbook output to log file +description: + - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory. +requirements: + - Whitelist in configuration + - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller +options: + log_folder: + default: /var/log/ansible/hosts + description: The folder where log files are created. 
+ type: str + env: + - name: ANSIBLE_LOG_FOLDER + ini: + - section: callback_log_plays + key: log_folder +""" import os import time @@ -33,7 +33,7 @@ import json from ansible.utils.path import makedirs_safe from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.common._collections_compat import MutableMapping +from collections.abc import MutableMapping from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -55,7 +55,10 @@ class CallbackModule(CallbackBase): CALLBACK_NEEDS_WHITELIST = True TIME_FORMAT = "%b %d %Y %H:%M:%S" - MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n" + + @staticmethod + def _make_msg(now, playbook, task_name, task_action, category, data): + return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n" def __init__(self): @@ -80,22 +83,12 @@ class CallbackModule(CallbackBase): invocation = data.pop('invocation', None) data = json.dumps(data, cls=AnsibleJSONEncoder) if invocation is not None: - data = json.dumps(invocation) + " => %s " % data + data = f"{json.dumps(invocation)} => {data} " path = os.path.join(self.log_folder, result._host.get_name()) now = time.strftime(self.TIME_FORMAT, time.localtime()) - msg = to_bytes( - self.MSG_FORMAT - % dict( - now=now, - playbook=self.playbook, - task_name=result._task.name, - task_action=result._task.action, - category=category, - data=data, - ) - ) + msg = to_bytes(self._make_msg(now, self.playbook, result._task.name, result._task.action, category, data)) with open(path, "ab") as fd: fd.write(msg) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index 04fc646dc4..05996f2492 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,42 +1,44 @@ -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: loganalytics - type: aggregate - short_description: Posts task results to Azure Log Analytics - author: "Cyrus Li (@zhcli) " - description: - - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace. - - Credits to authors of splunk callback plugin. - version_added: "2.4.0" - requirements: - - Whitelisting this callback plugin. - - An Azure log analytics work space has been established. - options: - workspace_id: - description: Workspace ID of the Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_ID - ini: - - section: callback_loganalytics - key: workspace_id - shared_key: - description: Shared key to connect to Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_SHARED_KEY - ini: - - section: callback_loganalytics - key: shared_key -''' +DOCUMENTATION = r""" +name: loganalytics +type: notification +short_description: Posts task results to Azure Log Analytics +author: "Cyrus Li (@zhcli) " +description: + - This callback plugin posts task results in JSON formatted to an Azure Log Analytics workspace. + - Credits to authors of splunk callback plugin. 
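# A sketch of the per-host append pattern from the log_plays hunk above: build
# one record per event with an f-string and append it as bytes. A plain
# encode() stands in for Ansible's to_bytes(), and the folder is assumed to
# exist (the plugin calls makedirs_safe() first).

import os
import time

def append_record(folder, host, playbook, task_name, task_action, category, data):
    now = time.strftime("%b %d %Y %H:%M:%S", time.localtime())
    msg = f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n"
    with open(os.path.join(folder, host), "ab") as fd:  # one log file per host
        fd.write(msg.encode("utf-8"))

# e.g. append_record('/var/log/ansible/hosts', 'web1', 'site.yml', 'ping', 'ping', 'OK', '{}')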
+version_added: "2.4.0" +requirements: + - Whitelisting this callback plugin. + - An Azure log analytics work space has been established. +options: + workspace_id: + description: Workspace ID of the Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_ID + ini: + - section: callback_loganalytics + key: workspace_id + shared_key: + description: Shared key to connect to Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_SHARED_KEY + ini: + - section: callback_loganalytics + key: shared_key +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Whitelist the plugin in ansible.cfg: [defaults] callback_whitelist = community.general.loganalytics @@ -47,30 +49,32 @@ examples: | [callback_loganalytics] workspace_id = 01234567-0123-0123-0123-01234567890a shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA== -''' +""" import hashlib import hmac import base64 -import logging import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class AzureLogAnalyticsSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.user = getpass.getuser() @@ -78,30 +82,25 @@ class AzureLogAnalyticsSource(object): def __build_signature(self, date, workspace_id, shared_key, content_length): # Build authorisation signature for Azure log analytics API call - sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format( - str(content_length), date) + sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs" utf8_sigs = sigs.encode('utf-8') decoded_shared_key = base64.b64decode(shared_key) hmac_sha256_sigs = hmac.new( decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8') - signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash) + signature = f"SharedKey {workspace_id}:{encoded_hash}" return signature def __build_workspace_url(self, workspace_id): - return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id) + return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01" def __rfc1123date(self): - return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + return now().strftime('%a, %d %b %Y %H:%M:%S GMT') def send_event(self, workspace_id, shared_key, state, result, runtime): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -115,7 +114,7 @@ class AzureLogAnalyticsSource(object): data['host'] = self.host data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name 
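# A self-contained sketch of the SharedKey scheme from __build_signature above
# (dummy workspace and key; the plugin reads real values from its options):

import base64
import hashlib
import hmac

def build_signature(date, workspace_id, shared_key, content_length):
    # string-to-sign: method, body length, content type, x-ms-date, resource path
    sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs"
    digest = hmac.new(base64.b64decode(shared_key), sigs.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return f"SharedKey {workspace_id}:{base64.b64encode(digest).decode('utf-8')}"

dummy_key = base64.b64encode(b'not-a-real-key').decode('utf-8')
print(build_signature('Mon, 01 Jan 2024 00:00:00 GMT', 'workspace-id', dummy_key, 42))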
data['ansible_playbook'] = self.ansible_playbook @@ -153,7 +152,7 @@ class AzureLogAnalyticsSource(object): class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'loganalytics' CALLBACK_NEEDS_WHITELIST = True @@ -166,7 +165,7 @@ class CallbackModule(CallbackBase): def _seconds_since_start(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -184,10 +183,10 @@ class CallbackModule(CallbackBase): self.loganalytics.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.loganalytics.send_event( diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 138b612de8..09d8b38dcb 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,60 +1,59 @@ -# -*- coding: utf-8 -*- -# (c) 2018, Samir Musali -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Samir Musali +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logdna - type: aggregate - short_description: Sends playbook logs to LogDNA - description: - - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com) - requirements: - - LogDNA Python Library (https://github.com/logdna/python) - - whitelisting in configuration - options: - conf_key: - required: True - description: LogDNA Ingestion Key - type: string - env: - - name: LOGDNA_INGESTION_KEY - ini: - - section: callback_logdna - key: conf_key - plugin_ignore_errors: - required: False - description: Whether to ignore errors on failing or not - type: boolean - env: - - name: ANSIBLE_IGNORE_ERRORS - ini: - - section: callback_logdna - key: plugin_ignore_errors - default: False - conf_hostname: - required: False - description: Alternative Host Name; the current host name by default - type: string - env: - - name: LOGDNA_HOSTNAME - ini: - - section: callback_logdna - key: conf_hostname - conf_tags: - required: False - description: Tags - type: string - env: - - name: LOGDNA_TAGS - ini: - - section: callback_logdna - key: conf_tags - default: ansible -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logdna +type: notification +short_description: Sends playbook logs to LogDNA +description: + - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)). +requirements: + - LogDNA Python Library (U(https://github.com/logdna/python)) + - whitelisting in configuration +options: + conf_key: + required: true + description: LogDNA Ingestion Key. + type: string + env: + - name: LOGDNA_INGESTION_KEY + ini: + - section: callback_logdna + key: conf_key + plugin_ignore_errors: + required: false + description: Whether to ignore errors on failing or not. 
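# The now() helper that replaces datetime.utcnow() in the loganalytics hunks
# above can be approximated with the standard library; community.general ships
# it in plugins/module_utils/datetime.py. A sketch, assuming the helper simply
# returns an aware UTC timestamp:

from datetime import datetime, timezone

def now():
    # timezone-aware stand-in for the deprecated, naive datetime.utcnow()
    return datetime.now(timezone.utc)

# RFC 1123 date for the x-ms-date header, as in __rfc1123date() above
print(now().strftime('%a, %d %b %Y %H:%M:%S GMT'))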
+ type: boolean + env: + - name: ANSIBLE_IGNORE_ERRORS + ini: + - section: callback_logdna + key: plugin_ignore_errors + default: false + conf_hostname: + required: false + description: Alternative Host Name; the current host name by default. + type: string + env: + - name: LOGDNA_HOSTNAME + ini: + - section: callback_logdna + key: conf_hostname + conf_tags: + required: false + description: Tags. + type: string + env: + - name: LOGDNA_TAGS + ini: + - section: callback_logdna + key: conf_tags + default: ansible +""" import logging import json @@ -72,7 +71,7 @@ except ImportError: # Getting MAC Address of system: def get_mac(): - mac = "%012x" % getnode() + mac = f"{getnode():012x}" return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) @@ -110,7 +109,7 @@ def isJSONable(obj): class CallbackModule(CallbackBase): CALLBACK_VERSION = 0.1 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.logdna' CALLBACK_NEEDS_WHITELIST = True @@ -160,7 +159,7 @@ class CallbackModule(CallbackBase): if ninvalidKeys > 0: for key in invalidKeys: del meta[key] - meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys) + meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}" return meta def sanitizeJSON(self, data): diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index ad71a6d448..8fbcef4dd6 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,79 +1,80 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Logentries.com, Jimmy Tang -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Logentries.com, Jimmy Tang +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logentries - type: notification - short_description: Sends events to Logentries +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logentries +type: notification +short_description: Sends events to Logentries +description: + - This callback plugin generates JSON objects and send them to Logentries using TCP for auditing/debugging purposes. +requirements: + - whitelisting in configuration + - certifi (Python library) + - flatdict (Python library), if you want to use the O(flatten) option +options: + api: + description: URI to the Logentries API. + type: str + env: + - name: LOGENTRIES_API + default: data.logentries.com + ini: + - section: callback_logentries + key: api + port: + description: HTTP port to use when connecting to the API. + type: int + env: + - name: LOGENTRIES_PORT + default: 80 + ini: + - section: callback_logentries + key: port + tls_port: + description: Port to use when connecting to the API when TLS is enabled. + type: int + env: + - name: LOGENTRIES_TLS_PORT + default: 443 + ini: + - section: callback_logentries + key: tls_port + token: + description: The logentries C(TCP token). + type: str + env: + - name: LOGENTRIES_ANSIBLE_TOKEN + required: true + ini: + - section: callback_logentries + key: token + use_tls: description: - - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes. 
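# A sketch of the get_mac() idiom from the logdna hunk above: render the 48-bit
# node id as 12 zero-padded hex digits, then join byte pairs (an explicit step
# of 2 keeps the pairs from overlapping):

from uuid import getnode

def get_mac():
    mac = f"{getnode():012x}"  # 48 bits -> exactly 12 hex digits
    return ":".join(mac[i:i + 2] for i in range(0, 12, 2))

print(get_mac())  # e.g. '28:cf:e9:1a:2b:3c'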
- - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named logentries.ini - - In 2.4 and above you can just put it in the main Ansible configuration file. - requirements: - - whitelisting in configuration - - certifi (python library) - - flatdict (python library), if you want to use the 'flatten' option - options: - api: - description: URI to the Logentries API - env: - - name: LOGENTRIES_API - default: data.logentries.com - ini: - - section: callback_logentries - key: api - port: - description: HTTP port to use when connecting to the API - env: - - name: LOGENTRIES_PORT - default: 80 - ini: - - section: callback_logentries - key: port - tls_port: - description: Port to use when connecting to the API when TLS is enabled - env: - - name: LOGENTRIES_TLS_PORT - default: 443 - ini: - - section: callback_logentries - key: tls_port - token: - description: The logentries "TCP token" - env: - - name: LOGENTRIES_ANSIBLE_TOKEN - required: True - ini: - - section: callback_logentries - key: token - use_tls: - description: - - Toggle to decide whether to use TLS to encrypt the communications with the API server - env: - - name: LOGENTRIES_USE_TLS - default: False - type: boolean - ini: - - section: callback_logentries - key: use_tls - flatten: - description: flatten complex data structures into a single dictionary with complex keys - type: boolean - default: False - env: - - name: LOGENTRIES_FLATTEN - ini: - - section: callback_logentries - key: flatten -''' + - Toggle to decide whether to use TLS to encrypt the communications with the API server. + env: + - name: LOGENTRIES_USE_TLS + default: false + type: boolean + ini: + - section: callback_logentries + key: use_tls + flatten: + description: Flatten complex data structures into a single dictionary with complex keys. 
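# The reopen_connection hunk below retries with exponential backoff plus
# jitter. A minimal sketch of that loop, assuming any callable that raises
# OSError until the endpoint is reachable:

import random
import time

def reconnect(open_connection, min_delay=0.1, max_delay=10.0):
    delay = min_delay
    while True:
        try:
            open_connection()
            return
        except OSError:
            delay = min(delay * 2, max_delay)             # exponential, capped
            time.sleep(delay + random.uniform(0, delay))  # plus random jitter

# e.g. reconnect(lambda: socket.create_connection(('data.logentries.com', 443)).close())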
+ type: boolean + default: false + env: + - name: LOGENTRIES_FLATTEN + ini: + - section: callback_logentries + key: flatten +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] @@ -89,10 +90,10 @@ examples: > api = data.logentries.com port = 10000 tls_port = 20000 - use_tls = no + use_tls = true token = dd21fc88-f00a-43ff-b977-e3a4233c53af - flatten = False -''' + flatten = false +""" import os import socket @@ -130,7 +131,7 @@ class PlainTextSocketAppender(object): # Error message displayed when an incorrect Token has been detected self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n" # Unicode Line separator character \u2028 - self.LINE_SEP = u'\u2028' + self.LINE_SEP = '\u2028' self._display = display self._conn = None @@ -148,7 +149,7 @@ class PlainTextSocketAppender(object): self.open_connection() return except Exception as e: - self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e)) + self._display.vvvv(f"Unable to connect to Logentries: {e}") root_delay *= 2 if root_delay > self.MAX_DELAY: @@ -157,7 +158,7 @@ class PlainTextSocketAppender(object): wait_for = root_delay + random.uniform(0, root_delay) try: - self._display.vvvv("sleeping %s before retry" % wait_for) + self._display.vvvv(f"sleeping {wait_for} before retry") time.sleep(wait_for) except KeyboardInterrupt: raise @@ -170,8 +171,8 @@ class PlainTextSocketAppender(object): # Replace newlines with Unicode line separator # for multi-line events data = to_text(data, errors='surrogate_or_strict') - multiline = data.replace(u'\n', self.LINE_SEP) - multiline += u"\n" + multiline = data.replace('\n', self.LINE_SEP) + multiline += "\n" # Send data, reconnect if needed while True: try: @@ -195,15 +196,11 @@ else: class TLSSocketAppender(PlainTextSocketAppender): def open_connection(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock = ssl.wrap_socket( + context = ssl.create_default_context( + purpose=ssl.Purpose.SERVER_AUTH, + cafile=certifi.where(), ) + sock = context.wrap_socket( sock=sock, - keyfile=None, - certfile=None, - server_side=False, - cert_reqs=ssl.CERT_REQUIRED, - ssl_version=getattr( - ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1), - ca_certs=certifi.where(), do_handshake_on_connect=True, suppress_ragged_eofs=True, ) sock.connect((self.LE_API, self.LE_TLS_PORT)) @@ -248,7 +245,7 @@ class CallbackModule(CallbackBase): self.use_tls = self.get_option('use_tls') self.flatten = self.get_option('flatten') except KeyError as e: - self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e)) + self._display.warning(f"Missing option for Logentries callback plugin: {e}") self.disabled = True try: @@ -267,10 +264,10 @@ class CallbackModule(CallbackBase): if not self.disabled: if self.use_tls: - self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS") self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) else: - self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}") self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) self._appender.reopen_connection() @@ -283,7 +280,7 @@ class CallbackModule(CallbackBase): def emit(self, record): 
msg = record.rstrip('\n') - msg = "{0} {1}".format(self.token, msg) + msg = f"{self.token} {msg}" self._appender.put(msg) self._display.vvvv("Sent event to logentries") diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index d34928ff34..f2279929f0 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,96 +1,98 @@ -# -*- coding: utf-8 -*- -# (C) 2020, Yevhen Khmelenko -# (C) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Yevhen Khmelenko +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - author: Yevhen Khmelenko (@ujenmr) - name: logstash - type: notification - short_description: Sends events to Logstash - description: - - This callback will report facts and task events to Logstash https://www.elastic.co/products/logstash - requirements: - - whitelisting in configuration - - logstash (python library) - options: - server: - description: Address of the Logstash server - env: - - name: LOGSTASH_SERVER - ini: - - section: callback_logstash - key: server - version_added: 1.0.0 - default: localhost - port: - description: Port on which logstash is listening - env: - - name: LOGSTASH_PORT - ini: - - section: callback_logstash - key: port - version_added: 1.0.0 - default: 5000 - type: - description: Message type - env: - - name: LOGSTASH_TYPE - ini: - - section: callback_logstash - key: type - version_added: 1.0.0 - default: ansible - pre_command: - description: Executes command before run and result put to ansible_pre_command_output field. - version_added: 2.0.0 - ini: - - section: callback_logstash - key: pre_command - env: - - name: LOGSTASH_PRE_COMMAND - format_version: - description: Logging format - type: str - version_added: 2.0.0 - ini: - - section: callback_logstash - key: format_version - env: - - name: LOGSTASH_FORMAT_VERSION - default: v1 - choices: - - v1 - - v2 +DOCUMENTATION = r""" +author: Yevhen Khmelenko (@ujenmr) +name: logstash +type: notification +short_description: Sends events to Logstash +description: + - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash). +requirements: + - whitelisting in configuration + - logstash (Python library) +options: + server: + description: Address of the Logstash server. + type: str + env: + - name: LOGSTASH_SERVER + ini: + - section: callback_logstash + key: server + version_added: 1.0.0 + default: localhost + port: + description: Port on which logstash is listening. + type: int + env: + - name: LOGSTASH_PORT + ini: + - section: callback_logstash + key: port + version_added: 1.0.0 + default: 5000 + type: + description: Message type. + type: str + env: + - name: LOGSTASH_TYPE + ini: + - section: callback_logstash + key: type + version_added: 1.0.0 + default: ansible + pre_command: + description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. + type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: pre_command + env: + - name: LOGSTASH_PRE_COMMAND + format_version: + description: Logging format. 
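# The logentries TLSSocketAppender hunk above swaps the removed
# ssl.wrap_socket() for an SSLContext. A minimal client sketch with the
# certifi CA bundle; passing server_hostname keeps hostname checking and SNI
# at the secure defaults:

import socket
import ssl

import certifi

def open_tls(host, port):
    context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
                                         cafile=certifi.where())
    sock = context.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM),
                               server_hostname=host)
    sock.connect((host, port))  # TLS handshake happens on connect
    return sock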
+ type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: format_version + env: + - name: LOGSTASH_FORMAT_VERSION + default: v1 + choices: + - v1 + - v2 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: | - # Enable Callback plugin - [defaults] - callback_whitelist = community.general.logstash + # Enable Callback plugin + [defaults] + callback_whitelist = community.general.logstash - [callback_logstash] - server = logstash.example.com - port = 5000 - pre_command = git rev-parse HEAD - type = ansible + [callback_logstash] + server = logstash.example.com + port = 5000 + pre_command = git rev-parse HEAD + type = ansible -11-input-tcp.conf: | - # Enable Logstash TCP Input - input { - tcp { - port => 5000 - codec => json - add_field => { "[@metadata][beat]" => "notify" } - add_field => { "[@metadata][type]" => "ansible" } - } - } -''' +11-input-tcp.conf: |- + # Enable Logstash TCP Input + input { + tcp { + port => 5000 + codec => json + add_field => { "[@metadata][beat]" => "notify" } + add_field => { "[@metadata][type]" => "ansible" } + } + } +""" import os import json @@ -98,7 +100,6 @@ from ansible import context import socket import uuid import logging -from datetime import datetime try: import logstash @@ -108,11 +109,15 @@ except ImportError: from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.logstash' CALLBACK_NEEDS_WHITELIST = True @@ -121,11 +126,9 @@ class CallbackModule(CallbackBase): if not HAS_LOGSTASH: self.disabled = True - self._display.warning("The required python-logstash/python3-logstash is not installed. 
" - "pip install python-logstash for Python 2" - "pip install python3-logstash for Python 3") + self._display.warning("The required python3-logstash is not installed.") - self.start_time = datetime.utcnow() + self.start_time = now() def _init_plugin(self): if not self.disabled: @@ -176,7 +179,7 @@ class CallbackModule(CallbackBase): data['status'] = "OK" data['ansible_playbook'] = playbook._file_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "START PLAYBOOK | %s", data['ansible_playbook'], extra=data ) @@ -184,7 +187,7 @@ class CallbackModule(CallbackBase): self.logger.info("ansible start", extra=data) def v2_playbook_on_stats(self, stats): - end_time = datetime.utcnow() + end_time = now() runtime = end_time - self.start_time summarize_stat = {} for host in stats.processed.keys(): @@ -201,7 +204,7 @@ class CallbackModule(CallbackBase): data['ansible_playbook_duration'] = runtime.total_seconds() data['ansible_result'] = json.dumps(summarize_stat) # deprecated field - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data ) @@ -220,7 +223,7 @@ class CallbackModule(CallbackBase): data['ansible_play_id'] = self.play_id data['ansible_play_name'] = self.play_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("START PLAY | %s", self.play_name, extra=data) else: self.logger.info("ansible play", extra=data) @@ -245,7 +248,7 @@ class CallbackModule(CallbackBase): data['ansible_task'] = task_name data['ansible_facts'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "SETUP FACTS | %s", self._dump_results(result._result), extra=data ) @@ -266,7 +269,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "TASK OK | %s | RESULT | %s", task_name, self._dump_results(result._result), extra=data @@ -287,7 +290,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("TASK SKIPPED | %s", task_name, extra=data) else: self.logger.info("ansible skipped", extra=data) @@ -301,7 +304,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = imported_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("IMPORT | %s", imported_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -315,7 +318,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = missing_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("NOT IMPORTED | %s", missing_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -339,7 +342,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "TASK FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -362,7 +365,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = 
self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "UNREACHABLE | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -385,7 +388,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "ASYNC FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index e48e2de98e..7afb08e3f0 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -1,67 +1,91 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2012, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: mail type: notification -short_description: Sends failure events via email +short_description: Sends failure events through email description: -- This callback will report failures via email + - This callback reports failures through email. author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- whitelisting in configuration + - whitelisting in configuration options: mta: - description: Mail Transfer Agent, server that accepts SMTP + description: + - Mail Transfer Agent, server that accepts SMTP. + type: str env: - - name: SMTPHOST + - name: SMTPHOST ini: - - section: callback_mail - key: smtphost + - section: callback_mail + key: smtphost default: localhost mtaport: - description: Mail Transfer Agent Port, port at which server SMTP + description: + - Mail Transfer Agent Port. + - Port at which server SMTP. + type: int ini: - - section: callback_mail - key: smtpport + - section: callback_mail + key: smtpport default: 25 to: - description: Mail recipient + description: + - Mail recipient. + type: list + elements: str ini: - - section: callback_mail - key: to - default: root + - section: callback_mail + key: to + default: [root] sender: - description: Mail sender + description: + - Mail sender. + - This is required since community.general 6.0.0. + type: str + required: true ini: - - section: callback_mail - key: sender + - section: callback_mail + key: sender cc: - description: CC'd recipient + description: + - CC'd recipients. + type: list + elements: str ini: - - section: callback_mail - key: cc + - section: callback_mail + key: cc bcc: - description: BCC'd recipient + description: + - BCC'd recipients. + type: list + elements: str ini: - - section: callback_mail - key: bcc -notes: -- "TODO: expand configuration options now that plugins can leverage Ansible's configuration" -''' + - section: callback_mail + key: bcc + message_id_domain: + description: + - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). + - The default is the hostname of the control node. 
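# The rewritten mail() below replaces manual byte concatenation with the
# stdlib address helpers. A sketch of those helpers in isolation (all
# addresses here are dummies):

import email.utils

sender = email.utils.parseaddr('"Ansible: web1" <root@localhost>')
recipients = email.utils.getaddresses(['a@example.com', 'B <b@example.com>'])

headers = f"Date: {email.utils.formatdate()}\n"
headers += f"From: {email.utils.formataddr(sender)}\n"
headers += f"To: {', '.join(email.utils.formataddr(p) for p in recipients)}\n"
headers += f"Message-ID: {email.utils.make_msgid(domain='example.com')}\n"
print(headers)

envelope = [address for name, address in recipients]  # bare addresses for sendmail()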
+ type: str + ini: + - section: callback_mail + key: message_id_domain + version_added: 8.2.0 +""" import json import os import re +import email.utils import smtplib -from ansible.module_utils.six import string_types from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -90,7 +114,7 @@ class CallbackModule(CallbackBase): self.sender = self.get_option('sender') self.to = self.get_option('to') self.smtphost = self.get_option('mta') - self.smtpport = int(self.get_option('mtaport')) + self.smtpport = self.get_option('mtaport') self.cc = self.get_option('cc') self.bcc = self.get_option('bcc') @@ -100,49 +124,54 @@ class CallbackModule(CallbackBase): smtp = smtplib.SMTP(self.smtphost, port=self.smtpport) - b_sender = to_bytes(self.sender) - b_to = to_bytes(self.to) - b_cc = to_bytes(self.cc) - b_bcc = to_bytes(self.bcc) - b_subject = to_bytes(subject) - b_body = to_bytes(body) - - b_content = b'From: %s\n' % b_sender - b_content += b'To: %s\n' % b_to + sender_address = email.utils.parseaddr(self.sender) + if self.to: + to_addresses = email.utils.getaddresses(self.to) if self.cc: - b_content += b'Cc: %s\n' % b_cc - b_content += b'Subject: %s\n\n' % b_subject - b_content += b_body - - b_addresses = b_to.split(b',') - if self.cc: - b_addresses += b_cc.split(b',') + cc_addresses = email.utils.getaddresses(self.cc) if self.bcc: - b_addresses += b_bcc.split(b',') + bcc_addresses = email.utils.getaddresses(self.bcc) - for b_address in b_addresses: - smtp.sendmail(b_sender, b_address, b_content) + content = f'Date: {email.utils.formatdate()}\n' + content += f'From: {email.utils.formataddr(sender_address)}\n' + if self.to: + content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n" + if self.cc: + content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n" + content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n" + content += f'Subject: {subject.strip()}\n\n' + content += body + + addresses = to_addresses + if self.cc: + addresses += cc_addresses + if self.bcc: + addresses += bcc_addresses + + if not addresses: + self._display.warning('No receiver has been specified for the mail callback plugin.') + + smtp.sendmail(self.sender, [address for name, address in addresses], to_bytes(content)) smtp.quit() def subject_msg(self, multiline, failtype, linenr): - return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr]) + msg = multiline.strip('\r\n').splitlines()[linenr] + return f'{failtype}: {msg}' def indent(self, multiline, indent=8): return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE) def body_blob(self, multiline, texttype): ''' Turn some text output in a well-indented block for sending in a mail body ''' - intro = 'with the following %s:\n\n' % texttype - blob = '' - for line in multiline.strip('\r\n').splitlines(): - blob += '%s\n' % line - return intro + self.indent(blob) + '\n' + intro = f'with the following {texttype}:\n\n' + blob = "\n".join(multiline.strip('\r\n').splitlines()) + return f"{intro}{self.indent(blob)}\n" def mail_result(self, result, failtype): host = result._host.get_name() if not self.sender: - self.sender = '"Ansible: %s" ' % host + self.sender = f'"Ansible: {host}" ' # Add subject if self.itembody: @@ -158,31 +187,33 @@ class CallbackModule(CallbackBase): elif result._result.get('exception'): # Unrelated exceptions are added to output :-/ 
subject = self.subject_msg(result._result['exception'], failtype, -1) else: - subject = '%s: %s' % (failtype, result._task.name or result._task.action) + subject = f'{failtype}: {result._task.name or result._task.action}' # Make playbook name visible (e.g. in Outlook/Gmail condensed view) - body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name) + body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n' if result._task.name: - body += 'Task: %s\n' % result._task.name - body += 'Module: %s\n' % result._task.action - body += 'Host: %s\n' % host + body += f'Task: {result._task.name}\n' + body += f'Module: {result._task.action}\n' + body += f'Host: {host}\n' body += '\n' # Add task information (as much as possible) body += 'The following task failed:\n\n' if 'invocation' in result._result: - body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4))) + body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n") elif result._task.name: - body += self.indent('%s (%s)\n' % (result._task.name, result._task.action)) + body += self.indent(f'{result._task.name} ({result._task.action})\n') else: - body += self.indent('%s\n' % result._task.action) + body += self.indent(f'{result._task.action}\n') body += '\n' # Add item / message if self.itembody: body += self.itembody elif result._result.get('failed_when_result') is True: - body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n' + fail_cond_list = '\n- '.join(result._task.failed_when) + fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}") + body += f"due to the following condition:\n\n{fail_cond}\n\n" elif result._result.get('msg'): body += self.body_blob(result._result['msg'], 'message') @@ -195,13 +226,13 @@ class CallbackModule(CallbackBase): body += self.body_blob(result._result['exception'], 'exception') if result._result.get('warnings'): for i in range(len(result._result.get('warnings'))): - body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}') if result._result.get('deprecations'): for i in range(len(result._result.get('deprecations'))): - body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}') body += 'and a complete dump of the error:\n\n' - body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4))) + body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}') self.mail(subject=subject, body=body) @@ -224,4 +255,4 @@ class CallbackModule(CallbackBase): def v2_runner_item_on_failed(self, result): # Pass item information to task failure self.itemsubject = result._result['msg'] - self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result) + self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'") diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index 744c2d2ed4..6f1b5e2f5b 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -1,75 +1,73 @@ -# -*- coding: utf-8 -*- -# (c) 2018 Remi Verchere -# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018 Remi Verchere +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: nrdp - type: notification - author: "Remi VERCHERE (@rverchere)" - short_description: Post task results to a Nagios server through nrdp - description: - - This callback send playbook result to Nagios. - - Nagios shall use NRDP to recive passive events. - - The passive check is sent to a dedicated host/service for Ansible. - options: - url: - description: URL of the nrdp server. - required: true - env: - - name : NRDP_URL - ini: - - section: callback_nrdp - key: url - type: string - validate_certs: - description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) - env: - - name: NRDP_VALIDATE_CERTS - ini: - - section: callback_nrdp - key: validate_nrdp_certs - - section: callback_nrdp - key: validate_certs - type: boolean - default: false - aliases: [ validate_nrdp_certs ] - token: - description: Token to be allowed to push nrdp events. - required: true - env: - - name: NRDP_TOKEN - ini: - - section: callback_nrdp - key: token - type: string - hostname: - description: Hostname where the passive check is linked to. - required: true - env: - - name : NRDP_HOSTNAME - ini: - - section: callback_nrdp - key: hostname - type: string - servicename: - description: Service where the passive check is linked to. - required: true - env: - - name : NRDP_SERVICENAME - ini: - - section: callback_nrdp - key: servicename - type: string -''' +DOCUMENTATION = r""" +name: nrdp +type: notification +author: "Remi VERCHERE (@rverchere)" +short_description: Post task results to a Nagios server through nrdp +description: + - This callback send playbook result to Nagios. + - Nagios shall use NRDP to receive passive events. + - The passive check is sent to a dedicated host/service for Ansible. +options: + url: + description: URL of the nrdp server. + required: true + env: + - name: NRDP_URL + ini: + - section: callback_nrdp + key: url + type: string + validate_certs: + description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs). + env: + - name: NRDP_VALIDATE_CERTS + ini: + - section: callback_nrdp + key: validate_nrdp_certs + - section: callback_nrdp + key: validate_certs + type: boolean + default: false + aliases: [validate_nrdp_certs] + token: + description: Token to be allowed to push nrdp events. + required: true + env: + - name: NRDP_TOKEN + ini: + - section: callback_nrdp + key: token + type: string + hostname: + description: Hostname where the passive check is linked to. + required: true + env: + - name: NRDP_HOSTNAME + ini: + - section: callback_nrdp + key: hostname + type: string + servicename: + description: Service where the passive check is linked to. 
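# A sketch of the submission mechanics in _send_nrdp below: NRDP expects a
# form-encoded POST carrying cmd, token and the XML payload. urllib stands in
# for Ansible's open_url(), and the XML body is left as an opaque string:

from urllib.parse import urlencode
from urllib.request import Request, urlopen

def submit_check(url, token, xmldata):
    body = urlencode({'cmd': 'submitcheck', 'token': token,
                      'XMLDATA': xmldata}).encode('utf-8')
    return urlopen(Request(url, data=body, method='POST')).read()

# e.g. submit_check('https://nagios.example.com/nrdp/', 'secret', xmldata='...')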
+ required: true + env: + - name: NRDP_SERVICENAME + ini: + - section: callback_nrdp + key: servicename + type: string +""" -import os -import json +from urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase @@ -133,17 +131,17 @@ class CallbackModule(CallbackBase): xmldata = "<?xml version='1.0'?>\n" xmldata += "<checkresults>\n" xmldata += "<checkresult type='service'>\n" - xmldata += "<hostname>%s</hostname>\n" % self.hostname - xmldata += "<servicename>%s</servicename>\n" % self.servicename - xmldata += "<state>%d</state>\n" % state - xmldata += "<output>%s</output>\n" % msg + xmldata += f"<hostname>{self.hostname}</hostname>\n" + xmldata += f"<servicename>{self.servicename}</servicename>\n" + xmldata += f"<state>{state}</state>\n" + xmldata += f"<output>{msg}</output>\n" xmldata += "</checkresult>\n" xmldata += "</checkresults>\n" body = { 'cmd': 'submitcheck', 'token': self.token, - 'XMLDATA': bytes(xmldata) + 'XMLDATA': to_bytes(xmldata) } try: @@ -153,7 +151,7 @@ validate_certs=self.validate_nrdp_certs) return response.read() except Exception as ex: - self._display.warning("NRDP callback cannot send result {0}".format(ex)) + self._display.warning(f"NRDP callback cannot send result {ex}") def v2_playbook_on_play_start(self, play): ''' @@ -171,17 +169,16 @@ critical = warning = 0 for host in hosts: stat = stats.summarize(host) - gstats += "'%s_ok'=%d '%s_changed'=%d \ - '%s_unreachable'=%d '%s_failed'=%d " % \ - (host, stat['ok'], host, stat['changed'], - host, stat['unreachable'], host, stat['failures']) + gstats += ( + f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} " + ) # Critical when failed tasks or unreachable host critical += stat['failures'] critical += stat['unreachable'] # Warning when changed tasks warning += stat['changed'] - msg = "%s | %s" % (name, gstats) + msg = f"{name} | {gstats}" if critical: # Send Critical self._send_nrdp(self.CRITICAL, msg) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 13ea65b438..3074a698d0 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,21 +1,20 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: 'null' - type: stdout - requirements: - - set as main display callback - short_description: Don't display stuff to screen - description: - - This callback prevents outputing events to screen -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: 'null' +type: stdout +requirements: + - set as main display callback +short_description: Do not display stuff to screen +description: + - This callback prevents outputting events to screen. +""" from ansible.plugins.callback import CallbackBase @@ -23,7 +22,7 @@ from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): ''' - This callback wont print messages to stdout when new callback events are received. + This callback won't print messages to stdout when new callback events are received. 
''' CALLBACK_VERSION = 2.0 diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index c1856d9c01..ca6ec2b916 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -1,99 +1,166 @@ -# (C) 2021, Victor Martinez -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Victor Martinez +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: opentelemetry - type: notification - short_description: Create distributed traces with OpenTelemetry - version_added: 3.7.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: opentelemetry +type: notification +short_description: Create distributed traces with OpenTelemetry +version_added: 3.7.0 +description: + - This callback creates distributed traces for each Ansible task with OpenTelemetry. + - You can configure the OpenTelemetry exporter and SDK with environment variables. + - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task with OpenTelemetry. - - You can configure the OpenTelemetry exporter and SDK with environment variables. - - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). - - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - enable_from_environment: - type: str - description: - - Whether to enable this callback only if the given environment variable exists and it is set to C(true). - - This is handy when you use Configuration as Code and want to send distributed traces - if running in the CI rather when running Ansible locally. - - For such, it evaluates the given I(enable_from_environment) value as environment variable - and if set to true this plugin will be enabled. - env: - - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT - version_added: 3.8.0 - otel_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: OTEL_SERVICE_NAME - traceparent: - default: None - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - requirements: - - opentelemetry-api (Python library) - - opentelemetry-exporter-otlp (Python library) - - opentelemetry-sdk (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + ini: + - section: callback_opentelemetry + key: hide_task_arguments + version_added: 5.3.0 + enable_from_environment: + type: str + description: + - Whether to enable this callback only if the given environment variable exists and it is set to V(true). 
+ - This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather than when + running Ansible locally. + - To do so, it evaluates the given O(enable_from_environment) value as an environment variable, and if it is set to V(true) this + plugin is enabled. + env: + - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT + ini: + - section: callback_opentelemetry + key: enable_from_environment + version_added: 5.3.0 + version_added: 3.8.0 + otel_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: OTEL_SERVICE_NAME + ini: + - section: callback_opentelemetry + key: otel_service_name + version_added: 5.3.0 + traceparent: + default: None + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). + env: + - name: TRACEPARENT + disable_logs: + default: false + type: bool + description: + - Disable sending logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS + ini: + - section: callback_opentelemetry + key: disable_logs + version_added: 5.8.0 + disable_attributes_in_logs: + default: false + type: bool + description: + - Disable populating span attributes to the logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS + ini: + - section: callback_opentelemetry + key: disable_attributes_in_logs + version_added: 7.1.0 + store_spans_in_file: + type: str + description: + - It stores the exported spans in the given file. + env: + - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE + ini: + - section: callback_opentelemetry + key: store_spans_in_file + version_added: 9.0.0 + otel_exporter_otlp_traces_protocol: + type: str + description: + - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the transport protocol for spans. + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL). 
+ default: grpc + choices: + - grpc + - http/protobuf + env: + - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + ini: + - section: callback_opentelemetry + key: otel_exporter_otlp_traces_protocol + version_added: 9.0.0 +requirements: + - opentelemetry-api (Python library) + - opentelemetry-exporter-otlp (Python library) + - opentelemetry-sdk (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.opentelemetry + [callback_opentelemetry] + enable_from_environment = ANSIBLE_OPENTELEMETRY_ENABLED Set the environment variable: export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint> export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token" export OTEL_SERVICE_NAME=your_service_name -''' + export ANSIBLE_OPENTELEMETRY_ENABLED=true +""" import getpass +import json import os import socket -import sys -import time import uuid - from collections import OrderedDict from os.path import basename +from time import time_ns +from urllib.parse import urlparse from ansible.errors import AnsibleError -from ansible.module_utils.six import raise_from -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.plugins.callback import CallbackBase try: from opentelemetry import trace from opentelemetry.trace import SpanKind - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCOTLPSpanExporter + from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPOTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.trace.status import Status, StatusCode from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor + BatchSpanProcessor, + SimpleSpanProcessor + ) + from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter ) - from opentelemetry.util._time import _time_ns except ImportError as imp_exc: OTEL_LIBRARY_IMPORT_ERROR = imp_exc else: @@ -111,18 +178,16 @@ class TaskData: self.path = path self.play = play self.host_data = OrderedDict() - if sys.version_info >= (3, 7): - self.start = time.time_ns() - else: - self.start = _time_ns() + self.start = time_ns() self.action = action self.args = args + self.dump = None def add_host(self, host): if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return 
record the results of a task for a single host """ task_uuid = result._task._uuid @@ -196,12 +257,19 @@ class OpenTelemetrySource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - + task.dump = dump task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent): + def generate_distributed_traces(self, + otel_service_name, + ansible_playbook, + tasks_data, + status, + traceparent, + disable_logs, + disable_attributes_in_logs, + otel_exporter_otlp_traces_protocol, + store_spans_in_file): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -217,7 +285,16 @@ class OpenTelemetrySource(object): ) ) - processor = BatchSpanProcessor(OTLPSpanExporter()) + otel_exporter = None + if store_spans_in_file: + otel_exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(otel_exporter) + else: + if otel_exporter_otlp_traces_protocol == 'grpc': + otel_exporter = GRPCOTLPSpanExporter() + else: + otel_exporter = HTTPOTLPSpanExporter() + processor = BatchSpanProcessor(otel_exporter) trace.get_tracer_provider().add_span_processor(processor) @@ -227,8 +304,7 @@ class OpenTelemetrySource(object): start_time=parent_start_time, kind=SpanKind.SERVER) as parent: parent.set_status(status) # Populate trace metadata attributes - if self.ansible_version is not None: - parent.set_attribute("ansible.version", self.ansible_version) + parent.set_attribute("ansible.version", ansible_version) parent.set_attribute("ansible.session", self.session) parent.set_attribute("ansible.host.name", self.host) if self.ip_address is not None: @@ -237,12 +313,14 @@ class OpenTelemetrySource(object): for task in tasks: for host_uuid, host_data in task.host_data.items(): with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span: - self.update_span_data(task, host_data, span) + self.update_span_data(task, host_data, span, disable_logs, disable_attributes_in_logs) - def update_span_data(self, task_data, host_data, span): + return otel_exporter + + def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs): """ update the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = 'success' res = {} @@ -250,6 +328,7 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.OK) if host_data.status != 'included': # Support loops + enriched_error_message = None if 'results' in host_data.result._result: if host_data.status == 'failed': message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action) @@ -257,8 +336,9 @@ class OpenTelemetrySource(object): else: res = host_data.result._result rc = res.get('rc', 0) - message = self.get_error_message(res) - enriched_error_message = self.enrich_error_message(res) + if host_data.status == 'failed': + message = self.get_error_message(res) + enriched_error_message = self.enrich_error_message(res) if host_data.status == 'failed': status = Status(status_code=StatusCode.ERROR, description=message) @@ -267,38 +347,52 @@ class OpenTelemetrySource(object): elif host_data.status == 'skipped': message = res['skip_reason'] if 'skip_reason' in 
res else 'skipped' status = Status(status_code=StatusCode.UNSET) + elif host_data.status == 'ignored': + status = Status(status_code=StatusCode.UNSET) span.set_status(status) + + # Create the span and log attributes + attributes = { + "ansible.task.module": task_data.action, + "ansible.task.message": message, + "ansible.task.name": name, + "ansible.task.result": rc, + "ansible.task.host.name": host_data.name, + "ansible.task.host.status": host_data.status + } if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action: names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys()) values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values()) - self.set_span_attribute(span, ("ansible.task.args.name"), names) - self.set_span_attribute(span, ("ansible.task.args.value"), values) - self.set_span_attribute(span, "ansible.task.module", task_data.action) - self.set_span_attribute(span, "ansible.task.message", message) - self.set_span_attribute(span, "ansible.task.name", name) - self.set_span_attribute(span, "ansible.task.result", rc) - self.set_span_attribute(span, "ansible.task.host.name", host_data.name) - self.set_span_attribute(span, "ansible.task.host.status", host_data.status) + attributes[("ansible.task.args.name")] = names + attributes[("ansible.task.args.value")] = values + + self.set_span_attributes(span, attributes) + # This will allow to enrich the service map self.add_attributes_for_service_map_if_possible(span, task_data) + # Send logs + if not disable_logs: + # This will avoid populating span attributes to the logs + span.add_event(task_data.dump, attributes={} if disable_attributes_in_logs else attributes) + # Close span always span.end(end_time=host_data.finish) - def set_span_attribute(self, span, attributeName, attributeValue): - """ update the span attribute with the given attribute and value if not None """ + def set_span_attributes(self, span, attributes): + """ update the span attributes with the given attributes if not None """ if span is None and self._display is not None: self._display.warning('span object is None. 
Please double check if that is expected.') else: - if attributeValue is not None: - span.set_attribute(attributeName, attributeValue) + if attributes is not None: + span.set_attributes(attributes) def add_attributes_for_service_map_if_possible(self, span, task_data): """Update the span attributes with the service that the task interacted with, if possible.""" redacted_url = self.parse_and_redact_url_if_possible(task_data.args) if redacted_url: - self.set_span_attribute(span, "http.url", redacted_url.geturl()) + span.set_attribute("http.url", redacted_url.geturl()) @staticmethod def parse_and_redact_url_if_possible(args): @@ -316,9 +410,9 @@ class OpenTelemetrySource(object): @staticmethod def url_from_args(args): # the order matters - url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url") + url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url", "registry_url", "endpoint", "uri", "updates_url") for arg in url_args: - if args.get(arg): + if args is not None and args.get(arg): return args.get(arg) return "" @@ -349,7 +443,7 @@ class OpenTelemetrySource(object): def get_error_message_from_results(results, action): for result in results: if result.get('failed', False): - return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result)) + return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}" @staticmethod def _last_line(text): @@ -361,14 +455,14 @@ class OpenTelemetrySource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" @staticmethod def enrich_error_message_from_results(results, action): message = "" for result in results: if result.get('failed', False): - message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message) + message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}" return message @@ -385,6 +479,8 @@ class CallbackModule(CallbackBase): def __init__(self, display=None): super(CallbackModule, self).__init__(display=display) self.hide_task_arguments = None + self.disable_attributes_in_logs = None + self.disable_logs = None self.otel_service_name = None self.ansible_playbook = None self.play_name = None @@ -392,11 +488,13 @@ class CallbackModule(CallbackBase): self.errors = 0 self.disabled = False self.traceparent = False + self.store_spans_in_file = False + self.otel_exporter_otlp_traces_protocol = None if OTEL_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'), - OTEL_LIBRARY_IMPORT_ERROR) + raise AnsibleError( + 'The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin' + ) from OTEL_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() @@ -410,11 +508,18 @@ class CallbackModule(CallbackBase): environment_variable = self.get_option('enable_from_environment') if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true': self.disabled = True - self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. 
" - "Disabling the `opentelemetry` callback plugin.".format(environment_variable)) + self._display.warning( + f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. Disabling the `opentelemetry` callback plugin." + ) self.hide_task_arguments = self.get_option('hide_task_arguments') + self.disable_attributes_in_logs = self.get_option('disable_attributes_in_logs') + + self.disable_logs = self.get_option('disable_logs') + + self.store_spans_in_file = self.get_option('store_spans_in_file') + self.otel_service_name = self.get_option('otel_service_name') if not self.otel_service_name: @@ -423,6 +528,22 @@ class CallbackModule(CallbackBase): # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 self.traceparent = self.get_option('traceparent') + self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') + + def dump_results(self, task, result): + """ dump the results if disable_logs is not enabled """ + if self.disable_logs: + return "" + # ansible.builtin.uri contains the response in the json field + save = dict(result._result) + + if "json" in save and task.action in ("ansible.builtin.uri", "ansible.legacy.uri", "uri"): + save.pop("json") + # ansible.builtin.slurp contains the response in the content field + if "content" in save and task.action in ("ansible.builtin.slurp", "ansible.legacy.slurp", "slurp"): + save.pop("content") + return self._dump_results(save) + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -462,32 +583,41 @@ class CallbackModule(CallbackBase): ) def v2_runner_on_failed(self, result, ignore_errors=False): - self.errors += 1 + if ignore_errors: + status = 'ignored' + else: + status = 'failed' + self.errors += 1 + self.opentelemetry.finish_task( self.tasks_data, - 'failed', - result + status, + result, + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_ok(self, result): self.opentelemetry.finish_task( self.tasks_data, 'ok', - result + result, + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_skipped(self, result): self.opentelemetry.finish_task( self.tasks_data, 'skipped', - result + result, + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_playbook_on_include(self, included_file): self.opentelemetry.finish_task( self.tasks_data, 'included', - included_file + included_file, + "" ) def v2_playbook_on_stats(self, stats): @@ -495,13 +625,22 @@ class CallbackModule(CallbackBase): status = Status(status_code=StatusCode.OK) else: status = Status(status_code=StatusCode.ERROR) - self.opentelemetry.generate_distributed_traces( + otel_exporter = self.opentelemetry.generate_distributed_traces( self.otel_service_name, self.ansible_playbook, self.tasks_data, status, - self.traceparent + self.traceparent, + self.disable_logs, + self.disable_attributes_in_logs, + self.otel_exporter_otlp_traces_protocol, + self.store_spans_in_file ) + if self.store_spans_in_file: + spans = [json.loads(span.to_json()) for span in otel_exporter.get_finished_spans()] + with open(self.store_spans_in_file, "w", encoding="utf-8") as output: + json.dump({"spans": spans}, output, indent=4) + def v2_runner_on_async_failed(self, result, **kwargs): self.errors += 1 diff --git a/plugins/callback/osx_say.py b/plugins/callback/osx_say.py deleted file mode 120000 index f080521d9d..0000000000 --- a/plugins/callback/osx_say.py +++ /dev/null @@ -1 +0,0 @@ -say.py \ No newline at end of 
file diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py new file mode 100644 index 0000000000..f6008c817f --- /dev/null +++ b/plugins/callback/print_task.py @@ -0,0 +1,62 @@ +# Copyright (c) 2025, Max Mitschke +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: print_task +type: aggregate +short_description: Prints playbook task snippet to job output +description: + - This plugin prints the currently executing playbook task to the job output. +version_added: 10.7.0 +requirements: + - enable in configuration +""" + +EXAMPLES = r""" +ansible.cfg: |- + # Enable plugin + [defaults] + callbacks_enabled=community.general.print_task +""" + +from yaml import load, dump + +try: + from yaml import CSafeDumper as SafeDumper + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeDumper, SafeLoader + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + This callback module prints the currently executing playbook task to the job output. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.print_task' + + CALLBACK_NEEDS_ENABLED = True + + def __init__(self): + super(CallbackModule, self).__init__() + self._printed_message = False + + def _print_task(self, task): + if hasattr(task, '_ds'): + task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader) + task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper) + self._display.display(f"\n{task_yaml}\n") + self._printed_message = True + + def v2_playbook_on_task_start(self, task, is_conditional): + self._printed_message = False + + def v2_runner_on_start(self, host, task): + if not self._printed_message: + self._print_task(task) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 8e8bd507a2..0455ee69e6 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -1,31 +1,28 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com> -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com> +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: say - type: notification - requirements: - - whitelisting in configuration - - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program - short_description: notify using software speech synthesizer - description: - - This plugin will use the 'say' or 'espeak' program to "speak" about play events. - notes: - - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say). -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: say +type: notification +requirements: + - whitelisting in configuration + - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program +short_description: Notify using software speech synthesizer +description: + - This plugin uses C(say) or C(espeak) to "speak" about play events. 
+""" -import distutils.spawn import platform import subprocess import os +from ansible.module_utils.common.process import get_bin_path from ansible.plugins.callback import CallbackBase @@ -47,27 +44,30 @@ class CallbackModule(CallbackBase): self.HAPPY_VOICE = None self.LASER_VOICE = None - self.synthesizer = distutils.spawn.find_executable('say') - if not self.synthesizer: - self.synthesizer = distutils.spawn.find_executable('espeak') - if self.synthesizer: + try: + self.synthesizer = get_bin_path('say') + if platform.system() != 'Darwin': + # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter + self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter") + else: + self.FAILED_VOICE = 'Zarvox' + self.REGULAR_VOICE = 'Trinoids' + self.HAPPY_VOICE = 'Cellos' + self.LASER_VOICE = 'Princess' + except ValueError: + try: + self.synthesizer = get_bin_path('espeak') self.FAILED_VOICE = 'klatt' self.HAPPY_VOICE = 'f5' self.LASER_VOICE = 'whisper' - elif platform.system() != 'Darwin': - # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system()) - else: - self.FAILED_VOICE = 'Zarvox' - self.REGULAR_VOICE = 'Trinoids' - self.HAPPY_VOICE = 'Cellos' - self.LASER_VOICE = 'Princess' + except ValueError: + self.synthesizer = None # plugin disable itself if say is not present # ansible will not call any callback if disabled is set to True if not self.synthesizer: self.disabled = True - self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__)) + self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled") def say(self, msg, voice): cmd = [self.synthesizer, msg] @@ -76,7 +76,7 @@ class CallbackModule(CallbackBase): subprocess.call(cmd) def runner_on_failed(self, host, res, ignore_errors=False): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_ok(self, host, res): self.say("pew", self.LASER_VOICE) @@ -85,13 +85,13 @@ class CallbackModule(CallbackBase): self.say("pew", self.LASER_VOICE) def runner_on_unreachable(self, host, res): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_async_ok(self, host, res, jid): self.say("pew", self.LASER_VOICE) def runner_on_async_failed(self, host, res, jid): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def playbook_on_start(self): self.say("Running Playbook", self.REGULAR_VOICE) @@ -101,15 +101,15 @@ class CallbackModule(CallbackBase): def playbook_on_task_start(self, name, is_conditional): if not is_conditional: - self.say("Starting task: %s" % name, self.REGULAR_VOICE) + self.say(f"Starting task: {name}", self.REGULAR_VOICE) else: - self.say("Notifying task: %s" % name, self.REGULAR_VOICE) + self.say(f"Notifying task: {name}", self.REGULAR_VOICE) def playbook_on_setup(self): self.say("Gathering facts", self.REGULAR_VOICE) def playbook_on_play_start(self, name): - self.say("Starting play: %s" % name, self.HAPPY_VOICE) + self.say(f"Starting play: {name}", self.HAPPY_VOICE) def playbook_on_stats(self, stats): self.say("Play complete", self.HAPPY_VOICE) diff --git 
a/plugins/callback/selective.py b/plugins/callback/selective.py index 403eb84b33..2a7dd07a3e 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,40 +1,39 @@ -# -*- coding: utf-8 -*- -# (c) Fastly, inc 2016 -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Fastly, inc 2016 +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: selective - type: stdout - requirements: - - set as main display callback - short_description: only print certain tasks - description: - - This callback only prints tasks that have been tagged with `print_action` or that have failed. - This allows operators to focus on the tasks that provide value only. - - Tasks that are not printed are placed with a '.'. - - If you increase verbosity all tasks are printed. - options: - nocolor: - default: False - description: This setting allows suppressing colorizing output - env: - - name: ANSIBLE_NOCOLOR - - name: ANSIBLE_SELECTIVE_DONT_COLORIZE - ini: - - section: defaults - key: nocolor - type: boolean -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: selective +type: stdout +requirements: + - set as main display callback +short_description: Only print certain tasks +description: + - This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators + to focus only on the tasks that provide value. + - Tasks that are not printed are replaced with a C(.). + - If you increase verbosity all tasks are printed. +options: + nocolor: + default: false + description: This setting allows suppressing colorizing output. + env: + - name: ANSIBLE_NOCOLOR + - name: ANSIBLE_SELECTIVE_DONT_COLORIZE + ini: + - section: defaults + key: nocolor + type: boolean +""" -EXAMPLES = """ - - ansible.builtin.debug: msg="This will not be printed" - - ansible.builtin.debug: msg="But this will" - tags: [print_action] +EXAMPLES = r""" +- ansible.builtin.debug: msg="This will not be printed" +- ansible.builtin.debug: msg="But this will" + tags: [print_action] """ import difflib @@ -43,26 +42,17 @@ from ansible import constants as C from ansible.plugins.callback import CallbackBase from ansible.module_utils.common.text.converters import to_text -try: - codeCodes = C.COLOR_CODES -except AttributeError: - # This constant was moved to ansible.constants in - # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67 - # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions, - # we include from the original location. 
- from ansible.utils.color import codeCodes - DONT_COLORIZE = False COLORS = { 'normal': '\033[0m', - 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]), + 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m', 'bold': '\033[1m', 'not_so_bold': '\033[1m\033[34m', - 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]), - 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]), + 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m', + 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m', 'endc': '\033[0m', - 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]), + 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m', } @@ -81,7 +71,7 @@ def colorize(msg, color): if DONT_COLORIZE: return msg else: - return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc']) + return f"{COLORS[color]}{msg}{COLORS['endc']}" class CallbackModule(CallbackBase): @@ -114,15 +104,15 @@ class CallbackModule(CallbackBase): line_length = 120 if self.last_skipped: print() - msg = colorize("# {0} {1}".format(task_name, - '*' * (line_length - len(task_name))), 'bold') + line = f"# {task_name} " + msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold') print(msg) def _indent_text(self, text, indent_level): lines = text.splitlines() result_lines = [] for l in lines: - result_lines.append("{0}{1}".format(' ' * indent_level, l)) + result_lines.append(f"{' ' * indent_level}{l}") return '\n'.join(result_lines) def _print_diff(self, diff, indent_level): @@ -155,19 +145,19 @@ class CallbackModule(CallbackBase): change_string = colorize('FAILED!!!', color) else: color = 'changed' if changed else 'ok' - change_string = colorize("changed={0}".format(changed), color) + change_string = colorize(f"changed={changed}", color) msg = colorize(msg, color) line_length = 120 spaces = ' ' * (40 - len(name) - indent_level) - line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string) + line = f"{' ' * indent_level} * {name}{spaces}- {change_string}" if len(msg) < 50: - line += ' -- {0}'.format(msg) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {msg}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(msg, indent_level + 4)) if diff: @@ -217,7 +207,7 @@ class CallbackModule(CallbackBase): stderr = [r.get('exception', None), r.get('module_stderr', None)] stderr = "\n".join([e for e in stderr if e]).strip() - self._print_host_or_item(r['item'], + self._print_host_or_item(r[r['ansible_loop_var']], r.get('changed', False), to_text(r.get('msg', '')), r.get('diff', None), @@ -247,8 +237,10 @@ class CallbackModule(CallbackBase): else: color = 'ok' - msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format( - host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored']) + msg = ( + f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable=" + f"{s['unreachable']}\trescued={s['rescued']}\tignored={s['ignored']}" + ) print(colorize(msg, color)) def v2_runner_on_skipped(self, result, **kwargs): @@ -260,17 +252,15 @@ class CallbackModule(CallbackBase): line_length = 120 spaces = ' ' * (31 - len(result._host.name) - 4) - line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'), - spaces, - colorize("skipped", 'skipped'),) + line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}" reason 
= result._result.get('skipped_reason', '') or \ result._result.get('skip_reason', '') if len(reason) < 50: - line += ' -- {0}'.format(reason) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {reason}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(reason, 8)) print(reason) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 5cb402b109..e1d95abe06 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,65 +1,70 @@ -# -*- coding: utf-8 -*- -# (C) 2014-2015, Matt Martz -# (C) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014-2015, Matt Martz +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: slack - type: notification - requirements: - - whitelist in configuration - - prettytable (python library) - short_description: Sends play events to a Slack channel +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: slack +type: notification +requirements: + - whitelist in configuration + - prettytable (python library) +short_description: Sends play events to a Slack channel +description: + - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. +options: + http_agent: description: - - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin - options: - webhook_url: - required: True - description: Slack Webhook URL - env: - - name: SLACK_WEBHOOK_URL - ini: - - section: callback_slack - key: webhook_url - channel: - default: "#ansible" - description: Slack room to post in. - env: - - name: SLACK_CHANNEL - ini: - - section: callback_slack - key: channel - username: - description: Username to post as. - env: - - name: SLACK_USERNAME - default: ansible - ini: - - section: callback_slack - key: username - validate_certs: - description: validate the SSL certificate of the Slack server. (For HTTPS URLs) - env: - - name: SLACK_VALIDATE_CERTS - ini: - - section: callback_slack - key: validate_certs - default: True - type: bool -''' + - HTTP user agent to use for requests to Slack. + type: string + version_added: "10.5.0" + webhook_url: + required: true + description: Slack Webhook URL. + type: str + env: + - name: SLACK_WEBHOOK_URL + ini: + - section: callback_slack + key: webhook_url + channel: + default: "#ansible" + description: Slack room to post in. + type: str + env: + - name: SLACK_CHANNEL + ini: + - section: callback_slack + key: channel + username: + description: Username to post as. + type: str + env: + - name: SLACK_USERNAME + default: ansible + ini: + - section: callback_slack + key: username + validate_certs: + description: Validate the SSL certificate of the Slack server for HTTPS URLs. 
+ env: + - name: SLACK_VALIDATE_CERTS + ini: + - section: callback_slack + key: validate_certs + default: true + type: bool +""" import json import os import uuid from ansible import context -from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase @@ -105,7 +110,7 @@ class CallbackModule(CallbackBase): self.username = self.get_option('username') self.show_invocation = (self._display.verbosity > 1) self.validate_certs = self.get_option('validate_certs') - + self.http_agent = self.get_option('http_agent') if self.webhook_url is None: self.disabled = True self._display.warning('Slack Webhook URL was not provided. The ' @@ -131,18 +136,22 @@ class CallbackModule(CallbackBase): self._display.debug(data) self._display.debug(self.webhook_url) try: - response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, - headers=headers) + response = open_url( + self.webhook_url, + data=data, + validate_certs=self.validate_certs, + headers=headers, + http_agent=self.http_agent, + ) return response.read() except Exception as e: - self._display.warning(u'Could not submit message to Slack: %s' % - to_text(e)) + self._display.warning(f'Could not submit message to Slack: {e}') def v2_playbook_on_start(self, playbook): self.playbook_name = os.path.basename(playbook._file_name) title = [ - '*Playbook initiated* (_%s_)' % self.guid + f'*Playbook initiated* (_{self.guid}_)' ] invocation_items = [] @@ -153,23 +162,23 @@ class CallbackModule(CallbackBase): subset = context.CLIARGS['subset'] inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] - invocation_items.append('Inventory: %s' % ', '.join(inventory)) + invocation_items.append(f"Inventory: {', '.join(inventory)}") if tags and tags != ['all']: - invocation_items.append('Tags: %s' % ', '.join(tags)) + invocation_items.append(f"Tags: {', '.join(tags)}") if skip_tags: - invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) + invocation_items.append(f"Skip Tags: {', '.join(skip_tags)}") if subset: - invocation_items.append('Limit: %s' % subset) + invocation_items.append(f'Limit: {subset}') if extra_vars: - invocation_items.append('Extra Vars: %s' % - ' '.join(extra_vars)) + invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}") - title.append('by *%s*' % context.CLIARGS['remote_user']) + title.append(f"by *{context.CLIARGS['remote_user']}*") - title.append('\n\n*%s*' % self.playbook_name) + title.append(f'\n\n*{self.playbook_name}*') msg_items = [' '.join(title)] if invocation_items: - msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) + _inv_item = '\n'.join(invocation_items) + msg_items.append(f'```\n{_inv_item}\n```') msg = '\n'.join(msg_items) @@ -189,8 +198,8 @@ class CallbackModule(CallbackBase): def v2_playbook_on_play_start(self, play): """Display Play start messages""" - name = play.name or 'Play name not specified (%s)' % play._uuid - msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) + name = play.name or f'Play name not specified ({play._uuid})' + msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*' attachments = [ { 'fallback': msg, @@ -225,7 +234,7 @@ class CallbackModule(CallbackBase): attachments = [] msg_items = [ - '*Playbook Complete* (_%s_)' % self.guid + f'*Playbook Complete* (_{self.guid}_)' ] if failures or unreachable: color = 'danger' @@ -234,7 +243,7 @@ class CallbackModule(CallbackBase): color = 'good' msg_items.append('\n*Success!*') - 
msg_items.append('```\n%s\n```' % t) + msg_items.append(f'```\n{t}\n```') msg = '\n'.join(msg_items) diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index cb63d3b23f..635a3109bc 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -1,87 +1,76 @@ -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: splunk - type: aggregate - short_description: Sends task result events to Splunk HTTP Event Collector - author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>" +DOCUMENTATION = r""" +name: splunk +type: notification +short_description: Sends task result events to Splunk HTTP Event Collector +author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>" +description: + - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector. + - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). + - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. 
- requirements: - - Whitelisting this callback plugin - - 'Create a HTTP Event Collector in Splunk' - - 'Define the url and token in ansible.cfg' - options: - url: - description: URL to the Splunk HTTP collector source - env: - - name: SPLUNK_URL - ini: - - section: callback_splunk - key: url - authtoken: - description: Token to authenticate the connection to the Splunk HTTP collector - env: - - name: SPLUNK_AUTHTOKEN - ini: - - section: callback_splunk - key: authtoken - validate_certs: - description: Whether to validate certificates for connections to HEC. It is not recommended to set to - C(false) except when you are sure that nobody can intercept the connection - between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks! - env: - - name: SPLUNK_VALIDATE_CERTS - ini: - - section: callback_splunk - key: validate_certs - type: bool - default: true - version_added: '1.0.0' - include_milliseconds: - description: Whether to include milliseconds as part of the generated timestamp field in the event - sent to the Splunk HTTP collector - env: - - name: SPLUNK_INCLUDE_MILLISECONDS - ini: - - section: callback_splunk - key: include_milliseconds - type: bool - default: false - version_added: 2.0.0 - batch: - description: - - Correlation ID which can be set across multiple playbook executions. - env: - - name: SPLUNK_BATCH - ini: - - section: callback_splunk - key: batch - type: str - version_added: 3.3.0 -''' + - Correlation ID which can be set across multiple playbook executions. + env: + - name: SPLUNK_BATCH + ini: + - section: callback_splunk + key: batch + type: str + version_added: 3.3.0 +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.splunk @@ -92,26 +81,29 @@ examples: > [callback_splunk] url = http://mysplunkinstance.datapaas.io:8088/services/collector/event authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88 -''' +""" import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SplunkHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -121,10 +113,6 @@ class SplunkHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -145,12 +133,12 @@ class SplunkHTTPCollectorSource(object): else: time_format = '%Y-%m-%d %H:%M:%S +0000' - data['timestamp'] = datetime.utcnow().strftime(time_format) + data['timestamp'] = now().strftime(time_format) data['host'] = self.host data['ip_address'] = self.ip_address data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = 
self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -159,15 +147,14 @@ class SplunkHTTPCollectorSource(object): data['ansible_result'] = result._result # This wraps the json payload in an outer json event needed by Splunk - jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True) - jsondata = '{"event":' + jsondata + "}" + jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True) open_url( url, jsondata, headers={ 'Content-type': 'application/json', - 'Authorization': 'Splunk ' + authtoken + 'Authorization': f"Splunk {authtoken}" }, method='POST', validate_certs=validate_certs @@ -176,7 +163,7 @@ class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.splunk' CALLBACK_NEEDS_WHITELIST = True @@ -192,7 +179,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -231,10 +218,10 @@ class CallbackModule(CallbackBase): self.splunk.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.splunk.send_event( diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index b1ce85af77..3f99bf216a 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -1,45 +1,33 @@ -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: sumologic -type: aggregate +type: notification short_description: Sends task result events to Sumologic author: "Ryan Currah (@ryancurrah)" description: - - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source + - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source. 
requirements: - Whitelisting this callback plugin - - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator - of C("timestamp": "(.*)")' + - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and + a custom timestamp locator of V("timestamp": "(.*\)")' options: url: - description: URL to the Sumologic HTTP collector source + description: URL to the Sumologic HTTP collector source. + type: str env: - name: SUMOLOGIC_URL ini: - section: callback_sumologic key: url -''' +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: |- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.sumologic @@ -50,26 +38,29 @@ examples: > Set the ansible.cfg variable in the callback_sumologic block [callback_sumologic] url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp== -''' +""" import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SumologicHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -79,10 +70,6 @@ class SumologicHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -95,13 +82,12 @@ class SumologicHTTPCollectorSource(object): data['uuid'] = result._task._uuid data['session'] = self.session data['status'] = state - data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ' - '+0000') + data['timestamp'] = now().strftime('%Y-%m-%d %H:%M:%S +0000') data['host'] = self.host data['ip_address'] = self.ip_address data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -122,7 +108,7 @@ class SumologicHTTPCollectorSource(object): class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.sumologic' CALLBACK_NEEDS_WHITELIST = True @@ -134,7 +120,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -155,10 +141,10 @@ class CallbackModule(CallbackBase): self.sumologic.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def 
v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.sumologic.send_event( diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index f4865f2a26..657ca017f6 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,50 +1,58 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: syslog_json - type: notification - requirements: - - whitelist in configuration - short_description: sends JSON events to syslog - description: - - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format - - Before 2.9 only environment variables were available for configuration - options: - server: - description: syslog server that will receive the event - env: - - name: SYSLOG_SERVER - default: localhost - ini: - - section: callback_syslog_json - key: syslog_server - port: - description: port on which the syslog server is listening - env: - - name: SYSLOG_PORT - default: 514 - ini: - - section: callback_syslog_json - key: syslog_port - facility: - description: syslog facility to log as - env: - - name: SYSLOG_FACILITY - default: user - ini: - - section: callback_syslog_json - key: syslog_facility -''' - -import os -import json +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: syslog_json +type: notification +requirements: + - whitelist in configuration +short_description: Sends JSON events to syslog +description: + - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. +options: + server: + description: Syslog server that receives the event. + type: str + env: + - name: SYSLOG_SERVER + default: localhost + ini: + - section: callback_syslog_json + key: syslog_server + port: + description: Port on which the syslog server is listening. + type: int + env: + - name: SYSLOG_PORT + default: 514 + ini: + - section: callback_syslog_json + key: syslog_port + facility: + description: Syslog facility to log as. + type: str + env: + - name: SYSLOG_FACILITY + default: user + ini: + - section: callback_syslog_json + key: syslog_facility + setup: + description: Log setup tasks. 
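Both callback hunks above also swap the naive `datetime.utcnow()` for the collection's `now()` helper. A sketch of the behavior, assuming `community.general.plugins.module_utils.datetime.now` returns a timezone-aware UTC datetime (the stand-in below mirrors that assumption):

```python
from datetime import datetime, timezone

def now():
    # aware replacement for the deprecated naive datetime.utcnow()
    return datetime.now(timezone.utc)

start = now()
# ... a task executes ...
runtime = (now() - start).total_seconds()          # as in _runtime() above
stamp = now().strftime('%Y-%m-%d %H:%M:%S +0000')  # as in the sumologic event
```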
+ env: + - name: ANSIBLE_SYSLOG_SETUP + type: bool + default: true + ini: + - section: callback_syslog_json + key: syslog_setup + version_added: 4.5.0 +""" import logging import logging.handlers @@ -60,7 +68,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.syslog_json' CALLBACK_NEEDS_WHITELIST = True @@ -86,23 +94,36 @@ class CallbackModule(CallbackBase): self.logger.addHandler(self.handler) self.hostname = socket.gethostname() - def runner_on_failed(self, host, res, ignore_errors=False): + def v2_runner_on_failed(self, result, ignore_errors=False): + res = result._result + host = result._host.get_name() self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res)) - def runner_on_ok(self, host, res): - self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + def v2_runner_on_ok(self, result): + res = result._result + host = result._host.get_name() + if result._task.action != "gather_facts" or self.get_option("setup"): + self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res)) - def runner_on_skipped(self, host, item=None): + def v2_runner_on_skipped(self, result): + host = result._host.get_name() self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped') - def runner_on_unreachable(self, host, res): + def v2_runner_on_unreachable(self, result): + res = result._result + host = result._host.get_name() self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res)) - def runner_on_async_failed(self, host, res, jid): + def v2_runner_on_async_failed(self, result): + res = result._result + host = result._host.get_name() + jid = result._result.get('ansible_job_id') self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res)) - def playbook_on_import_for_host(self, host, imported_file): + def v2_playbook_on_import_for_host(self, result, imported_file): + host = result._host.get_name() self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file) - def playbook_on_not_import_for_host(self, host, missing_file): + def v2_playbook_on_not_import_for_host(self, result, missing_file): + host = result._host.get_name() self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file) diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py new file mode 100644 index 0000000000..3de81fc2db --- /dev/null +++ b/plugins/callback/tasks_only.py @@ -0,0 +1,68 @@ + +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Felix Fontein (@felixfontein) +name: tasks_only +type: stdout +version_added: 11.1.0 +short_description: Only show tasks +description: + - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output. + - Can be used to generate output for documentation examples. 
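The syslog_json hunk above migrates the pre-2.0 callback hooks to their `v2_*` equivalents, which receive a `TaskResult` object instead of separate `(host, res)` arguments. A minimal sketch of the pattern, with a hypothetical callback name:

```python
from ansible.plugins.callback import CallbackBase

class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'demo_notification'  # hypothetical

    def v2_runner_on_failed(self, result, ignore_errors=False):
        res = result._result            # was the `res` parameter in v1 hooks
        host = result._host.get_name()  # was the `host` parameter in v1 hooks
        self._display.error(f"FAILED on {host}: {self._dump_results(res)}")
```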
+ For this, the O(number_of_columns) option should be set to an explicit value. +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +options: + number_of_columns: + description: + - Sets the number of columns for Ansible's display. + type: int + env: + - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS + result_format: + # Part of the ansible.builtin.result_format_callback doc fragment + version_added: 11.2.0 + pretty_results: + # Part of the ansible.builtin.result_format_callback doc fragment + version_added: 11.2.0 +""" + +EXAMPLES = r""" +--- +# Enable callback in ansible.cfg: +ansible_config: |- + [defaults] + stdout_callback = community.general.tasks_only + +--- +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.tasks_only' + + def v2_playbook_on_play_start(self, play): + pass + + def v2_playbook_on_stats(self, stats): + pass + + def set_options(self, *args, **kwargs): + result = super(CallbackModule, self).set_options(*args, **kwargs) + self.number_of_columns = self.get_option("number_of_columns") + if self.number_of_columns is not None: + self._display.columns = self.number_of_columns + return result diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py new file mode 100644 index 0000000000..f733fa8cb7 --- /dev/null +++ b/plugins/callback/timestamp.py @@ -0,0 +1,124 @@ + +# Copyright (c) 2024, kurokobo +# Copyright (c) 2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +name: timestamp +type: stdout +short_description: Adds simple timestamp for each header +version_added: 9.0.0 +description: + - This callback adds simple timestamp for each header. +author: kurokobo (@kurokobo) +options: + timezone: + description: + - Timezone to use for the timestamp in IANA time zone format. + - For example V(America/New_York), V(Asia/Tokyo)). Ignored on Python < 3.9. + ini: + - section: callback_timestamp + key: timezone + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE + type: string + format_string: + description: + - Format of the timestamp shown to user in 1989 C standard format. + - Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) + for the available format codes. + ini: + - section: callback_timestamp + key: format_string + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING + default: "%H:%M:%S" + type: string +seealso: + - plugin: ansible.posix.profile_tasks + plugin_type: callback + description: >- + You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time + with detailed timestamps. 
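The `timezone` option above only takes effect where `zoneinfo` exists, which is why the documentation says it is ignored on Python < 3.9. A minimal sketch of that resolution logic:

```python
import sys
from datetime import datetime

tzinfo = None
if sys.version_info >= (3, 9):
    from zoneinfo import ZoneInfo
    tzinfo = ZoneInfo("Asia/Tokyo")  # example value for the `timezone` option

# falls back to local time when tzinfo is None
print(datetime.now(tz=tzinfo).strftime("%H:%M:%S"))
```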
+extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + + +from ansible.plugins.callback.default import CallbackModule as Default +from ansible.utils.display import get_text_width +from ansible.module_utils.common.text.converters import to_text +from datetime import datetime +import types +import sys + +# Store whether the zoneinfo module is available +_ZONEINFO_AVAILABLE = sys.version_info >= (3, 9) + + +def get_datetime_now(tz): + """ + Returns the current timestamp with the specified timezone + """ + return datetime.now(tz=tz) + + +def banner(self, msg, color=None, cows=True): + """ + Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum) with trailing timestamp + + Based on the banner method of Display class from ansible.utils.display + + https://github.com/ansible/ansible/blob/4403519afe89138042108e237aef317fd5f09c33/lib/ansible/utils/display.py#L511 + """ + timestamp = get_datetime_now(self.timestamp_tzinfo).strftime(self.timestamp_format_string) + timestamp_len = get_text_width(timestamp) + 1 # +1 for leading space + + msg = to_text(msg) + if self.b_cowsay and cows: + try: + self.banner_cowsay(f"{msg} @ {timestamp}") + return + except OSError: + self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.") + + msg = msg.strip() + try: + star_len = self.columns - get_text_width(msg) - timestamp_len + except EnvironmentError: + star_len = self.columns - len(msg) - timestamp_len + if star_len <= 3: + star_len = 3 + stars = "*" * star_len + self.display(f"\n{msg} {stars} {timestamp}", color=color) + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.timestamp" + + def __init__(self): + super(CallbackModule, self).__init__() + + # Replace the banner method of the display object with the custom one + self._display.banner = types.MethodType(banner, self._display) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + # Store zoneinfo for specified timezone if available + tzinfo = None + if _ZONEINFO_AVAILABLE and self.get_option("timezone"): + from zoneinfo import ZoneInfo + + tzinfo = ZoneInfo(self.get_option("timezone")) + + # Inject options into the display object + setattr(self._display, "timestamp_tzinfo", tzinfo) + setattr(self._display, "timestamp_format_string", self.get_option("format_string")) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index fd00fae71b..d155aefc66 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,24 +1,23 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Allyson Bowles <@akatch> -# Copyright: (c) 2012-2014, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2023, Al Bowles <@akatch> +# Copyright (c) 2012-2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: unixy - type: stdout - author: Allyson Bowles (@akatch) - short_description: condensed Ansible output - description: - - Consolidated Ansible output 
in the style of LINUX/UNIX startup logs. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' +DOCUMENTATION = r""" +name: unixy +type: stdout +author: Al Bowles (@akatch) +short_description: Condensed Ansible output +description: + - Consolidated Ansible output in the style of LINUX/UNIX startup logs. +extends_documentation_fragment: + - default_callback +requirements: + - set as stdout in configuration +""" from os.path import basename from ansible import constants as C @@ -39,7 +38,6 @@ class CallbackModule(CallbackModule_default): - Only display task names if the task runs on at least one host - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line) - Consolidate stats display - - Display whether run is in --check mode - Don't show play name if no hosts found ''' @@ -62,59 +60,71 @@ class CallbackModule(CallbackModule_default): def _preprocess_result(self, result): self.delegated_vars = result._result.get('_ansible_delegated_vars', None) - self._handle_exception(result._result, use_stderr=self.display_failed_stderr) + self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr')) self._handle_warnings(result._result) def _process_result_output(self, result, msg): task_host = result._host.get_name() - task_result = "%s %s" % (task_host, msg) + task_result = f"{task_host} {msg}" if self._run_is_verbose(result): - task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4)) + task_result = f"{task_host} {msg}: {self._dump_results(result._result, indent=4)}" return task_result if self.delegated_vars: task_delegate_host = self.delegated_vars['ansible_host'] - task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg) + task_result = f"{task_host} -> {task_delegate_host} {msg}" if result._result.get('msg') and result._result.get('msg') != "All items completed": - task_result += " | msg: " + to_text(result._result.get('msg')) + task_result += f" | msg: {to_text(result._result.get('msg'))}" if result._result.get('stdout'): - task_result += " | stdout: " + result._result.get('stdout') + task_result += f" | stdout: {result._result.get('stdout')}" if result._result.get('stderr'): - task_result += " | stderr: " + result._result.get('stderr') + task_result += f" | stderr: {result._result.get('stderr')}" return task_result def v2_playbook_on_task_start(self, task, is_conditional): self._get_task_display_name(task) if self.task_display_name is not None: - self._display.display("%s..." % self.task_display_name) + if task.check_mode and self.get_option('check_mode_markers'): + self._display.display(f"{self.task_display_name} (check mode)...") + else: + self._display.display(f"{self.task_display_name}...") def v2_playbook_on_handler_task_start(self, task): self._get_task_display_name(task) if self.task_display_name is not None: - self._display.display("%s (via handler)... " % self.task_display_name) + if task.check_mode and self.get_option('check_mode_markers'): + self._display.display(f"{self.task_display_name} (via handler in check mode)... ") + else: + self._display.display(f"{self.task_display_name} (via handler)... 
") def v2_playbook_on_play_start(self, play): name = play.get_name().strip() - if name and play.hosts: - msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts)) + if play.check_mode and self.get_option('check_mode_markers'): + if name and play.hosts: + msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -" + else: + msg = "- check mode -" else: - msg = u"---" + if name and play.hosts: + msg = f"\n- {name} on hosts: {','.join(play.hosts)} -" + else: + msg = "---" self._display.display(msg) def v2_runner_on_skipped(self, result, ignore_errors=False): - if self.display_skipped_hosts: + if self.get_option('display_skipped_hosts'): self._preprocess_result(result) display_color = C.COLOR_SKIP msg = "skipped" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) else: return @@ -124,10 +134,10 @@ class CallbackModule(CallbackModule_default): msg = "failed" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): self._preprocess_result(result) @@ -137,13 +147,13 @@ class CallbackModule(CallbackModule_default): msg = "done" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" display_color = C.COLOR_CHANGED task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) - elif self.display_ok_hosts: + self._display.display(f" {task_result}", display_color) + elif self.get_option('display_ok_hosts'): task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) def v2_runner_item_on_skipped(self, result): self.v2_runner_on_skipped(result) @@ -161,7 +171,7 @@ class CallbackModule(CallbackModule_default): display_color = C.COLOR_UNREACHABLE task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: @@ -183,40 +193,34 @@ class CallbackModule(CallbackModule_default): # TODO how else can we display these? 
t = stats.summarize(h) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN)), + self._display.display( + f" {hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t, False), - colorize(u'ok', t['ok'], None), - colorize(u'changed', t['changed'], None), - colorize(u'unreachable', t['unreachable'], None), - colorize(u'failed', t['failures'], None), - colorize(u'rescued', t['rescued'], None), - colorize(u'ignored', t['ignored'], None)), + self._display.display( + f" {hostcolor(h, t, False)} : {colorize('ok', t['ok'], None)} {colorize('changed', t['changed'], None)} " + f"{colorize('unreachable', t['unreachable'], None)} {colorize('failed', t['failures'], None)} {colorize('rescued', t['rescued'], None)} " + f"{colorize('ignored', t['ignored'], None)}", log_only=True ) - if stats.custom and self.show_custom_stats: + if stats.custom and self.get_option('show_custom_stats'): self._display.banner("CUSTOM STATS: ") # per host # TODO: come up with 'pretty format' for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {stat_val}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {stat_val_run}') self._display.display("", screen_only=True) def v2_playbook_on_no_hosts_matched(self): @@ -226,22 +230,24 @@ class CallbackModule(CallbackModule_default): self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR) def v2_playbook_on_start(self, playbook): - # TODO display whether this run is happening in check mode - self._display.display("Executing playbook %s" % basename(playbook._file_name)) + if context.CLIARGS['check'] and self.get_option('check_mode_markers'): + self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode") + else: + self._display.display(f"Executing playbook {basename(playbook._file_name)}") # show CLI arguments if self._display.verbosity > 3: if context.CLIARGS.get('args'): - self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']), + self._display.display(f"Positional arguments: {' '.join(context.CLIARGS['args'])}", color=C.COLOR_VERBOSE, screen_only=True) for argument in (a for a in context.CLIARGS if a != 'args'): val = context.CLIARGS[argument] if val: - self._display.vvvv('%s: %s' % (argument, val)) + self._display.vvvv(f'{argument}: {val}') def v2_runner_retry(self, result): - msg = " Retrying... 
(%d of %d)" % (result._result['attempts'], result._result['retries']) + msg = f" Retrying... ({result._result['attempts']} of {result._result['retries']})" if self._run_is_verbose(result): - msg += "Result was: %s" % self._dump_results(result._result) + msg += f"Result was: {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py deleted file mode 100644 index 59fb350934..0000000000 --- a/plugins/callback/yaml.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: yaml - type: stdout - short_description: yaml-ized Ansible screen output - description: - - Ansible output that can be quite a bit easier to read than the - default JSON formatting. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' - -import yaml -import json -import re -import string -import sys - -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.module_utils.six import string_types -from ansible.parsing.yaml.dumper import AnsibleDumper -from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy -from ansible.plugins.callback.default import CallbackModule as Default - - -# from http://stackoverflow.com/a/15423007/115478 -def should_use_block(value): - """Returns true if string should be in block format""" - for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029": - if c in value: - return True - return False - - -class MyDumper(AnsibleDumper): - def represent_scalar(self, tag, value, style=None): - """Uses block style for multi-line strings""" - if style is None: - if should_use_block(value): - style = '|' - # we care more about readable than accuracy, so... - # ...no trailing space - value = value.rstrip() - # ...and non-printable characters - value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0) - # ...tabs prevent blocks from expanding - value = value.expandtabs() - # ...and odd bits of whitespace - value = re.sub(r'[\x0b\x0c\r]', '', value) - # ...as does trailing space - value = re.sub(r' +\n', '\n', value) - else: - style = self.default_style - node = yaml.representer.ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - -class CallbackModule(Default): - - """ - Variation of the Default output which uses nicely readable YAML instead - of JSON for printing results. - """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.yaml' - - def __init__(self): - super(CallbackModule, self).__init__() - - def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): - if result.get('_ansible_no_log', False): - return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result")) - - # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. 
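The deleted yaml callback's readability came from a custom dumper that forces block style for multi-line scalars. A condensed, hypothetical re-creation of that trick with plain PyYAML:

```python
import yaml

class BlockDumper(yaml.SafeDumper):
    def represent_scalar(self, tag, value, style=None):
        if style is None and ('\n' in value or '\r' in value):
            style = '|'
            # block scalars cannot carry trailing spaces or tabs cleanly
            value = value.rstrip().expandtabs()
        return super().represent_scalar(tag, value, style=style)

print(yaml.dump({"stdout": "line one\nline two"}, Dumper=BlockDumper))
```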
- abridged_result = strip_internal_keys(module_response_deepcopy(result)) - - # remove invocation unless specifically wanting it - if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: - del abridged_result['invocation'] - - # remove diff information from screen output - if self._display.verbosity < 3 and 'diff' in result: - del abridged_result['diff'] - - # remove exception from screen output - if 'exception' in abridged_result: - del abridged_result['exception'] - - dumped = '' - - # put changed and skipped into a header line - if 'changed' in abridged_result: - dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' - del abridged_result['changed'] - - if 'skipped' in abridged_result: - dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' - del abridged_result['skipped'] - - # if we already have stdout, we don't need stdout_lines - if 'stdout' in abridged_result and 'stdout_lines' in abridged_result: - abridged_result['stdout_lines'] = '' - - # if we already have stderr, we don't need stderr_lines - if 'stderr' in abridged_result and 'stderr_lines' in abridged_result: - abridged_result['stderr_lines'] = '' - - if abridged_result: - dumped += '\n' - dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False)) - - # indent by a couple of spaces - dumped = '\n '.join(dumped.split('\n')).rstrip() - return dumped - - def _serialize_diff(self, diff): - return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False)) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 295bd4046b..35f7312326 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,61 +1,85 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya # (c) 2015, Toshio Kuratomi # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Maykel Moya (!UNKNOWN) - name: chroot - short_description: Interact with local chroot +DOCUMENTATION = r""" +author: Maykel Moya (!UNKNOWN) +name: chroot +short_description: Interact with local chroot +description: + - Run commands or put/fetch files to an existing chroot on the Ansible controller. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing chroot on the Ansible controller. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: ansible_host - executable: - description: - - User specified executable shell - ini: - - section: defaults - key: executable - env: - - name: ANSIBLE_EXECUTABLE - vars: - - name: ansible_executable - default: /bin/sh - chroot_exe: - description: - - User specified chroot binary - ini: - - section: chroot_connection - key: exe - env: - - name: ANSIBLE_CHROOT_EXE - vars: - - name: ansible_chroot_exe - default: chroot -''' + - The path of the chroot you want to access. 
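The removed `_dump_results` above also compacted results before printing: `changed`/`skipped` moved into a header line, and redundant `*_lines` fields were blanked when the plain field existed. A short sketch of that behavior:

```python
result = {"changed": True, "stdout": "hi", "stdout_lines": ["hi"]}

header = ""
for flag in ("changed", "skipped"):
    if flag in result:
        header += f"{flag}={str(result.pop(flag)).lower()} "

# stdout already carries the content, so drop the duplicate list form
if "stdout" in result and "stdout_lines" in result:
    result["stdout_lines"] = ""

print(header.strip(), result)  # -> changed=true {'stdout': 'hi', 'stdout_lines': ''}
```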
+ type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + executable: + description: + - User specified executable shell. + type: string + ini: + - section: defaults + key: executable + env: + - name: ANSIBLE_EXECUTABLE + vars: + - name: ansible_executable + default: /bin/sh + chroot_exe: + description: + - User specified chroot binary. + type: string + ini: + - section: chroot_connection + key: exe + env: + - name: ANSIBLE_CHROOT_EXE + vars: + - name: ansible_chroot_exe + default: chroot + disable_root_check: + description: + - Do not check that the user is not root. + ini: + - section: chroot_connection + key: disable_root_check + env: + - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK + vars: + - name: ansible_chroot_disable_root_check + default: false + type: bool + version_added: 7.3.0 +""" + +EXAMPLES = r""" +- hosts: chroots + connection: community.general.chroot + tasks: + - debug: + msg: "This is coming from chroot environment" +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError from ansible.module_utils.basic import is_executable from ansible.module_utils.common.process import get_bin_path -from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display @@ -79,31 +103,32 @@ class Connection(ConnectionBase): self.chroot = self._play_context.remote_addr - if os.geteuid() != 0: - raise AnsibleError("chroot connection requires running as root") - - # we're running as root on the local system so do some - # trivial checks for ensuring 'host' is actually a chroot'able dir + # do some trivial checks for ensuring 'host' is actually a chroot'able dir if not os.path.isdir(self.chroot): - raise AnsibleError("%s is not a directory" % self.chroot) + raise AnsibleError(f"{self.chroot} is not a directory") chrootsh = os.path.join(self.chroot, 'bin/sh') # Want to check for a usable bourne shell inside the chroot. # is_executable() == True is sufficient. For symlinks it # gets really complicated really fast. So we punt on finding that - # out. As long as it's a symlink we assume that it will work + # out. As long as it is a symlink we assume that it will work if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))): - raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) + raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)") def _connect(self): """ connect to the chroot """ + if not self.get_option('disable_root_check') and os.geteuid() != 0: + raise AnsibleError( + "chroot connection requires running as root. 
" + "You can override this check with the `disable_root_check` option.") + if os.path.isabs(self.get_option('chroot_exe')): self.chroot_cmd = self.get_option('chroot_exe') else: try: self.chroot_cmd = get_bin_path(self.get_option('chroot_exe')) except ValueError as e: - raise AnsibleError(to_native(e)) + raise AnsibleError(str(e)) super(Connection, self)._connect() if not self._connected: @@ -121,7 +146,7 @@ class Connection(ConnectionBase): executable = self.get_option('executable') local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - display.vvv("EXEC %s" % local_cmd, host=self.chroot) + display.vvv(f"EXEC {local_cmd}", host=self.chroot) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -146,7 +171,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. - Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -155,7 +180,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to chroot """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -165,27 +190,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from chroot to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") @@ -197,10 +222,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, 
stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 94d1a3bd9c..86d050c1db 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,30 +1,30 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: funcd - short_description: Use funcd to connect to target +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: funcd +short_description: Use funcd to connect to target +description: + - This transport permits you to use Ansible over Func. + - For people who have already setup func and that wish to play with ansible, this permit to move gradually to ansible without + having to redo completely the setup of the network. +options: + remote_addr: description: - - This transport permits you to use Ansible over Func. - - For people who have already setup func and that wish to play with ansible, - this permit to move gradually to ansible without having to redo completely the setup of the network. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_func_host -''' + - The path of the chroot you want to access. 
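The chroot `put_file`/`fetch_file` hunks above stream file contents through `dd` executed inside the target, so the chroot only needs a usable `/bin/sh` and `dd`. A standalone sketch of the upload direction (the `chroot` invocation and paths are illustrative):

```python
import subprocess
from shlex import quote as shlex_quote

BUFSIZE = 65536  # stand-in for ansible.plugins.connection.BUFSIZE

def put_file(chroot_dir, in_path, out_path):
    out_path = shlex_quote(out_path)
    with open(in_path, 'rb') as in_file:
        p = subprocess.Popen(
            ['chroot', chroot_dir, '/bin/sh', '-c', f'dd of={out_path} bs={BUFSIZE}'],
            stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
    if p.returncode != 0:
        raise RuntimeError(f'failed to transfer {in_path} to {out_path}: {stderr!r}')
```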
+ type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_func_host +""" HAVE_FUNC = False try: @@ -63,14 +63,14 @@ class Connection(ConnectionBase): self.client = fc.Client(self.host) return self - def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, in_data=None, sudoable=True): """ run a command on the remote minion """ if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - # totally ignores privlege escalation - display.vvv("EXEC %s" % cmd, host=self.host) + # totally ignores privilege escalation + display.vvv(f"EXEC {cmd}", host=self.host) p = self.client.command.run(cmd)[self.host] return p[0], p[1], p[2] @@ -85,14 +85,14 @@ class Connection(ConnectionBase): """ transfer a file from local to remote """ out_path = self._normalize_path(out_path, '/') - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): """ fetch a file from remote to local """ in_path = self._normalize_path(in_path, '/') - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) # need to use a tmp dir due to difference of semantic for getfile # ( who take a # directory as destination) and fetch_file, who # take a file directly diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py new file mode 100644 index 0000000000..3dfd37764b --- /dev/null +++ b/plugins/connection/incus.py @@ -0,0 +1,274 @@ +# Based on lxd.py (c) 2016, Matt Clay +# (c) 2023, Stephane Graber +# Copyright (c) 2023 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Stéphane Graber (@stgraber) +name: incus +short_description: Run tasks in Incus instances using the Incus CLI +description: + - Run commands or put/fetch files to an existing Incus instance using Incus CLI. +version_added: "8.2.0" +options: + remote_addr: + description: + - The instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_incus_host + executable: + description: + - The shell to use for execution inside the instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_incus_executable + incus_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: incus_become_method + version_added: 10.4.0 + remote: + description: + - The name of the Incus remote to use (per C(incus remote list)). + - Remotes are used to access multiple servers from a single client. + type: string + default: local + vars: + - name: ansible_incus_remote + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
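The funcd hunk above normalizes `exec_command` to the standard connection-plugin signature; `sudoable` is accepted but has no effect since, as the comment notes, privilege escalation is ignored over Func. A sketch of the resulting shape (method body only; `self.client` is the Func client built in `_connect`):

```python
def exec_command(self, cmd, in_data=None, sudoable=True):
    """run a command on the remote minion"""
    if in_data:
        raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
    rc, stdout, stderr = self.client.command.run(cmd)[self.host]
    return rc, stdout, stderr
```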
+ type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 + project: + description: + - The name of the Incus project to use (per C(incus project list)). + - Projects are used to divide the instances running on a server. + type: string + default: default + vars: + - name: ansible_incus_project +""" + +import os +from subprocess import call, Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + """ Incus based connections """ + + transport = "incus" + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._incus_cmd = get_bin_path("incus") + + if not self._incus_cmd: + raise AnsibleError("incus command not found in PATH") + + def _connect(self): + """connect to Incus (nothing to do here) """ + super(Connection, self)._connect() + + if not self._connected: + self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}", + host=self._instance()) + self._connected = True + + def _build_command(self, cmd) -> str: + """build the command to execute on the incus host""" + + exec_cmd = [ + self._incus_cmd, + "--project", self.get_option("project"), + "exec", + f"{self.get_option('remote')}:{self._instance()}", + "--"] + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'incus exec' with become method: {self.get_option('incus_become_method')}", + host=self._instance(), + ) + exec_cmd.extend( + [self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + + def _instance(self): + # Return only the leading part of the FQDN as the instance name + # as Incus instance names cannot be a FQDN. 
+ return self.get_option('remote_addr').split(".")[0] + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the Incus host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(f"EXEC {cmd}", + host=self._instance()) + + local_cmd = self._build_command(cmd) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance()) + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') + + process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate(in_data) + + stdout = to_text(stdout) + stderr = to_text(stderr) + + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance is not running" + ): + raise AnsibleConnectionFailure( + f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance not found" + ): + raise AnsibleConnectionFailure( + f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have permission " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have entitlement " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + return process.returncode, stdout, stderr + + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id -u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + + def put_file(self, in_path, out_path): + """ put a file from local to Incus """ + super(Connection, self).put_file(in_path, out_path) + + self._display.vvv(f"PUT {in_path} TO {out_path}", + host=self._instance()) + + if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + else: + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + + self._display.vvvvv(f"PUT {local_cmd}", host=self._instance()) + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + call(local_cmd) + + def fetch_file(self, in_path, out_path): + """ fetch a file from Incus to local """ + super(Connection, 
self).fetch_file(in_path, out_path) + + self._display.vvv(f"FETCH {in_path} TO {out_path}", + host=self._instance()) + + local_cmd = [ + self._incus_cmd, + "--project", self.get_option("project"), + "file", "pull", "--quiet", + f"{self.get_option('remote')}:{self._instance()}/{in_path}", + out_path] + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + call(local_cmd) + + def close(self): + """ close the connection (nothing to do here) """ + super(Connection, self).close() + + self._connected = False diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 2fd74313bc..fa4973bae1 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,34 +1,35 @@ -# -*- coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi # (c) 2016, Stephan Lohse # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Stephan Lohse (!UNKNOWN) - name: iocage - short_description: Run tasks in iocage jails +DOCUMENTATION = r""" +author: Stephan Lohse (!UNKNOWN) +name: iocage +short_description: Run tasks in iocage jails +description: + - Run commands or put/fetch files to an existing iocage jail. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing iocage jail - options: - remote_addr: - description: - - Path to the jail - vars: - - name: ansible_host - - name: ansible_iocage_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_iocage_user -''' + - Path to the jail. + type: string + vars: + - name: ansible_host + - name: ansible_iocage_host + remote_user: + description: + - User to execute as inside the jail. 
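The incus `put_file` hunk above shells out to `incus file push`, adding `--uid`/`--gid` when the remote user is not root so file ownership inside the instance matches that user. A standalone sketch of the command construction (all parameter values are illustrative):

```python
from subprocess import call

def incus_push(incus_cmd, project, remote, instance, in_path, out_path,
               uid=None, gid=None):
    cmd = [incus_cmd, "--project", project, "file", "push", "--quiet"]
    if uid is not None and gid is not None:
        # preserve ownership for non-root remote users
        cmd += ["--uid", str(uid), "--gid", str(gid)]
    cmd += [in_path, f"{remote}:{instance}/{out_path}"]
    return call(cmd)
```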
+ type: string + vars: + - name: ansible_user + - name: ansible_iocage_user +""" import subprocess @@ -52,11 +53,12 @@ class Connection(Jail): jail_uuid = self.get_jail_uuid() - kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid) + kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}' - display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format( - iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]), - host=kwargs[Jail.modified_jailname_key]) + display.vvv( + f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}", + host=kwargs[Jail.modified_jailname_key] + ) super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -78,6 +80,6 @@ class Connection(Jail): p.wait() if p.returncode != 0: - raise AnsibleError(u"iocage returned an error: {0}".format(stdout)) + raise AnsibleError(f"iocage returned an error: {stdout}") return stdout.strip('\n') diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 02f5aeeddd..7f25c3fe01 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,44 +1,46 @@ -# -*- coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer # Copyright (c) 2015, Toshio Kuratomi # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Ansible Core Team - name: jail - short_description: Run tasks in jails +DOCUMENTATION = r""" +author: Ansible Core Team +name: jail +short_description: Run tasks in jails +description: + - Run commands or put/fetch files to an existing jail. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing jail - options: - remote_addr: - description: - - Path to the jail - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_jail_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_jail_user -''' + - Path to the jail. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_jail_host + remote_user: + description: + - User to execute as inside the jail. 
+ type: string + vars: + - name: ansible_user + - name: ansible_jail_user +""" -import distutils.spawn import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError -from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display @@ -71,14 +73,14 @@ class Connection(ConnectionBase): self.jexec_cmd = self._search_executable('jexec') if self.jail not in self.list_jails(): - raise AnsibleError("incorrect jail name %s" % self.jail) + raise AnsibleError(f"incorrect jail name {self.jail}") @staticmethod def _search_executable(executable): - cmd = distutils.spawn.find_executable(executable) - if not cmd: - raise AnsibleError("%s command not found in PATH" % executable) - return cmd + try: + return get_bin_path(executable) + except ValueError: + raise AnsibleError(f"{executable} command not found in PATH") def list_jails(self): p = subprocess.Popen([self.jls_cmd, '-q', 'name'], @@ -93,7 +95,7 @@ class Connection(ConnectionBase): """ connect to the jail; nothing to do here """ super(Connection, self)._connect() if not self._connected: - display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail) + display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: {self._play_context.remote_user}", host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): @@ -111,11 +113,11 @@ class Connection(ConnectionBase): if self._play_context.remote_user is not None: local_cmd += ['-U', self._play_context.remote_user] # update HOME since -U does not update the jail environment - set_env = 'HOME=~' + self._play_context.remote_user + ' ' + set_env = f"HOME=~{self._play_context.remote_user} " local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd] - display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + display.vvv(f"EXEC {local_cmd}", host=self.jail) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -140,7 +142,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. 
- Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -149,7 +151,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to jail """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -159,27 +161,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from jail to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("jail connection requires dd command in the jail") @@ -191,10 +193,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 2aaf1619dc..e8e28ed804 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,33 +1,35 @@ -# -*- coding: utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Joerg Thalheim (!UNKNOWN) - name: lxc - short_description: Run tasks in lxc 
containers via lxc python library +DOCUMENTATION = r""" +author: Joerg Thalheim (!UNKNOWN) +name: lxc +short_description: Run tasks in LXC containers using lxc python library +description: + - Run commands or put/fetch files to an existing LXC container using lxc python library. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc python library - options: - remote_addr: - description: - - Container identifier - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_lxc_host - executable: - default: /bin/sh - description: - - Shell executable - vars: - - name: ansible_executable - - name: ansible_lxc_executable -''' + - Container identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxc_host + executable: + default: /bin/sh + description: + - Shell executable. + type: string + vars: + - name: ansible_executable + - name: ansible_lxc_executable +""" import os import shutil @@ -58,7 +60,7 @@ class Connection(ConnectionBase): def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) - self.container_name = self._play_context.remote_addr + self.container_name = None self.container = None def _connect(self): @@ -66,16 +68,19 @@ class Connection(ConnectionBase): super(Connection, self)._connect() if not HAS_LIBLXC: - msg = "lxc bindings for python2 are not installed" + msg = "lxc python bindings are not installed" raise errors.AnsibleError(msg) - if self.container: + container_name = self.get_option('remote_addr') + if self.container and self.container_name == container_name: return + self.container_name = container_name + self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name) self.container = _lxc.Container(self.container_name) if self.container.state == "STOPPED": - raise errors.AnsibleError("%s is not running" % self.container_name) + raise errors.AnsibleError(f"{self.container_name} is not running") @staticmethod def _communicate(pid, in_data, stdin, stdout, stderr): @@ -116,7 +121,7 @@ class Connection(ConnectionBase): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) # python2-lxc needs bytes. python3-lxc needs text. 
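+        # Note: reading the shell through get_option('executable') honours the
+        # ansible_lxc_executable / ansible_executable vars documented above;
+        # the previous play_context.executable attribute ignored them.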
- executable = to_native(self._play_context.executable, errors='surrogate_or_strict') + executable = to_native(self.get_option('executable'), errors='surrogate_or_strict') local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')] read_stdout, write_stdout = None, None @@ -137,10 +142,10 @@ class Connection(ConnectionBase): read_stdin, write_stdin = os.pipe() kwargs['stdin'] = self._set_nonblocking(read_stdin) - self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name) + self._display.vvv(f"EXEC {local_cmd}", host=self.container_name) pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs) if pid == -1: - msg = "failed to attach to container %s" % self.container_name + msg = f"failed to attach to container {self.container_name}" raise errors.AnsibleError(msg) write_stdout = os.close(write_stdout) @@ -167,18 +172,18 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): ''' transfer a file from local to lxc ''' super(Connection, self).put_file(in_path, out_path) - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') if not os.path.exists(in_path): - msg = "file or module does not exist: %s" % in_path + msg = f"file or module does not exist: {in_path}" raise errors.AnsibleFileNotFound(msg) try: src_file = open(in_path, "rb") except IOError: traceback.print_exc() - raise errors.AnsibleError("failed to open input file to %s" % in_path) + raise errors.AnsibleError(f"failed to open input file to {in_path}") try: def write_file(args): with open(out_path, 'wb+') as dst_file: @@ -187,7 +192,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file to %s" % out_path + msg = f"failed to transfer file to {out_path}" raise errors.AnsibleError(msg) finally: src_file.close() @@ -195,7 +200,7 @@ class Connection(ConnectionBase): def fetch_file(self, in_path, out_path): ''' fetch a file from lxc to local ''' super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') @@ -203,7 +208,7 @@ class Connection(ConnectionBase): dst_file = open(out_path, "wb") except IOError: traceback.print_exc() - msg = "failed to open output file %s" % out_path + msg = f"failed to open output file {out_path}" raise errors.AnsibleError(msg) try: def write_file(args): @@ -218,7 +223,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file from %s to %s" % (in_path, out_path) + msg = f"failed to transfer file from {in_path} to {out_path}" raise errors.AnsibleError(msg) finally: dst_file.close() diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 63eaf6ca51..d4d3b45d0a 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -1,52 +1,83 @@ -# -*- coding: utf-8 -*- -# (c) 2016 Matt Clay -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016 Matt Clay +# 
Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Matt Clay (@mattclay) - name: lxd - short_description: Run tasks in lxc containers via lxc CLI +DOCUMENTATION = r""" +author: Matt Clay (@mattclay) +name: lxd +short_description: Run tasks in LXD instances using C(lxc) CLI +description: + - Run commands or put/fetch files to an existing instance using C(lxc) CLI. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc CLI - options: - remote_addr: - description: - - Container identifier. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_lxd_host - executable: - description: - - shell to use for execution inside container - default: /bin/sh - vars: - - name: ansible_executable - - name: ansible_lxd_executable - remote: - description: - - Name of the LXD remote to use. - default: local - vars: - - name: ansible_lxd_remote - version_added: 2.0.0 - project: - description: - - Name of the LXD project to use. - vars: - - name: ansible_lxd_project - version_added: 2.0.0 -''' + - Instance (container/VM) identifier. + - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) is + used as the instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxd_host + executable: + description: + - Shell to use for execution inside instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_lxd_executable + lxd_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: lxd_become_method + version_added: 10.4.0 + remote: + description: + - Name of the LXD remote to use. + type: string + default: local + vars: + - name: ansible_lxd_remote + version_added: 2.0.0 + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. + type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 + project: + description: + - Name of the LXD project to use. 
+    type: string
+    vars:
+      - name: ansible_lxd_project
+    version_added: 2.0.0
+"""
 
 import os
-from distutils.spawn import find_executable
 from subprocess import Popen, PIPE
 
 from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils.common.process import get_bin_path
 from ansible.module_utils.common.text.converters import to_bytes, to_text
 from ansible.plugins.connection import ConnectionBase
 
@@ -56,43 +87,59 @@ class Connection(ConnectionBase):
 
     transport = 'community.general.lxd'
     has_pipelining = True
-    default_user = 'root'
 
     def __init__(self, play_context, new_stdin, *args, **kwargs):
         super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
 
-        self._host = self._play_context.remote_addr
-        self._lxc_cmd = find_executable("lxc")
-
-        if not self._lxc_cmd:
+        try:
+            self._lxc_cmd = get_bin_path("lxc")
+        except ValueError:
             raise AnsibleError("lxc command not found in PATH")
 
-        if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
-            self._display.warning('lxd does not support remote_user, using container default: root')
+    def _host(self):
+        """ translate remote_addr to lxd (short) hostname """
+        return self.get_option("remote_addr").split(".", 1)[0]
 
     def _connect(self):
         """connect to lxd (nothing to do here) """
         super(Connection, self)._connect()
 
         if not self._connected:
-            self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
+            self._display.vvv(f"ESTABLISH LXD CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._host())
             self._connected = True
 
+    def _build_command(self, cmd) -> list[str]:
+        """build the command to execute on the lxd host"""
+
+        exec_cmd = [self._lxc_cmd]
+
+        if self.get_option("project"):
+            exec_cmd.extend(["--project", self.get_option("project")])
+
+        exec_cmd.extend(["exec", f"{self.get_option('remote')}:{self._host()}", "--"])
+
+        if self.get_option("remote_user") != "root":
+            self._display.vvv(
+                f"INFO: Running as non-root user: {self.get_option('remote_user')}, "
+                f"trying to run 'lxc exec' with become method: {self.get_option('lxd_become_method')}",
+                host=self._host(),
+            )
+            exec_cmd.extend(
+                [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"]
+            )
+
+        exec_cmd.extend([self.get_option("executable"), "-c", cmd])
+
+        return exec_cmd
+
     def exec_command(self, cmd, in_data=None, sudoable=True):
         """ execute a command on the lxd host """
         super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
 
-        self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
+        self._display.vvv(f"EXEC {cmd}", host=self._host())
 
-        local_cmd = [self._lxc_cmd]
-        if self.get_option("project"):
-            local_cmd.extend(["--project", self.get_option("project")])
-        local_cmd.extend([
-            "exec",
-            "%s:%s" % (self.get_option("remote"), self._host),
-            "--",
-            self._play_context.executable, "-c", cmd
-        ])
+        local_cmd = self._build_command(cmd)
+        self._display.vvvvv(f"EXEC {local_cmd}", host=self._host())
 
         local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
         in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -103,31 +150,73 @@ class Connection(ConnectionBase):
         stdout = to_text(stdout)
         stderr = to_text(stderr)
 
-        if stderr == "error: Container is not running.\n":
-            raise AnsibleConnectionFailure("container not running: %s" % self._host)
+        self._display.vvvvv(f"EXEC lxc output: {stdout} {stderr}", host=self._host())
 
-        if stderr == "error: not found\n":
-            raise
AnsibleConnectionFailure("container not found: %s" % self._host) + if "is not running" in stderr: + raise AnsibleConnectionFailure(f"instance not running: {self._host()}") + + if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found": + raise AnsibleConnectionFailure(f"instance not found: {self._host()}") return process.returncode, stdout, stderr + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id -u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + def put_file(self, in_path, out_path): """ put a file from local to lxd """ super(Connection, self).put_file(in_path, out_path) - self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host()) if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): - raise AnsibleFileNotFound("input path is not a file: %s" % in_path) + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "push", - in_path, - "%s:%s/%s" % (self.get_option("remote"), self._host, out_path) - ]) + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd.extend( + [ + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + else: + local_cmd.extend( + [ + "file", + "push", + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + + self._display.vvvvv(f"PUT {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] @@ -138,14 +227,14 @@ class Connection(ConnectionBase): """ fetch a file from lxd to local """ super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._host()) local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) local_cmd.extend([ "file", "pull", - "%s:%s/%s" % (self.get_option("remote"), self._host, in_path), + f"{self.get_option('remote')}:{self._host()}/{in_path}", out_path ]) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 1de9e10011..8d69594b22 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,42 +1,42 @@ -# -*- coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # # Written by: Kushal Das (https://github.com/kushaldas) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = 
''' - name: qubes - short_description: Interact with an existing QubesOS AppVM +DOCUMENTATION = r""" +name: qubes +short_description: Interact with an existing QubesOS AppVM +description: + - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. +author: Kushal Das (@kushaldas) + + +options: + remote_addr: description: - - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. - - author: Kushal Das (@kushaldas) - - - options: - remote_addr: - description: - - vm name - default: inventory_hostname - vars: - - name: ansible_host - remote_user: - description: - - The user to execute as inside the vm. - default: The *user* account as default in Qubes OS. - vars: - - name: ansible_user + - VM name. + type: string + default: inventory_hostname + vars: + - name: ansible_host + remote_user: + description: + - The user to execute as inside the VM. + type: string + default: The I(user) account as default in Qubes OS. + vars: + - name: ansible_user # keyword: # - name: hosts -''' +""" import subprocess @@ -75,7 +75,7 @@ class Connection(ConnectionBase): """ display.vvvv("CMD: ", cmd) if not cmd.endswith("\n"): - cmd = cmd + "\n" + cmd = f"{cmd}\n" local_cmd = [] # For dom0 @@ -92,7 +92,7 @@ class Connection(ConnectionBase): display.vvvv("Local cmd: ", local_cmd) - display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname) + display.vvv(f"RUN {local_cmd}", host=self._remote_vmname) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -111,42 +111,42 @@ class Connection(ConnectionBase): """Run specified command in a running QubesVM """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.vvvv("CMD IS: %s" % cmd) + display.vvvv(f"CMD IS: {cmd}") rc, stdout, stderr = self._qubes(cmd) - display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr)) + display.vvvvv(f"STDOUT {stdout!r} STDERR {stderr!r}") return rc, stdout, stderr def put_file(self, in_path, out_path): """ Place a local file located in 'in_path' inside VM at 'out_path' """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname) with open(in_path, "rb") as fobj: source_data = fobj.read() - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell") + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data, "qubes.VMRootShell") # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and # hope it will have appropriate permissions if retcode == 127: - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data) + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data) if retcode != 0: - raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}') def fetch_file(self, in_path, out_path): """Obtain file specified via 'in_path' from the container and place it at 'out_path' """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname) # We are running in dom0 - cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)] + cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, 
f"cat {in_path}"] with open(out_path, "wb") as fobj: p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj) p.communicate() if p.returncode != 0: - raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}') def close(self): """ Closing the connection """ diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 95870ad2d0..b09ffcd787 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,21 +1,20 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py -# (c) 2014, Michael Scherer -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Michael Scherer +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: saltstack - short_description: Allow ansible to piggyback on salt minions - description: - - This allows you to use existing Saltstack infrastructure to connect to targets. -''' +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: saltstack +short_description: Allow ansible to piggyback on salt minions +description: + - This allows you to use existing Saltstack infrastructure to connect to targets. +""" import os import base64 @@ -58,11 +57,11 @@ class Connection(ConnectionBase): if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - self._display.vvv("EXEC %s" % cmd, host=self.host) + self._display.vvv(f"EXEC {cmd}", host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 - res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) + res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', f"true;{cmd}"]) if self.host not in res: - raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) + raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct") p = res[self.host] return p['retcode'], p['stdout'], p['stderr'] @@ -80,7 +79,7 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) out_path = self._normalize_path(out_path, '/') - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) with open(in_path, 'rb') as in_fh: content = in_fh.read() self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) @@ -92,7 +91,7 @@ class Connection(ConnectionBase): super(Connection, self).fetch_file(in_path, out_path) in_path = self._normalize_path(in_path, '/') - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host] open(out_path, 'wb').write(content) diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py new file mode 100644 index 0000000000..3b768eebf8 --- 
/dev/null
+++ b/plugins/connection/wsl.py
@@ -0,0 +1,790 @@
+# Derived from ansible/plugins/connection/proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen)
+# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan
+# Copyright (c) 2025 Rui Lopes (@rgl)
+# Copyright (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import annotations

+DOCUMENTATION = r"""
+author: Rui Lopes (@rgl)
+name: wsl
+short_description: Run tasks in WSL distribution using wsl.exe CLI via SSH
+requirements:
+  - paramiko
+description:
+  - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI via SSH.
+  - Uses the Python SSH implementation (Paramiko) to connect to the WSL host.
+version_added: "10.6.0"
+options:
+  remote_addr:
+    description:
+      - Address of the remote target.
+    default: inventory_hostname
+    type: string
+    vars:
+      - name: inventory_hostname
+      - name: ansible_host
+      - name: ansible_ssh_host
+      - name: ansible_paramiko_host
+  port:
+    description: Remote port to connect to.
+    type: int
+    default: 22
+    ini:
+      - section: defaults
+        key: remote_port
+      - section: paramiko_connection
+        key: remote_port
+    env:
+      - name: ANSIBLE_REMOTE_PORT
+      - name: ANSIBLE_REMOTE_PARAMIKO_PORT
+    vars:
+      - name: ansible_port
+      - name: ansible_ssh_port
+      - name: ansible_paramiko_port
+    keyword:
+      - name: port
+  remote_user:
+    description:
+      - User to login/authenticate as.
+      - Can be set from the CLI with the C(--user) or C(-u) options.
+    type: string
+    vars:
+      - name: ansible_user
+      - name: ansible_ssh_user
+      - name: ansible_paramiko_user
+    env:
+      - name: ANSIBLE_REMOTE_USER
+      - name: ANSIBLE_PARAMIKO_REMOTE_USER
+    ini:
+      - section: defaults
+        key: remote_user
+      - section: paramiko_connection
+        key: remote_user
+    keyword:
+      - name: remote_user
+  password:
+    description:
+      - Secret used to either log in to the SSH server or as a passphrase for SSH keys that require it.
+      - Can be set from the CLI with the C(--ask-pass) option.
+    type: string
+    vars:
+      - name: ansible_password
+      - name: ansible_ssh_pass
+      - name: ansible_ssh_password
+      - name: ansible_paramiko_pass
+      - name: ansible_paramiko_password
+  use_rsa_sha2_algorithms:
+    description:
+      - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys.
+      - On paramiko versions older than 2.9, this only affects hostkeys.
+      - For behavior matching paramiko<2.9 set this to V(false).
+    vars:
+      - name: ansible_paramiko_use_rsa_sha2_algorithms
+    ini:
+      - {key: use_rsa_sha2_algorithms, section: paramiko_connection}
+    env:
+      - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS}
+    default: true
+    type: boolean
+  host_key_auto_add:
+    description: "Automatically add host keys to C(~/.ssh/known_hosts)."
+    env:
+      - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD
+    ini:
+      - key: host_key_auto_add
+        section: paramiko_connection
+    type: boolean
+  look_for_keys:
+    default: true
+    description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)."
+    env:
+      - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
+    ini:
+      - {key: look_for_keys, section: paramiko_connection}
+    type: boolean
+  proxy_command:
+    default: ""
+    description:
+      - Proxy information for running the connection through a jumphost.
+      - This option is supported by paramiko version 1.9.0 or newer.
+ type: string + env: + - name: ANSIBLE_PARAMIKO_PROXY_COMMAND + ini: + - {key: proxy_command, section: paramiko_connection} + vars: + - name: ansible_paramiko_proxy_command + record_host_keys: + default: true + description: "Save the host keys to a file." + env: + - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS + ini: + - section: paramiko_connection + key: record_host_keys + type: boolean + host_key_checking: + description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect + to the host." + type: boolean + default: true + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING + ini: + - section: defaults + key: host_key_checking + - section: paramiko_connection + key: host_key_checking + vars: + - name: ansible_host_key_checking + - name: ansible_ssh_host_key_checking + - name: ansible_paramiko_host_key_checking + use_persistent_connections: + description: "Toggles the use of persistence for connections." + type: boolean + default: false + env: + - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS + ini: + - section: defaults + key: use_persistent_connections + banner_timeout: + type: float + default: 30 + description: + - Configures, in seconds, the amount of time to wait for the SSH banner to be presented. + - This option is supported by paramiko version 1.15.0 or newer. + ini: + - section: paramiko_connection + key: banner_timeout + env: + - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT + timeout: + type: int + default: 10 + description: + - Number of seconds until the plugin gives up on failing to establish a TCP connection. + - This option is supported by paramiko version 2.2.0 or newer. + ini: + - section: defaults + key: timeout + - section: ssh_connection + key: timeout + - section: paramiko_connection + key: timeout + env: + - name: ANSIBLE_TIMEOUT + - name: ANSIBLE_SSH_TIMEOUT + - name: ANSIBLE_PARAMIKO_TIMEOUT + vars: + - name: ansible_ssh_timeout + - name: ansible_paramiko_timeout + cli: + - name: timeout + lock_file_timeout: + type: int + default: 60 + description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys. + vars: + - name: ansible_lock_file_timeout + env: + - name: ANSIBLE_LOCK_FILE_TIMEOUT + private_key_file: + description: + - Path to private key file to use for authentication. + type: path + ini: + - section: defaults + key: private_key_file + - section: paramiko_connection + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + - name: ansible_ssh_private_key_file + - name: ansible_paramiko_private_key_file + cli: + - name: private_key_file + option: "--private-key" + user_known_hosts_file: + description: + - Path to the user known hosts file. + - Used to verify the ssh hosts keys. + type: path + default: ~/.ssh/known_hosts + ini: + - section: paramiko_connection + key: user_known_hosts_file + vars: + - name: ansible_paramiko_user_known_hosts_file + wsl_distribution: + description: + - WSL distribution name. + type: string + required: true + vars: + - name: wsl_distribution + wsl_user: + description: + - WSL distribution user. + type: string + vars: + - name: wsl_user + become_user: + description: + - WSL distribution user. + type: string + default: root + vars: + - name: become_user + - name: ansible_become_user + become: + description: + - Whether to use the user defined by O(become_user). 
+ type: bool + default: false + vars: + - name: become + - name: ansible_become +""" + +EXAMPLES = r""" +# ------------------------ +# Inventory: inventory.yml +# ------------------------ +--- +all: + children: + wsl: + hosts: + example-wsl-ubuntu: + ansible_host: 10.0.0.10 + wsl_distribution: ubuntu + wsl_user: ubuntu + vars: + ansible_connection: community.general.wsl + ansible_user: vagrant +# ---------------------- +# Playbook: playbook.yml +# ---------------------- +--- +- name: WSL Example + hosts: wsl + gather_facts: true + become: true + tasks: + - name: Ping + ansible.builtin.ping: + - name: Id (with become false) + become: false + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Id (with become true) + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Reboot + ansible.builtin.reboot: + boot_time_command: systemctl show -p ActiveEnterTimestamp init.scope +""" + +import io +import os +import pathlib +import shlex +import socket +import tempfile +import traceback +import typing as t + +from ansible.errors import ( + AnsibleAuthenticationFailure, + AnsibleConnectionFailure, + AnsibleError, +) +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display +from ansible.utils.path import makedirs_safe +from binascii import hexlify +from subprocess import list2cmdline + +try: + import paramiko + PARAMIKO_IMPORT_ERR = None +except ImportError: + paramiko = None + PARAMIKO_IMPORT_ERR = traceback.format_exc() + + +if t.TYPE_CHECKING and PARAMIKO_IMPORT_ERR is None: + from paramiko import MissingHostKeyPolicy + from paramiko.client import SSHClient + from paramiko.pkey import PKey +else: + MissingHostKeyPolicy: type = object + SSHClient: type = object + PKey: type = object + + +display = Display() + + +def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str: + msg = f""" + paramiko: The authenticity of host '{hostname}' can't be established. + The {ktype} key fingerprint is {fingerprint}. + Are you sure you want to continue connecting (yes/no)? + """ + return msg + + +class MyAddPolicy(MissingHostKeyPolicy): + """ + Based on AutoAddPolicy in paramiko so we can determine when keys are added + + and also prompt for input. + + Policy for automatically adding the hostname and new host key to the + local L{HostKeys} object, and saving it. This is used by L{SSHClient}. 
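+
+    Unlike paramiko's stock AutoAddPolicy, this policy honours the plugin's
+    host_key_checking and host_key_auto_add options and can prompt the user
+    before trusting an unknown host key.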
+ """ + + def __init__(self, connection: Connection) -> None: + self.connection = connection + self._options = connection._options + + def missing_host_key(self, client: SSHClient, hostname: str, key: PKey) -> None: + + if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))): + + fingerprint = hexlify(key.get_fingerprint()) + ktype = key.get_name() + + if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence: + # don't print the prompt string since the user cannot respond + # to the question anyway + raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92]) + + inp = to_text( + display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False), + errors='surrogate_or_strict' + ) + + if inp.lower() not in ['yes', 'y', '']: + raise AnsibleError('host connection rejected by user') + + key._added_by_ansible_this_time = True + + # existing implementation below: + client._host_keys.add(hostname, key.get_name(), key) + + # host keys are actually saved in close() function below + # in order to control ordering. + + +class Connection(ConnectionBase): + """ SSH based connections (paramiko) to WSL """ + + transport = 'community.general.wsl' + _log_channel: str | None = None + + def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + def _set_log_channel(self, name: str) -> None: + """ Mimic paramiko.SSHClient.set_log_channel """ + self._log_channel = name + + def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]: + proxy_command = self.get_option('proxy_command') or None + + sock_kwarg = {} + if proxy_command: + replacers: t.Dict[str, str] = { + '%h': self.get_option('remote_addr'), + '%p': str(port), + '%r': self.get_option('remote_user') + } + for find, replace in replacers.items(): + proxy_command = proxy_command.replace(find, replace) + try: + sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} + display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr')) + except AttributeError: + display.warning('Paramiko ProxyCommand support unavailable. ' + 'Please upgrade to Paramiko 1.9.0 or newer. 
' + 'Not using configured ProxyCommand') + + return sock_kwarg + + def _connect(self) -> Connection: + """ activates the connection object """ + + if PARAMIKO_IMPORT_ERR is not None: + raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}') + + port = self.get_option('port') + display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}', + host=self.get_option('remote_addr')) + + ssh = paramiko.SSHClient() + + # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently + # is keeping or omitting rsa-sha2 algorithms + # default_keys: t.Tuple[str] = () + paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ()) + paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ()) + use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms') + disabled_algorithms: t.Dict[str, t.Iterable[str]] = {} + if not use_rsa_sha2_algorithms: + if paramiko_preferred_pubkeys: + disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a) + if paramiko_preferred_hostkeys: + disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a) + + # override paramiko's default logger name + if self._log_channel is not None: + ssh.set_log_channel(self._log_channel) + + self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file')) + + if self.get_option('host_key_checking'): + for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile): + try: + ssh.load_system_host_keys(ssh_known_hosts) + break + except IOError: + pass # file was not found, but not required to function + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + try: + ssh.load_system_host_keys() + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + + ssh_connect_kwargs = self._parse_proxy_command(port) + ssh.set_missing_host_key_policy(MyAddPolicy(self)) + conn_password = self.get_option('password') + allow_agent = True + + if conn_password is not None: + allow_agent = False + + try: + key_filename = None + if self.get_option('private_key_file'): + key_filename = os.path.expanduser(self.get_option('private_key_file')) + + # paramiko 2.2 introduced auth_timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'): + ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout') + + # paramiko 1.15 introduced banner timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'): + ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout') + + ssh.connect( + self.get_option('remote_addr').lower(), + username=self.get_option('remote_user'), + allow_agent=allow_agent, + look_for_keys=self.get_option('look_for_keys'), + key_filename=key_filename, + password=conn_password, + timeout=self.get_option('timeout'), + port=port, + disabled_algorithms=disabled_algorithms, + **ssh_connect_kwargs, + ) + except paramiko.ssh_exception.BadHostKeyException as e: + raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}') + except paramiko.ssh_exception.AuthenticationException as e: + msg = f'Failed to authenticate: {e}' + raise AnsibleAuthenticationFailure(msg) + except Exception as e: + msg = to_text(e) + if 'PID check failed' in msg: + raise 
AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible')
+            elif 'Private key file is encrypted' in msg:
+                msg = (
+                    f'ssh {self.get_option("remote_user")}@{self.get_option("remote_addr")}:{port} : '
+                    f'{msg}\nTo connect as a different user, use -u <username>.'
+                )
+                raise AnsibleConnectionFailure(msg)
+            else:
+                raise AnsibleConnectionFailure(msg)
+        self.ssh = ssh
+        self._connected = True
+        return self
+
+    def _any_keys_added(self) -> bool:
+        for hostname, keys in self.ssh._host_keys.items():
+            for keytype, key in keys.items():
+                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+                if added_this_time:
+                    return True
+        return False
+
+    def _save_ssh_host_keys(self, filename: str) -> None:
+        """
+        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
+        don't complain about it :)
+        """
+
+        if not self._any_keys_added():
+            return
+
+        path = os.path.expanduser('~/.ssh')
+        makedirs_safe(path)
+
+        with open(filename, 'w') as f:
+            for hostname, keys in self.ssh._host_keys.items():
+                for keytype, key in keys.items():
+                    added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+                    if not added_this_time:
+                        f.write(f'{hostname} {keytype} {key.get_base64()}\n')
+
+            for hostname, keys in self.ssh._host_keys.items():
+                for keytype, key in keys.items():
+                    added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+                    if added_this_time:
+                        f.write(f'{hostname} {keytype} {key.get_base64()}\n')
+
+    def _build_wsl_command(self, cmd: str) -> str:
+        wsl_distribution = self.get_option('wsl_distribution')
+        become = self.get_option('become')
+        become_user = self.get_option('become_user')
+        if become and become_user:
+            wsl_user = become_user
+        else:
+            wsl_user = self.get_option('wsl_user')
+        args = ['wsl.exe', '--distribution', wsl_distribution]
+        if wsl_user:
+            args.extend(['--user', wsl_user])
+        args.extend(['--'])
+        args.extend(shlex.split(cmd))
+        if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'):
+            return shlex.join(args)
+        return list2cmdline(args)  # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576
+
+    def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
+        """ run a command inside a WSL distribution """
+
+        cmd = self._build_wsl_command(cmd)
+
+        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        bufsize = 4096
+
+        try:
+            self.ssh.get_transport().set_keepalive(5)
+            chan = self.ssh.get_transport().open_session()
+        except Exception as e:
+            text_e = to_text(e)
+            msg = 'Failed to open session'
+            if text_e:
+                msg += f': {text_e}'
+            raise AnsibleConnectionFailure(to_native(msg))
+
+        display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr'))
+
+        cmd = to_bytes(cmd, errors='surrogate_or_strict')
+
+        no_prompt_out = b''
+        no_prompt_err = b''
+        become_output = b''
+
+        try:
+            chan.exec_command(cmd)
+            if self.become and self.become.expect_prompt():
+                password_prompt = False
+                become_success = False
+                while not (become_success or password_prompt):
+                    display.debug('Waiting for Privilege Escalation input')
+
+                    chunk = chan.recv(bufsize)
+                    display.debug(f'chunk is: {to_text(chunk)}')
+                    if not chunk:
+                        if b'unknown user' in become_output:
+                            n_become_user = to_native(self.become.get_option('become_user'))
+                            raise AnsibleError(f'user {n_become_user} does not exist')
+                        else:
+                            break
+                            # raise AnsibleError('ssh connection closed waiting for password prompt')
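+                    # accumulate all output seen before the prompt/success marker
+                    # so it can be replayed into stdout/stderr if no prompt comes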
+ become_output += chunk + + # need to check every line because we might get lectured + # and we might get the middle of a line in a chunk + for line in become_output.splitlines(True): + if self.become.check_success(line): + become_success = True + break + elif self.become.check_password_prompt(line): + password_prompt = True + break + + if password_prompt: + if self.become: + become_pass = self.become.get_option('become_pass') + chan.sendall(to_bytes(f"{become_pass}\n", errors='surrogate_or_strict')) + else: + raise AnsibleError('A password is required but none was supplied') + else: + no_prompt_out += become_output + no_prompt_err += become_output + + if in_data: + for i in range(0, len(in_data), bufsize): + chan.send(in_data[i:i + bufsize]) + chan.shutdown_write() + elif in_data == b'': + chan.shutdown_write() + + except socket.timeout: + raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}') + + stdout = b''.join(chan.makefile('rb', bufsize)) + stderr = b''.join(chan.makefile_stderr('rb', bufsize)) + returncode = chan.recv_exit_status() + + # NB the full english error message is: + # 'wsl.exe' is not recognized as an internal or external command, + # operable program or batch file. + if "'wsl.exe' is not recognized" in stderr.decode('utf-8'): + raise AnsibleError( + f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}') + + return (returncode, no_prompt_out + stdout, no_prompt_out + stderr) + + def put_file(self, in_path: str, out_path: str) -> None: + """ transfer a file from local to remote """ + + display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr')) + try: + with open(in_path, 'rb') as f: + data = f.read() + returncode, stdout, stderr = self.exec_command( + f"{self._shell.executable} -c {self._shell.quote(f'cat > {out_path}')}", + in_data=data, + sudoable=False) + if returncode != 0: + if 'cat: not found' in stderr.decode('utf-8'): + raise AnsibleError( + f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') + raise AnsibleError( + f'{to_text(stdout)}\n{to_text(stderr)}') + except Exception as e: + raise AnsibleError( + f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}') + + def fetch_file(self, in_path: str, out_path: str) -> None: + """ save a remote file to the specified path """ + + display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr')) + try: + returncode, stdout, stderr = self.exec_command( + f"{self._shell.executable} -c {self._shell.quote(f'cat {in_path}')}", + sudoable=False) + if returncode != 0: + if 'cat: not found' in stderr.decode('utf-8'): + raise AnsibleError( + f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') + raise AnsibleError( + f'{to_text(stdout)}\n{to_text(stderr)}') + with open(out_path, 'wb') as f: + f.write(stdout) + except Exception as e: + raise AnsibleError( + f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}') + + def reset(self) -> None: + """ reset the connection """ + + if not self._connected: + return + self.close() + self._connect() + + def close(self) -> None: + """ terminate the connection """ + + if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added(): + # add any new SSH host keys -- warning -- this could be slow + # (This doesn't acquire the connection lock because it needs + # to exclude only other known_hosts writers, not 
connections
+            # that are starting up.)
+            lockfile = os.path.basename(self.keyfile)
+            dirname = os.path.dirname(self.keyfile)
+            makedirs_safe(dirname)
+            tmp_keyfile_name = None
+            try:
+                with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')):
+                    # just in case any were added recently
+
+                    self.ssh.load_system_host_keys()
+                    self.ssh._host_keys.update(self.ssh._system_host_keys)
+
+                    # gather information about the current key file, so
+                    # we can ensure the new file has the correct mode/owner
+
+                    key_dir = os.path.dirname(self.keyfile)
+                    if os.path.exists(self.keyfile):
+                        key_stat = os.stat(self.keyfile)
+                        mode = key_stat.st_mode & 0o777
+                        uid = key_stat.st_uid
+                        gid = key_stat.st_gid
+                    else:
+                        mode = 0o644
+                        uid = os.getuid()
+                        gid = os.getgid()
+
+                    # Save the new keys to a temporary file and move it into place
+                    # rather than rewriting the file. We set delete=False because
+                    # the file will be moved into place rather than cleaned up.
+
+                    with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile:
+                        tmp_keyfile_name = tmp_keyfile.name
+                        os.chmod(tmp_keyfile_name, mode)
+                        os.chown(tmp_keyfile_name, uid, gid)
+                        self._save_ssh_host_keys(tmp_keyfile_name)
+
+                    os.rename(tmp_keyfile_name, self.keyfile)
+            except LockTimeout:
+                raise AnsibleError(
+                    f'writing lock file for {self.keyfile} ran into the timeout of {self.get_option("lock_file_timeout")}s')
+            except paramiko.hostkeys.InvalidHostKey as e:
+                raise AnsibleConnectionFailure(f'Invalid host key: {e.line}')
+            except Exception as e:
+                # unable to save keys, including scenario when key was invalid
+                # and caught earlier
+                raise AnsibleError(
+                    f'error occurred while writing SSH host keys!\n{to_text(e)}')
+            finally:
+                if tmp_keyfile_name is not None:
+                    pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True)
+
+        self.ssh.close()
+        self._connected = False
diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py
index 8fbcd8a038..49b3188f44 100644
--- a/plugins/connection/zone.py
+++ b/plugins/connection/zone.py
@@ -1,39 +1,39 @@
-# -*- coding: utf-8 -*-
 # Based on local.py (c) 2012, Michael DeHaan
 # and chroot.py (c) 2013, Maykel Moya
 # and jail.py (c) 2013, Michael Scherer
 # (c) 2015, Dagobert Michelsen
 # (c) 2015, Toshio Kuratomi
 # Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
-DOCUMENTATION = '''
-    author: Ansible Core Team
-    name: zone
-    short_description: Run tasks in a zone instance
+DOCUMENTATION = r"""
+author: Ansible Core Team
+name: zone
+short_description: Run tasks in a zone instance
+description:
+  - Run commands or put/fetch files to an existing zone.
+options:
+  remote_addr:
     description:
-        - Run commands or put/fetch files to an existing zone
-    options:
-      remote_addr:
-        description:
-            - Zone identifier
-        default: inventory_hostname
-        vars:
-            - name: ansible_host
-            - name: ansible_zone_host
-'''
+      - Zone identifier.
+ type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_zone_host +""" -import distutils.spawn import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError -from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display @@ -60,14 +60,14 @@ class Connection(ConnectionBase): self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): - raise AnsibleError("incorrect zone name %s" % self.zone) + raise AnsibleError(f"incorrect zone name {self.zone}") @staticmethod def _search_executable(executable): - cmd = distutils.spawn.find_executable(executable) - if not cmd: - raise AnsibleError("%s command not found in PATH" % executable) - return cmd + try: + return get_bin_path(executable) + except ValueError: + raise AnsibleError(f"{executable} command not found in PATH") def list_zones(self): process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], @@ -92,7 +92,7 @@ class Connection(ConnectionBase): # stdout, stderr = p.communicate() path = process.stdout.readlines()[0].split(':')[3] - return path + '/root' + return f"{path}/root" def _connect(self): """ connect to the zone; nothing to do here """ @@ -115,7 +115,7 @@ class Connection(ConnectionBase): local_cmd = [self.zlogin_cmd, self.zone, cmd] local_cmd = map(to_bytes, local_cmd) - display.vvv("EXEC %s" % (local_cmd), host=self.zone) + display.vvv(f"EXEC {local_cmd}", host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -138,7 +138,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. 
- Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -147,7 +147,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to zone """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -157,27 +157,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from zone to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("zone connection requires dd command in the zone") @@ -189,10 +189,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py index f9c9640b61..f0083c9946 100644 --- a/plugins/doc_fragments/alicloud.py +++ b/plugins/doc_fragments/alicloud.py @@ -1,108 +1,97 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-present Alibaba Group Holding Limited. 
He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Alicloud only documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: alicloud_access_key: description: - - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY), - C(ALICLOUD_ACCESS_KEY_ID) will be used instead. + - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID) + is used instead. aliases: ['access_key_id', 'access_key'] type: str alicloud_secret_key: description: - - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY), - C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead. + - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY) + is used instead. aliases: ['secret_access_key', 'secret_key'] type: str alicloud_region: description: - - The Alibaba Cloud region to use. If not specified then the value of environment variable - C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead. + - The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID) + is used instead. aliases: ['region', 'region_id'] required: true type: str alicloud_security_token: description: - - The Alibaba Cloud security token. If not specified then the value of environment variable - C(ALICLOUD_SECURITY_TOKEN) will be used instead. + - The Alibaba Cloud security token. If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN) + is used instead. aliases: ['security_token'] type: str alicloud_assume_role: description: - - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials. - - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name), - I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy) + - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials. + - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration) + and C(alicloud_assume_role_policy). type: dict aliases: ['assume_role'] alicloud_assume_role_arn: description: - - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string, - it does not perform role switching. It supports environment variable ALICLOUD_ASSUME_ROLE_ARN. - ansible will execute with provided credentials. + - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform + role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials. aliases: ['assume_role_arn'] type: str alicloud_assume_role_session_name: description: - - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, - 'ansible' is passed to the AssumeRole call as session name. 
It supports environment variable - ALICLOUD_ASSUME_ROLE_SESSION_NAME + - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, 'ansible' is passed to + the AssumeRole call as session name. It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_NAME). aliases: ['assume_role_session_name'] type: str alicloud_assume_role_session_expiration: description: - - The Alibaba Cloud session_expiration. The time after which the established session for assuming - role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default - value). It supports environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION + - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming role expires. Valid + value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default value). It supports environment + variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION). aliases: ['assume_role_session_expiration'] type: int ecs_role_name: description: - - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' - section of the Alibaba Cloud console. - - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the - metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS - credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding - credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage. + - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' section + of the Alibaba Cloud console. + - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata + U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential. + This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead + these are leased on-the-fly by Ansible which reduces the chance of leakage. aliases: ['role_name'] type: str profile: description: - - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the - ALICLOUD_PROFILE environment variable. + - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the E(ALICLOUD_PROFILE) + environment variable. type: str shared_credentials_file: description: - - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE + - This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE) environment variable. - - If this is not set and a profile is specified, ~/.aliyun/config.json will be used. + - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used. 
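The paired environment-variable fallback documented above (E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID), and so on) amounts to a first-match lookup. A minimal standard-library sketch of that order, with a hypothetical `first_env` helper, purely as illustration:

```python
import os

def first_env(*names):
    """Return the value of the first set, non-empty variable in names."""
    for name in names:
        value = os.environ.get(name)
        if value:
            return value
    return None

# Hypothetical illustration of the documented fallback pairs; explicit
# module parameters, when given, would still take precedence.
access_key = first_env("ALICLOUD_ACCESS_KEY", "ALICLOUD_ACCESS_KEY_ID")
secret_key = first_env("ALICLOUD_SECRET_KEY", "ALICLOUD_SECRET_ACCESS_KEY")
region = first_env("ALICLOUD_REGION", "ALICLOUD_REGION_ID")
```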
type: str author: - - "He Guimin (@xiaozhu36)" + - "He Guimin (@xiaozhu36)" requirements: - - "python >= 3.6" + - "Python >= 3.6" notes: - - If parameters are not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID), - C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY), - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID), - C(ALICLOUD_SECURITY_TOKEN), - C(ALICLOUD_ECS_ROLE_NAME), - C(ALICLOUD_SHARED_CREDENTIALS_FILE), - C(ALICLOUD_PROFILE), - C(ALICLOUD_ASSUME_ROLE_ARN), - C(ALICLOUD_ASSUME_ROLE_SESSION_NAME), - C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION), - - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can be typically be used to specify the - ALICLOUD region, when required, but this can also be configured in the footmark config file -''' + - If parameters are not set within the module, the following environment variables can be used in decreasing order of precedence + E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID), E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY), E(ALICLOUD_REGION) + or E(ALICLOUD_REGION_ID), E(ALICLOUD_SECURITY_TOKEN), E(ALICLOUD_ECS_ROLE_NAME), E(ALICLOUD_SHARED_CREDENTIALS_FILE), + E(ALICLOUD_PROFILE), E(ALICLOUD_ASSUME_ROLE_ARN), E(ALICLOUD_ASSUME_ROLE_SESSION_NAME), E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION). + - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can be typically be used to specify the Alicloud region, when required, but + this can also be configured in the footmark config file. +""" diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py new file mode 100644 index 0000000000..fdafe1aeaa --- /dev/null +++ b/plugins/doc_fragments/attributes.py @@ -0,0 +1,91 @@ + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r""" +options: {} +attributes: + check_mode: + description: Can run in C(check_mode) and return changed status prediction without modifying target. + diff_mode: + description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode. +""" + + PLATFORM = r""" +options: {} +attributes: + platform: + description: Target OS/families that can be operated against. + support: N/A +""" + + # Should be used together with the standard fragment + INFO_MODULE = r''' +options: {} +attributes: + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. +''' + + CONN = r""" +options: {} +attributes: + become: + description: Is usable alongside C(become) keywords. + connection: + description: Uses the target's configured connection information to execute code on it. + delegation: + description: Can be used in conjunction with C(delegate_to) and related keywords. +""" + + FACTS = r""" +options: {} +attributes: + facts: + description: Action returns an C(ansible_facts) dictionary that updates existing host facts. +""" + + # Should be used together with the standard fragment and the FACTS fragment + FACTS_MODULE = r''' +options: {} +attributes: + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. 
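As the comments in the new attributes.py note, the INFO_MODULE fragment is meant to be combined with the standard fragment: one supplies the attribute descriptions, the other the support levels. A rough PyYAML sketch of that combination; the real merge performed by ansible-doc is more involved, so treat this as a simplified model:

```python
import yaml  # PyYAML

STANDARD = """
attributes:
  check_mode:
    description: Can run in check_mode and return changed status prediction.
"""

INFO_MODULE = """
attributes:
  check_mode:
    support: full
"""

standard = yaml.safe_load(STANDARD)
info_module = yaml.safe_load(INFO_MODULE)

# Simplified model: later fragments fill in keys the earlier ones lack.
check_mode = {**standard["attributes"]["check_mode"],
              **info_module["attributes"]["check_mode"]}
print(check_mode)  # both 'description' and 'support' are present
```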
+ facts: + support: full +''' + + FILES = r""" +options: {} +attributes: + safe_file_operations: + description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption. +""" + + FLOW = r""" +options: {} +attributes: + action: + description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. + async: + description: Supports being used with the C(async) keyword. +""" diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py index 6f590611d9..3d99466165 100644 --- a/plugins/doc_fragments/auth_basic.py +++ b/plugins/doc_fragments/auth_basic.py @@ -1,31 +1,30 @@ -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_url: description: - - The resolvable endpoint for the API + - The resolvable endpoint for the API. type: str api_username: description: - - The username to use for authentication against the API + - The username to use for authentication against the API. type: str api_password: description: - - The password to use for authentication against the API + - The password to use for authentication against the API. type: str validate_certs: description: - - Whether or not to validate SSL certs when supplying a https endpoint. + - Whether or not to validate SSL certs when supplying a HTTPS endpoint. type: bool - default: yes -''' + default: true +""" diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py index 28489356b1..c96a010e71 100644 --- a/plugins/doc_fragments/bitbucket.py +++ b/plugins/doc_fragments/bitbucket.py @@ -1,41 +1,42 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: client_id: description: - The OAuth consumer key. - - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used. type: str client_secret: description: - The OAuth consumer secret. - - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used. type: str user: description: - The username. - - If not set the environment variable C(BITBUCKET_USERNAME) will be used. + - If not set the environment variable E(BITBUCKET_USERNAME) is used. + - O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before. 
type: str version_added: 4.0.0 + aliases: [username] password: description: - The App password. - - If not set the environment variable C(BITBUCKET_PASSWORD) will be used. + - If not set the environment variable E(BITBUCKET_PASSWORD) is used. type: str version_added: 4.0.0 notes: - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. - Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords. - If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence. -''' +""" diff --git a/plugins/doc_fragments/consul.py b/plugins/doc_fragments/consul.py new file mode 100644 index 0000000000..fd9c1a6e6c --- /dev/null +++ b/plugins/doc_fragments/consul.py @@ -0,0 +1,55 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment: + # Common parameters for Consul modules + DOCUMENTATION = r""" +options: + host: + description: + - Host of the Consul agent. + default: localhost + type: str + port: + type: int + description: + - The port on which the consul agent is running. + default: 8500 + scheme: + description: + - The protocol scheme on which the Consul agent is running. Defaults to V(http) and can be set to V(https) for secure + connections. + default: http + type: str + validate_certs: + type: bool + description: + - Whether to verify the TLS certificate of the Consul agent. + default: true + ca_path: + description: + - The CA bundle to use for https connections. + type: str +""" + + TOKEN = r""" +options: + token: + description: + - The token to use for authorization. + type: str +""" + + ACTIONGROUP_CONSUL = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.consul) in C(module_defaults) to set defaults for this module. + support: full + membership: + - community.general.consul +""" diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py index 02435e25cc..1804c3c7ba 100644 --- a/plugins/doc_fragments/dimensiondata.py +++ b/plugins/doc_fragments/dimensiondata.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Dimension Data +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # Authors: # - Adam Friedman @@ -13,28 +12,27 @@ __metaclass__ = type class ModuleDocFragment(object): # Dimension Data doc fragment - DOCUMENTATION = r''' - + DOCUMENTATION = r""" options: region: description: - The target region. - - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py] - - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html) - - Note that the default value "na" stands for "North America". - - The module prepends 'dd-' to the region choice. + - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]. + - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html). 
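The Consul fragment added above is a plain description of the agent's HTTP endpoint, so its options compose directly into a URL plus Consul's documented X-Consul-Token header. A minimal sketch:

```python
import requests

def consul_get(path, host="localhost", port=8500, scheme="http",
               validate_certs=True, ca_path=None, token=None):
    """GET against a Consul agent using the fragment's connection options."""
    headers = {"X-Consul-Token": token} if token else {}
    # requests accepts either a boolean or a CA bundle path for verify=
    verify = ca_path if ca_path else validate_certs
    resp = requests.get(f"{scheme}://{host}:{port}{path}",
                        headers=headers, verify=verify)
    resp.raise_for_status()
    return resp.json()

services = consul_get("/v1/catalog/services")  # agent on default address
```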
+ - Note that the default value C(na) stands for "North America". + - The module prepends C(dd-) to the region choice. type: str default: na mcp_user: description: - The username used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata). + - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata). type: str mcp_password: description: - The password used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). - - Required if I(mcp_user) is specified. + - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). + - Required if O(mcp_user) is specified. type: str location: description: @@ -43,8 +41,8 @@ options: required: true validate_certs: description: - - If C(false), SSL certificates will not be validated. + - If V(false), SSL certificates are not validated. - This should only be used on private instances of the CloudControl API that use self-signed certificates. type: bool - default: yes -''' + default: true +""" diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py index ac3deab154..40b3a1d6e8 100644 --- a/plugins/doc_fragments/dimensiondata_wait.py +++ b/plugins/doc_fragments/dimensiondata_wait.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Dimension Data +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # Authors: # - Adam Friedman @@ -13,24 +12,23 @@ __metaclass__ = type class ModuleDocFragment(object): # Dimension Data ("wait-for-completion" parameters) doc fragment - DOCUMENTATION = r''' - + DOCUMENTATION = r""" options: wait: description: - Should we wait for the task to complete before moving onto the next. type: bool - default: no + default: false wait_time: description: - The maximum amount of time (in seconds) to wait for the task to complete. - - Only applicable if I(wait=true). + - Only applicable if O(wait=true). type: int default: 600 wait_poll_interval: description: - The amount of time (in seconds) to wait between checks for task completion. - - Only applicable if I(wait=true). + - Only applicable if O(wait=true). type: int default: 2 - ''' +""" diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py new file mode 100644 index 0000000000..f62e2224d8 --- /dev/null +++ b/plugins/doc_fragments/django.py @@ -0,0 +1,80 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +options: + venv: + description: + - Use the Python interpreter from this virtual environment. + - Pass the path to the root of the virtualenv, not the C(bin/) directory nor the C(python) executable. + type: path + settings: + description: + - Specifies the settings module to use. 
+ - The value is passed as is to the C(--settings) argument in C(django-admin). + type: str + required: true + pythonpath: + description: + - Adds the given filesystem path to the Python import search path. + - The value is passed as is to the C(--pythonpath) argument in C(django-admin). + type: path + traceback: + description: + - Provides a full stack trace in the output when a C(CommandError) is raised. + type: bool + verbosity: + description: + - Specifies the amount of notification and debug information in the output of C(django-admin). + type: int + choices: [0, 1, 2, 3] + skip_checks: + description: + - Skips running system checks prior to running the command. + type: bool + + +notes: + - The C(django-admin) command is always executed using the C(C) locale, and the option C(--no-color) is always passed. +seealso: + - name: django-admin and manage.py in official Django documentation + description: >- + Refer to this documentation for the builtin commands and options of C(django-admin). Please make sure that you select + the right version of Django in the version selector on that page. + link: https://docs.djangoproject.com/en/5.0/ref/django-admin/ +""" + + DATABASE = r""" +options: + database: + description: + - Specify the database to be used. + type: str + default: default +""" + + DATA = r""" +options: + excludes: + description: + - Applications or models to be excluded. + - Format must be either V(app_label) or V(app_label.ModelName). + type: list + elements: str + format: + description: + - Serialization format of the output data. + type: str + default: json + choices: [xml, json, jsonl, yaml] +notes: + - As it is now, the module is B(not idempotent). Ensuring idempotency for this case can be a bit tricky, because it would + amount to ensuring beforehand that all the data in the fixture file is already in the database, which is not a trivial feat. + Unfortunately, neither C(django loaddata) nor C(django dumpdata) have a C(--dry-run) option, so the only way to know whether + there is a change or not is to actually load or dump the data. +""" diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py index cce76823fe..9268b7fc42 100644 --- a/plugins/doc_fragments/emc.py +++ b/plugins/doc_fragments/emc.py @@ -1,45 +1,34 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Luca Lorenzetto (@remix_tj) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Luca Lorenzetto (@remix_tj) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for EMC VNX. -''' - # Documentation fragment for VNX (emc_vnx) EMC_VNX = r''' options: - sp_address: - description: - - Address of the SP of target/secondary storage. - type: str - required: true - sp_user: - description: - - Username for accessing SP. - type: str - default: sysadmin - sp_password: - description: - - password for accessing SP. - type: str - default: sysadmin + sp_address: + description: + - Address of the SP of target/secondary storage. 
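Returning to the Django fragment above: its notes pin down two invariants (the C locale and C(--no-color)), and the remaining options translate mechanically into C(django-admin) flags. A hypothetical wrapper sketching that mapping; the module's actual command construction may differ:

```python
import os
import subprocess

def run_django_admin(command, settings, pythonpath=None, verbosity=None,
                     venv=None, extra_args=()):
    """Illustrative only: build a django-admin call from the documented options."""
    binary = os.path.join(venv, "bin", "django-admin") if venv else "django-admin"
    args = [binary, command, "--no-color", f"--settings={settings}"]
    if pythonpath:
        args.append(f"--pythonpath={pythonpath}")
    if verbosity is not None:
        args.append(f"--verbosity={verbosity}")
    args.extend(extra_args)
    env = dict(os.environ, LC_ALL="C", LANG="C")  # always the C locale
    return subprocess.run(args, env=env, capture_output=True, text=True)
```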
+ type: str + required: true + sp_user: + description: + - Username for accessing SP. + type: str + default: sysadmin + sp_password: + description: + - password for accessing SP. + type: str + default: sysadmin requirements: - An EMC VNX Storage device. - - Ansible 2.7. - - storops (0.5.10 or greater). Install using 'pip install storops'. + - storops (0.5.10 or greater). Install using C(pip install storops). notes: - - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform. + - The modules prefixed with C(emc_vnx) are built to support the EMC VNX storage platform. ''' diff --git a/plugins/doc_fragments/gitlab.py b/plugins/doc_fragments/gitlab.py new file mode 100644 index 0000000000..af7a527a81 --- /dev/null +++ b/plugins/doc_fragments/gitlab.py @@ -0,0 +1,35 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r""" +requirements: + - requests (Python library U(https://pypi.org/project/requests/)) + +options: + api_token: + description: + - GitLab access token with API permissions. + type: str + api_oauth_token: + description: + - GitLab OAuth token for logging in. + type: str + version_added: 4.2.0 + api_job_token: + description: + - GitLab CI job token for logging in. + type: str + version_added: 4.2.0 + ca_path: + description: + - The CA certificates bundle to use to verify GitLab server certificate. + type: str + version_added: 8.1.0 +""" diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index ad445205d8..e126c63c56 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,36 +1,33 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # HPE 3PAR doc fragment - DOCUMENTATION = ''' + DOCUMENTATION = r""" options: - storage_system_ip: - description: - - The storage system IP address. - type: str - required: true - storage_system_password: - description: - - The storage system password. - type: str - required: true - storage_system_username: - description: - - The storage system user name. - type: str - required: true + storage_system_ip: + description: + - The storage system IP address. + type: str + required: true + storage_system_password: + description: + - The storage system password. + type: str + required: true + storage_system_username: + description: + - The storage system user name. + type: str + required: true requirements: - - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk' + - hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk). - WSAPI service should be enabled on the 3PAR storage array. 
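The GitLab fragment above lists requests as the only requirement, which matches how the REST API is typically driven: a base URL plus GitLab's documented PRIVATE-TOKEN header. A small sketch; the host name and token are placeholders:

```python
import requests

def gitlab_get(base_url, path, api_token=None, ca_path=None):
    """GET against GitLab's v4 REST API with a personal access token."""
    headers = {"PRIVATE-TOKEN": api_token} if api_token else {}
    resp = requests.get(f"{base_url}/api/v4{path}", headers=headers,
                        verify=ca_path if ca_path else True)
    resp.raise_for_status()
    return resp.json()

projects = gitlab_get("https://gitlab.example.com", "/projects",
                      api_token="glpat-...")  # placeholder token
```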
notes: - - check_mode not supported - ''' +""" diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index ecba2adde8..99362243ec 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,66 +1,57 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Huawei Inc. -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Huawei Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # HWC doc fragment. - DOCUMENTATION = ''' + DOCUMENTATION = r""" options: - identity_endpoint: - description: - - The Identity authentication URL. - type: str - required: true - user: - description: - - The user name to login with (currently only user names are - supported, and not user IDs). - type: str - required: true - password: - description: - - The password to login with. - type: str - required: true - domain: - description: - - The name of the Domain to scope to (Identity v3). - (currently only domain names are supported, and not domain IDs). - type: str - required: true - project: - description: - - The name of the Tenant (Identity v2) or Project (Identity v3). - (currently only project names are supported, and not - project IDs). - type: str - required: true - region: - description: - - The region to which the project belongs. - type: str - id: - description: - - The id of resource to be managed. - type: str + identity_endpoint: + description: + - The Identity authentication URL. + type: str + required: true + user: + description: + - The user name to login with. + - Currently only user names are supported, and not user IDs. + type: str + required: true + password: + description: + - The password to login with. + type: str + required: true + domain: + description: + - The name of the Domain to scope to (Identity v3). + - Currently only domain names are supported, and not domain IDs. + type: str + required: true + project: + description: + - The name of the Tenant (Identity v2) or Project (Identity v3). + - Currently only project names are supported, and not project IDs. + type: str + required: true + region: + description: + - The region to which the project belongs. + type: str + id: + description: + - The ID of resource to be managed. + type: str notes: - - For authentication, you can set identity_endpoint using the - C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable. - - For authentication, you can set user using the - C(ANSIBLE_HWC_USER) env variable. - - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env - variable. - - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env - variable. - - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env - variable. - - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable. - - Environment variables values will only be used if the playbook values are - not set. -''' + - For authentication, you can set identity_endpoint using the E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable. + - For authentication, you can set user using the E(ANSIBLE_HWC_USER) environment variable. + - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment variable. 
+ - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable. + - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable. + - For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable. + - Environment variables values are only used when the playbook values are not set. +""" diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py index 0d8eb5fe22..ab61cd51c1 100644 --- a/plugins/doc_fragments/ibm_storage.py +++ b/plugins/doc_fragments/ibm_storage.py @@ -1,37 +1,34 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, IBM CORPORATION +# Copyright (c) 2018, IBM CORPORATION # Author(s): Tzur Eliyahu -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # ibm_storage documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - username: - description: - - Management user on the spectrum accelerate storage system. - type: str - required: True - password: - description: - - Password for username on the spectrum accelerate storage system. - type: str - required: True - endpoints: - description: - - The hostname or management IP of Spectrum Accelerate storage system. - type: str - required: True + username: + description: + - Management user on the Spectrum Accelerate storage system. + type: str + required: true + password: + description: + - Password for username on the Spectrum Accelerate storage system. + type: str + required: true + endpoints: + description: + - The hostname or management IP of Spectrum Accelerate storage system. + type: str + required: true notes: - - This module requires pyxcli python library. - Use 'pip install pyxcli' in order to get pyxcli. + - This module requires pyxcli python library. Use C(pip install pyxcli) in order to get pyxcli. requirements: - - python >= 2.7 - pyxcli -''' +""" diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py index a31c84cbb1..7f0688b868 100644 --- a/plugins/doc_fragments/influxdb.py +++ b/plugins/doc_fragments/influxdb.py @@ -1,82 +1,80 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Parameters for influxdb modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address on which InfluxDB server is listening. - - Since Ansible 2.5, defaulted to localhost. + - The hostname or IP address on which InfluxDB server is listening. type: str default: localhost username: description: - - Username that will be used to authenticate against InfluxDB server. 
- - Alias C(login_username) added in Ansible 2.5. + - Username that is used to authenticate against InfluxDB server. type: str default: root - aliases: [ login_username ] + aliases: [login_username] password: description: - - Password that will be used to authenticate against InfluxDB server. - - Alias C(login_password) added in Ansible 2.5. + - Password that is used to authenticate against InfluxDB server. type: str default: root - aliases: [ login_password ] + aliases: [login_password] port: description: - - The port on which InfluxDB server is listening + - The port on which InfluxDB server is listening. type: int default: 8086 path: description: - - The path on which InfluxDB server is accessible - - Only available when using python-influxdb >= 5.1.0 + - The path on which InfluxDB server is accessible. + - Only available when using python-influxdb >= 5.1.0. type: str + default: '' version_added: '0.2.0' validate_certs: description: - - If set to C(no), the SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates. + - If set to V(false), the SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool - default: yes + default: true ssl: description: - - Use https instead of http to connect to InfluxDB server. + - Use https instead of http to connect to InfluxDB server. type: bool default: false timeout: description: - - Number of seconds Requests will wait for client to establish a connection. + - Number of seconds Requests waits for client to establish a connection. type: int retries: description: - - Number of retries client will try before aborting. - - C(0) indicates try until success. - - Only available when using python-influxdb >= 4.1.0 + - Number of retries client performs before aborting. + - V(0) indicates try until success. + - Only available when using C(python-influxdb) >= 4.1.0. type: int default: 3 use_udp: description: - - Use UDP to connect to InfluxDB server. + - Use UDP to connect to InfluxDB server. type: bool default: false udp_port: description: - - UDP port to connect to InfluxDB server. + - UDP port to connect to InfluxDB server. type: int default: 4444 proxies: description: - - HTTP(S) proxy to use for Requests to connect to InfluxDB server. + - HTTP(S) proxy to use for Requests to connect to InfluxDB server. type: dict -''' + default: {} +""" diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py index 47bcee60ba..0b740ae8ed 100644 --- a/plugins/doc_fragments/ipa.py +++ b/plugins/doc_fragments/ipa.py @@ -1,75 +1,83 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017-18, Ansible Project -# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017-18, Ansible Project +# Copyright (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Parameters for FreeIPA/IPA modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: ipa_port: description: - - Port of FreeIPA / IPA server. 
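The InfluxDB options above map almost one-to-one onto the constructor of the python-influxdb client the fragment depends on. A sketch assuming python-influxdb >= 5.1.0 (required for O(path)):

```python
from influxdb import InfluxDBClient  # python-influxdb

client = InfluxDBClient(
    host="localhost",
    port=8086,
    username="root",
    password="root",
    ssl=False,
    verify_ssl=True,
    timeout=10,     # seconds to establish a connection
    retries=3,      # 0 would mean retry until success
    use_udp=False,
    udp_port=4444,
    path="",
)
print(client.ping())  # returns the server version string
```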
- - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead. - - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Port of FreeIPA / IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead. + - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set. type: int default: 443 ipa_host: description: - - IP or hostname of IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead. - - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server. - - The relevant entry needed in FreeIPA is the 'ipa-ca' entry. - - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used. - - Environment variable fallback mechanism is added in Ansible 2.5. + - IP or hostname of IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead. + - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try + to discover the FreeIPA server. + - The relevant entry needed in FreeIPA is the C(ipa-ca) entry. + - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default + value is used. type: str default: ipa.example.com ipa_user: description: - - Administrative account used on IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead. - - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Administrative account used on IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead. + - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set. type: str default: admin ipa_pass: description: - - Password of administrative user. - - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead. - - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA. - - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server. - - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate. - - If GSSAPI is not available, the usage of 'ipa_pass' is required. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Password of administrative user. + - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead. + - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA. + - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate + to the FreeIPA server. 
+ - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos + keytab to authenticate. + - If GSSAPI is not available, the usage of O(ipa_pass) is required. type: str ipa_prot: description: - - Protocol used by IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead. - - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Protocol used by IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead. + - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set. type: str - choices: [ http, https ] + choices: [http, https] default: https validate_certs: description: - - This only applies if C(ipa_prot) is I(https). - - If set to C(no), the SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates. + - This only applies if O(ipa_prot) is V(https). + - If set to V(false), the SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool - default: yes + default: true ipa_timeout: description: - - Specifies idle timeout (in seconds) for the connection. - - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead. - - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set. + - Specifies idle timeout (in seconds) for the connection. + - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead. + - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is + set. type: int default: 10 -''' +""" + + CONNECTION_NOTES = r""" +options: {} +notes: + - This module uses JSON-RPC over HTTP(S) to communicate with the FreeIPA server. + If you need to enroll the managed node into FreeIPA realm, you might want to consider using the collection + L(freeipa.ansible_freeipa, https://galaxy.ansible.com/ui/repo/published/freeipa/ansible_freeipa/), but shell access to one + node from the realm is required to manage the deployment. 
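For the password-based path described above, the modules speak JSON-RPC over an authenticated HTTP session. A compressed sketch using the same session endpoints FreeIPA's web UI exposes; the GSSAPI variants (E(KRB5CCNAME), E(KRB5_CLIENT_KTNAME)) are not shown:

```python
import requests

def ipa_login(host, username, password, timeout=10, verify=True):
    """Obtain an authenticated session via /ipa/session/login_password."""
    session = requests.Session()
    session.verify = verify
    resp = session.post(
        f"https://{host}/ipa/session/login_password",
        data={"user": username, "password": password},
        headers={"Referer": f"https://{host}/ipa", "Accept": "text/plain"},
        timeout=timeout,
    )
    resp.raise_for_status()
    return session

session = ipa_login("ipa.example.com", "admin", "secret")
result = session.post(
    "https://ipa.example.com/ipa/session/json",
    json={"method": "ping", "params": [[], {}]},
    headers={"Referer": "https://ipa.example.com/ipa"},
).json()
```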
+""" diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py index 72e0b71d50..2ec693eb99 100644 --- a/plugins/doc_fragments/keycloak.py +++ b/plugins/doc_fragments/keycloak.py @@ -1,64 +1,93 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - auth_keycloak_url: - description: - - URL to the Keycloak instance. - type: str - required: true - aliases: - - url + auth_keycloak_url: + description: + - URL to the Keycloak instance. + type: str + required: true + aliases: + - url - auth_client_id: - description: - - OpenID Connect I(client_id) to authenticate to the API with. - type: str - default: admin-cli + auth_client_id: + description: + - OpenID Connect C(client_id) to authenticate to the API with. + type: str + default: admin-cli - auth_realm: - description: - - Keycloak realm name to authenticate to for API access. - type: str + auth_realm: + description: + - Keycloak realm name to authenticate to for API access. + type: str - auth_client_secret: - description: - - Client Secret to use in conjunction with I(auth_client_id) (if required). - type: str + auth_client_secret: + description: + - Client Secret to use in conjunction with O(auth_client_id) (if required). + type: str - auth_username: - description: - - Username to authenticate for API access with. - type: str - aliases: - - username + auth_username: + description: + - Username to authenticate for API access with. + type: str + aliases: + - username - auth_password: - description: - - Password to authenticate for API access with. - type: str - aliases: - - password + auth_password: + description: + - Password to authenticate for API access with. + type: str + aliases: + - password - token: - description: - - Authentication token for Keycloak API. - type: str - version_added: 3.0.0 + token: + description: + - Authentication token for Keycloak API. + type: str + version_added: 3.0.0 - validate_certs: - description: - - Verify TLS certificates (do not disable this in production). - type: bool - default: yes -''' + refresh_token: + description: + - Authentication refresh token for Keycloak API. + type: str + version_added: 10.3.0 + + validate_certs: + description: + - Verify TLS certificates (do not disable this in production). + type: bool + default: true + + connection_timeout: + description: + - Controls the HTTP connections timeout period (in seconds) to Keycloak API. + type: int + default: 10 + version_added: 4.5.0 + + http_agent: + description: + - Configures the HTTP User-Agent header. + type: str + default: Ansible + version_added: 5.4.0 +""" + + ACTIONGROUP_KEYCLOAK = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.keycloak) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.keycloak +""" diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py index 1c9931fb85..d787bfd65d 100644 --- a/plugins/doc_fragments/ldap.py +++ b/plugins/doc_fragments/ldap.py @@ -1,27 +1,49 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard LDAP documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" +notes: + - The default authentication settings attempts to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with + the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to + modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn) + and O(bind_pw). options: bind_dn: description: - - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default. - - If this is blank, we'll use an anonymous bind. + - A DN to bind with. Try to use a SASL bind with the EXTERNAL mechanism as default when this parameter is omitted. + - Use an anonymous bind if the parameter is blank. type: str bind_pw: description: - - The password to use with I(bind_dn). + - The password to use with O(bind_dn). type: str + default: '' + ca_path: + description: + - Set the path to PEM file with CA certs. + type: path + version_added: "6.5.0" + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - Required if O(client_key) is defined. + version_added: "7.1.0" + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - Required if O(client_cert) is defined. + version_added: "7.1.0" dn: required: true description: @@ -33,32 +55,44 @@ options: type: str description: - Set the referrals chasing behavior. - - C(anonymous) follow referrals anonymously. This is the default behavior. - - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. + - V(anonymous) follow referrals anonymously. This is the default behavior. + - V(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. version_added: 2.0.0 server_uri: description: - - A URI to the LDAP server. + - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, + and the port fields. - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location. + - Note that when using multiple URIs you cannot determine to which URI your client gets connected. + - For URIs containing additional fields, particularly when using commas, behavior is undefined. type: str default: ldapi:/// start_tls: description: - - If true, we'll use the START_TLS LDAP extension. 
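The bind behavior documented above (SASL EXTERNAL by default, simple bind when O(bind_dn) is set, optional START_TLS) is straightforward to mirror with python-ldap. A minimal sketch:

```python
import ldap
import ldap.sasl

def ldap_connect(server_uri="ldapi:///", bind_dn=None, bind_pw="",
                 start_tls=False, validate_certs=True):
    """SASL EXTERNAL bind by default; simple bind when bind_dn is given."""
    conn = ldap.initialize(server_uri)
    if not validate_certs:
        conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)  # apply the TLS options
    if start_tls:
        conn.start_tls_s()
    if bind_dn is None:
        conn.sasl_interactive_bind_s("", ldap.sasl.external())
    else:
        conn.simple_bind_s(bind_dn, bind_pw)  # blank dn -> anonymous bind
    return conn
```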
+ - Use the START_TLS LDAP extension if set to V(true). type: bool - default: no + default: false validate_certs: description: - - If set to C(no), SSL certificates will not be validated. + - If set to V(false), SSL certificates are not validated. - This should only be used on sites using self-signed certificates. type: bool - default: yes + default: true sasl_class: description: - The class to use for SASL authentication. - - possible choices are C(external), C(gssapi). type: str choices: ['external', 'gssapi'] default: external version_added: "2.0.0" -''' + xorder_discovery: + description: + - Set the behavior on how to process Xordered DNs. + - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN. + - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter). + - V(auto) only performs a search if the first RDN does not contain an index number (C({x})). + type: str + choices: ['enable', 'auto', 'disable'] + default: auto + version_added: "6.4.0" +""" diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py index c55eca16ac..72bc3b7054 100644 --- a/plugins/doc_fragments/lxca_common.py +++ b/plugins/doc_fragments/lxca_common.py @@ -1,15 +1,14 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2017 Lenovo, Inc. -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard Pylxca documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) @@ -17,19 +16,19 @@ author: options: login_user: description: - - The username for use in HTTP basic authentication. + - The username for use in HTTP basic authentication. type: str required: true login_password: description: - - The password for use in HTTP basic authentication. + - The password for use in HTTP basic authentication. type: str required: true auth_url: description: - - lxca https full web address + - Lxca HTTPS full web address. type: str required: true @@ -37,7 +36,6 @@ requirements: - pylxca notes: - - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca) - - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca) - - Check mode is not supported. -''' + - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca). + - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca). 
+""" diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py index b610b512b7..e7351e4f5e 100644 --- a/plugins/doc_fragments/manageiq.py +++ b/plugins/doc_fragments/manageiq.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Daniel Korn -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Daniel Korn +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard ManageIQ documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: manageiq_connection: description: @@ -20,33 +19,34 @@ options: suboptions: url: description: - - ManageIQ environment url. C(MIQ_URL) env var if set. otherwise, it is required to pass it. + - ManageIQ environment URL. E(MIQ_URL) environment variable if set. Otherwise, it is required to pass it. type: str required: false username: description: - - ManageIQ username. C(MIQ_USERNAME) env var if set. otherwise, required if no token is passed in. + - ManageIQ username. E(MIQ_USERNAME) environment variable if set. Otherwise, required if no token is passed in. type: str password: description: - - ManageIQ password. C(MIQ_PASSWORD) env var if set. otherwise, required if no token is passed in. + - ManageIQ password. E(MIQ_PASSWORD) environment variable if set. Otherwise, required if no token is passed in. type: str token: description: - - ManageIQ token. C(MIQ_TOKEN) env var if set. otherwise, required if no username or password is passed in. + - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed + in. type: str validate_certs: description: - - Whether SSL certificates should be verified for HTTPS requests. defaults to True. + - Whether SSL certificates should be verified for HTTPS requests. type: bool - default: yes - aliases: [ verify_ssl ] + default: true + aliases: [verify_ssl] ca_cert: description: - - The path to a CA bundle file or directory with certificates. defaults to None. + - The path to a CA bundle file or directory with certificates. type: str - aliases: [ ca_bundle_path ] + aliases: [ca_bundle_path] requirements: - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)' -''' +""" diff --git a/plugins/doc_fragments/nomad.py b/plugins/doc_fragments/nomad.py index 3845c54120..37485ef9a7 100644 --- a/plugins/doc_fragments/nomad.py +++ b/plugins/doc_fragments/nomad.py @@ -1,51 +1,56 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020 FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020 FERREIRA Christophe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - host: - description: - - FQDN of Nomad server. - required: true - type: str - use_ssl: - description: - - Use TLS/SSL connection. 
- type: bool - default: true - timeout: - description: - - Timeout (in seconds) for the request to Nomad. - type: int - default: 5 - validate_certs: - description: - - Enable TLS/SSL certificate validation. - type: bool - default: true - client_cert: - description: - - Path of certificate for TLS/SSL. - type: path - client_key: - description: - - Path of certificate's private key for TLS/SSL. - type: path - namespace: - description: - - Namespace for Nomad. - type: str - token: - description: - - ACL token for authentification. - type: str -''' + host: + description: + - FQDN of Nomad server. + required: true + type: str + port: + description: + - Port of Nomad server. + type: int + default: 4646 + version_added: 8.0.0 + use_ssl: + description: + - Use TLS/SSL connection. + type: bool + default: true + timeout: + description: + - Timeout (in seconds) for the request to Nomad. + type: int + default: 5 + validate_certs: + description: + - Enable TLS/SSL certificate validation. + type: bool + default: true + client_cert: + description: + - Path of certificate for TLS/SSL. + type: path + client_key: + description: + - Path of certificate's private key for TLS/SSL. + type: path + namespace: + description: + - Namespace for Nomad. + type: str + token: + description: + - ACL token for authentication. + type: str +""" diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py new file mode 100644 index 0000000000..7a2c7566c3 --- /dev/null +++ b/plugins/doc_fragments/onepassword.py @@ -0,0 +1,77 @@ + +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +requirements: + - See U(https://support.1password.com/command-line/) +options: + master_password: + description: The password used to unlock the specified vault. + aliases: ['vault_password'] + type: str + section: + description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any + section. + domain: + description: Domain of 1Password. + default: '1password.com' + type: str + subdomain: + description: The 1Password subdomain to authenticate against. + type: str + account_id: + description: The account ID to target. + type: str + username: + description: The username used to sign in. + type: str + secret_key: + description: The secret key used when performing an initial sign in. + type: str + service_account_token: + description: + - The access key for a service account. + - Only works with 1Password CLI version 2 or later. + type: str + vault: + description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults. + type: str + connect_host: + description: The host for 1Password Connect. Must be used in combination with O(connect_token). + type: str + env: + - name: OP_CONNECT_HOST + version_added: 8.1.0 + connect_token: + description: The token for 1Password Connect. Must be used in combination with O(connect_host). + type: str + env: + - name: OP_CONNECT_TOKEN + version_added: 8.1.0 +""" + + LOOKUP = r""" +options: + service_account_token: + env: + - name: OP_SERVICE_ACCOUNT_TOKEN + version_added: 8.2.0 +notes: + - This lookup uses an existing 1Password session if one exists. 
diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py
new file mode 100644
index 0000000000..7a2c7566c3
--- /dev/null
+++ b/plugins/doc_fragments/onepassword.py
@@ -0,0 +1,77 @@
+
+# Copyright (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+    DOCUMENTATION = r"""
+requirements:
+  - See U(https://support.1password.com/command-line/)
+options:
+  master_password:
+    description: The password used to unlock the specified vault.
+    aliases: ['vault_password']
+    type: str
+  section:
+    description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any
+      section.
+  domain:
+    description: Domain of 1Password.
+    default: '1password.com'
+    type: str
+  subdomain:
+    description: The 1Password subdomain to authenticate against.
+    type: str
+  account_id:
+    description: The account ID to target.
+    type: str
+  username:
+    description: The username used to sign in.
+    type: str
+  secret_key:
+    description: The secret key used when performing an initial sign in.
+    type: str
+  service_account_token:
+    description:
+      - The access key for a service account.
+      - Only works with 1Password CLI version 2 or later.
+    type: str
+  vault:
+    description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults.
+    type: str
+  connect_host:
+    description: The host for 1Password Connect. Must be used in combination with O(connect_token).
+    type: str
+    env:
+      - name: OP_CONNECT_HOST
+    version_added: 8.1.0
+  connect_token:
+    description: The token for 1Password Connect. Must be used in combination with O(connect_host).
+    type: str
+    env:
+      - name: OP_CONNECT_TOKEN
+    version_added: 8.1.0
+"""

+    LOOKUP = r"""
+options:
+  service_account_token:
+    env:
+      - name: OP_SERVICE_ACCOUNT_TOKEN
+        version_added: 8.2.0
+notes:
+  - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in
+    (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password)
+    is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op).
+  - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
+  - Can target a specific account by providing the O(account_id).
+  - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal
+    credentials needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or
+    greater in strength to the 1Password master password.
+  - This lookup stores potentially sensitive data from 1Password as Ansible facts. Facts are subject to caching if enabled,
+    which means this data could be stored in clear text on disk or in a database.
+  - Tested with C(op) version 2.7.2.
+"""
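A sketch of the lookup these options document, assuming a service account token (the item, vault, and token variable are placeholders, not part of this change):

```yaml
# Hypothetical lookup call using the service_account_token option added above.
- name: Read a password from 1Password
  ansible.builtin.debug:
    msg: >-
      {{ lookup('community.general.onepassword', 'MySQL root',
                field='password', vault='Infra',
                service_account_token=op_service_account_token) }}
```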
diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py
index 0d385e99aa..9e64f02e1a 100644
--- a/plugins/doc_fragments/oneview.py
+++ b/plugins/doc_fragments/oneview.py
@@ -1,79 +1,75 @@
-# -*- coding: utf-8 -*-
 #
-# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations


 class ModuleDocFragment(object):
     # OneView doc fragment
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
-    config:
-      description:
-        - Path to a .json configuration file containing the OneView client configuration.
-          The configuration file is optional and when used should be present in the host running the ansible commands.
-          If the file path is not provided, the configuration will be loaded from environment variables.
-          For links to example configuration files or how to use the environment variables verify the notes section.
-      type: path
-    api_version:
-      description:
-        - OneView API Version.
-      type: int
-    image_streamer_hostname:
-      description:
-        - IP address or hostname for the HPE Image Streamer REST API.
-      type: str
-    hostname:
-      description:
-        - IP address or hostname for the appliance.
-      type: str
-    username:
-      description:
-        - Username for API authentication.
-      type: str
-    password:
-      description:
-        - Password for API authentication.
-      type: str
+  config:
+    description:
+      - Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional
+        and when used should be present in the host running the Ansible commands. If the file path is not provided, the configuration
+        is loaded from environment variables. For links to example configuration files or how to use the environment variables,
+        see the notes section.
+    type: path
+  api_version:
+    description:
+      - OneView API Version.
+    type: int
+  image_streamer_hostname:
+    description:
+      - IP address or hostname for the HPE Image Streamer REST API.
+    type: str
+  hostname:
+    description:
+      - IP address or hostname for the appliance.
+    type: str
+  username:
+    description:
+      - Username for API authentication.
+    type: str
+  password:
+    description:
+      - Password for API authentication.
+    type: str
 requirements:
-  - python >= 2.7.9
+  - Python >= 2.7.9
 notes:
-  - "A sample configuration file for the config parameter can be found at:
-    U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
-  - "Check how to use environment variables for configuration at:
-    U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
-  - "Additional Playbooks for the HPE OneView Ansible modules can be found at:
-    U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
-  - "The OneView API version used will directly affect returned and expected fields in resources.
-    Information on setting the desired API version and can be found at:
-    U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
-  '''
+  - 'A sample configuration file for the config parameter can be found at:
+    U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).'
+  - 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).'
+  - 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).'
+  - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired
+    API version can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).'
+"""

-    VALIDATEETAG = r'''
+    VALIDATEETAG = r"""
 options:
-    validate_etag:
-        description:
-            - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
-              for the resource matches the ETag provided in the data.
-        type: bool
-        default: yes
-'''
+  validate_etag:
+    description:
+      - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource
+        matches the ETag provided in the data.
+    type: bool
+    default: true
+"""

-    FACTSPARAMS = r'''
+    FACTSPARAMS = r"""
 options:
-    params:
-        description:
-            - List of params to delimit, filter and sort the list of resources.
-            - "params allowed:
-              - C(start): The first item to return, using 0-based indexing.
-              - C(count): The number of resources to return.
-              - C(filter): A general filter/query string to narrow the list of items returned.
-              - C(sort): The sort order of the returned data set."
-        type: dict
-'''
+  params:
+    description:
+      - List of parameters to delimit, filter and sort the list of resources.
+      - 'Parameter keys allowed are:'
+      - 'V(start): The first item to return, using 0-based indexing.'
+      - 'V(count): The number of resources to return.'
+      - 'V(filter): A general filter/query string to narrow the list of items returned.'
+      - 'V(sort): The sort order of the returned data set.'
+    type: dict
+"""
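To make the FACTSPARAMS keys concrete, a hedged sketch (the module name and filter syntax are assumptions, not part of this change):

```yaml
# Hypothetical info-module call narrowing results with the params dict documented above.
- name: List the first three ethernet networks, sorted by name
  community.general.oneview_ethernet_network_info:
    hostname: oneview.example.com
    username: administrator
    password: "{{ oneview_password }}"
    params:
      start: 0               # first item, 0-based
      count: 3               # number of resources to return
      sort: name:ascending   # sort order of the returned data set
  register: result
```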
diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py
index 4ad35bab20..c2b130e7a0 100644
--- a/plugins/doc_fragments/online.py
+++ b/plugins/doc_fragments/online.py
@@ -1,44 +1,41 @@
-# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations


 class ModuleDocFragment(object):
     # Standard documentation fragment
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
   api_token:
     description:
       - Online OAuth token.
     type: str
     required: true
-    aliases: [ oauth_token ]
+    aliases: [oauth_token]
   api_url:
     description:
-      - Online API URL
+      - Online API URL.
     type: str
     default: 'https://api.online.net'
-    aliases: [ base_url ]
+    aliases: [base_url]
   api_timeout:
     description:
       - HTTP timeout to Online API in seconds.
     type: int
     default: 30
-    aliases: [ timeout ]
+    aliases: [timeout]
   validate_certs:
     description:
       - Validate SSL certs of the Online API.
     type: bool
-    default: yes
+    default: true
 notes:
-  - Also see the API documentation on U(https://console.online.net/en/api/)
-  - If C(api_token) is not set within the module, the following
-    environment variables can be used in decreasing order of precedence
-    C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
-  - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
-    environment variable.
-'''
+  - Also see the API documentation on U(https://console.online.net/en/api/).
+  - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence:
+    E(ONLINE_TOKEN), E(ONLINE_API_KEY), E(ONLINE_OAUTH_TOKEN), E(ONLINE_API_TOKEN).
+  - If one wants to use a different O(api_url) one can also set the E(ONLINE_API_URL) environment variable.
+"""
diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py
index 08b614a6fc..72ccf7d70d 100644
--- a/plugins/doc_fragments/opennebula.py
+++ b/plugins/doc_fragments/opennebula.py
@@ -1,44 +1,43 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, www.privaz.io Valletech AB
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2018, www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations


 class ModuleDocFragment(object):
     # OpenNebula common documentation
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
-    api_url:
-        description:
-            - The ENDPOINT URL of the XMLRPC server.
-            - If not specified then the value of the ONE_URL environment variable, if any, is used.
-        type: str
-        aliases:
-            - api_endpoint
-    api_username:
-        description:
-            - The name of the user for XMLRPC authentication.
-            - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
-        type: str
-    api_password:
-        description:
-            - The password or token for XMLRPC authentication.
-            - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
- type: str - aliases: - - api_token - validate_certs: - description: - - Whether to validate the SSL certificates or not. - - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used. - type: bool - default: yes - wait_timeout: - description: - - Time to wait for the desired state to be reached before timeout, in seconds. - type: int - default: 300 -''' + api_url: + description: + - The ENDPOINT URL of the XMLRPC server. + - If not specified then the value of the E(ONE_URL) environment variable, if any, is used. + type: str + aliases: + - api_endpoint + api_username: + description: + - The name of the user for XMLRPC authentication. + - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used. + type: str + api_password: + description: + - The password or token for XMLRPC authentication. + - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used. + type: str + aliases: + - api_token + validate_certs: + description: + - Whether to validate the TLS/SSL certificates or not. + - This parameter is ignored if E(PYTHONHTTPSVERIFY) environment variable is used. + type: bool + default: true + wait_timeout: + description: + - Time to wait for the desired state to be reached before timeout, in seconds. + type: int + default: 300 +""" diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py index 7ab7c15540..aac90e020f 100644 --- a/plugins/doc_fragments/openswitch.py +++ b/plugins/doc_fragments/openswitch.py @@ -1,84 +1,69 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Peter Sprygada -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Peter Sprygada +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: host: description: - - Specifies the DNS host name or address for connecting to the remote - device over the specified transport. The value of host is used as - the destination address for the transport. Note this argument - does not affect the SSH argument. + - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value + of host is used as the destination address for the transport. Note this argument does not affect the SSH argument. type: str port: description: - - Specifies the port to use when building the connection to the remote - device. This value applies to either I(cli) or I(rest). The port - value will default to the appropriate transport common port if - none is provided in the task. (cli=22, http=80, https=443). Note - this argument does not affect the SSH transport. + - Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli) + or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the + task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport. type: int default: 0 (use common port) username: description: - - Configures the username to use to authenticate the connection to - the remote device. 
-        either the CLI login or the eAPI authentication depending on which
-        transport is used. Note this argument does not affect the SSH
-        transport. If the value is not specified in the task, the value of
-        environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+      - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate
+        either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not
+        affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME)
+        is used instead.
     type: str
   password:
     description:
-      - Specifies the password to use to authenticate the connection to
-        the remote device. This is a common argument used for either I(cli)
-        or I(rest) transports. Note this argument does not affect the SSH
-        transport. If the value is not specified in the task, the value of
-        environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+      - Specifies the password to use to authenticate the connection to the remote device. This is a common argument used
+        for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value
+        is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead.
     type: str
   timeout:
     description:
-      - Specifies the timeout in seconds for communicating with the network device
-        for either connecting or sending commands. If the timeout is
-        exceeded before the operation is completed, the module will error.
+      - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands.
+        If the timeout is exceeded before the operation is completed, the module fails.
     type: int
     default: 10
   ssh_keyfile:
     description:
-      - Specifies the SSH key to use to authenticate the connection to
-        the remote device. This argument is only used for the I(cli)
-        transports. If the value is not specified in the task, the value of
-        environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+      - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli).
+        If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead.
     type: path
   transport:
     description:
-      - Configures the transport connection to use when connecting to the
-        remote device. The transport argument supports connectivity to the
-        device over ssh, cli or REST.
+      - Configures the transport connection to use when connecting to the remote device. The transport argument supports connectivity
+        to the device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)).
     required: true
     type: str
-    choices: [ cli, rest, ssh ]
+    choices: [cli, rest, ssh]
     default: ssh
   use_ssl:
     description:
-      - Configures the I(transport) to use SSL if set to C(yes) only when the
-        I(transport) argument is configured as rest. If the transport
-        argument is not I(rest), this value is ignored.
+      - Configures the O(transport) to use SSL if set to V(true) only when the O(transport) argument is configured as rest.
+        If the transport argument is not V(rest), this value is ignored.
     type: bool
-    default: yes
+    default: true
   provider:
     description:
-      - Convenience method that allows all I(openswitch) arguments to be passed as
-        a dict object. All constraints (required, choices, etc) must be
-        met either by individual arguments or values in this dict.
+      - Convenience method that allows all C(openswitch) arguments to be passed as a dict object. All constraints (required,
+        choices, and so on) must be met either by individual arguments or values in this dict.
     type: dict
-'''
+"""
diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py
index 94999c04ec..05120f7aa3 100644
--- a/plugins/doc_fragments/oracle.py
+++ b/plugins/doc_fragments/oracle.py
@@ -1,83 +1,80 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    requirements:
-        - "python >= 2.7"
-        - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
-    notes:
-        - For OCI python sdk configuration, please refer to
-          U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
-    options:
-        config_file_location:
-            description:
-                - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
-                  if any, is used. Otherwise, defaults to ~/.oci/config.
-            type: str
-        config_profile_name:
-            description:
-                - The profile to load from the config file referenced by C(config_file_location). If not set, then the
-                  value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
-                  "DEFAULT" profile in C(config_file_location).
-            default: "DEFAULT"
-            type: str
-        api_user:
-            description:
-                - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the
-                  value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
-                  is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
-                  please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
-            type: str
-        api_user_fingerprint:
-            description:
-                - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
-                  environment variable, if any, is used. This option is required if the key fingerprint is not
-                  specified through a configuration file (See C(config_file_location)). To get the key pair's
-                  fingerprint value please refer
-                  U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
-            type: str
-        api_user_key_file:
-            description:
-                - Full path and filename of the private key (in PEM format). If not set, then the value of the
-                  OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
-                  not specified through a configuration file (See C(config_file_location)). If the key is encrypted
-                  with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
-            type: path
-        api_user_key_pass_phrase:
-            description:
-                - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
-                  the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
-                  key passphrase is not specified through a configuration file (See C(config_file_location)).
-            type: str
-        auth_type:
-            description:
-                - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
-                  authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
-                  used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE,
-                  if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
-                  when running ansible playbooks within an OCI compute instance.
-            choices: ['api_key', 'instance_principal']
-            default: 'api_key'
-            type: str
-        tenancy:
-            description:
-                - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
-                  used. This option is required if the tenancy OCID is not specified through a configuration file
-                  (See C(config_file_location)). To get the tenancy OCID, please refer
-                  U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
-            type: str
-        region:
-            description:
-                - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
-                  value of the OCI_REGION variable, if any, is used. This option is required if the region is
-                  not specified through a configuration file (See C(config_file_location)). Please refer to
-                  U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
-                  on OCI regions.
-            type: str
-    """
+    DOCUMENTATION = r"""
+requirements:
+  - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
+notes:
+  - For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html).
+options:
+  config_file_location:
+    description:
+      - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used.
+        Otherwise, defaults to C(~/.oci/config).
+    type: str
+  config_profile_name:
+    description:
+      - The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the
+        E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location).
+    default: "DEFAULT"
+    type: str
+  api_user:
+    description:
+      - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment
+        variable, if any, is used. This option is required if the user is not specified through a configuration file (See
+        O(config_file_location)). To get the user's OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+    type: str
+  api_user_fingerprint:
+    description:
+      - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable,
+        if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See
+        O(config_file_location)). To get the key pair's fingerprint value, please refer to
+        U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+    type: str
+  api_user_key_file:
+    description:
+      - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE)
+        variable, if any, is used. This option is required if the private key is not specified through a configuration file
+        (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option
+        must also be provided.
+    type: path
+  api_user_key_pass_phrase:
+    description:
+      - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the
+        E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. This option is required if the key passphrase is not specified
+        through a configuration file (See O(config_file_location)).
+    type: str
+  auth_type:
+    description:
+      - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is
+        performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified,
+        the value of the E(OCI_ANSIBLE_AUTH_TYPE) environment variable, if any, is used. Use O(auth_type=instance_principal)
+        to use instance principal based authentication when running Ansible playbooks within an OCI compute instance.
+    choices: ['api_key', 'instance_principal']
+    default: 'api_key'
+    type: str
+  tenancy:
+    description:
+      - OCID of your tenancy. If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required
+        if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy
+        OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+    type: str
+  region:
+    description:
+      - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION)
+        variable, if any, is used. This option is required if the region is not specified through a configuration file (See
+        O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm)
+        for more information on OCI regions.
+    type: str
+"""
diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py
index 211ca6f9c1..1728e56d81 100644
--- a/plugins/doc_fragments/oracle_creatable_resource.py
+++ b/plugins/doc_fragments/oracle_creatable_resource.py
@@ -1,25 +1,29 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    options:
-        force_create:
-            description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an
-                         idempotent operation, and doesn't create the resource if it already exists. Setting this option
-                         to true, forcefully creates a copy of the resource, even if it already exists.This option is
-                         mutually exclusive with I(key_by).
-            default: False
-            type: bool
-        key_by:
-            description: The list of comma-separated attributes of this resource which should be used to uniquely
-                         identify an instance of the resource. By default, all the attributes of a resource except
-                         I(freeform_tags) are used to uniquely identify a resource.
-            type: list
-            elements: str
-    """
+    DOCUMENTATION = r"""
+options:
+  force_create:
+    description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation,
+      and does not create the resource if it already exists. Setting this option to V(true) forcefully creates a copy of
+      the resource, even if it already exists. This option is mutually exclusive with O(key_by).
+    default: false
+    type: bool
+  key_by:
+    description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance
+      of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify
+      a resource.
+    type: list
+    elements: str
+"""
diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py
index ff70d45dd9..1ac210bbd4 100644
--- a/plugins/doc_fragments/oracle_display_name_option.py
+++ b/plugins/doc_fragments/oracle_display_name_option.py
@@ -1,16 +1,21 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    options:
-        display_name:
-            description: Use I(display_name) along with the other options to return only resources that match the given
-                         display name exactly.
-            type: str
-    """
+    DOCUMENTATION = r"""
+options:
+  display_name:
+    description: Use O(display_name) along with the other options to return only resources that match the given display name
+      exactly.
+    type: str
+"""
diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py
index 8c4f9c1e39..a281bc5e68 100644
--- a/plugins/doc_fragments/oracle_name_option.py
+++ b/plugins/doc_fragments/oracle_name_option.py
@@ -1,16 +1,20 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    options:
-        name:
-            description: Use I(name) along with the other options to return only resources that match the given name
-                         exactly.
-            type: str
-    """
+    DOCUMENTATION = r"""
+options:
+  name:
+    description: Use O(name) along with the other options to return only resources that match the given name exactly.
+    type: str
+"""
diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py
index f95b22c8ed..ec0096ba33 100644
--- a/plugins/doc_fragments/oracle_tags.py
+++ b/plugins/doc_fragments/oracle_tags.py
@@ -1,22 +1,25 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    options:
-        defined_tags:
-            description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more
-                         information, see
-                         U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
-            type: dict
-        freeform_tags:
-            description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name,
-                         type, or namespace. For more information, see
-                         U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
-            type: dict
-    """
+    DOCUMENTATION = r"""
+options:
+  defined_tags:
+    description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see
+      U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+    type: dict
+  freeform_tags:
+    description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
+      For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+    type: dict
+"""
diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py
index 0312755ffa..868fb3cb04 100644
--- a/plugins/doc_fragments/oracle_wait_options.py
+++ b/plugins/doc_fragments/oracle_wait_options.py
@@ -1,26 +1,30 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    options:
-        wait:
-            description: Whether to wait for create or delete operation to complete.
-            default: yes
-            type: bool
-        wait_timeout:
-            description: Time, in seconds, to wait when I(wait=yes).
-            default: 1200
-            type: int
-        wait_until:
-            description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default,
-                         when I(wait=yes), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
-                         RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/
-                         TERMINATED lifecycle state during delete operation.
-            type: str
-    """
+    DOCUMENTATION = r"""
+options:
+  wait:
+    description: Whether to wait for create or delete operation to complete.
+    default: true
+    type: bool
+  wait_timeout:
+    description: Time, in seconds, to wait when O(wait=true).
+    default: 1200
+    type: int
+  wait_until:
+    description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default, when O(wait=true),
+      we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/RUNNING applicable lifecycle state during
+      create operation and to get into DELETED/DETACHED/TERMINATED lifecycle state during delete operation.
+    type: str
+"""
diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py
new file mode 100644
index 0000000000..70a502ddda
--- /dev/null
+++ b/plugins/doc_fragments/pipx.py
@@ -0,0 +1,40 @@
+
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+    DOCUMENTATION = r"""
+options:
+  global:
+    description:
+      - The module passes the C(--global) argument to C(pipx), to execute actions in global scope.
+    type: bool
+    default: false
+  executable:
+    description:
+      - Path to the C(pipx) installed in the system.
+      - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as Ansible
+        itself.
+    type: path
+requirements:
+  - This module requires C(pipx) version 1.7.0 or above.
+  - Please note that C(pipx) 1.7.0 requires Python 3.8 or above.
+  - Please note that C(pipx) 1.8.0 requires Python 3.9 or above.
+notes:
+  - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
+  - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module, meaning
+    that C(python -m pipx) must work.
+  - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using
+    the R(environment Ansible keyword, playbooks_environment).
+  - This module disables emojis in the output of C(pipx) commands to reduce clutter. In C(pipx) 1.8.0, the environment variable
+    E(USE_EMOJI) was renamed to E(PIPX_USE_EMOJI) and for compatibility with both versions, starting in community.general
+    11.4.0, this module sets them both to C(0) to disable emojis.
+seealso:
+  - name: C(pipx) command manual page
+    description: Manual page for the command.
+    link: https://pipx.pypa.io/latest/docs/
+"""
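A short sketch of the behaviour described in the pipx notes above, combining O(global) with a C(pipx) environment variable passed through the C(environment) keyword (the package name and path are illustrative assumptions):

```yaml
# Hypothetical task: install a tool in pipx's global scope with a custom PIPX_HOME.
- name: Install tox system-wide with pipx
  community.general.pipx:
    name: tox
    global: true
  environment:
    PIPX_HOME: /opt/pipx
```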
diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py
index e2eaff2889..17e03fc716 100644
--- a/plugins/doc_fragments/pritunl.py
+++ b/plugins/doc_fragments/pritunl.py
@@ -1,43 +1,37 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2021, Florian Dambrine
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2021, Florian Dambrine
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
+from __future__ import annotations


 class ModuleDocFragment(object):
     DOCUMENTATION = r"""
 options:
-    pritunl_url:
-        type: str
-        required: true
-        description:
-            - URL and port of the Pritunl server on which the API is enabled.
-
-    pritunl_api_token:
-        type: str
-        required: true
-        description:
-            - API Token of a Pritunl admin user.
-            - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
-
-    pritunl_api_secret:
-        type: str
-        required: true
-        description:
-            - API Secret found in Administrators > USERNAME > API Secret.
-
-    validate_certs:
-        type: bool
-        required: false
-        default: true
-        description:
-            - If certificates should be validated or not.
-            - This should never be set to C(false), except if you are very sure that
-              your connection to the server can not be subject to a Man In The Middle
-              attack.
+  pritunl_url:
+    type: str
+    required: true
+    description:
+      - URL and port of the Pritunl server on which the API is enabled.
+  pritunl_api_token:
+    type: str
+    required: true
+    description:
+      - API Token of a Pritunl admin user.
+      - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
+  pritunl_api_secret:
+    type: str
+    required: true
+    description:
+      - API Secret found in Administrators > USERNAME > API Secret.
+  validate_certs:
+    type: bool
+    required: false
+    default: true
+    description:
+      - If certificates should be validated or not.
+      - This should never be set to V(false), except if you are very sure that your connection to the server cannot be subject
+        to a man-in-the-middle attack.
 """
diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py
deleted file mode 100644
index 165a78527a..0000000000
--- a/plugins/doc_fragments/proxmox.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-    # Common parameters for Proxmox VE modules
-    DOCUMENTATION = r'''
-options:
-  api_host:
-    description:
-      - Specify the target host of the Proxmox VE cluster.
-    type: str
-    required: true
-  api_user:
-    description:
-      - Specify the user to authenticate with.
-    type: str
-    required: true
-  api_password:
-    description:
-      - Specify the password to authenticate with.
-      - You can use C(PROXMOX_PASSWORD) environment variable.
-    type: str
-  api_token_id:
-    description:
-      - Specify the token ID.
-    type: str
-    version_added: 1.3.0
-  api_token_secret:
-    description:
-      - Specify the token secret.
-    type: str
-    version_added: 1.3.0
-  validate_certs:
-    description:
-      - If C(no), SSL certificates will not be validated.
-      - This should only be used on personally controlled sites using self-signed certificates.
-    type: bool
-    default: no
-requirements: [ "proxmoxer", "requests" ]
-'''
-
-    SELECTION = r'''
-options:
-  vmid:
-    description:
-      - Specifies the instance ID.
-      - If not set the next available ID will be fetched from ProxmoxAPI.
-    type: int
-  node:
-    description:
-      - Proxmox VE node on which to operate.
-      - Only required for I(state=present).
-      - For every other states it will be autodiscovered.
-    type: str
-  pool:
-    description:
-      - Add the new VM to the specified pool.
-    type: str
-'''
diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py
deleted file mode 100644
index f35f026711..0000000000
--- a/plugins/doc_fragments/purestorage.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Simon Dodsley
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
-    # Standard Pure Storage documentation fragment
-    DOCUMENTATION = r'''
-options:
-  - See separate platform section for more details
-requirements:
-  - See separate platform section for more details
-notes:
-  - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
-'''
-
-    # Documentation fragment for FlashBlade
-    FB = r'''
-options:
-  fb_url:
-    description:
-      - FlashBlade management IP address or Hostname.
-    type: str
-  api_token:
-    description:
-      - FlashBlade API token for admin privileged user.
-    type: str
-notes:
-  - This module requires the C(purity_fb) Python library
-  - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
-    if I(fb_url) and I(api_token) arguments are not passed to the module directly
-requirements:
-  - python >= 2.7
-  - purity_fb >= 1.1
-'''
-
-    # Documentation fragment for FlashArray
-    FA = r'''
-options:
-  fa_url:
-    description:
-      - FlashArray management IPv4 address or Hostname.
-    type: str
-    required: true
-  api_token:
-    description:
-      - FlashArray API token for admin privileged user.
-    type: str
-    required: true
-notes:
-  - This module requires the C(purestorage) Python library
-  - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
-    if I(fa_url) and I(api_token) arguments are not passed to the module directly
-requirements:
-  - python >= 2.7
-  - purestorage
-'''
diff --git a/plugins/doc_fragments/rackspace.py b/plugins/doc_fragments/rackspace.py
deleted file mode 100644
index 0f57dd8899..0000000000
--- a/plugins/doc_fragments/rackspace.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Matt Martz
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
-    # Standard Rackspace only documentation fragment
-    DOCUMENTATION = r'''
-options:
-  api_key:
-    description:
-      - Rackspace API key, overrides I(credentials).
-    type: str
-    aliases: [ password ]
-  credentials:
-    description:
-      - File to find the Rackspace credentials in. Ignored if I(api_key) and
-        I(username) are provided.
-    type: path
-    aliases: [ creds_file ]
-  env:
-    description:
-      - Environment as configured in I(~/.pyrax.cfg),
-        see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
-    type: str
-  region:
-    description:
-      - Region to create an instance in.
-    type: str
-  username:
-    description:
-      - Rackspace username, overrides I(credentials).
-    type: str
-  validate_certs:
-    description:
-      - Whether or not to require SSL validation of API endpoints.
-    type: bool
-    aliases: [ verify_ssl ]
-requirements:
-  - python >= 2.6
-  - pyrax
-notes:
-  - The following environment variables can be used, C(RAX_USERNAME),
-    C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
-  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
-    appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
-  - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
-  - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-'''
-
-    # Documentation fragment including attributes to enable communication
-    # of other OpenStack clouds. Not all rax modules support this.
-    OPENSTACK = r'''
-options:
-  api_key:
-    type: str
-    description:
-      - Rackspace API key, overrides I(credentials).
-    aliases: [ password ]
-  auth_endpoint:
-    type: str
-    description:
-      - The URI of the authentication service.
-      - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
-  credentials:
-    type: path
-    description:
-      - File to find the Rackspace credentials in. Ignored if I(api_key) and
-        I(username) are provided.
-    aliases: [ creds_file ]
-  env:
-    type: str
-    description:
-      - Environment as configured in I(~/.pyrax.cfg),
-        see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
-  identity_type:
-    type: str
-    description:
-      - Authentication mechanism to use, such as rackspace or keystone.
-    default: rackspace
-  region:
-    type: str
-    description:
-      - Region to create an instance in.
-  tenant_id:
-    type: str
-    description:
-      - The tenant ID used for authentication.
-  tenant_name:
-    type: str
-    description:
-      - The tenant name used for authentication.
-  username:
-    type: str
-    description:
-      - Rackspace username, overrides I(credentials).
-  validate_certs:
-    description:
-      - Whether or not to require SSL validation of API endpoints.
-    type: bool
-    aliases: [ verify_ssl ]
-requirements:
-  - python >= 2.6
-  - pyrax
-notes:
-  - The following environment variables can be used, C(RAX_USERNAME),
-    C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
-  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
-    appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
-  - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
-  - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-'''
diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py
new file mode 100644
index 0000000000..ed95eeab83
--- /dev/null
+++ b/plugins/doc_fragments/redfish.py
@@ -0,0 +1,35 @@
+
+# Copyright (c) 2025 Ansible community
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+
+    # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC
+    DOCUMENTATION = r"""
+options:
+  validate_certs:
+    description:
+      - If V(false), TLS/SSL certificates are not validated.
+      - Set this to V(true) to enable certificate checking. Should be used together with O(ca_path).
+    type: bool
+    default: false
+  ca_path:
+    description:
+      - PEM formatted file that contains a CA certificate to be used for validation.
+      - Only used if O(validate_certs=true).
+    type: path
+  ciphers:
+    required: false
+    description:
+      - TLS/SSL Ciphers to use for the request.
+      - When a list is provided, all ciphers are joined in order with V(:).
+      - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+        for more details.
+      - The available ciphers depend on the Python and OpenSSL/LibreSSL versions.
+    type: list
+    elements: str
+"""
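As a hedged usage sketch of the new Redfish fragment (the module, category, and paths are assumptions, not part of this change):

```yaml
# Hypothetical Redfish call with certificate checking enabled, as the options above describe.
- name: Query system inventory over verified TLS
  community.general.redfish_info:
    category: Systems
    baseuri: "{{ bmc_address }}"
    username: "{{ bmc_user }}"
    password: "{{ bmc_password }}"
    validate_certs: true
    ca_path: /etc/ssl/certs/bmc-ca.pem
    ciphers:
      - ECDHE+AESGCM   # list entries are joined with ':' into an OpenSSL cipher string
      - DHE+AESGCM
```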
diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py
index e7af25ec8f..38889a3cbd 100644
--- a/plugins/doc_fragments/redis.py
+++ b/plugins/doc_fragments/redis.py
@@ -1,15 +1,14 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2021, Andreas Botzner
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2021, Andreas Botzner
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations


 class ModuleDocFragment(object):
     # Common parameters for Redis modules
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
   login_host:
     description:
@@ -39,19 +38,26 @@ options:
   validate_certs:
     description:
       - Specify whether or not to validate TLS certificates.
-      - This should only be turned off for personally controlled sites or with
-        C(localhost) as target.
+      - This should only be turned off for personally controlled sites or with C(localhost) as target.
     type: bool
     default: true
   ca_certs:
     description:
-      - Path to root certificates file. If not set and I(tls) is
-        set to C(true), certifi ca-certificates will be used.
+      - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used.
     type: str
-requirements: [ "redis", "certifi" ]
+  client_cert_file:
+    description:
+      - Path to the client certificate file.
+    type: str
+    version_added: 9.3.0
+  client_key_file:
+    description:
+      - Path to the client private key file.
+    type: str
+    version_added: 9.3.0
+requirements: ["redis", "certifi"]
 notes:
-  - Requires the C(redis) Python package on the remote host. You can
-    install it with pip (C(pip install redis)) or with a package manager.
-    Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
-'''
+  - Requires the C(redis) Python package on the remote host. You can install it with pip (C(pip install redis)) or with a
+    package manager. Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
+"""
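A sketch showing the new client-certificate options in context (the module name and paths are illustrative assumptions):

```yaml
# Hypothetical task connecting to Redis over TLS with mutual authentication.
- name: Read a key over TLS
  community.general.redis_data_info:
    login_host: redis.example.com
    login_password: "{{ redis_password }}"
    key: mykey
    tls: true
    ca_certs: /etc/ssl/certs/redis-ca.pem
    client_cert_file: /etc/ssl/certs/redis-client.pem   # added in 9.3.0 above
    client_key_file: /etc/ssl/private/redis-client.key  # added in 9.3.0 above
```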
+""" diff --git a/plugins/doc_fragments/rundeck.py b/plugins/doc_fragments/rundeck.py index 056a54f37f..3e9d99aa7a 100644 --- a/plugins/doc_fragments/rundeck.py +++ b/plugins/doc_fragments/rundeck.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: url: type: str @@ -28,4 +27,4 @@ options: description: - Rundeck User API Token. required: true -''' +""" diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py index c1e1b13d9d..7810deb901 100644 --- a/plugins/doc_fragments/scaleway.py +++ b/plugins/doc_fragments/scaleway.py @@ -1,50 +1,57 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Scaleway OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - Scaleway API URL. type: str default: https://api.scaleway.com - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Scaleway API in seconds. type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] query_parameters: description: - - List of parameters passed to the query string. + - List of parameters passed to the query string. type: dict default: {} validate_certs: description: - Validate SSL certs of the Scaleway API. type: bool - default: yes + default: true notes: - - Also see the API documentation on U(https://developer.scaleway.com/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN). - - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL) - environment variable. -''' + - Also see the API documentation on U(https://developer.scaleway.com/). + - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence + E(SCW_TOKEN), E(SCW_API_KEY), E(SCW_OAUTH_TOKEN) or E(SCW_API_TOKEN). + - If one wants to use a different O(api_url) one can also set the E(SCW_API_URL) environment variable. +""" + + ACTIONGROUP_SCALEWAY = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.scaleway) in C(module_defaults) to set defaults for this module. 
diff --git a/plugins/doc_fragments/scaleway_waitable_resource.py b/plugins/doc_fragments/scaleway_waitable_resource.py
new file mode 100644
index 0000000000..2a14c7571e
--- /dev/null
+++ b/plugins/doc_fragments/scaleway_waitable_resource.py
@@ -0,0 +1,31 @@
+
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+
+    # Standard documentation fragment
+    DOCUMENTATION = r"""
+options:
+  wait:
+    description:
+      - Wait for the resource to reach its desired state before returning.
+    type: bool
+    default: true
+  wait_timeout:
+    type: int
+    description:
+      - Time to wait for the resource to reach the expected state.
+    required: false
+    default: 300
+  wait_sleep_time:
+    type: int
+    description:
+      - Time to wait before every attempt to check the state of the resource.
+    required: false
+    default: 3
+"""
diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py
index 413fb49675..831f4ccc96 100644
--- a/plugins/doc_fragments/utm.py
+++ b/plugins/doc_fragments/utm.py
@@ -1,54 +1,55 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Johannes Brunswicker
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations


 class ModuleDocFragment(object):
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
-    headers:
-        description:
-            - A dictionary of additional headers to be sent to POST and PUT requests.
-            - Is needed for some modules
-        type: dict
-        required: false
-    utm_host:
-        description:
-            - The REST Endpoint of the Sophos UTM.
-        type: str
-        required: true
-    utm_port:
-        description:
-            - The port of the REST interface.
-        type: int
-        default: 4444
-    utm_token:
-        description:
-            - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\
-               PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2."
-        type: str
-        required: true
-    utm_protocol:
-        description:
-            - The protocol of the REST Endpoint.
-        choices: [ http, https ]
-        type: str
-        default: https
-    validate_certs:
-        description:
-            - Whether the REST interface's ssl certificate should be verified or not.
-        type: bool
-        default: yes
-    state:
-        description:
-            - The desired state of the object.
-            - C(present) will create or update an object
-            - C(absent) will delete an object if it was present
-        type: str
-        choices: [ absent, present ]
-        default: present
-'''
+  headers:
+    description:
+      - A dictionary of additional headers to be sent to POST and PUT requests.
+      - Is needed for some modules.
+    type: dict
+    required: false
+    default: {}
+  utm_host:
+    description:
+      - The REST Endpoint of the Sophos UTM.
+    type: str
+    required: true
+  utm_port:
+    description:
+      - The port of the REST interface.
+    type: int
+    default: 4444
+  utm_token:
+    description:
+      - The token used to authenticate against the REST API.
+      - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter
+        2.4.2.
+ type: str + required: true + utm_protocol: + description: + - The protocol of the REST Endpoint. + choices: [http, https] + type: str + default: https + validate_certs: + description: + - Whether the REST interface's SSL certificate should be verified or not. + type: bool + default: true + state: + description: + - The desired state of the object. + - V(present) creates or updates an object. + - V(absent) deletes an object if present. + type: str + choices: [absent, present] + default: present +""" diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index d541d5ad85..3ca6684469 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,23 +1,13 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2019, Sandeep Kasargod -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Sandeep Kasargod +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for Vexata VX100 arrays. -''' - # Documentation fragment for Vexata VX100 series VX100 = r''' options: @@ -29,25 +19,26 @@ options: user: description: - Vexata API user with administrative privileges. + - Uses the E(VEXATA_USER) environment variable as a fallback. required: false type: str password: description: - Vexata API user password. + - Uses the E(VEXATA_PASSWORD) environment variable as a fallback. required: false type: str validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If set to V(true), please make sure Python >= 2.7.9 is installed on the given machine. required: false type: bool - default: 'no' + default: false requirements: - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array - vexatapi >= 0.0.1 - - python >= 2.7 - - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if + - E(VEXATA_USER) and E(VEXATA_PASSWORD) environment variables must be set if user and password arguments are not passed to the module directly. 
''' diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py index 747bf02f1b..7da1391420 100644 --- a/plugins/doc_fragments/xenserver.py +++ b/plugins/doc_fragments/xenserver.py @@ -1,40 +1,39 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Common parameters for XenServer modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address of the XenServer host or XenServer pool master. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead. + - The hostname or IP address of the XenServer host or XenServer pool master. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead. type: str default: localhost - aliases: [ host, pool ] + aliases: [host, pool] username: description: - - The username to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead. + - The username to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead. type: str default: root - aliases: [ admin, user ] + aliases: [admin, user] password: description: - - The password to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead. + - The password to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead. type: str - aliases: [ pass, pwd ] + aliases: [pass, pwd] validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead. type: bool - default: yes -''' + default: true +""" diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py new file mode 100644 index 0000000000..da784ab12b --- /dev/null +++ b/plugins/filter/accumulate.py @@ -0,0 +1,62 @@ +# Copyright (c) Max Gautier +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = r""" +name: accumulate +short_description: Produce a list of accumulated sums of the input list contents +version_added: 10.1.0 +author: Max Gautier (@VannTen) +description: + - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate). 
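Editor's note: since the accumulate filter is a plain passthrough, its semantics are easiest to see in Python directly. A minimal sketch, assuming ordinary lists as input (the path example mirrors the filter's EXAMPLES further down):

from itertools import accumulate

# Addition is whatever + means for the element type: numbers sum, lists concatenate.
print(list(accumulate([1, 2, 3, 4])))  # [1, 3, 6, 10]

parts = [[''], ['some'], ['path'], ['to'], ['my'], ['file']]
print(['/'.join(p) for p in accumulate(parts)])
# ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file']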
+ - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list. + - Addition means the default Python implementation of C(+) for input list elements type. +options: + _input: + description: A list. + type: list + elements: any + required: true +""" + +RETURN = r""" +_value: + description: A list of cumulated sums of the elements of the input list. + type: list + elements: any +""" + +EXAMPLES = r""" +- name: Enumerate parent directories of some path + ansible.builtin.debug: + var: > + "/some/path/to/my/file" + | split('/') | map('split', '/') + | community.general.accumulate | map('join', '/') + # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file'] + +- name: Growing string + ansible.builtin.debug: + var: "'abc' | community.general.accumulate" + # Produces ['a', 'ab', 'abc'] +""" + +from itertools import accumulate +from collections.abc import Sequence + +from ansible.errors import AnsibleFilterError + + +def list_accumulate(sequence): + if not isinstance(sequence, Sequence): + raise AnsibleFilterError(f'Invalid value type ({type(sequence)}) for accumulate ({sequence!r})') + + return accumulate(sequence) + + +class FilterModule(object): + + def filters(self): + return { + 'accumulate': list_accumulate, + } diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py new file mode 100644 index 0000000000..f89bfd6d1a --- /dev/null +++ b/plugins/filter/counter.py @@ -0,0 +1,64 @@ +# Copyright (c) 2021, Remy Keil +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: counter +short_description: Counts hashable elements in a sequence +version_added: 4.3.0 +author: Rémy Keil (@keilr) +description: + - Counts hashable elements in a sequence. +options: + _input: + description: A sequence. + type: list + elements: any + required: true +""" + +EXAMPLES = r""" +- name: Count occurrences + ansible.builtin.debug: + msg: >- + {{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }} + # Produces: {1: 1, 'a': 3, 2: 2, 'b': 1} +""" + +RETURN = r""" +_value: + description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as + values. + type: dictionary +""" + +from ansible.errors import AnsibleFilterError +from collections.abc import Sequence +from collections import Counter + + +def counter(sequence): + ''' Count elements in a sequence. Returns dict with count result. ''' + if not isinstance(sequence, Sequence): + raise AnsibleFilterError(f'Argument for community.general.counter must be a sequence (string or list). 
{sequence} is {type(sequence)}') + + try: + result = dict(Counter(sequence)) + except TypeError as e: + raise AnsibleFilterError( + f"community.general.counter needs a sequence with hashable elements (int, float or str) - {e}" + ) + return result + + +class FilterModule(object): + ''' Ansible counter jinja2 filters ''' + + def filters(self): + filters = { + 'counter': counter, + } + + return filters diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py new file mode 100644 index 0000000000..11a6e77495 --- /dev/null +++ b/plugins/filter/crc32.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022, Julien Riou +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common.text.converters import to_bytes +from ansible.module_utils.common.collections import is_string + +try: + from zlib import crc32 + HAS_ZLIB = True +except ImportError: + HAS_ZLIB = False + + +DOCUMENTATION = r""" +name: crc32 +short_description: Generate a CRC32 checksum +version_added: 5.4.0 +description: + - Checksum a string using CRC32 algorithm and return its hexadecimal representation. +options: + _input: + description: + - The string to checksum. + type: string + required: true +author: + - Julien Riou +""" + +EXAMPLES = r""" +- name: Checksum a test string + ansible.builtin.debug: + msg: "{{ 'test' | community.general.crc32 }}" +""" + +RETURN = r""" +_value: + description: CRC32 checksum. + type: string +""" + + +def crc32s(value): + if not is_string(value): + raise AnsibleFilterError(f'Invalid value type ({type(value)}) for crc32 ({value!r})') + + if not HAS_ZLIB: + raise AnsibleFilterError('Failed to import zlib module') + + data = to_bytes(value, errors='surrogate_or_strict') + return f"{crc32(data) & 0xffffffff:x}" + + +class FilterModule: + def filters(self): + return { + 'crc32': crc32s, + } diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 3d20e752b1..d2d8bb952c 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -1,10 +1,63 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations + +DOCUMENTATION = r""" +name: dict +short_description: Convert a list of tuples into a dictionary +version_added: 3.0.0 +author: Felix Fontein (@felixfontein) +description: + - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. +options: + _input: + description: A list of tuples (with exactly two elements). 
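Editor's note: the two filters added above (counter and crc32) are thin wrappers over the standard library. A quick sketch of their cores:

from collections import Counter
from zlib import crc32

# counter: occurrence counts, keyed in order of first appearance
print(dict(Counter([1, 'a', 2, 2, 'a', 'b', 'a'])))  # {1: 1, 'a': 3, 2: 2, 'b': 1}

# crc32: mask to 32 bits and render as lowercase hex, as crc32s() does
print(f"{crc32(b'test') & 0xffffffff:x}")  # d87f7e0c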
+ type: list + elements: tuple + required: true +""" + +EXAMPLES = r""" +- name: Convert list of tuples into dictionary + ansible.builtin.set_fact: + dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" + # Result is {1: 2, 'a': 'b'} + +- name: Create a list of dictionaries with map and the community.general.dict filter + ansible.builtin.debug: + msg: >- + {{ values | map('zip', ['k1', 'k2', 'k3']) + | map('map', 'reverse') + | map('community.general.dict') }} + vars: + values: + - - foo + - 23 + - a + - - bar + - 42 + - b + # Produces the following list of dictionaries: + # { + # "k1": "foo", + # "k2": 23, + # "k3": "a" + # }, + # { + # "k1": "bar", + # "k2": 42, + # "k3": "b" + # } +""" + +RETURN = r""" +_value: + description: A dictionary with the provided key-value pairs. + type: dictionary +""" def dict_filter(sequence): diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index 7ce6c3e44a..79c8dd0fe6 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,9 +1,40 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +DOCUMENTATION = r""" +name: dict_kv +short_description: Convert a value to a dictionary with a single key-value pair +version_added: 1.3.0 +author: Stanislav German-Evtushenko (@giner) +description: + - Convert a value to a dictionary with a single key-value pair. +positional: key +options: + _input: + description: The value for the single key-value pair. + type: any + required: true + key: + description: The key for the single key-value pair. + type: any + required: true +""" + +EXAMPLES = r""" +- name: Create a one-element dictionary from a value + ansible.builtin.debug: + msg: "{{ 'myvalue' | dict_kv('mykey') }}" + # Produces the dictionary {'mykey': 'myvalue'} +""" + +RETURN = r""" +_value: + description: A dictionary with a single key-value pair. + type: dictionary +""" def dict_kv(value, key): diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index b66d47699b..160eed959e 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -1,14 +1,84 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations + +DOCUMENTATION = r""" +name: from_csv +short_description: Converts CSV text input into list of dicts +version_added: 2.3.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Converts CSV text input into list of dictionaries. +options: + _input: + description: A string containing a CSV document. + type: string + required: true + dialect: + description: + - The CSV dialect to use when parsing the CSV file. 
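Editor's note: both dictionary helpers documented here reduce to one-liners in plain Python; a minimal sketch:

# The dict filter is the built-in dict() applied to two-element pairs.
print(dict([[1, 2], ['a', 'b']]))  # {1: 2, 'a': 'b'}

# dict_kv builds a single-entry mapping, matching the plugin's dict_kv(value, key).
def dict_kv(value, key):
    return {key: value}

print(dict_kv('myvalue', 'mykey'))  # {'mykey': 'myvalue'}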
+ - Possible values include V(excel), V(excel-tab) or V(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + elements: str + delimiter: + description: + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool +""" + +EXAMPLES = r""" +- name: Parse a CSV file's contents + ansible.builtin.debug: + msg: >- + {{ csv_data | community.general.from_csv(dialect='unix') }} + vars: + csv_data: | + Column 1,Value + foo,23 + bar,42 + # Produces the following list of dictionaries: + # { + # "Column 1": "foo", + # "Value": "23", + # }, + # { + # "Column 1": "bar", + # "Value": "42", + # } +""" + +RETURN = r""" +_value: + description: A list with one dictionary per row. + type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, @@ -26,7 +96,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial try: dialect = initialize_dialect(dialect, **dialect_params) except (CustomDialectFailureError, DialectNotAvailableError) as e: - raise AnsibleFilterError(to_native(e)) + raise AnsibleFilterError(str(e)) reader = read_csv(data, dialect, fieldnames) @@ -36,7 +106,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial for row in reader: data_list.append(row) except CSVError as e: - raise AnsibleFilterError("Unable to process file: %s" % to_native(e)) + raise AnsibleFilterError(f"Unable to process file: {e}") return data_list diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py new file mode 100644 index 0000000000..07b16d4ac2 --- /dev/null +++ b/plugins/filter/from_ini.py @@ -0,0 +1,95 @@ + +# Copyright (c) 2023, Steffen Scheib +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: from_ini +short_description: Converts INI text input into a dictionary +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts INI text input into a dictionary. +options: + _input: + description: A string containing an INI document. 
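Editor's note: with the dialect options shown above, from_csv behaves like csv.DictReader over the input string. A rough stdlib equivalent, assuming a header row and the unix dialect:

import csv
import io

data = "Column 1,Value\nfoo,23\nbar,42\n"
print(list(csv.DictReader(io.StringIO(data), dialect='unix')))
# [{'Column 1': 'foo', 'Value': '23'}, {'Column 1': 'bar', 'Value': '42'}]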
+ type: string + required: true +""" + +EXAMPLES = r""" +- name: Slurp an INI file + ansible.builtin.slurp: + src: /etc/rhsm/rhsm.conf + register: rhsm_conf + +- name: Display the INI file as dictionary + ansible.builtin.debug: + var: rhsm_conf.content | b64decode | community.general.from_ini + +- name: Set a new dictionary fact with the contents of the INI file + ansible.builtin.set_fact: + rhsm_dict: >- + {{ + rhsm_conf.content | b64decode | community.general.from_ini + }} +""" + +RETURN = r""" +_value: + description: A dictionary representing the INI file. + type: dictionary +""" + + +from io import StringIO +from configparser import ConfigParser + +from ansible.errors import AnsibleFilterError + + +class IniParser(ConfigParser): + ''' Implements a configparser which is able to return a dict ''' + + def __init__(self): + super().__init__(interpolation=None) + self.optionxform = str + + def as_dict(self): + d = dict(self._sections) + for k in d: + d[k] = dict(self._defaults, **d[k]) + d[k].pop('__name__', None) + + if self._defaults: + d['DEFAULT'] = dict(self._defaults) + + return d + + +def from_ini(obj): + ''' Read the given string as INI file and return a dict ''' + + if not isinstance(obj, str): + raise AnsibleFilterError(f'from_ini requires a str, got {type(obj)}') + + parser = IniParser() + + try: + parser.read_file(StringIO(obj)) + except Exception as ex: + raise AnsibleFilterError(f'from_ini failed to parse given string: {ex}', orig_exc=ex) + + return parser.as_dict() + + +class FilterModule(object): + ''' Query filter ''' + + def filters(self): + + return { + 'from_ini': from_ini + } diff --git a/plugins/filter/groupby.py b/plugins/filter/groupby.py deleted file mode 100644 index a2a85aa905..0000000000 --- a/plugins/filter/groupby.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Mapping, Sequence - - -def groupby_as_dict(sequence, attribute): - ''' - Given a sequence of dictionaries and an attribute name, returns a dictionary mapping - the value of this attribute to the dictionary. - - If multiple dictionaries in the sequence have the same value for this attribute, - the filter will fail. 
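Editor's note: the IniParser above is a stock ConfigParser with interpolation disabled and case-preserving option names. A sketch of the same round trip using just those two settings:

from configparser import ConfigParser
from io import StringIO

parser = ConfigParser(interpolation=None)
parser.optionxform = str  # keep option-name case, as IniParser does
parser.read_file(StringIO("[server]\nhostname = example.com\nPort = 4444\n"))
print({section: dict(parser[section]) for section in parser.sections()})
# {'server': {'hostname': 'example.com', 'Port': '4444'}}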
- ''' - if not isinstance(sequence, Sequence): - raise AnsibleFilterError('Input is not a sequence') - - result = dict() - for list_index, element in enumerate(sequence): - if not isinstance(element, Mapping): - raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index)) - if attribute not in element: - raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index)) - result_index = element[attribute] - if result_index in result: - raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index)) - result[result_index] = element - return result - - -class FilterModule(object): - ''' Ansible list filters ''' - - def filters(self): - return { - 'groupby_as_dict': groupby_as_dict, - } diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py new file mode 100644 index 0000000000..766d365575 --- /dev/null +++ b/plugins/filter/groupby_as_dict.py @@ -0,0 +1,89 @@ +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: groupby_as_dict +short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute +version_added: 3.1.0 +author: Felix Fontein (@felixfontein) +description: + - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute. + - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries + with the same value, or when you need a dictionary with list values, or when you need to use deeply nested attributes. +positional: attribute +options: + _input: + description: A list of dictionaries. + type: list + elements: dictionary + required: true + attribute: + description: The attribute to use as the key. + type: str + required: true +""" + +EXAMPLES = r""" +- name: Arrange a list of dictionaries as a dictionary of dictionaries + ansible.builtin.debug: + msg: "{{ sequence | community.general.groupby_as_dict('key') }}" + vars: + sequence: + - key: value + foo: bar + - key: other_value + baz: bar + # Produces the following nested structure: + # + # value: + # key: value + # foo: bar + # other_value: + # key: other_value + # baz: bar +""" + +RETURN = r""" +_value: + description: A dictionary containing the dictionaries from the list as values. + type: dictionary +""" + +from ansible.errors import AnsibleFilterError +from collections.abc import Mapping, Sequence + + +def groupby_as_dict(sequence, attribute): + ''' + Given a sequence of dictionaries and an attribute name, returns a dictionary mapping + the value of this attribute to the dictionary. + + If multiple dictionaries in the sequence have the same value for this attribute, + the filter will fail. 
+    '''
+    if not isinstance(sequence, Sequence):
+        raise AnsibleFilterError('Input is not a sequence')
+
+    result = dict()
+    for list_index, element in enumerate(sequence):
+        if not isinstance(element, Mapping):
+            raise AnsibleFilterError(f'Sequence element #{list_index} is not a mapping')
+        if attribute not in element:
+            raise AnsibleFilterError(f'Attribute not contained in element #{list_index} of sequence')
+        result_index = element[attribute]
+        if result_index in result:
+            raise AnsibleFilterError(f'Multiple sequence entries have attribute value {result_index!r}')
+        result[result_index] = element
+    return result
+
+
+class FilterModule(object):
+    ''' Ansible list filters '''
+
+    def filters(self):
+        return {
+            'groupby_as_dict': groupby_as_dict,
+        }
diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py
index c4735afeae..c58ae4d70b 100644
--- a/plugins/filter/hashids.py
+++ b/plugins/filter/hashids.py
@@ -1,20 +1,23 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2021, Andrew Pantuso (@ajpantuso)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations

 from ansible.errors import (
     AnsibleError,
     AnsibleFilterError,
-    AnsibleFilterTypeError,
 )

 from ansible.module_utils.common.text.converters import to_native
 from ansible.module_utils.common.collections import is_sequence

+try:
+    from ansible.errors import AnsibleTypeError
+except ImportError:
+    from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError
+
 try:
     from hashids import Hashids
     HAS_HASHIDS = True
@@ -26,7 +29,7 @@ def initialize_hashids(**kwargs):
     if not HAS_HASHIDS:
         raise AnsibleError("The hashids library must be installed in order to use this plugin")

-    params = dict((k, v) for k, v in kwargs.items() if v)
+    params = {k: v for k, v in kwargs.items() if v}

     try:
         return Hashids(**params)
@@ -63,9 +66,7 @@ def hashids_encode(nums, salt=None, alphabet=None, min_length=None):
     try:
         hashid = hashids.encode(*nums)
     except TypeError as e:
-        raise AnsibleFilterTypeError(
-            "Data to encode must by a tuple or list of ints: %s" % to_native(e)
-        )
+        raise AnsibleTypeError(f"Data to encode must be a tuple or list of ints: {e}")

     return hashid
diff --git a/plugins/filter/hashids_decode.yml b/plugins/filter/hashids_decode.yml
new file mode 100644
index 0000000000..3d2144f725
--- /dev/null
+++ b/plugins/filter/hashids_decode.yml
@@ -0,0 +1,43 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: hashids_decode
+  short_description: Decodes a sequence of numbers from a YouTube-like hash
+  version_added: 3.0.0
+  author: Andrew Pantuso (@Ajpantuso)
+  description:
+    - Decodes a sequence of numbers from a YouTube-like hash.
+  options:
+    _input:
+      description: A YouTube-like hash.
+      type: string
+      required: true
+    salt:
+      description:
+        - String to use as salt when hashing.
+      type: str
+    alphabet:
+      description:
+        - String of 16 or more unique characters to produce a hash.
+      type: str
+    min_length:
+      description:
+        - Minimum length of hash produced.
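Editor's note: the hashids_encode/hashids_decode pair round-trips through the hashids package. A minimal sketch with the default salt and alphabet; the values match the documented examples:

from hashids import Hashids  # assumes the hashids package is installed

h = Hashids()
print(h.encode(1, 2, 3))         # o2fXhV
print(list(h.decode('o2fXhV')))  # [1, 2, 3]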
+      type: integer
+
+EXAMPLES: |
+  - name: Convert hash to list of integers
+    ansible.builtin.debug:
+      msg: "{{ 'o2fXhV' | community.general.hashids_decode }}"
+    # Produces: [1, 2, 3]
+
+RETURN:
+  _value:
+    description: A list of integers.
+    type: list
+    elements: integer
diff --git a/plugins/filter/hashids_encode.yml b/plugins/filter/hashids_encode.yml
new file mode 100644
index 0000000000..af19522d0a
--- /dev/null
+++ b/plugins/filter/hashids_encode.yml
@@ -0,0 +1,43 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: hashids_encode
+  short_description: Encodes YouTube-like hashes from a sequence of integers
+  version_added: 3.0.0
+  author: Andrew Pantuso (@Ajpantuso)
+  description:
+    - Encodes YouTube-like hashes from a sequence of integers.
+  options:
+    _input:
+      description: A list of integers.
+      type: list
+      elements: integer
+      required: true
+    salt:
+      description:
+        - String to use as salt when hashing.
+      type: str
+    alphabet:
+      description:
+        - String of 16 or more unique characters to produce a hash.
+      type: str
+    min_length:
+      description:
+        - Minimum length of hash produced.
+      type: integer
+
+EXAMPLES: |
+  - name: Convert list of integers to hash
+    ansible.builtin.debug:
+      msg: "{{ [1, 2, 3] | community.general.hashids_encode }}"
+    # Produces: 'o2fXhV'
+
+RETURN:
+  _value:
+    description: A YouTube-like hash.
+    type: string
diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py
index f8fc4ac5bd..92996e812c 100644
--- a/plugins/filter/jc.py
+++ b/plugins/filter/jc.py
@@ -1,25 +1,78 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Filipe Niero Felisbino
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Copyright (c) 2015, Filipe Niero Felisbino
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 #
 # contributed by Kelly Brazil

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: jc
+short_description: Convert output of many shell commands and file-types to JSON
+version_added: 1.1.0
+author: Kelly Brazil (@kellyjonbrazil)
+description:
+  - Convert output of many shell commands and file-types to JSON.
+  - Uses the L(jc library,https://github.com/kellyjonbrazil/jc).
+positional: parser
+options:
+  _input:
+    description: The data to convert.
+    type: string
+    required: true
+  parser:
+    description:
+      - The correct parser for the input data.
+      - For example V(ifconfig).
+      - 'Note: use underscores instead of dashes (if any) in the parser module name.'
+      - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
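Editor's note: the version switch that the jc plugin code performs further down can be sketched as follows; it assumes jc is installed and uses the simple kv parser as an example:

import importlib

import jc

data = 'name = jc\nversion = 1.18.0'
if hasattr(jc, 'parse'):
    # jc >= 1.18.0: top-level API, which also finds plugin parsers
    result = jc.parse('kv', data, quiet=True, raw=False)
else:
    # older jc: import the parser module directly
    result = importlib.import_module('jc.parsers.kv').parse(data, quiet=True, raw=False)
print(result)  # {'name': 'jc', 'version': '1.18.0'}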
+ type: string + required: true + quiet: + description: Set to V(false) to not suppress warnings. + type: boolean + default: true + raw: + description: Set to V(true) to return pre-processed JSON. + type: boolean + default: false +requirements: + - jc installed as a Python library (U(https://pypi.org/project/jc/)) +""" + +EXAMPLES = r""" +- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller + delegate_to: localhost + ansible.builtin.pip: + name: jc + state: present + +- name: Run command + ansible.builtin.command: uname -a + register: result + +- name: Convert command's result to JSON + ansible.builtin.debug: + msg: "{{ result.stdout | community.general.jc('uname') }}" + # Possible output: + # + # "msg": { + # "hardware_platform": "x86_64", + # "kernel_name": "Linux", + # "kernel_release": "4.15.0-112-generic", + # "kernel_version": "#113-Ubuntu SMP Thu Jul 9 23:41:39 UTC 2020", + # "machine": "x86_64", + # "node_name": "kbrazil-ubuntu", + # "operating_system": "GNU/Linux", + # "processor": "x86_64" + # } +""" + +RETURN = r""" +_value: + description: The processed output. + type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError import importlib @@ -31,7 +84,7 @@ except ImportError: HAS_LIB = False -def jc(data, parser, quiet=True, raw=False): +def jc_filter(data, parser, quiet=True, raw=False): """Convert returned command output to JSON using the JC library Arguments: @@ -46,15 +99,19 @@ def jc(data, parser, quiet=True, raw=False): dictionary or list of dictionaries Example: - - name: run date command hosts: ubuntu tasks: - - shell: date + - name: install the prereqs of the jc filter (jc Python package) on the Ansible controller + delegate_to: localhost + ansible.builtin.pip: + name: jc + state: present + - ansible.builtin.shell: date register: result - - set_fact: + - ansible.builtin.set_fact: myvar: "{{ result.stdout | community.general.jc('date') }}" - - debug: + - ansible.builtin.debug: msg: "{{ myvar }}" produces: @@ -76,14 +133,20 @@ def jc(data, parser, quiet=True, raw=False): """ if not HAS_LIB: - raise AnsibleError('You need to install "jc" prior to running jc filter') + raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter') try: - jc_parser = importlib.import_module('jc.parsers.' 
+ parser) - return jc_parser.parse(data, quiet=quiet, raw=raw) + # new API (jc v1.18.0 and higher) allows use of plugin parsers + if hasattr(jc, 'parse'): + return jc.parse(parser, data, quiet=quiet, raw=raw) + + # old API (jc v1.17.7 and lower) + else: + jc_parser = importlib.import_module(f'jc.parsers.{parser}') + return jc_parser.parse(data, quiet=quiet, raw=raw) except Exception as e: - raise AnsibleFilterError('Error in jc filter plugin: %s' % e) + raise AnsibleFilterError(f'Error in jc filter plugin: {e}') class FilterModule(object): @@ -91,5 +154,5 @@ class FilterModule(object): def filters(self): return { - 'jc': jc + 'jc': jc_filter, } diff --git a/plugins/filter/json_diff.yml b/plugins/filter/json_diff.yml new file mode 100644 index 0000000000..a370564d7a --- /dev/null +++ b/plugins/filter/json_diff.yml @@ -0,0 +1,56 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_diff + short_description: Create a JSON patch by comparing two JSON files + description: + - This filter compares the input with the argument and computes a list of operations + that can be consumed by the P(community.general.json_patch_recipe#filter) to change the input + to the argument. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: target + options: + _input: + description: A list or a dictionary representing a source JSON object, or a string containing a JSON object. + type: raw + required: true + target: + description: A list or a dictionary representing a target JSON object, or a string containing a JSON object. + type: raw + required: true + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A list of JSON patch operations to apply. 
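Editor's note: json_diff defers to jsonpatch.make_patch; iterating over the result yields RFC 6902 operation dicts. A minimal sketch, assuming the jsonpatch package is installed:

import jsonpatch

src = {"foo": 1, "hello": "day"}
dst = {"foo": 1, "hello": "night", "baq": {"baz": 2}}
print(list(jsonpatch.make_patch(src, dst)))
# e.g. [{'op': 'add', 'path': '/baq', 'value': {'baz': 2}},
#       {'op': 'replace', 'path': '/hello', 'value': 'night'}]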
+    type: list
+    elements: dict
+
+EXAMPLES: |
+  - name: Compute a difference
+    ansible.builtin.debug:
+      msg: "{{ input | community.general.json_diff(target) }}"
+    vars:
+      input: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 2, 3], "hello": "day"}
+      target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"}
+    # => [
+    #   {"op": "add", "path": "/baq", "value": {"baz": 2}},
+    #   {"op": "remove", "path": "/baw/1"},
+    #   {"op": "replace", "path": "/hello", "value": "night"}
+    # ]
diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py
new file mode 100644
index 0000000000..8cd6bd08b0
--- /dev/null
+++ b/plugins/filter/json_patch.py
@@ -0,0 +1,193 @@
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+from json import loads
+from typing import TYPE_CHECKING
+from ansible.errors import AnsibleFilterError
+
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Union
+
+try:
+    import jsonpatch
+
+except ImportError as exc:
+    HAS_LIB = False
+    JSONPATCH_IMPORT_ERROR = exc
+else:
+    HAS_LIB = True
+    JSONPATCH_IMPORT_ERROR = None
+
+OPERATIONS_AVAILABLE = ["add", "copy", "move", "remove", "replace", "test"]
+OPERATIONS_NEEDING_FROM = ["copy", "move"]
+OPERATIONS_NEEDING_VALUE = ["add", "replace", "test"]
+
+
+class FilterModule:
+    """Filter plugin."""
+
+    def check_json_object(self, filter_name: str, object_name: str, inp: Any):
+        if isinstance(inp, (str, bytes, bytearray)):
+            try:
+                return loads(inp)
+            except Exception as e:
+                raise AnsibleFilterError(
+                    f"{filter_name}: could not decode JSON from {object_name}: {e}"
+                ) from e
+
+        if not isinstance(inp, (list, dict)):
+            raise AnsibleFilterError(
+                f"{filter_name}: {object_name} is not dictionary, list or string"
+            )
+
+        return inp
+
+    def check_patch_arguments(self, filter_name: str, args: dict):
+
+        if "op" not in args or not isinstance(args["op"], str):
+            raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string")
+
+        if args["op"] not in OPERATIONS_AVAILABLE:
+            raise AnsibleFilterError(
+                f"{filter_name}: unsupported 'op' argument: {args['op']}"
+            )
+
+        if "path" not in args or not isinstance(args["path"], str):
+            raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string")
+
+        if args["op"] in OPERATIONS_NEEDING_FROM:
+            if "from" not in args:
+                raise AnsibleFilterError(
+                    f"{filter_name}: 'from' argument missing for '{args['op']}' operation"
+                )
+            if not isinstance(args["from"], str):
+                raise AnsibleFilterError(
+                    f"{filter_name}: 'from' argument is not a string"
+                )
+
+    def json_patch(
+        self,
+        inp: Union[str, list, dict, bytes, bytearray],
+        op: str,
+        path: str,
+        value: Any = None,
+        **kwargs: dict,
+    ) -> Any:
+
+        if not HAS_LIB:
+            raise AnsibleFilterError(
+                "You need to install 'jsonpatch' package prior to running 'json_patch' filter"
+            ) from JSONPATCH_IMPORT_ERROR
+
+        args = {"op": op, "path": path}
+        from_arg = kwargs.pop("from", None)
+        fail_test = kwargs.pop("fail_test", False)
+
+        if kwargs:
+            raise AnsibleFilterError(
+                f"json_patch: unexpected keyword arguments: {', '.join(sorted(kwargs))}"
+            )
+
+        if not isinstance(fail_test, bool):
+            raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+        if op in OPERATIONS_NEEDING_VALUE:
+            args["value"] = value
+        if op in OPERATIONS_NEEDING_FROM and from_arg is not None:
+            args["from"] = from_arg
+
+        inp = self.check_json_object("json_patch", "input", inp)
+        self.check_patch_arguments("json_patch", args)
+
+        result = None
+
+        try:
+            result = jsonpatch.apply_patch(inp, [args])
+        except jsonpatch.JsonPatchTestFailed as e:
+            if fail_test:
+                raise AnsibleFilterError(
+                    f"json_patch: test operation failed: {e}"
+                ) from e
+            else:
+                pass
+        except Exception as e:
+            raise AnsibleFilterError(f"json_patch: patch failed: {e}") from e
+
+        return result
+
+    def json_patch_recipe(
+        self,
+        inp: Union[str, list, dict, bytes, bytearray],
+        operations: list,
+        /,
+        fail_test: bool = False,
+    ) -> Any:
+
+        if not HAS_LIB:
+            raise AnsibleFilterError(
+                "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter"
+            ) from JSONPATCH_IMPORT_ERROR
+
+        if not isinstance(operations, list):
+            raise AnsibleFilterError(
+                "json_patch_recipe: 'operations' needs to be a list"
+            )
+
+        if not isinstance(fail_test, bool):
+            raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+        result = None
+
+        inp = self.check_json_object("json_patch_recipe", "input", inp)
+        for args in operations:
+            self.check_patch_arguments("json_patch_recipe", args)
+
+        try:
+            result = jsonpatch.apply_patch(inp, operations)
+        except jsonpatch.JsonPatchTestFailed as e:
+            if fail_test:
+                raise AnsibleFilterError(
+                    f"json_patch_recipe: test operation failed: {e}"
+                ) from e
+            else:
+                pass
+        except Exception as e:
+            raise AnsibleFilterError(f"json_patch_recipe: patch failed: {e}") from e
+
+        return result
+
+    def json_diff(
+        self,
+        inp: Union[str, list, dict, bytes, bytearray],
+        target: Union[str, list, dict, bytes, bytearray],
+    ) -> list:
+
+        if not HAS_LIB:
+            raise AnsibleFilterError(
+                "You need to install 'jsonpatch' package prior to running 'json_diff' filter"
+            ) from JSONPATCH_IMPORT_ERROR
+
+        inp = self.check_json_object("json_diff", "input", inp)
+        target = self.check_json_object("json_diff", "target", target)
+
+        try:
+            result = list(jsonpatch.make_patch(inp, target))
+        except Exception as e:
+            raise AnsibleFilterError(f"JSON diff failed: {e}") from e
+
+        return result
+
+    def filters(self) -> dict[str, Callable[..., Any]]:
+        """Map filter plugin names to their functions.
+
+        Returns:
+            dict: The filter plugin functions.
+        """
+        return {
+            "json_patch": self.json_patch,
+            "json_patch_recipe": self.json_patch_recipe,
+            "json_diff": self.json_diff,
+        }
diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml
new file mode 100644
index 0000000000..42a0309202
--- /dev/null
+++ b/plugins/filter/json_patch.yml
@@ -0,0 +1,145 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: json_patch
+  short_description: Apply a JSON-Patch (RFC 6902) operation to an object
+  description:
+    - This filter applies a single JSON patch operation and returns a modified object.
+    - If the operation is a test, the filter returns an unmodified object if the test
+      succeeded and a V(none) value otherwise.
+  requirements:
+    - jsonpatch
+  version_added: 10.3.0
+  author:
+    - Stanislav Meduna (@numo68)
+  positional: op, path, value
+  options:
+    _input:
+      description: A list or a dictionary representing a JSON object, or a string containing a JSON object.
+      type: raw
+      required: true
+    op:
+      description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)).
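Editor's note: a single operation, applied the same way the json_patch method above does it; assumes the jsonpatch package is installed (apply_patch copies the document unless in_place=True):

import jsonpatch

doc = {"foo": {"one": 1}, "bar": {"two": 2}}
patched = jsonpatch.apply_patch(doc, [{"op": "add", "path": "/bar/baz", "value": "qux"}])
print(patched)  # {'foo': {'one': 1}, 'bar': {'two': 2, 'baz': 'qux'}}
print(doc)      # original is unchanged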
+ type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(op=copy), O(op=move), and O(op=remove). + type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(op=copy) and O(op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(op=test) will return V(none). If V(true), the filter + invocation will fail with an error. + type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(op=test), O(fail_test=false) and the test failed. + type: any + returned: always + +EXAMPLES: | + - name: Insert a new element into an array at a specified index + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}" + vars: + input: ["foo": { "one": 1 }, "bar": { "two": 2 }] + # => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}] + + - name: Insert a new key into a dictionary + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar/baz', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}} + + - name: Input is a string + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/baz', 3) }}" + vars: + input: '{ "foo": { "one": 1 }, "bar": { "two": 2 } }' + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": 3} + + - name: Existing key is replaced + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": "qux"} + + - name: Escaping tilde as ~0 and slash as ~1 in the path + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/~0~1', 'qux') }}" + vars: + input: {} + # => {"~/": "qux"} + + - name: Add at the end of the array + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/-', 4) }}" + vars: + input: [1, 2, 3] + # => [1, 2, 3, 4] + + - name: Remove a key + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('remove', '/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1} } + + - name: Replace a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('replace', '/bar', 2) }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": 2} + + - name: Copy a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('copy', '/baz', from='/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }} + + - name: Move a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('move', '/baz', from='/bar') }}" + vars: + input: { "foo": { 
"one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "baz": { "two": 2 }} + + - name: Successful test + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('test', '/bar/two', 2) | ternary('OK', 'Failed') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => OK + + - name: Unuccessful test + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => Failed diff --git a/plugins/filter/json_patch_recipe.yml b/plugins/filter/json_patch_recipe.yml new file mode 100644 index 0000000000..671600b941 --- /dev/null +++ b/plugins/filter/json_patch_recipe.yml @@ -0,0 +1,102 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_patch_recipe + short_description: Apply JSON-Patch (RFC 6902) operations to an object + description: + - This filter sequentially applies JSON patch operations and returns a modified object. + - If there is a test operation in the list, the filter continues if the test + succeeded and returns a V(none) value otherwise. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: operations, fail_test + options: + _input: + description: A list or a dictionary representing a JSON object, or a string containing a JSON object. + type: raw + required: true + operations: + description: A list of JSON patch operations to apply. + type: list + elements: dict + required: true + suboptions: + op: + description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)). + type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(operations[].op=copy), O(operations[].op=move), and O(operations[].op=remove). + type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(operations[].op=copy) and O(operations[].op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(operations[].op=test) will return V(none). If V(true), the filter + invocation will fail with an error. + type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(operations[].op=test), O(fail_test=false) + and the test failed. 
+ type: any + returned: always + +EXAMPLES: | + - name: Apply a series of operations + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch_recipe(operations) }}" + vars: + input: {} + operations: + - op: 'add' + path: '/foo' + value: 1 + - op: 'add' + path: '/bar' + value: [] + - op: 'add' + path: '/bar/-' + value: 2 + - op: 'add' + path: '/bar/0' + value: 1 + - op: 'remove' + path: '/bar/0' + - op: 'move' + from: '/foo' + path: '/baz' + - op: 'copy' + from: '/baz' + path: '/bax' + - op: 'copy' + from: '/baz' + path: '/bay' + - op: 'replace' + path: '/baz' + value: [10, 20, 30] + # => {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]} diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 9c835e8c71..e040a4aca2 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,23 +1,109 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Filipe Niero Felisbino -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2015, Filipe Niero Felisbino +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +DOCUMENTATION = r""" +name: json_query +short_description: Select a single element or a data subset from a complex data structure +description: + - This filter lets you query a complex JSON structure and iterate over it using a loop structure. +positional: expr +options: + _input: + description: + - The JSON data to query. + type: any + required: true + expr: + description: + - The query expression. + - See U(http://jmespath.org/examples.html) for examples. 
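Editor's note: json_query hands the expression straight to jmespath.search; a minimal sketch, assuming the jmespath package is installed:

import jmespath

domain = {"server": [
    {"name": "server11", "cluster": "cluster1", "port": "8080"},
    {"name": "server21", "cluster": "cluster2", "port": "9080"},
]}
print(jmespath.search("server[?cluster=='cluster1'].port", domain))  # ['8080']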
+    type: string
+    required: true
+requirements:
+  - jmespath
+"""

+EXAMPLES = r"""
+- name: Define data to work on in the examples below
+  ansible.builtin.set_fact:
+    domain_definition:
+      domain:
+        cluster:
+          - name: cluster1
+          - name: cluster2
+        server:
+          - name: server11
+            cluster: cluster1
+            port: '8080'
+          - name: server12
+            cluster: cluster1
+            port: '8090'
+          - name: server21
+            cluster: cluster2
+            port: '9080'
+          - name: server22
+            cluster: cluster2
+            port: '9090'
+        library:
+          - name: lib1
+            target: cluster1
+          - name: lib2
+            target: cluster2
+
+- name: Display all cluster names
+  ansible.builtin.debug:
+    var: item
+  loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
+
+- name: Display all server names
+  ansible.builtin.debug:
+    var: item
+  loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
+
+- name: Display all ports from cluster1
+  ansible.builtin.debug:
+    var: item
+  loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+  vars:
+    server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
+
+- name: Display all ports from cluster1 as a string
+  ansible.builtin.debug:
+    msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
+
+- name: Display all ports from cluster1
+  ansible.builtin.debug:
+    var: item
+  loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
+
+- name: Display all server ports and names from cluster2
+  ansible.builtin.debug:
+    var: item
+  loop: "{{ domain_definition | community.general.json_query(server_name_cluster2_query) }}"
+  vars:
+    server_name_cluster2_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}"
+
+- name: Display ports of all servers with names starting with server1
+  ansible.builtin.debug:
+    msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
+  vars:
+    server_name_query: "domain.server[?starts_with(name,'server1')].port"
+
+- name: Display ports of all servers with names containing server1
+  ansible.builtin.debug:
+    msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
+  vars:
+    server_name_query: "domain.server[?contains(name,'server1')].port"
+"""
+
+RETURN = r"""
+_value:
+  description: The result of the query.
+  type: any
+"""

 from ansible.errors import AnsibleError, AnsibleFilterError

@@ -37,17 +123,24 @@ def json_query(data, expr):
                            'json_query filter')

     # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
-    # See issue: https://github.com/ansible-collections/community.general/issues/320
-    jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
-    jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
-    jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
+    # See issues https://github.com/ansible-collections/community.general/issues/320
+    # and https://github.com/ansible/ansible/issues/85600.
+    jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + (
+        'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr',
+    )
+    jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + (
+        'AnsibleSequence', '_AnsibleLazyTemplateList',
+    )
+    jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + (
+        'AnsibleMapping', '_AnsibleLazyTemplateDict',
+    )

     try:
         return jmespath.search(expr, data)
     except jmespath.exceptions.JMESPathError as e:
-        raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+        raise AnsibleFilterError(f'JMESPathError in json_query filter plugin:\n{e}')
     except Exception as e:
         # For older jmespath, we can get ValueError and TypeError without much info.
-        raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+        raise AnsibleFilterError(f'Error in jmespath.search in json_query filter plugin:\n{e}')


 class FilterModule(object):
diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py
new file mode 100644
index 0000000000..18876789d6
--- /dev/null
+++ b/plugins/filter/keep_keys.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: keep_keys
+short_description: Keep specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+  - Vladimir Botka (@vbotka)
+  - Felix Fontein (@felixfontein)
+description: This filter keeps only specified keys from a provided list of dictionaries.
+options:
+  _input:
+    description:
+      - A list of dictionaries.
+      - Top level keys must be strings.
+    type: list
+    elements: dictionary
+    required: true
+  target:
+    description:
+      - A single key or key pattern to keep, or a list of keys or key patterns to keep.
+      - If O(matching_parameter=regex) there must be exactly one pattern provided.
+    type: raw
+    required: true
+  matching_parameter:
+    description: Specify the matching option of target keys.
+    type: str
+    default: equal
+    choices:
+      equal: Matches keys of exactly one of the O(target) items.
+      starts_with: Matches keys that start with one of the O(target) items.
+      ends_with: Matches keys that end with one of the O(target) items.
+      regex:
+        - Matches keys that match the regular expression provided in O(target).
+        - In this case, O(target) must be a regex string or a list with a single regex string.
+"""

+EXAMPLES = r"""
+- l:
+    - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+    - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+  # 1) By default match keys that equal any of the items in the target.
+- t: [k0_x0, k1_x1]
+  r: "{{ l | community.general.keep_keys(target=t) }}"
+
+  # 2) Match keys that start with any of the items in the target.
+- t: [k0, k1]
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
+
+  # 3) Match keys that end with any of the items in target.
+- t: [x0, x1]
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
+
+  # 4) Match keys by the regex.
+- t: ['^.*[01]_x.*$']
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
+
+  # 5) Match keys by the regex.
+- t: '^.*[01]_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. +- r: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + # 6) By default match keys that equal the target. +- t: k0_x0 + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 7) Match keys that start with the target. +- t: k0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. +- t: x0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. +- t: '^.*0_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. +- r: + - {k0_x0: A0} + - {k0_x0: A1} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with selected keys. + type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def keep_keys(data, target=None, matching_parameter='equal'): + """keep specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is not None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'keep_keys': keep_keys, + } diff --git a/plugins/filter/list.py b/plugins/filter/list.py deleted file mode 100644 index 460e45194f..0000000000 --- a/plugins/filter/list.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020, Vladimir Botka -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError, AnsibleFilterError -from ansible.module_utils.six import string_types -from ansible.module_utils.common._collections_compat import Mapping, Sequence -from collections import defaultdict -from operator import itemgetter - - -def lists_mergeby(l1, l2, index): - ''' merge lists by attribute index. Example: - - debug: msg="{{ l1|community.general.lists_mergeby(l2, 'index')|list }}" ''' - - if not isinstance(l1, Sequence): - raise AnsibleFilterError('First argument for community.general.lists_mergeby must be list. %s is %s' % - (l1, type(l1))) - - if not isinstance(l2, Sequence): - raise AnsibleFilterError('Second argument for community.general.lists_mergeby must be list. %s is %s' % - (l2, type(l2))) - - if not isinstance(index, string_types): - raise AnsibleFilterError('Third argument for community.general.lists_mergeby must be string. %s is %s' % - (index, type(index))) - - d = defaultdict(dict) - for l in (l1, l2): - for elem in l: - if not isinstance(elem, Mapping): - raise AnsibleFilterError('Elements of list arguments for lists_mergeby must be dictionaries. 
Found {0!r}.'.format(elem))
-            if index in elem.keys():
-                d[elem[index]].update(elem)
-    return sorted(d.values(), key=itemgetter(index))
-
-
-class FilterModule(object):
-    ''' Ansible list filters '''
-
-    def filters(self):
-        return {
-            'lists_mergeby': lists_mergeby,
-        }
diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py
new file mode 100644
index 0000000000..0bae08f24c
--- /dev/null
+++ b/plugins/filter/lists.py
@@ -0,0 +1,200 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.collections import is_sequence
+
+
+def remove_duplicates(lst):
+    seen = set()
+    seen_add = seen.add
+    result = []
+    for item in lst:
+        try:
+            if item not in seen:
+                seen_add(item)
+                result.append(item)
+        except TypeError:
+            # This happens when `item` is unhashable. In that case,
+            # convert `seen` to a list and continue.
+            seen = list(seen)
+            seen_add = seen.append
+            if item not in seen:
+                seen_add(item)
+                result.append(item)
+    return result
+
+
+def flatten_list(lst):
+    result = []
+    for sublist in lst:
+        if not is_sequence(sublist):
+            msg = ("All arguments must be lists. %s is %s")
+            raise AnsibleFilterError(msg % (sublist, type(sublist)))
+        if len(sublist) > 0:
+            if all(is_sequence(sub) for sub in sublist):
+                for item in sublist:
+                    result.append(item)
+            else:
+                result.append(sublist)
+    return result
+
+
+def lists_union(*args, **kwargs):
+    lists = args
+    flatten = kwargs.pop('flatten', False)
+
+    if kwargs:
+        # Some unused kwargs remain
+        raise AnsibleFilterError(
+            f"lists_union() got unexpected keyword arguments: {', '.join(kwargs.keys())}"
+        )
+
+    if flatten:
+        lists = flatten_list(args)
+
+    if not lists:
+        return []
+
+    if len(lists) == 1:
+        return lists[0]
+
+    a = lists[0]
+    for b in lists[1:]:
+        a = do_union(a, b)
+    return remove_duplicates(a)
+
+
+def do_union(a, b):
+    return a + b
+
+
+def lists_intersect(*args, **kwargs):
+    lists = args
+    flatten = kwargs.pop('flatten', False)
+
+    if kwargs:
+        # Some unused kwargs remain
+        raise AnsibleFilterError(
+            f"lists_intersect() got unexpected keyword arguments: {', '.join(kwargs.keys())}"
+        )
+
+    if flatten:
+        lists = flatten_list(args)
+
+    if not lists:
+        return []
+
+    if len(lists) == 1:
+        return lists[0]
+
+    a = remove_duplicates(lists[0])
+    for b in lists[1:]:
+        a = do_intersect(a, b)
+    return a
+
+
+def do_intersect(a, b):
+    isect = []
+    try:
+        other = set(b)
+        isect = [item for item in a if item in other]
+    except TypeError:
+        # This happens for unhashable values,
+        # use a list instead and redo.
+        other = list(b)
+        isect = [item for item in a if item in other]
+    return isect
+
+
+def lists_difference(*args, **kwargs):
+    lists = args
+    flatten = kwargs.pop('flatten', False)
+
+    if kwargs:
+        # Some unused kwargs remain
+        raise AnsibleFilterError(
+            f"lists_difference() got unexpected keyword arguments: {', '.join(kwargs.keys())}"
+        )
+
+    if flatten:
+        lists = flatten_list(args)
+
+    if not lists:
+        return []
+
+    if len(lists) == 1:
+        return lists[0]
+
+    a = remove_duplicates(lists[0])
+    for b in lists[1:]:
+        a = do_difference(a, b)
+    return a
+
+
+def do_difference(a, b):
+    diff = []
+    try:
+        other = set(b)
+        diff = [item for item in a if item not in other]
+    except TypeError:
+        # This happens for unhashable values,
+        # use a list instead and redo.
+        other = list(b)
+        diff = [item for item in a if item not in other]
+    return diff
+
+
+def lists_symmetric_difference(*args, **kwargs):
+    lists = args
+    flatten = kwargs.pop('flatten', False)
+
+    if kwargs:
+        # Some unused kwargs remain
+        raise AnsibleFilterError(
+            f"lists_symmetric_difference() got unexpected keyword arguments: {', '.join(kwargs.keys())}"
+        )
+
+    if flatten:
+        lists = flatten_list(args)
+
+    if not lists:
+        return []
+
+    if len(lists) == 1:
+        return lists[0]
+
+    a = lists[0]
+    for b in lists[1:]:
+        a = do_symmetric_difference(a, b)
+    return a
+
+
+def do_symmetric_difference(a, b):
+    sym_diff = []
+    union = lists_union(a, b)
+    try:
+        isect = set(a) & set(b)
+        sym_diff = [item for item in union if item not in isect]
+    except TypeError:
+        # This happens for unhashable values,
+        # build the intersection of `a` and `b` backed
+        # by a list instead of a set and redo.
+        isect = lists_intersect(a, b)
+        sym_diff = [item for item in union if item not in isect]
+    return sym_diff
+
+
+class FilterModule(object):
+    ''' Ansible lists jinja2 filters '''
+
+    def filters(self):
+        return {
+            'lists_union': lists_union,
+            'lists_intersect': lists_intersect,
+            'lists_difference': lists_difference,
+            'lists_symmetric_difference': lists_symmetric_difference,
+        }
diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml
new file mode 100644
index 0000000000..630e77cf0a
--- /dev/null
+++ b/plugins/filter/lists_difference.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: lists_difference
+  short_description: Difference of lists with a predictive order
+  version_added: 8.4.0
+  description:
+    - Provide a unique list of all the elements from the first list that do not appear in the other lists.
+    - The order of the items in the resulting list is preserved.
+  options:
+    _input:
+      description: A list.
+      type: list
+      elements: any
+      required: true
+    flatten:
+      description: Whether to remove one hierarchy level from the input list.
+      type: boolean
+      default: false
+  author:
+    - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+  - name: Return the difference of list1 and list2.
+    ansible.builtin.debug:
+      msg: "{{ list1 | community.general.lists_difference(list2) }}"
+    vars:
+      list1: [1, 2, 5, 3, 4, 10]
+      list2: [1, 2, 3, 4, 5, 11, 99]
+    # => [10]
+
+  - name: Return the difference of list1, list2 and list3.
+    ansible.builtin.debug:
+      msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}"
+    vars:
+      list1: [1, 2, 5, 3, 4, 10]
+      list2: [1, 2, 3, 4, 5, 11, 99]
+      list3: [1, 2, 3, 4, 5, 10, 99, 101]
+    # => []
+
+RETURN:
+  _value:
+    description: A unique list of all the elements from the first list that do not appear in the other lists.
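
All four set operations above share one idiom: try to build a `set` for O(1) membership tests, then fall back to a plain list when the input contains unhashable items such as dictionaries. A self-contained sketch of that fallback, using only the standard library:

```python
def difference(a, b):
    """Items of a that are not in b, preserving the order of a."""
    try:
        other = set(b)   # fast path: hashable items only
    except TypeError:
        other = list(b)  # fallback: O(n*m), but works for dicts and lists
    return [item for item in a if item not in other]


print(difference([1, 2, 5, 3, 4, 10], [1, 2, 3, 4, 5, 11, 99]))  # [10]
print(difference([{"x": 1}, {"y": 2}], [{"y": 2}]))              # [{'x': 1}]
```
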
+ type: list + elements: any diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml new file mode 100644 index 0000000000..d2ea9483b1 --- /dev/null +++ b/plugins/filter/lists_intersect.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_intersect + short_description: Intersection of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the common elements of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the intersection of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_intersect(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [1, 2, 5, 3, 4] + + - name: Return the intersection of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [1, 2, 5, 3, 4] + +RETURN: + _value: + description: A unique list of all the common elements from the provided lists. + type: list + elements: any diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py new file mode 100644 index 0000000000..4b8bf971f4 --- /dev/null +++ b/plugins/filter/lists_mergeby.py @@ -0,0 +1,277 @@ +# Copyright (c) 2020-2024, Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: lists_mergeby +short_description: Merge two or more lists of dictionaries by a given attribute +version_added: 2.0.0 +author: Vladimir Botka (@vbotka) +description: + - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) control the merging + of the nested dictionaries and lists. + - The function C(merge_hash) from C(ansible.utils.vars) is used. + - To learn details on how to use the parameters O(recursive) and O(list_merge) see Ansible User's Guide chapter "Using filters + to manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the filter P(ansible.builtin.combine#filter). +positional: another_list, index +options: + _input: + description: + - A list of dictionaries, or a list of lists of dictionaries. + - The required type of the C(elements) is set to C(raw) because all elements of O(_input) can be either dictionaries + or lists. + type: list + elements: raw + required: true + another_list: + description: + - Another list of dictionaries, or a list of lists of dictionaries. + - This parameter can be specified multiple times. + type: list + elements: raw + index: + description: + - The dictionary key that must be present in every dictionary in every list that is used to merge the lists. + type: string + required: true + recursive: + description: + - Should the combine recursively merge nested dictionaries (hashes). 
+ - B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg). + type: boolean + default: false + list_merge: + description: + - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. + type: string + default: replace + choices: + - replace + - keep + - append + - prepend + - append_rp + - prepend_rp +""" + +EXAMPLES = r""" +# Some results below are manually formatted for better readability. The +# dictionaries' keys will be sorted alphabetically in real output. + +- name: Example 1. Merge two lists. The results r1 and r2 are the same. + ansible.builtin.debug: + msg: | + r1: {{ r1 }} + r2: {{ r2 }} + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + list2: + - {index: a, foo: bar} + - {index: c, foo: baz} + r1: "{{ list1 | community.general.lists_mergeby(list2, 'index') }}" + r2: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r1: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# r2: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} + +- name: Example 2. Merge three lists + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + list2: + - {index: a, foo: bar} + - {index: c, foo: baz} + list3: + - {index: d, foo: qux} + r: "{{ [list1, list2, list3] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 3. Merge single list. The result is the same as 2. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + - {index: a, foo: bar} + - {index: c, foo: baz} + - {index: d, foo: qux} + r: "{{ [list1, []] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 4. Merge two lists. By default, replace nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: [Y1, Y2]} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 5. Merge two lists. Append nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', list_merge='append') }}" + +# r: +# - {index: a, foo: [X1, X2, Y1, Y2]} +# - {index: b, foo: [X1, X2, Y1, Y2]} + +- name: Example 6. Merge two lists. By default, do not merge nested dictionaries. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: {y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 7. Merge two lists. Merge nested dictionaries too. 
+ ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', recursive=true) }}" + +# r: +# - {index: a, foo: {x:1, y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} +""" + +RETURN = r""" +_value: + description: The merged list. + type: list + elements: dictionary +""" + +from ansible.errors import AnsibleFilterError +from collections.abc import Mapping, Sequence +from ansible.utils.vars import merge_hash + +from collections import defaultdict +from operator import itemgetter + + +def list_mergeby(x, y, index, recursive=False, list_merge='replace'): + '''Merge 2 lists by attribute 'index'. The function 'merge_hash' + from ansible.utils.vars is used. This function is used by the + function lists_mergeby. + ''' + + d = defaultdict(dict) + for lst in (x, y): + for elem in lst: + if not isinstance(elem, Mapping): + msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s" + raise AnsibleFilterError(msg % (elem, type(elem))) + if index in elem.keys(): + d[elem[index]].update(merge_hash(d[elem[index]], elem, recursive, list_merge)) + return sorted(d.values(), key=itemgetter(index)) + + +def lists_mergeby(*terms, **kwargs): + '''Merge 2 or more lists by attribute 'index'. To learn details + on how to use the parameters 'recursive' and 'list_merge' see + the filter ansible.builtin.combine. + ''' + + recursive = kwargs.pop('recursive', False) + list_merge = kwargs.pop('list_merge', 'replace') + if kwargs: + raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments.") + if len(terms) < 2: + raise AnsibleFilterError("At least one list and index are needed.") + + # allow the user to do `[list1, list2, ...] | lists_mergeby('index')` + flat_list = [] + for sublist in terms[:-1]: + if not isinstance(sublist, Sequence): + msg = ("All arguments before the argument index for community.general.lists_mergeby " + "must be lists. %s is %s") + raise AnsibleFilterError(msg % (sublist, type(sublist))) + if len(sublist) > 0: + if all(isinstance(lst, Sequence) for lst in sublist): + for item in sublist: + flat_list.append(item) + else: + flat_list.append(sublist) + lists = flat_list + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + index = terms[-1] + + if not isinstance(index, str): + msg = ("First argument after the lists for community.general.lists_mergeby must be string. 
" + "%s is %s") + raise AnsibleFilterError(msg % (index, type(index))) + + high_to_low_prio_list_iterator = reversed(lists) + result = next(high_to_low_prio_list_iterator) + for list in high_to_low_prio_list_iterator: + result = list_mergeby(list, result, index, recursive, list_merge) + + return result + + +class FilterModule(object): + ''' Ansible list filters ''' + + def filters(self): + return { + 'lists_mergeby': lists_mergeby, + } diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml new file mode 100644 index 0000000000..abd8caab8a --- /dev/null +++ b/plugins/filter/lists_symmetric_difference.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_symmetric_difference + short_description: Symmetric Difference of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list containing the symmetric difference of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the symmetric difference of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_symmetric_difference(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [10, 11, 99] + + - name: Return the symmetric difference of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [11, 1, 2, 3, 4, 5, 101] + +RETURN: + _value: + description: A unique list containing the symmetric difference of two or more lists. + type: list + elements: any diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml new file mode 100644 index 0000000000..8c1ffb4f87 --- /dev/null +++ b/plugins/filter/lists_union.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_union + short_description: Union of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the elements of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the union of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_union(list2, list3) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [1, 2, 5, 3, 4, 10, 11, 99, 101] + + - name: Return the union of list1 and list2. 
+ ansible.builtin.debug: + msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [1, 2, 5, 3, 4, 10, 11, 99] + +RETURN: + _value: + description: A unique list of all the elements from the provided lists. + type: list + elements: any diff --git a/plugins/filter/path_join_shim.py b/plugins/filter/path_join_shim.py deleted file mode 100644 index 9734298a15..0000000000 --- a/plugins/filter/path_join_shim.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020-2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -import os.path - - -def path_join(list): - '''Join list of paths. - - This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10. - This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details. - ''' - return os.path.join(*list) - - -class FilterModule(object): - '''Ansible jinja2 filters''' - - def filters(self): - return { - 'path_join': path_join, - } diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index 7d25555aa9..e5e6201f1c 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,46 +1,63 @@ -# -*- coding: utf-8 -*- -# (c) 2020 Ansible Project -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +DOCUMENTATION = r""" +name: random_mac +short_description: Generate a random MAC address +description: + - Generates random networking interfaces MAC addresses for a given prefix. +options: + _input: + description: A string prefix to use as a basis for the random MAC generated. + type: string + required: true + seed: + description: + - A randomization seed to initialize the process, used to get repeatable results. + - If no seed is provided, a system random source such as C(/dev/urandom) is used. + required: false + type: string +""" + +EXAMPLES = r""" +- name: Random MAC given a prefix + ansible.builtin.debug: + msg: "{{ '52:54:00' | community.general.random_mac }}" + # => '52:54:00:ef:1c:03' + +- name: With a seed + ansible.builtin.debug: + msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" +""" + +RETURN = r""" +_value: + description: The generated MAC. 
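
The completion logic in the function below relies on a small numeric trick: `randint(68719476736, 1099511627775)` draws from `[16**9, 16**10 - 1]`, so the hex rendering always has exactly 10 digits and can simply be truncated to however many digits the prefix still needs. A seeded standalone sketch (the seed string is arbitrary):

```python
import re
from random import Random

prefix = "52:54:00"                # 3 of the 6 MAC bytes are fixed
items = prefix.split(":")
remain = 2 * (6 - len(items))      # hex digits still to generate: 6

r = Random("example-seed")         # seeded, hence repeatable output
v = r.randint(16**9, 16**10 - 1)   # always renders as exactly 10 hex digits
rnd = f"{v:x}"[:remain]
print(prefix + re.sub(r"(..)", r":\1", rnd))  # for example 52:54:00:0d:5f:a3
```
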
+  type: string
+"""

 import re
 from random import Random, SystemRandom

 from ansible.errors import AnsibleFilterError
-from ansible.module_utils.six import string_types


 def random_mac(value, seed=None):
     ''' takes string prefix, and return it completed with random bytes
         to get a complete 6 bytes MAC address '''

-    if not isinstance(value, string_types):
-        raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' %
-                                 (type(value), value))
+    if not isinstance(value, str):
+        raise AnsibleFilterError(f'Invalid value type ({type(value)}) for random_mac ({value})')

     value = value.lower()
     mac_items = value.split(':')

     if len(mac_items) > 5:
-        raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated'
-                                 ' items max' % value)
+        raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: 5 colon(:) separated items max')

     err = ""
     for mac in mac_items:
@@ -48,11 +65,11 @@ def random_mac(value, seed=None):
             err += ",empty item"
             continue
         if not re.match('[a-f0-9]{2}', mac):
-            err += ",%s not hexa byte" % mac
+            err += f",{mac} not hexa byte"
     err = err.strip(',')
     if err:
-        raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err))
+        raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: {err}')

     if seed is None:
         r = SystemRandom()
@@ -62,7 +79,7 @@ def random_mac(value, seed=None):
     v = r.randint(68719476736, 1099511627775)
     # Select first n chars to complement input prefix
     remain = 2 * (6 - len(mac_items))
-    rnd = ('%x' % v)[:remain]
+    rnd = f'{v:x}'[:remain]
     return value + re.sub(r'(..)', r':\1', rnd)
diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py
new file mode 100644
index 0000000000..fc134b41d0
--- /dev/null
+++ b/plugins/filter/remove_keys.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: remove_keys
+short_description: Remove specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+  - Vladimir Botka (@vbotka)
+  - Felix Fontein (@felixfontein)
+description: This filter removes the specified keys from a provided list of dictionaries.
+options:
+  _input:
+    description:
+      - A list of dictionaries.
+      - Top level keys must be strings.
+    type: list
+    elements: dictionary
+    required: true
+  target:
+    description:
+      - A single key or key pattern to remove, or a list of keys or key patterns to remove.
+      - If O(matching_parameter=regex) there must be exactly one pattern provided.
+    type: raw
+    required: true
+  matching_parameter:
+    description: Specify the matching option of target keys.
+    type: str
+    default: equal
+    choices:
+      equal: Matches keys equal to one of the O(target) items.
+      starts_with: Matches keys that start with one of the O(target) items.
+      ends_with: Matches keys that end with one of the O(target) items.
+      regex:
+        - Matches keys that match the regular expression provided in O(target).
+        - In this case, O(target) must be a regex string or a list with a single regex string.
+"""
+
+EXAMPLES = r"""
+- l:
+    - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+    - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+  # 1) By default match keys that equal any of the items in the target.
+- t: [k0_x0, k1_x1] + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. +- t: [k0, k1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. +- t: [x0, x1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. +- t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. +- t: '^.*[01]_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. +- r: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + + # 6) By default match keys that equal the target. +- t: k0_x0 + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 7) Match keys that start with the target. +- t: k0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. +- t: x0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. +- t: '^.*0_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. +- r: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with selected keys removed. + type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def remove_keys(data, target=None, matching_parameter='equal'): + """remove specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key not in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return not key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return not key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'remove_keys': remove_keys, + } diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py new file mode 100644 index 0000000000..5af0b22f62 --- /dev/null +++ b/plugins/filter/replace_keys.py @@ -0,0 +1,178 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: replace_keys +short_description: Replace specific keys in a list of dictionaries +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter replaces specified keys in a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. 
+    type: list
+    elements: dictionary
+    required: true
+  target:
+    description:
+      - A list of dictionaries with attributes C(before) and C(after).
+      - The value of O(target[].after) replaces the key matching O(target[].before).
+    type: list
+    elements: dictionary
+    required: true
+    suboptions:
+      before:
+        description:
+          - A key or key pattern to change.
+          - The interpretation of O(target[].before) depends on O(matching_parameter).
+          - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used.
+        type: str
+      after:
+        description: The value to change a matching key to.
+        type: str
+  matching_parameter:
+    description: Specify the matching option of target keys.
+    type: str
+    default: equal
+    choices:
+      equal: Matches keys equal to one of the O(target[].before) items.
+      starts_with: Matches keys that start with one of the O(target[].before) items.
+      ends_with: Matches keys that end with one of the O(target[].before) items.
+      regex: Matches keys that match one of the regular expressions provided in O(target[].before).
+"""

+EXAMPLES = r"""
+- l:
+    - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+    - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+  # 1) By default, replace keys that are equal to any of the attributes before.
+- t:
+    - {before: k0_x0, after: a0}
+    - {before: k1_x1, after: a1}
+  r: "{{ l | community.general.replace_keys(target=t) }}"
+
+  # 2) Replace keys that start with any of the attributes before.
+- t:
+    - {before: k0, after: a0}
+    - {before: k1, after: a1}
+  r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}"
+
+  # 3) Replace keys that end with any of the attributes before.
+- t:
+    - {before: x0, after: a0}
+    - {before: x1, after: a1}
+  r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}"
+
+  # 4) Replace keys that match any regex of the attributes before.
+- t:
+    - {before: "^.*0_x.*$", after: a0}
+    - {before: "^.*1_x.*$", after: a1}
+  r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
+
+  # The results of above examples 1-4 are all the same.
+- r:
+    - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
+    - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
+
+  # 5) If multiple keys match the same attribute before, the value of the last matching key will be used.
+- t:
+    - {before: "^.*_x.*$", after: X}
+  r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
+
+  # gives
+
+- r:
+    - X: foo
+    - X: bar
+
+  # 6) If there are items with an equal attribute before, the first one will be used.
+- t:
+    - {before: "^.*_x.*$", after: X}
+    - {before: "^.*_x.*$", after: Y}
+  r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
+
+  # gives
+
+- r:
+    - X: foo
+    - X: bar
+
+  # 7) If there are multiple matches for a key, the first one will be used.
+- l:
+    - {aaa1: A, bbb1: B, ccc1: C}
+    - {aaa2: D, bbb2: E, ccc2: F}
+- t:
+    - {before: a, after: X}
+    - {before: aa, after: Y}
+  r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}"
+
+  # gives
+
+- r:
+    - {X: A, bbb1: B, ccc1: C}
+    - {X: D, bbb2: E, ccc2: F}
+"""

+RETURN = r"""
+_value:
+  description: The list of dictionaries with replaced keys.
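
The collision rules in examples 5-7 follow directly from how the filter is built: `replace_key()` returns the first matching `after`, and the final dict comprehension lets a later key overwrite an earlier one that was renamed to the same name. A plain-Python sketch of both rules:

```python
targets = [("a", "X"), ("aa", "Y")]  # (before, after) pairs, 'starts_with' mode


def replace_key(key):
    for before, after in targets:
        if key.startswith(before):
            return after  # first match wins, so 'aa' -> 'Y' is unreachable
    return key


row = {"aaa1": "A", "bbb1": "B"}
print({replace_key(k): v for k, v in row.items()})  # {'X': 'A', 'bbb1': 'B'}

# When several keys collapse into one new name, the comprehension keeps the
# value of the last key processed:
row = {"k0_x0": "A0", "k3_x3": "foo"}
print({("X" if "_x" in k else k): v for k, v in row.items()})  # {'X': 'foo'}
```
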
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_dict) + + +def replace_keys(data, target=None, matching_parameter='equal'): + """replace specific keys in a list of dictionaries""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tz = _keys_filter_target_dict(target, matching_parameter) + + if matching_parameter == 'equal': + def replace_key(key): + for b, a in tz: + if key == b: + return a + return key + elif matching_parameter == 'starts_with': + def replace_key(key): + for b, a in tz: + if key.startswith(b): + return a + return key + elif matching_parameter == 'ends_with': + def replace_key(key): + for b, a in tz: + if key.endswith(b): + return a + return key + elif matching_parameter == 'regex': + def replace_key(key): + for b, a in tz: + if b.match(key): + return a + return key + + return [{replace_key(k): v for k, v in d.items()} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'replace_keys': replace_keys, + } diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py new file mode 100644 index 0000000000..e068702355 --- /dev/null +++ b/plugins/filter/reveal_ansible_type.py @@ -0,0 +1,147 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: reveal_ansible_type +short_description: Return input type +version_added: "9.2.0" +author: Vladimir Botka (@vbotka) +description: This filter returns input type. +options: + _input: + description: Input data. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary +""" + +EXAMPLES = r""" +# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr +# ---------------------------------------------------------------- + +# String. AnsibleUnicode or _AnsibleTaggedStr. +- data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' +# result => AnsibleUnicode (or _AnsibleTaggedStr) + +# String. AnsibleUnicode/_AnsibleTaggedStr alias str. +- alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => str + +# List. All items are AnsibleUnicode/_AnsibleTaggedStr. +- data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' +# result => list[AnsibleUnicode] or list[_AnsibleTaggedStr] + +# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr. +- data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' +# result => dict[AnsibleUnicode, AnsibleUnicode] or dict[_AnsibleTaggedStr, _AnsibleTaggedStr] + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +- result: '{{ "abc" | community.general.reveal_ansible_type }}' +# result => str + +# Integer +- result: '{{ 123 | community.general.reveal_ansible_type }}' +# result => int + +# Float +- result: '{{ 123.45 | community.general.reveal_ansible_type }}' +# result => float + +# Boolean +- result: '{{ true | community.general.reveal_ansible_type }}' +# result => bool + +# List. All items are strings. 
+- result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' +# result => list[str] + +# List of dictionaries. +- result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' +# result => list[dict] + +# Dictionary. All keys are strings. All values are integers. +- result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Dictionary. All keys are strings. All values are integers. +- result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str +# ------------------------------------------------------------ + +# Dictionary. The keys are integers or strings. All values are strings. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int|str, str] + +# Dictionary. All keys are integers. All values are keys. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int, str] + +# Dictionary. All keys are strings. Multiple types values. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[str, bool|dict|float|int|list|str] + +# List. Multiple types items. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => list[bool|dict|float|int|list|str] +""" + +RETURN = r""" +_value: + description: Type of the data. + type: str +""" + +from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type + + +def reveal_ansible_type(data, alias=None): + """Returns data type""" + + # TODO: expose use_native_type parameter + return _ansible_type(data, alias) + + +class FilterModule(object): + + def filters(self): + return { + 'reveal_ansible_type': reveal_ansible_type + } diff --git a/plugins/filter/time.py b/plugins/filter/time.py index 3b44ad0e49..e48e24216a 100644 --- a/plugins/filter/time.py +++ b/plugins/filter/time.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re from ansible.errors import AnsibleFilterError @@ -46,15 +45,20 @@ def multiply(factors): def to_time_unit(human_time, unit='ms', **kwargs): ''' Return a time unit from a human readable string ''' + + # No need to handle 0 + if human_time == "0": + return 0 + unit_to_short_form = UNIT_TO_SHORT_FORM unit_factors = UNIT_FACTORS unit = unit_to_short_form.get(unit.rstrip('s'), unit) if unit not in unit_factors: - raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. 
" - "Available units (singular or plural): %s. " - "Available short units: %s" - % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys()))) + raise AnsibleFilterError(( + f"to_time_unit() can not convert to the following unit: {unit}. Available units (singular or plural):" + f"{', '.join(unit_to_short_form.keys())}. Available short units: {', '.join(unit_factors.keys())}" + )) if 'year' in kwargs: unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')] @@ -62,14 +66,14 @@ def to_time_unit(human_time, unit='ms', **kwargs): unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')] if kwargs: - raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys())) + raise AnsibleFilterError(f"to_time_unit() got unknown keyword arguments: {', '.join(kwargs.keys())}") result = 0 for h_time_string in human_time.split(): res = re.match(r'(-?\d+)(\w+)', h_time_string) if not res: raise AnsibleFilterError( - "to_time_unit() can not interpret following string: %s" % human_time) + f"to_time_unit() can not interpret following string: {human_time}") h_time_int = int(res.group(1)) h_time_unit = res.group(2) @@ -77,7 +81,7 @@ def to_time_unit(human_time, unit='ms', **kwargs): h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit) if h_time_unit not in unit_factors: raise AnsibleFilterError( - "to_time_unit() can not interpret following string: %s" % human_time) + f"to_time_unit() can not interpret following string: {human_time}") time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit]) result += time_in_milliseconds diff --git a/plugins/filter/to_days.yml b/plugins/filter/to_days.yml new file mode 100644 index 0000000000..c76697f1ee --- /dev/null +++ b/plugins/filter/to_days.yml @@ -0,0 +1,45 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_days + short_description: Converts a duration string to days + version_added: 0.2.0 + description: + - Parse a human readable time duration string and convert to days. + options: + _input: + description: + - The time string to convert. + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). + - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. + - Examples are V(1h), V(-5m), and V(3h -5m 6s). + type: string + required: true + year: + description: + - Number of days per year. + default: 365 + type: float + month: + description: + - Number of days per month. + default: 30 + type: float + author: + - René Moser (@resmo) + +EXAMPLES: | + - name: Convert a duration into days + ansible.builtin.debug: + msg: "{{ '1y 7m 5d 30h' | community.general.to_days }}" + +RETURN: + _value: + description: Number of days. 
+ type: float diff --git a/plugins/filter/to_hours.yml b/plugins/filter/to_hours.yml new file mode 100644 index 0000000000..520740897b --- /dev/null +++ b/plugins/filter/to_hours.yml @@ -0,0 +1,45 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_hours + short_description: Converts a duration string to hours + version_added: 0.2.0 + description: + - Parse a human readable time duration string and convert to hours. + options: + _input: + description: + - The time string to convert. + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). + - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. + - Examples are V(1h), V(-5m), and V(3h -5m 6s). + type: string + required: true + year: + description: + - Number of days per year. + default: 365 + type: float + month: + description: + - Number of days per month. + default: 30 + type: float + author: + - René Moser (@resmo) + +EXAMPLES: | + - name: Convert a duration into hours + ansible.builtin.debug: + msg: "{{ '7d 30h 20m 10s 123ms' | community.general.to_hours }}" + +RETURN: + _value: + description: Number of hours. + type: float diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py new file mode 100644 index 0000000000..a70740b8aa --- /dev/null +++ b/plugins/filter/to_ini.py @@ -0,0 +1,100 @@ + +# Copyright (c) 2023, Steffen Scheib +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: to_ini +short_description: Converts a dictionary to the INI file format +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts a dictionary to the INI file format. +options: + _input: + description: The dictionary that should be converted to the INI format. + type: dictionary + required: true +""" + +EXAMPLES = r""" +- name: Define a dictionary + ansible.builtin.set_fact: + my_dict: + section_name: + key_name: 'key value' + + another_section: + connection: 'ssh' + +- name: Write dictionary to INI file + ansible.builtin.copy: + dest: /tmp/test.ini + content: '{{ my_dict | community.general.to_ini }}' + + # /tmp/test.ini will look like this: + # [section_name] + # key_name = key value + # + # [another_section] + # connection = ssh +""" + +RETURN = r""" +_value: + description: A string formatted as INI file. 
+  type: string
+"""
+
+from collections.abc import Mapping
+from configparser import ConfigParser
+from io import StringIO
+from ansible.errors import AnsibleFilterError
+
+
+class IniParser(ConfigParser):
+    ''' Implements a configparser which sets the correct optionxform '''
+
+    def __init__(self):
+        super().__init__(interpolation=None)
+        self.optionxform = str
+
+
+def to_ini(obj):
+    ''' Read the given dict and return an INI formatted string '''
+
+    if not isinstance(obj, Mapping):
+        raise AnsibleFilterError(f'to_ini requires a dict, got {type(obj)}')
+
+    ini_parser = IniParser()
+
+    try:
+        ini_parser.read_dict(obj)
+    except Exception as ex:
+        raise AnsibleFilterError(f'to_ini failed to parse given dict: {ex}', orig_exc=ex)
+
+    # catching empty dicts
+    if obj == dict():
+        raise AnsibleFilterError('to_ini received an empty dict. '
+                                 'An empty dict cannot be converted.')
+
+    config = StringIO()
+    ini_parser.write(config)
+
+    # config.getvalue() ends with two \n; split on the final character
+    # to trim exactly one of them from the resulting string
+    return ''.join(config.getvalue().rsplit(config.getvalue()[-1], 1))
+
+
+class FilterModule(object):
+    ''' Query filter '''
+
+    def filters(self):
+
+        return {
+            'to_ini': to_ini
+        }
diff --git a/plugins/filter/to_milliseconds.yml b/plugins/filter/to_milliseconds.yml
new file mode 100644
index 0000000000..f25bd86623
--- /dev/null
+++ b/plugins/filter/to_milliseconds.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: to_milliseconds
+  short_description: Converts a duration string to milliseconds
+  version_added: 0.2.0
+  description:
+    - Parse a human readable time duration string and convert to milliseconds.
+  options:
+    _input:
+      description:
+        - The time string to convert.
+        - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+          V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+          and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+          can be added to a unit as well, so V(seconds) is the same as V(second).
+        - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
+      type: string
+      required: true
+    year:
+      description:
+        - Number of days per year.
+      default: 365
+      type: float
+    month:
+      description:
+        - Number of days per month.
+      default: 30
+      type: float
+  author:
+    - René Moser (@resmo)
+
+EXAMPLES: |
+  - name: Convert a duration into milliseconds
+    ansible.builtin.debug:
+      msg: "{{ '30h 20m 10s 123ms' | community.general.to_milliseconds }}"
+
+RETURN:
+  _value:
+    description: Number of milliseconds.
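
Returning to `to_ini` above: the final line only trims a single trailing newline, because `ConfigParser.write` emits a blank line after every section, leaving the serialized string with two newlines at the end. A quick standalone check of both the round trip and the trim:

```python
from configparser import ConfigParser
from io import StringIO

parser = ConfigParser(interpolation=None)
parser.optionxform = str  # keep key case, like the filter's IniParser
parser.read_dict({"section_name": {"key_name": "key value"}})

buf = StringIO()
parser.write(buf)
text = buf.getvalue()
print(repr(text[-2:]))                       # '\n\n'
trimmed = "".join(text.rsplit(text[-1], 1))  # the filter's trim, verbatim
print(repr(trimmed[-2:]))                    # 'e\n' - one newline remains
```
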
+ type: float diff --git a/plugins/filter/to_minutes.yml b/plugins/filter/to_minutes.yml new file mode 100644 index 0000000000..924fb6feb3 --- /dev/null +++ b/plugins/filter/to_minutes.yml @@ -0,0 +1,45 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_minutes + short_description: Converts a duration string to minutes + version_added: 0.2.0 + description: + - Parse a human readable time duration string and convert to minutes. + options: + _input: + description: + - The time string to convert. + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). + - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. + - Examples are V(1h), V(-5m), and V(3h -5m 6s). + type: string + required: true + year: + description: + - Number of days per year. + default: 365 + type: float + month: + description: + - Number of days per month. + default: 30 + type: float + author: + - René Moser (@resmo) + +EXAMPLES: | + - name: Convert a duration into minutes + ansible.builtin.debug: + msg: "{{ '30h 20m 10s 123ms' | community.general.to_minutes }}" + +RETURN: + _value: + description: Number of minutes. + type: float diff --git a/plugins/filter/to_months.yml b/plugins/filter/to_months.yml new file mode 100644 index 0000000000..09e9c38b5d --- /dev/null +++ b/plugins/filter/to_months.yml @@ -0,0 +1,45 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_months + short_description: Convert a duration string to months + version_added: 0.2.0 + description: + - Parse a human readable time duration string and convert to months. + options: + _input: + description: + - The time string to convert. + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). + - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. + - Examples are V(1h), V(-5m), and V(3h -5m 6s). + type: string + required: true + year: + description: + - Number of days per year. + default: 365 + type: float + month: + description: + - Number of days per month. + default: 30 + type: float + author: + - René Moser (@resmo) + +EXAMPLES: | + - name: Convert a duration into months + ansible.builtin.debug: + msg: "{{ '1y 7m 5d 30h' | community.general.to_months }}" + +RETURN: + _value: + description: Number of months. 
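
Since the duration filters above share the same `year` and `month` options, it is worth noting that both are plain day counts fed into the factor table, so a conversion is just a ratio. A back-of-the-envelope check against the documented defaults:

```python
DAYS_PER_YEAR = 365.0   # 'year' option default
DAYS_PER_MONTH = 30.0   # 'month' option default

print(DAYS_PER_YEAR / DAYS_PER_MONTH)  # 12.1666... == '1y' | to_months
print(360.0 / DAYS_PER_MONTH)          # 12.0 == '1y' | to_months(year=360)
```
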
+ type: float diff --git a/plugins/filter/to_nice_yaml.yml b/plugins/filter/to_nice_yaml.yml new file mode 100644 index 0000000000..fe7a316f46 --- /dev/null +++ b/plugins/filter/to_nice_yaml.yml @@ -0,0 +1,89 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_nice_yaml + author: + - Ansible Core Team + - Felix Fontein (@felixfontein) + version_added: 11.3.0 + short_description: Convert variable to YAML string + description: + - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter). + - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function. + positional: _input + options: + _input: + description: + - A variable or expression that returns a data structure. + type: raw + required: true + indent: + description: + - Number of spaces to indent Python structures, mainly used for display to humans. + type: integer + default: 2 + sort_keys: + description: + - Affects sorting of dictionary keys. + default: true + type: bool + default_style: + description: + - Indicates the style of the scalar. + choices: + - '' + - "'" + - '"' + - '|' + - '>' + type: string + canonical: + description: + - If set to V(true), export tag type to the output. + type: bool + width: + description: + - Set the preferred line width. + type: integer + line_break: + description: + - Specify the line break. + type: string + encoding: + description: + - Specify the output encoding. + type: string + explicit_start: + description: + - If set to V(true), adds an explicit start using C(---). + type: bool + explicit_end: + description: + - If set to V(true), adds an explicit end using C(...). + type: bool + redact_sensitive_values: + description: + - If set to V(true), vaulted strings are replaced by V() instead of being decrypted. + - With future ansible-core versions, this can extend to other strings tagged as sensitive. + - B(Note) that with ansible-core 2.18 and before this might not yield the expected result + since these versions of ansible-core strip the vault information away from strings that are + part of more complex data structures specified in C(vars). + type: bool + default: false + notes: + - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details. + - >- + These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode). + +EXAMPLES: | + --- + # Dump variable in a template to create a YAML document + value: "{{ github_workflow | community.general.to_nice_yaml }}" + +RETURN: + _value: + description: + - The YAML serialized string representing the variable structure inputted. 
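
A rough standalone illustration of what the wrapper delegates to PyYAML (assuming PyYAML is installed; `default_flow_style` is passed here only to force block style and may not match the filter's exact defaults):

```python
import yaml

data = {"b": [1, 2], "a": {"nested": True}}
print(yaml.dump(data, indent=2, sort_keys=True, explicit_start=True,
                default_flow_style=False))
# ---
# a:
#   nested: true
# b:
# - 1
# - 2
```
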
diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py
new file mode 100644
index 0000000000..266a426cf2
--- /dev/null
+++ b/plugins/filter/to_prettytable.py
@@ -0,0 +1,409 @@
+# Copyright (c) 2025, Timur Gadiev
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: to_prettytable
+short_description: Format a list of dictionaries as an ASCII table
+version_added: "10.7.0"
+author: Timur Gadiev (@tgadiev)
+description:
+  - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library.
+requirements:
+  - prettytable
+options:
+  _input:
+    description: A list of dictionaries to format.
+    type: list
+    elements: dictionary
+    required: true
+  column_order:
+    description: List of column names to specify the order of columns in the table.
+    type: list
+    elements: string
+  header_names:
+    description: List of custom header names to use instead of dictionary keys.
+    type: list
+    elements: string
+  column_alignments:
+    description:
+      - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center),
+        C(right), C(l), C(c), or C(r).
+      - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the
+        right."
+    type: dictionary
+"""
+
+EXAMPLES = r"""
+- name: Set a list of users
+  ansible.builtin.set_fact:
+    users:
+      - name: Alice
+        age: 25
+        role: admin
+      - name: Bob
+        age: 30
+        role: user
+
+- name: Display a list of users as a table
+  ansible.builtin.debug:
+    msg: >-
+      {{
+        users | community.general.to_prettytable
+      }}
+
+- name: Display a table with custom column ordering
+  ansible.builtin.debug:
+    msg: >-
+      {{
+        users | community.general.to_prettytable(
+          column_order=['role', 'name', 'age']
+        )
+      }}
+
+- name: Display a table with selective column output (only show name and role fields)
+  ansible.builtin.debug:
+    msg: >-
+      {{
+        users | community.general.to_prettytable(
+          column_order=['name', 'role']
+        )
+      }}
+
+- name: Display a table with custom headers
+  ansible.builtin.debug:
+    msg: >-
+      {{
+        users | community.general.to_prettytable(
+          header_names=['User Name', 'User Age', 'User Role']
+        )
+      }}
+
+- name: Display a table with custom alignments
+  ansible.builtin.debug:
+    msg: >-
+      {{
+        users | community.general.to_prettytable(
+          column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+        )
+      }}
+
+- name: Combine multiple options
+  ansible.builtin.debug:
+    msg: >-
+      {{
+        users | community.general.to_prettytable(
+          column_order=['role', 'name', 'age'],
+          header_names=['Position', 'Full Name', 'Years'],
+          column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+        )
+      }}
+"""
+
+RETURN = r"""
+_value:
+  description: The formatted ASCII table.
+  type: string
+"""
+
+try:
+    import prettytable
+    HAS_PRETTYTABLE = True
+except ImportError:
+    HAS_PRETTYTABLE = False
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.text.converters import to_text
+
+
+class TypeValidationError(AnsibleFilterError):
+    """Custom exception for type validation errors.
+
+    Args:
+        obj: The object with incorrect type
+        expected: Description of expected type
+    """
+    def __init__(self, obj, expected):
+        type_name = "string" if isinstance(obj, str) else type(obj).__name__
+        super().__init__(f"Expected {expected}, got a {type_name}")
+
+
+def _validate_list_param(param, param_name, ensure_strings=True):
+    """Validate a parameter is a list and optionally ensure all elements are strings.
+
+    Args:
+        param: The parameter to validate
+        param_name: The name of the parameter for error messages
+        ensure_strings: Whether to check that all elements are strings
+
+    Raises:
+        AnsibleFilterError: If validation fails
+    """
+    # Map parameter names to their original error message format
+    error_messages = {
+        "column_order": "a list of column names",
+        "header_names": "a list of header names"
+    }
+
+    # Use the specific error message if available, otherwise use a generic one
+    error_msg = error_messages.get(param_name, f"a list for {param_name}")
+
+    if not isinstance(param, list):
+        raise TypeValidationError(param, error_msg)
+
+    if ensure_strings:
+        for item in param:
+            if not isinstance(item, str):
+                # Maintain original error message format
+                if param_name == "column_order":
+                    error_msg = "a string for column name"
+                elif param_name == "header_names":
+                    error_msg = "a string for header name"
+                else:
+                    error_msg = f"a string for {param_name} element"
+                raise TypeValidationError(item, error_msg)
+
+
+def _match_key(item_dict, lookup_key):
+    """Find a matching key in a dictionary, handling type conversion.
+
+    Args:
+        item_dict: Dictionary to search in
+        lookup_key: Key to look for, possibly needing type conversion
+
+    Returns:
+        The matching key or None if no match found
+    """
+    # Direct key match
+    if lookup_key in item_dict:
+        return lookup_key
+
+    # Try boolean conversion for 'true'/'false' strings
+    if isinstance(lookup_key, str):
+        if lookup_key.lower() == 'true' and True in item_dict:
+            return True
+        if lookup_key.lower() == 'false' and False in item_dict:
+            return False
+
+        # Try numeric conversion for string numbers
+        if lookup_key.isdigit() and int(lookup_key) in item_dict:
+            return int(lookup_key)
+
+    # No match found
+    return None
+
+
+def _build_key_maps(data):
+    """Build mappings between string keys and original keys.
+
+    Args:
+        data: List of dictionaries with keys to map
+
+    Returns:
+        Tuple of (key_map, reverse_key_map)
+    """
+    key_map = {}
+    reverse_key_map = {}
+
+    # Check if the data list is not empty
+    if not data:
+        return key_map, reverse_key_map
+
+    first_dict = data[0]
+    for orig_key in first_dict.keys():
+        # Store string version of the key
+        str_key = to_text(orig_key)
+        key_map[str_key] = orig_key
+        # Also store lowercase version for case-insensitive lookups
+        reverse_key_map[str_key.lower()] = orig_key
+
+    return key_map, reverse_key_map
+
+
+def _configure_alignments(table, field_names, column_alignments):
+    """Configure column alignments for the table.
+
+    Args:
+        table: The PrettyTable instance to configure
+        field_names: List of field names to align
+        column_alignments: Dict of column alignments
+    """
+    valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+    if not isinstance(column_alignments, dict):
+        return
+
+    for col_name, alignment in column_alignments.items():
+        if col_name in field_names:
+            # We already validated alignment is a string and a valid value in the main function
+            # Just apply it here
+            alignment = alignment.lower()
+            table.align[col_name] = alignment[0]
+
+
+def to_prettytable(data, *args, **kwargs):
+    """Convert a list of dictionaries to an ASCII table.
+
+    Args:
+        data: List of dictionaries to format
+        *args: Optional list of column names to specify column order
+        **kwargs: Optional keyword arguments:
+            - column_order: List of column names to specify the order
+            - header_names: List of custom header names
+            - column_alignments: Dict of column alignments (left, center, right)
+
+    Returns:
+        String containing the ASCII table
+    """
+    if not HAS_PRETTYTABLE:
+        raise AnsibleFilterError(
+            'You need to install "prettytable" Python module to use this filter'
+        )
+
+    # === Input validation ===
+    # Validate list type
+    if not isinstance(data, list):
+        raise TypeValidationError(data, "a list of dictionaries")
+
+    # Validate dictionary items if list is not empty
+    if data and not all(isinstance(item, dict) for item in data):
+        invalid_item = next((item for item in data if not isinstance(item, dict)), None)
+        raise TypeValidationError(invalid_item, "all items in the list to be dictionaries")
+
+    # Get sample dictionary to determine fields - empty if no data
+    sample_dict = data[0] if data else {}
+    max_fields = len(sample_dict)
+
+    # === Process column order ===
+    # Handle both positional and keyword column_order
+    column_order = kwargs.pop('column_order', None)
+
+    # Check for conflict between args and column_order
+    if args and column_order is not None:
+        raise AnsibleFilterError("Cannot use both positional arguments and the 'column_order' keyword argument")
+
+    # Use positional args if provided
+    if args:
+        column_order = list(args)
+
+    # Validate column_order
+    if column_order is not None:
+        _validate_list_param(column_order, "column_order")
+
+        # Validate column_order doesn't exceed the number of fields (skip if data is empty)
+        if data and len(column_order) > max_fields:
+            raise AnsibleFilterError(
+                f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})")
+
+    # === Process headers ===
+    # Determine field names and ensure they are strings
+    if column_order:
+        field_names = column_order
+    else:
+        # Use field names from first dictionary, ensuring all are strings
+        field_names = [to_text(k) for k in sample_dict]
+
+    # Process custom headers
+    header_names = kwargs.pop('header_names', None)
+    if header_names is not None:
+        _validate_list_param(header_names, "header_names")
+
+        # Validate header_names doesn't exceed the number of fields (skip if data is empty)
+        if data and len(header_names) > max_fields:
+            raise AnsibleFilterError(
+                f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})")
+
+        # Validate that column_order and header_names have the same size if both provided
+        if column_order is not None and len(column_order) != len(header_names):
+            raise AnsibleFilterError(
+                f"'column_order' and 'header_names' must have the same number of elements. "
" + f"Got {len(column_order)} columns and {len(header_names)} headers.") + + # === Process alignments === + # Get column alignments and validate + column_alignments = kwargs.pop('column_alignments', {}) + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + # Validate column_alignments is a dictionary + if not isinstance(column_alignments, dict): + raise TypeValidationError(column_alignments, "a dictionary for column_alignments") + + # Validate column_alignments keys and values + for key, value in column_alignments.items(): + # Check that keys are strings + if not isinstance(key, str): + raise TypeValidationError(key, "a string for column_alignments key") + + # Check that values are strings + if not isinstance(value, str): + raise TypeValidationError(value, "a string for column_alignments value") + + # Check that values are valid alignments + if value.lower() not in valid_alignments: + raise AnsibleFilterError( + f"Invalid alignment '{value}' in 'column_alignments'. " + f"Valid alignments are: {', '.join(sorted(valid_alignments))}") + + # Validate column_alignments doesn't have more keys than fields (skip if data is empty) + if data and len(column_alignments) > max_fields: + raise AnsibleFilterError( + f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})") + + # Check for unknown parameters + if kwargs: + raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}") + + # === Build the table === + table = prettytable.PrettyTable() + + # Set the field names for display + display_names = header_names if header_names is not None else field_names + table.field_names = [to_text(name) for name in display_names] + + # Configure alignments after setting field_names + _configure_alignments(table, display_names, column_alignments) + + # Build key maps only if not using explicit column_order and we have data + key_map = {} + reverse_key_map = {} + if not column_order and data: # Only needed when using original dictionary keys and we have data + key_map, reverse_key_map = _build_key_maps(data) + + # If we have an empty list with no custom parameters, return a simple empty table + if not data and not column_order and not header_names and not column_alignments: + return "++\n++" + + # Process each row if we have data + for item in data: + row = [] + for col in field_names: + # Try direct mapping first + if col in key_map: + row.append(item.get(key_map[col], "")) + else: + # Try to find a matching key in the item + matched_key = _match_key(item, col) + if matched_key is not None: + row.append(item.get(matched_key, "")) + else: + # Try case-insensitive lookup as last resort + lower_col = col.lower() if isinstance(col, str) else str(col).lower() + if lower_col in reverse_key_map: + row.append(item.get(reverse_key_map[lower_col], "")) + else: + # No match found + row.append("") + table.add_row(row) + + return to_text(table) + + +class FilterModule(object): + """Ansible core jinja2 filters.""" + + def filters(self): + return { + 'to_prettytable': to_prettytable + } diff --git a/plugins/filter/to_seconds.yml b/plugins/filter/to_seconds.yml new file mode 100644 index 0000000000..49b69d6d69 --- /dev/null +++ b/plugins/filter/to_seconds.yml @@ -0,0 +1,45 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_seconds + 
diff --git a/plugins/filter/to_seconds.yml b/plugins/filter/to_seconds.yml
new file mode 100644
index 0000000000..49b69d6d69
--- /dev/null
+++ b/plugins/filter/to_seconds.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: to_seconds
+  short_description: Converts a duration string to seconds
+  version_added: 0.2.0
+  description:
+    - Parse a human readable time duration string and convert to seconds.
+  options:
+    _input:
+      description:
+        - The time string to convert.
+        - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+          V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+          and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+          can be added to a unit as well, so V(seconds) is the same as V(second).
+        - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
+      type: string
+      required: true
+    year:
+      description:
+        - Number of days per year.
+      default: 365
+      type: float
+    month:
+      description:
+        - Number of days per month.
+      default: 30
+      type: float
+  author:
+    - René Moser (@resmo)
+
+EXAMPLES: |
+  - name: Convert a duration into seconds
+    ansible.builtin.debug:
+      msg: "{{ '30h 20m 10s 123ms' | community.general.to_seconds }}"
+
+RETURN:
+  _value:
+    description: Number of seconds.
+    type: float
diff --git a/plugins/filter/to_time_unit.yml b/plugins/filter/to_time_unit.yml
new file mode 100644
index 0000000000..256ca573f4
--- /dev/null
+++ b/plugins/filter/to_time_unit.yml
@@ -0,0 +1,89 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: to_time_unit
+  short_description: Converts a duration string to the given time unit
+  version_added: 0.2.0
+  description:
+    - Parse a human readable time duration string and convert to the given time unit.
+  positional: unit
+  options:
+    _input:
+      description:
+        - The time string to convert.
+        - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+          V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+          and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+          can be added to a unit as well, so V(seconds) is the same as V(second).
+        - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
+      type: string
+      required: true
+    unit:
+      description:
+        - Time unit to convert the duration to.
+      default: ms
+      choices:
+        - millisecond
+        - milliseconds
+        - ms
+        - msec
+        - msecs
+        - msecond
+        - mseconds
+        - s
+        - sec
+        - secs
+        - second
+        - seconds
+        - h
+        - hour
+        - hours
+        - hs
+        - m
+        - min
+        - mins
+        - minute
+        - minutes
+        - d
+        - ds
+        - day
+        - days
+        - w
+        - ws
+        - week
+        - weeks
+        - mo
+        - mos
+        - month
+        - months
+        - y
+        - ys
+        - year
+        - years
+      type: string
+    year:
+      description:
+        - Number of days per year.
+      default: 365
+      type: float
+    month:
+      description:
+        - Number of days per month.
+      default: 30
+      type: float
+  author:
+    - René Moser (@resmo)
+
+EXAMPLES: |
+  - name: Convert a duration into seconds
+    ansible.builtin.debug:
+      msg: "{{ '1053d 17h 53m -10s 391ms' | community.general.to_time_unit('s') }}"
+
+RETURN:
+  _value:
+    description: Number of time units.
+    type: float
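to_time_unit generalizes the fixed-unit filters: the target unit is passed as a positional argument, and the day counts for years and months can be overridden. An illustrative sketch, not part of this diff:

```yaml
- name: Convert a duration to hours, assuming a 365.25-day year
  ansible.builtin.debug:
    msg: "{{ '2y 3d' | community.general.to_time_unit('h', year=365.25) }}"
```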
diff --git a/plugins/filter/to_weeks.yml b/plugins/filter/to_weeks.yml
new file mode 100644
index 0000000000..750e77c378
--- /dev/null
+++ b/plugins/filter/to_weeks.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: to_weeks
+  short_description: Converts a duration string to weeks
+  version_added: 0.2.0
+  description:
+    - Parse a human readable time duration string and convert to weeks.
+  options:
+    _input:
+      description:
+        - The time string to convert.
+        - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+          V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+          and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+          can be added to a unit as well, so V(seconds) is the same as V(second).
+        - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
+      type: string
+      required: true
+    year:
+      description:
+        - Number of days per year.
+      default: 365
+      type: float
+    month:
+      description:
+        - Number of days per month.
+      default: 30
+      type: float
+  author:
+    - René Moser (@resmo)
+
+EXAMPLES: |
+  - name: Convert a duration into weeks
+    ansible.builtin.debug:
+      msg: "{{ '1y 7m 5d 30h' | community.general.to_weeks }}"
+
+RETURN:
+  _value:
+    description: Number of weeks.
+    type: float
diff --git a/plugins/filter/to_yaml.py b/plugins/filter/to_yaml.py
new file mode 100644
index 0000000000..905b04271c
--- /dev/null
+++ b/plugins/filter/to_yaml.py
@@ -0,0 +1,113 @@
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+import typing as t
+from collections.abc import Mapping, Set
+
+from yaml import dump
+try:
+    from yaml.cyaml import CSafeDumper as SafeDumper
+except ImportError:
+    from yaml import SafeDumper
+
+from ansible.module_utils.common.collections import is_sequence
+try:
+    # This is ansible-core 2.19+
+    from ansible.utils.vars import transform_to_native_types
+    from ansible.parsing.vault import VaultHelper, VaultLib
+except ImportError:
+    transform_to_native_types = None
+
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import AnsibleUnsafe
+
+
+def _to_native_types_compat(value: t.Any, *, redact_value: str | None) -> t.Any:
+    """Compatibility function for ansible-core 2.18 and before."""
+    if value is None:
+        return value
+    if isinstance(value, AnsibleUnsafe):
+        # This only works up to ansible-core 2.18:
+        return _to_native_types_compat(value._strip_unsafe(), redact_value=redact_value)
+        # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway.
+    if isinstance(value, Mapping):
+        return {
+            _to_native_types_compat(key, redact_value=redact_value): _to_native_types_compat(val, redact_value=redact_value)
+            for key, val in value.items()
+        }
+    if isinstance(value, Set):
+        return {_to_native_types_compat(elt, redact_value=redact_value) for elt in value}
+    if is_sequence(value):
+        return [_to_native_types_compat(elt, redact_value=redact_value) for elt in value]
+    if isinstance(value, AnsibleVaultEncryptedUnicode):
+        if redact_value is not None:
+            return redact_value
+        # This only works up to ansible-core 2.18:
+        return value.data
+        # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway.
+    if isinstance(value, bytes):
+        return bytes(value)
+    if isinstance(value, str):
+        return str(value)
+
+    return value
+
+
+def _to_native_types(value: t.Any, *, redact: bool) -> t.Any:
+    if isinstance(value, Mapping):
+        return {_to_native_types(k, redact=redact): _to_native_types(v, redact=redact) for k, v in value.items()}
+    if is_sequence(value):
+        return [_to_native_types(e, redact=redact) for e in value]
+    if redact:
+        ciphertext = VaultHelper.get_ciphertext(value, with_tags=False)
+        if ciphertext and VaultLib.is_encrypted(ciphertext):
+            return "<redacted>"
+    return transform_to_native_types(value, redact=redact)
+
+
+def remove_all_tags(value: t.Any, *, redact_sensitive_values: bool = False) -> t.Any:
+    """
+    Remove all tags from all values in the input.
+
+    If ``redact_sensitive_values`` is ``True``, all sensitive values will be redacted.
+    """
+    if transform_to_native_types is not None:
+        return _to_native_types(value, redact=redact_sensitive_values)
+
+    return _to_native_types_compat(
+        value,
+        redact_value="<redacted>" if redact_sensitive_values else None,  # same string as in ansible-core 2.19 by transform_to_native_types()
+    )
+
+
+def to_yaml(value: t.Any, *, redact_sensitive_values: bool = False, default_flow_style: bool | None = None, **kwargs) -> str:
+    """Serialize input as terse flow-style YAML."""
+    return dump(
+        remove_all_tags(value, redact_sensitive_values=redact_sensitive_values),
+        Dumper=SafeDumper,
+        allow_unicode=True,
+        default_flow_style=default_flow_style,
+        **kwargs,
+    )
+
+
+def to_nice_yaml(value: t.Any, *, redact_sensitive_values: bool = False, indent: int = 2, default_flow_style: bool = False, **kwargs) -> str:
+    """Serialize input as verbose multi-line YAML."""
+    return to_yaml(
+        value,
+        redact_sensitive_values=redact_sensitive_values,
+        default_flow_style=default_flow_style,
+        indent=indent,
+        **kwargs,
+    )
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            'to_yaml': to_yaml,
+            'to_nice_yaml': to_nice_yaml,
+        }
diff --git a/plugins/filter/to_yaml.yml b/plugins/filter/to_yaml.yml
new file mode 100644
index 0000000000..066f8d990d
--- /dev/null
+++ b/plugins/filter/to_yaml.yml
@@ -0,0 +1,92 @@
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: to_yaml
+  author:
+    - Ansible Core Team
+    - Felix Fontein (@felixfontein)
+  version_added: 11.3.0
+  short_description: Convert variable to YAML string
+  description:
+    - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter) does.
+    - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function.
+  positional: _input
+  options:
+    _input:
+      description:
+        - A variable or expression that returns a data structure.
+      type: raw
+      required: true
+    indent:
+      description:
+        - Number of spaces to indent Python structures, mainly used for display to humans.
+      type: integer
+    sort_keys:
+      description:
+        - Affects sorting of dictionary keys.
+      default: true
+      type: bool
+    default_style:
+      description:
+        - Indicates the style of the scalar.
+      choices:
+        - ''
+        - "'"
+        - '"'
+        - '|'
+        - '>'
+      type: string
+    canonical:
+      description:
+        - If set to V(true), export tag type to the output.
+      type: bool
+    width:
+      description:
+        - Set the preferred line width.
+      type: integer
+    line_break:
+      description:
+        - Specify the line break.
+      type: string
+    encoding:
+      description:
+        - Specify the output encoding.
+      type: string
+    explicit_start:
+      description:
+        - If set to V(true), adds an explicit start using C(---).
+      type: bool
+    explicit_end:
+      description:
+        - If set to V(true), adds an explicit end using C(...).
+      type: bool
+    redact_sensitive_values:
+      description:
+        - If set to V(true), vaulted strings are replaced by V(<redacted>) instead of being decrypted.
+        - With future ansible-core versions, this can extend to other strings tagged as sensitive.
+        - B(Note) that with ansible-core 2.18 and before this might not yield the expected result
+          since these versions of ansible-core strip the vault information away from strings that are
+          part of more complex data structures specified in C(vars).
+      type: bool
+      default: false
+  notes:
+    - More options may be available; see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
+    - >-
+      These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode).
+
+EXAMPLES: |
+  ---
+  # Dump variable in a template to create a YAML document
+  value: "{{ github_workflow | community.general.to_yaml }}"
+
+  ---
+  # Same as above but 'prettier' (equivalent to community.general.to_nice_yaml filter)
+  value: "{{ docker_config | community.general.to_yaml(indent=2) }}"
+
+RETURN:
+  _value:
+    description:
+      - The YAML serialized string representing the input variable structure.
+    type: string
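A sketch of the redaction behaviour documented above (not part of this diff; `my_secrets` is a hypothetical variable that may contain vault-encrypted strings):

```yaml
- name: Dump a structure without decrypting vaulted values
  ansible.builtin.debug:
    msg: "{{ my_secrets | community.general.to_nice_yaml(redact_sensitive_values=true) }}"
# Vaulted strings appear as "<redacted>" in the output instead of their
# decrypted plaintext (on ansible-core 2.19+; see the 2.18 caveat above).
```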
diff --git a/plugins/filter/to_years.yml b/plugins/filter/to_years.yml
new file mode 100644
index 0000000000..62f282a8b6
--- /dev/null
+++ b/plugins/filter/to_years.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: to_years
+  short_description: Converts a duration string to years
+  version_added: 0.2.0
+  description:
+    - Parse a human readable time duration string and convert to years.
+  options:
+    _input:
+      description:
+        - The time string to convert.
+        - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+          V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+          and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+          can be added to a unit as well, so V(seconds) is the same as V(second).
+        - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
+      type: string
+      required: true
+    year:
+      description:
+        - Number of days per year.
+      default: 365
+      type: float
+    month:
+      description:
+        - Number of days per month.
+      default: 30
+      type: float
+  author:
+    - René Moser (@resmo)
+
+EXAMPLES: |
+  - name: Convert a duration into years
+    ansible.builtin.debug:
+      msg: "{{ '1053d 30h' | community.general.to_years }}"
+
+RETURN:
+  _value:
+    description: Number of years.
+    type: float
diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py
index 9afbf29e3f..f1fe18402b 100644
--- a/plugins/filter/unicode_normalize.py
+++ b/plugins/filter/unicode_normalize.py
@@ -1,15 +1,58 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2021, Andrew Pantuso (@ajpantuso)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: unicode_normalize
+short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
+version_added: 3.7.0
+author: Andrew Pantuso (@Ajpantuso)
+description:
+  - Normalizes unicode strings to facilitate comparison of characters with normalized forms.
+positional: form
+options:
+  _input:
+    description: A unicode string.
+    type: string
+    required: true
+  form:
+    description:
+      - The normal form to use.
+      - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details.
+    type: string
+    default: NFC
+    choices:
+      - NFC
+      - NFD
+      - NFKC
+      - NFKD
+"""
+
+EXAMPLES = r"""
+- name: Normalize unicode string
+  ansible.builtin.set_fact:
+    normalized: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}"
+    # The resulting string has length 2: the letter 'a' followed by the
+    # combining diaeresis.
+"""
+
+RETURN = r"""
+_value:
+  description: The normalized unicode string of the specified normal form.
+  type: string
+"""
 
 from unicodedata import normalize
 
-from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
-from ansible.module_utils.six import text_type
+from ansible.errors import AnsibleFilterError
+
+try:
+    from ansible.errors import AnsibleTypeError
+except ImportError:
+    from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError
 
 
 def unicode_normalize(data, form='NFC'):
@@ -24,11 +67,11 @@ def unicode_normalize(data, form='NFC'):
         A normalized unicode string of the specified 'form'.
""" - if not isinstance(data, text_type): - raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) + if not isinstance(data, str): + raise AnsibleTypeError(f"{type(data)} is not a valid input type") if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): - raise AnsibleFilterError("%s is not a valid form" % form) + raise AnsibleFilterError(f"{form!r} is not a valid form") return normalize(form, data) diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index c59e87c9c6..893c7e5bd3 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,11 +1,39 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -from distutils.version import LooseVersion +DOCUMENTATION = r""" +name: version_sort +short_description: Sort a list according to version order instead of pure alphabetical one +version_added: 2.2.0 +author: Eric L. (@ericzolf) +description: + - Sort a list according to version order instead of pure alphabetical one. +options: + _input: + description: A list of strings to sort. + type: list + elements: string + required: true +""" + +EXAMPLES = r""" +- name: Convert list of tuples into dictionary + ansible.builtin.set_fact: + dictionary: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}" + # Result is ['2.1', '2.9', '2.10'] +""" + +RETURN = r""" +_value: + description: The list of strings sorted by version. + type: list + elements: string +""" + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion def version_sort(value, reverse=False): diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index d9bc549ed6..7374193a74 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -1,92 +1,160 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2020 Orion Poplawski # Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Orion Poplawski (@opoplawski) - name: cobbler - short_description: Cobbler inventory source - version_added: 1.0.0 +DOCUMENTATION = r""" +author: Orion Poplawski (@opoplawski) +name: cobbler +short_description: Cobbler inventory source +version_added: 1.0.0 +description: + - Get inventory hosts from the cobbler service. + - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: + cobbler) entry.' + - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. + The primary IP address is defined as the management interface if defined, or the interface who's DNS name matches the + hostname of the system, or else the first interface found. 
+extends_documentation_fragment:
+  - inventory_cache
+options:
+  plugin:
+    description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize
+      it as its own.
+    type: string
+    required: true
+    choices: ['cobbler', 'community.general.cobbler']
+  url:
+    description: URL to cobbler.
+    type: string
+    default: 'http://cobbler/cobbler_api'
+    env:
+      - name: COBBLER_SERVER
+  user:
+    description: Cobbler authentication user.
+    type: string
+    required: false
+    env:
+      - name: COBBLER_USER
+  password:
+    description: Cobbler authentication password.
+    type: string
+    required: false
+    env:
+      - name: COBBLER_PASSWORD
+  cache_fallback:
+    description: Fallback to cached results if connection to cobbler fails.
+    type: boolean
+    default: false
+  connection_timeout:
+    description: Timeout to connect to cobbler server.
+    type: int
+    required: false
+    version_added: 10.7.0
+  exclude_mgmt_classes:
+    description: Management classes to exclude from inventory.
+    type: list
+    default: []
+    elements: str
+    version_added: 7.4.0
+  exclude_profiles:
     description:
-        - Get inventory hosts from the cobbler service.
-        - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
-    extends_documentation_fragment:
-        - inventory_cache
-    options:
-      plugin:
-        description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as it's own.
-        required: yes
-        choices: [ 'cobbler', 'community.general.cobbler' ]
-      url:
-        description: URL to cobbler.
-        default: 'http://cobbler/cobbler_api'
-        env:
-            - name: COBBLER_SERVER
-      user:
-        description: Cobbler authentication user.
-        required: no
-        env:
-            - name: COBBLER_USER
-      password:
-        description: Cobbler authentication password
-        required: no
-        env:
-            - name: COBBLER_PASSWORD
-      cache_fallback:
-        description: Fallback to cached results if connection to cobbler fails
-        type: boolean
-        default: no
-      exclude_profiles:
-        description: Profiles to exclude from inventory
-        type: list
-        default: []
-        elements: str
-      group_by:
-        description: Keys to group hosts by
-        type: list
-        default: [ 'mgmt_classes', 'owners', 'status' ]
-      group:
-        description: Group to place all hosts into
-        default: cobbler
-      group_prefix:
-        description: Prefix to apply to cobbler groups
-        default: cobbler_
-      want_facts:
-        description: Toggle, if C(true) the plugin will retrieve host facts from the server
-        type: boolean
-        default: yes
-'''
+      - Profiles to exclude from inventory.
+      - Ignored if O(include_profiles) is specified.
+    type: list
+    default: []
+    elements: str
+  include_mgmt_classes:
+    description: Management classes to include in the inventory.
+    type: list
+    default: []
+    elements: str
+    version_added: 7.4.0
+  include_profiles:
+    description:
+      - Profiles to include in the inventory.
+      - If specified, all other profiles are excluded.
+      - O(exclude_profiles) is ignored if O(include_profiles) is specified.
+    type: list
+    default: []
+    elements: str
+    version_added: 4.4.0
+  inventory_hostname:
+    description:
+      - What to use for the ansible inventory hostname.
+      - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static
+        interface.
+      - If set to V(system), the cobbler system name is used.
+    type: str
+    choices: ['hostname', 'system']
+    default: hostname
+    version_added: 7.1.0
+  group_by:
+    description: Keys to group hosts by.
+    type: list
+    elements: string
+    default: ['mgmt_classes', 'owners', 'status']
+  group:
+    description: Group to place all hosts into.
+    default: cobbler
+  group_prefix:
+    description: Prefix to apply to cobbler groups.
+    default: cobbler_
+  want_facts:
+    description: Toggle, if V(true) the plugin retrieves all host facts from the server.
+    type: boolean
+    default: true
+  want_ip_addresses:
+    description:
+      - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the
+        defined O(group) mapping interface DNS names to IP addresses.
+    type: boolean
+    default: true
+    version_added: 7.1.0
+  facts_level:
+    description:
+      - Set to V(normal) to gather only system-level variables.
+      - Set to V(as_rendered) to gather all variables as rolled up by Cobbler.
+    type: string
+    choices: ['normal', 'as_rendered']
+    default: normal
+    version_added: 10.7.0
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # my.cobbler.yml
 plugin: community.general.cobbler
 url: http://cobbler/cobbler_api
 user: ansible-tester
 password: secure
-'''
+"""
 
-from distutils.version import LooseVersion
 import socket
 
 from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.module_utils.common._collections_compat import MutableMapping
-from ansible.module_utils.six import iteritems
 from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
 
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
+
 # xmlrpc
 try:
-    import xmlrpclib as xmlrpc_client
+    import xmlrpc.client as xmlrpc_client
     HAS_XMLRPC_CLIENT = True
 except ImportError:
-    try:
-        import xmlrpc.client as xmlrpc_client
-        HAS_XMLRPC_CLIENT = True
-    except ImportError:
-        HAS_XMLRPC_CLIENT = False
+    HAS_XMLRPC_CLIENT = False
+
+
+class TimeoutTransport (xmlrpc_client.SafeTransport):
+    def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+        super(TimeoutTransport, self).__init__()
+        self._timeout = timeout
+        self.context = None
+
+    def make_connection(self, host):
+        conn = xmlrpc_client.SafeTransport.make_connection(self, host)
+        conn.timeout = self._timeout
+        return conn
 
 
 class InventoryModule(BaseInventoryPlugin, Cacheable):
@@ -95,18 +163,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
     NAME = 'community.general.cobbler'
 
     def __init__(self):
-        super(InventoryModule, self).__init__()
-
-        # from config
-        self.cobbler_url = None
-        self.exclude_profiles = []  # A list of profiles to exclude
-
-        self.connection = None
-        self.token = None
-        self.cache_key = None
-        self.use_cache = None
+        super().__init__()
+
+        if not HAS_XMLRPC_CLIENT:
+            raise AnsibleError('Could not import xmlrpc client library')
 
     def verify_file(self, path):
         valid = False
@@ -117,18 +178,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
             self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"')
         return valid
 
-    def _get_connection(self):
-        if not HAS_XMLRPC_CLIENT:
-            raise AnsibleError('Could not import xmlrpc client library')
-
-        if self.connection is None:
-            self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
-            self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
-            self.token = None
-            if self.get_option('user') is not None:
-                self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
-        return self.connection
-
     def _init_cache(self):
         if self.cache_key not in self._cache:
             self._cache[self.cache_key] = {}
@@ -142,12 +191,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
     def _get_profiles(self):
         if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
-            c = self._get_connection()
             try:
                 if self.token is not None:
-                    data = c.get_profiles(self.token)
+                    data = self.cobbler.get_profiles(self.token)
                 else:
-                    data = c.get_profiles()
+                    data = self.cobbler.get_profiles()
             except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
                 self._reload_cache()
             else:
@@ -158,12 +206,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
 
     def _get_systems(self):
         if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
-            c = self._get_connection()
             try:
                 if self.token is not None:
-                    data = c.get_systems(self.token)
+                    data = self.cobbler.get_systems(self.token)
                 else:
-                    data = c.get_systems()
+                    data = self.cobbler.get_systems()
+
+                # If more facts are requested, gather them all from Cobbler
+                if self.facts_level == "as_rendered":
+                    for i, host in enumerate(data):
+                        self.display.vvvv(f"Gathering all facts for {host['name']}\n")
+                        if self.token is not None:
+                            data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token)
+                        else:
+                            data[i] = self.cobbler.get_system_as_rendered(host['name'])
             except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
                 self._reload_cache()
             else:
@@ -173,11 +229,17 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
         return self._cache[self.cache_key]['systems']
 
     def _add_safe_group_name(self, group, child=None):
-        group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
+        group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}"))
         if child is not None:
             self.inventory.add_child(group_name, child)
         return group_name
 
+    def _exclude_profile(self, profile):
+        if self.include_profiles:
+            return profile not in self.include_profiles
+        else:
+            return profile in self.exclude_profiles
+
     def parse(self, inventory, loader, path, cache=True):
 
         super(InventoryModule, self).parse(inventory, loader, path)
@@ -187,37 +249,53 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
 
         # get connection host
         self.cobbler_url = self.get_option('url')
+        self.display.vvvv(f'Connecting to {self.cobbler_url}\n')
+
+        if 'connection_timeout' in self._options:
+            self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True,
+                                                transport=TimeoutTransport(timeout=self.get_option('connection_timeout')))
+        else:
+            self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+        self.token = None
+        if self.get_option('user') is not None:
+            self.token = self.cobbler.login(str(self.get_option('user')), str(self.get_option('password')))
+
         self.cache_key = self.get_cache_key(path)
         self.use_cache = cache and self.get_option('cache')
 
+        self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes')
+        self.include_mgmt_classes = self.get_option('include_mgmt_classes')
         self.exclude_profiles = self.get_option('exclude_profiles')
+        self.include_profiles = self.get_option('include_profiles')
         self.group_by = self.get_option('group_by')
+        self.inventory_hostname = self.get_option('inventory_hostname')
+        self.facts_level = self.get_option('facts_level')
 
         for profile in self._get_profiles():
             if profile['parent']:
-                self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
-                if profile['parent'] not in self.exclude_profiles:
+                self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n")
+                if not self._exclude_profile(profile['parent']):
                     parent_group_name = self._add_safe_group_name(profile['parent'])
-                    self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
-                if profile['name'] not in self.exclude_profiles:
+                    self.display.vvvv(f'Added profile parent group {parent_group_name}\n')
+                if not self._exclude_profile(profile['name']):
                     group_name = self._add_safe_group_name(profile['name'])
-                    self.display.vvvv('Added profile group %s\n' % group_name)
+                    self.display.vvvv(f'Added profile group {group_name}\n')
                     self.inventory.add_child(parent_group_name, group_name)
             else:
-                self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
-                # Create a heirarchy of profile names
+                self.display.vvvv(f"Processing profile {profile['name']} without parent\n")
+                # Create a hierarchy of profile names
                 profile_elements = profile['name'].split('-')
                 i = 0
                 while i < len(profile_elements) - 1:
                     profile_group = '-'.join(profile_elements[0:i + 1])
                     profile_group_child = '-'.join(profile_elements[0:i + 2])
-                    if profile_group in self.exclude_profiles:
-                        self.display.vvvv('Excluding profile %s\n' % profile_group)
+                    if self._exclude_profile(profile_group):
+                        self.display.vvvv(f'Excluding profile {profile_group}\n')
                         break
                     group_name = self._add_safe_group_name(profile_group)
-                    self.display.vvvv('Added profile group %s\n' % group_name)
+                    self.display.vvvv(f'Added profile group {group_name}\n')
                     child_group_name = self._add_safe_group_name(profile_group_child)
-                    self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
+                    self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n')
                     self.inventory.add_child(group_name, child_group_name)
                     i = i + 1
 
@@ -225,54 +303,112 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
         self.group = to_safe_group_name(self.get_option('group'))
         if self.group is not None and self.group != '':
             self.inventory.add_group(self.group)
-            self.display.vvvv('Added site group %s\n' % self.group)
+            self.display.vvvv(f'Added site group {self.group}\n')
 
+        ip_addresses = {}
+        ipv6_addresses = {}
         for host in self._get_systems():
             # Get the FQDN for the host and add it to the right groups
-            hostname = host['hostname']  # None
+            if self.inventory_hostname == 'system':
+                hostname = make_unsafe(host['name'])  # None
+            else:
+                hostname = make_unsafe(host['hostname'])  # None
             interfaces = host['interfaces']
 
-            if host['profile'] in self.exclude_profiles:
-                self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
-                continue
+            if set(host['mgmt_classes']) & set(self.include_mgmt_classes):
+                self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n")
+            else:
+                if self._exclude_profile(host['profile']):
+                    self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n")
+                    continue
+
+                if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes):
+                    self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n")
+                    continue
 
             # hostname is often empty for non-static IP hosts
             if hostname == '':
-                for (iname, ivalue) in iteritems(interfaces):
+                for iname, ivalue in interfaces.items():
                     if ivalue['management'] or not ivalue['static']:
                         this_dns_name = ivalue.get('dns_name', None)
                         if this_dns_name is not None and this_dns_name != "":
-                            hostname = this_dns_name
-                            self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
+                            hostname = make_unsafe(this_dns_name)
+                            self.display.vvvv(f'Set hostname to {hostname} from {iname}\n')
 
             if hostname == '':
-                self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
+                self.display.vvvv(f"Cannot determine hostname for host {host['name']}, skipping\n")
                 continue
 
             self.inventory.add_host(hostname)
-            self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
+            self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n")
 
             # Add host to profile group
-            group_name = self._add_safe_group_name(host['profile'], child=hostname)
-            self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+            if host['profile'] != '':
+                group_name = self._add_safe_group_name(host['profile'], child=hostname)
+                self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n')
+            else:
+                self.display.warning(f'Host {hostname} has an empty profile\n')
 
             # Add host to groups specified by group_by fields
             for group_by in self.group_by:
-                if host[group_by] == '<<inherit>>':
+                if host[group_by] == '<<inherit>>' or host[group_by] == '':
                     groups = []
                 else:
                     groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
                 for group in groups:
                     group_name = self._add_safe_group_name(group, child=hostname)
-                    self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
+                    self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n')
 
             # Add to group for this inventory
             if self.group is not None:
                 self.inventory.add_child(self.group, hostname)
 
             # Add host variables
+            ip_address = None
+            ip_address_first = None
+            ipv6_address = None
+            ipv6_address_first = None
+            for iname, ivalue in interfaces.items():
+                # Set to first interface or management interface if defined or hostname matches dns_name
+                if ivalue['ip_address'] != "":
+                    if ip_address_first is None:
+                        ip_address_first = ivalue['ip_address']
+                    if ivalue['management']:
+                        ip_address = ivalue['ip_address']
+                    elif ivalue['dns_name'] == hostname and ip_address is None:
+                        ip_address = ivalue['ip_address']
+                if ivalue['ipv6_address'] != "":
+                    if ipv6_address_first is None:
+                        ipv6_address_first = ivalue['ipv6_address']
+                    if ivalue['management']:
+                        ipv6_address = ivalue['ipv6_address']
+                    elif ivalue['dns_name'] == hostname and ipv6_address is None:
+                        ipv6_address = ivalue['ipv6_address']
+
+                # Collect all interface name mappings for adding to group vars
+                if self.get_option('want_ip_addresses'):
+                    if ivalue['dns_name'] != "":
+                        if ivalue['ip_address'] != "":
+                            ip_addresses[ivalue['dns_name']] = ivalue['ip_address']
+                        if ivalue['ipv6_address'] != "":
+                            ipv6_addresses[ivalue['dns_name']] = ivalue['ipv6_address']
+
+            # Add ip_address to host if defined, use first if no management or matched dns_name
+            if ip_address is None and ip_address_first is not None:
+                ip_address = ip_address_first
+            if ip_address is not None:
+                self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address))
+            if ipv6_address is None and ipv6_address_first is not None:
+                ipv6_address = ipv6_address_first
+            if ipv6_address is not None:
+                self.inventory.set_variable(hostname, 'cobbler_ipv6_address', make_unsafe(ipv6_address))
+
             if self.get_option('want_facts'):
                 try:
-                    self.inventory.set_variable(hostname, 'cobbler', host)
+                    self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host))
                 except ValueError as e:
-                    self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
+                    self.display.warning(f"Could not set host info for {hostname}: {e}")
+
+        if self.get_option('want_ip_addresses'):
+            self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses))
+            self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses))
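Editor's note: a fuller C(my.cobbler.yml) sketch than the EXAMPLES block above, combining several of the options this change introduces. The hostname and credentials are invented, and this is illustration only, not part of the diff:

```yaml
plugin: community.general.cobbler
url: http://cobbler.example.com/cobbler_api  # hypothetical server
user: ansible
password: secure
inventory_hostname: system    # use cobbler system names instead of hostnames
facts_level: as_rendered      # gather all variables as rolled up by Cobbler
exclude_mgmt_classes: [no_ansible]
connection_timeout: 30
```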
diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py
index ddf64cd626..4a2b32680e 100644
--- a/plugins/inventory/gitlab_runners.py
+++ b/plugins/inventory/gitlab_runners.py
@@ -1,72 +1,71 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2018, Stefan Heitmueller
 # Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
 
-__metaclass__ = type
-DOCUMENTATION = '''
-    name: gitlab_runners
-    author:
-      - Stefan Heitmüller (@morph027)
-    short_description: Ansible dynamic inventory plugin for GitLab runners.
-    requirements:
-        - python >= 2.7
-        - python-gitlab > 1.8.0
-    extends_documentation_fragment:
-        - constructed
-    description:
-        - Reads inventories from the GitLab API.
-        - Uses a YAML configuration file gitlab_runners.[yml|yaml].
-    options:
-        plugin:
-            description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as it's own.
-            type: str
-            required: true
-            choices:
-              - gitlab_runners
-              - community.general.gitlab_runners
-        server_url:
-            description: The URL of the GitLab server, with protocol (i.e. http or https).
-            env:
-              - name: GITLAB_SERVER_URL
-                version_added: 1.0.0
-            type: str
-            required: true
-            default: https://gitlab.com
-        api_token:
-            description: GitLab token for logging in.
-            env:
-              - name: GITLAB_API_TOKEN
-                version_added: 1.0.0
-            type: str
-            aliases:
-              - private_token
-              - access_token
-        filter:
-            description: filter runners from GitLab API
-            env:
-              - name: GITLAB_FILTER
-                version_added: 1.0.0
-            type: str
-            choices: ['active', 'paused', 'online', 'specific', 'shared']
-        verbose_output:
-            description: Toggle to (not) include all available nodes metadata
-            type: bool
-            default: yes
-'''
+DOCUMENTATION = r"""
+name: gitlab_runners
+author:
+  - Stefan Heitmüller (@morph027)
+short_description: Ansible dynamic inventory plugin for GitLab runners
+requirements:
+  - python-gitlab > 1.8.0
+extends_documentation_fragment:
+  - constructed
+description:
+  - Reads inventories from the GitLab API.
+  - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+options:
+  plugin:
+    description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own.
+    type: str
+    required: true
+    choices:
+      - gitlab_runners
+      - community.general.gitlab_runners
+  server_url:
+    description: The URL of the GitLab server, with protocol (i.e. http or https).
+    env:
+      - name: GITLAB_SERVER_URL
+        version_added: 1.0.0
+    type: str
+    required: true
+  api_token:
+    description: GitLab token for logging in.
+    env:
+      - name: GITLAB_API_TOKEN
+        version_added: 1.0.0
+    type: str
+    aliases:
+      - private_token
+      - access_token
+  filter:
+    description: Filter runners from GitLab API.
+    env:
+      - name: GITLAB_FILTER
+        version_added: 1.0.0
+    type: str
+    choices: ['active', 'paused', 'online', 'specific', 'shared']
+  verbose_output:
+    description: Toggle to (not) include all available nodes metadata.
+    type: bool
+    default: true
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
+---
 # gitlab_runners.yml
 plugin: community.general.gitlab_runners
 host: https://gitlab.com
 
+---
 # Example using constructed features to create groups and set ansible_host
 plugin: community.general.gitlab_runners
 host: https://gitlab.com
-strict: False
+strict: false
 keyed_groups:
   # add e.g. amd64 hosts to an arch_amd64 group
   - prefix: arch
@@ -79,12 +78,13 @@ keyed_groups:
   # hint: labels containing special characters will be converted to safe names
   - key: 'tag_list'
     prefix: tag
-'''
+"""
 
 from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.module_utils.common.text.converters import to_native
 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
 
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
+
 try:
     import gitlab
     HAS_GITLAB = True
@@ -106,11 +106,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
             else:
                 runners = gl.runners.all()
             for runner in runners:
-                host = str(runner['id'])
+                host = make_unsafe(str(runner['id']))
                 ip_address = runner['ip_address']
-                host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
+                host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs'])
                 self.inventory.add_host(host, group='gitlab_runners')
-                self.inventory.set_variable(host, 'ansible_host', ip_address)
+                self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address))
                 if self.get_option('verbose_output', True):
                     self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
@@ -123,7 +123,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
                 # Create groups based on variable values and add the corresponding hosts to it
                 self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
         except Exception as e:
-            raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
+            raise AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {e}')
 
     def verify_file(self, path):
         """Return the possibly of a file being consumable by this plugin."""
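For orientation (not part of this diff), a C(gitlab_runners.yml) sketch using only the options documented above; note that the documented option is O(server_url), while the EXAMPLES above use C(host), and the token can come from the documented GITLAB_API_TOKEN environment variable rather than the file:

```yaml
plugin: community.general.gitlab_runners
server_url: https://gitlab.com
filter: online
verbose_output: false
# api_token is omitted here; export GITLAB_API_TOKEN in the environment instead.
```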
+ required: true + type: string + choices: ['community.general.icinga2'] + url: + description: Root URL of Icinga2 API. + type: string + required: true + user: + description: Username to query the API. + type: string + required: true + password: + description: Password to query the API. + type: string + required: true + host_filter: description: - - Get inventory hosts from the Icinga2 API. - - "Uses a configuration file as an inventory source, it must end in C(.icinga2.yml) or C(.icinga2.yaml)." - options: - plugin: - description: Name of the plugin. - required: true - type: string - choices: ['community.general.icinga2'] - url: - description: Root URL of Icinga2 API. - type: string - required: true - user: - description: Username to query the API. - type: string - required: true - password: - description: Password to query the API. - type: string - required: true - host_filter: - description: An Icinga2 API valid host filter. - type: string - required: false - validate_certs: - description: Enables or disables SSL certificate verification. - type: boolean - default: true -''' + - A valid Icinga2 API host filter. Leave blank for no filtering. + type: string + required: false + validate_certs: + description: Enables or disables SSL certificate verification. + type: boolean + default: true + inventory_attr: + description: + - Allows the override of the inventory name based on different attributes. + - This allows for changing the way limits are used. + - The current default, V(address), is sometimes not unique or present. We recommend using V(name) instead. + type: string + default: address + choices: ['name', 'display_name', 'address'] + version_added: 4.2.0 + group_by_hostgroups: + description: + - Uses Icinga2 hostgroups as groups. + type: boolean + default: true + version_added: 8.4.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" # my.icinga2.yml plugin: community.general.icinga2 url: http://localhost:5665 user: ansible password: secure host_filter: \"linux-servers\" in host.groups -validate_certs: false -''' +validate_certs: false # only do this when connecting to localhost! +inventory_attr: name +groups: + # simple name matching + webservers: inventory_hostname.startswith('web') + + # using icinga2 template + databaseservers: "'db-template' in (icinga2_attributes.templates|list)" + +compose: + # set all icinga2 attributes to a host variable 'icinga2_attrs' + icinga2_attrs: icinga2_attributes + + # set 'ansible_user' and 'ansible_port' from icinga2 host vars + ansible_user: icinga2_attributes.vars.ansible_user + ansible_port: icinga2_attributes.vars.ansible_port | default(22) +""" import json +from urllib.error import HTTPError from ansible.errors import AnsibleParserError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.module_utils.urls import open_url +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + + class InventoryModule(BaseInventoryPlugin, Constructable): ''' Host inventory parser for ansible using Icinga2 as source.
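The url, user, password, and host_filter options documented above end up in a single authenticated request against the Icinga2 objects endpoint. Roughly, as a sketch with assumed endpoint and credentials (Icinga2 tunnels read queries through POST plus an override header):

import json
from ansible.module_utils.urls import open_url

response = open_url(
    'https://localhost:5665/v1/objects/hosts',
    headers={'Accept': 'application/json', 'X-HTTP-Method-Override': 'GET'},
    url_username='ansible', url_password='secure', force_basic_auth=True,
    validate_certs=False,  # mirrors validate_certs: false in the example
    data=json.dumps({'attrs': ['address', 'state'], 'filter': '"linux-servers" in host.groups'}),
)
hosts = json.loads(response.read())['results']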
''' @@ -76,6 +118,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self.icinga2_password = None self.ssl_verify = None self.host_filter = None + self.inventory_attr = None + self.group_by_hostgroups = None self.cache_key = None self.use_cache = None @@ -94,7 +138,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): 'User-Agent': "ansible-icinga2-inv", 'Accept': "application/json", } - api_status_url = self.icinga2_url + "/status" + api_status_url = f"{self.icinga2_url}/status" request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -104,7 +148,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): open_url(api_status_url, **request_args) def _post_request(self, request_url, data=None): - self.display.vvv("Requested URL: %s" % request_url) + self.display.vvv(f"Requested URL: {request_url}") request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -113,30 +157,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable): } if data is not None: request_args['data'] = json.dumps(data) - self.display.vvv("Request Args: %s" % request_args) - response = open_url(request_url, **request_args) + self.display.vvv(f"Request Args: {request_args}") + try: + response = open_url(request_url, **request_args) + except HTTPError as e: + try: + error_body = json.loads(e.read().decode()) + self.display.vvv(f"Error returned: {error_body}") + except Exception: + error_body = {"status": None} + if e.code == 404 and error_body.get('status') == "No objects found.": + raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid") + raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}") + response_body = response.read() json_data = json.loads(response_body.decode('utf-8')) + self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}") if 200 <= response.status <= 299: return json_data if response.status == 404 and json_data['status'] == "No objects found.": raise AnsibleParserError( - "API returned no data -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API returned no data -- Response: {response.status} - {json_data['status']}") if response.status == 401: raise AnsibleParserError( - "API was unable to complete query -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API was unable to complete query -- Response: {response.status} - {json_data['status']}") if response.status == 500: raise AnsibleParserError( - "API Response - %s - %s" - % (json_data['status'], json_data['errors'])) + f"API Response - {json_data['status']} - {json_data['errors']}") raise AnsibleParserError( - "Unexpected data returned - %s - %s" - % (json_data['status'], json_data['errors'])) + f"Unexpected data returned - {json_data['status']} - {json_data['errors']}") def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): - query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url) + query_hosts_url = f"{self.icinga2_url}/objects/hosts" self.headers['X-HTTP-Method-Override'] = 'GET' data_dict = dict() if hosts: @@ -155,7 +207,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): """Query for all hosts """ self.display.vvv("Querying Icinga2 for inventory") query_args = { - "attrs": ["address", "state_type", "state", "groups"], + "attrs": ["address", "address6", "name", "display_name", "state_type", "state", "templates", "groups", "vars", "zone"], } if self.host_filter is not None: query_args['host_filter'] 
= self.host_filter @@ -165,6 +217,12 @@ class InventoryModule(BaseInventoryPlugin, Constructable): ansible_inv = self._convert_inv(results_json) return ansible_inv + def _apply_constructable(self, name, variables): + strict = self.get_option('strict') + self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict) + self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) + def _populate(self): groups = self._to_json(self.get_inventory_from_icinga()) return groups @@ -177,25 +235,41 @@ class InventoryModule(BaseInventoryPlugin, Constructable): """Convert Icinga2 API data to JSON format for Ansible""" groups_dict = {"_meta": {"hostvars": {}}} for entry in json_data: - host_name = entry['name'] - host_attrs = entry['attrs'] + host_attrs = make_unsafe(entry['attrs']) + if self.inventory_attr == "name": + host_name = make_unsafe(entry.get('name')) + if self.inventory_attr == "address": + # When looking for address for inventory, if missing fallback to object name + if host_attrs.get('address', '') != '': + host_name = make_unsafe(host_attrs.get('address')) + else: + host_name = make_unsafe(entry.get('name')) + if self.inventory_attr == "display_name": + host_name = host_attrs.get('display_name') if host_attrs['state'] == 0: host_attrs['state'] = 'on' else: host_attrs['state'] = 'off' - host_groups = host_attrs['groups'] - host_addr = host_attrs['address'] - self.inventory.add_host(host_addr) - for group in host_groups: - if group not in self.inventory.groups.keys(): - self.inventory.add_group(group) - self.inventory.add_child(group, host_addr) - self.inventory.set_variable(host_addr, 'address', host_addr) - self.inventory.set_variable(host_addr, 'hostname', host_name) - self.inventory.set_variable(host_addr, 'state', + self.inventory.add_host(host_name) + if self.group_by_hostgroups: + host_groups = host_attrs.get('groups') + for group in host_groups: + if group not in self.inventory.groups.keys(): + self.inventory.add_group(group) + self.inventory.add_child(group, host_name) + # If the address attribute is populated, override ansible_host with the value + if host_attrs.get('address') != '': + self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address')) + self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name'))) + self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name')) + self.inventory.set_variable(host_name, 'state', host_attrs['state']) - self.inventory.set_variable(host_addr, 'state_type', + self.inventory.set_variable(host_name, 'state_type', host_attrs['state_type']) + # Adds all attributes to a variable 'icinga2_attributes' + construct_vars = dict(self.inventory.get_host(host_name).get_vars()) + construct_vars['icinga2_attributes'] = host_attrs + self._apply_constructable(host_name, construct_vars) return groups_dict def parse(self, inventory, loader, path, cache=True): @@ -206,11 +280,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self._read_config_data(path) # Store the options from the YAML file - self.icinga2_url = self.get_option('url').rstrip('/') + '/v1' + self.icinga2_url = self.get_option('url') self.icinga2_user = self.get_option('user') self.icinga2_password = self.get_option('password') self.ssl_verify = self.get_option('validate_certs') self.host_filter = self.get_option('host_filter') + self.inventory_attr = 
self.get_option('inventory_attr') + self.group_by_hostgroups = self.get_option('group_by_hostgroups') + + if self.templar.is_template(self.icinga2_url): + self.icinga2_url = self.templar.template(variable=self.icinga2_url) + if self.templar.is_template(self.icinga2_user): + self.icinga2_user = self.templar.template(variable=self.icinga2_user) + if self.templar.is_template(self.icinga2_password): + self.icinga2_password = self.templar.template(variable=self.icinga2_password) + + self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1" + # Not currently enabled # self.cache_key = self.get_cache_key(path) # self.use_cache = cache and self.get_option('cache') diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py new file mode 100644 index 0000000000..9d4cef4a03 --- /dev/null +++ b/plugins/inventory/iocage.py @@ -0,0 +1,418 @@ + +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: iocage +short_description: C(iocage) inventory source +version_added: 10.2.0 +author: + - Vladimir Botka (@vbotka) +requirements: + - iocage >= 1.8 +description: + - Get inventory hosts from the C(iocage) jail manager running on O(host). + - By default, O(host) is V(localhost). If O(host) is not V(localhost), it is expected that the user running Ansible on the + controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list). + - Uses a configuration file as an inventory source, it must end in C(.iocage.yml) or C(.iocage.yaml). +extends_documentation_fragment: + - ansible.builtin.constructed + - ansible.builtin.inventory_cache +options: + plugin: + description: + - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as + its own. + required: true + choices: ['community.general.iocage'] + type: str + host: + description: The IP/hostname of the C(iocage) host. + type: str + default: localhost + user: + description: + - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command + C(iocage list). This option is not required if O(host=localhost). + type: str + sudo: + description: + - Enable execution as root. + - This requires passwordless sudo of the command C(iocage list*). + type: bool + default: false + version_added: 10.3.0 + sudo_preserve_env: + description: + - Preserve environment if O(sudo) is enabled. + - This requires C(SETENV) sudoers tag. + type: bool + default: false + version_added: 10.3.0 + get_properties: + description: + - Get jails' properties. Creates dictionary C(iocage_properties) for each added host. + type: bool + default: false + env: + description: + - O(user)'s environment on O(host). + - Enable O(sudo_preserve_env) if O(sudo) is enabled. + type: dict + default: {} + hooks_results: + description: + - List of paths to the files in a jail. + - Content of the files is stored in the items of the list C(iocage_hooks). + - If a file is not available, the item keeps the dash character C(-). + - The variable C(iocage_hooks) is not created if O(hooks_results) is empty. + type: list + elements: path + version_added: 10.4.0 + inventory_hostname_tag: + description: + - The name of the tag in the C(iocage properties notes) that contains the jail's alias. + - By default, the C(iocage list -l) column C(NAME) is used to name the jail.
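The self.templar calls added to parse() in the icinga2 hunk above allow url, user, and password to be Jinja2 expressions in the inventory YAML. A standalone sketch of that resolution step (the function name is illustrative):

from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar

def resolve_option(value):
    # Render the option only when it actually contains a template,
    # the same is_template()/template() dance as in the hunk above.
    templar = Templar(loader=DataLoader())
    if templar.is_template(value):
        value = templar.template(variable=value)
    return value

print(resolve_option("{{ 'secure' | upper }}"))  # SECURE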
+ - This option requires the notes format C("t1=v1 t2=v2 ..."). + - The option O(get_properties) must be enabled. + type: str + version_added: 11.0.0 + inventory_hostname_required: + description: + - If enabled, the tag declared in O(inventory_hostname_tag) is required. + type: bool + default: false + version_added: 11.0.0 +notes: + - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin + with O(user) specified and with O(host) other than V(localhost). + - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l). + - This inventory plugin creates variables C(iocage_*) for each added host. + - The values of these variables are collected from the output of the command C(iocage list -l). + - The names of these variables correspond to the output columns. + - The column C(NAME) is used to name the added host. + - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate + the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails//root). + If you mount the C(poolname) to a different path the easiest remedy is to create a symlink. +""" + +EXAMPLES = r""" +--- +# file name must end with iocage.yaml or iocage.yml +plugin: community.general.iocage +host: 10.1.0.73 +user: admin + +--- +# user is not required if iocage is running on localhost (default) +plugin: community.general.iocage + +--- +# run cryptography without legacy algorithms +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- +# execute as root +# sudoers example 'admin ALL=(ALL) NOPASSWD:SETENV: /usr/local/bin/iocage list*' +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +sudo: true +sudo_preserve_env: true +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- +# enable cache +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true + +--- +# see inventory plugin ansible.builtin.constructed +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true +strict: false +compose: + ansible_host: iocage_ip4 + release: iocage_release | split('-') | first +groups: + test: inventory_hostname.startswith('test') +keyed_groups: + - prefix: distro + key: iocage_release + - prefix: state + key: iocage_state + +--- +# Read the file /var/db/dhclient-hook.address.epair0b in the jails and use it as ansible_host +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +hooks_results: + - /var/db/dhclient-hook.address.epair0b +compose: + ansible_host: iocage_hooks.0 +groups: + test: inventory_hostname.startswith('test') +""" + +import re +import os +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.display import Display + +display = Display() + + +def _parse_ip4(ip4): + ''' Return dictionary iocage_ip4_dict. default = {ip4: [], msg: ''}. + If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask. + Otherwise, append item to msg. 
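# For reference, the helper below behaves like this on a typical iocage
# ip4 column (illustrative addresses; items without a CIDR suffix keep
# the '-' placeholder for the mask):
#
#   _parse_ip4('vtnet0|10.1.0.199/24,vnet0|172.16.0.5')
#   # -> {'ip4': [{'ifc': 'vtnet0', 'ip': '10.1.0.199', 'mask': '24'},
#   #             {'ifc': 'vnet0', 'ip': '172.16.0.5', 'mask': '-'}],
#   #     'msg': ''}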
+ ''' + + iocage_ip4_dict = {} + iocage_ip4_dict['ip4'] = [] + iocage_ip4_dict['msg'] = '' + + items = ip4.split(',') + for item in items: + if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item): + i = re.split('\\||/', item) + if len(i) == 3: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]}) + else: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'}) + else: + iocage_ip4_dict['msg'] += item + + return iocage_ip4_dict + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using iocage as source. ''' + + NAME = 'community.general.iocage' + IOCAGE = '/usr/local/bin/iocage' + + def __init__(self): + super(InventoryModule, self).__init__() + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('iocage.yaml', 'iocage.yml')): + valid = True + else: + self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"') + return valid + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + cache_key = self.get_cache_key(path) + + user_cache_setting = self.get_option('cache') + attempt_to_read_cache = user_cache_setting and cache + cache_needs_update = user_cache_setting and not cache + + if attempt_to_read_cache: + try: + results = self._cache[cache_key] + except KeyError: + cache_needs_update = True + if not attempt_to_read_cache or cache_needs_update: + results = self.get_inventory(path) + if cache_needs_update: + self._cache[cache_key] = results + + self.populate(results) + + def get_inventory(self, path): + host = self.get_option('host') + sudo = self.get_option('sudo') + sudo_preserve_env = self.get_option('sudo_preserve_env') + env = self.get_option('env') + get_properties = self.get_option('get_properties') + hooks_results = self.get_option('hooks_results') + inventory_hostname_tag = self.get_option('inventory_hostname_tag') + inventory_hostname_required = self.get_option('inventory_hostname_required') + + cmd = [] + my_env = os.environ.copy() + if host == 'localhost': + my_env.update({str(k): str(v) for k, v in env.items()}) + else: + user = self.get_option('user') + cmd.append("ssh") + cmd.append(f"{user}@{host}") + cmd.extend([f"{k}={v}" for k, v in env.items()]) + + cmd_list = cmd.copy() + if sudo: + cmd_list.append('sudo') + if sudo_preserve_env: + cmd_list.append('--preserve-env') + cmd_list.append(self.IOCAGE) + cmd_list.append('list') + cmd_list.append('--long') + try: + p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError(f'Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}') + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception as e: + raise AnsibleParserError(f'Failed to parse {to_native(path)}: {e}') from e + + results = {'_meta': {'hostvars': {}}} + self.get_jails(t_stdout, results) + + if get_properties: + for hostname, host_vars in results['_meta']['hostvars'].items(): + cmd_get_properties = cmd.copy() + cmd_get_properties.append(self.IOCAGE) + cmd_get_properties.append("get") + cmd_get_properties.append("--all") + cmd_get_properties.append(f"{hostname}") + try: + p = Popen(cmd_get_properties, stdout=PIPE, stderr=PIPE, env=my_env) + 
stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}') + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception as e: + raise AnsibleError(f'Failed to get properties: {e}') from e + + self.get_properties(t_stdout, results, hostname) + + if hooks_results: + cmd_get_pool = cmd.copy() + cmd_get_pool.append(self.IOCAGE) + cmd_get_pool.append('get') + cmd_get_pool.append('--pool') + try: + p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}') + try: + iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + except Exception as e: + raise AnsibleError(f'Failed to get pool: {e}') from e + + for hostname, host_vars in results['_meta']['hostvars'].items(): + iocage_hooks = [] + for hook in hooks_results: + path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}" + cmd_cat_hook = cmd.copy() + cmd_cat_hook.append('cat') + cmd_cat_hook.append(path) + try: + p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + iocage_hooks.append('-') + continue + + try: + iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception: + iocage_hooks.append('-') + else: + iocage_hooks.append(iocage_hook) + + results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks + + # Optionally, get the jails names from the properties notes. + # Requires the notes format "t1=v1 t2=v2 ..." + if inventory_hostname_tag: + if not get_properties: + raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. 
Enable get_properties') + update = {} + for hostname, host_vars in results['_meta']['hostvars'].items(): + tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag) + if inventory_hostname_tag in tags: + update[hostname] = tags[inventory_hostname_tag] + elif inventory_hostname_required: + raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.') + for hostname, alias in update.items(): + results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname) + + return results + + def get_jails(self, t_stdout, results): + lines = t_stdout.splitlines() + if len(lines) < 5: + return + indices = [i for i, val in enumerate(lines[1]) if val == '|'] + for line in lines[3::2]: + jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])] + iocage_name = jail[1] + iocage_ip4_dict = _parse_ip4(jail[6]) + if iocage_ip4_dict['ip4']: + iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']]) + else: + iocage_ip4 = '-' + results['_meta']['hostvars'][iocage_name] = {} + results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0] + results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2] + results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3] + results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4] + results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5] + results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict + results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4 + results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7] + results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8] + results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9] + + def get_properties(self, t_stdout, results, hostname): + properties = dict(x.split(':', 1) for x in t_stdout.splitlines()) + results['_meta']['hostvars'][hostname]['iocage_properties'] = properties + + def populate(self, results): + strict = self.get_option('strict') + + for hostname, host_vars in results['_meta']['hostvars'].items(): + self.inventory.add_host(hostname, group='all') + for var, value in host_vars.items(): + self.inventory.set_variable(hostname, var, value) + self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True) + self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 4bbd79a303..fc039b03b5 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,71 +1,93 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: linode - author: - - Luke Murphy (@decentral1se) - short_description: Ansible dynamic inventory plugin for Linode. - requirements: - - python >= 2.7 - - linode_api4 >= 2.0.0 - description: - - Reads inventories from the Linode API v4. - - Uses a YAML configuration file that ends with linode.(yml|yaml). 
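The index-slicing in get_jails() above relies on the fixed-width table that C(iocage list -l) prints: the '|' positions in the header row delimit every following data row. A self-contained sketch of the same trick on a toy table:

header = '| JID | NAME | BOOT |'
row = '| 1   | test | on   |'

# Column boundaries are wherever the header draws a pipe.
indices = [i for i, ch in enumerate(header) if ch == '|']
fields = [row[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])]
assert fields == ['1', 'test', 'on']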
- - Linode labels are used by default as the hostnames. - - The default inventory groups are built from groups (deprecated by - Linode) and not tags. - extends_documentation_fragment: - - constructed - options: - plugin: - description: Marks this as an instance of the 'linode' plugin. - required: true - choices: ['linode', 'community.general.linode'] - ip_style: - description: Populate hostvars with all information available from the Linode APIv4. - type: string - default: plain - choices: - - plain - - api - version_added: 3.6.0 - access_token: - description: The Linode account personal access token. - required: true - env: - - name: LINODE_ACCESS_TOKEN - regions: - description: Populate inventory with instances in this region. - default: [] - type: list - tags: - description: Populate inventory only with instances which have at least one of the tags listed here. - default: [] - type: list - version_added: 2.0.0 - types: - description: Populate inventory with instances with this type. - default: [] - type: list - strict: - version_added: 2.0.0 - compose: - version_added: 2.0.0 - groups: - version_added: 2.0.0 - keyed_groups: - version_added: 2.0.0 -''' +DOCUMENTATION = r""" +name: linode +author: + - Luke Murphy (@decentral1se) +short_description: Ansible dynamic inventory plugin for Linode +requirements: + - linode_api4 >= 2.0.0 +description: + - Reads inventories from the Linode API v4. + - Uses a YAML configuration file that ends with linode.(yml|yaml). + - Linode labels are used by default as the hostnames. + - The default inventory groups are built from groups (deprecated by Linode) and not tags. +extends_documentation_fragment: + - constructed + - inventory_cache +options: + cache: + version_added: 4.5.0 + cache_plugin: + version_added: 4.5.0 + cache_timeout: + version_added: 4.5.0 + cache_connection: + version_added: 4.5.0 + cache_prefix: + version_added: 4.5.0 + plugin: + description: Marks this as an instance of the 'linode' plugin. + type: string + required: true + choices: ['linode', 'community.general.linode'] + ip_style: + description: Populate hostvars with all information available from the Linode APIv4. + type: string + default: plain + choices: + - plain + - api + version_added: 3.6.0 + access_token: + description: The Linode account personal access token. + type: string + required: true + env: + - name: LINODE_ACCESS_TOKEN + regions: + description: Populate inventory with instances in this region. + default: [] + type: list + elements: string + tags: + description: Populate inventory only with instances which have at least one of the tags listed here. + default: [] + type: list + elements: string + version_added: 2.0.0 + types: + description: Populate inventory with instances with this type. + default: [] + type: list + elements: string + strict: + version_added: 2.0.0 + compose: + version_added: 2.0.0 + groups: + version_added: 2.0.0 + keyed_groups: + version_added: 2.0.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" +--- # Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment. plugin: community.general.linode +--- +# You can use Jinja to template the access token. 
+plugin: community.general.linode +access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}" +# For older Ansible versions, you need to write this as: +# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}" + +--- # Example with regions, types, groups and access token plugin: community.general.linode access_token: foobar @@ -74,6 +96,7 @@ regions: types: - g5-standard-2 +--- # Example with keyed_groups, groups, and compose plugin: community.general.linode access_token: foobar @@ -92,48 +115,45 @@ compose: ansible_ssh_host: ipv4[0] ansible_port: 2222 +--- # Example where control traffic limited to internal network plugin: community.general.linode access_token: foobar ip_style: api compose: ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" -''' +""" -import os +from ansible.errors import AnsibleError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.six import string_types -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: from linode_api4 import LinodeClient + from linode_api4.objects.linode import Instance from linode_api4.errors import ApiError as LinodeApiError HAS_LINODE = True except ImportError: HAS_LINODE = False -class InventoryModule(BaseInventoryPlugin, Constructable): +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): NAME = 'community.general.linode' - def _build_client(self): + def _build_client(self, loader): """Build the Linode client.""" access_token = self.get_option('access_token') - - if access_token is None: - try: - access_token = os.environ['LINODE_ACCESS_TOKEN'] - except KeyError: - pass + if self.templar.is_template(access_token): + access_token = self.templar.template(variable=access_token) if access_token is None: raise AnsibleError(( 'Could not retrieve Linode access token ' - 'from plugin configuration or environment' + 'from plugin configuration sources' )) self.client = LinodeClient(access_token) @@ -143,35 +163,32 @@ class InventoryModule(BaseInventoryPlugin, Constructable): try: self.instances = self.client.linode.instances() except LinodeApiError as exception: - raise AnsibleError('Linode client raised: %s' % exception) + raise AnsibleError(f'Linode client raised: {exception}') def _add_groups(self): """Add Linode instance groups to the dynamic inventory.""" - self.linode_groups = set( - filter(None, [ - instance.group - for instance - in self.instances - ]) - ) + self.linode_groups = {instance.group for instance in self.instances if instance.group} for linode_group in self.linode_groups: self.inventory.add_group(linode_group) - def _filter_by_config(self, regions, types, tags): + def _filter_by_config(self): """Filter instances by user specified configuration.""" + regions = self.get_option('regions') if regions: self.instances = [ instance for instance in self.instances if instance.region.id in regions ] + types = self.get_option('types') if types: self.instances = [ instance for instance in self.instances if instance.type.id in types ] + tags = self.get_option('tags') if tags: self.instances = [ instance for instance in self.instances @@ -181,20 +198,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def _add_instances_to_groups(self): """Add instance names to their 
dynamic inventory groups.""" for instance in self.instances: - self.inventory.add_host(instance.label, group=instance.group) + self.inventory.add_host(make_unsafe(instance.label), group=instance.group) def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" ip_style = self.get_option('ip_style') for instance in self.instances: hostvars = instance._raw_json + hostname = make_unsafe(instance.label) for hostvar_key in hostvars: if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: continue self.inventory.set_variable( - instance.label, + hostname, hostvar_key, - hostvars[hostvar_key] + make_unsafe(hostvars[hostvar_key]) ) if ip_style == 'api': ips = instance.ips.ipv4.public + instance.ips.ipv4.private @@ -203,9 +221,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): for ip_type in set(ip.type for ip in ips): self.inventory.set_variable( - instance.label, + hostname, ip_type, - self._ip_data([ip for ip in ips if ip.type == ip_type]) + make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type])) ) def _ip_data(self, ip_list): @@ -224,94 +242,86 @@ class InventoryModule(BaseInventoryPlugin, Constructable): ) return data - def _validate_option(self, name, desired_type, option_value): - """Validate user specified configuration data against types.""" - if isinstance(option_value, string_types) and desired_type == list: - option_value = [option_value] - - if option_value is None: - option_value = desired_type() - - if not isinstance(option_value, desired_type): - raise AnsibleParserError( - 'The option %s (%s) must be a %s' % ( - name, option_value, desired_type - ) - ) - - return option_value - - def _get_query_options(self, config_data): - """Get user specified query options from the configuration.""" - options = { - 'regions': { - 'type_to_be': list, - 'value': config_data.get('regions', []) - }, - 'types': { - 'type_to_be': list, - 'value': config_data.get('types', []) - }, - 'tags': { - 'type_to_be': list, - 'value': config_data.get('tags', []) - }, - } - - for name in options: - options[name]['value'] = self._validate_option( - name, - options[name]['type_to_be'], - options[name]['value'] - ) - - regions = options['regions']['value'] - types = options['types']['value'] - tags = options['tags']['value'] - - return regions, types, tags - - def verify_file(self, path): - """Verify the Linode configuration file.""" - if super(InventoryModule, self).verify_file(path): - endings = ('linode.yaml', 'linode.yml') - if any((path.endswith(ending) for ending in endings)): - return True - return False - - def parse(self, inventory, loader, path, cache=True): - """Dynamically parse Linode the cloud inventory.""" - super(InventoryModule, self).parse(inventory, loader, path) - - if not HAS_LINODE: - raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.') - - config_data = self._read_config_data(path) - self._build_client() - - self._get_instances_inventory() + def _cacheable_inventory(self): + return [i._raw_json for i in self.instances] + def populate(self): strict = self.get_option('strict') - regions, types, tags = self._get_query_options(config_data) - self._filter_by_config(regions, types, tags) + + self._filter_by_config() self._add_groups() self._add_instances_to_groups() self._add_hostvars_for_instances() for instance in self.instances: - variables = self.inventory.get_host(instance.label).get_vars() + hostname = make_unsafe(instance.label) + variables = self.inventory.get_host(hostname).get_vars() 
self._add_host_to_composed_groups( self.get_option('groups'), variables, - instance.label, + hostname, strict=strict) self._add_host_to_keyed_groups( self.get_option('keyed_groups'), variables, - instance.label, + hostname, strict=strict) self._set_composite_vars( self.get_option('compose'), variables, - instance.label, + hostname, strict=strict) + + def verify_file(self, path): + """Verify the Linode configuration file. + + Return true/false if the config-file is valid for this plugin + + Args: + str(path): path to the config + Kwargs: + None + Raises: + None + Returns: + bool(valid): is valid config file""" + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(("linode.yaml", "linode.yml")): + valid = True + else: + self.display.vvv('Inventory source not ending in "linode.yaml" or "linode.yml"') + return valid + + def parse(self, inventory, loader, path, cache=True): + """Dynamically parse Linode the cloud inventory.""" + super(InventoryModule, self).parse(inventory, loader, path) + self.instances = None + + if not HAS_LINODE: + raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.') + + self._read_config_data(path) + + cache_key = self.get_cache_key(path) + + if cache: + cache = self.get_option('cache') + + update_cache = False + if cache: + try: + self.instances = [Instance(None, i["id"], i) for i in self._cache[cache_key]] + except KeyError: + update_cache = True + + # Check for None rather than False in order to allow + # for empty sets of cached instances + if self.instances is None: + self._build_client(loader) + self._get_instances_inventory() + + if update_cache: + self._cache[cache_key] = self._cacheable_inventory() + + self.populate() diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 59bb8845ff..492d12a21b 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -1,134 +1,183 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Frank Dornheim -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Frank Dornheim +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: lxd - short_description: Returns Ansible inventory from lxd host +DOCUMENTATION = r""" +name: lxd +short_description: Returns Ansible inventory from lxd host +description: + - Get inventory from the lxd. + - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. +version_added: "3.0.0" +author: "Frank Dornheim (@conloos)" +requirements: + - ipaddress + - lxd >= 4.0 +options: + plugin: + description: Token that ensures this is a source file for the 'lxd' plugin. + type: string + required: true + choices: ['community.general.lxd'] + url: description: - - Get inventory from the lxd. - - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. - version_added: "3.0.0" - author: "Frank Dornheim (@conloos)" - requirements: - - ipaddress - options: - plugin: - description: Token that ensures this is a source file for the 'lxd' plugin. - required: true - choices: [ 'community.general.lxd' ] - url: - description: - - The unix domain socket path or the https URL for the lxd server. - - Sockets in filesystem have to start with C(unix:). 
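The linode parse() hunk above caches instance._raw_json and, on a warm cache, rebuilds live objects without touching the API. In isolation the rebuild looks like this (a sketch assuming the cached shape used above):

from linode_api4.objects.linode import Instance

def rebuild(cached):
    # 'cached' is the list of instance._raw_json dicts stored in _cache;
    # feeding the raw JSON back in avoids one API round-trip per instance.
    return [Instance(None, raw['id'], raw) for raw in cached]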
- - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - aliases: [ key_file ] - default: $HOME/.config/lxc/client.key - type: path - client_cert: - description: - - The client certificate file path. - aliases: [ cert_file ] - default: $HOME/.config/lxc/client.crt - type: path - trust_password: - description: - - The client trusted password. - - You need to set this password on the lxd server before - running this module using the following command - C(lxc config set core.trust_password ) - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). - - If I(trust_password) is set, this module send a request for authentication before sending any requests. - type: str - state: - description: Filter the container according to the current status. - type: str - default: none - choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ] - prefered_container_network_interface: - description: - - If a container has multiple network interfaces, select which one is the prefered as pattern. - - Combined with the first number that can be found e.g. 'eth' + 0. - type: str - default: eth - prefered_container_network_family: - description: - - If a container has multiple network interfaces, which one is the prefered by family. - - Specify C(inet) for IPv4 and C(inet6) for IPv6. - type: str - default: inet - choices: [ 'inet', 'inet6' ] - groupby: - description: - - Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid). - - See example for syntax. - type: dict -''' + - The unix domain socket path or the https URL for the lxd server. + - Sockets in filesystem have to start with C(unix:). + - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). + type: string + default: unix:/var/snap/lxd/common/lxd/unix.socket + client_key: + description: + - The client certificate key file path. + aliases: [key_file] + default: $HOME/.config/lxc/client.key + type: path + client_cert: + description: + - The client certificate file path. + aliases: [cert_file] + default: $HOME/.config/lxc/client.crt + type: path + server_cert: + description: + - The server certificate file path. + type: path + version_added: 8.0.0 + server_check_hostname: + description: + - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be + useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name + matching the one used to communicate with the server. Such mismatch is common as LXD generates self-signed server + certificates by default. + type: bool + default: true + version_added: 8.0.0 + trust_password: + description: + - The client trusted password. + - You need to set this password on the lxd server before running this module using the following command C(lxc config + set core.trust_password ) See + U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password). + - If O(trust_password) is set, this module send a request for authentication before sending any requests. + type: str + state: + description: Filter the instance according to the current status. 
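server_check_hostname, documented above, corresponds conceptually to the standard-library toggle below (a sketch of the idea, not the collection's LXDClient internals; the certificate path is assumed):

import ssl

# Verify the chain against a pinned server certificate, but skip the
# hostname match that self-signed LXD certificates typically fail.
context = ssl.create_default_context(cafile='server.crt')
context.check_hostname = False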
+ type: str + default: none + choices: ['STOPPED', 'STARTING', 'RUNNING', 'none'] + project: + description: Filter the instance according to the given project. + type: str + default: default + version_added: 6.2.0 + type_filter: + description: + - Filter the instances by type V(virtual-machine), V(container) or V(both). + - The first version of the inventory only supported containers. + type: str + default: container + choices: ['virtual-machine', 'container', 'both'] + version_added: 4.2.0 + prefered_instance_network_interface: + description: + - If an instance has multiple network interfaces, select which one is the preferred as pattern. + - Combined with the first number that can be found, for example C(eth) + C(0). + - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface) + in community.general 3.8.0. The old name still works as an alias. + type: str + default: eth + aliases: + - prefered_container_network_interface + prefered_instance_network_family: + description: + - If an instance has multiple network interfaces, which one is the preferred by family. + - Specify V(inet) for IPv4 and V(inet6) for IPv6. + type: str + default: inet + choices: ['inet', 'inet6'] + groupby: + description: + - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), + C(type), C(vlanid). + - See example for syntax. + type: dict +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # simple lxd.yml plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket +--- # simple lxd.yml including filter plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket state: RUNNING +--- +# simple lxd.yml including virtual machines and containers +plugin: community.general.lxd +url: unix:/var/snap/lxd/common/lxd/unix.socket +type_filter: both + # grouping lxd.yml groupby: - testpattern: - type: pattern - attribute: test - vlan666: - type: vlanid - attribute: 666 locationBerlin: type: location attribute: Berlin - osUbuntu: - type: os - attribute: ubuntu - releaseFocal: - type: release - attribute: focal - releaseBionic: - type: release - attribute: bionic - profileDefault: - type: profile - attribute: default - profileX11: - type: profile - attribute: x11 netRangeIPv4: type: network_range attribute: 10.98.143.0/24 netRangeIPv6: type: network_range attribute: fd42:bd00:7b11:2167:216:3eff::/24 -''' + osUbuntu: + type: os + attribute: ubuntu + testpattern: + type: pattern + attribute: test + profileDefault: + type: profile + attribute: default + profileX11: + type: profile + attribute: x11 + releaseFocal: + type: release + attribute: focal + releaseBionic: + type: release + attribute: bionic + typeVM: + type: type + attribute: virtual-machine + typeContainer: + type: type + attribute: container + vlan666: + type: vlanid + attribute: 666 + projectInternals: + type: project + attribute: internals +""" -import binascii import json import re import time import os -import socket +from urllib.parse import urlencode + from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge -from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from 
ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: import ipaddress @@ -163,7 +212,7 @@ class InventoryModule(BaseInventoryPlugin): with open(path, 'r') as json_file: return json.load(json_file) except (IOError, json.decoder.JSONDecodeError) as err: - raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err))) + raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {err}') def save_json_data(self, path, file_name=None): """save data as json @@ -193,7 +242,7 @@ class InventoryModule(BaseInventoryPlugin): with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: json.dump(self.data, json_file) except IOError as err: - raise AnsibleParserError('Could not save data: {0}'.format(to_native(err))) + raise AnsibleParserError(f'Could not save data: {err}') def verify_file(self, path): """Check the config @@ -233,7 +282,7 @@ class InventoryModule(BaseInventoryPlugin): if not isinstance(url, str): return False if not url.startswith(('unix:', 'https:')): - raise AnsibleError('URL is malformed: {0}'.format(to_native(url))) + raise AnsibleError(f'URL is malformed: {url}') return True def _connect_to_socket(self): @@ -254,11 +303,11 @@ class InventoryModule(BaseInventoryPlugin): urls = (url for url in url_list if self.validate_url(url)) for url in urls: try: - socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug) + socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname) return socket_connection except LXDClientException as err: error_storage[url] = err - raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage))) + raise AnsibleError(f'No connection to the socket: {error_storage}') def _get_networks(self): """Get Networknames @@ -283,10 +332,10 @@ class InventoryModule(BaseInventoryPlugin): network_configs = self.socket.do('GET', '/1.0/networks') return [m.split('/')[3] for m in network_configs['metadata']] - def _get_containers(self): - """Get Containernames + def _get_instances(self): + """Get instancenames - Returns all containernames + Returns all instancenames Args: None @@ -295,47 +344,59 @@ class InventoryModule(BaseInventoryPlugin): Raises: None Returns: - list(names): names of all containers""" - # e.g. {'type': 'sync', - # 'status': 'Success', - # 'status_code': 200, - # 'operation': '', - # 'error_code': 0, - # 'error': '', - # 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']} - containers = self.socket.do('GET', '/1.0/containers') - return [m.split('/')[3] for m in containers['metadata']] + list(names): names of all instances""" + # e.g. 
{ + # "metadata": [ + # "/1.0/instances/foo", + # "/1.0/instances/bar" + # ], + # "status": "Success", + # "status_code": 200, + # "type": "sync" + # } + url = '/1.0/instances' + if self.project: + url = f"{url}?{urlencode(dict(project=self.project))}" + + instances = self.socket.do('GET', url) + + if self.project: + return [m.split('/')[3].split('?')[0] for m in instances['metadata']] + + return [m.split('/')[3] for m in instances['metadata']] def _get_config(self, branch, name): - """Get inventory of container + """Get inventory of instance - Get config of container + Get config of instance Args: str(branch): Name oft the API-Branch - str(name): Name of Container + str(name): Name of instance Kwargs: None Source: - https://github.com/lxc/lxd/blob/master/doc/rest-api.md + https://documentation.ubuntu.com/lxd/en/latest/rest-api/ Raises: None Returns: - dict(config): Config of the container""" + dict(config): Config of the instance""" config = {} if isinstance(branch, (tuple, list)): - config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))} + config[name] = {branch[1]: self.socket.do( + 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')} else: - config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))} + config[name] = {branch: self.socket.do( + 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')} return config - def get_container_data(self, names): - """Create Inventory of the container + def get_instance_data(self, names): + """Create Inventory of the instance - Iterate through the different branches of the containers and collect Informations. + Iterate through the different branches of the instances and collect Information. Args: - list(names): List of container names + list(names): List of instance names Kwargs: None Raises: @@ -344,20 +405,20 @@ class InventoryModule(BaseInventoryPlugin): None""" # tuple(('instances','metadata/templates')) to get section in branch # e.g. /1.0/instances//metadata/templates - branches = ['containers', ('instances', 'state')] - container_config = {} + branches = ['instances', ('instances', 'state')] + instance_config = {} for branch in branches: for name in names: - container_config['containers'] = self._get_config(branch, name) - self.data = dict_merge(container_config, self.data) + instance_config['instances'] = self._get_config(branch, name) + self.data = dict_merge(instance_config, self.data) def get_network_data(self, names): - """Create Inventory of the container + """Create Inventory of the instance - Iterate through the different branches of the containers and collect Informations. + Iterate through the different branches of the instances and collect Information. 
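The project scoping added to _get_instances() and _get_config() above is plain query-string composition; a runnable sketch (the project name is illustrative):

from urllib.parse import urlencode

url = '/1.0/instances'
project = 'internals'
if project:
    url = f"{url}?{urlencode(dict(project=project))}"
assert url == '/1.0/instances?project=internals'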
Args: - list(names): List of container names + list(names): List of instance names Kwargs: None Raises: @@ -376,54 +437,54 @@ class InventoryModule(BaseInventoryPlugin): network_config['networks'] = {name: None} self.data = dict_merge(network_config, self.data) - def extract_network_information_from_container_config(self, container_name): + def extract_network_information_from_instance_config(self, instance_name): """Returns the network interface configuration - Returns the network ipv4 and ipv6 config of the container without local-link + Returns the network ipv4 and ipv6 config of the instance without local-link Args: - str(container_name): Name oft he container + str(instance_name): Name of the instance Kwargs: None Raises: None Returns: dict(network_configuration): network config""" - container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name)) + instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network') network_configuration = None - if container_network_interfaces: + if instance_network_interfaces: network_configuration = {} - gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo'] + gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo'] for interface_name in gen_interface_names: - gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link'] + gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link'] network_configuration[interface_name] = [] for address in gen_address: address_set = {} address_set['family'] = address.get('family') address_set['address'] = address.get('address') address_set['netmask'] = address.get('netmask') - address_set['combined'] = address.get('address') + '/' + address.get('netmask') + address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}" network_configuration[interface_name].append(address_set) return network_configuration - def get_prefered_container_network_interface(self, container_name): - """Helper to get the prefered interface of thr container + def get_prefered_instance_network_interface(self, instance_name): + """Helper to get the preferred interface of the instance - Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'. + Helper to get the preferred interface provided by name pattern from 'prefered_instance_network_interface'.
Args: - str(containe_name): name of container + str(instance_name): name of instance Kwargs: None Raises: None Returns: str(prefered_interface): None or interface name""" - container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)) + instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') prefered_interface = None # init - if container_network_interfaces: # container have network interfaces + if instance_network_interfaces: # instance has network interfaces # generator if interfaces which start with the desired pattern - net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)] + net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)] selected_interfaces = [] # init for interface in net_generator: selected_interfaces.append(interface) @@ -431,13 +492,13 @@ class InventoryModule(BaseInventoryPlugin): prefered_interface = sorted(selected_interfaces)[0] return prefered_interface - def get_container_vlans(self, container_name): - """Get VLAN(s) from container + def get_instance_vlans(self, instance_name): + """Get VLAN(s) from instance - Helper to get the VLAN_ID from the container + Helper to get the VLAN_ID from the instance Args: - str(containe_name): name of container + str(instance_name): name of instance Kwargs: None Raises: @@ -450,13 +511,13 @@ class InventoryModule(BaseInventoryPlugin): if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)): network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)) - # get networkdevices of container and return + # get networkdevices of instance and return # e.g. # "eth0":{ "name":"eth0", # "network":"lxdbr0", # "type":"nic"}, vlan_ids = {} - devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name))) + devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices') for device in devices: if 'network' in devices[device]: if devices[device]['network'] in network_vlans: @@ -492,14 +553,14 @@ class InventoryModule(BaseInventoryPlugin): except KeyError: return None - def _set_data_entry(self, container_name, key, value, path=None): + def _set_data_entry(self, instance_name, key, value, path=None): """Helper to save data Helper to save the data in self.data - Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten. + Detect if data is already in branch and use dict_merge() to prevent the branch from being overwritten.
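dict_merge(), relied on in the docstring above, recursively folds one mapping into another, so repeated _set_data_entry() calls extend a branch instead of clobbering it. For example:

from ansible.module_utils.common.dict_transformations import dict_merge

base = {'state': {'status': 'Running'}}
extra = {'state': {'network': {'eth0': {}}}}

# Nested keys are merged rather than replaced:
print(dict_merge(base, extra))
# {'state': {'status': 'Running', 'network': {'eth0': {}}}}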
         Args:
-            str(container_name): name of container
+            str(instance_name): name of instance
             str(key): same as dict
             *(value): same as dict
         Kwargs:
@@ -510,24 +571,24 @@ class InventoryModule(BaseInventoryPlugin):
             None"""
         if not path:
             path = self.data['inventory']
-        if container_name not in path:
-            path[container_name] = {}
+        if instance_name not in path:
+            path[instance_name] = {}
 
         try:
-            if isinstance(value, dict) and key in path[container_name]:
-                path[container_name] = dict_merge(value, path[container_name][key])
+            if isinstance(value, dict) and key in path[instance_name]:
+                path[instance_name] = dict_merge(value, path[instance_name][key])
             else:
-                path[container_name][key] = value
+                path[instance_name][key] = value
         except KeyError as err:
-            raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
+            raise AnsibleParserError(f"Unable to store information: {err}")
 
-    def extract_information_from_container_configs(self):
+    def extract_information_from_instance_configs(self):
         """Process configuration information
 
         Preparation of the data
 
         Args:
-            dict(configs): Container configurations
+            dict(configs): Instance configurations
         Kwargs:
             None
         Raises:
@@ -538,33 +599,37 @@ class InventoryModule(BaseInventoryPlugin):
         if 'inventory' not in self.data:
             self.data['inventory'] = {}
 
-        for container_name in self.data['containers']:
-            self._set_data_entry(container_name, 'os', self._get_data_entry(
-                'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
-            self._set_data_entry(container_name, 'release', self._get_data_entry(
-                'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
-            self._set_data_entry(container_name, 'version', self._get_data_entry(
-                'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
-            self._set_data_entry(container_name, 'profile', self._get_data_entry(
-                'containers/{0}/containers/metadata/profiles'.format(container_name)))
-            self._set_data_entry(container_name, 'location', self._get_data_entry(
-                'containers/{0}/containers/metadata/location'.format(container_name)))
-            self._set_data_entry(container_name, 'state', self._get_data_entry(
-                'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
-            self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
-            self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
-            self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
+        for instance_name in self.data['instances']:
+            self._set_data_entry(instance_name, 'os', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/config/image.os'))
+            self._set_data_entry(instance_name, 'release', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/config/image.release'))
+            self._set_data_entry(instance_name, 'version', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/config/image.version'))
+            self._set_data_entry(instance_name, 'profile', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/profiles'))
+            self._set_data_entry(instance_name, 'location', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/location'))
+            self._set_data_entry(instance_name, 'state', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power'))
+            self._set_data_entry(instance_name, 'type', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/type'))
+            self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
+            self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
+            self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
+            self._set_data_entry(instance_name, 'project', self._get_data_entry(
+                f'instances/{instance_name}/instances/metadata/project'))
 
-    def build_inventory_network(self, container_name):
-        """Add the network interfaces of the container to the inventory
+    def build_inventory_network(self, instance_name):
+        """Add the network interfaces of the instance to the inventory
 
         Logic:
-            - if the container have no interface -> 'ansible_connection: local'
-            - get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
-            - first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
+            - if the instance has no interface -> 'ansible_connection: local'
+            - get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
+            - first interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
 
         Args:
-            str(container_name): name of container
+            str(instance_name): name of instance
         Kwargs:
             None
         Raises:
@@ -572,45 +637,45 @@ class InventoryModule(BaseInventoryPlugin):
         Returns:
             None"""
 
-        def interface_selection(container_name):
-            """Select container Interface for inventory
+        def interface_selection(instance_name):
+            """Select instance interface for inventory
 
             Logic:
-                - get preferred_interface & prefered_container_network_family -> str(IP)
-                - first Interface from: network_interfaces prefered_container_network_family -> str(IP)
+                - get preferred_interface & prefered_instance_network_family -> str(IP)
+                - first interface from: network_interfaces prefered_instance_network_family -> str(IP)
 
             Args:
-                str(container_name): name of container
+                str(instance_name): name of instance
             Kwargs:
                 None
             Raises:
                 None
            Returns:
                dict(interface_name: ip)"""
-            prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name))  # name or None
-            prefered_container_network_family = self.prefered_container_network_family
+            prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface')  # name or None
+            prefered_instance_network_family = self.prefered_instance_network_family
 
             ip_address = ''
             if prefered_interface:
-                interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
+                interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}')
                 for config in interface:
-                    if config['family'] == prefered_container_network_family:
+                    if config['family'] == prefered_instance_network_family:
                         ip_address = config['address']
                         break
             else:
-                interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
-                for config in interface:
-                    if config['family'] == prefered_container_network_family:
-                        ip_address = config['address']
-                        break
+                interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces')
+                for interface in interfaces.values():
+                    for config in interface:
+                        if config['family'] == prefered_instance_network_family:
+                            ip_address = config['address']
+                            break
             return ip_address
 
-        if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)):  # container have network interfaces
-            if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)):  # container have a preferred interface
-                self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
-                self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
+        if self._get_data_entry(f'inventory/{instance_name}/network_interfaces'):  # instance has network interfaces
+            self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
+            self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name)))
         else:
-            self.inventory.set_variable(container_name, 'ansible_connection', 'local')
+            self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
 
     def build_inventory_hosts(self):
         """Build host-part dynamic inventory
@@ -626,29 +691,47 @@ class InventoryModule(BaseInventoryPlugin):
             None
         Returns:
             None"""
-        for container_name in self.data['inventory']:
-            # Only consider containers that match the "state" filter, if self.state is not None
+        for instance_name in self.data['inventory']:
+            instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower()
+
+            # Only consider instances that match the "state" filter, if self.state is not None
             if self.filter:
-                if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
+                if self.filter.lower() != instance_state:
                     continue
-            # add container
-            self.inventory.add_host(container_name)
-            # add network informations
-            self.build_inventory_network(container_name)
+            # add instance
+            instance_name = make_unsafe(instance_name)
+            self.inventory.add_host(instance_name)
+            # add network information
+            self.build_inventory_network(instance_name)
             # add os
-            self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
+            v = self._get_data_entry(f'inventory/{instance_name}/os')
+            if v:
+                self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower()))
             # add release
-            self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
+            v = self._get_data_entry(f'inventory/{instance_name}/release')
+            if v:
+                self.inventory.set_variable(
+                    instance_name, 'ansible_lxd_release', make_unsafe(v.lower()))
             # add profile
-            self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
+            self.inventory.set_variable(
+                instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile')))
             # add state
-            self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
+            self.inventory.set_variable(
+                instance_name, 'ansible_lxd_state', make_unsafe(instance_state))
+            # add type
+            self.inventory.set_variable(
+                instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type')))
             # add location information
-            if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none":  # wrong type by lxd 'none' != 'None'
-                self.inventory.set_variable(container_name, 'ansible_lxd_location',
self._get_data_entry('inventory/{0}/location'.format(container_name))) + if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None' + self.inventory.set_variable( + instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location'))) # add VLAN_ID information - if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)): - self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name))) + if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'): + self.inventory.set_variable( + instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids'))) + # add project + self.inventory.set_variable( + instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project'))) def build_inventory_groups_location(self, group_name): """create group by attribute: location @@ -665,9 +748,9 @@ class InventoryModule(BaseInventoryPlugin): if group_name not in self.inventory.groups: self.inventory.add_group(group_name) - for container_name in self.inventory.hosts: - if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars(): - self.inventory.add_child(group_name, container_name) + for instance_name in self.inventory.hosts: + if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars(): + self.inventory.add_child(group_name, instance_name) def build_inventory_groups_pattern(self, group_name): """create group by name pattern @@ -686,10 +769,10 @@ class InventoryModule(BaseInventoryPlugin): regex_pattern = self.groupby[group_name].get('attribute') - for container_name in self.inventory.hosts: - result = re.search(regex_pattern, container_name) + for instance_name in self.inventory.hosts: + result = re.search(regex_pattern, instance_name) if result: - self.inventory.add_child(group_name, container_name) + self.inventory.add_child(group_name, instance_name) def build_inventory_groups_network_range(self, group_name): """check if IP is in network-class @@ -710,27 +793,27 @@ class InventoryModule(BaseInventoryPlugin): network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) except ValueError as err: raise AnsibleParserError( - 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err))) + f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}") - for container_name in self.inventory.hosts: - if self.data['inventory'][container_name].get('network_interfaces') is not None: - for interface in self.data['inventory'][container_name].get('network_interfaces'): - for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]: + for instance_name in self.inventory.hosts: + if self.data['inventory'][instance_name].get('network_interfaces') is not None: + for interface in self.data['inventory'][instance_name].get('network_interfaces'): + for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]: try: address = ipaddress.ip_address(to_text(interface_family['address'])) if address.version == network.version and address in network: - self.inventory.add_child(group_name, container_name) + self.inventory.add_child(group_name, instance_name) except ValueError: # Ignore invalid IP addresses returned by lxd pass - def 
build_inventory_groups_os(self, group_name): - """create group by attribute: os + def build_inventory_groups_project(self, group_name): + """create group by attribute: project Args: str(group_name): Group name Kwargs: - Noneself.data['inventory'][container_name][interface] + None Raises: None Returns: @@ -739,12 +822,34 @@ class InventoryModule(BaseInventoryPlugin): if group_name not in self.inventory.groups: self.inventory.add_group(group_name) - gen_containers = [ - container_name for container_name in self.inventory.hosts - if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()] - for container_name in gen_containers: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'): - self.inventory.add_child(group_name, container_name) + gen_instances = [ + instance_name for instance_name in self.inventory.hosts + if 'ansible_lxd_project' in self.inventory.get_host(instance_name).get_vars()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_project'): + self.inventory.add_child(group_name, instance_name) + + def build_inventory_groups_os(self, group_name): + """create group by attribute: os + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_instances = [ + instance_name for instance_name in self.inventory.hosts + if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'): + self.inventory.add_child(group_name, instance_name) def build_inventory_groups_release(self, group_name): """create group by attribute: release @@ -761,12 +866,12 @@ class InventoryModule(BaseInventoryPlugin): if group_name not in self.inventory.groups: self.inventory.add_group(group_name) - gen_containers = [ - container_name for container_name in self.inventory.hosts - if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()] - for container_name in gen_containers: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'): - self.inventory.add_child(group_name, container_name) + gen_instances = [ + instance_name for instance_name in self.inventory.hosts + if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'): + self.inventory.add_child(group_name, instance_name) def build_inventory_groups_profile(self, group_name): """create group by attribute: profile @@ -783,12 +888,12 @@ class InventoryModule(BaseInventoryPlugin): if group_name not in self.inventory.groups: self.inventory.add_group(group_name) - gen_containers = [ - container_name for container_name in self.inventory.hosts.keys() - if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()] - for container_name in gen_containers: - if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'): - 
self.inventory.add_child(group_name, container_name) + gen_instances = [ + instance_name for instance_name in self.inventory.hosts.keys() + if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'): + self.inventory.add_child(group_name, instance_name) def build_inventory_groups_vlanid(self, group_name): """create group by attribute: vlanid @@ -805,12 +910,34 @@ class InventoryModule(BaseInventoryPlugin): if group_name not in self.inventory.groups: self.inventory.add_group(group_name) - gen_containers = [ - container_name for container_name in self.inventory.hosts.keys() - if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()] - for container_name in gen_containers: - if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values(): - self.inventory.add_child(group_name, container_name) + gen_instances = [ + instance_name for instance_name in self.inventory.hosts.keys() + if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values(): + self.inventory.add_child(group_name, instance_name) + + def build_inventory_groups_type(self, group_name): + """create group by attribute: type + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_instances = [ + instance_name for instance_name in self.inventory.hosts + if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'): + self.inventory.add_child(group_name, instance_name) def build_inventory_groups(self): """Build group-part dynamic inventory @@ -839,6 +966,8 @@ class InventoryModule(BaseInventoryPlugin): * 'release' * 'profile' * 'vlanid' + * 'type' + * 'project' Args: str(group_name): Group name @@ -864,14 +993,18 @@ class InventoryModule(BaseInventoryPlugin): self.build_inventory_groups_profile(group_name) elif self.groupby[group_name].get('type') == 'vlanid': self.build_inventory_groups_vlanid(group_name) + elif self.groupby[group_name].get('type') == 'type': + self.build_inventory_groups_type(group_name) + elif self.groupby[group_name].get('type') == 'project': + self.build_inventory_groups_project(group_name) else: - raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name))) + raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}') if self.groupby: for group_name in self.groupby: if not group_name.isalnum(): - raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name))) - group_type(group_name) + raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}') + group_type(make_unsafe(group_name)) def build_inventory(self): """Build dynamic inventory @@ -890,10 +1023,30 @@ class InventoryModule(BaseInventoryPlugin): self.build_inventory_hosts() self.build_inventory_groups() + def 
cleandata(self):
+        """Clean the dynamic inventory
+
+        The first version of the inventory only supported containers.
+        This will change in the future.
+        The following function cleans up the data and removes all items of the wrong type.
+
+        Args:
+            None
+        Kwargs:
+            None
+        Raises:
+            None
+        Returns:
+            None"""
+        iter_keys = list(self.data['instances'].keys())
+        for instance_name in iter_keys:
+            if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter:
+                del self.data['instances'][instance_name]
+
     def _populate(self):
         """Return the hosts and groups
 
-        Returns the processed container configurations from the lxd import
+        Returns the processed instance configurations from the lxd import
 
         Args:
             None
@@ -906,10 +1059,16 @@ class InventoryModule(BaseInventoryPlugin):
 
         if len(self.data) == 0:  # If no data is injected by unittests open socket
             self.socket = self._connect_to_socket()
-            self.get_container_data(self._get_containers())
+            self.get_instance_data(self._get_instances())
             self.get_network_data(self._get_networks())
 
-        self.extract_information_from_container_configs()
+        # The first version of the inventory only supported containers.
+        # This will change in the future.
+        # The following function cleans up the data.
+        if self.type_filter != 'both':
+            self.cleandata()
+
+        self.extract_information_from_instance_configs()
 
         # self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
 
@@ -934,9 +1093,7 @@ class InventoryModule(BaseInventoryPlugin):
         Returns:
             None"""
         if IPADDRESS_IMPORT_ERROR:
-            raise_from(
-                AnsibleError('another_library must be installed to use this plugin'),
-                IPADDRESS_IMPORT_ERROR)
+            raise AnsibleError('ipaddress must be installed to use this plugin') from IPADDRESS_IMPORT_ERROR
         super(InventoryModule, self).parse(inventory, loader, path, cache=False)
         # Read the inventory YAML file
@@ -944,12 +1101,16 @@ class InventoryModule(BaseInventoryPlugin):
         try:
             self.client_key = self.get_option('client_key')
             self.client_cert = self.get_option('client_cert')
+            self.server_cert = self.get_option('server_cert')
+            self.server_check_hostname = self.get_option('server_check_hostname')
+            self.project = self.get_option('project')
             self.debug = self.DEBUG
             self.data = {}  # store for inventory-data
             self.groupby = self.get_option('groupby')
             self.plugin = self.get_option('plugin')
-            self.prefered_container_network_family = self.get_option('prefered_container_network_family')
-            self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
+            self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
+            self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
+            self.type_filter = self.get_option('type_filter')
             if self.get_option('state').lower() == 'none':  # none in config is str()
                 self.filter = None
             else:
@@ -958,6 +1119,6 @@ class InventoryModule(BaseInventoryPlugin):
             self.url = self.get_option('url')
         except Exception as err:
             raise AnsibleParserError(
-                'All correct options required: {0}'.format(to_native(err)))
+                f'All correct options required: {err}')
         # Call our internal helper to populate the dynamic inventory
         self._populate()
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py
index e411006ff0..ea0ce560fd 100644
--- a/plugins/inventory/nmap.py
+++ b/plugins/inventory/nmap.py
@@ -1,54 +1,127 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or
https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: nmap - short_description: Uses nmap to find hosts to target +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: nmap +short_description: Uses nmap to find hosts to target +description: + - Uses a YAML configuration file with a valid YAML extension. +extends_documentation_fragment: + - constructed + - inventory_cache +requirements: + - nmap CLI installed +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin. + type: string + required: true + choices: ['nmap', 'community.general.nmap'] + sudo: + description: Set to V(true) to execute a C(sudo nmap) plugin scan. + version_added: 4.8.0 + default: false + type: boolean + address: + description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. + type: string + required: true + env: + - name: ANSIBLE_NMAP_ADDRESS + version_added: 6.6.0 + exclude: description: - - Uses a YAML configuration file with a valid YAML extension. - extends_documentation_fragment: - - constructed - - inventory_cache - requirements: - - nmap CLI installed - options: - plugin: - description: token that ensures this is a source file for the 'nmap' plugin. - required: True - choices: ['nmap', 'community.general.nmap'] - address: - description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. - required: True - exclude: - description: list of addresses to exclude - type: list - ports: - description: Enable/disable scanning for open ports - type: boolean - default: True - ipv4: - description: use IPv4 type addresses - type: boolean - default: True - ipv6: - description: use IPv6 type addresses - type: boolean - default: True - notes: - - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False. - - 'TODO: add OS fingerprinting' -''' -EXAMPLES = ''' + - List of addresses to exclude. + - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16). + type: list + elements: string + env: + - name: ANSIBLE_NMAP_EXCLUDE + version_added: 6.6.0 + port: + description: + - Only scan specific port or port range (C(-p)). + - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9) + to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all. + type: string + version_added: 6.5.0 + ports: + description: Enable/disable scanning ports. + type: boolean + default: true + ipv4: + description: Use IPv4 type addresses. + type: boolean + default: true + ipv6: + description: Use IPv6 type addresses. + type: boolean + default: true + udp_scan: + description: + - Scan using UDP. + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + icmp_timestamp: + description: + - Scan using ICMP Timestamp (C(-PP)). + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + open: + description: Only scan for open (or possibly open) ports. 
+ type: boolean + default: false + version_added: 6.5.0 + dns_resolve: + description: Whether to always (V(true)) or never (V(false)) do DNS resolution. + type: boolean + default: false + version_added: 6.1.0 + dns_servers: + description: Specify which DNS servers to use for name resolution. + type: list + elements: string + version_added: 10.5.0 + use_arp_ping: + description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method. + type: boolean + default: true + version_added: 7.4.0 +notes: + - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). + - 'TODO: add OS fingerprinting.' +""" +EXAMPLES = r""" +--- # inventory.config file in YAML format plugin: community.general.nmap -strict: False +strict: false address: 192.168.0.0/24 -''' + +--- +# a sudo nmap scan to fully use nmap scan power. +plugin: community.general.nmap +sudo: true +strict: false +address: 192.168.0.0/24 + +--- +# an nmap scan specifying ports and classifying results to an inventory group +plugin: community.general.nmap +address: 192.168.0.0/24 +exclude: 192.168.0.1, web.example.com +port: 22, 443 +groups: + web_servers: "ports | selectattr('port', 'equalto', '443')" +""" import os import re @@ -61,6 +134,8 @@ from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): @@ -77,6 +152,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): strict = self.get_option('strict') for host in hosts: + host = make_unsafe(host) hostname = host['name'] self.inventory.add_host(hostname) for var, value in host.items(): @@ -107,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self._nmap = get_bin_path('nmap') except ValueError as e: - raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e))) + raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {e}') super(InventoryModule, self).parse(inventory, loader, path, cache=cache) @@ -134,27 +210,54 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if not user_cache_setting or cache_needs_update: # setup command cmd = [self._nmap] - if not self._options['ports']: + + if self.get_option('sudo'): + cmd.insert(0, 'sudo') + + if self.get_option('port'): + cmd.append('-p') + cmd.append(self.get_option('port')) + + if not self.get_option('ports'): cmd.append('-sP') - if self._options['ipv4'] and not self._options['ipv6']: + if self.get_option('ipv4') and not self.get_option('ipv6'): cmd.append('-4') - elif self._options['ipv6'] and not self._options['ipv4']: + elif self.get_option('ipv6') and not self.get_option('ipv4'): cmd.append('-6') - elif not self._options['ipv6'] and not self._options['ipv4']: + elif not self.get_option('ipv6') and not self.get_option('ipv4'): raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') - if self._options['exclude']: + if self.get_option('exclude'): cmd.append('--exclude') - cmd.append(','.join(self._options['exclude'])) + cmd.append(','.join(self.get_option('exclude'))) - cmd.append(self._options['address']) + if self.get_option('dns_resolve'): + 
cmd.append('-n') + + if self.get_option('dns_servers'): + cmd.append('--dns-servers') + cmd.append(','.join(self.get_option('dns_servers'))) + + if self.get_option('udp_scan'): + cmd.append('-sU') + + if self.get_option('icmp_timestamp'): + cmd.append('-PP') + + if self.get_option('open'): + cmd.append('--open') + + if not self.get_option('use_arp_ping'): + cmd.append('--disable-arp-ping') + + cmd.append(self.get_option('address')) try: # execute p = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) + raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}') # parse results host = None @@ -165,7 +268,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: t_stdout = to_text(stdout, errors='surrogate_or_strict') except UnicodeError as e: - raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) + raise AnsibleParserError(f'Invalid (non unicode) input returned: {e}') for line in t_stdout.splitlines(): hits = self.find_host.match(line) @@ -206,7 +309,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): results[-1]['ports'] = ports except Exception as e: - raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + raise AnsibleParserError(f"failed to parse {to_native(path)}: {e} ") if cache_needs_update: self._cache[cache_key] = results diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index c678d3e0e5..cbc46a6723 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,49 +1,52 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: online - author: - - Remy Leone (@sieben) - short_description: Scaleway (previously Online SAS or Online.net) inventory source - description: - - Get inventory hosts from Scaleway (previously Online SAS or Online.net). - options: - plugin: - description: token that ensures this is a source file for the 'online' plugin. - required: True - choices: ['online', 'community.general.online'] - oauth_token: - required: True - description: Online OAuth token. - env: - # in order of precedence - - name: ONLINE_TOKEN - - name: ONLINE_API_KEY - - name: ONLINE_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - hostname - groups: - description: List of groups. - type: list - choices: - - location - - offer - - rpn -''' +DOCUMENTATION = r""" +name: online +author: + - Remy Leone (@remyleone) +short_description: Scaleway (previously Online SAS or Online.net) inventory source +description: + - Get inventory hosts from Scaleway (previously Online SAS or Online.net). +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin. 
+    type: string
+    required: true
+    choices: ['online', 'community.general.online']
+  oauth_token:
+    required: true
+    description: Online OAuth token.
+    type: string
+    env:
+      # in order of precedence
+      - name: ONLINE_TOKEN
+      - name: ONLINE_API_KEY
+      - name: ONLINE_OAUTH_TOKEN
+  hostnames:
+    description: List of preferences about what to use as a hostname.
+    type: list
+    elements: string
+    default:
+      - public_ipv4
+    choices:
+      - public_ipv4
+      - private_ipv4
+      - hostname
+  groups:
+    description: List of groups.
+    type: list
+    elements: string
+    choices:
+      - location
+      - offer
+      - rpn
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 # online_inventory.yml file in YAML format
 # Example command line: ansible-inventory --list -i online_inventory.yml
@@ -54,17 +57,19 @@ groups:
   - location
   - offer
   - rpn
-'''
+"""
 
 import json
 from sys import version as python_version
+from urllib.parse import urljoin
 
 from ansible.errors import AnsibleError
 from ansible.module_utils.urls import open_url
 from ansible.plugins.inventory import BaseInventoryPlugin
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.converters import to_text
 from ansible.module_utils.ansible_release import __version__ as ansible_version
-from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
 
 
 class InventoryModule(BaseInventoryPlugin):
@@ -131,7 +136,7 @@ class InventoryModule(BaseInventoryPlugin):
         try:
             response = open_url(url, headers=self.headers)
         except Exception as e:
-            self.display.warning("An error happened while fetching: %s" % url)
+            self.display.warning(f"An error happened while fetching: {url}")
             return None
 
         try:
@@ -166,20 +171,20 @@ class InventoryModule(BaseInventoryPlugin):
             "support"
         )
         for attribute in targeted_attributes:
-            self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+            self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute]))
 
         if self.extract_public_ipv4(host_infos=host_infos):
-            self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
-            self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+            self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)))
+            self.inventory.set_variable(hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)))
 
         if self.extract_private_ipv4(host_infos=host_infos):
-            self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+            self.inventory.set_variable(hostname, "private_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos)))
 
         if self.extract_os_name(host_infos=host_infos):
-            self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+            self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos)))
 
         if self.extract_os_version(host_infos=host_infos):
-            self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
+            self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_version(host_infos=host_infos)))
 
     def _filter_host(self, host_infos, hostname_preferences):
 
@@ -198,6 +203,8 @@ class InventoryModule(BaseInventoryPlugin):
         if not hostname:
             return
 
+        hostname = make_unsafe(hostname)
+
         self.inventory.add_host(host=hostname)
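A pattern that recurs throughout these inventory hunks: every value originating from a remote API (hostnames, IPs, OS names) is now passed through make_unsafe() before reaching the inventory. The evident intent is to mark remote-controlled strings as unsafe for templating so a later Jinja2 pass leaves them verbatim; conceptually the helper behaves like ansible-core's wrap_var, as in this hypothetical illustration:

```python
# Hypothetical hostile hostname returned by a remote API; if it were ever
# templated, the embedded lookup would run a shell command on the controller.
api_hostname = "web-{{ lookup('pipe', 'id') }}"

# ansible-core's wrap_var marks a value (recursively, for containers) as
# unsafe, so the templar treats it as literal text instead of evaluating it.
from ansible.utils.unsafe_proxy import wrap_var

safe_hostname = wrap_var(api_hostname)
```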
         self._fill_host_variables(hostname=hostname, host_infos=host_infos)
 
@@ -207,6 +214,8 @@ class InventoryModule(BaseInventoryPlugin):
         if not group:
             return
 
+        group = make_unsafe(group)
+
         self.inventory.add_group(group=group)
         self.inventory.add_host(group=group, host=hostname)
 
@@ -234,8 +243,8 @@ class InventoryModule(BaseInventoryPlugin):
         }
 
         self.headers = {
-            'Authorization': "Bearer %s" % token,
-            'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
+            'Authorization': f"Bearer {token}",
+            'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}",
             'Content-type': 'application/json'
         }
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py
index 921dd96525..26f7a21d88 100644
--- a/plugins/inventory/opennebula.py
+++ b/plugins/inventory/opennebula.py
@@ -1,82 +1,79 @@
-# -*- coding: utf-8 -*-
 # Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
 
-__metaclass__ = type
 
-DOCUMENTATION = r'''
-    name: opennebula
-    author:
-        - Kristian Feldsam (@feldsam)
-    short_description: OpenNebula inventory source
-    version_added: "3.8.0"
-    extends_documentation_fragment:
-        - constructed
+DOCUMENTATION = r"""
+name: opennebula
+author:
+  - Kristian Feldsam (@feldsam)
+short_description: OpenNebula inventory source
+version_added: "3.8.0"
+extends_documentation_fragment:
+  - constructed
+description:
+  - Get inventory hosts from OpenNebula cloud.
+  - Uses a YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values.
+  - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to an OpenNebula credentials file.
+options:
+  plugin:
+    description: Token that ensures this is a source file for the 'opennebula' plugin.
+    type: string
+    required: true
+    choices: [community.general.opennebula]
+  api_url:
     description:
-        - Get inventory hosts from OpenNebula cloud.
-        - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
          to set parameter values.
-        - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.
-    options:
-        plugin:
-            description: Token that ensures this is a source file for the 'opennebula' plugin.
-            type: string
-            required: true
-            choices: [ community.general.opennebula ]
-        api_url:
-            description:
-                - URL of the OpenNebula RPC server.
-                - It is recommended to use HTTPS so that the username/password are not
                  transferred over the network unencrypted.
-                - If not set then the value of the C(ONE_URL) environment variable is used.
-            env:
-                - name: ONE_URL
-            required: True
-            type: string
-        api_username:
-            description:
-                - Name of the user to login into the OpenNebula RPC server. If not set
                  then the value of the C(ONE_USERNAME) environment variable is used.
-            env:
-                - name: ONE_USERNAME
-            type: string
-        api_password:
-            description:
-                - Password or a token of the user to login into OpenNebula RPC server.
-                - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
-            env:
-                - name: ONE_PASSWORD
-            required: False
-            type: string
-        api_authfile:
-            description:
-                - If both I(api_username) or I(api_password) are not set, then it will try
                  authenticate with ONE auth file. Default path is C(~/.one/one_auth).
-                - Set environment variable C(ONE_AUTH) to override this path.
-            env:
-                - name: ONE_AUTH
-            required: False
-            type: string
-        hostname:
-            description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.
-            type: string
-            default: v4_first_ip
-            choices:
-                - v4_first_ip
-                - v6_first_ip
-                - name
-        filter_by_label:
-            description: Only return servers filtered by this label.
-            type: string
-        group_by_labels:
-            description: Create host groups by vm labels
-            type: bool
-            default: True
-'''
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+      - If not set then the value of the E(ONE_URL) environment variable is used.
+    env:
+      - name: ONE_URL
+    required: true
+    type: string
+  api_username:
+    description:
+      - Name of the user to log in to the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment
+        variable is used.
+    env:
+      - name: ONE_USERNAME
+    type: string
+  api_password:
+    description:
+      - Password or a token of the user to log in to the OpenNebula RPC server.
+      - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
+    env:
+      - name: ONE_PASSWORD
+    required: false
+    type: string
+  api_authfile:
+    description:
+      - If neither O(api_username) nor O(api_password) is set, then it tries to authenticate with the ONE auth file. Default
+        path is C(~/.one/one_auth).
+      - Set environment variable E(ONE_AUTH) to override this path.
+    env:
+      - name: ONE_AUTH
+    required: false
+    type: string
+  hostname:
+    description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on the VM.
+    type: string
+    default: v4_first_ip
+    choices:
+      - v4_first_ip
+      - v6_first_ip
+      - name
+  filter_by_label:
+    description: Only return servers filtered by this label.
+    type: string
+  group_by_labels:
+    description: Create host groups by VM labels.
+ type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" # inventory_opennebula.yml file in YAML format # Example command line: ansible-inventory --list -i inventory_opennebula.yml @@ -84,7 +81,7 @@ EXAMPLES = r''' plugin: community.general.opennebula api_url: https://opennebula:2633/RPC2 filter_by_label: Cache -''' +""" try: import pyone @@ -95,7 +92,8 @@ except ImportError: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from collections import namedtuple import os @@ -125,9 +123,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): authstring = fp.read().rstrip() username, password = authstring.split(":") except (OSError, IOError): - raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Could not find or read ONE_AUTH file at '{authfile}'") except Exception: - raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'") auth_params = namedtuple('auth', ('url', 'username', 'password')) @@ -140,7 +138,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): nic = [nic] for net in nic: - return net['IP'] + if net.get('IP'): + return net['IP'] return False @@ -162,13 +161,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not (auth.username and auth.password): raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.') else: - one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}") # get hosts (VMs) try: vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3) except Exception as e: - raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e))) + raise AnsibleError(f"Something happened during XML-RPC call: {e}") return vm_pool @@ -195,6 +194,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): continue server['name'] = vm.NAME + server['id'] = vm.ID + if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY: + server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME server['LABELS'] = labels server['v4_first_ip'] = self._get_vm_ipv4(vm) server['v6_first_ip'] = self._get_vm_ipv6(vm) @@ -206,28 +208,41 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def _populate(self): hostname_preference = self.get_option('hostname') group_by_labels = self.get_option('group_by_labels') + strict = self.get_option('strict') # Add a top group 'one' self.inventory.add_group(group='all') filter_by_label = self.get_option('filter_by_label') - for server in self._retrieve_servers(filter_by_label): + servers = self._retrieve_servers(filter_by_label) + for server in servers: + server = make_unsafe(server) + hostname = server['name'] # check for labels if group_by_labels and server['LABELS']: for label in server['LABELS']: self.inventory.add_group(group=label) - self.inventory.add_host(host=server['name'], group=label) + self.inventory.add_host(host=hostname, group=label) - self.inventory.add_host(host=server['name'], group='all') + self.inventory.add_host(host=hostname, group='all') for attribute, value in server.items(): - self.inventory.set_variable(server['name'], attribute, value) + 
self.inventory.set_variable(hostname, attribute, value) if hostname_preference != 'name': - self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference]) + self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference]) if server.get('SSH_PORT'): - self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT']) + self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT']) + + # handle construcable implementation: get composed variables if any + self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict) + + # groups based on jinja conditionals get added to specific groups + self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict) + + # groups based on variables associated with them in the inventory + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict) def parse(self, inventory, loader, path, cache=True): if not HAS_PYONE: diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py deleted file mode 100644 index f52f0f1bb3..0000000000 --- a/plugins/inventory/proxmox.py +++ /dev/null @@ -1,478 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: proxmox - short_description: Proxmox inventory source - version_added: "1.2.0" - author: - - Jeffrey van Pelt (@Thulium-Drake) - requirements: - - requests >= 1.1 - description: - - Get inventory hosts from a Proxmox PVE cluster. - - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)" - - Will retrieve the first network interface with an IP for Proxmox nodes. - - Can retrieve LXC/QEMU configuration as facts. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as it's own. - required: yes - choices: ['community.general.proxmox'] - type: str - url: - description: - - URL to Proxmox cluster. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead. - default: 'http://localhost:8006' - type: str - env: - - name: PROXMOX_URL - version_added: 2.0.0 - user: - description: - - Proxmox authentication user. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead. - required: yes - type: str - env: - - name: PROXMOX_USER - version_added: 2.0.0 - password: - description: - - Proxmox authentication password. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead. - required: yes - type: str - env: - - name: PROXMOX_PASSWORD - version_added: 2.0.0 - validate_certs: - description: Verify SSL certificate if using HTTPS. - type: boolean - default: yes - group_prefix: - description: Prefix to apply to Proxmox groups. - default: proxmox_ - type: str - facts_prefix: - description: Prefix to apply to LXC/QEMU config facts. - default: proxmox_ - type: str - want_facts: - description: Gather LXC/QEMU configuration facts. 
- default: no - type: bool - want_proxmox_nodes_ansible_host: - version_added: 3.0.0 - description: - - Whether to set C(ansbile_host) for proxmox nodes. - - When set to C(true) (default), will use the first available interface. This can be different from what you expect. - default: true - type: bool - strict: - version_added: 2.5.0 - compose: - version_added: 2.5.0 - groups: - version_added: 2.5.0 - keyed_groups: - version_added: 2.5.0 -''' - -EXAMPLES = ''' -# Minimal example which will not gather additional facts for QEMU/LXC guests -# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 -# my.proxmox.yml -plugin: community.general.proxmox -user: ansible@pve -password: secure - -# More complete example demonstrating the use of 'want_facts' and the constructed options -# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' -# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -keyed_groups: - # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - - key: proxmox_tags_parsed - separator: "" - prefix: group -groups: - webservers: "'web' in (proxmox_tags_parsed|list)" - mailservers: "'mail' in (proxmox_tags_parsed|list)" -compose: - ansible_port: 2222 -''' - -import re - -from ansible.module_utils.common._collections_compat import MutableMapping -from distutils.version import LooseVersion - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.module_utils.six.moves.urllib.parse import urlencode - -# 3rd party imports -try: - import requests - if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - raise ImportError - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using Proxmox as source. 
''' - - NAME = 'community.general.proxmox' - - def __init__(self): - - super(InventoryModule, self).__init__() - - # from config - self.proxmox_url = None - - self.session = None - self.cache_key = None - self.use_cache = None - - def verify_file(self, path): - - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('proxmox.yaml', 'proxmox.yml')): - valid = True - else: - self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"') - return valid - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.verify = self.get_option('validate_certs') - return self.session - - def _get_auth(self): - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) - - a = self._get_session() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) - - json = ret.json() - - self.credentials = { - 'ticket': json['data']['ticket'], - 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], - } - - def _get_json(self, url, ignore_errors=None): - - if not self.use_cache or url not in self._cache.get(self.cache_key, {}): - - if self.cache_key not in self._cache: - self._cache[self.cache_key] = {'url': ''} - - data = [] - s = self._get_session() - while True: - headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])} - ret = s.get(url, headers=headers) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - - # process results - # FIXME: This assumes 'return type' matches a specific query, - # it will break if we expand the queries and they dont have different types - if 'data' not in json: - # /hosts/:id does not have a 'data' key - data = json - break - elif isinstance(json['data'], MutableMapping): - # /facts are returned as dict in 'data' - data = json['data'] - break - else: - # /hosts 's 'results' is a list of all hosts, returned is paginated - data = data + json['data'] - break - - self._cache[self.cache_key][url] = data - - return self._cache[self.cache_key][url] - - def _get_nodes(self): - return self._get_json("%s/api2/json/nodes" % self.proxmox_url) - - def _get_pools(self): - return self._get_json("%s/api2/json/pools" % self.proxmox_url) - - def _get_lxc_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node)) - - def _get_qemu_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node)) - - def _get_members_per_pool(self, pool): - ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool)) - return ret['members'] - - def _get_node_ip(self, node): - ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node)) - - for iface in ret: - try: - return iface['address'] - except Exception: - return None - - def _get_agent_network_interfaces(self, node, vmid, vmtype): - result = [] - - try: - ifaces = self._get_json( - "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % ( - self.proxmox_url, node, vmtype, vmid - ) - )['result'] - - if "error" in ifaces: - if "class" in ifaces["error"]: - # This happens on Windows, even though qemu agent is running, the IP address - # cannot be fetched, as it's unsupported, also a command disabled can happen. 
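For reviewers reading the file removal below: the deleted branch inspected the guest-agent reply for an error object before treating it as an interface list. The reply shape here is inferred from the removed checks themselves, not from Proxmox documentation, and the values are invented:

```python
# Inferred shape of a failing qemu-guest-agent reply; the removed code only
# consulted the 'class' key. Values are illustrative.
failing_reply = {'error': {'class': 'Unsupported', 'desc': 'not supported on this guest'}}

if 'error' in failing_reply and 'class' in failing_reply['error']:
    if failing_reply['error']['class'] in ('Unsupported', 'CommandDisabled'):
        interfaces = []  # the plugin logged a verbose message and returned no interfaces
```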
- errorClass = ifaces["error"]["class"] - if errorClass in ["Unsupported"]: - self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported") - elif errorClass in ["CommandDisabled"]: - self.display.v("Retrieving network interfaces from guest agents has been disabled") - return result - - for iface in ifaces: - result.append({ - 'name': iface['name'], - 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', - 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] - }) - except requests.HTTPError: - pass - - return result - - def _get_vm_config(self, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) - - node_key = 'node' - node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower())) - self.inventory.set_variable(name, node_key, node) - - vmid_key = 'vmid' - vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower())) - self.inventory.set_variable(name, vmid_key, vmid) - - vmtype_key = 'vmtype' - vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower())) - self.inventory.set_variable(name, vmtype_key, vmtype) - - plaintext_configs = [ - 'tags', - ] - - for config in ret: - key = config - key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower())) - value = ret[config] - try: - # fixup disk images as they have no key - if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): - value = ('disk_image=' + value) - - # Additional field containing parsed tags as list - if config == 'tags': - parsed_key = self.to_safe('%s%s' % (key, "_parsed")) - parsed_value = [tag.strip() for tag in value.split(",")] - self.inventory.set_variable(name, parsed_key, parsed_value) - - # The first field in the agent string tells you whether the agent is enabled - # the rest of the comma separated string is extra config for the agent - if config == 'agent' and int(value.split(',')[0]): - agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) - agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) - if agent_iface_value: - self.inventory.set_variable(name, agent_iface_key, agent_iface_value) - - if not (isinstance(value, int) or ',' not in value): - # split off strings with commas to a dict - # skip over any keys that cannot be processed - try: - value = dict(key.split("=") for key in value.split(",")) - except Exception: - continue - - self.inventory.set_variable(name, key, value) - except NameError: - return None - - def _get_vm_status(self, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid)) - - status = ret['status'] - status_key = 'status' - status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower())) - self.inventory.set_variable(name, status_key, status) - - def _get_vm_snapshots(self, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) - - snapshots_key = 'snapshots' - snapshots_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), snapshots_key.lower())) - - snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] - self.inventory.set_variable(name, snapshots_key, snapshots) - - def to_safe(self, word): - 
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - #> ProxmoxInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - - def _apply_constructable(self, name, variables): - strict = self.get_option('strict') - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict) - self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) - - def _populate(self): - - self._get_auth() - - # gather vm's on nodes - for node in self._get_nodes(): - # FIXME: this can probably be cleaner - # create groups - lxc_group = 'all_lxc' - lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), lxc_group.lower())) - self.inventory.add_group(lxc_group) - qemu_group = 'all_qemu' - qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), qemu_group.lower())) - self.inventory.add_group(qemu_group) - nodes_group = 'nodes' - nodes_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), nodes_group.lower())) - self.inventory.add_group(nodes_group) - running_group = 'all_running' - running_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), running_group.lower())) - self.inventory.add_group(running_group) - stopped_group = 'all_stopped' - stopped_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), stopped_group.lower())) - self.inventory.add_group(stopped_group) - - if node.get('node'): - self.inventory.add_host(node['node']) - - if node['type'] == 'node': - self.inventory.add_child(nodes_group, node['node']) - - if node['status'] == 'offline': - continue - - # get node IP address - if self.get_option("want_proxmox_nodes_ansible_host"): - ip = self._get_node_ip(node['node']) - self.inventory.set_variable(node['node'], 'ansible_host', ip) - - # get LXC containers for this node - node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower())) - self.inventory.add_group(node_lxc_group) - for lxc in self._get_lxc_per_node(node['node']): - self.inventory.add_host(lxc['name']) - self.inventory.add_child(lxc_group, lxc['name']) - self.inventory.add_child(node_lxc_group, lxc['name']) - - # get LXC status when want_facts == True - if self.get_option('want_facts'): - self._get_vm_status(node['node'], lxc['vmid'], 'lxc', lxc['name']) - if lxc['status'] == 'stopped': - self.inventory.add_child(stopped_group, lxc['name']) - elif lxc['status'] == 'running': - self.inventory.add_child(running_group, lxc['name']) - - # get LXC config and snapshots for facts - if self.get_option('want_facts'): - self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name']) - self._get_vm_snapshots(node['node'], lxc['vmid'], 'lxc', lxc['name']) - - self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars()) - - # get QEMU vm's for this node - node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower())) - self.inventory.add_group(node_qemu_group) - for qemu in self._get_qemu_per_node(node['node']): - if qemu.get('template'): - continue - - self.inventory.add_host(qemu['name']) - self.inventory.add_child(qemu_group, qemu['name']) - self.inventory.add_child(node_qemu_group, qemu['name']) - - # get QEMU status - self._get_vm_status(node['node'], qemu['vmid'], 'qemu', qemu['name']) - if qemu['status'] == 
'stopped': - self.inventory.add_child(stopped_group, qemu['name']) - elif qemu['status'] == 'running': - self.inventory.add_child(running_group, qemu['name']) - - # get QEMU config and snapshots for facts - if self.get_option('want_facts'): - self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name']) - self._get_vm_snapshots(node['node'], qemu['vmid'], 'qemu', qemu['name']) - - self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars()) - - # gather vm's in pools - for pool in self._get_pools(): - if pool.get('poolid'): - pool_group = 'pool_' + pool['poolid'] - pool_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), pool_group.lower())) - self.inventory.add_group(pool_group) - - for member in self._get_members_per_pool(pool['poolid']): - if member.get('name'): - if not member.get('template'): - self.inventory.add_child(pool_group, member['name']) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_REQUESTS: - raise AnsibleError('This module requires Python Requests 1.1.0 or higher: ' - 'https://github.com/psf/requests.') - - super(InventoryModule, self).parse(inventory, loader, path) - - # read config from file, this sets 'options' - self._read_config_data(path) - - # get connection host - self.proxmox_url = self.get_option('url').rstrip('/') - self.proxmox_user = self.get_option('user') - self.proxmox_password = self.get_option('password') - self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') - - # actually populate inventory - self._populate() diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index fa65eae321..59c19b498b 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,70 +1,85 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: scaleway - author: - - Remy Leone (@sieben) - short_description: Scaleway inventory source +DOCUMENTATION = r""" +name: scaleway +author: + - Remy Leone (@remyleone) +short_description: Scaleway inventory source +description: + - Get inventory hosts from Scaleway. +requirements: + - PyYAML +options: + plugin: + description: Token that ensures this is a source file for the 'scaleway' plugin. + required: true + type: string + choices: ['scaleway', 'community.general.scaleway'] + regions: + description: Filter results on a specific Scaleway region. + type: list + elements: string + default: + - ams1 + - ams2 + - ams3 + - par1 + - par2 + - par3 + - waw1 + - waw2 + - waw3 + tags: + description: Filter results on a specific tag. + type: list + elements: string + scw_profile: description: - - Get inventory hosts from Scaleway. - requirements: - - PyYAML - options: - plugin: - description: Token that ensures this is a source file for the 'scaleway' plugin. - required: True - choices: ['scaleway', 'community.general.scaleway'] - regions: - description: Filter results on a specific Scaleway region. - type: list - default: - - ams1 - - par1 - - par2 - - waw1 - tags: - description: Filter results on a specific tag. 
- type: list - oauth_token: - description: - - Scaleway OAuth token. - - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file - (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). - env: - # in order of precedence - - name: SCW_TOKEN - - name: SCW_API_KEY - - name: SCW_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - public_ipv6 - - hostname - - id - variables: - description: 'Set individual variables: keys are variable names and - values are templates. Any value returned by the - L(Scaleway API, https://developer.scaleway.com/#servers-server-get) - can be used.' - type: dict -''' + - The config profile to use in config file. + - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is + not defined. + type: string + version_added: 4.4.0 + oauth_token: + description: + - Scaleway OAuth token. + - If not explicitly defined or in environment variables, it tries to lookup in the C(scaleway-cli) configuration file + (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). + - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). + type: string + env: + # in order of precedence + - name: SCW_TOKEN + - name: SCW_API_KEY + - name: SCW_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - public_ipv6 + - hostname + - id + variables: + description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway + API, https://developer.scaleway.com/#servers-server-get) can be used.' 
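The config-file fallback described for O(oauth_token), combined with the new O(scw_profile) option, resolves roughly as in the sketch below. The nested `profiles` layout is an assumption based on the scaleway-cli config format, and the plugin checks the E(SCW_TOKEN)/E(SCW_API_KEY)/E(SCW_OAUTH_TOKEN) environment variables first.

```python
# Sketch of the OAuth token fallback; the nested 'profiles' mapping is
# an assumption about the scaleway-cli config layout.
import os
import yaml

def resolve_scw_token(scw_profile=None):
    path = os.getenv(
        "SCW_CONFIG_PATH", os.path.expanduser("~/.config/scw/config.yaml")
    )
    if not os.path.exists(path):
        return None
    with open(path) as fh:
        config = yaml.safe_load(fh)
    # An explicitly configured profile wins over active_profile.
    profile = scw_profile or config.get("active_profile", "default")
    if profile == "default":
        return config.get("secret_key")
    return config.get("profiles", {}).get(profile, {}).get("secret_key")
```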
+ type: dict +""" -EXAMPLES = r''' +EXAMPLES = r""" # scaleway_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i scaleway_inventory.yml +--- # use hostname as inventory_hostname # use the private IP address to connect to the host plugin: community.general.scaleway @@ -79,6 +94,7 @@ variables: ansible_host: private_ip state: state +--- # use hostname as inventory_hostname and public IP address to connect to the host plugin: community.general.scaleway hostnames: @@ -88,6 +104,7 @@ regions: variables: ansible_host: public_ip.address +--- # Using static strings as variables plugin: community.general.scaleway hostnames: @@ -96,7 +113,7 @@ variables: ansible_host: public_ip.address ansible_connection: "'ssh'" ansible_user: "'admin'" -''' +""" import os import json @@ -111,11 +128,11 @@ else: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import raise_from +from ansible.module_utils.common.text.converters import to_text -import ansible.module_utils.six.moves.urllib.parse as urllib_parse +import urllib.parse as urllib_parse def _fetch_information(token, url): @@ -127,7 +144,7 @@ def _fetch_information(token, url): headers={'X-Auth-Token': token, 'Content-type': 'application/json'}) except Exception as e: - raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) + raise AnsibleError(f"Error while fetching {url}: {e}") try: raw_json = json.loads(to_text(response.read())) except ValueError: @@ -148,7 +165,7 @@ def _fetch_information(token, url): def _build_server_url(api_endpoint): - return "/".join([api_endpoint, "servers"]) + return f"{api_endpoint}/servers" def extract_public_ipv4(server_info): @@ -269,7 +286,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): zone_info = SCALEWAY_LOCATION[zone] url = _build_server_url(zone_info["api_endpoint"]) - raw_zone_hosts_infos = _fetch_information(url=url, token=token) + raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token)) for host_infos in raw_zone_hosts_infos: @@ -303,7 +320,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not oauth_token and os.path.exists(scw_config_path): with open(scw_config_path) as fh: scw_config = yaml.safe_load(fh) - active_profile = scw_config.get('active_profile', 'default') + ansible_profile = self.get_option('scw_profile') + + if ansible_profile: + active_profile = ansible_profile + else: + active_profile = scw_config.get('active_profile', 'default') + if active_profile == 'default': oauth_token = scw_config.get('secret_key') else: @@ -313,7 +336,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def parse(self, inventory, loader, path, cache=True): if YAML_IMPORT_ERROR: - raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR) + raise AnsibleError('PyYAML is probably missing') from YAML_IMPORT_ERROR super(InventoryModule, self).parse(inventory, loader, path) self._read_config_data(path=path) @@ -325,4 +348,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable): hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): - 
self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference) + self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py deleted file mode 100644 index d777875578..0000000000 --- a/plugins/inventory/stackpath_compute.py +++ /dev/null @@ -1,283 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020 Shay Rybak -# Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: stackpath_compute - short_description: StackPath Edge Computing inventory source - version_added: 1.2.0 - author: - - UNKNOWN (@shayrybak) - extends_documentation_fragment: - - inventory_cache - - constructed - description: - - Get inventory hosts from StackPath Edge Computing. - - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml). - options: - plugin: - description: - - A token that ensures this is a source file for the plugin. - required: true - choices: ['community.general.stackpath_compute'] - client_id: - description: - - An OAuth client ID generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - client_secret: - description: - - An OAuth client secret generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - stack_slugs: - description: - - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account. - type: list - elements: str - use_internal_ip: - description: - - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise. - - If an instance doesn't have an external IP it will not be returned when this option is set to false. - type: bool -''' - -EXAMPLES = ''' -# Example using credentials to fetch all workload instances in a stack. 
---- -plugin: community.general.stackpath_compute -client_id: my_client_id -client_secret: my_client_secret -stack_slugs: -- my_first_stack_slug -- my_other_stack_slug -use_internal_ip: false -''' - -import traceback -import json - -from ansible.errors import AnsibleError -from ansible.module_utils.urls import open_url -from ansible.plugins.inventory import ( - BaseInventoryPlugin, - Constructable, - Cacheable -) -from ansible.utils.display import Display - - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.stackpath_compute' - - def __init__(self): - super(InventoryModule, self).__init__() - - # credentials - self.client_id = None - self.client_secret = None - self.stack_slug = None - self.api_host = "https://gateway.stackpath.com" - self.group_keys = [ - "stackSlug", - "workloadId", - "cityCode", - "countryCode", - "continent", - "target", - "name", - "workloadSlug" - ] - - def _validate_config(self, config): - if config['plugin'] != 'community.general.stackpath_compute': - raise AnsibleError("plugin doesn't match this plugin") - try: - client_id = config['client_id'] - if len(client_id) != 32: - raise AnsibleError("client_id must be 32 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - try: - client_secret = config['client_secret'] - if len(client_secret) != 64: - raise AnsibleError("client_secret must be 64 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - return True - - def _set_credentials(self): - ''' - :param config_data: contents of the inventory config file - ''' - self.client_id = self.get_option('client_id') - self.client_secret = self.get_option('client_secret') - - def _authenticate(self): - payload = json.dumps( - { - "client_id": self.client_id, - "client_secret": self.client_secret, - "grant_type": "client_credentials", - } - ) - headers = { - "Content-Type": "application/json", - } - resp = open_url( - self.api_host + '/identity/v1/oauth2/token', - headers=headers, - data=payload, - method="POST" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - self.auth_token = json.loads(body)["access_token"] - - def _query(self): - results = [] - workloads = [] - self._authenticate() - for stack_slug in self.stack_slugs: - try: - workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads') - except Exception: - raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc()) - for workload in workloads: - try: - workload_instances = self._stackpath_query_get_list( - self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances' - ) - except Exception: - raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc()) - for instance in workload_instances: - if instance["phase"] == "RUNNING": - instance["stackSlug"] = stack_slug - instance["workloadId"] = workload["id"] - instance["workloadSlug"] = workload["slug"] - instance["cityCode"] = instance["location"]["cityCode"] - instance["countryCode"] = instance["location"]["countryCode"] - instance["continent"] = instance["location"]["continent"] - instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"] - try: - if instance[self.hostname_key]: - results.append(instance) - except KeyError: - pass - return results - - def 
_populate(self, instances): - for instance in instances: - for group_key in self.group_keys: - group = group_key + "_" + instance[group_key] - group = group.lower().replace(" ", "_").replace("-", "_") - self.inventory.add_group(group) - self.inventory.add_host(instance[self.hostname_key], - group=group) - - def _stackpath_query_get_list(self, url): - self._authenticate() - headers = { - "Content-Type": "application/json", - "Authorization": "Bearer " + self.auth_token, - } - next_page = True - result = [] - cursor = '-1' - while next_page: - resp = open_url( - url + '?page_request.first=10&page_request.after=%s' % cursor, - headers=headers, - method="GET" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - body_json = json.loads(body) - result.extend(body_json["results"]) - next_page = body_json["pageInfo"]["hasNextPage"] - if next_page: - cursor = body_json["pageInfo"]["endCursor"] - return result - - def _get_stack_slugs(self, stacks): - self.stack_slugs = [stack["slug"] for stack in stacks] - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')): - return True - display.debug( - "stackpath_compute inventory filename must end with \ - 'stackpath_compute.yml' or 'stackpath_compute.yaml'" - ) - return False - - def parse(self, inventory, loader, path, cache=True): - - super(InventoryModule, self).parse(inventory, loader, path) - - config = self._read_config_data(path) - self._validate_config(config) - self._set_credentials() - - # get user specifications - self.use_internal_ip = self.get_option('use_internal_ip') - if self.use_internal_ip: - self.hostname_key = "ipAddress" - else: - self.hostname_key = "externalIpAddress" - - self.stack_slugs = self.get_option('stack_slugs') - if not self.stack_slugs: - try: - stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks') - self._get_stack_slugs(stacks) - except Exception: - raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc()) - - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = self.get_option('cache') - - # Generate inventory - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True - - if not cache or cache_needs_update: - results = self._query() - - self._populate(results) - - # If the cache has expired/doesn't exist or - # if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - try: - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = results - except Exception: - raise AnsibleError("Failed to populate data: %s" % traceback.format_exc()) diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 89a77c88bb..564db57dac 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,67 +1,88 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: virtualbox - short_description: virtualbox inventory source +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: virtualbox +short_description: Virtualbox inventory source +description: + - Get inventory hosts from the local virtualbox installation. + - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). + - The inventory_hostname is always the 'Name' of the virtualbox instance. + - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter. + - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation + for details. +extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin. + type: string + required: true + choices: ['virtualbox', 'community.general.virtualbox'] + running_only: + description: Toggles showing all VMs instead of only those currently running. + type: boolean + default: false + settings_password_file: + description: Provide a file containing the settings password (equivalent to C(--settingspwfile)). + type: string + network_info_path: + description: Property path to query for network information (C(ansible_host)). + type: string + default: "/VirtualBox/GuestInfo/Net/0/V4/IP" + query: + description: Create vars from virtualbox properties. + type: dictionary + default: {} + enable_advanced_group_parsing: description: - - Get inventory hosts from the local virtualbox installation. - - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). - - The inventory_hostname is always the 'Name' of the virtualbox instance. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: token that ensures this is a source file for the 'virtualbox' plugin - required: True - choices: ['virtualbox', 'community.general.virtualbox'] - running_only: - description: toggles showing all vms vs only those currently running - type: boolean - default: False - settings_password_file: - description: provide a file containing the settings password (equivalent to --settingspwfile) - network_info_path: - description: property path to query for network information (ansible_host) - default: "/VirtualBox/GuestInfo/Net/0/V4/IP" - query: - description: create vars from virtualbox properties - type: dictionary - default: {} -''' + - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based + on the V(/) character and assign the resulting list elements as an Ansible Group. + - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups + according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,) + character, and the V(/) character indicates nested groups. 
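As a standalone illustration of the difference between the two parsing modes (the group string comes from the VirtualBox manual's example; this sketch is not the plugin code):

```python
# Contrast of the legacy and advanced group parsing described above.
group_string = "/TestGroup/TestGroup2,/TestGroup3"

# Legacy mode (enable_advanced_group_parsing=false): split on '/' only;
# each non-empty element becomes a flat Ansible group.
legacy_groups = [g for g in group_string.split('/') if g]
# -> ['TestGroup', 'TestGroup2,', 'TestGroup3']  (note the stray comma)

# Advanced mode: ',' separates memberships, '/' expresses nesting.
memberships = [
    [part for part in membership.split('/') if part]
    for membership in group_string.split(',')
]
# -> [['TestGroup', 'TestGroup2'], ['TestGroup3']]
print(legacy_groups, memberships)
```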
+ - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") + results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2) + and C(TestGroup3). + default: false + type: bool + version_added: 9.2.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named vbox.yaml or vbox.yml -simple_config_file: - plugin: community.general.virtualbox - settings_password_file: /etc/virtulbox/secrets - query: - logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList - compose: - ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +plugin: community.general.virtualbox +settings_password_file: /etc/virtualbox/secrets +query: + logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList +compose: + ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +--- # add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory' plugin: community.general.virtualbox groups: container: "'minis' in (inventory_hostname)" -''' +""" import os from subprocess import Popen, PIPE from ansible.errors import AnsibleParserError -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.common.text.converters import to_bytes, to_text +from collections.abc import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): ''' Host inventory parser for ansible using local virtualbox. 
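+    Shells out to the VBoxManage binary to enumerate VMs and query their guest properties.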
''' @@ -115,6 +136,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict) def _populate_from_cache(self, source_data): + source_data = make_unsafe(source_data) hostvars = source_data.pop('_meta', {}).get('hostvars', {}) for group in source_data: if group == 'all': @@ -161,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): v = v.strip() # found host if k.startswith('Name') and ',' not in v: # some setting strings appear in Name - current_host = v + current_host = make_unsafe(v) if current_host not in hostvars: hostvars[current_host] = {} self.inventory.add_host(current_host) @@ -169,29 +191,29 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # try to get network info netdata = self._query_vbox_data(current_host, netinfo) if netdata: - self.inventory.set_variable(current_host, 'ansible_host', netdata) + self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata)) # found groups elif k == 'Groups': - for group in v.split('/'): - if group: - group = self.inventory.add_group(group) - self.inventory.add_child(group, current_host) - if group not in cacheable_results: - cacheable_results[group] = {'hosts': []} - cacheable_results[group]['hosts'].append(current_host) + if self.get_option('enable_advanced_group_parsing'): + self._handle_vboxmanage_group_string(v, current_host, cacheable_results) + else: + self._handle_group_string(v, current_host, cacheable_results) continue else: # found vars, accumulate in hostvars for clean inventory set - pref_k = 'vbox_' + k.strip().replace(' ', '_') - if k.startswith(' '): - if prevkey not in hostvars[current_host]: + pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}") + leading_spaces = len(k) - len(k.lstrip(' ')) + if 0 < leading_spaces <= 2: + if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict): hostvars[current_host][prevkey] = {} - hostvars[current_host][prevkey][pref_k] = v + hostvars[current_host][prevkey][pref_k] = make_unsafe(v) + elif leading_spaces > 2: + continue else: if v != '': - hostvars[current_host][pref_k] = v + hostvars[current_host][pref_k] = make_unsafe(v) if self._ungrouped_host(current_host, cacheable_results): if 'ungrouped' not in cacheable_results: cacheable_results['ungrouped'] = {'hosts': []} @@ -219,6 +241,64 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return all(find_host(host, inventory)) + def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to this inventory's initial implementation.''' + # The original implementation of this inventory plugin treated `/` as + # a delimeter to split and use as Ansible Groups. + for group in vboxmanage_group.split('/'): + if group: + group = make_unsafe(group) + group = self.inventory.add_group(group) + self.inventory.add_child(group, current_host) + if group not in cacheable_results: + cacheable_results[group] = {'hosts': []} + cacheable_results[group]['hosts'].append(current_host) + + def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.''' + # Per the VirtualBox documentation, a VM can be part of many groups, + # and it is possible to have nested groups. 
+ # Many groups are separated by commas ",", and nested groups use + # slash "/". + # https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups + # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2" + # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2" + + for group in vboxmanage_group.split(','): + if not group: + # We could get an empty element due how to split works, and + # possible assignments from VirtualBox. e.g. ,/Group1 + continue + + if group == "/": + # This is the "root" group. We get here if the VM was not + # assigned to a particular group. Consider the host to be + # unassigned to a group. + continue + + parent_group = None + for subgroup in group.split('/'): + if not subgroup: + # Similarly to above, we could get an empty element. + # e.g //Group1 + continue + + if subgroup == '/': + # "root" group. + # Consider the host to be unassigned + continue + + subgroup = make_unsafe(subgroup) + subgroup = self.inventory.add_group(subgroup) + if parent_group is not None: + self.inventory.add_child(parent_group, subgroup) + self.inventory.add_child(subgroup, current_host) + if subgroup not in cacheable_results: + cacheable_results[subgroup] = {'hosts': []} + cacheable_results[subgroup]['hosts'].append(current_host) + + parent_group = subgroup + def verify_file(self, path): valid = False @@ -272,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: p = Popen(cmd, stdout=PIPE) except Exception as e: - raise AnsibleParserError(to_native(e)) + raise AnsibleParserError(str(e)) source_data = p.stdout.read().splitlines() diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py new file mode 100644 index 0000000000..fc0f0db757 --- /dev/null +++ b/plugins/inventory/xen_orchestra.py @@ -0,0 +1,389 @@ +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: xen_orchestra +short_description: Xen Orchestra inventory source +version_added: 4.1.0 +author: + - Dom Del Nano (@ddelnano) + - Samori Gorse (@shinuza) +requirements: + - websocket-client >= 1.0.0 +description: + - Get inventory hosts from a Xen Orchestra deployment. + - Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml). +extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to + recognize it as its own. + required: true + choices: ['community.general.xen_orchestra'] + type: str + api_host: + description: + - API host to XOA API. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) + is used instead. + type: str + env: + - name: ANSIBLE_XO_HOST + user: + description: + - Xen Orchestra user. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_USER + password: + description: + - Xen Orchestra password. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) + is used instead. 
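For context on what these credentials are used for: the plugin performs a JSON-RPC C(session.signIn) over a websocket, roughly as in this sketch (host and credentials are hypothetical placeholders):

```python
# Rough sketch of the sign-in handshake this plugin performs; host and
# credentials are hypothetical placeholders.
import json
from websocket import create_connection  # websocket-client >= 1.0.0

conn = create_connection("wss://xoa.example.com/api/")
conn.send(json.dumps({
    "id": 0,
    "jsonrpc": "2.0",
    "method": "session.signIn",
    "params": {"username": "xo", "password": "xo_pwd"},
}))
response = json.loads(conn.recv())
if "error" in response:
    raise SystemExit(f"Could not connect: {response['error']}")
```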
+ required: true + type: str + env: + - name: ANSIBLE_XO_PASSWORD + validate_certs: + description: Verify TLS certificate if using HTTPS. + type: boolean + default: true + use_ssl: + description: Use wss when connecting to the Xen Orchestra API. + type: boolean + default: true + use_vm_uuid: + description: + - Import Xen VMs to inventory using their UUID as the VM entry name. + - If set to V(false) use VM name labels instead of UUIDs. + type: boolean + default: true + version_added: 10.4.0 + use_host_uuid: + description: + - Import Xen Hosts to inventory using their UUID as the Host entry name. + - If set to V(false) use Host name labels instead of UUIDs. + type: boolean + default: true + version_added: 10.4.0 +""" + + +EXAMPLES = r""" +--- +# file must be named xen_orchestra.yaml or xen_orchestra.yml +plugin: community.general.xen_orchestra +api_host: 192.168.1.255 +user: xo +password: xo_pwd +validate_certs: true +use_ssl: true +groups: + kube_nodes: "'kube_node' in tags" +compose: + ansible_port: 2222 +use_vm_uuid: false +use_host_uuid: true +""" + +import json +import ssl +from time import sleep + +from ansible.errors import AnsibleError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + +# 3rd party imports +try: + HAS_WEBSOCKET = True + import websocket + from websocket import create_connection + + if LooseVersion(websocket.__version__) <= LooseVersion('1.0.0'): + raise ImportError +except ImportError as e: + HAS_WEBSOCKET = False + + +HALTED = 'Halted' +PAUSED = 'Paused' +RUNNING = 'Running' +SUSPENDED = 'Suspended' +POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED] +HOST_GROUP = 'xo_hosts' +POOL_GROUP = 'xo_pools' + + +def clean_group_name(label): + return label.lower().replace(' ', '-').replace('-', '_') + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using XenOrchestra as source. 
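+    Talks to the Xen Orchestra JSON-RPC API over a websocket connection.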
''' + + NAME = 'community.general.xen_orchestra' + + def __init__(self): + + super(InventoryModule, self).__init__() + + # from config + self.counter = -1 + self.session = None + self.cache_key = None + self.use_cache = None + + @property + def pointer(self): + self.counter += 1 + return self.counter + + def create_connection(self, xoa_api_host): + validate_certs = self.get_option('validate_certs') + use_ssl = self.get_option('use_ssl') + proto = 'wss' if use_ssl else 'ws' + + sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE} + self.conn = create_connection( + f'{proto}://{xoa_api_host}/api/', sslopt=sslopt) + + CALL_TIMEOUT = 100 + """Number of 1/10ths of a second to wait before method call times out.""" + + def call(self, method, params): + """Calls a method on the XO server with the provided parameters.""" + id = self.pointer + self.conn.send(json.dumps({ + 'id': id, + 'jsonrpc': '2.0', + 'method': method, + 'params': params + })) + + waited = 0 + while waited < self.CALL_TIMEOUT: + response = json.loads(self.conn.recv()) + if 'id' in response and response['id'] == id: + return response + else: + sleep(0.1) + waited += 1 + + raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.') + + def login(self, user, password): + result = self.call('session.signIn', { + 'username': user, 'password': password + }) + + if 'error' in result: + raise AnsibleError(f"Could not connect: {result['error']}") + + def get_object(self, name): + answer = self.call('xo.getAllObjects', {'filter': {'type': name}}) + + if 'error' in answer: + raise AnsibleError(f"Could not request: {answer['error']}") + + return answer['result'] + + def _get_objects(self): + self.create_connection(self.xoa_api_host) + self.login(self.xoa_user, self.xoa_password) + + return { + 'vms': self.get_object('VM'), + 'pools': self.get_object('pool'), + 'hosts': self.get_object('host'), + } + + def _apply_constructable(self, name, variables): + strict = self.get_option('strict') + self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict) + self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) + + def _add_vms(self, vms, hosts, pools): + vm_name_list = [] + for uuid, vm in vms.items(): + if self.vm_entry_name_type == 'name_label': + if vm['name_label'] not in vm_name_list: + entry_name = vm['name_label'] + vm_name_list.append(vm['name_label']) + else: + vm_duplicate_count = vm_name_list.count(vm['name_label']) + entry_name = f"{vm['name_label']}_{vm_duplicate_count}" + vm_name_list.append(vm['name_label']) + else: + entry_name = uuid + group = 'with_ip' + ip = vm.get('mainIpAddress') + power_state = vm['power_state'].lower() + pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId']) + host_name = self._host_group_name_for_uuid(hosts, vm['$container']) + + self.inventory.add_host(entry_name) + + # Grouping by power state + self.inventory.add_child(power_state, entry_name) + + # Grouping by host + if host_name: + self.inventory.add_child(host_name, entry_name) + + # Grouping by pool + if pool_name: + self.inventory.add_child(pool_name, entry_name) + + # Grouping VMs with an IP together + if ip is None: + group = 'without_ip' + self.inventory.add_group(group) + self.inventory.add_child(group, entry_name) + + # Adding meta + self.inventory.set_variable(entry_name, 'uuid', uuid) + self.inventory.set_variable(entry_name, 'ip', 
ip) + self.inventory.set_variable(entry_name, 'ansible_host', ip) + self.inventory.set_variable(entry_name, 'power_state', power_state) + self.inventory.set_variable( + entry_name, 'name_label', vm['name_label']) + self.inventory.set_variable(entry_name, 'type', vm['type']) + self.inventory.set_variable( + entry_name, 'cpus', vm['CPUs']['number']) + self.inventory.set_variable(entry_name, 'tags', vm['tags']) + self.inventory.set_variable( + entry_name, 'memory', vm['memory']['size']) + self.inventory.set_variable( + entry_name, 'has_ip', group == 'with_ip') + self.inventory.set_variable( + entry_name, 'is_managed', vm.get('managementAgentDetected', False)) + self.inventory.set_variable( + entry_name, 'os_version', vm['os_version']) + + self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars()) + + def _add_hosts(self, hosts, pools): + host_name_list = [] + for host in hosts.values(): + if self.host_entry_name_type == 'name_label': + if host['name_label'] not in host_name_list: + entry_name = host['name_label'] + host_name_list.append(host['name_label']) + else: + host_duplicate_count = host_name_list.count(host['name_label']) + entry_name = f"{host['name_label']}_{host_duplicate_count}" + host_name_list.append(host['name_label']) + else: + entry_name = host['uuid'] + + group_name = f"xo_host_{clean_group_name(host['name_label'])}" + pool_name = self._pool_group_name_for_uuid(pools, host['$poolId']) + + self.inventory.add_group(group_name) + self.inventory.add_host(entry_name) + self.inventory.add_child(HOST_GROUP, entry_name) + self.inventory.add_child(pool_name, entry_name) + + self.inventory.set_variable(entry_name, 'enabled', host['enabled']) + self.inventory.set_variable( + entry_name, 'hostname', host['hostname']) + self.inventory.set_variable(entry_name, 'memory', host['memory']) + self.inventory.set_variable(entry_name, 'address', host['address']) + self.inventory.set_variable(entry_name, 'cpus', host['cpus']) + self.inventory.set_variable(entry_name, 'type', 'host') + self.inventory.set_variable(entry_name, 'tags', host['tags']) + self.inventory.set_variable(entry_name, 'version', host['version']) + self.inventory.set_variable( + entry_name, 'power_state', host['power_state'].lower()) + self.inventory.set_variable( + entry_name, 'product_brand', host['productBrand']) + + for pool in pools.values(): + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" + + self.inventory.add_group(group_name) + + def _add_pools(self, pools): + for pool in pools.values(): + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" + + self.inventory.add_group(group_name) + + # TODO: Refactor + def _pool_group_name_for_uuid(self, pools, pool_uuid): + for pool in pools: + if pool == pool_uuid: + return f"xo_pool_{clean_group_name(pools[pool_uuid]['name_label'])}" + + # TODO: Refactor + def _host_group_name_for_uuid(self, hosts, host_uuid): + for host in hosts: + if host == host_uuid: + return f"xo_host_{clean_group_name(hosts[host_uuid]['name_label'])}" + + def _populate(self, objects): + # Prepare general groups + self.inventory.add_group(HOST_GROUP) + self.inventory.add_group(POOL_GROUP) + for group in POWER_STATES: + self.inventory.add_group(group.lower()) + + self._add_pools(objects['pools']) + self._add_hosts(objects['hosts'], objects['pools']) + self._add_vms(objects['vms'], objects['hosts'], objects['pools']) + + def verify_file(self, path): + + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('xen_orchestra.yaml', 
'xen_orchestra.yml')): + valid = True + else: + self.display.vvv( + 'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"') + return valid + + def parse(self, inventory, loader, path, cache=True): + if not HAS_WEBSOCKET: + raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: ' + 'https://github.com/websocket-client/websocket-client.') + + super(InventoryModule, self).parse(inventory, loader, path) + + # read config from file, this sets 'options' + self._read_config_data(path) + self.inventory = inventory + + self.protocol = 'wss' + self.xoa_api_host = self.get_option('api_host') + self.xoa_user = self.get_option('user') + self.xoa_password = self.get_option('password') + self.cache_key = self.get_cache_key(path) + self.use_cache = cache and self.get_option('cache') + + self.validate_certs = self.get_option('validate_certs') + if not self.get_option('use_ssl'): + self.protocol = 'ws' + + self.vm_entry_name_type = 'uuid' + if not self.get_option('use_vm_uuid'): + self.vm_entry_name_type = 'name_label' + + self.host_entry_name_type = 'uuid' + if not self.get_option('use_host_uuid'): + self.host_entry_name_type = 'name_label' + + objects = self._get_objects() + self._populate(make_unsafe(objects)) diff --git a/plugins/lookup/binary_file.py b/plugins/lookup/binary_file.py new file mode 100644 index 0000000000..3236ade3e4 --- /dev/null +++ b/plugins/lookup/binary_file.py @@ -0,0 +1,113 @@ +# +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = r""" +name: binary_file +author: Felix Fontein (@felixfontein) +short_description: Read binary file and return it Base64 encoded +version_added: 11.2.0 +description: + - This lookup returns the contents from a file on the Ansible controller's file system. + - The file is read as a binary file and its contents are returned Base64 encoded. + This is similar to using P(ansible.builtin.file#lookup) combined with P(ansible.builtin.b64encode#filter), + except that P(ansible.builtin.file#lookup) does not support binary files as it interprets the contents as UTF-8, + which can cause the wrong content being Base64 encoded. +options: + _terms: + description: + - Paths of the files to read. + - Relative paths will be searched for in different places. See R(Ansible task paths, playbook_task_paths) for more details. + required: true + type: list + elements: str + not_exist: + description: + - Determine how to react if the specified file cannot be found. + type: str + choices: + error: Raise an error. + empty: Return an empty string for the file. + empty_str: + - Return the string C(empty) for the file. + - This cannot be confused with Base64 encoding due to the missing padding. + default: error +notes: + - This lookup does not understand 'globbing' - use the P(ansible.builtin.fileglob#lookup) lookup instead. +seealso: + - plugin: ansible.builtin.b64decode + plugin_type: filter + description: >- + The b64decode filter can be used to decode Base64 encoded data. + Note that Ansible cannot handle binary data, the data will be interpreted as UTF-8 text! + - plugin: ansible.builtin.file + plugin_type: lookup + description: You can use this lookup plugin to read text files from the Ansible controller. + - module: ansible.builtin.slurp + description: >- + Also allows to read binary files Base64 encoded, but from remote targets. 
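In essence, for each found file the lookup returns what the following sketch computes (the search-path handling and the O(not_exist) modes are omitted here):

```python
# Minimal equivalent of the value returned per file; error handling and
# the not_exist modes are omitted.
import base64

def b64_of_file(path):
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")
```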
+ With C(delegate_to: localhost) can be redirected to run on the controller, but you have to know the path to the file to read. + Both this plugin and P(ansible.builtin.file#lookup) use some search path logic to for example also find files in the C(files) + directory of a role. + - ref: playbook_task_paths + description: Search paths used for relative files. +""" + +EXAMPLES = r""" +--- +- name: Output Base64 contents of binary files on screen + ansible.builtin.debug: + msg: "Content: {{ lookup('community.general.binary_file', item) }}" + loop: + - some-binary-file.bin +""" + +RETURN = r""" +_raw: + description: + - Base64 encoded content of requested files, or an empty string resp. the string C(empty), depending on the O(not_exist) option. + - This list contains one string per element of O(_terms) in the same order as O(_terms). + type: list + elements: str + returned: success +""" + +import base64 + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + +from ansible.utils.display import Display + +display = Display() + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + not_exist = self.get_option("not_exist") + + result = [] + for term in terms: + display.debug(f"Searching for binary file: {term!r}") + path = self.find_file_in_search_path(variables, "files", term, ignore_missing=(not_exist != "error")) + display.vvvv(f"community.general.binary_file lookup using {path} as file") + + if not path: + if not_exist == "empty": + result.append("") + continue + if not_exist == "empty_str": + result.append("empty") + continue + raise AnsibleLookupError(f"Could not locate file in community.general.binary_file lookup: {term}") + + try: + with open(path, "rb") as f: + result.append(base64.b64encode(f.read()).decode("utf-8")) + except Exception as exc: + raise AnsibleLookupError(f"Error while reading {path}: {exc}") + + return result diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py new file mode 100644 index 0000000000..e4d958a96f --- /dev/null +++ b/plugins/lookup/bitwarden.py @@ -0,0 +1,295 @@ +# Copyright (c) 2022, Jonathan Lung +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +name: bitwarden +author: + - Jonathan Lung (@lungj) +requirements: + - bw (command line utility) + - be logged into bitwarden + - bitwarden vault unlocked + - E(BW_SESSION) environment variable set +short_description: Retrieve secrets from Bitwarden +version_added: 5.4.0 +description: + - Retrieve secrets from Bitwarden. +options: + _terms: + description: Key(s) to fetch values for from login info. + required: true + type: list + elements: str + search: + description: + - Field to retrieve, for example V(name) or V(id). + - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element. + - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields. + type: str + default: name + version_added: 5.7.0 + field: + description: Field to fetch. Leave unset to fetch whole response. + type: str + collection_id: + description: + - Collection ID to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. 
+ type: str + version_added: 6.3.0 + collection_name: + description: + - Collection name to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 10.4.0 + organization_id: + description: Organization ID to filter results by organization. Leave unset to skip filtering. + type: str + version_added: 8.5.0 + bw_session: + description: Pass session key instead of reading from env. + type: str + version_added: 8.4.0 + result_count: + description: + - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number + of query results. Leave empty to skip this check. + type: int + version_added: 10.4.0 +""" + +EXAMPLES = r""" +- name: "Get 'password' from all Bitwarden records named 'a_test'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password') }} + +- name: "Get 'password' from Bitwarden record with ID 'bafba515-af11-47e6-abe3-af1200cd18b2'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') | first }} + +- name: "Get 'password' from all Bitwarden records named 'a_test' from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} + +- name: "Get list of all full Bitwarden records named 'a_test'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test') }} + +- name: "Get custom field 'api_key' from all Bitwarden records named 'a_test'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }} + +- name: "Get 'password' from all Bitwarden records named 'a_test', using given session key" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password', bw_session='bXZ9B5TXi6...') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_name='my_collections/test_collection') }} + +- name: "Get Bitwarden record named 'a_test', ensure there is exactly one match" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', result_count=1) }} +""" + +RETURN = r""" +_raw: + description: + - A one-element list that contains a list of requested fields or JSON objects of matches. + - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced + to a list of field values or JSON objects. 
+ type: list + elements: list +""" + +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible.parsing.ajson import AnsibleJSONDecoder +from ansible.plugins.lookup import LookupBase + + +class BitwardenException(AnsibleError): + pass + + +class Bitwarden(object): + + def __init__(self, path='bw'): + self._cli_path = path + self._session = None + + @property + def cli_path(self): + return self._cli_path + + @property + def session(self): + return self._session + + @session.setter + def session(self, value): + self._session = value + + @property + def unlocked(self): + out, err = self._run(['status'], stdin="") + decoded = AnsibleJSONDecoder().raw_decode(out)[0] + return decoded['status'] == 'unlocked' + + def _run(self, args, stdin=None, expected_rc=0): + if self.session: + args += ['--session', self.session] + + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(to_bytes(stdin)) + rc = p.wait() + if rc != expected_rc: + if len(args) > 2 and args[0] == 'get' and args[1] == 'item' and b'Not found.' in err: + return 'null', '' + raise BitwardenException(err) + return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') + + def _get_matches(self, search_value, search_field, collection_id=None, organization_id=None): + """Return matching records whose search_field is equal to key. + """ + + # Prepare set of params for Bitwarden CLI + if search_field == 'id': + params = ['get', 'item', search_value] + else: + params = ['list', 'items'] + if search_value: + params.extend(['--search', search_value]) + + if collection_id: + params.extend(['--collectionid', collection_id]) + if organization_id: + params.extend(['--organizationid', organization_id]) + + out, err = self._run(params) + + # This includes things that matched in different fields. + initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] + + if search_field == 'id': + if initial_matches is None: + initial_matches = [] + else: + initial_matches = [initial_matches] + + # Filter to only include results from the right field, if a search is requested by value or field + return [item for item in initial_matches + if not search_value or not search_field or item.get(search_field) == search_value] + + def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): + """Return a list of the specified field for records whose search_field match search_value + and filtered by collection if collection has been provided. + + If field is None, return the whole record for each match. 
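+
+        When field is set, it is resolved in this order: a custom field
+        with that name, then a key of the record's 'login' object, then
+        a top-level key of the record itself.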
+ """ + matches = self._get_matches(search_value, search_field, collection_id, organization_id) + if not field: + return matches + field_matches = [] + for match in matches: + # if there are no custom fields, then `match` has no key 'fields' + if 'fields' in match: + custom_field_found = False + for custom_field in match['fields']: + if field == custom_field['name']: + field_matches.append(custom_field['value']) + custom_field_found = True + break + if custom_field_found: + continue + if 'login' in match and field in match['login']: + field_matches.append(match['login'][field]) + continue + if field in match: + field_matches.append(match[field]) + continue + + if matches and not field_matches: + raise AnsibleError(f"field {field} does not exist in {search_value}") + + return field_matches + + def get_collection_ids(self, collection_name: str, organization_id=None) -> list[str]: + """Return matching IDs of collections whose name is equal to collection_name.""" + + # Prepare set of params for Bitwarden CLI + params = ['list', 'collections', '--search', collection_name] + + if organization_id: + params.extend(['--organizationid', organization_id]) + + out, err = self._run(params) + + # This includes things that matched in different fields. + initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] + + # Filter to only return the ID of a collections with exactly matching name + return [item['id'] for item in initial_matches + if str(item.get('name')).lower() == collection_name.lower()] + + +class LookupModule(LookupBase): + + def run(self, terms=None, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + field = self.get_option('field') + search_field = self.get_option('search') + collection_id = self.get_option('collection_id') + collection_name = self.get_option('collection_name') + organization_id = self.get_option('organization_id') + result_count = self.get_option('result_count') + _bitwarden.session = self.get_option('bw_session') + + if not _bitwarden.unlocked: + raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.") + + if not terms: + terms = [None] + + if collection_name and collection_id: + raise AnsibleOptionsError("'collection_name' and 'collection_id' are mutually exclusive!") + elif collection_name: + collection_ids = _bitwarden.get_collection_ids(collection_name, organization_id) + if not collection_ids: + raise BitwardenException("No matching collections found!") + else: + collection_ids = [collection_id] + + results = [ + _bitwarden.get_field(field, term, search_field, collection_id, organization_id) + for collection_id in collection_ids + for term in terms + ] + + for result in results: + if result_count is not None and len(result) != result_count: + raise BitwardenException( + f"Number of results doesn't match result_count! 
({len(result)} != {result_count})") + + return results + + +_bitwarden = Bitwarden() diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py new file mode 100644 index 0000000000..0227c16bae --- /dev/null +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -0,0 +1,161 @@ +# Copyright (c) 2023, jantari (https://github.com/jantari) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +name: bitwarden_secrets_manager +author: + - jantari (@jantari) +requirements: + - bws (command line utility) +short_description: Retrieve secrets from Bitwarden Secrets Manager +version_added: 7.2.0 +description: + - Retrieve secrets from Bitwarden Secrets Manager. +options: + _terms: + description: Secret ID(s) to fetch values for. + required: true + type: list + elements: str + bws_access_token: + description: The BWS access token to use for this lookup. + env: + - name: BWS_ACCESS_TOKEN + required: true + type: str +""" + +EXAMPLES = r""" +- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication + ansible.builtin.debug: + msg: >- + {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972") }} + +- name: Get a secret passing an explicit access token for authentication + ansible.builtin.debug: + msg: >- + {{ + lookup( + "community.general.bitwarden_secrets_manager", + "2bc23e48-4932-40de-a047-5524b7ddc972", + bws_access_token="9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg==" + ) + }} + +- name: Get two different secrets each using a different access token for authentication + ansible.builtin.debug: + msg: + - '{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972", bws_access_token=token1) }}' + - '{{ lookup("community.general.bitwarden_secrets_manager", "9d89af4c-eb5d-41f5-bb0f-4ae81215c768", bws_access_token=token2) }}' + vars: + token1: "9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg==" + token2: "1.69b72797-6ea9-4687-a11e-848e41a30ae6.YW5zaWJsZSBpcyBncmVhdD8K:YW5zaWJsZSBpcyBncmVhdAo=" + +- name: Get just the value of a secret + ansible.builtin.debug: + msg: >- + {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }} +""" + +RETURN = r""" +_raw: + description: List containing one or more secrets. + type: list + elements: dict +""" + +from subprocess import Popen, PIPE +from time import sleep + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.common.text.converters import to_text +from ansible.parsing.ajson import AnsibleJSONDecoder +from ansible.plugins.lookup import LookupBase + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class BitwardenSecretsManagerException(AnsibleLookupError): + pass + + +class BitwardenSecretsManager(object): + def __init__(self, path='bws'): + self._cli_path = path + self._max_retries = 3 + self._retry_delay = 1 + + @property + def cli_path(self): + return self._cli_path + + def _run_with_retry(self, args, stdin=None, retries=0): + out, err, rc = self._run(args, stdin) + + if rc != 0: + if retries >= self._max_retries: + raise BitwardenSecretsManagerException("Max retries exceeded. 
Unable to retrieve secret.") + + if "Too many requests" in err: + delay = self._retry_delay * (2 ** retries) + sleep(delay) + return self._run_with_retry(args, stdin, retries + 1) + else: + raise BitwardenSecretsManagerException(f"Command failed with return code {rc}: {err}") + + return out, err, rc + + def _run(self, args, stdin=None): + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(stdin) + rc = p.wait() + return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc + + def get_bws_version(self): + """Get the version of the Bitwarden Secrets Manager CLI. + """ + out, err, rc = self._run(['--version']) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + # strip the prefix and grab the last segment, the version number + return out.split()[-1] + + def get_secret(self, secret_id, bws_access_token): + """Get and return the secret with the given secret_id. + """ + + # Prepare set of params for Bitwarden Secrets Manager CLI + # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it. + params = [ + '--color', 'no', + '--access-token', bws_access_token + ] + + # bws version 0.3.0 introduced a breaking change in the command line syntax: + # pre-0.3.0: verb noun + # 0.3.0 and later: noun verb + bws_version = self.get_bws_version() + if LooseVersion(bws_version) < LooseVersion('0.3.0'): + params.extend(['get', 'secret', secret_id]) + else: + params.extend(['secret', 'get', secret_id]) + + out, err, rc = self._run_with_retry(params) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + + return AnsibleJSONDecoder().raw_decode(out)[0] + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + bws_access_token = self.get_option('bws_access_token') + + return [_bitwarden_secrets_manager.get_secret(term, bws_access_token) for term in terms] + + +_bitwarden_secrets_manager = BitwardenSecretsManager() diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index 98043eba34..1e07326a17 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,26 +1,27 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Bradley Young -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2013, Bradley Young +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cartesian - short_description: returns the cartesian product of lists +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cartesian +short_description: Returns the cartesian product of lists +description: + - Takes the input lists and returns a list that represents the product of the input lists. + - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. + - You can see the exact syntax in the examples section. +options: + _terms: description: - - Takes the input lists and returns a list that represents the product of the input lists. 
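The Secrets Manager wrapper above retries rate-limited calls with exponential backoff and flips its argv order for the `bws` 0.3.0 syntax change. A small sketch of both behaviors; the version comparison here uses a simplified tuple split instead of the `LooseVersion` the plugin actually uses:

```python
# Backoff as implemented above: retry_delay * 2**retries -> 1s, 2s, 4s.
def backoff_delays(retry_delay=1, max_retries=3):
    return [retry_delay * (2 ** retries) for retries in range(max_retries)]

# Argv order flipped in bws 0.3.0 ("verb noun" -> "noun verb").
def secret_get_args(bws_version, secret_id):
    major, minor = (int(x) for x in bws_version.split('.')[:2])
    if (major, minor) < (0, 3):
        return ['get', 'secret', secret_id]
    return ['secret', 'get', secret_id]

print(backoff_delays())                       # [1, 2, 4]
print(secret_get_args('0.2.1', 'SECRET-ID'))  # ['get', 'secret', 'SECRET-ID']
```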
- - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. - You can see the exact syntax in the examples section. - options: - _raw: - description: - - a set of lists - required: True -''' + - A set of lists. + type: list + elements: list + required: true +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Example of the change in the description ansible.builtin.debug: msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}" @@ -31,15 +32,15 @@ EXAMPLES = """ with_community.general.cartesian: - "{{list1}}" - "{{list2}}" - - [1,2,3,4,5,6] + - [1, 2, 3, 4, 5, 6] """ -RETURN = """ - _list: - description: - - list of lists composed of elements of the input lists - type: list - elements: list +RETURN = r""" +_list: + description: + - List of lists composed of elements of the input lists. + type: list + elements: list """ from itertools import product @@ -63,11 +64,11 @@ class LookupModule(LookupBase): """ results = [] for x in terms: - intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader) - results.append(intermediate) + results.append(listify_lookup_plugin_terms(x, templar=self._templar)) return results def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) terms = self._lookup_variables(terms) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index f5ccc766c2..69a53d007e 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,44 +1,44 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Josh Bradley -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2016, Josh Bradley +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: chef_databag - short_description: fetches data from a Chef Databag +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: chef_databag +short_description: Fetches data from a Chef Databag +description: + - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server + API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the + given base path or the current working directory. The lookup order mirrors the one from Chef, all folders in the base + path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).' +requirements: + - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))" +options: + name: description: - - "This is a lookup plugin to provide access to chef data bags using the pychef package. - It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from, - starting from either the given base path or the current working directory. 
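The cartesian lookup above is a thin wrapper over `itertools.product`; its documented example can be reproduced directly in plain Python:

```python
from itertools import product

# Same expansion the docs describe: [1, 2, 3] x ['a', 'b'].
print([list(pair) for pair in product([1, 2, 3], ['a', 'b'])])
# [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'], [3, 'a'], [3, 'b']]
```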
- The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration - file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb" - requirements: - - "pychef (python library https://pychef.readthedocs.io `pip install pychef`)" - options: - name: - description: - - Name of the databag - required: True - item: - description: - - Item to fetch - required: True -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" + - Name of the databag. + type: string + required: true + item: + description: + - Item to fetch. + type: string + required: true """ -RETURN = """ - _raw: - description: - - The value from the databag. - type: list - elements: dict +EXAMPLES = r""" +- ansible.builtin.debug: + msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" +""" + +RETURN = r""" +_raw: + description: + - The value from the databag. + type: list + elements: dict """ from ansible.errors import AnsibleError @@ -78,11 +78,11 @@ class LookupModule(LookupBase): setattr(self, arg, parsed) except ValueError: raise AnsibleError( - "can't parse arg {0}={1} as string".format(arg, arg_raw) + f"can't parse arg {arg}={arg_raw} as string" ) if args: raise AnsibleError( - "unrecognized arguments to with_sequence: %r" % list(args.keys()) + f"unrecognized arguments to with_sequence: {list(args.keys())!r}" ) def run(self, terms, variables=None, **kwargs): diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index bb67b3b153..7a9eaf10bd 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -1,72 +1,68 @@ -# (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" name: collection_version author: Felix Fontein (@felixfontein) version_added: "4.0.0" short_description: Retrieves the version of an installed collection description: - - This lookup allows to query the version of an installed collection, and to determine whether a - collection is installed at all. - - By default it returns C(none) for non-existing collections and C(*) for collections without a - version number. The latter should only happen in development environments, or when installing - a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted - by providing other values with I(result_not_found) and I(result_no_version). + - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed + at all. + - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. The latter + should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml). + This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version). options: _terms: description: - The collections to look for. - - For example C(community.general). 
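The chef_databag plugin above takes its parameters as a single `key=value` string term. A hedged stand-in for that parsing step (the real plugin uses Ansible's own splitter; this simplified version only illustrates the shape of the input):

```python
# Simplified stand-in for parsing "name=data_bag_name item=data_bag_item".
def parse_databag_term(term):
    parsed = {}
    for pair in term.split():
        key, sep, value = pair.partition('=')
        if not sep:
            raise ValueError(f"can't parse arg {pair} as key=value")
        parsed[key] = value
    return parsed

print(parse_databag_term('name=data_bag_name item=data_bag_item'))
# {'name': 'data_bag_name', 'item': 'data_bag_item'}
```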
+ - For example V(community.general). type: list elements: str required: true result_not_found: description: - The value to return when the collection could not be found. - - By default, C(none) is returned. + - By default, V(none) is returned. type: string default: ~ result_no_version: description: - The value to return when the collection has no version number. - - This can happen for collections installed from git which do not have a version number - in C(galaxy.yml). - - By default, C(*) is returned. + - This can happen for collections installed from git which do not have a version number in C(galaxy.yml). + - By default, V(*) is returned. type: string default: '*' """ -EXAMPLES = """ +EXAMPLES = r""" - name: Check version of community.general ansible.builtin.debug: msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}" """ -RETURN = """ - _raw: - description: - - The version number of the collections listed as input. - - If a collection can not be found, it will return the value provided in I(result_not_found). - By default, this is C(none). - - If a collection can be found, but the version not identified, it will return the value provided in - I(result_no_version). By default, this is C(*). This can happen for collections installed - from git which do not have a version number in C(galaxy.yml). - type: list - elements: str +RETURN = r""" +_raw: + description: + - The version number of the collections listed as input. + - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none). + - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version). + By default, this is V(*). This can happen for collections installed from git which do not have a version number in V(galaxy.yml). + type: list + elements: str """ import json import os import re +from importlib import import_module import yaml from ansible.errors import AnsibleLookupError -from ansible.module_utils.compat.importlib import import_module from ansible.plugins.lookup import LookupBase @@ -97,15 +93,10 @@ def load_collection_meta(collection_pkg, no_version='*'): if os.path.exists(manifest_path): return load_collection_meta_manifest(manifest_path) - # Try to load galaxy.y(a)ml + # Try to load galaxy.yml galaxy_path = os.path.join(path, 'galaxy.yml') - galaxy_alt_path = os.path.join(path, 'galaxy.yaml') - # galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed - # in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for - # ansible-core 2.12. 
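The lookup resolves a collection by importing its Python package and then reading version metadata found next to the package. A minimal sketch of the resolution step; the regex here is a simplified assumption for illustration, not the plugin's actual `FQCN_RE`:

```python
import re
from importlib import import_module

FQCN_SKETCH = re.compile(r'^[a-z0-9_]+\.[a-z0-9_]+$')  # assumption: simplified pattern

def find_collection_pkg(fqcn):
    if not FQCN_SKETCH.match(fqcn):
        raise ValueError(f'"{fqcn}" is not a FQCN')
    try:
        return import_module(f'ansible_collections.{fqcn}')
    except ImportError:
        return None  # maps to result_not_found in the plugin

print(find_collection_pkg('community.general'))  # package object, or None if absent
```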
- for path in (galaxy_path, galaxy_alt_path): - if os.path.exists(path): - return load_collection_meta_galaxy(path, no_version=no_version) + if os.path.exists(galaxy_path): + return load_collection_meta_galaxy(galaxy_path, no_version=no_version) return {} @@ -119,10 +110,10 @@ class LookupModule(LookupBase): for term in terms: if not FQCN_RE.match(term): - raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term)) + raise AnsibleLookupError(f'"{term}" is not a FQCN') try: - collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term)) + collection_pkg = import_module(f'ansible_collections.{term}') except ImportError: # Collection not found result.append(not_found) @@ -131,7 +122,7 @@ class LookupModule(LookupBase): try: data = load_collection_meta(collection_pkg, no_version=no_version) except Exception as exc: - raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc)) + raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}') result.append(data.get('version', no_version)) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 8b779e6aca..c9cc3c6399 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,110 +1,117 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Steve Gargan -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +# Copyright (c) 2015, Steve Gargan +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: consul_kv - short_description: Fetch metadata from a Consul key value store. - description: - - Lookup metadata for a playbook from the key value store in a Consul cluster. - Values can be easily set in the kv store with simple rest commands - - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata) - requirements: - - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)' - options: - _raw: - description: List of key(s) to retrieve. - type: list - recurse: - type: boolean - description: If true, will retrieve all the values that have the given key as prefix. - default: False - index: - description: - - If the key has a value with the specified index then this is returned allowing access to historical values. - datacenter: - description: - - Retrieve the key from a consul datacenter other than the default for the consul host. - token: - description: The acl token to allow access to restricted values. - host: - default: localhost - description: - - The target to connect to, must be a resolvable address. - Will be determined from C(ANSIBLE_CONSUL_URL) if that is set. - - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)" - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: host - port: - description: - - The port of the target host to connect to. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - default: 8500 - scheme: - default: http - description: - - Whether to use http or https. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. 
- validate_certs: - default: True - description: Whether to verify the ssl connection or not. - env: - - name: ANSIBLE_CONSUL_VALIDATE_CERTS - ini: - - section: lookup_consul - key: validate_certs - client_cert: - description: The client cert to verify the ssl connection. - env: - - name: ANSIBLE_CONSUL_CLIENT_CERT - ini: - - section: lookup_consul - key: client_cert - url: - description: "The target to connect to, should look like this: C(https://my.consul.server:8500)." - type: str - version_added: 1.0.0 - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: url -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to/retrieve' - - - name: Parameters can be provided after the key be more specific about what to retrieve - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98' - - - name: retrieving a KV from a remote cluster on non default port - ansible.builtin.debug: - msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}" -""" - -RETURN = """ +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: consul_kv +short_description: Fetch metadata from a Consul key value store +description: + - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store + with simple rest commands. + - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata). +requirements: + - 'py-consul python library U(https://github.com/criteo/py-consul?tab=readme-ov-file#installation)' +options: _raw: + description: List of key(s) to retrieve. + type: list + elements: string + recurse: + type: boolean + description: If V(true), retrieves all the values that have the given key as prefix. + default: false + index: description: - - Value(s) stored in consul. - type: dict + - If the key has a value with the specified index then this is returned allowing access to historical values. + type: int + datacenter: + description: + - Retrieve the key from a consul datacenter other than the default for the consul host. + type: str + token: + description: The acl token to allow access to restricted values. + type: str + host: + default: localhost + type: str + description: + - The target to connect to, must be a resolvable address. + - It is determined from E(ANSIBLE_CONSUL_URL) if that is set. + ini: + - section: lookup_consul + key: host + port: + description: + - The port of the target host to connect to. + - If you use E(ANSIBLE_CONSUL_URL) this value is used from there. + type: int + default: 8500 + scheme: + default: http + type: str + description: + - Whether to use http or https. + - If you use E(ANSIBLE_CONSUL_URL) this value is used from there. + validate_certs: + default: true + description: Whether to verify the TLS connection or not. + type: bool + env: + - name: ANSIBLE_CONSUL_VALIDATE_CERTS + ini: + - section: lookup_consul + key: validate_certs + client_cert: + description: The client cert to verify the TLS connection. + type: str + env: + - name: ANSIBLE_CONSUL_CLIENT_CERT + ini: + - section: lookup_consul + key: client_cert + url: + description: + - The target to connect to. + - 'Should look like this: V(https://my.consul.server:8500).' 
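When E(ANSIBLE_CONSUL_URL) is set, the connection scheme, host, and port are derived from the URL; the plugin imports `urlparse` for exactly this. A quick illustration:

```python
from urllib.parse import urlparse

u = urlparse('https://my.consul.server:8500')
print(u.scheme, u.hostname, u.port)  # https my.consul.server 8500
```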
+ type: str + version_added: 1.0.0 + env: + - name: ANSIBLE_CONSUL_URL + ini: + - section: lookup_consul + key: url """ -import os -from ansible.module_utils.six.moves.urllib.parse import urlparse +EXAMPLES = r""" +- ansible.builtin.debug: + msg: 'key contains {{item}}' + with_community.general.consul_kv: + - 'key/to/retrieve' + +- name: Parameters can be provided after the key be more specific about what to retrieve + ansible.builtin.debug: + msg: 'key contains {{item}}' + with_community.general.consul_kv: + - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98' + +- name: retrieving a KV from a remote cluster on non default port + ansible.builtin.debug: + msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}" +""" + +RETURN = r""" +_raw: + description: + - Value(s) stored in consul. + type: dict +""" + +from urllib.parse import urlparse + from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase from ansible.module_utils.common.text.converters import to_text @@ -123,7 +130,7 @@ class LookupModule(LookupBase): if not HAS_CONSUL: raise AnsibleError( - 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation') + 'py-consul is required for consul_kv lookup. see https://github.com/criteo/py-consul?tab=readme-ov-file#installation') # get options self.set_options(direct=kwargs) @@ -163,7 +170,7 @@ class LookupModule(LookupBase): values.append(to_text(results[1]['Value'])) except Exception as e: raise AnsibleError( - "Error locating '%s' in kv store. Error was %s" % (term, e)) + f"Error locating '{term}' in kv store. Error was {e}") return values @@ -184,7 +191,7 @@ class LookupModule(LookupBase): if param and len(param) > 0: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name) + raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter") paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 9be3527b19..01e6a1a8fe 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,50 +1,57 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Ensighten -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Ensighten +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: credstash - short_description: retrieve secrets from Credstash on AWS - requirements: - - credstash (python library) - description: - - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash" - options: - _terms: - description: term or list of terms to lookup in the credit store - type: list - required: True - table: - description: name of the credstash table to query - default: 'credential-store' - required: True - version: - description: Credstash version - region: - description: AWS region - profile_name: - description: AWS profile to use for authentication - env: - - name: AWS_PROFILE - 
aws_access_key_id: - description: AWS access key ID - env: - - name: AWS_ACCESS_KEY_ID - aws_secret_access_key: - description: AWS access key - env: - - name: AWS_SECRET_ACCESS_KEY - aws_session_token: - description: AWS session token - env: - - name: AWS_SESSION_TOKEN -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: credstash +short_description: Retrieve secrets from Credstash on AWS +requirements: + - credstash (python library) +description: + - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash." +options: + _terms: + description: Term or list of terms to lookup in the credit store. + type: list + elements: string + required: true + table: + description: Name of the credstash table to query. + type: str + default: 'credential-store' + version: + description: Credstash version. + type: str + default: '' + region: + description: AWS region. + type: str + profile_name: + description: AWS profile to use for authentication. + type: str + env: + - name: AWS_PROFILE + aws_access_key_id: + description: AWS access key ID. + type: str + env: + - name: AWS_ACCESS_KEY_ID + aws_secret_access_key: + description: AWS access key. + type: str + env: + - name: AWS_SECRET_ACCESS_KEY + aws_session_token: + description: AWS session token. + type: str + env: + - name: AWS_SESSION_TOKEN +""" -EXAMPLES = """ +EXAMPLES = r""" - name: first use credstash to store your secrets ansible.builtin.shell: credstash put my-github-password secure123 @@ -68,24 +75,22 @@ EXAMPLES = """ environment: production tasks: - - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" + - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" - - name: "Test credstash lookup plugin -- get the password with a context defined here" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" + - name: "Test credstash lookup plugin -- get the password with a context defined here" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" """ -RETURN = """ - _raw: - description: - - Value(s) stored in Credstash. - type: str +RETURN = r""" +_raw: + description: + - Value(s) stored in Credstash. 
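Any extra keyword argument that is not one of the recognized options becomes part of the encryption context passed to credstash, as the refactored `run()` further below shows. A tiny illustration of that split, with option names copied from the diff and example values that are placeholders:

```python
KNOWN_OPTIONS = ('version', 'region', 'table', 'profile_name',
                 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')

kwargs = {'region': 'us-east-1', 'app': 'my_app', 'environment': 'production'}
context = {k: v for k, v in kwargs.items() if k not in KNOWN_OPTIONS}
print(context)  # {'app': 'my_app', 'environment': 'production'}
```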
+ type: str """ -import os - from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase @@ -99,28 +104,39 @@ except ImportError: class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - + def run(self, terms, variables=None, **kwargs): if not CREDSTASH_INSTALLED: raise AnsibleError('The credstash lookup plugin requires credstash to be installed.') + self.set_options(var_options=variables, direct=kwargs) + + version = self.get_option('version') + region = self.get_option('region') + table = self.get_option('table') + profile_name = self.get_option('profile_name') + aws_access_key_id = self.get_option('aws_access_key_id') + aws_secret_access_key = self.get_option('aws_secret_access_key') + aws_session_token = self.get_option('aws_session_token') + + context = { + k: v for k, v in kwargs.items() + if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token') + } + + kwargs_pass = { + 'profile_name': profile_name, + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, + 'aws_session_token': aws_session_token, + } + ret = [] for term in terms: try: - version = kwargs.pop('version', '') - region = kwargs.pop('region', None) - table = kwargs.pop('table', 'credential-store') - profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None)) - aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None)) - aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None)) - aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None)) - kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token} - val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass) + ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass)) except credstash.ItemNotFound: - raise AnsibleError('Key {0} not found'.format(term)) + raise AnsibleError(f'Key {term} not found') except Exception as e: - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) - ret.append(val) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 80323c10fd..955ba4a89a 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,70 +1,80 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Edward Nunez -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017, Edward Nunez +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cyberarkpassword - short_description: get secrets from CyberArk AIM - requirements: - - CyberArk AIM tool installed +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cyberarkpassword +short_description: Get secrets from CyberArk AIM +requirements: + - CyberArk AIM tool installed +description: + - Get secrets from CyberArk AIM. 
+options: + _command: + description: Cyberark CLI utility. + type: string + env: + - name: AIM_CLIPASSWORDSDK_CMD + default: '/opt/CARKaim/sdk/clipasswordsdk' + appid: + description: Defines the unique ID of the application that is issuing the password request. + type: string + required: true + query: + description: Describes the filter criteria for the password retrieval. + type: string + required: true + output: description: - - Get secrets from CyberArk AIM. - options : - _command: - description: Cyberark CLI utility. - env: - - name: AIM_CLIPASSWORDSDK_CMD - default: '/opt/CARKaim/sdk/clipasswordsdk' - appid: - description: Defines the unique ID of the application that is issuing the password request. - required: True - query: - description: Describes the filter criteria for the password retrieval. - required: True - output: - description: - - Specifies the desired output fields separated by commas. - - "They could be: Password, PassProps., PasswordChangeInProcess" - default: 'password' - _extra: - description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" - notes: - - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe. -''' - -EXAMPLES = """ - - name: passing options to the lookup - ansible.builtin.debug: - msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' - vars: - cyquery: - appid: "app_ansible" - query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" - output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" - - - - name: used in a loop - ansible.builtin.debug: - msg: "{{item}}" - with_community.general.cyberarkpassword: - appid: 'app_ansible' - query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' - output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' + - Specifies the desired output fields separated by commas. + - 'They could be: Password, PassProps., PasswordChangeInProcess.' + type: string + default: 'password' + _extra: + description: For extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and + ASCP Implementation Guide". +notes: + - For Ansible on Windows, please change the -parameters (C(-p), C(-d), and C(-o)) to /parameters (C(/p), C(/d), and C(/o)) + and change the location of C(CLIPasswordSDK.exe). """ -RETURN = """ - password: - description: - - The actual value stored - passprops: - description: properties assigned to the entry - type: dictionary - passwordchangeinprocess: - description: did the password change? +EXAMPLES = r""" +- name: passing options to the lookup + ansible.builtin.debug: + msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' + vars: + cyquery: + appid: "app_ansible" + query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" + output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" + + +- name: used in a loop + ansible.builtin.debug: + msg: "{{item}}" + with_community.general.cyberarkpassword: + appid: 'app_ansible' + query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' + output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' +""" + +RETURN = r""" +_result: + description: A list containing one dictionary. + type: list + elements: dictionary + contains: + password: + description: + - The actual value stored. 
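Under the hood, the lookup assembles a `clipasswordsdk GetPassword` command line from these options, as the code further below shows. An abridged, illustrative argv (all values are placeholders; the real call also appends a `-d` delimiter argument and any extra `-p` parameters):

```python
appid = 'app_ansible'
query = 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
output = 'Password,PassProps.UserName'

argv = [
    '/opt/CARKaim/sdk/clipasswordsdk', 'GetPassword',
    '-p', f'AppDescs.AppID={appid}',
    '-p', f'Query={query}',
    '-o', output,
]
print(argv)
```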
+ passprops: + description: Properties assigned to the entry. + type: dictionary + passwordchangeinprocess: + description: Did the password change? """ import os @@ -74,8 +84,7 @@ from subprocess import Popen from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.parsing.splitter import parse_kv -from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.utils.display import Display display = Display() @@ -96,7 +105,7 @@ class CyberarkPassword: self.extra_parms = [] for key, value in kwargs.items(): self.extra_parms.append('-p') - self.extra_parms.append("%s=%s" % (key, value)) + self.extra_parms.append(f"{key}={value}") if self.appid is None: raise AnsibleError("CyberArk Error: No Application ID specified") @@ -121,8 +130,8 @@ class CyberarkPassword: all_parms = [ CLIPASSWORDSDK_CMD, 'GetPassword', - '-p', 'AppDescs.AppID=%s' % self.appid, - '-p', 'Query=%s' % self.query, + '-p', f'AppDescs.AppID={self.appid}', + '-p', f'Query={self.query}', '-o', self.output, '-d', self.b_delimiter] all_parms.extend(self.extra_parms) @@ -135,7 +144,7 @@ class CyberarkPassword: b_credential = to_bytes(tmp_output) if tmp_error: - raise AnsibleError("ERROR => %s " % (tmp_error)) + raise AnsibleError(f"ERROR => {tmp_error} ") if b_credential and b_credential.endswith(b'\n'): b_credential = b_credential[:-1] @@ -155,7 +164,7 @@ class CyberarkPassword: except subprocess.CalledProcessError as e: raise AnsibleError(e.output) except OSError as e: - raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror)) + raise AnsibleError(f"ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=({e.errno}) => {e.strerror} ") return [result_dict] @@ -168,12 +177,11 @@ class LookupModule(LookupBase): """ def run(self, terms, variables=None, **kwargs): - - display.vvvv("%s" % terms) + display.vvvv(f"{terms}") if isinstance(terms, list): return_values = [] for term in terms: - display.vvvv("Term: %s" % term) + display.vvvv(f"Term: {term}") cyberark_conn = CyberarkPassword(**term) return_values.append(cyberark_conn.get()) return return_values diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 1fb75ece66..89502e9518 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,35 +1,33 @@ -# -*- coding: utf-8 -*- -# (c) 2015-2021, Felix Fontein -# (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015-2021, Felix Fontein +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" name: dependent short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables author: Felix Fontein (@felixfontein) version_added: 3.1.0 description: - - "Takes the input lists and returns a list with elements that are lists, dictionaries, - or template expressions which evaluate to lists or dicts, composed of the elements of - the input evaluated lists and dictionaries." 
+ - Takes the input lists and returns a list with elements that are lists, dictionaries, or template expressions which evaluate + to lists or dicts, composed of the elements of the input evaluated lists and dictionaries. options: - _raw: + _terms: description: - - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. - The name is the index that is used in the result object. The value is iterated over as described below. + - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. The name + is the index that is used in the result object. The value is iterated over as described below. - If the value is a list, it is simply iterated over. - - If the value is a dictionary, it is iterated over and returned as if they would be processed by the - R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter). - - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen - elements with C(item.). The result must be a list or a dictionary. + - If the value is a dictionary, it is iterated over and returned as if they would be processed by the P(ansible.builtin.dict2items#filter) + filter. + - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen elements with + C(item.). The result must be a list or a dictionary. type: list elements: dict required: true """ -EXAMPLES = """ +EXAMPLES = r""" - name: Install/remove public keys for active admin users ansible.posix.authorized_key: user: "{{ item.admin.key }}" @@ -75,9 +73,9 @@ EXAMPLES = """ loop_control: # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists label: |- - {{ [item.zone.key, item.prefix.key, item.entry.key, - item.entry.value.ttl | default(3600), - item.entry.value.absent | default(False), item.entry.value.value] }} + {{ [item.zone.key, item.prefix.key, item.entry.key, + item.entry.value.ttl | default(3600), + item.entry.value.absent | default(False), item.entry.value.value] }} with_community.general.dependent: - zone: dns_setup - prefix: item.zone.value @@ -88,44 +86,55 @@ EXAMPLES = """ '': A: value: - - 1.2.3.4 + - 1.2.3.4 AAAA: value: - - "2a01:1:2:3::1" + - "2a01:1:2:3::1" 'test._domainkey': TXT: ttl: 300 value: - - '"k=rsa; t=s; p=MIGfMA..."' + - '"k=rsa; t=s; p=MIGfMA..."' example.org: 'www': A: value: - - 1.2.3.4 - - 5.6.7.8 + - 1.2.3.4 + - 5.6.7.8 """ -RETURN = """ - _list: - description: - - A list composed of dictionaries whose keys are the variable names from the input list. - type: list - elements: dict - sample: - - key1: a - key2: test - - key1: a - key2: foo - - key1: b - key2: bar +RETURN = r""" +_list: + description: + - A list composed of dictionaries whose keys are the variable names from the input list. 
+ type: list + elements: dict + sample: + - key1: a + key2: test + - key1: a + key2: foo + - key1: b + key2: bar """ from ansible.errors import AnsibleLookupError -from ansible.module_utils.common._collections_compat import Mapping, Sequence -from ansible.module_utils.six import string_types +from collections.abc import Mapping, Sequence from ansible.plugins.lookup import LookupBase from ansible.template import Templar +try: + from ansible.template import trust_as_template as _trust_as_template + HAS_DATATAGGING = True +except ImportError: + HAS_DATATAGGING = False + + +def _make_safe(value): + if HAS_DATATAGGING and isinstance(value, str): + return _trust_as_template(value) + return value + class LookupModule(LookupBase): def __evaluate(self, expression, templar, variables): @@ -135,7 +144,11 @@ class LookupModule(LookupBase): ``variables`` are the variables to use. """ templar.available_variables = variables or {} - return templar.template("{0}{1}{2}".format("{{", expression, "}}"), cache=False) + quoted_expression = "{0}{1}{2}".format("{{", expression, "}}") + if hasattr(templar, 'evaluate_expression'): + # This is available since the Data Tagging PR has been merged + return templar.evaluate_expression(_make_safe(expression)) + return templar.template(quoted_expression) def __process(self, result, terms, index, current, templar, variables): """Fills ``result`` list with evaluated items. @@ -161,12 +174,11 @@ class LookupModule(LookupBase): values = self.__evaluate(expression, templar, variables=vars) except Exception as e: raise AnsibleLookupError( - 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format( - error=e, key=key, item=current)) + f'Caught "{e}" while evaluating {key!r} with item == {current!r}') if isinstance(values, Mapping): for idx, val in sorted(values.items()): - current[key] = dict([('key', idx), ('value', val)]) + current[key] = dict(key=idx, value=val) self.__process(result, terms, index + 1, current, templar, variables) elif isinstance(values, Sequence): for elt in values: @@ -174,37 +186,38 @@ class LookupModule(LookupBase): self.__process(result, terms, index + 1, current, templar, variables) else: raise AnsibleLookupError( - 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format( - key=key, item=current, type=type(values))) + f'Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but {type(values)}') def run(self, terms, variables=None, **kwargs): """Generate list.""" + self.set_options(var_options=variables, direct=kwargs) + result = [] if len(terms) > 0: - templar = Templar(loader=self._templar._loader) + if HAS_DATATAGGING: + templar = self._templar.copy_with_new_env(available_variables={}) + else: + templar = Templar(loader=self._templar._loader) data = [] vars_so_far = set() for index, term in enumerate(terms): if not isinstance(term, Mapping): raise AnsibleLookupError( - 'Parameter {index} must be a dictionary, got {type}'.format( - index=index, type=type(term))) + f'Parameter {index} must be a dictionary, got {type(term)}') if len(term) != 1: raise AnsibleLookupError( - 'Parameter {index} must be a one-element dictionary, got {count} elements'.format( - index=index, count=len(term))) + f'Parameter {index} must be a one-element dictionary, got {len(term)} elements') k, v = list(term.items())[0] if k in vars_so_far: raise AnsibleLookupError( - 'The variable {key!r} appears more than once'.format(key=k)) + f'The variable {k!r} appears more than once') 
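The recursion in `__process` above walks the terms left to right, letting each later term's expression see the items chosen so far. A self-contained re-implementation of just that expansion idea, with plain callables standing in for the plugin's Jinja2 evaluation:

```python
def expand(terms, index=0, current=None):
    # terms: list of (key, callable returning that key's values,
    # given the choices made so far).
    current = {} if current is None else current
    if index == len(terms):
        yield dict(current)
        return
    key, get_values = terms[index]
    for value in get_values(current):
        current[key] = value
        yield from expand(terms, index + 1, current)
    current.pop(key, None)

terms = [
    ('user', lambda cur: ['alice', 'bob']),
    ('key', lambda cur: [f"{cur['user']}-key1", f"{cur['user']}-key2"]),
]
print(list(expand(terms)))
# [{'user': 'alice', 'key': 'alice-key1'}, {'user': 'alice', 'key': 'alice-key2'},
#  {'user': 'bob', 'key': 'bob-key1'}, {'user': 'bob', 'key': 'bob-key2'}]
```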
vars_so_far.add(k) - if isinstance(v, string_types): + if isinstance(v, str): data.append((k, v, None)) elif isinstance(v, (Sequence, Mapping)): data.append((k, None, v)) else: raise AnsibleLookupError( - 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format( - index=index, key=k, type=type(v))) + f'Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}') self.__process(result, data, 0, {}, templar, variables) return result diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 19ded61de7..b36f02d7d4 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,61 +1,123 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Jan-Piet Mens +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: dig - author: Jan-Piet Mens (@jpmens) - short_description: query DNS using the dnspython library - requirements: - - dnspython (python library, http://www.dnspython.org/) +DOCUMENTATION = r""" +name: dig +author: Jan-Piet Mens (@jpmens) +short_description: Query DNS using the dnspython library +requirements: + - dnspython (python library, http://www.dnspython.org/) +description: + - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain + name). It is possible to lookup any DNS record in this manner. + - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. + It is also possible to explicitly specify the DNS server(s) to use for lookups. + - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN. + - In addition to (default) A record, it is also possible to specify a different record type that should be queried. This + can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to + the FQDN being queried. + - If multiple values are associated with the requested record, the results are returned as a comma-separated list. In + such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup), + which results in the record values being returned as a list over which you can iterate later on. + - By default, the lookup relies on system-wide configured DNS servers for performing the query. It is also possible to + explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. This needs to + be passed-in as an additional parameter to the lookup. +options: + _terms: + description: Domain(s) to query. + type: list + elements: str + qtype: description: - - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name). - It is possible to lookup any DNS record in this manner. - - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. 
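At its core, the dig plugin builds a `dns.resolver.Resolver` and queries it once per domain. A minimal standalone equivalent, assuming the dnspython package is installed and using example.com as a stand-in domain:

```python
import dns.resolver  # pip install dnspython

resolver = dns.resolver.Resolver(configure=True)  # use system nameservers
# dnspython 2.x API; on 1.x this method is named query(), as in the plugin code.
answers = resolver.resolve('example.com.', 'A')
print([rdata.to_text() for rdata in answers])
```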
- It is also possible to explicitly specify the DNS server(s) to use for lookups. - - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN - - In addition to (default) A record, it is also possible to specify a different record type that should be queried. - This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried. - - If multiple values are associated with the requested record, the results will be returned as a comma-separated list. - In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list - over which you can iterate later on. - - By default, the lookup will rely on system-wide configured DNS servers for performing the query. - It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. - This needs to be passed-in as an additional parameter to the lookup - options: - _terms: - description: domain(s) to query - qtype: - description: record type to query - default: 'A' - choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT] - flat: - description: If 0 each record is returned as a dictionary, otherwise a string - default: 1 - retry_servfail: - description: Retry a nameserver if it returns SERVFAIL. - default: false - type: bool - version_added: 3.6.0 - notes: - - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. - - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. - Syntax for specifying the record type is shown in the examples below. - - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. -''' + - Record type to query. + - V(DLV) has been removed in community.general 6.0.0. + - V(CAA) has been added in community.general 6.3.0. + type: str + default: 'A' + choices: + - A + - ALL + - AAAA + - CAA + - CNAME + - DNAME + - DNSKEY + - DS + - HINFO + - LOC + - MX + - NAPTR + - NS + - NSEC3PARAM + - PTR + - RP + - RRSIG + - SOA + - SPF + - SRV + - SSHFP + - TLSA + - TXT + flat: + description: If 0 each record is returned as a dictionary, otherwise a string. + type: int + default: 1 + retry_servfail: + description: Retry a nameserver if it returns SERVFAIL. + default: false + type: bool + version_added: 3.6.0 + fail_on_error: + description: + - Abort execution on lookup errors. + - The default for this option is likely to change to V(true) in the future. The current default, V(false), is used for + backwards compatibility, and results in empty strings or the string V(NXDOMAIN) in the result in case of errors. + default: false + type: bool + version_added: 5.4.0 + real_empty: + description: + - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. + - This option is forced to V(true) if multiple domains to be queried are specified. + default: false + type: bool + version_added: 6.0.0 + class: + description: + - Class. 
+ type: str + default: 'IN' + tcp: + description: Use TCP to lookup DNS records. + default: false + type: bool + version_added: 7.5.0 + port: + description: Use port as target port when looking up DNS records. + default: 53 + type: int + version_added: 9.5.0 +notes: + - V(ALL) is not a record in itself, merely the listed fields are available for any record results you retrieve in the form + of a dictionary. + - While the plugin supports anything which C(dnspython) supports out of the box, only a subset can be converted into a dictionary. + - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. Syntax for specifying + the record type is shown in the examples below. + - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Simple A record (IPV4 address) lookup for example.com ansible.builtin.debug: msg: "{{ lookup('community.general.dig', 'example.com.')}}" - name: "The TXT record for example.org." ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}" + msg: "{{ lookup('community.general.dig', 'example.org.', qtype='TXT') }}" - name: "The TXT record for example.org, alternative syntax." ansible.builtin.debug: @@ -64,105 +126,124 @@ EXAMPLES = """ - name: use in a loop ansible.builtin.debug: msg: "MX record for gmail.com {{ item }}" - with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}" + with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}" + +- name: Lookup multiple names at once + ansible.builtin.debug: + msg: "A record found {{ item }}" + loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}" + +- name: Lookup multiple names at once (from list variable) + ansible.builtin.debug: + msg: "A record found {{ item }}" + loop: "{{ query('community.general.dig', *hosts) }}" + vars: + hosts: + - example.org. + - example.com. + - gmail.com. - ansible.builtin.debug: msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}" - ansible.builtin.debug: msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}" - ansible.builtin.debug: - msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}" + msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', qtype='PTR') }}" - ansible.builtin.debug: msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}" - ansible.builtin.debug: msg: "XMPP service for gmail.com. 
is available at {{ item.target }} on port {{ item.port }}" - with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', flat=0, wantlist=true) }}" - name: Retry nameservers that return SERVFAIL ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}" + msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}" """ -RETURN = """ - _list: - description: - - List of composed strings or dictionaries with key and value - If a dictionary, fields shows the keys returned depending on query type - type: list - elements: raw - contains: - ALL: - description: - - owner, ttl, type - A: - description: - - address - AAAA: - description: - - address - CNAME: - description: - - target - DNAME: - description: - - target - DLV: - description: - - algorithm, digest_type, key_tag, digest - DNSKEY: - description: - - flags, algorithm, protocol, key - DS: - description: - - algorithm, digest_type, key_tag, digest - HINFO: - description: - - cpu, os - LOC: - description: - - latitude, longitude, altitude, size, horizontal_precision, vertical_precision - MX: - description: - - preference, exchange - NAPTR: - description: - - order, preference, flags, service, regexp, replacement - NS: - description: - - target - NSEC3PARAM: - description: - - algorithm, flags, iterations, salt - PTR: - description: - - target - RP: - description: - - mbox, txt - SOA: - description: - - mname, rname, serial, refresh, retry, expire, minimum - SPF: - description: - - strings - SRV: - description: - - priority, weight, port, target - SSHFP: - description: - - algorithm, fp_type, fingerprint - TLSA: - description: - - usage, selector, mtype, cert - TXT: - description: - - strings +RETURN = r""" +_list: + description: + - List of composed strings or of dictionaries, with fields depending + on query type. + type: list + elements: raw + contains: + ALL: + description: + - C(owner), C(ttl), C(type). + A: + description: + - C(address). + AAAA: + description: + - C(address). + CAA: + description: + - C(flags). + - C(tag). + - C(value). + version_added: 6.3.0 + CNAME: + description: + - C(target). + DNAME: + description: + - C(target). + DNSKEY: + description: + - C(flags), C(algorithm), C(protocol), C(key). + DS: + description: + - C(algorithm), C(digest_type), C(key_tag), C(digest). + HINFO: + description: + - C(cpu), C(os). + LOC: + description: + - C(latitude), C(longitude), C(altitude), C(size), C(horizontal_precision), C(vertical_precision). + MX: + description: + - C(preference), C(exchange). + NAPTR: + description: + - C(order), C(preference), C(flags), C(service), C(regexp), C(replacement). + NS: + description: + - C(target). + NSEC3PARAM: + description: + - C(algorithm), C(flags), C(iterations), C(salt). + PTR: + description: + - C(target). + RP: + description: + - C(mbox), C(txt). + SOA: + description: + - C(mname), C(rname), C(serial), C(refresh), C(retry), C(expire), C(minimum). + SPF: + description: + - C(strings). + SRV: + description: + - C(priority), C(weight), C(port), C(target). + SSHFP: + description: + - C(algorithm), C(fp_type), C(fingerprint). + TLSA: + description: + - C(usage), C(selector), C(mtype), C(cert). + TXT: + description: + - C(strings). 
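These per-type field lists are what `make_rdata_dict()` (below) copies off the dnspython rdata object when C(flat=0) is set. A sketch of that conversion for an MX answer, using a stand-in object rather than a live query:

```python
class FakeMX:
    # Stand-in for a dnspython MX rdata object (illustrative values).
    preference = 10
    exchange = 'mail.example.com.'

mx_fields = ['preference', 'exchange']  # copied from the supported_types table
rd = {f: getattr(FakeMX, f) for f in mx_fields}
print(rd)  # {'preference': 10, 'exchange': 'mail.example.com.'}
```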
""" from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.utils.display import Display import socket try: @@ -171,13 +252,16 @@ try: import dns.resolver import dns.reversename import dns.rdataclass - from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC, + from dns.rdatatype import (A, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT) HAVE_DNS = True except ImportError: HAVE_DNS = False +display = Display() + + def make_rdata_dict(rdata): ''' While the 'dig' lookup plugin supports anything which dnspython supports out of the box, the following supported_types list describes which @@ -188,9 +272,9 @@ def make_rdata_dict(rdata): supported_types = { A: ['address'], AAAA: ['address'], + CAA: ['flags', 'tag', 'value'], CNAME: ['target'], DNAME: ['target'], - DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'], DNSKEY: ['flags', 'algorithm', 'protocol', 'key'], DS: ['algorithm', 'digest_type', 'key_tag', 'digest'], HINFO: ['cpu', 'os'], @@ -201,7 +285,7 @@ def make_rdata_dict(rdata): NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'], PTR: ['target'], RP: ['mbox', 'txt'], - # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'], + # RRSIG: ['type_covered', 'algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'key_tag', 'signer', 'signature'], SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'], SPF: ['strings'], SRV: ['priority', 'weight', 'port', 'target'], @@ -220,10 +304,10 @@ def make_rdata_dict(rdata): if isinstance(val, dns.name.Name): val = dns.name.Name.to_text(val) - if rdata.rdtype == DLV and f == 'digest': - val = dns.rdata._hexify(rdata.digest).replace(' ', '') if rdata.rdtype == DS and f == 'digest': val = dns.rdata._hexify(rdata.digest).replace(' ', '') + if rdata.rdtype == DNSKEY and f == 'algorithm': + val = int(val) if rdata.rdtype == DNSKEY and f == 'key': val = dns.rdata._base64ify(rdata.key).replace(' ', '') if rdata.rdtype == NSEC3PARAM and f == 'salt': @@ -261,25 +345,34 @@ class LookupModule(LookupBase): ... flat=0 # returns a dict; default is 1 == string ''' - if HAVE_DNS is False: raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed") + self.set_options(var_options=variables, direct=kwargs) + # Create Resolver object so that we can set NS if necessary myres = dns.resolver.Resolver(configure=True) edns_size = 4096 myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size) - domain = None - qtype = 'A' - flat = True - rdclass = dns.rdataclass.from_text('IN') + domains = [] + nameservers = [] + qtype = self.get_option('qtype') + flat = self.get_option('flat') + fail_on_error = self.get_option('fail_on_error') + real_empty = self.get_option('real_empty') + tcp = self.get_option('tcp') + port = self.get_option('port') + try: + rdclass = dns.rdataclass.from_text(self.get_option('class')) + except Exception as e: + raise AnsibleError(f"dns lookup illegal CLASS: {e}") + myres.retry_servfail = self.get_option('retry_servfail') for t in terms: if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. nsset = t[1:].split(',') for ns in nsset: - nameservers = [] # Check if we have a valid IP address. If so, use that, otherwise # try to resolve name to address using system's resolver. 
If that # fails we bail out. @@ -291,12 +384,11 @@ class LookupModule(LookupBase): nsaddr = dns.resolver.query(ns)[0].address nameservers.append(nsaddr) except Exception as e: - raise AnsibleError("dns lookup NS: %s" % to_native(e)) - myres.nameservers = nameservers + raise AnsibleError(f"dns lookup NS: {e}") continue if '=' in t: try: - opt, arg = t.split('=') + opt, arg = t.split('=', 1) except Exception: pass @@ -308,61 +400,86 @@ class LookupModule(LookupBase): try: rdclass = dns.rdataclass.from_text(arg) except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup illegal CLASS: {e}") elif opt == 'retry_servfail': - myres.retry_servfail = bool(arg) + myres.retry_servfail = boolean(arg) + elif opt == 'fail_on_error': + fail_on_error = boolean(arg) + elif opt == 'real_empty': + real_empty = boolean(arg) + elif opt == 'tcp': + tcp = boolean(arg) continue if '/' in t: try: domain, qtype = t.split('/') + domains.append(domain) except Exception: - domain = t + domains.append(t) else: - domain = t + domains.append(t) - # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + # print "--- domain = {domain} qtype={qtype} rdclass={rdclass}" + + if port: + myres.port = port + if len(nameservers) > 0: + myres.nameservers = nameservers + + if qtype.upper() == 'PTR': + reversed_domains = [] + for domain in domains: + try: + n = dns.reversename.from_address(domain) + reversed_domains.append(n.to_text()) + except dns.exception.SyntaxError: + pass + except Exception as e: + raise AnsibleError(f"dns.reversename unhandled exception {e}") + domains = reversed_domains + + if len(domains) > 1: + real_empty = True ret = [] - if qtype.upper() == 'PTR': + for domain in domains: try: - n = dns.reversename.from_address(domain) - domain = n.to_text() - except dns.exception.SyntaxError: - pass - except Exception as e: - raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e)) + answers = myres.query(domain, qtype, rdclass=rdclass, tcp=tcp) + for rdata in answers: + s = rdata.to_text() + if qtype.upper() == 'TXT': + s = s[1:-1] # Strip outside quotes on TXT rdata - try: - answers = myres.query(domain, qtype, rdclass=rdclass) - for rdata in answers: - s = rdata.to_text() - if qtype.upper() == 'TXT': - s = s[1:-1] # Strip outside quotes on TXT rdata + if flat: + ret.append(s) + else: + try: + rd = make_rdata_dict(rdata) + rd['owner'] = answers.canonical_name.to_text() + rd['type'] = dns.rdatatype.to_text(rdata.rdtype) + rd['ttl'] = answers.rrset.ttl + rd['class'] = dns.rdataclass.to_text(rdata.rdclass) - if flat: - ret.append(s) - else: - try: - rd = make_rdata_dict(rdata) - rd['owner'] = answers.canonical_name.to_text() - rd['type'] = dns.rdatatype.to_text(rdata.rdtype) - rd['ttl'] = answers.rrset.ttl - rd['class'] = dns.rdataclass.to_text(rdata.rdclass) + ret.append(rd) + except Exception as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + ret.append(str(err)) - ret.append(rd) - except Exception as e: - ret.append(str(e)) - - except dns.resolver.NXDOMAIN: - ret.append('NXDOMAIN') - except dns.resolver.NoAnswer: - ret.append("") - except dns.resolver.Timeout: - ret.append('') - except dns.exception.DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + except dns.resolver.NXDOMAIN as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + if not real_empty: + ret.append('NXDOMAIN') + except (dns.resolver.NoAnswer, 
dns.resolver.Timeout, dns.resolver.NoNameservers) as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + if not real_empty: + ret.append("") + except dns.exception.DNSException as err: + raise AnsibleError(f"dns.resolver unhandled exception {err}") return ret diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index 868d3dd3a3..d83f08bb09 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,26 +1,33 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: dnstxt - author: Jan-Piet Mens (@jpmens) - short_description: query a domain(s)'s DNS txt fields - requirements: - - dns/dns.resolver (python library) +DOCUMENTATION = r""" +name: dnstxt +author: Jan-Piet Mens (@jpmens) +short_description: Query one or more domains' DNS TXT fields +requirements: + - dns/dns.resolver (python library) +description: + - Uses a Python library to return the DNS TXT record for a domain. +options: + _terms: + description: Domain or list of domains to query TXT records from. + required: true + type: list + elements: string + real_empty: description: - - Uses a python library to return the DNS TXT record for a domain. - options: - _terms: - description: domain or list of domains to query TXT records from - required: True - type: list -''' + - Return an empty result instead of empty strings, and an empty list instead of the string V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. + default: false + type: bool + version_added: 6.0.0 +""" -EXAMPLES = """ +EXAMPLES = r""" - name: show txt entry ansible.builtin.debug: msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}" @@ -39,11 +46,11 @@ EXAMPLES = """ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}" """ -RETURN = """ - _list: - description: - - values returned by the DNS TXT record. - type: list +RETURN = r""" +_list: + description: + - Values returned by the DNS TXT record. 
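+ # Behavior sketch, per the exception handlers below: with real_empty=true a failed name contributes nothing to this list; otherwise the placeholder 'NXDOMAIN' or an empty string is appended.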
+ type: list """ HAVE_DNS = False @@ -55,7 +62,6 @@ except ImportError: pass from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase # ============================================================== @@ -69,10 +75,13 @@ from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) if HAVE_DNS is False: raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed") + real_empty = self.get_option('real_empty') + ret = [] for term in terms: domain = term.split()[0] @@ -84,13 +93,19 @@ class LookupModule(LookupBase): string.append(s[1:-1]) # Strip outside quotes on TXT rdata except dns.resolver.NXDOMAIN: + if real_empty: + continue string = 'NXDOMAIN' except dns.resolver.Timeout: + if real_empty: + continue string = '' except dns.resolver.NoAnswer: + if real_empty: + continue string = '' except DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + raise AnsibleError(f"dns.resolver unhandled exception {e}") ret.append(''.join(string)) diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index d7826bcd4d..594dd40f4e 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Migus -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +# Copyright (c) 2020, Adam Migus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: dsv @@ -11,76 +10,78 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic DevOps Secrets Vault version_added: 1.0.0 description: - - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a - DSV I(tenant) using a I(client_id) and I(client_secret). + - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a DSV O(tenant) using a O(client_id) and O(client_secret). requirements: - - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ + - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ options: - _terms: - description: The path to the secret, e.g. C(/staging/servers/web1). - required: true - tenant: - description: The first format parameter in the default I(url_template). - env: - - name: DSV_TENANT - ini: - - section: dsv_lookup - key: tenant - required: true - tld: - default: com - description: The top-level domain of the tenant; the second format - parameter in the default I(url_template). - env: - - name: DSV_TLD - ini: - - section: dsv_lookup - key: tld - required: false - client_id: - description: The client_id with which to request the Access Grant. - env: - - name: DSV_CLIENT_ID - ini: - - section: dsv_lookup - key: client_id - required: true - client_secret: - description: The client secret associated with the specific I(client_id). - env: - - name: DSV_CLIENT_SECRET - ini: - - section: dsv_lookup - key: client_secret - required: true - url_template: - default: https://{}.secretsvaultcloud.{}/v1 - description: The path to prepend to the base URL to form a valid REST - API request. 
- env: - - name: DSV_URL_TEMPLATE - ini: - - section: dsv_lookup - key: url_template - required: false + _terms: + description: The path to the secret, for example V(/staging/servers/web1). + required: true + tenant: + description: The first format parameter in the default O(url_template). + type: string + env: + - name: DSV_TENANT + ini: + - section: dsv_lookup + key: tenant + required: true + tld: + default: com + description: The top-level domain of the tenant; the second format parameter in the default O(url_template). + type: string + env: + - name: DSV_TLD + ini: + - section: dsv_lookup + key: tld + required: false + client_id: + description: The client_id with which to request the Access Grant. + type: string + env: + - name: DSV_CLIENT_ID + ini: + - section: dsv_lookup + key: client_id + required: true + client_secret: + description: The client secret associated with the specific O(client_id). + type: string + env: + - name: DSV_CLIENT_SECRET + ini: + - section: dsv_lookup + key: client_secret + required: true + url_template: + default: https://{}.secretsvaultcloud.{}/v1 + description: The path to prepend to the base URL to form a valid REST API request. + type: string + env: + - name: DSV_URL_TEMPLATE + ini: + - section: dsv_lookup + key: url_template + required: false """ RETURN = r""" _list: - description: - - One or more JSON responses to C(GET /secrets/{path}). - - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). - type: list - elements: dict + description: + - One or more JSON responses to C(GET /secrets/{path}). + - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). + type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: "{{ lookup('community.general.dsv', '/test/secret') }}" + secret: "{{ lookup('community.general.dsv', '/test/secret') }}" tasks: - - ansible.builtin.debug: - msg: 'the password is {{ secret["data"]["password"] }}' + - ansible.builtin.debug: + msg: 'the password is {{ secret["data"]["password"] }}' """ from ansible.errors import AnsibleError, AnsibleOptionsError @@ -105,11 +106,15 @@ display = Display() class LookupModule(LookupBase): @staticmethod def Client(vault_parameters): - return SecretsVault(**vault_parameters) + try: + vault = SecretsVault(**vault_parameters) + return vault + except TypeError: + raise AnsibleError("python-dsv-sdk==0.0.1 must be installed to use this plugin") def run(self, terms, variables, **kwargs): if sdk_is_missing: - raise AnsibleError("python-dsv-sdk must be installed to use this plugin") + raise AnsibleError("python-dsv-sdk==0.0.1 must be installed to use this plugin") self.set_options(var_options=variables, direct=kwargs) @@ -118,23 +123,24 @@ class LookupModule(LookupBase): "tenant": self.get_option("tenant"), "client_id": self.get_option("client_id"), "client_secret": self.get_option("client_secret"), + "tld": self.get_option("tld"), "url_template": self.get_option("url_template"), } ) result = [] for term in terms: - display.debug("dsv_lookup term: %s" % term) + display.debug(f"dsv_lookup term: {term}") try: path = term.lstrip("[/:]") if path == "": - raise AnsibleOptionsError("Invalid secret path: %s" % term) + raise AnsibleOptionsError(f"Invalid secret path: {term}") - display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path) + display.vvv(f"DevOps Secrets Vault GET /secrets/{path}") result.append(vault.get_secret_json(path)) except SecretsVaultError as error: raise AnsibleError( - "DevOps Secrets Vault lookup failure: %s" % error.message + f"DevOps 
Secrets Vault lookup failure: {error.message}" ) return result diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 0c81d0215b..65a9d23d2f 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,59 +1,51 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Jan-Piet Mens +# Copyright (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -DOCUMENTATION = ''' - author: - - Jan-Piet Mens (@jpmens) - name: etcd - short_description: get info from an etcd server +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Jan-Piet Mens (@jpmens) +name: etcd +short_description: Get info from an etcd server +description: + - Retrieves data from an etcd server. +options: + _terms: description: - - Retrieves data from an etcd server - options: - _terms: - description: - - the list of keys to lookup on the etcd server - type: list - elements: string - required: True - url: - description: - - Environment variable with the url for the etcd server - default: 'http://127.0.0.1:4001' - env: - - name: ANSIBLE_ETCD_URL - version: - description: - - Environment variable with the etcd protocol version - default: 'v1' - env: - - name: ANSIBLE_ETCD_VERSION - validate_certs: - description: - - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. - default: True - type: boolean -''' + - The list of keys to look up on the etcd server. + type: list + elements: string + required: true + url: + description: + - The URL for the etcd server. + type: string + default: 'http://127.0.0.1:4001' + env: + - name: ANSIBLE_ETCD_URL + version: + description: + - The etcd protocol version. + type: string + default: 'v1' + env: + - name: ANSIBLE_ETCD_VERSION + validate_certs: + description: + - Toggle checking that the SSL certificates are valid. You normally only want to turn this off with self-signed certs. 
+ default: true + type: boolean +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd3 + plugin_type: lookup +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo/bar') }}" @@ -62,18 +54,18 @@ EXAMPLES = ''' ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}" -- name: "since Ansible 2.5 you can set server options inline" +- name: "you can set server options inline" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}" -''' +""" -RETURN = ''' - _raw: - description: - - list of values associated with input keys - type: list - elements: string -''' +RETURN = r""" +_raw: + description: + - List of values associated with input keys. + type: list + elements: string +""" import json @@ -110,7 +102,7 @@ class Etcd: def __init__(self, url, version, validate_certs): self.url = url self.version = version - self.baseurl = '%s/%s/keys' % (self.url, self.version) + self.baseurl = f'{self.url}/{self.version}/keys' self.validate_certs = validate_certs def _parse_node(self, node): @@ -131,7 +123,7 @@ class Etcd: return path def get(self, key): - url = "%s/%s?recursive=true" % (self.baseurl, key) + url = f"{self.baseurl}/{key}?recursive=true" data = None value = {} try: diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index 5b2c334c41..0312f17127 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -1,106 +1,105 @@ -# -*- coding: utf-8 -*- # -# (c) 2020, SCC France, Eric Belhomme -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, SCC France, Eric Belhomme +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Eric Belhomme (@eric-belhomme) - version_added: '0.2.0' - name: etcd3 - short_description: Get key values from etcd3 server +DOCUMENTATION = r""" +author: + - Eric Belhomme (@eric-belhomme) +version_added: '0.2.0' +name: etcd3 +short_description: Get key values from etcd3 server +description: + - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. + - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some E(ETCDCTL_*) environment + variables. + - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. +options: + _terms: description: - - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. - - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables. - - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. + - The list of keys (or key prefixes) to look up on the etcd3 server. + type: list + elements: str + required: true + prefix: + description: + - Look for key or prefix key. + type: bool + default: false + endpoints: + description: + - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable. Specify the etcd3 connection with a URL form, for example + V(https://hostname:2379), or V(host:port) form. 
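+ # For example, E(ETCDCTL_ENDPOINTS) accepts both forms, as in ETCDCTL_ENDPOINTS=https://etcd1.example.com:2379 or the V(127.0.0.1:2379) default (host name purely illustrative).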
+ - The V(host) part is overwritten by O(host) option, if defined. + - The V(port) part is overwritten by O(port) option, if defined. + env: + - name: ETCDCTL_ENDPOINTS + default: '127.0.0.1:2379' + type: str + host: + description: + - Etcd3 listening client host. + - Takes precedence over O(endpoints). + type: str + port: + description: + - Etcd3 listening client port. + - Takes precedence over O(endpoints). + type: int + ca_cert: + description: + - Etcd3 CA authority. + env: + - name: ETCDCTL_CACERT + type: str + cert_cert: + description: + - Etcd3 client certificate. + env: + - name: ETCDCTL_CERT + type: str + cert_key: + description: + - Etcd3 client private key. + env: + - name: ETCDCTL_KEY + type: str + timeout: + description: + - Client timeout. + default: 60 + env: + - name: ETCDCTL_DIAL_TIMEOUT + type: int + user: + description: + - Authenticated user name. + env: + - name: ETCDCTL_USER + type: str + password: + description: + - Authenticated user password. + env: + - name: ETCDCTL_PASSWORD + type: str - options: - _terms: - description: - - The list of keys (or key prefixes) to look up on the etcd3 server. - type: list - elements: str - required: True - prefix: - description: - - Look for key or prefix key. - type: bool - default: False - endpoints: - description: - - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable. - Specify the etcd3 connection with and URL form eg. C(https://hostname:2379) or C(:) form. - - The C(host) part is overwritten by I(host) option, if defined. - - The C(port) part is overwritten by I(port) option, if defined. - env: - - name: ETCDCTL_ENDPOINTS - default: '127.0.0.1:2379' - type: str - host: - description: - - etcd3 listening client host. - - Takes precedence over I(endpoints). - type: str - port: - description: - - etcd3 listening client port. - - Takes precedence over I(endpoints). - type: int - ca_cert: - description: - - etcd3 CA authority. - env: - - name: ETCDCTL_CACERT - type: str - cert_cert: - description: - - etcd3 client certificate. - env: - - name: ETCDCTL_CERT - type: str - cert_key: - description: - - etcd3 client private key. - env: - - name: ETCDCTL_KEY - type: str - timeout: - description: - - Client timeout. - default: 60 - env: - - name: ETCDCTL_DIAL_TIMEOUT - type: int - user: - description: - - Authenticated user name. - env: - - name: ETCDCTL_USER - type: str - password: - description: - - Authenticated user password. - env: - - name: ETCDCTL_PASSWORD - type: str +notes: + - O(host) and O(port) options take precedence over the O(endpoints) option. + - The recommended way to connect to the etcd3 server is using the E(ETCDCTL_ENDPOINTS) environment variable and keeping O(endpoints), + O(host), and O(port) unused. +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd + plugin_type: lookup - notes: - - I(host) and I(port) options take precedence over (endpoints) option. - - The recommended way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT) - environment variable and keep I(endpoints), I(host), and I(port) unused. - seealso: - - module: community.general.etcd3 - - ref: etcd_lookup - description: The etcd v2 lookup. 
+requirements: + - "etcd3 >= 0.10" +""" - requirements: - - "etcd3 >= 0.10" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}" @@ -116,31 +115,30 @@ EXAMPLES = ''' - name: "connect to etcd3 with a client certificate" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}" -''' +""" -RETURN = ''' - _raw: - description: - - List of keys and associated values. - type: list - elements: dict - contains: - key: - description: The element's key. - type: str - value: - description: The element's value. - type: str -''' +RETURN = r""" +_raw: + description: + - List of keys and associated values. + type: list + elements: dict + contains: + key: + description: The element's key. + type: str + value: + description: The element's value. + type: str +""" import re -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display +from ansible.errors import AnsibleLookupError from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleError, AnsibleLookupError +from ansible.utils.display import Display try: import etcd3 @@ -168,7 +166,7 @@ def etcd3_client(client_params): etcd = etcd3.client(**client_params) etcd.status() except Exception as exp: - raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp))) + raise AnsibleLookupError(f'Cannot connect to etcd cluster: {exp}') return etcd @@ -204,7 +202,7 @@ class LookupModule(LookupBase): cnx_log = dict(client_params) if 'password' in cnx_log: cnx_log['password'] = '' - display.verbose("etcd3 connection parameters: %s" % cnx_log) + display.verbose(f"etcd3 connection parameters: {cnx_log}") # connect to etcd3 server etcd = etcd3_client(client_params) @@ -218,12 +216,12 @@ class LookupModule(LookupBase): if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get_prefix: %s' % (to_native(exp))) + display.warning(f'Caught exception during etcd3.get_prefix: {exp}') else: try: val, meta = etcd.get(term) if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get: %s' % (to_native(exp))) + display.warning(f'Caught exception during etcd3.get: {exp}') return ret diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 1c83486b05..49326edb87 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,24 +1,26 @@ -# -*- coding: utf-8 -*- -# (c) 2016 Dag Wieers -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2016 Dag Wieers +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" name: filetree author: Dag Wieers (@dagwieers) -short_description: recursively match all files in a directory tree +short_description: Recursively match all files in a 
directory tree description: -- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. -- Supports directories, files and symlinks, including SELinux and other file properties. -- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths. - This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. + - This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. + - Supports directories, files and symlinks, including SELinux and other file properties. + - If you provide more than one path, it implements a first_found logic, and does not process entries it already processed + in previous paths. This enables merging different trees in order of importance, or adding role_vars to specific paths to + influence different instances of the same role. options: _terms: - description: path(s) of files to read - required: True -''' + description: Path(s) of files to read. + required: true + type: list + elements: string +""" EXAMPLES = r""" - name: Create directories @@ -45,7 +47,7 @@ EXAMPLES = r""" dest: /web/{{ item.path }} state: link follow: false # avoid corrupting target files if the link already exists - force: yes + force: true mode: '{{ item.mode }}' with_community.general.filetree: web/ when: item.state == 'link' @@ -56,61 +58,61 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: List of dictionaries with file information. - type: list - elements: dict - contains: - src: - description: - - Full path to file. - - Not returned when I(item.state) is set to C(directory). - type: path - root: - description: Allows filtering by original location. - type: path - path: - description: Contains the relative path to root. - type: path - mode: - description: The permissions the resulting file or directory. - type: str - state: - description: TODO - type: str - owner: - description: Name of the user that owns the file/directory. - type: raw - group: - description: Name of the group that owns the file/directory. - type: raw - seuser: - description: The user part of the SELinux file context. - type: raw - serole: - description: The role part of the SELinux file context. - type: raw - setype: - description: The type part of the SELinux file context. - type: raw - selevel: - description: The level part of the SELinux file context. - type: raw - uid: - description: Owner ID of the file/directory. - type: int - gid: - description: Group ID of the file/directory. - type: int - size: - description: Size of the target. - type: int - mtime: - description: Time of last modification. - type: float - ctime: - description: Time of last metadata update or creation (depends on OS). - type: float +_raw: + description: List of dictionaries with file information. + type: list + elements: dict + contains: + src: + description: + - Full path to file. + - Not returned when RV(_raw[].state) is set to V(directory). + type: path + root: + description: Allows filtering by original location. + type: path + path: + description: Contains the relative path to root. + type: path + mode: + description: The permissions of the resulting file or directory. + type: str + state: + description: TODO. + type: str + owner: + description: Name of the user that owns the file/directory. 
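+ # owner and group fall back to the numeric UID/GID when no name resolves (see file_props() below), hence the raw type.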
+ type: raw + group: + description: Name of the group that owns the file/directory. + type: raw + seuser: + description: The user part of the SELinux file context. + type: raw + serole: + description: The role part of the SELinux file context. + type: raw + setype: + description: The type part of the SELinux file context. + type: raw + selevel: + description: The level part of the SELinux file context. + type: raw + uid: + description: Owner ID of the file/directory. + type: int + gid: + description: Group ID of the file/directory. + type: int + size: + description: Size of the target. + type: int + mtime: + description: Time of last modification. + type: float + ctime: + description: Time of last metadata update or creation (depends on OS). + type: float """ import os import pwd @@ -155,7 +157,7 @@ def file_props(root, path): try: st = os.lstat(abspath) except OSError as e: - display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e)) + display.warning(f'filetree: Error using stat() on path {abspath} ({e})') return None ret = dict(root=root, path=path) @@ -169,7 +171,7 @@ def file_props(root, path): ret['state'] = 'file' ret['src'] = abspath else: - display.warning('filetree: Error file type of %s is not supported' % abspath) + display.warning(f'filetree: Error file type of {abspath} is not supported') return None ret['uid'] = st.st_uid @@ -182,7 +184,7 @@ def file_props(root, path): ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name) except KeyError: ret['group'] = st.st_gid - ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode)) + ret['mode'] = f'0{stat.S_IMODE(st.st_mode):03o}' ret['size'] = st.st_size ret['mtime'] = st.st_mtime ret['ctime'] = st.st_ctime @@ -200,6 +202,8 @@ def file_props(root, path): class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + basedir = self.get_basedir(variables) ret = [] @@ -207,7 +211,7 @@ class LookupModule(LookupBase): term_file = os.path.basename(term) dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) path = os.path.join(dwimmed_path, term_file) - display.debug("Walking '{0}'".format(path)) + display.debug(f"Walking '{path}'") for root, dirs, files in os.walk(path, topdown=True): for entry in dirs + files: relpath = os.path.relpath(os.path.join(root, entry), path) @@ -216,7 +220,7 @@ class LookupModule(LookupBase): if relpath not in [entry['path'] for entry in ret]: props = file_props(path, relpath) if props is not None: - display.debug(" found '{0}'".format(os.path.join(path, relpath))) + display.debug(f" found '{os.path.join(path, relpath)}'") ret.append(props) return ret diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py index c2e4494fd4..0ed92afa27 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,39 +1,40 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Serge van Ginderachter -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2013, Serge van Ginderachter +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: flattened - author: Serge van Ginderachter (!UNKNOWN) - short_description: 
return single list completely flattened - description: - - given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left. - options: - _terms: - description: lists to flatten - required: True - notes: - - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore. - - aka highlander plugin, there can only be one (list). -''' +DOCUMENTATION = r""" +name: flattened +author: Serge van Ginderachter (!UNKNOWN) +short_description: Return single list completely flattened +description: + - Given one or more lists, this lookup flattens any list elements found recursively until only one list is left. +options: + _terms: + description: Lists to flatten. + type: list + elements: raw + required: true +notes: + - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens one level, this plugin continues to flatten until + it cannot find lists anymore. + - Aka highlander plugin, there can only be one (list). +""" -EXAMPLES = """ +EXAMPLES = r""" - name: "'unnest' all elements into single list" ansible.builtin.debug: - msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}" + msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}" """ -RETURN = """ - _raw: - description: - - flattened list - type: list +RETURN = r""" +_raw: + description: + - Flattened list. + type: list """ from ansible.errors import AnsibleError -from ansible.module_utils.six import string_types from ansible.plugins.lookup import LookupBase from ansible.utils.listify import listify_lookup_plugin_terms @@ -61,15 +62,15 @@ class LookupModule(LookupBase): # ignore undefined items break - if isinstance(term, string_types): + if isinstance(term, str): # convert a variable to a list - term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader) + term2 = listify_lookup_plugin_terms(term, templar=self._templar) # but avoid converting a plain string to a list of one string if term2 != [term]: term = term2 if isinstance(term, list): - # if it's a list, check recursively for items that are a list + # if it is a list, check recursively for items that are a list term = self._do_flatten(term, variables) ret.extend(term) else: @@ -77,9 +78,10 @@ class LookupModule(LookupBase): return ret - def run(self, terms, variables, **kwargs): - + def run(self, terms, variables=None, **kwargs): if not isinstance(terms, list): raise AnsibleError("with_flattened expects a list") + self.set_options(var_options=variables, direct=kwargs) + return self._do_flatten(terms, variables) diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py new file mode 100644 index 0000000000..0b4f4d53ee --- /dev/null +++ b/plugins/lookup/github_app_access_token.py @@ -0,0 +1,223 @@ +# Copyright (c) 2023, Poh Wei Sheng +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +name: github_app_access_token +author: + - Poh Wei Sheng (@weisheng-p) + - Bruno Lavoie (@blavoie) +short_description: Obtain short-lived GitHub App access tokens +version_added: '8.2.0' +requirements: + - jwt (https://github.com/GehirnInc/python-jwt) OR + - PyJWT (https://pypi.org/project/PyJWT/) AND cryptography (https://pypi.org/project/cryptography/) +description: + - 
This generates a GitHub access token that can be used with a C(git) command if you use a GitHub App. +options: + key_path: + description: + - Path to your private key. + - Either O(key_path) or O(private_key) must be specified. + type: path + app_id: + description: + - Your GitHub App ID; you can find this in the Settings page. + required: true + type: str + installation_id: + description: + - The installation ID that contains the git repository you would like access to. + - As of 2023-12-24, this can be found at Settings page > Integrations > Application. The last part of the URL in the + configure button is the installation ID. + - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID. + required: true + type: str + private_key: + description: + - GitHub App private key in PEM file format as string. + - Either O(key_path) or O(private_key) must be specified. + type: str + version_added: 10.0.0 + token_expiry: + description: + - How long the token should last, in seconds. + default: 600 + type: int + github_url: + description: + - Base URL for the GitHub API (for GitHub Enterprise Server). + - "Example: C(https://github-enterprise-server.example.com/api/v3)" + default: https://api.github.com + type: str + version_added: 11.4.0 +""" + +EXAMPLES = r""" +- name: Get access token to be used for git checkout with app_id=123456, installation_id=64209 + ansible.builtin.git: + repo: >- + https://x-access-token:{{ github_token }}@github.com/hidden_user/super-secret-repo.git + dest: /srv/checkout + vars: + github_token: >- + {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key', + app_id='123456', installation_id='64209') }} +""" + +RETURN = r""" +_raw: + description: A one-element list containing your GitHub access token. 
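+ # Flow sketch, per the code below: sign a short-lived RS256 JWT as the App (payload iat/exp/iss=app_id), POST it to {github_url}/app/installations/{installation_id}/access_tokens, and return the 'token' field of the JSON response.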
+ type: list + elements: str +""" + +try: + import jwt + HAS_JWT = True +except ImportError: + HAS_JWT = False + +HAS_PYTHON_JWT = False # vs pyjwt +if HAS_JWT and hasattr(jwt, 'JWT'): + HAS_PYTHON_JWT = True + from jwt import jwk_from_pem, JWT + jwt_instance = JWT() + +try: + from cryptography.hazmat.primitives import serialization + HAS_CRYPTOGRAPHY = True +except ImportError: + HAS_CRYPTOGRAPHY = False + + +import time +import json +from urllib.error import HTTPError + +from ansible.module_utils.urls import open_url +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + + +class PythonJWT: + + @staticmethod + def read_key(path, private_key=None): + try: + if private_key: + return jwk_from_pem(private_key.encode('utf-8')) + with open(path, 'rb') as pem_file: + return jwk_from_pem(pem_file.read()) + except Exception as e: + raise AnsibleError(f"Error while parsing key file: {e}") + + @staticmethod + def encode_jwt(app_id, jwk, exp=600): + now = int(time.time()) + payload = { + 'iat': now, + 'exp': now + exp, + 'iss': app_id, + } + try: + return jwt_instance.encode(payload, jwk, alg='RS256') + except Exception as e: + raise AnsibleError(f"Error while encoding jwt: {e}") + + +def read_key(path, private_key=None): + if HAS_PYTHON_JWT: + return PythonJWT.read_key(path, private_key) + try: + if private_key: + key_bytes = private_key.encode('utf-8') + else: + with open(path, 'rb') as pem_file: + key_bytes = pem_file.read() + return serialization.load_pem_private_key(key_bytes, password=None) + except Exception as e: + raise AnsibleError(f"Error while parsing key file: {e}") + + +def encode_jwt(app_id, private_key_obj, exp=600): + if HAS_PYTHON_JWT: + return PythonJWT.encode_jwt(app_id, private_key_obj, exp) + now = int(time.time()) + payload = { + 'iat': now, + 'exp': now + exp, + 'iss': app_id, + } + try: + return jwt.encode(payload, private_key_obj, algorithm='RS256') + except Exception as e: + raise AnsibleError(f"Error while encoding jwt: {e}") + + +def post_request(generated_jwt, installation_id, api_base): + base = api_base.rstrip('/') + github_url = f"{base}/app/installations/{installation_id}/access_tokens" + + headers = { + "Authorization": f'Bearer {generated_jwt}', + "Accept": "application/vnd.github.v3+json", + } + try: + response = open_url(github_url, headers=headers, method='POST') + except HTTPError as e: + try: + error_body = json.loads(e.read().decode()) + display.vvv(f"Error returned: {error_body}") + except Exception: + error_body = {} + if e.code == 404: + raise AnsibleError("GitHub returned an error. Please confirm your installation_id value is valid") + elif e.code == 401: + raise AnsibleError("GitHub returned an error. 
Please confirm your private key is valid") + raise AnsibleError(f"Unexpected data returned: {e} -- {error_body}") + response_body = response.read() + try: + json_data = json.loads(response_body.decode('utf-8')) + except json.decoder.JSONDecodeError as e: + raise AnsibleError(f"Error while decoding JSON response from GitHub: {e}") + return json_data.get('token') + + +def get_token(key_path, app_id, installation_id, private_key, github_url, expiry=600): + jwk = read_key(key_path, private_key) + generated_jwt = encode_jwt(app_id, jwk, exp=expiry) + return post_request(generated_jwt, installation_id, github_url) + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + if not HAS_JWT: + raise AnsibleError('Python jwt library is required. ' + 'Please install using "pip install pyjwt"') + + if not HAS_PYTHON_JWT and not HAS_CRYPTOGRAPHY: + raise AnsibleError('Python cryptography library is required. ' + 'Please install using "pip install cryptography"') + + self.set_options(var_options=variables, direct=kwargs) + + if not (self.get_option("key_path") or self.get_option("private_key")): + raise AnsibleOptionsError("One of key_path or private_key is required") + if self.get_option("key_path") and self.get_option("private_key"): + raise AnsibleOptionsError("key_path and private_key are mutually exclusive") + + t = get_token( + self.get_option('key_path'), + self.get_option('app_id'), + self.get_option('installation_id'), + self.get_option('private_key'), + self.get_option('github_url'), + self.get_option('token_expiry'), + ) + + return [t] diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 5b440469eb..d031987a81 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,42 +1,49 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Juan Manuel Parrilla -# (c) 2012-17 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017, Juan Manuel Parrilla +# Copyright (c) 2012-17 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Juan Manuel Parrilla (@jparrill) - name: hiera - short_description: get info from hiera data - requirements: - - hiera (command line utility) +DOCUMENTATION = r""" +author: + - Juan Manuel Parrilla (@jparrill) +name: hiera +short_description: Get info from hiera data +requirements: + - hiera (command line utility) +description: + - Retrieves data from a Puppetmaster node using Hiera as ENC. +deprecated: + removed_in: 13.0.0 + why: >- + Hiera was deprecated a long time ago. + If you disagree with this deprecation, please create an issue in the community.general repository. + alternative: Unknown. +options: + _terms: description: - - Retrieves data from an Puppetmaster node using Hiera as ENC - options: - _hiera_key: - description: - - The list of keys to lookup on the Puppetmaster - type: list - elements: string - required: True - _bin_file: - description: - - Binary file to execute Hiera - default: '/usr/bin/hiera' - env: - - name: ANSIBLE_HIERA_BIN - _hierarchy_file: - description: - - File that describes the hierarchy of Hiera - default: '/etc/hiera.yaml' - env: - - name: ANSIBLE_HIERA_CFG + - The list of keys to look up on the Puppetmaster. 
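+ # Each key is handed to the hiera CLI, roughly as /usr/bin/hiera -c /etc/hiera.yaml <key> (default paths shown; see the Hiera class below).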
+ type: list + elements: string + required: true + executable: + description: + - Binary file to execute Hiera. + type: string + default: '/usr/bin/hiera' + env: + - name: ANSIBLE_HIERA_BIN + config_file: + description: + - File that describes the hierarchy of Hiera. + type: string + default: '/etc/hiera.yaml' + env: + - name: ANSIBLE_HIERA_CFG # FIXME: incomplete options .. _terms? environment/fqdn? -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # All these examples depend on a hiera.yml that describes the hierarchy - name: "a value from Hiera 'DB'" @@ -52,39 +59,39 @@ EXAMPLES = """ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}" """ -RETURN = """ - _raw: - description: - - a value associated with input key - type: list - elements: str +RETURN = r""" +_raw: + description: + - A value associated with the input key. + type: list + elements: str """ -import os - from ansible.plugins.lookup import LookupBase from ansible.utils.cmd_functions import run_cmd from ansible.module_utils.common.text.converters import to_text -ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') -ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') - class Hiera(object): + def __init__(self, hiera_cfg, hiera_bin): + self.hiera_cfg = hiera_cfg + self.hiera_bin = hiera_bin + def get(self, hiera_key): - pargs = [ANSIBLE_HIERA_BIN] - pargs.extend(['-c', ANSIBLE_HIERA_CFG]) + pargs = [self.hiera_bin] + pargs.extend(['-c', self.hiera_cfg]) pargs.extend(hiera_key) - rc, output, err = run_cmd("{0} -c {1} {2}".format( - ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0])) + rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}") return to_text(output.strip()) class LookupModule(LookupBase): - def run(self, terms, variables=''): - hiera = Hiera() + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + hiera = Hiera(self.get_option('config_file'), self.get_option('executable')) ret = [hiera.get(terms)] return ret diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index 73f9c5f4a9..73fca84e6f 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,38 +1,39 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Samuel Boucher -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Samuel Boucher +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: keyring - author: - - Samuel Boucher (!UNKNOWN) - requirements: - - keyring (python library) - short_description: grab secrets from the OS keyring - description: - - Allows you to access data stored in the OS provided keyring/keychain. -''' +DOCUMENTATION = r""" +name: keyring +author: + - Samuel Boucher (!UNKNOWN) +requirements: + - keyring (python library) +short_description: Grab secrets from the OS keyring +description: + - Allows you to access data stored in the OS-provided keyring/keychain. 
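+ # Each term is a space-separated "servicename username" pair, mirroring the keyring.get_password(servicename, username) call below.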
+""" -EXAMPLES = """ -- name : output secrets to screen (BAD IDEA) +EXAMPLES = r""" +- name: output secrets to screen (BAD IDEA) ansible.builtin.debug: msg: "Password: {{item}}" with_community.general.keyring: - 'servicename username' - name: access mysql with password from keyring - mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe + community.mysql.mysql_db: + login_password: "{{ lookup('community.general.keyring', 'mysql joe') }}" + login_user: joe """ -RETURN = """ - _raw: - description: Secrets stored. - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. + type: list + elements: str """ HAS_KEYRING = True @@ -52,17 +53,19 @@ display = Display() class LookupModule(LookupBase): - def run(self, terms, **kwargs): + def run(self, terms, variables=None, **kwargs): if not HAS_KEYRING: - raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'") + raise AnsibleError("Can't LOOKUP(keyring): missing required python library 'keyring'") - display.vvvv(u"keyring: %s" % keyring.get_keyring()) + self.set_options(var_options=variables, direct=kwargs) + + display.vvvv(f"keyring: {keyring.get_keyring()}") ret = [] for term in terms: (servicename, username) = (term.split()[0], term.split()[1]) - display.vvvv(u"username: %s, servicename: %s " % (username, servicename)) + display.vvvv(f"username: {username}, servicename: {servicename} ") password = keyring.get_password(servicename, username) if password is None: - raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username)) + raise AnsibleError(f"servicename: {servicename} for user {username} not found") ret.append(password.rstrip()) return ret diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 920d33176f..8a3999c372 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,40 +1,42 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Andrew Zenk -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2016, Andrew Zenk +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: lastpass - author: - - Andrew Zenk (!UNKNOWN) - requirements: - - lpass (command line utility) - - must have already logged into lastpass - short_description: fetch data from lastpass - description: - - use the lpass command line utility to fetch specific fields from lastpass - options: - _terms: - description: key from which you want to retrieve the field - required: True - field: - description: field to return from lastpass - default: 'password' -''' +DOCUMENTATION = r""" +name: lastpass +author: + - Andrew Zenk (!UNKNOWN) +requirements: + - lpass (command line utility) + - must have already logged into LastPass +short_description: Fetch data from LastPass +description: + - Use the lpass command line utility to fetch specific fields from LastPass. +options: + _terms: + description: Key from which you want to retrieve the field. + required: true + type: list + elements: str + field: + description: Field to return from LastPass. 
+ default: 'password' + type: str +""" -EXAMPLES = """ -- name: get 'custom_field' from lastpass entry 'entry-name' +EXAMPLES = r""" +- name: get 'custom_field' from LastPass entry 'entry-name' ansible.builtin.debug: msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}" """ -RETURN = """ - _raw: - description: secrets stored - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. + type: list + elements: str """ from subprocess import Popen, PIPE @@ -79,21 +81,23 @@ class LPass(object): def get_field(self, key, field): if field in ['username', 'password', 'url', 'notes', 'id', 'name']: - out, err = self._run(self._build_args("show", ["--{0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--{field}", key])) else: - out, err = self._run(self._build_args("show", ["--field={0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--field={field}", key])) return out.strip() class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + field = self.get_option('field') + lp = LPass() if not lp.logged_in: - raise AnsibleError("Not logged into lastpass: please run 'lpass login' first") + raise AnsibleError("Not logged into LastPass: please run 'lpass login' first") - field = kwargs.get('field', 'password') values = [] for term in terms: values.append(lp.get_field(term, field)) diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 9dd46e338a..f9b0d9482f 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,29 +1,33 @@ -# -*- coding: utf-8 -*- -# (c) 2017-2018, Jan-Piet Mens -# (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017-2018, Jan-Piet Mens +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: lmdb_kv - author: - - Jan-Piet Mens (@jpmens) - version_added: '0.2.0' - short_description: fetch data from LMDB - description: - - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it - requirements: - - lmdb (python library https://lmdb.readthedocs.io/en/release/) - options: - _terms: - description: list of keys to query - db: - description: path to LMDB database - default: 'ansible.mdb' -''' +DOCUMENTATION = r""" +name: lmdb_kv +author: + - Jan-Piet Mens (@jpmens) +version_added: '0.2.0' +short_description: Fetch data from LMDB +description: + - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it. +requirements: + - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/)) +options: + _terms: + description: List of keys to query. + type: list + elements: str + db: + description: Path to LMDB database. 
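+ # The database is opened read-only via lmdb.open(db, readonly=True) below; a trailing '*' on a key, as the 'n*' example above suggests, requests prefix matching.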
+ type: str + default: 'ansible.mdb' + vars: + - name: lmdb_kv_db +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query LMDB for a list of country codes ansible.builtin.debug: msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}" @@ -34,7 +38,7 @@ EXAMPLES = """ vars: - lmdb_kv_db: jp.mdb with_community.general.lmdb_kv: - - "n*" + - "n*" - name: get an item by key ansible.builtin.assert: @@ -42,13 +46,13 @@ EXAMPLES = """ - item == 'Belgium' vars: - lmdb_kv_db: jp.mdb - with_community.general.lmdb_kv: - - be + with_community.general.lmdb_kv: + - be """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in LMDB + description: Value(s) stored in LMDB. type: list elements: raw """ @@ -57,6 +61,7 @@ _raw: from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.module_utils.common.text.converters import to_native, to_text + HAVE_LMDB = True try: import lmdb @@ -66,8 +71,7 @@ except ImportError: class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - + def run(self, terms, variables=None, **kwargs): ''' terms contain any number of keys to be retrieved. If terms is None, all keys from the database are returned @@ -80,19 +84,17 @@ class LookupModule(LookupBase): vars: - lmdb_kv_db: "jp.mdb" ''' - if HAVE_LMDB is False: raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed") - db = variables.get('lmdb_kv_db', None) - if db is None: - db = kwargs.get('db', 'ansible.mdb') - db = str(db) + self.set_options(var_options=variables, direct=kwargs) + + db = self.get_option('db') try: - env = lmdb.open(db, readonly=True) + env = lmdb.open(str(db), readonly=True) except Exception as e: - raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e))) + raise AnsibleError(f"LMDB cannot open database {db}: {e}") ret = [] if len(terms) == 0: diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py deleted file mode 100644 index 01bb13cf0b..0000000000 --- a/plugins/lookup/manifold.py +++ /dev/null @@ -1,279 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2018, Arigato Machine Inc. -# (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: - - Kyrylo Galanov (!UNKNOWN) - name: manifold - short_description: get credentials from Manifold.co - description: - - Retrieves resources' credentials from Manifold.co - options: - _terms: - description: - - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all - matched resources will be returned. - type: list - elements: string - required: False - api_token: - description: - - manifold API token - type: string - required: True - env: - - name: MANIFOLD_API_TOKEN - project: - description: - - The project label you want to get the resource for. - type: string - required: False - team: - description: - - The team label you want to get the resource for. 
- type: string - required: False -''' - -EXAMPLES = ''' - - name: all available resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}" - - name: all available resources for a specific project in specific team - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}" - - name: two specific resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}" -''' - -RETURN = ''' - _raw: - description: - - dictionary of credentials ready to be consumed as environment variables. If multiple resources define - the same environment variable(s), the last one returned by the Manifold API will take precedence. - type: dict -''' -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase -from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils import six -from ansible.utils.display import Display -from traceback import format_exception -import json -import sys -import os - -display = Display() - - -class ApiError(Exception): - pass - - -class ManifoldApiClient(object): - base_url = 'https://api.{api}.manifold.co/v1/{endpoint}' - http_agent = 'python-manifold-ansible-1.0.0' - - def __init__(self, token): - self._token = token - - def request(self, api, endpoint, *args, **kwargs): - """ - Send a request to API backend and pre-process a response. - :param api: API to send a request to - :type api: str - :param endpoint: API endpoint to fetch data from - :type endpoint: str - :param args: other args for open_url - :param kwargs: other kwargs for open_url - :return: server response. JSON response is automatically deserialized. 
- :rtype: dict | list | str - """ - - default_headers = { - 'Authorization': "Bearer {0}".format(self._token), - 'Accept': "*/*" # Otherwise server doesn't set content-type header - } - - url = self.base_url.format(api=api, endpoint=endpoint) - - headers = default_headers - arg_headers = kwargs.pop('headers', None) - if arg_headers: - headers.update(arg_headers) - - try: - display.vvvv('manifold lookup connecting to {0}'.format(url)) - response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs) - data = response.read() - if response.headers.get('content-type') == 'application/json': - data = json.loads(data) - return data - except ValueError: - raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url)) - except HTTPError as e: - raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format( - err=str(e), url=url, response=e.read())) - except URLError as e: - raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e))) - except SSLValidationError as e: - raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e))) - except ConnectionError as e: - raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e))) - - def get_resources(self, team_id=None, project_id=None, label=None): - """ - Get resources list - :param team_id: ID of the Team to filter resources by - :type team_id: str - :param project_id: ID of the project to filter resources by - :type project_id: str - :param label: filter resources by a label, returns a list with one or zero elements - :type label: str - :return: list of resources - :rtype: list - """ - api = 'marketplace' - endpoint = 'resources' - query_params = {} - - if team_id: - query_params['team_id'] = team_id - if project_id: - query_params['project_id'] = project_id - if label: - query_params['label'] = label - - if query_params: - endpoint += '?' + urlencode(query_params) - - return self.request(api, endpoint) - - def get_teams(self, label=None): - """ - Get teams list - :param label: filter teams by a label, returns a list with one or zero elements - :type label: str - :return: list of teams - :rtype: list - """ - api = 'identity' - endpoint = 'teams' - data = self.request(api, endpoint) - # Label filtering is not supported by API, however this function provides uniform interface - if label: - data = list(filter(lambda x: x['body']['label'] == label, data)) - return data - - def get_projects(self, label=None): - """ - Get projects list - :param label: filter projects by a label, returns a list with one or zero elements - :type label: str - :return: list of projects - :rtype: list - """ - api = 'marketplace' - endpoint = 'projects' - query_params = {} - - if label: - query_params['label'] = label - - if query_params: - endpoint += '?' + urlencode(query_params) - - return self.request(api, endpoint) - - def get_credentials(self, resource_id): - """ - Get resource credentials - :param resource_id: ID of the resource to filter credentials by - :type resource_id: str - :return: - """ - api = 'marketplace' - endpoint = 'credentials?' + urlencode({'resource_id': resource_id}) - return self.request(api, endpoint) - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, api_token=None, project=None, team=None): - """ - :param terms: a list of resources lookups to run. 
- :param variables: ansible variables active at the time of the lookup - :param api_token: API token - :param project: optional project label - :param team: optional team label - :return: a dictionary of resources credentials - """ - - if not api_token: - api_token = os.getenv('MANIFOLD_API_TOKEN') - if not api_token: - raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var') - - try: - labels = terms - client = ManifoldApiClient(api_token) - - if team: - team_data = client.get_teams(team) - if len(team_data) == 0: - raise AnsibleError("Team '{0}' does not exist".format(team)) - team_id = team_data[0]['id'] - else: - team_id = None - - if project: - project_data = client.get_projects(project) - if len(project_data) == 0: - raise AnsibleError("Project '{0}' does not exist".format(project)) - project_id = project_data[0]['id'] - else: - project_id = None - - if len(labels) == 1: # Use server-side filtering if one resource is requested - resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0]) - else: # Get all resources and optionally filter labels - resources_data = client.get_resources(team_id=team_id, project_id=project_id) - if labels: - resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data)) - - if labels and len(resources_data) < len(labels): - fetched_labels = [r['body']['label'] for r in resources_data] - not_found_labels = [label for label in labels if label not in fetched_labels] - raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels))) - - credentials = {} - cred_map = {} - for resource in resources_data: - resource_credentials = client.get_credentials(resource['id']) - if len(resource_credentials) and resource_credentials[0]['body']['values']: - for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']): - label = resource['body']['label'] - if cred_key in credentials: - display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data " - "with label '{new_label}'".format(cred_key=cred_key, - old_label=cred_map[cred_key], - new_label=label)) - credentials[cred_key] = cred_val - cred_map[cred_key] = label - - ret = [credentials] - return ret - except ApiError as e: - raise AnsibleError('API Error: {0}'.format(str(e))) - except AnsibleError as e: - raise e - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback)) diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py new file mode 100644 index 0000000000..5c1686b499 --- /dev/null +++ b/plugins/lookup/merge_variables.py @@ -0,0 +1,232 @@ +# Copyright (c) 2020, Thales Netherlands +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Roy Lenferink (@rlenferink) + - Mark Ettema (@m-a-r-k-e) + - Alexander Petrenz (@alpex8) +name: merge_variables +short_description: Merge variables whose names match a given pattern +description: + - This lookup returns the merged result of all variables in scope whose names match the given prefixes, suffixes, or regular expressions.
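To make the documented behavior concrete before the options below: variable names are matched against a pattern, sorted, and their values merged in that order. A reduced sketch of the idea, not the plugin code itself (merge_matching is an illustrative name):

    import re

    def merge_matching(variables, pattern):
        # Sort matching names so the merge order is deterministic, as the plugin does.
        names = sorted(name for name in variables if re.search(pattern, name))
        merged = []
        for name in names:
            merged += variables[name]  # list case; dicts are merged key by key
        return merged

    # merge_matching({"a__x": [1], "b__x": [2], "c": [3]}, "__x$") returns [1, 2]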
+version_added: 6.5.0 +options: + _terms: + description: + - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions that is used + to match all variables that should be merged. + required: true + type: list + elements: str + pattern_type: + description: + - Change the way of searching for the specified pattern. + type: str + default: 'regex' + choices: + - prefix + - suffix + - regex + env: + - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE + ini: + - section: merge_variables_lookup + key: pattern_type + initial_value: + description: + - An initial value to start with. + type: raw + override: + description: + - Return an error, print a warning or ignore it when a key is overwritten. + - The default behavior V(error) makes the plugin fail when a key would be overwritten. + - When V(warn) and V(ignore) are used, note that it is important to know that the variables are sorted by name before + being merged. Keys for later variables in this order overwrite keys of the same name for variables earlier in this + order. To avoid potential confusion, better use O(override=error) whenever possible. + type: str + default: 'error' + choices: + - error + - warn + - ignore + env: + - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE + ini: + - section: merge_variables_lookup + key: override + groups: + description: + - Search for variables across hosts that belong to the given groups. This allows to collect configuration pieces across + different hosts (for example a service on a host with its database on another host). + type: list + elements: str + version_added: 8.5.0 +""" + +EXAMPLES = r""" +# Some example variables, they can be defined anywhere as long as they are in scope +test_init_list: + - "list init item 1" + - "list init item 2" + +testa__test_list: + - "test a item 1" + +testb__test_list: + - "test b item 1" + +testa__test_dict: + ports: + - 1 + +testb__test_dict: + ports: + - 3 + +# Merge variables that end with '__test_dict' and store the result in a variable 'example_a' +example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}" + +# The variable example_a now contains: +# ports: +# - 1 +# - 3 + +# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the +# result in a variable 'example_b' +example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}" + +# The variable example_b now contains: +# - "list init item 1" +# - "list init item 2" +# - "test a item 1" +# - "test b item 1" +""" + +RETURN = r""" +_raw: + description: In case the search matches list items, a list is returned. In case the search matches dicts, a dict is returned. 
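The override option above decides what happens when two matched variables define the same key. A reduced sketch of the dict-merge semantics, mirroring the _merge_dict method further down in this file without being identical to it:

    def merge_dict(src, dest, override="error"):
        # Dicts recurse, lists append, and a conflicting scalar key is handled
        # according to 'override' ("error", "warn", or "ignore").
        for key, value in src.items():
            if isinstance(value, dict):
                merge_dict(value, dest.setdefault(key, {}), override)
            elif isinstance(value, list) and key in dest:
                dest[key] += value
            else:
                if key in dest and dest[key] != value and override == "error":
                    raise ValueError(f"key '{key}' would be overwritten")
                # "warn" would log a warning here; "ignore" overwrites silently
                dest[key] = value
        return dest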
+ type: raw + elements: raw +""" + +import re + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + + +def _verify_and_get_type(variable): + if isinstance(variable, list): + return "list" + elif isinstance(variable, dict): + return "dict" + else: + raise AnsibleError("Not supported type detected, variable must be a list or a dict") + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(direct=kwargs) + initial_value = self.get_option("initial_value", None) + self._override = self.get_option('override', 'error') + self._pattern_type = self.get_option('pattern_type', 'regex') + self._groups = self.get_option('groups', None) + + ret = [] + for term in terms: + if not isinstance(term, str): + raise AnsibleError(f"Non-string type '{type(term)}' passed, only 'str' types are allowed!") + + if not self._groups: # consider only own variables + ret.append(self._merge_vars(term, initial_value, variables)) + else: # consider variables of hosts in given groups + cross_host_merge_result = initial_value + for host in variables["hostvars"]: + if self._is_host_in_allowed_groups(variables["hostvars"][host]["group_names"]): + host_variables = dict(variables["hostvars"].raw_get(host)) + host_variables["hostvars"] = variables["hostvars"] # re-add hostvars + cross_host_merge_result = self._merge_vars(term, cross_host_merge_result, host_variables) + ret.append(cross_host_merge_result) + + return ret + + def _is_host_in_allowed_groups(self, host_groups): + if 'all' in self._groups: + return True + + group_intersection = [host_group_name for host_group_name in host_groups if host_group_name in self._groups] + if group_intersection: + return True + + return False + + def _var_matches(self, key, search_pattern): + if self._pattern_type == "prefix": + return key.startswith(search_pattern) + elif self._pattern_type == "suffix": + return key.endswith(search_pattern) + elif self._pattern_type == "regex": + matcher = re.compile(search_pattern) + return matcher.search(key) + + return False + + def _merge_vars(self, search_pattern, initial_value, variables): + display.vvv(f"Merge variables with {self._pattern_type}: {search_pattern}") + var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)]) + display.vvv(f"The following variables will be merged: {var_merge_names}") + prev_var_type = None + result = None + + if initial_value is not None: + prev_var_type = _verify_and_get_type(initial_value) + result = initial_value + + for var_name in var_merge_names: + temp_templar = self._templar.copy_with_new_env(available_variables=variables) # tmp. 
switch renderer to context of current variables + var_value = temp_templar.template(variables[var_name]) # Render jinja2 templates + var_type = _verify_and_get_type(var_value) + + if prev_var_type is None: + prev_var_type = var_type + elif prev_var_type != var_type: + raise AnsibleError("Unable to merge, not all variables are of the same type") + + if result is None: + result = var_value + continue + + if var_type == "dict": + result = self._merge_dict(var_value, result, [var_name]) + else: # var_type == "list" + result += var_value + + return result + + def _merge_dict(self, src, dest, path): + for key, value in src.items(): + if isinstance(value, dict): + node = dest.setdefault(key, {}) + self._merge_dict(value, node, path + [key]) + elif isinstance(value, list) and key in dest: + dest[key] += value + else: + if (key in dest) and dest[key] != value: + msg = f"The key '{key}' with value '{dest[key]}' will be overwritten with value '{value}' from '{'.'.join(path)}.{key}'" + + if self._override == "error": + raise AnsibleError(msg) + if self._override == "warn": + display.warning(msg) + + dest[key] = value + + return dest diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 9f97a90e71..ab68796ed1 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -1,63 +1,44 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Scott Buchanan -# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Scott Buchanan +# Copyright (c) 2016, Andrew Zenk (lastpass.py used as starting point) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: onepassword - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch field values from 1Password - description: - - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password. - options: - _terms: - description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve. - required: True - field: - description: field to return from each matching item (case-insensitive). - default: 'password' - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - domain: - description: Domain of 1Password. Default is U(1password.com). - version_added: 3.2.0 - default: '1password.com' - type: str - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. 
If not, and you have already - performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. - You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. - - Tested with C(op) version 0.5.3 -''' +DOCUMENTATION = r""" +name: onepassword +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +short_description: Fetch field values from 1Password +description: + - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password. +requirements: + - C(op) 1Password command line utility +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 3.2.0 + field: + description: Field to return from each matching item (case-insensitive). + default: 'password' + type: str + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" # These examples only work when already signed in to 1Password - name: Retrieve password for KITT when already signed in to 1Password ansible.builtin.debug: @@ -73,126 +54,165 @@ EXAMPLES = """ - name: Retrieve password for HAL when not signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password) + var: lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password) - name: Retrieve password for HAL when never signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password - username='tweety@acme.com' - secret_key=vault_secret_key) + var: >- + lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password, + username='tweety@acme.com', secret_key=vault_secret_key) + +- name: Retrieve password from specific account + ansible.builtin.debug: + var: lookup('community.general.onepassword', 'HAL 9000', account_id='abc123') """ -RETURN = """ - _raw: - description: field data requested - type: list - elements: str +RETURN = r""" +_raw: + description: Field data requested. 
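A design choice worth noting in the rewrite that follows: the plugin shells out to op --version once, then instantiates the wrapper class whose supports_version matches the reported major version. A reduced sketch of that dispatch (pick_cli_class is an illustrative name, not the patched code itself):

    import subprocess

    def pick_cli_class(classes, bin_path="op"):
        # 'classes' stands in for the OnePassCLIBase subclasses defined below.
        version = subprocess.check_output([bin_path, "--version"], text=True).strip()
        major = version.split(".")[0]
        for cls in classes:
            if cls.supports_version == major:
                return cls
        raise RuntimeError(f"op version {version} is unsupported")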
+ type: list + elements: str """ -import errno -import json +import abc import os - -from subprocess import Popen, PIPE +import json +import subprocess from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleLookupError +from ansible.errors import AnsibleLookupError, AnsibleOptionsError +from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig -class OnePass(object): - def __init__(self, path='op'): - self.cli_path = path - self.config_file_path = os.path.expanduser('~/.op/config') - self.logged_in = False - self.token = None - self.subdomain = None - self.domain = None - self.username = None - self.secret_key = None - self.master_password = None +def _lower_if_possible(value): + """Return the lower case version value, otherwise return the value""" + try: + return value.lower() + except AttributeError: + return value - def get_token(self): - # If the config file exists, assume an initial signin has taken place and try basic sign in - if os.path.isfile(self.config_file_path): - if not self.master_password: - raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.') +class OnePassCLIBase(object, metaclass=abc.ABCMeta): + bin = "op" - try: - args = ['signin', '--output=raw'] + def __init__( + self, + subdomain=None, + domain="1password.com", + username=None, + secret_key=None, + master_password=None, + service_account_token=None, + account_id=None, + connect_host=None, + connect_token=None, + ): + self.subdomain = subdomain + self.domain = domain + self.username = username + self.master_password = master_password + self.secret_key = secret_key + self.service_account_token = service_account_token + self.account_id = account_id + self.connect_host = connect_host + self.connect_token = connect_token - if self.subdomain: - args = ['signin', self.subdomain, '--output=raw'] + self._path = None + self._version = None - rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) - self.token = out.strip() + def _check_required_params(self, required_params): + non_empty_attrs = {param: getattr(self, param) for param in required_params if getattr(self, param, None)} + missing = set(required_params).difference(non_empty_attrs) + if missing: + prefix = "Unable to sign in to 1Password. Missing required parameter" + plural = "" + suffix = f": {', '.join(missing)}." 
+ if len(missing) > 1: + plural = "s" - except AnsibleLookupError: - self.full_login() + msg = f"{prefix}{plural}{suffix}" + raise AnsibleLookupError(msg) - else: - # Attempt a full sign in since there appears to be no existing sign in - self.full_login() + @abc.abstractmethod + def _parse_field(self, data_json, field_name, section_title): + """Main method for parsing data returned from the op command line tool""" - def assert_logged_in(self): - try: - rc, out, err = self._run(['get', 'account'], ignore_errors=True) - if rc == 0: - self.logged_in = True - if not self.logged_in: - self.get_token() - except OSError as e: - if e.errno == errno.ENOENT: - raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path) - raise e + def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False, environment_update=None): + command = [self.path] + args + call_kwargs = { + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "stdin": subprocess.PIPE, + } - def get_raw(self, item_id, vault=None): - args = ["get", "item", item_id] - if vault is not None: - args += ['--vault={0}'.format(vault)] - if not self.logged_in: - args += [to_bytes('--session=') + self.token] - rc, output, dummy = self._run(args) - return output + if environment_update: + env = os.environ.copy() + env.update(environment_update) + call_kwargs["env"] = env - def get_field(self, item_id, field, section=None, vault=None): - output = self.get_raw(item_id, vault) - return self._parse_field(output, field, section) if output != '' else '' - - def full_login(self): - if None in [self.subdomain, self.username, self.secret_key, self.master_password]: - raise AnsibleLookupError('Unable to perform initial sign in to 1Password. ' - 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') - - args = [ - 'signin', - '{0}.{1}'.format(self.subdomain, self.domain), - to_bytes(self.username), - to_bytes(self.secret_key), - '--output=raw', - ] - - rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) - self.token = out.strip() - - def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): - command = [self.cli_path] + args - p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + p = subprocess.Popen(command, **call_kwargs) out, err = p.communicate(input=command_input) rc = p.wait() + if not ignore_errors and rc != expected_rc: raise AnsibleLookupError(to_text(err)) + + return rc, out, err - def _parse_field(self, data_json, field_name, section_title=None): + @abc.abstractmethod + def assert_logged_in(self): + """Check whether a login session exists""" + + @abc.abstractmethod + def full_signin(self): + """Perform a full login""" + + @abc.abstractmethod + def get_raw(self, item_id, vault=None, token=None): + """Gets the specified item from the vault""" + + @abc.abstractmethod + def signin(self): + """Sign in using the master password""" + + @property + def path(self): + if self._path is None: + self._path = get_bin_path(self.bin) + + return self._path + + @property + def version(self): + if self._version is None: + self._version = self.get_current_version() + + return self._version + + @classmethod + def get_current_version(cls): + """Standalone method to get the op CLI version.
Useful when determining which class to load + based on the current version.""" + try: + bin_path = get_bin_path(cls.bin) + except ValueError: + raise AnsibleLookupError(f"Unable to locate '{cls.bin}' command line tool") + + try: + b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE) + except subprocess.CalledProcessError as cpe: + raise AnsibleLookupError(f"Unable to get the op version: {cpe}") + + return to_text(b_out).strip() + + +class OnePassCLIv1(OnePassCLIBase): + supports_version = "1" + + def _parse_field(self, data_json, field_name, section_title): """ Retrieves the desired field from the `op` response payload @@ -246,39 +266,437 @@ class OnePass(object): # check the details dictionary for `field_name` and return it immediately if it exists # when the entry is a "password" instead of a "login" item, the password field is a key # in the `details` dictionary: - if field_name in data['details']: - return data['details'][field_name] + if field_name in data["details"]: + return data["details"][field_name] # when the field is not found above, iterate through the fields list in the object details - for field_data in data['details'].get('fields', []): - if field_data.get('name', '').lower() == field_name.lower(): - return field_data.get('value', '') - for section_data in data['details'].get('sections', []): - if section_title is not None and section_title.lower() != section_data['title'].lower(): + for field_data in data["details"].get("fields", []): + if field_data.get("name", "").lower() == field_name.lower(): + return field_data.get("value", "") + + for section_data in data["details"].get("sections", []): + if section_title is not None and section_title.lower() != section_data["title"].lower(): continue - for field_data in section_data.get('fields', []): - if field_data.get('t', '').lower() == field_name.lower(): - return field_data.get('v', '') - return '' + + for field_data in section_data.get("fields", []): + if field_data.get("t", "").lower() == field_name.lower(): + return field_data.get("v", "") + + return "" + + def assert_logged_in(self): + args = ["get", "account"] + if self.account_id: + args.extend(["--account", self.account_id]) + elif self.subdomain: + account = f"{self.subdomain}.{self.domain}" + args.extend(["--account", account]) + + rc, out, err = self._run(args, ignore_errors=True) + + return not bool(rc) + + def full_signin(self): + if self.connect_host or self.connect_token: + raise AnsibleLookupError( + "1Password Connect is not available with 1Password CLI version 1. Please use version 2 or later.") + + if self.service_account_token: + raise AnsibleLookupError( + "1Password CLI version 1 does not support Service Accounts. 
Please use version 2 or later.") + + required_params = [ + "subdomain", + "username", + "secret_key", + "master_password", + ] + self._check_required_params(required_params) + + args = [ + "signin", + f"{self.subdomain}.{self.domain}", + to_bytes(self.username), + to_bytes(self.secret_key), + "--raw", + ] + + return self._run(args, command_input=to_bytes(self.master_password)) + + def get_raw(self, item_id, vault=None, token=None): + args = ["get", "item", item_id] + + if self.account_id: + args.extend(["--account", self.account_id]) + + if vault is not None: + args += [f"--vault={vault}"] + + if token is not None: + args += [to_bytes("--session=") + token] + + return self._run(args) + + def signin(self): + self._check_required_params(['master_password']) + + args = ["signin", "--raw"] + if self.subdomain: + args.append(self.subdomain) + + return self._run(args, command_input=to_bytes(self.master_password)) + + +class OnePassCLIv2(OnePassCLIBase): + """ + CLIv2 Syntax Reference: https://developer.1password.com/docs/cli/upgrade#step-2-update-your-scripts + """ + supports_version = "2" + + def _parse_field(self, data_json, field_name, section_title=None): + """ + Schema reference: https://developer.1password.com/docs/cli/item-template-json + + Example Data: + + # Password item + { + "id": "ywvdbojsguzgrgnokmcxtydgdv", + "title": "Authy Backup", + "version": 1, + "vault": { + "id": "bcqxysvcnejjrwzoqrwzcqjqxc", + "name": "Personal" + }, + "category": "PASSWORD", + "last_edited_by": "7FUPZ8ZNE02KSHMAIMKHIVUE17", + "created_at": "2015-01-18T13:13:38Z", + "updated_at": "2016-02-20T16:23:54Z", + "additional_information": "Jan 18, 2015, 08:13:38", + "fields": [ + { + "id": "password", + "type": "CONCEALED", + "purpose": "PASSWORD", + "label": "password", + "value": "OctoberPoppyNuttyDraperySabbath", + "reference": "op://Personal/Authy Backup/password", + "password_details": { + "strength": "FANTASTIC" + } + }, + { + "id": "notesPlain", + "type": "STRING", + "purpose": "NOTES", + "label": "notesPlain", + "value": "Backup password to restore Authy", + "reference": "op://Personal/Authy Backup/notesPlain" + } + ] + } + + # Login item + { + "id": "awk4s2u44fhnrgppszcsvc663i", + "title": "Dummy Login", + "version": 2, + "vault": { + "id": "stpebbaccrq72xulgouxsk4p7y", + "name": "Personal" + }, + "category": "LOGIN", + "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU", + "created_at": "2018-04-25T21:55:19Z", + "updated_at": "2018-04-25T21:56:06Z", + "additional_information": "agent.smith", + "urls": [ + { + "primary": true, + "href": "https://acme.com" + } + ], + "sections": [ + { + "id": "linked items", + "label": "Related Items" + } + ], + "fields": [ + { + "id": "username", + "type": "STRING", + "purpose": "USERNAME", + "label": "username", + "value": "agent.smith", + "reference": "op://Personal/Dummy Login/username" + }, + { + "id": "password", + "type": "CONCEALED", + "purpose": "PASSWORD", + "label": "password", + "value": "Q7vFwTJcqwxKmTU]Dzx7NW*wrNPXmj", + "entropy": 159.6083697084228, + "reference": "op://Personal/Dummy Login/password", + "password_details": { + "entropy": 159, + "generated": true, + "strength": "FANTASTIC" + } + }, + { + "id": "notesPlain", + "type": "STRING", + "purpose": "NOTES", + "label": "notesPlain", + "reference": "op://Personal/Dummy Login/notesPlain" + } + ] + } + """ + data = json.loads(data_json) + field_name = _lower_if_possible(field_name) + for field in data.get("fields", []): + if section_title is None: + # If the field name exists in the section, return that value 
+ if field.get(field_name): + return field.get(field_name) + + # If the field name doesn't exist in the section, match on the value of "label" + # then "id" and return "value" + if field.get("label", "").lower() == field_name: + return field.get("value", "") + + if field.get("id", "").lower() == field_name: + return field.get("value", "") + + # Look at the section data and get an identifier. The value of 'id' is either a unique ID + # or a human-readable string. If a 'label' field exists, prefer that since + # it is the value visible in the 1Password UI when both 'id' and 'label' exist. + section = field.get("section", {}) + section_title = _lower_if_possible(section_title) + + current_section_title = section.get("label", section.get("id", "")).lower() + if section_title == current_section_title: + # In the correct section. Check "label" then "id" for the desired field_name + if field.get("label", "").lower() == field_name: + return field.get("value", "") + + if field.get("id", "").lower() == field_name: + return field.get("value", "") + + return "" + + def assert_logged_in(self): + if self.connect_host and self.connect_token: + return True + + if self.service_account_token: + args = ["whoami"] + environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} + rc, out, err = self._run(args, environment_update=environment_update) + + return not bool(rc) + + args = ["account", "list"] + if self.subdomain: + account = f"{self.subdomain}.{self.domain}" + args.extend(["--account", account]) + + rc, out, err = self._run(args) + + if out: + # Running 'op account get' if there are no accounts configured on the system drops into + # an interactive prompt. Only run 'op account get' after first listing accounts to see + # if there are any previously configured accounts. 
+ args = ["account", "get"] + if self.account_id: + args.extend(["--account", self.account_id]) + elif self.subdomain: + account = f"{self.subdomain}.{self.domain}" + args.extend(["--account", account]) + + rc, out, err = self._run(args, ignore_errors=True) + + return not bool(rc) + + return False + + def full_signin(self): + required_params = [ + "subdomain", + "username", + "secret_key", + "master_password", + ] + self._check_required_params(required_params) + + args = [ + "account", "add", "--raw", + "--address", f"{self.subdomain}.{self.domain}", + "--email", to_bytes(self.username), + "--signin", + ] + + environment_update = {"OP_SECRET_KEY": self.secret_key} + return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update) + + def _add_parameters_and_run(self, args, vault=None, token=None): + if self.account_id: + args.extend(["--account", self.account_id]) + + if vault is not None: + args += [f"--vault={vault}"] + + if self.connect_host and self.connect_token: + if vault is None: + raise AnsibleLookupError("'vault' is required with 1Password Connect") + environment_update = { + "OP_CONNECT_HOST": self.connect_host, + "OP_CONNECT_TOKEN": self.connect_token, + } + return self._run(args, environment_update=environment_update) + + if self.service_account_token: + if vault is None: + raise AnsibleLookupError("'vault' is required with 'service_account_token'") + environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} + return self._run(args, environment_update=environment_update) + + if token is not None: + args += [to_bytes("--session=") + token] + + return self._run(args) + + def get_raw(self, item_id, vault=None, token=None): + args = ["item", "get", item_id, "--format", "json"] + return self._add_parameters_and_run(args, vault=vault, token=token) + + def signin(self): + self._check_required_params(['master_password']) + + args = ["signin", "--raw"] + if self.subdomain: + args.extend(["--account", self.subdomain]) + + return self._run(args, command_input=to_bytes(self.master_password)) + + +class OnePass(object): + def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None, + service_account_token=None, account_id=None, connect_host=None, connect_token=None, cli_class=None): + self.subdomain = subdomain + self.domain = domain + self.username = username + self.secret_key = secret_key + self.master_password = master_password + self.service_account_token = service_account_token + self.account_id = account_id + self.connect_host = connect_host + self.connect_token = connect_token + + self.logged_in = False + self.token = None + + self._config = OnePasswordConfig() + self._cli = self._get_cli_class(cli_class) + + if (self.connect_host or self.connect_token) and None in (self.connect_host, self.connect_token): + raise AnsibleOptionsError("connect_host and connect_token are required together") + + def _get_cli_class(self, cli_class=None): + if cli_class is not None: + return cli_class(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token) + + version = OnePassCLIBase.get_current_version() + for cls in OnePassCLIBase.__subclasses__(): + if cls.supports_version == version.split(".")[0]: + try: + return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token, + self.account_id, self.connect_host, self.connect_token) + except TypeError as e: + raise 
AnsibleLookupError(e) + + raise AnsibleLookupError(f"op version {version} is unsupported") + + def set_token(self): + if self._config.config_file_path and os.path.isfile(self._config.config_file_path): + # If the config file exists, assume an initial sign in has taken place and try basic sign in + try: + rc, out, err = self._cli.signin() + except AnsibleLookupError as exc: + test_strings = ( + "missing required parameters", + "unauthorized", + ) + if any(string in exc.message.lower() for string in test_strings): + # A required parameter is missing, or a bad master password was supplied + # so don't bother attempting a full signin + raise + + rc, out, err = self._cli.full_signin() + + self.token = out.strip() + + else: + # Attempt a full signin since there appears to be no existing signin + rc, out, err = self._cli.full_signin() + self.token = out.strip() + + def assert_logged_in(self): + logged_in = self._cli.assert_logged_in() + if logged_in: + self.logged_in = logged_in + else: + self.set_token() + + def get_raw(self, item_id, vault=None): + rc, out, err = self._cli.get_raw(item_id, vault, self.token) + return out + + def get_field(self, item_id, field, section=None, vault=None): + output = self.get_raw(item_id, vault) + if output: + return self._cli._parse_field(output, field, section) + + return "" class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): - op = OnePass() + self.set_options(var_options=variables, direct=kwargs) - field = kwargs.get('field', 'password') - section = kwargs.get('section') - vault = kwargs.get('vault') - op.subdomain = kwargs.get('subdomain') - op.domain = kwargs.get('domain', '1password.com') - op.username = kwargs.get('username') - op.secret_key = kwargs.get('secret_key') - op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) + field = self.get_option("field") + section = self.get_option("section") + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + ) op.assert_logged_in() values = [] for term in terms: values.append(op.get_field(term, field, section, vault)) + return values diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py new file mode 100644 index 0000000000..e62db6d1e2 --- /dev/null +++ b/plugins/lookup/onepassword_doc.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_doc +author: + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility version 2 or later.
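A pattern that recurs in every lookup touched by this diff: run() now declares its inputs through set_options()/get_option() instead of raw kwargs.get() calls, so defaults and env/ini sources come from the DOCUMENTATION block. A reduced sketch of the idiom; the field option here is only an example:

    from ansible.plugins.lookup import LookupBase

    class LookupModule(LookupBase):
        def run(self, terms, variables=None, **kwargs):
            # Fold task vars and direct kwargs into the documented options.
            self.set_options(var_options=variables, direct=kwargs)
            field = self.get_option("field")  # default supplied by DOCUMENTATION
            return [f"{term}/{field}" for term in terms]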
+short_description: Fetch documents stored in 1Password +version_added: "8.1.0" +description: + - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password. +notes: + - The document contents are a string exactly as stored in 1Password. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve a private key from 1Password + ansible.builtin.debug: + var: lookup('community.general.onepassword_doc', 'Private key') +""" + +RETURN = r""" +_raw: + description: Requested document. + type: list + elements: string +""" + +from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2 +from ansible.plugins.lookup import LookupBase + + +class OnePassCLIv2Doc(OnePassCLIv2): + def get_raw(self, item_id, vault=None, token=None): + args = ["document", "get", item_id] + return self._add_parameters_and_run(args, vault=vault, token=token) + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2Doc, + ) + op.assert_logged_in() + + values = [] + for term in terms: + values.append(op.get_raw(term, vault)) + + return values diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py index d1958f78cd..b75be3d630 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -1,55 +1,41 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Scott Buchanan -# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Scott Buchanan +# Copyright (c) 2016, Andrew Zenk (lastpass.py used as starting point) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: onepassword_raw - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. 
See U(https://support.1password.com/command-line/) - short_description: fetch an entire item from 1Password - description: - - C(onepassword_raw) wraps C(op) command line utility to fetch an entire item from 1Password - options: - _terms: - description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve. - required: True - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. - You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. - - Tested with C(op) version 0.5.3 -''' +DOCUMENTATION = r""" +name: onepassword_raw +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility +short_description: Fetch an entire item from 1Password +description: + - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 6.0.0 + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve all data about Wintermute ansible.builtin.debug: var: lookup('community.general.onepassword_raw', 'Wintermute') @@ -59,11 +45,11 @@ EXAMPLES = """ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl') """ -RETURN = """ - _raw: - description: field data requested - type: list - elements: dict +RETURN = r""" +_raw: + description: Entire item requested. 
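Since onepassword_raw hands back the parsed op item get JSON unmodified, extracting a value is plain dictionary work on the caller's side. An illustrative helper, keyed to the v2 field schema quoted earlier in this diff (field_from_raw_item is not part of the patch):

    import json

    def field_from_raw_item(raw_json, wanted_label):
        # Match on the human-visible 'label', as the v2 _parse_field above does.
        item = json.loads(raw_json)
        for field in item.get("fields", []):
            if field.get("label", "").lower() == wanted_label.lower():
                return field.get("value", "")
        return ""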
+ type: list + elements: dict """ import json @@ -75,18 +61,35 @@ from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): - op = OnePass() + self.set_options(var_options=variables, direct=kwargs) - vault = kwargs.get('vault') - op.subdomain = kwargs.get('subdomain') - op.username = kwargs.get('username') - op.secret_key = kwargs.get('secret_key') - op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + ) op.assert_logged_in() values = [] for term in terms: data = json.loads(op.get_raw(term, vault)) values.append(data) + return values diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py new file mode 100644 index 0000000000..35e3034e04 --- /dev/null +++ b/plugins/lookup/onepassword_ssh_key.py @@ -0,0 +1,118 @@ +# Copyright (c) 2025, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_ssh_key +author: + - Mohammed Babelly (@mohammedbabelly20) +requirements: + - C(op) 1Password command line utility version 2 or later. +short_description: Fetch SSH keys stored in 1Password +version_added: "10.3.0" +description: + - P(community.general.onepassword_ssh_key#lookup) wraps C(op) command line utility to fetch SSH keys from 1Password. +notes: + - By default, it returns the private key value in PKCS#8 format, unless O(ssh_format=true) is passed. + - The plugin works only for C(SSHKEY) type items. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + ssh_format: + description: Output key in SSH format if V(true). Otherwise, outputs in the default format (PKCS#8). + default: false + type: bool + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve the private SSH key from 1Password + ansible.builtin.debug: + msg: "{{ lookup('community.general.onepassword_ssh_key', 'SSH Key', ssh_format=true) }}" +""" + +RETURN = r""" +_raw: + description: Private key of SSH keypair.
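The lookup below extracts the key from the item's private_key field; judging from the code (not from 1Password documentation), that field carries the key twice, once as PKCS#8 in value and once under ssh_formats.openssh. In miniature:

    def pick_key(private_key_field, ssh_format=False):
        # ssh_format=True selects the OpenSSH rendering; the default is PKCS#8.
        if ssh_format:
            return private_key_field.get("ssh_formats", {}).get("openssh", {}).get("value", "")
        return private_key_field.get("value", "")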
+ type: list + elements: string +""" +import json + +from ansible_collections.community.general.plugins.lookup.onepassword import ( + OnePass, + OnePassCLIv2, +) +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def get_ssh_key(self, out, item_id, ssh_format=False): + data = json.loads(out) + + if data.get("category") != "SSH_KEY": + raise AnsibleLookupError(f"Item {item_id} is not an SSH key") + + private_key_field = next( + ( + field + for field in data.get("fields", {}) + if field.get("id") == "private_key" and field.get("type") == "SSHKEY" + ), + None, + ) + if not private_key_field: + raise AnsibleLookupError(f"No private key found for item {item_id}.") + + if ssh_format: + return ( + private_key_field.get("ssh_formats", {}) + .get("openssh", {}) + .get("value", "") + ) + return private_key_field.get("value", "") + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + ssh_format = self.get_option("ssh_format") + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2, + ) + op.assert_logged_in() + + return [ + self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format) + for term in terms + ] diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 7c00f432b1..31305d81bb 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,159 +1,268 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Patrick Deelman -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017, Patrick Deelman +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: passwordstore - author: - - Patrick Deelman (!UNKNOWN) - short_description: manage passwords with passwordstore.org's pass utility +DOCUMENTATION = r""" +name: passwordstore +author: + - Patrick Deelman (!UNKNOWN) +short_description: Manage passwords with passwordstore.org's pass utility +description: + - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. It can also retrieve, + create or update YAML style keys stored as multilines in the passwordfile. + - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where + this is not possible, consider using O(lock=readwrite) instead. +options: + _terms: + description: Query key. 
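The YAML style keys mentioned in the description above refer to the pass file convention where the first line is the password and later lines hold key: value subkeys. A sketch of that convention; parse_pass_entry is illustrative, not the plugin's actual parser:

    def parse_pass_entry(text):
        # First line is the password; remaining lines are "key: value" subkeys.
        lines = text.splitlines()
        password = lines[0] if lines else ""
        subkeys = {}
        for line in lines[1:]:
            if ":" in line:
                key, _, value = line.partition(":")
                subkeys[key.strip()] = value.strip()
        return password, subkeys

    # parse_pass_entry("hunter2\nusername: joe") returns ("hunter2", {"username": "joe"})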
+    required: true
+  directory:
     description:
-        - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
-          It also retrieves YAML style keys stored as multilines in the passwordfile.
-    options:
-      _terms:
-        description: query key.
-        required: True
-      passwordstore:
-        description: location of the password store.
-        default: '~/.password-store'
-      directory:
-        description: The directory of the password store.
-        env:
-          - name: PASSWORD_STORE_DIR
-      create:
-        description: Create the password if it does not already exist. Takes precedence over C(missing).
-        type: bool
-        default: false
-      overwrite:
-        description: Overwrite the password if it does already exist.
-        type: bool
-        default: 'no'
-      umask:
-        description:
-          - Sets the umask for the created .gpg files. The first octed must be greater than 3 (user readable).
-          - Note pass' default value is C('077').
-        env:
-          - name: PASSWORD_STORE_UMASK
-        version_added: 1.3.0
-      returnall:
-        description: Return all the content of the password, not only the first line.
-        type: bool
-        default: 'no'
-      subkey:
-        description: Return a specific subkey of the password. When set to C(password), always returns the first line.
-        default: password
-      userpass:
-        description: Specify a password to save, instead of a generated one.
-      length:
-        description: The length of the generated password.
-        type: integer
-        default: 16
-      backup:
-        description: Used with C(overwrite=yes). Backup the previous password in a subkey.
-        type: bool
-        default: 'no'
-      nosymbols:
-        description: use alphanumeric characters.
-        type: bool
-        default: 'no'
-      missing:
-        description:
-          - List of preference about what to do if the password file is missing.
-          - If I(create=true), the value for this option is ignored and assumed to be C(create).
-          - If set to C(error), the lookup will error out if the passname does not exist.
-          - If set to C(create), the passname will be created with the provided length I(length) if it does not exist.
-          - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist.
-            When using C(lookup) and not C(query), this will be translated to an empty string.
-        version_added: 3.1.0
-        type: str
-        default: error
-        choices:
-          - error
-          - warn
-          - empty
-          - create
-'''
-EXAMPLES = """
-# Debug is used for examples, BAD IDEA to show passwords on screen
-- name: Basic lookup. Fails if example/test doesn't exist
-  ansible.builtin.debug:
-    msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
+      - The directory of the password store.
+      - If O(backend=pass), the default V(~/.password-store) is used.
+      - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), falling back to V(~/.local/share/gopass/stores/root)
+        if C(path) is not defined in the gopass config.
+    type: path
+    vars:
+      - name: passwordstore
+    env:
+      - name: PASSWORD_STORE_DIR
+  create:
+    description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing).
+    type: bool
+    default: false
+  overwrite:
+    description: Overwrite the password or the subkey if it does already exist.
+    type: bool
+    default: false
+  umask:
+    description:
+      - Sets the umask for the created V(.gpg) files. The first octet must not be greater than 3 (user readable).
+      - Note pass' default value is V('077').
+    type: string
+    env:
+      - name: PASSWORD_STORE_UMASK
+    version_added: 1.3.0
+  returnall:
+    description: Return all the content of the password, not only the first line.
+    type: bool
+    default: false
+  subkey:
+    description:
+      - Return a specific subkey of the password. When set to V(password) (the default), always returns the first line.
+      - With O(overwrite=true), it creates the subkey and returns it.
+    type: str
+    default: password
+  userpass:
+    description: Specify a password to save, instead of a generated one.
+    type: str
+  length:
+    description: The length of the generated password.
+    type: integer
+    default: 16
+  backup:
+    description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey.
+    type: bool
+    default: false
+  nosymbols:
+    description: Use alphanumeric characters.
+    type: bool
+    default: false
+  missing:
+    description:
+      - Preference about what to do if the password file is missing.
+      - If O(create=true), the value for this option is ignored and assumed to be V(create).
+      - If set to V(error), the lookup fails if the passname does not exist.
+      - If set to V(create), the passname is created with the provided length O(length) if it does not exist.
+      - If set to V(empty) or V(warn), it returns a V(none) in case the passname does not exist. When using C(lookup) and
+        not C(query), this is translated to an empty string.
+    version_added: 3.1.0
+    type: str
+    default: error
+    choices:
+      - error
+      - warn
+      - empty
+      - create
+  lock:
+    description:
+      - How to synchronize operations.
+      - The default of V(write) only synchronizes write operations.
+      - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
+      - V(none) does not do any synchronization.
+    ini:
+      - section: passwordstore_lookup
+        key: lock
+    type: str
+    default: write
+    choices:
+      - readwrite
+      - write
+      - none
+    version_added: 4.5.0
+  locktimeout:
+    description:
+      - Lock timeout applied when O(lock) is not V(none).
+      - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals
+        V(15m).
+      - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
+    ini:
+      - section: passwordstore_lookup
+        key: locktimeout
+    type: str
+    default: 15m
+    version_added: 4.5.0
+  backend:
+    description:
+      - Specify which backend to use.
+      - Defaults to V(pass), passwordstore.org's original pass utility.
+      - V(gopass) support is incomplete.
+    ini:
+      - section: passwordstore_lookup
+        key: backend
+    vars:
+      - name: passwordstore_backend
+    type: str
+    default: pass
+    choices:
+      - pass
+      - gopass
+    version_added: 5.2.0
+  timestamp:
+    description: Add the password generation information to the end of the file.
+    type: bool
+    default: true
+    version_added: 8.1.0
+  preserve:
+    description: Include the old (edited) password inside the pass file.
+    type: bool
+    default: true
+    version_added: 8.1.0
+  missing_subkey:
+    description:
+      - Preference about what to do if the password subkey is missing.
+      - If set to V(error), the lookup fails if the subkey does not exist.
+      - If set to V(empty) or V(warn), it returns a V(none) in case the subkey does not exist.
+    version_added: 8.6.0
+    type: str
+    default: empty
+    choices:
+      - error
+      - warn
+      - empty
+    ini:
+      - section: passwordstore_lookup
+        key: missing_subkey
+notes:
+  - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
+"""
+EXAMPLES = r"""
+ansible.cfg: |
+  [passwordstore_lookup]
+  lock=readwrite
+  locktimeout=45s
+  missing_subkey=warn
-- name: Basic lookup.
Warns if example/test does not exist and returns empty string - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}" +tasks.yml: |- + --- -- name: Create pass with random 16 character password. If password exists just give the password - ansible.builtin.debug: - var: mypassword - vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}" + # Debug is used for examples, BAD IDEA to show passwords on screen + - name: Basic lookup. Fails if example/test does not exist + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test')}}" -- name: Create pass with random 16 character password. If password exists just give the password - ansible.builtin.debug: - var: mypassword - vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}" + - name: Basic lookup. Warns if example/test does not exist and returns empty string + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', missing='warn')}}" -- name: Prints 'abc' if example/test does not exist, just give the password otherwise - ansible.builtin.debug: - var: mypassword - vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}" + - name: Create pass with random 16 character password. If password exists just give the password + ansible.builtin.debug: + var: mypassword + vars: + mypassword: "{{ lookup('community.general.passwordstore', 'example/test', create=true)}}" -- name: Different size password - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}" + - name: Create pass with random 16 character password. If password exists just give the password + ansible.builtin.debug: + var: mypassword + vars: + mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}" -- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}" + - name: >- + Create a random 16 character password in a subkey. If the password file already exists, just add the subkey in it. + If the subkey exists, returns it + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='foo') }}" -- name: Create an alphanumeric password - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}" + - name: >- + Create a random 16 character password in a subkey. Overwrite if it already exists and backup the old one. 
+ ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='user', overwrite=true, backup=true) }}" -- name: Return the value for user in the KV pair user, username - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}" + - name: Prints 'abc' if example/test does not exist, just give the password otherwise + ansible.builtin.debug: + var: mypassword + vars: + mypassword: >- + {{ lookup('community.general.passwordstore', 'example/test', missing='empty') + | default('abc', true) }} -- name: Return the entire password file content - ansible.builtin.set_fact: - passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}" + - name: Different size password + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, length=42)}}" + + - name: >- + Create password and overwrite the password if it exists. + As a bonus, this module includes the old password inside the pass file + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, overwrite=true)}}" + + - name: Create an alphanumeric password + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, nosymbols=true) }}" + + - name: Return the value for user in the KV pair user, username + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', subkey='user')}}" + + - name: Return the entire password file content + ansible.builtin.set_fact: + passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}" """ -RETURN = """ +RETURN = r""" _raw: description: - - a password + - A password. 
type: list elements: str """ +from contextlib import contextmanager import os +import re import subprocess import time import yaml - -from distutils import util from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.module_utils.parsing.convert_bool import boolean from ansible.utils.display import Display from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase from ansible import constants as C +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock + display = Display() # backhacked check_output with input for python 2.7 # http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output +# note: contains special logic for calling 'pass', so not a drop-in replacement for check_output def check_output2(*popenargs, **kwargs): if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') @@ -175,9 +284,10 @@ def check_output2(*popenargs, **kwargs): process.wait() raise retcode = process.poll() - if retcode != 0 or \ - b'encryption failed: Unusable public key' in b_out or \ - b'encryption failed: Unusable public key' in b_err: + if retcode == 0 and (b'encryption failed: Unusable public key' in b_out or + b'encryption failed: Unusable public key' in b_err): + retcode = 78 # os.EX_CONFIG + if retcode != 0: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] @@ -190,6 +300,24 @@ def check_output2(*popenargs, **kwargs): class LookupModule(LookupBase): + def __init__(self, loader=None, templar=None, **kwargs): + + super(LookupModule, self).__init__(loader, templar, **kwargs) + self.realpass = None + + def is_real_pass(self): + if self.realpass is None: + try: + passoutput = to_text( + check_output2([self.pass_cmd, "--version"], env=self.env), + errors='surrogate_or_strict' + ) + self.realpass = 'pass: the standard unix password manager' in passoutput + except (subprocess.CalledProcessError) as e: + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + + return self.realpass + def parse_params(self, term): # I went with the "traditional" param followed with space separated KV pairs. # Waiting for final implementation of lookup parameter parsing. 
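An aside on the hunk above: the `check_output2` helper predates Python 3, but the new comment notes it is kept because of special logic for calling `pass` — in particular, GPG can print "encryption failed: Unusable public key" while `pass` still exits 0. A condensed standalone sketch of that error-mapping idea (hypothetical helper names, not the plugin's actual code):

```python
import os
import subprocess


def run_pass_like(args, input_text, env=None):
    # Force English messages so the error string below is matchable.
    env = dict(env or os.environ, LANGUAGE='C')
    proc = subprocess.Popen(args, env=env, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate(input_text.encode())
    retcode = proc.returncode
    if retcode == 0 and b'encryption failed: Unusable public key' in out + err:
        # gpg could not actually encrypt despite a zero exit code.
        retcode = 78  # os.EX_CONFIG
    if retcode != 0:
        raise subprocess.CalledProcessError(retcode, args, output=out + err)
    return out
```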
@@ -203,7 +331,7 @@ class LookupModule(LookupBase): for param in params[1:]: name, value = param.split('=', 1) if name not in self.paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') self.paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) @@ -211,32 +339,34 @@ class LookupModule(LookupBase): try: for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']: if not isinstance(self.paramvals[key], bool): - self.paramvals[key] = util.strtobool(self.paramvals[key]) + self.paramvals[key] = boolean(self.paramvals[key]) except (ValueError, AssertionError) as e: raise AnsibleError(e) if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: - raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) + raise AnsibleError(f"{self.paramvals['missing']} is not a valid option for missing") if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: - raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) + raise AnsibleError(f"{self.paramvals['length']} is not a correct value for length") if self.paramvals['create']: self.paramvals['missing'] = 'create' # Collect pass environment variables from the plugin's parameters. self.env = os.environ.copy() + self.env['LANGUAGE'] = 'C' # make sure to get errors in English as required by check_output2 - # Set PASSWORD_STORE_DIR if directory is set - if self.paramvals['directory']: - if os.path.isdir(self.paramvals['directory']): - self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] - else: - raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory'])) + if self.backend == 'gopass': + self.env['GOPASS_NO_REMINDER'] = "YES" + elif os.path.isdir(self.paramvals['directory']): + # Set PASSWORD_STORE_DIR + self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] + elif self.is_real_pass(): + raise AnsibleError(f"Passwordstore directory '{self.paramvals['directory']}' does not exist") # Set PASSWORD_STORE_UMASK if umask is set - if 'umask' in self.paramvals: + if self.paramvals.get('umask') is not None: if len(self.paramvals['umask']) != 3: raise AnsibleError('Passwordstore umask must have a length of 3.') elif int(self.paramvals['umask'][0]) > 3: @@ -247,7 +377,8 @@ class LookupModule(LookupBase): def check_pass(self): try: self.passoutput = to_text( - check_output2(["pass", "show", self.passname], env=self.env), + check_output2([self.pass_cmd, 'show'] + + [self.passname], env=self.env), errors='surrogate_or_strict' ).splitlines() self.password = self.passoutput[0] @@ -261,19 +392,22 @@ class LookupModule(LookupBase): if ':' in line: name, value = line.split(':', 1) self.passdict[name.strip()] = value.strip() + if (self.backend == 'gopass' or + os.path.isfile(os.path.join(self.paramvals['directory'], f"{self.passname}.gpg")) + or not self.is_real_pass()): + # When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise) + return True except (subprocess.CalledProcessError) as e: - if e.returncode != 0 and 'not in the password store' in e.output: - # if pass returns 1 and return string contains 'is not in the password store.' - # We need to determine if this is valid or Error. 
- if self.paramvals['missing'] == 'error': - raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) - else: - if self.paramvals['missing'] == 'warn': - display.warning('passwordstore: passname {0} not found'.format(self.passname)) - return False - else: - raise AnsibleError(e) - return True + # 'not in password store' is the expected error if a password wasn't found + if 'not in the password store' not in e.output: + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + + if self.paramvals['missing'] == 'error': + raise AnsibleError(f'passwordstore: passname {self.passname} not found and missing=error is set') + elif self.paramvals['missing'] == 'warn': + display.warning(f'passwordstore: passname {self.passname} not found') + + return False def get_newpass(self): if self.paramvals['nosymbols']: @@ -289,17 +423,51 @@ class LookupModule(LookupBase): def update_password(self): # generate new password, insert old lines from current result and return new password + # if the target is a subkey, only modify the subkey newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' - if self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) + '\n' - if self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + + msg_lines = [] + subkey_exists = False + subkey_line = f"{subkey}: {newpass}" + oldpass = None + + for line in self.passoutput: + if line.startswith(f"{subkey}: "): + oldpass = self.passdict[subkey] + line = subkey_line + subkey_exists = True + + msg_lines.append(line) + + if not subkey_exists: + msg_lines.insert(2, subkey_line) + + if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: + msg_lines.append( + f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n" + ) + + msg = os.linesep.join(msg_lines) + + else: + msg = newpass + + if self.paramvals['preserve'] or self.paramvals['timestamp']: + msg += '\n' + if self.paramvals['preserve'] and self.passoutput[1:]: + msg += '\n'.join(self.passoutput[1:]) + msg += '\n' + if self.paramvals['timestamp'] and self.paramvals['backup']: + msg += f"lookup_pass: old password was {self.password} (Updated on {datetime})\n" + try: - check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env) + check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError(e) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') return newpass def generate_password(self): @@ -307,11 +475,21 @@ class LookupModule(LookupBase): # use pwgen to generate the password and insert values with pass -m newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + msg = f"\n\n{subkey}: {newpass}" + else: + msg = newpass + + if self.paramvals['timestamp']: + msg += f"\nlookup_pass: First generated by ansible on {datetime}\n" + try: - check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env) + check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError(e) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + return newpass def get_passresult(self): @@ -323,34 +501,98 @@ class LookupModule(LookupBase): if self.paramvals['subkey'] in self.passdict: return self.passdict[self.paramvals['subkey']] else: + if self.paramvals["missing_subkey"] == "error": + raise AnsibleError( + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found and missing_subkey=error is set" + ) + + if self.paramvals["missing_subkey"] == "warn": + display.warning( + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found" + ) + return None - def run(self, terms, variables, **kwargs): - result = [] + @contextmanager + def opt_lock(self, type): + if self.get_option('lock') == type: + tmpdir = os.environ.get('TMPDIR', '/tmp') + user = os.environ.get('USER') + lockfile = os.path.join(tmpdir, f'.{user}.passwordstore.lock') + with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): + self.locked = type + yield + self.locked = None + else: + yield + + def setup(self, variables): + self.backend = self.get_option('backend') + self.pass_cmd = self.backend # pass and gopass are commands as well + self.locked = None + timeout = self.get_option('locktimeout') + if not re.match('^[0-9]+[smh]$', timeout): + raise AnsibleError(f"{timeout} is not a correct value for locktimeout") + unit_to_seconds = {"s": 1, "m": 60, "h": 3600} + self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]] + + directory = self.get_option('directory') + if directory is None: + if self.backend == 'gopass': + try: + with open(os.path.expanduser('~/.config/gopass/config.yml')) as f: + directory = yaml.safe_load(f)['path'] + except (FileNotFoundError, KeyError, yaml.YAMLError): + directory = os.path.expanduser('~/.local/share/gopass/stores/root') + else: + directory = os.path.expanduser('~/.password-store') + self.paramvals = { - 'subkey': 'password', - 'directory': variables.get('passwordstore'), - 'create': False, - 'returnall': False, - 'overwrite': False, - 'nosymbols': False, - 'userpass': '', - 'length': 16, - 'backup': False, - 'missing': 'error', + 'subkey': self.get_option('subkey'), + 'directory': directory, + 'create': self.get_option('create'), + 'returnall': self.get_option('returnall'), + 'overwrite': self.get_option('overwrite'), + 'nosymbols': self.get_option('nosymbols'), + 'userpass': self.get_option('userpass') or '', + 'length': self.get_option('length'), + 'backup': self.get_option('backup'), + 'missing': self.get_option('missing'), + 'umask': self.get_option('umask'), + 'timestamp': self.get_option('timestamp'), + 'preserve': 
self.get_option('preserve'), + "missing_subkey": self.get_option("missing_subkey"), } + def run(self, terms, variables, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + self.setup(variables) + result = [] + for term in terms: self.parse_params(term) # parse the input into paramvals - if self.check_pass(): # password exists - if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password': - result.append(self.update_password()) - else: - result.append(self.get_passresult()) - else: # password does not exist - if self.paramvals['missing'] == 'create': - result.append(self.generate_password()) - else: - result.append(None) + with self.opt_lock('readwrite'): + if self.check_pass(): # password file exists + if self.paramvals['overwrite']: # if "overwrite", always update password + with self.opt_lock('write'): + result.append(self.update_password()) + elif ( + self.paramvals["subkey"] != "password" + and not self.passdict.get(self.paramvals["subkey"]) + and self.paramvals["missing"] == "create" + ): # target is a subkey, this subkey is not in passdict BUT missing == create + with self.opt_lock('write'): + result.append(self.update_password()) + else: + result.append(self.get_passresult()) + else: # password does not exist + if self.paramvals['missing'] == 'create': + with self.opt_lock('write'): + if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock + result.append(self.get_passresult()) + else: + result.append(self.generate_password()) + else: + result.append(None) return result diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py index 6caf178e4b..0ab3ee29d3 100644 --- a/plugins/lookup/random_pet.py +++ b/plugins/lookup/random_pet.py @@ -1,44 +1,43 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Abhijeet Kasurde -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Abhijeet Kasurde +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: random_pet - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random pet names - version_added: '3.1.0' - requirements: - - petname U(https://github.com/dustinkirkland/python-petname) +DOCUMENTATION = r""" +name: random_pet +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random pet names +version_added: '3.1.0' +requirements: + - petname U(https://github.com/dustinkirkland/python-petname) +description: + - Generates random pet names that can be used as unique identifiers for the resources. +options: + words: description: - - Generates random pet names that can be used as unique identifiers for the resources. - options: - words: - description: - - The number of words in the pet name. - default: 2 - type: int - length: - description: - - The maximal length of every component of the pet name. - - Values below 3 will be set to 3 by petname. - default: 6 - type: int - prefix: - description: A string to prefix with the name. - type: str - separator: - description: The character to separate words in the pet name. - default: "-" - type: str -''' + - The number of words in the pet name. 
+ default: 2 + type: int + length: + description: + - The maximal length of every component of the pet name. + - Values below V(3) are set to V(3) by petname. + default: 6 + type: int + prefix: + description: A string to prefix with the name. + type: str + separator: + description: The character to separate words in the pet name. + default: "-" + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Generate pet name ansible.builtin.debug: var: lookup('community.general.random_pet') @@ -58,14 +57,14 @@ EXAMPLES = r''' ansible.builtin.debug: var: lookup('community.general.random_pet', length=7) # Example result: 'natural-peacock' -''' +""" -RETURN = r''' - _raw: - description: A one-element list containing a random pet name - type: list - elements: str -''' +RETURN = r""" +_raw: + description: A one-element list containing a random pet name. + type: list + elements: str +""" try: import petname @@ -94,6 +93,6 @@ class LookupModule(LookupBase): values = petname.Generate(words=words, separator=separator, letters=length) if prefix: - values = "%s%s%s" % (prefix, separator, values) + values = f"{prefix}{separator}{values}" return [values] diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index d67a75ed99..027a587ad8 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -1,124 +1,157 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Abhijeet Kasurde -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Abhijeet Kasurde +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" - name: random_string - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random string - version_added: '3.2.0' +name: random_string +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random string +version_added: '3.2.0' +description: + - Generates random string based upon the given constraints. + - Uses L(secrets.SystemRandom,https://docs.python.org/3/library/secrets.html#secrets.SystemRandom), so should be strong enough + for cryptographic purposes. +options: + length: + description: The length of the string. + default: 8 + type: int + upper: description: - - Generates random string based upon the given constraints. - options: - length: - description: The length of the string. - default: 8 - type: int - upper: - description: - - Include uppercase letters in the string. - default: true - type: bool - lower: - description: - - Include lowercase letters in the string. - default: true - type: bool - numbers: - description: - - Include numbers in the string. - default: true - type: bool - special: - description: - - Include special characters in the string. - - Special characters are taken from Python standard library C(string). - See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) - for which characters will be used. - - The choice of special characters can be changed to setting I(override_special). - default: true - type: bool - min_numeric: - description: - - Minimum number of numeric characters in the string. - - If set, overrides I(numbers=false). 
-        default: 0
-        type: int
-      min_upper:
-        description:
-          - Minimum number of uppercase alphabets in the string.
-          - If set, overrides I(upper=false).
-        default: 0
-        type: int
-      min_lower:
-        description:
-          - Minimum number of lowercase alphabets in the string.
-          - If set, overrides I(lower=false).
-        default: 0
-        type: int
-      min_special:
-        description:
-          - Minimum number of special character in the string.
-        default: 0
-        type: int
-      override_special:
-        description:
-          - Overide a list of special characters to use in the string.
-          - If set I(min_special) should be set to a non-default value.
-        type: str
-      override_all:
-        description:
-          - Override all values of I(numbers), I(upper), I(lower), and I(special) with
-            the given list of characters.
-        type: str
-      base64:
-        description:
-          - Returns base64 encoded string.
-        type: bool
-        default: false
+      - Possibly include uppercase letters in the string.
+      - To ensure at least one uppercase letter, set O(min_upper) to V(1).
+    default: true
+    type: bool
+  lower:
+    description:
+      - Possibly include lowercase letters in the string.
+      - To ensure at least one lowercase letter, set O(min_lower) to V(1).
+    default: true
+    type: bool
+  numbers:
+    description:
+      - Possibly include numbers in the string.
+      - To ensure at least one numeric character, set O(min_numeric) to V(1).
+    default: true
+    type: bool
+  special:
+    description:
+      - Possibly include special characters in the string.
+      - Special characters are taken from Python standard library C(string). See L(the documentation of
+        string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
+        for which characters are used.
+      - The choice of special characters can be changed by setting O(override_special).
+      - To ensure at least one special character, set O(min_special) to V(1).
+    default: true
+    type: bool
+  min_numeric:
+    description:
+      - Minimum number of numeric characters in the string.
+      - If set, overrides O(numbers=false).
+    default: 0
+    type: int
+  min_upper:
+    description:
+      - Minimum number of uppercase letters in the string.
+      - If set, overrides O(upper=false).
+    default: 0
+    type: int
+  min_lower:
+    description:
+      - Minimum number of lowercase letters in the string.
+      - If set, overrides O(lower=false).
+    default: 0
+    type: int
+  min_special:
+    description:
+      - Minimum number of special characters in the string.
+    default: 0
+    type: int
+  override_special:
+    description:
+      - Override a list of special characters to use in the string.
+      - If set, O(min_special) should be set to a non-default value.
+    type: str
+  override_all:
+    description:
+      - Override all values of O(numbers), O(upper), O(lower), and O(special) with the given list of characters.
+    type: str
+  ignore_similar_chars:
+    description:
+      - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
+      - These characters can be configured in O(similar_chars).
+    default: false
+    type: bool
+    version_added: 7.5.0
+  similar_chars:
+    description:
+      - Override the list of characters not to be used in the string.
+    default: "il1LoO0"
+    type: str
+    version_added: 7.5.0
+  base64:
+    description:
+      - Returns base64 encoded string.
+    type: bool
+    default: false
+  seed:
+    description:
+      - Seed for random string generator.
+      - B(Note) that this drastically reduces the security of this plugin. First, when O(seed) is provided, a non-cryptographic random number generator is used.
+        Second, if the seed does not contain enough entropy, the generated string is weak.
+ B(Do not use the generated string as a password or a secure token when using this option!) + type: str + version_added: 11.3.0 """ EXAMPLES = r""" - name: Generate random string ansible.builtin.debug: var: lookup('community.general.random_string') - # Example result: ['DeadBeeF'] + # Example result: 'DeadBeeF' + +- name: Generate random string with seed + ansible.builtin.debug: + var: lookup('community.general.random_string', seed=12345) + # Example result: '6[~(2q5O' + # NOTE: Do **not** use this string as a password or a secure token, + # unless you know exactly what you are doing! + # Specifying seed uses a non-secure random number generator. - name: Generate random string with length 12 ansible.builtin.debug: var: lookup('community.general.random_string', length=12) - # Example result: ['Uan0hUiX5kVG'] + # Example result: 'Uan0hUiX5kVG' - name: Generate base64 encoded random string ansible.builtin.debug: var: lookup('community.general.random_string', base64=True) - # Example result: ['NHZ6eWN5Qk0='] + # Example result: 'NHZ6eWN5Qk0=' -- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (atleast) +- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least) ansible.builtin.debug: var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) - # Example result: ['&Qw2|E[-'] + # Example result: '&Qw2|E[-' - name: Generate a random string with all lower case characters - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, numbers=false, special=false) # Example result: ['exolxzyz'] - name: Generate random hexadecimal string - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) vars: hex_chars: '0123456789ABCDEF' # Example result: ['D2A40737'] - name: Generate random hexadecimal string with override_all - debug: + ansible.builtin.debug: var: query('community.general.random_string', override_all=hex_chars) vars: hex_chars: '0123456789ABCDEF' @@ -126,14 +159,15 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A one-element list containing a random string - type: list - elements: str +_raw: + description: A one-element list containing a random string. 
+ type: list + elements: str """ import base64 import random +import secrets import string from ansible.errors import AnsibleLookupError @@ -163,16 +197,30 @@ class LookupModule(LookupBase): lower_chars = string.ascii_lowercase upper_chars = string.ascii_uppercase special_chars = string.punctuation - random_generator = random.SystemRandom() self.set_options(var_options=variables, direct=kwargs) length = self.get_option("length") base64_flag = self.get_option("base64") override_all = self.get_option("override_all") + ignore_similar_chars = self.get_option("ignore_similar_chars") + similar_chars = self.get_option("similar_chars") + seed = self.get_option("seed") + + if seed is None: + random_generator = secrets.SystemRandom() + else: + random_generator = random.Random(seed) + values = "" available_chars_set = "" + if ignore_similar_chars: + number_chars = "".join([sc for sc in number_chars if sc not in similar_chars]) + lower_chars = "".join([sc for sc in lower_chars if sc not in similar_chars]) + upper_chars = "".join([sc for sc in upper_chars if sc not in similar_chars]) + special_chars = "".join([sc for sc in special_chars if sc not in similar_chars]) + if override_all: # Override all the values available_chars_set = override_all @@ -209,10 +257,11 @@ class LookupModule(LookupBase): remaining_pass_len = length - len(values) values += self.get_random(random_generator, available_chars_set, remaining_pass_len) - # Get pseudo randomization shuffled_values = list(values) - # Randomize the order - random.shuffle(shuffled_values) + if seed is None: + # Get pseudo randomization + # Randomize the order + random.shuffle(shuffled_values) if base64_flag: return [self.b64encode("".join(shuffled_values))] diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py index a2381aa38f..dd06e701f8 100644 --- a/plugins/lookup/random_words.py +++ b/plugins/lookup/random_words.py @@ -1,51 +1,50 @@ -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later """The community.general.random_words Ansible lookup plugin.""" -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" - name: random_words - author: - - Thomas Sjögren (@konstruktoid) - short_description: Return a number of random words - version_added: "4.0.0" - requirements: - - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +name: random_words +author: + - Thomas Sjögren (@konstruktoid) +short_description: Return a number of random words +version_added: "4.0.0" +requirements: + - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +description: + - Returns a number of random words. The output can for example be used for passwords. + - See U(https://xkcd.com/936/) for background. +options: + numwords: description: - - Returns a number of random words. The output can for example be used for - passwords. - - See U(https://xkcd.com/936/) for background. - options: - numwords: - description: - - The number of words. - default: 6 - type: int - min_length: - description: - - Minimum length of words to make password. - default: 5 - type: int - max_length: - description: - - Maximum length of words to make password. 
- default: 9 - type: int - delimiter: - description: - - The delimiter character between words. - default: " " - type: str - case: - description: - - The method for setting the case of each word in the passphrase. - choices: ["alternating", "upper", "lower", "random", "capitalize"] - default: "lower" - type: str + - The number of words. + default: 6 + type: int + min_length: + description: + - Minimum length of words to make password. + default: 5 + type: int + max_length: + description: + - Maximum length of words to make password. + default: 9 + type: int + delimiter: + description: + - The delimiter character between words. + default: " " + type: str + case: + description: + - The method for setting the case of each word in the passphrase. + choices: ["alternating", "upper", "lower", "random", "capitalize"] + default: "lower" + type: str """ EXAMPLES = r""" @@ -72,10 +71,10 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A single-element list containing random words. - type: list - elements: str +_raw: + description: A single-element list containing random words. + type: list + elements: str """ from ansible.errors import AnsibleLookupError diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 8de7e04cce..0073796a22 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,51 +1,53 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: redis - author: - - Jan-Piet Mens (@jpmens) - - Ansible Core Team - short_description: fetch data from Redis - description: - - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it - requirements: - - redis (python library https://github.com/andymccurdy/redis-py/) - options: - _terms: - description: list of keys to query - host: - description: location of Redis host - default: '127.0.0.1' - env: - - name: ANSIBLE_REDIS_HOST - ini: - - section: lookup_redis - key: host - port: - description: port on which Redis is listening on - default: 6379 - type: int - env: - - name: ANSIBLE_REDIS_PORT - ini: - - section: lookup_redis - key: port - socket: - description: path to socket on which to query Redis, this option overrides host and port options when set. - type: path - env: - - name: ANSIBLE_REDIS_SOCKET - ini: - - section: lookup_redis - key: socket -''' +DOCUMENTATION = r""" +name: redis +author: + - Jan-Piet Mens (@jpmens) + - Ansible Core Team +short_description: Fetch data from Redis +description: + - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it. +requirements: + - redis (python library https://github.com/andymccurdy/redis-py/) +options: + _terms: + description: List of keys to query. + type: list + elements: string + host: + description: Location of Redis host. + type: string + default: '127.0.0.1' + env: + - name: ANSIBLE_REDIS_HOST + ini: + - section: lookup_redis + key: host + port: + description: Port on which Redis is listening on. 
+    default: 6379
+    type: int
+    env:
+      - name: ANSIBLE_REDIS_PORT
+    ini:
+      - section: lookup_redis
+        key: port
+  socket:
+    description: Path to socket on which to query Redis. This option overrides the host and port options when set.
+    type: path
+    env:
+      - name: ANSIBLE_REDIS_SOCKET
+    ini:
+      - section: lookup_redis
+        key: socket
+"""

-EXAMPLES = """
+EXAMPLES = r"""
 - name: query redis for somekey (default or configured settings used)
   ansible.builtin.debug:
     msg: "{{ lookup('community.general.redis', 'somekey') }}"

@@ -62,18 +64,15 @@ EXAMPLES = """
 - name: use list directly with a socket
   ansible.builtin.debug:
     msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
-
 """

-RETURN = """
+RETURN = r"""
 _raw:
-  description: value(s) stored in Redis
+  description: Value(s) stored in Redis.
   type: list
   elements: str
 """

-import os
-
 HAVE_REDIS = False
 try:
     import redis
@@ -114,5 +113,5 @@ class LookupModule(LookupBase):
                 ret.append(to_text(res))
             except Exception as e:
                 # connection failed or key not found
-                raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+                raise AnsibleError(f'Encountered exception while fetching {term}: {e}')
         return ret
diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py
new file mode 100644
index 0000000000..86e3fbe38c
--- /dev/null
+++ b/plugins/lookup/revbitspss.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2021, RevBits
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+name: revbitspss
+author: RevBits (@RevBits)
+short_description: Get secrets from RevBits PAM server
+version_added: 4.1.0
+description:
+  - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM Server using API key authentication with the REST
+    API.
+requirements:
+  - revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
+options:
+  _terms:
+    description:
+      - This is an array of keys for secrets which you want to fetch from RevBits PAM.
+    required: true
+    type: list
+    elements: string
+  base_url:
+    description:
+      - This is the base URL of the server, for example V(https://server-url-here).
+    required: true
+    type: string
+  api_key:
+    description:
+      - This is the API key for authentication. You can get it from the RevBits PAM secret manager module.
+    required: true
+    type: string
+"""

+RETURN = r"""
+_list:
+  description:
+    - The JSON responses which you can access with defined keys.
+    - If you are fetching secrets named, for example, UUID and PASSWORD, it returns a dict of all those secrets.
+ type: list + elements: dict +""" + +EXAMPLES = r""" +--- +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.revbitspss', + 'UUIDPAM', 'DB_PASS', + base_url='https://server-url-here', + api_key='API_KEY_GOES_HERE' + ) + }} + tasks: + - ansible.builtin.debug: + msg: >- + UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} +""" + +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display +from ansible.errors import AnsibleError + +try: + from pam.revbits_ansible.server import SecretServer +except ImportError as imp_exc: + ANOTHER_LIBRARY_IMPORT_ERROR = imp_exc +else: + ANOTHER_LIBRARY_IMPORT_ERROR = None + + +display = Display() + + +class LookupModule(LookupBase): + + @staticmethod + def Client(server_parameters): + return SecretServer(**server_parameters) + + def run(self, terms, variables, **kwargs): + if ANOTHER_LIBRARY_IMPORT_ERROR: + raise AnsibleError('revbits_ansible must be installed to use this plugin') from ANOTHER_LIBRARY_IMPORT_ERROR + self.set_options(var_options=variables, direct=kwargs) + secret_server = LookupModule.Client( + { + "base_url": self.get_option('base_url'), + "api_key": self.get_option('api_key'), + } + ) + result = [] + for term in terms: + try: + display.vvv(f"Secret Server lookup of Secret with ID {term}") + result.append({term: secret_server.get_pam_secret(term)}) + except Exception as error: + raise AnsibleError(f"Secret Server lookup failure: {error.message}") + return result diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 56cfdf1143..54d96e91d2 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,35 +1,40 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Alejandro Guirao -# (c) 2012-17 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Alejandro Guirao +# Copyright (c) 2012-17 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: shelvefile - author: Alejandro Guirao (!UNKNOWN) - short_description: read keys from Python shelve file - description: - - Read keys from Python shelve file. - options: - _terms: - description: sets of key value pairs of parameters - key: - description: key to query - required: True - file: - description: path to shelve file - required: True -''' - -EXAMPLES = """ -- name: retrieve a string value corresponding to a key inside a Python shelve file - ansible.builtin.debug: msg="{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }} +DOCUMENTATION = r""" +name: shelvefile +author: Alejandro Guirao (!UNKNOWN) +short_description: Read keys from Python shelve file +description: + - Read keys from Python shelve file. +options: + _terms: + description: Sets of key value pairs of parameters. + type: list + elements: str + key: + description: Key to query. + type: str + required: true + file: + description: Path to shelve file. 
+ type: path + required: true """ -RETURN = """ +EXAMPLES = r""" +--- +- name: Retrieve a string value corresponding to a key inside a Python shelve file + ansible.builtin.debug: + msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}" +""" + +RETURN = r""" _list: - description: value(s) of key(s) in shelve file(s) + description: Value(s) of key(s) in shelve file(s). type: list elements: str """ @@ -52,7 +57,6 @@ class LookupModule(LookupBase): return res def run(self, terms, variables=None, **kwargs): - if not isinstance(terms, list): terms = [terms] @@ -66,7 +70,7 @@ class LookupModule(LookupBase): for param in params: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') paramvals[name] = value except (ValueError, AssertionError) as e: @@ -81,11 +85,11 @@ class LookupModule(LookupBase): if shelvefile: res = self.read_shelve(shelvefile, key) if res is None: - raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile)) + raise AnsibleError(f"Key {key} not found in shelve file {shelvefile}") # Convert the value read to string ret.append(to_text(res)) break else: - raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file']) + raise AnsibleError(f"Could not locate shelve file in lookup: {paramvals['file']}") return ret diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 3b561e94fc..e612446374 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Migus -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +# Copyright (c) 2020, Adam Migus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: tss @@ -11,181 +10,283 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic Secret Server version_added: 1.0.0 description: - - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret - Server using token authentication with I(username) and I(password) on - the REST API at I(base_url). + - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret Server using token authentication with O(username) + and O(password) on the REST API at O(base_url). + - When using self-signed certificates the environment variable E(REQUESTS_CA_BUNDLE) can be set to a file containing the + trusted certificates (in C(.pem) format). + - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). requirements: - - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ + - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ options: - _terms: - description: The integer ID of the secret. - required: true - type: int - base_url: - description: The base URL of the server, e.g. C(https://localhost/SecretServer). - env: - - name: TSS_BASE_URL - ini: - - section: tss_lookup - key: base_url - required: true - username: - description: The username with which to request the OAuth2 Access Grant. - env: - - name: TSS_USERNAME - ini: - - section: tss_lookup - key: username - password: - description: - - The password associated with the supplied username. 
-        - Required when I(token) is not provided.
-      env:
-        - name: TSS_PASSWORD
-      ini:
-        - section: tss_lookup
-          key: password
-    domain:
-      default: ""
-      description:
-        - The domain with which to request the OAuth2 Access Grant.
-        - Optional when I(token) is not provided.
-        - Requires C(python-tss-sdk) version 1.0.0 or greater.
-      env:
-        - name: TSS_DOMAIN
-      ini:
-        - section: tss_lookup
-          key: domain
-      required: false
-      version_added: 3.6.0
-    token:
-      description:
-        - Existing token for Thycotic authorizer.
-        - If provided, I(username) and I(password) are not needed.
-        - Requires C(python-tss-sdk) version 1.0.0 or greater.
-      env:
-        - name: TSS_TOKEN
-      ini:
-        - section: tss_lookup
-          key: token
-      version_added: 3.7.0
-    api_path_uri:
-      default: /api/v1
-      description: The path to append to the base URL to form a valid REST
-        API request.
-      env:
-        - name: TSS_API_PATH_URI
-      required: false
-    token_path_uri:
-      default: /oauth2/token
-      description: The path to append to the base URL to form a valid OAuth2
-        Access Grant request.
-      env:
-        - name: TSS_TOKEN_PATH_URI
-      required: false
+  _terms:
+    description: The integer ID of the secret.
+    required: true
+    type: list
+    elements: int
+  secret_path:
+    description: Indicate the full path of the secret, including folder and secret name, when the secret ID is set to 0.
+    required: false
+    type: str
+    version_added: 7.2.0
+  fetch_secret_ids_from_folder:
+    description:
+      - Boolean flag which indicates whether secret IDs in a folder are fetched by folder ID or not.
+      - If V(true), the terms are considered folder IDs. Otherwise (default), they are considered secret IDs.
+    required: false
+    type: bool
+    version_added: 7.1.0
+  fetch_attachments:
+    description:
+      - Boolean flag which indicates whether attached files are downloaded or not.
+      - The download only happens if O(file_download_path) has been provided.
+    required: false
+    type: bool
+    version_added: 7.0.0
+  file_download_path:
+    description: Indicate the file attachment download location.
+    required: false
+    type: path
+    version_added: 7.0.0
+  base_url:
+    description: The base URL of the server, for example V(https://localhost/SecretServer).
+    type: string
+    env:
+      - name: TSS_BASE_URL
+    ini:
+      - section: tss_lookup
+        key: base_url
+    required: true
+  username:
+    description: The username with which to request the OAuth2 Access Grant.
+    type: string
+    env:
+      - name: TSS_USERNAME
+    ini:
+      - section: tss_lookup
+        key: username
+  password:
+    description:
+      - The password associated with the supplied username.
+      - Required when O(token) is not provided.
+    type: string
+    env:
+      - name: TSS_PASSWORD
+    ini:
+      - section: tss_lookup
+        key: password
+  domain:
+    default: ""
+    description:
+      - The domain with which to request the OAuth2 Access Grant.
+      - Optional when O(token) is not provided.
+      - Requires C(python-tss-sdk) version 1.0.0 or greater.
+    type: string
+    env:
+      - name: TSS_DOMAIN
+    ini:
+      - section: tss_lookup
+        key: domain
+    required: false
+    version_added: 3.6.0
+  token:
+    description:
+      - Existing token for Thycotic authorizer.
+      - If provided, O(username) and O(password) are not needed.
+      - Requires C(python-tss-sdk) version 1.0.0 or greater.
+    type: string
+    env:
+      - name: TSS_TOKEN
+    ini:
+      - section: tss_lookup
+        key: token
+    version_added: 3.7.0
+  api_path_uri:
+    default: /api/v1
+    description: The path to append to the base URL to form a valid REST API request.
+    type: string
+    env:
+      - name: TSS_API_PATH_URI
+    required: false
+  token_path_uri:
+    default: /oauth2/token
+    description: The path to append to the base URL to form a valid OAuth2 Access Grant request.
+    type: string
+    env:
+      - name: TSS_TOKEN_PATH_URI
+    required: false
 """

 RETURN = r"""
 _list:
-    description:
-        - The JSON responses to C(GET /secrets/{id}).
-        - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
-    type: list
-    elements: dict
+  description:
+    - The JSON responses to C(GET /secrets/{id}).
+    - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
+  type: list
+  elements: dict
 """

 EXAMPLES = r"""
 - hosts: localhost
   vars:
-      secret: >-
-        {{
-            lookup(
-                'community.general.tss',
-                102,
-                base_url='https://secretserver.domain.com/SecretServer/',
-                username='user.name',
-                password='password'
-            )
-        }}
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          102,
+          base_url='https://secretserver.domain.com/SecretServer/',
+          username='user.name',
+          password='password'
+        )
+      }}
   tasks:
-      - ansible.builtin.debug:
-          msg: >
-            the password is {{
-              (secret['items']
-                | items2dict(key_name='slug',
-                             value_name='itemValue'))['password']
-            }}
+    - ansible.builtin.debug:
+        msg: >
+          the password is {{
+            (secret['items']
+              | items2dict(key_name='slug',
+                           value_name='itemValue'))['password']
+          }}

 - hosts: localhost
   vars:
-      secret: >-
-        {{
-            lookup(
-                'community.general.tss',
-                102,
-                base_url='https://secretserver.domain.com/SecretServer/',
-                username='user.name',
-                password='password',
-                domain='domain'
-            )
-        }}
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          102,
+          base_url='https://secretserver.domain.com/SecretServer/',
+          username='user.name',
+          password='password',
+          domain='domain'
+        )
+      }}
   tasks:
-      - ansible.builtin.debug:
-          msg: >
-            the password is {{
-              (secret['items']
-                | items2dict(key_name='slug',
-                             value_name='itemValue'))['password']
-            }}
+    - ansible.builtin.debug:
+        msg: >
+          the password is {{
+            (secret['items']
+              | items2dict(key_name='slug',
+                           value_name='itemValue'))['password']
+          }}

 - hosts: localhost
   vars:
-      secret_password: >-
-        {{
-            ((lookup(
-                'community.general.tss',
-                102,
-                base_url='https://secretserver.domain.com/SecretServer/',
-                token='thycotic_access_token',
-            ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
-        }}
+    secret_password: >-
+      {{
+        ((lookup(
+          'community.general.tss',
+          102,
+          base_url='https://secretserver.domain.com/SecretServer/',
+          token='thycotic_access_token',
+        ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
+      }}
   tasks:
-      - ansible.builtin.debug:
-          msg: the password is {{ secret_password }}
+    - ansible.builtin.debug:
+        msg: the password is {{ secret_password }}
+
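A brief aside before the attachment examples below: the Jinja2 pipeline used throughout these examples, `secret['items'] | items2dict(key_name='slug', value_name='itemValue')`, just reshapes the secret's item list into a mapping. Roughly, in plain Python (hypothetical helper for illustration only, not part of the plugin):

```python
def items_to_dict(secret):
    # Turn [{'slug': 'password', 'itemValue': 's3cret', ...}, ...]
    # into {'password': 's3cret', ...} so fields can be read directly.
    return {item['slug']: item['itemValue'] for item in secret.get('items', [])}


# items_to_dict(secret)['password'] then mirrors the filter chain in the examples.
```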
+- hosts: localhost
+  vars:
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          102,
+          fetch_attachments=True,
+          file_download_path='/home/certs',
+          base_url='https://secretserver.domain.com/SecretServer/',
+          token='thycotic_access_token'
+        )
+      }}
+  tasks:
+    - ansible.builtin.debug:
+        msg: >
+          the private key is {{
+            (secret['items']
+              | items2dict(key_name='slug',
+                           value_name='itemValue'))['private-key']
+          }}
+
+# If fetch_secret_ids_from_folder=true, the secret IDs in the given folder are fetched based on the folder ID.
+- hosts: localhost
+  vars:
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          102,
+          fetch_secret_ids_from_folder=true,
+          base_url='https://secretserver.domain.com/SecretServer/',
+          token='thycotic_access_token'
+        )
+      }}
+  tasks:
+    - ansible.builtin.debug:
+        msg: >
+          the secret IDs are {{
+            secret
+          }}
+
+# If the secret ID is 0 and secret_path has a value, the secret is fetched by its secret path.
+- hosts: localhost
+  vars:
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          0,
+          secret_path='\folderName\secretName',
+          base_url='https://secretserver.domain.com/SecretServer/',
+          username='user.name',
+          password='password'
+        )
+      }}
+  tasks:
+    - ansible.builtin.debug:
+        msg: >-
+          the password is {{
+            (secret['items']
+              | items2dict(key_name='slug',
+                           value_name='itemValue'))['password']
+          }}
 """

 import abc
-
+import os
 from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.module_utils import six
 from ansible.plugins.lookup import LookupBase
 from ansible.utils.display import Display

 try:
-    from thycotic.secrets.server import SecretServer, SecretServerError
+    from delinea.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer

     HAS_TSS_SDK = True
-except ImportError:
-    SecretServer = None
-    SecretServerError = None
-    HAS_TSS_SDK = False
-
-try:
-    from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
-
+    HAS_DELINEA_SS_SDK = True
     HAS_TSS_AUTHORIZER = True
 except ImportError:
-    PasswordGrantAuthorizer = None
-    DomainPasswordGrantAuthorizer = None
-    AccessTokenAuthorizer = None
-    HAS_TSS_AUTHORIZER = False
+    try:
+        from thycotic.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
+
+        HAS_TSS_SDK = True
+        HAS_DELINEA_SS_SDK = False
+        HAS_TSS_AUTHORIZER = True
+    except ImportError:
+        SecretServer = None
+        SecretServerError = None
+        HAS_TSS_SDK = False
+        HAS_DELINEA_SS_SDK = False
+        PasswordGrantAuthorizer = None
+        DomainPasswordGrantAuthorizer = None
+        AccessTokenAuthorizer = None
+        HAS_TSS_AUTHORIZER = False


 display = Display()


-@six.add_metaclass(abc.ABCMeta)
-class TSSClient(object):
+class TSSClient(object, metaclass=abc.ABCMeta):
     def __init__(self):
         self._client = None

@@ -196,13 +297,49 @@ class TSSClient(object):
         else:
             return TSSClientV0(**server_parameters)

-    def get_secret(self, term):
-        display.debug("tss_lookup term: %s" % term)
-
+    def get_secret(self, term, secret_path, fetch_file_attachments, file_download_path):
+        display.debug(f"tss_lookup term: {term}")
         secret_id = self._term_to_secret_id(term)
-        display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)
+        if secret_id == 0 and secret_path:
+            fetch_secret_by_path = True
+            display.vvv(f"Secret Server lookup of Secret with path {secret_path}")
+        else:
+            fetch_secret_by_path = False
+            display.vvv(f"Secret Server lookup of Secret with ID {secret_id}")

-        return self._client.get_secret_json(secret_id)
+        if fetch_file_attachments:
+            if fetch_secret_by_path:
+                obj = self._client.get_secret_by_path(secret_path, fetch_file_attachments)
+            else:
+                obj = self._client.get_secret(secret_id, fetch_file_attachments)
+            for i in obj['items']:
+                if file_download_path and os.path.isdir(file_download_path):
+                    if i['isFile']:
+                        try:
+                            file_content = i['itemValue'].content
+                            with open(os.path.join(file_download_path, f"{obj['id']}_{i['slug']}"), "wb") as f:
+                                f.write(file_content)
+                        except ValueError:
+                            raise AnsibleOptionsError(f"Failed to download {i['slug']}")
+                        except AttributeError:
+                            display.warning(f"Could not read file content for {i['slug']}")
+                        finally:
+                            i['itemValue'] = "*** Not Valid For Display ***"
+                else:
+                    raise AnsibleOptionsError("File download path does not exist")
+            return obj
+        else:
+            if fetch_secret_by_path:
+                return self._client.get_secret_by_path(secret_path, False)
+            else:
+                return self._client.get_secret_json(secret_id)
+
+    def get_secret_ids_by_folderid(self, term):
+        display.debug(f"tss_lookup term: {term}")
+        folder_id = self._term_to_folder_id(term)
+        display.vvv(f"Secret Server lookup of secret IDs with folder ID {folder_id}")
+
+        return self._client.get_secret_ids_by_folderid(folder_id)

     @staticmethod
     def _term_to_secret_id(term):
@@ -211,6 +348,13 @@ class TSSClient(object):
         except ValueError:
             raise AnsibleOptionsError("Secret ID must be an integer")

+    @staticmethod
+    def _term_to_folder_id(term):
+        try:
+            return int(term)
+        except ValueError:
+            raise AnsibleOptionsError("Folder ID must be an integer")
+

 class TSSClientV0(TSSClient):
     def __init__(self, **server_parameters):
@@ -279,6 +423,20 @@ class LookupModule(LookupBase):
         )

         try:
-            return [tss.get_secret(term) for term in terms]
+            if self.get_option("fetch_secret_ids_from_folder"):
+                if HAS_DELINEA_SS_SDK:
+                    return [tss.get_secret_ids_by_folderid(term) for term in terms]
+                else:
+                    raise AnsibleError("latest python-tss-sdk must be installed to use this plugin")
+            else:
+                return [
+                    tss.get_secret(
+                        term,
+                        self.get_option("secret_path"),
+                        self.get_option("fetch_attachments"),
+                        self.get_option("file_download_path"),
+                    )
+                    for term in terms
+                ]
         except SecretServerError as error:
-            raise AnsibleError("Secret Server lookup failure: %s" % error.message)
+            raise AnsibleError(f"Secret Server lookup failure: {error.message}")
diff --git a/tests/integration/targets/__init__.py b/plugins/module_utils/__init__.py
similarity index 100%
rename from tests/integration/targets/__init__.py
rename to plugins/module_utils/__init__.py
diff --git a/plugins/module_utils/_filelock.py b/plugins/module_utils/_filelock.py
new file mode 100644
index 0000000000..f5d0e27608
--- /dev/null
+++ b/plugins/module_utils/_filelock.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+# NOTE:
+# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16.
+
+from __future__ import annotations
+
+import os
+import stat
+import time
+import fcntl
+import sys
+
+from contextlib import contextmanager
+
+
+class LockTimeout(Exception):
+    pass
+
+
+class FileLock:
+    '''
+    Currently FileLock is implemented via fcntl.flock on a lock file, however this
+    behaviour may change in the future.
Avoid mixing lock types fcntl.flock, + fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause + unwanted and/or unexpected behaviour + ''' + def __init__(self): + self.lockfd = None + + @contextmanager + def lock_file(self, path, tmpdir, lock_timeout=None): + ''' + Context for lock acquisition + ''' + try: + self.set_lock(path, tmpdir, lock_timeout) + yield + finally: + self.unlock() + + def set_lock(self, path, tmpdir, lock_timeout=None): + ''' + Create a lock file based on path with flock to prevent other processes + using given path. + Please note that currently file locking only works when it is executed by + the same user, for example single user scenarios + + :kw path: Path (file) to lock + :kw tmpdir: Path where to place the temporary .lock file + :kw lock_timeout: + Wait n seconds for lock acquisition, fail if timeout is reached. + 0 = Do not wait, fail if lock cannot be acquired immediately, + Default is None, wait indefinitely until lock is released. + :returns: True + ''' + lock_path = os.path.join(tmpdir, f'ansible-{os.path.basename(path)}.lock') + l_wait = 0.1 + r_exception = IOError + if sys.version_info[0] == 3: + r_exception = BlockingIOError + + self.lockfd = open(lock_path, 'w') + + if lock_timeout <= 0: + fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + return True + + if lock_timeout: + e_secs = 0 + while e_secs < lock_timeout: + try: + fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + return True + except r_exception: + time.sleep(l_wait) + e_secs += l_wait + continue + + self.lockfd.close() + raise LockTimeout(f'{lock_timeout} sec') + + fcntl.flock(self.lockfd, fcntl.LOCK_EX) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + + return True + + def unlock(self): + ''' + Make sure lock file is available for everyone and Unlock the file descriptor + locked by set_lock + + :returns: True + ''' + if not self.lockfd: + return True + + try: + fcntl.flock(self.lockfd, fcntl.LOCK_UN) + self.lockfd.close() + except ValueError: # file wasn't opened, let context manager fail gracefully + pass + + return True diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 391d468178..33d191c845 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -1,57 +1,14 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is based on # Lib/posixpath.py of cpython +# +# Copyright (c) 2001-2022 Python Software Foundation. All rights reserved. # It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -# -# 1. This LICENSE AGREEMENT is between the Python Software Foundation -# ("PSF"), and the Individual or Organization ("Licensee") accessing and -# otherwise using this software ("Python") in source or binary form and -# its associated documentation. -# -# 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby -# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -# analyze, test, perform and/or display publicly, prepare derivative works, -# distribute, and otherwise use Python alone or in any derivative version, -# provided, however, that PSF's License Agreement and PSF's notice of copyright, -# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" -# are retained in Python alone or in any derivative version prepared by Licensee. -# -# 3. In the event Licensee prepares a derivative work that is based on -# or incorporates Python or any part thereof, and wants to make -# the derivative work available to others as provided herein, then -# Licensee hereby agrees to include in any such work a brief summary of -# the changes made to Python. -# -# 4. PSF is making Python available to Licensee on an "AS IS" -# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -# INFRINGE ANY THIRD PARTY RIGHTS. -# -# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. -# -# 6. This License Agreement will automatically terminate upon a material -# breach of its terms and conditions. -# -# 7. Nothing in this License Agreement shall be deemed to create any -# relationship of agency, partnership, or joint venture between PSF and -# Licensee. This License Agreement does not grant permission to use PSF -# trademarks or trade name in a trademark sense to endorse or promote -# products or services of Licensee, or any third party. -# -# 8. By copying, installing or otherwise using Python, Licensee -# agrees to be bound by the terms and conditions of this License -# Agreement. +# (See LICENSES/PSF-2.0.txt in this collection) +# SPDX-License-Identifier: PSF-2.0 -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type import os diff --git a/plugins/module_utils/_stormssh.py b/plugins/module_utils/_stormssh.py new file mode 100644 index 0000000000..42a72eb674 --- /dev/null +++ b/plugins/module_utils/_stormssh.py @@ -0,0 +1,252 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is based on +# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py +# Copyright (C) <2013> +# SPDX-License-Identifier: MIT + +from __future__ import annotations +import os +import re +import traceback +from operator import itemgetter + + +try: + from paramiko.config import SSHConfig +except ImportError: + SSHConfig = object + HAS_PARAMIKO = False + PARAMIKO_IMPORT_ERROR = traceback.format_exc() +else: + HAS_PARAMIKO = True + PARAMIKO_IMPORT_ERROR = None + + +class StormConfig(SSHConfig): + def parse(self, file_obj): + """ + Read an OpenSSH config from the given file object. 
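+        Unlike the stock paramiko parser, comments and blank lines are kept as
+        ordered entries so the file can later be rewritten without losing its layout.
+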
+ @param file_obj: a file-like object to read the config file from + @type file_obj: file + """ + order = 1 + host = {"host": ['*'], "config": {}, } + for line in file_obj: + line = line.rstrip('\n').lstrip() + if line == '': + self._config.append({ + 'type': 'empty_line', + 'value': line, + 'host': '', + 'order': order, + }) + order += 1 + continue + + if line.startswith('#'): + self._config.append({ + 'type': 'comment', + 'value': line, + 'host': '', + 'order': order, + }) + order += 1 + continue + + if '=' in line: + # Ensure ProxyCommand gets properly split + if line.lower().strip().startswith('proxycommand'): + proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I) + match = proxy_re.match(line) + key, value = match.group(1).lower(), match.group(2) + else: + key, value = line.split('=', 1) + key = key.strip().lower() + else: + # find first whitespace, and split there + i = 0 + while (i < len(line)) and not line[i].isspace(): + i += 1 + if i == len(line): + raise Exception(f'Unparsable line: {line!r}') + key = line[:i].lower() + value = line[i:].lstrip() + if key == 'host': + self._config.append(host) + value = value.split() + host = { + key: value, + 'config': {}, + 'type': 'entry', + 'order': order + } + order += 1 + elif key in ['identityfile', 'localforward', 'remoteforward']: + if key in host['config']: + host['config'][key].append(value) + else: + host['config'][key] = [value] + elif key not in host['config']: + host['config'].update({key: value}) + self._config.append(host) + + +class ConfigParser(object): + """ + Config parser for ~/.ssh/config files. + """ + + def __init__(self, ssh_config_file=None): + if not ssh_config_file: + ssh_config_file = self.get_default_ssh_config_file() + + self.defaults = {} + + self.ssh_config_file = ssh_config_file + + if not os.path.exists(self.ssh_config_file): + if not os.path.exists(os.path.dirname(self.ssh_config_file)): + os.makedirs(os.path.dirname(self.ssh_config_file)) + open(self.ssh_config_file, 'w+').close() + os.chmod(self.ssh_config_file, 0o600) + + self.config_data = [] + + def get_default_ssh_config_file(self): + return os.path.expanduser("~/.ssh/config") + + def load(self): + config = StormConfig() + + with open(self.ssh_config_file) as fd: + config.parse(fd) + + for entry in config.__dict__.get("_config"): + if entry.get("host") == ["*"]: + self.defaults.update(entry.get("config")) + + if entry.get("type") in ["comment", "empty_line"]: + self.config_data.append(entry) + continue + + host_item = { + 'host': entry["host"][0], + 'options': entry.get("config"), + 'type': 'entry', + 'order': entry.get("order", 0), + } + + if len(entry["host"]) > 1: + host_item.update({ + 'host': " ".join(entry["host"]), + }) + # minor bug in paramiko.SSHConfig that duplicates + # "Host *" entries. 
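+            # Such duplicate entries surface here with an empty config dict,
+            # so only entries that carry at least one option are kept.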
+ if entry.get("config") and len(entry.get("config")) > 0: + self.config_data.append(host_item) + + return self.config_data + + def add_host(self, host, options): + self.config_data.append({ + 'host': host, + 'options': options, + 'order': self.get_last_index(), + }) + + return self + + def update_host(self, host, options, use_regex=False): + for index, host_entry in enumerate(self.config_data): + if host_entry.get("host") == host or \ + (use_regex and re.match(host, host_entry.get("host"))): + + if 'deleted_fields' in options: + deleted_fields = options.pop("deleted_fields") + for deleted_field in deleted_fields: + del self.config_data[index]["options"][deleted_field] + + self.config_data[index]["options"].update(options) + + return self + + def search_host(self, search_string): + results = [] + for host_entry in self.config_data: + if host_entry.get("type") != 'entry': + continue + if host_entry.get("host") == "*": + continue + + searchable_information = host_entry.get("host") + for key, value in host_entry.get("options").items(): + if isinstance(value, list): + value = " ".join(value) + if isinstance(value, int): + value = str(value) + + searchable_information += f" {value}" + + if search_string in searchable_information: + results.append(host_entry) + + return results + + def delete_host(self, host): + found = 0 + for index, host_entry in enumerate(self.config_data): + if host_entry.get("host") == host: + del self.config_data[index] + found += 1 + + if found == 0: + raise ValueError('No host found') + return self + + def delete_all_hosts(self): + self.config_data = [] + self.write_to_ssh_config() + + return self + + def dump(self): + if len(self.config_data) < 1: + return + + file_content = "" + self.config_data = sorted(self.config_data, key=itemgetter("order")) + + for host_item in self.config_data: + if host_item.get("type") in ['comment', 'empty_line']: + file_content += f"{host_item.get('value')}\n" + continue + host_item_content = f"Host {host_item.get('host')}\n" + for key, value in host_item.get("options").items(): + if isinstance(value, list): + sub_content = "" + for value_ in value: + sub_content += f" {key} {value_}\n" + host_item_content += sub_content + else: + host_item_content += f" {key} {value}\n" + file_content += host_item_content + + return file_content + + def write_to_ssh_config(self): + with open(self.ssh_config_file, 'w+') as f: + data = self.dump() + if data: + f.write(data) + return self + + def get_last_index(self): + last_index = 0 + indexes = [] + for item in self.config_data: + if item.get("order"): + indexes.append(item.get("order")) + if len(indexes) > 0: + last_index = max(indexes) + + return last_index diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py index d4d3bf76c9..e752b4aa4a 100644 --- a/plugins/module_utils/alicloud_ecs.py +++ b/plugins/module_utils/alicloud_ecs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,13 +6,14 @@ # # Copyright (c) 2017-present Alibaba Group Holding Limited. 
He Guimin # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import json +import traceback from ansible.module_utils.basic import env_fallback try: @@ -27,8 +27,11 @@ try: import footmark.dns import footmark.ram import footmark.market + + FOOTMARK_IMP_ERR = None HAS_FOOTMARK = True except ImportError: + FOOTMARK_IMP_ERR = traceback.format_exc() HAS_FOOTMARK = False @@ -85,10 +88,10 @@ def connect_to_acs(acs_module, region, **params): if not conn: if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]: raise AnsibleACSError( - "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__)) + f"Region {region} does not seem to be available for acs module {acs_module.__name__}.") else: raise AnsibleACSError( - "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__)) + f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}.") return conn @@ -122,7 +125,7 @@ def get_assume_role(params): def get_profile(params): if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']: - path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.getenv('HOME') + '/.aliyun/config.json' + path = params['shared_credentials_file'] if params['shared_credentials_file'] else f"{os.getenv('HOME')}/.aliyun/config.json" auth = {} with open(path, 'r') as f: for pro in json.load(f)['profiles']: diff --git a/plugins/module_utils/android_sdkmanager.py b/plugins/module_utils/android_sdkmanager.py new file mode 100644 index 0000000000..b25a1a04fc --- /dev/null +++ b/plugins/module_utils/android_sdkmanager.py @@ -0,0 +1,146 @@ + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +__state_map = { + "present": "--install", + "absent": "--uninstall" +} + +# sdkmanager --help 2>&1 | grep -A 2 -- --channel +__channel_map = { + "stable": 0, + "beta": 1, + "dev": 2, + "canary": 3 +} + + +def __map_channel(channel_name): + if channel_name not in __channel_map: + raise ValueError(f"Unknown channel name '{channel_name}'") + return __channel_map[channel_name] + + +def sdkmanager_runner(module, **kwargs): + return CmdRunner( + module, + command='sdkmanager', + arg_formats=dict( + state=cmd_runner_fmt.as_map(__state_map), + name=cmd_runner_fmt.as_list(), + installed=cmd_runner_fmt.as_fixed("--list_installed"), + list=cmd_runner_fmt.as_fixed('--list'), + newer=cmd_runner_fmt.as_fixed("--newer"), + sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"), + channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"]) + ), + force_lang="C.UTF-8", # Without this, sdkmanager binary crashes + **kwargs + ) + + +class Package: + def __init__(self, name): + self.name = name + + def __hash__(self): + return hash(self.name) + + def __ne__(self, other): + if not isinstance(other, Package): + return True + 
return self.name != other.name
+
+    def __eq__(self, other):
+        if not isinstance(other, Package):
+            return False
+
+        return self.name == other.name
+
+
+class SdkManagerException(Exception):
+    pass
+
+
+class AndroidSdkManager(object):
+    _RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$')
+    _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$')
+
+    # Example: '  platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools '
+    _RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$')
+
+    # Example: '   platform-tools | 27.0.0 | 35.0.2'
+    _RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$')
+
+    _RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P<package>\S+)\'\s*$')
+    _RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of '
+                                    r'the packages they depend on were not accepted')
+
+    def __init__(self, module):
+        self.runner = sdkmanager_runner(module)
+
+    def get_installed_packages(self):
+        with self.runner('installed sdk_root channel') as ctx:
+            rc, stdout, stderr = ctx.run()
+        return self._parse_packages(stdout, self._RE_INSTALLED_PACKAGES_HEADER, self._RE_INSTALLED_PACKAGE)
+
+    def get_updatable_packages(self):
+        with self.runner('list newer sdk_root channel') as ctx:
+            rc, stdout, stderr = ctx.run()
+        return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE)
+
+    def apply_packages_changes(self, packages, accept_licenses=False):
+        """ Install or delete packages, depending on the `module.vars.state` parameter """
+        if len(packages) == 0:
+            return 0, '', ''
+
+        if accept_licenses:
+            license_prompt_answer = 'y'
+        else:
+            license_prompt_answer = 'N'
+        for package in packages:
+            with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx:
+                rc, stdout, stderr = ctx.run(name=package.name)
+
+            for line in stdout.splitlines():
+                if self._RE_ACCEPT_LICENSE.match(line):
+                    raise SdkManagerException("Licenses for some packages were not accepted")
+
+            if rc != 0:
+                self._try_parse_stderr(stderr)
+                return rc, stdout, stderr
+        return 0, '', ''
+
+    def _try_parse_stderr(self, stderr):
+        data = stderr.splitlines()
+        for line in data:
+            unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line)
+            if unknown_package_regex:
+                package = unknown_package_regex.group('package')
+                raise SdkManagerException(f"Unknown package {package}")
+
+    @staticmethod
+    def _parse_packages(stdout, header_regexp, row_regexp):
+        data = stdout.splitlines()
+
+        section_found = False
+        packages = set()
+
+        for line in data:
+            if not section_found:
+                section_found = header_regexp.match(line)
+                continue
+            else:
+                p = row_regexp.match(line)
+                if p:
+                    packages.add(Package(p.group('name')))
+        return packages
diff --git a/plugins/module_utils/btrfs.py b/plugins/module_utils/btrfs.py
new file mode 100644
index 0000000000..3c9ad3b382
--- /dev/null
+++ b/plugins/module_utils/btrfs.py
@@ -0,0 +1,460 @@
+# Copyright (c) 2022, Gregory Furlong
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+from ansible.module_utils.common.text.converters import to_bytes
+import re
+import os
+
+
+def normalize_subvolume_path(path):
+    """
+    Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
+    In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
+    """
+    fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
+    result = re.sub(r'/+$', '', re.sub(r'/+', '/', f"/{fstree_stripped}"))
+    return result if len(result) > 0 else '/'
+
+
+class BtrfsModuleException(Exception):
+    pass
+
+
+class BtrfsCommands(object):
+
+    """
+    Provides access to a subset of the Btrfs command line
+    """
+
+    def __init__(self, module):
+        self.__module = module
+        self.__btrfs = self.__module.get_bin_path("btrfs", required=True)
+
+    def filesystem_show(self):
+        command = f"{self.__btrfs} filesystem show -d"
+        result = self.__module.run_command(command, check_rc=True)
+        stdout = [x.strip() for x in result[1].splitlines()]
+        filesystems = []
+        current = None
+        for line in stdout:
+            if line.startswith('Label'):
+                current = self.__parse_filesystem(line)
+                filesystems.append(current)
+            elif line.startswith('devid'):
+                current['devices'].append(self.__parse_filesystem_device(line))
+        return filesystems
+
+    def __parse_filesystem(self, line):
+        label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
+        id = re.sub(r'^.*uuid:\s*', '', line)
+
+        filesystem = {}
+        filesystem['label'] = label.strip("'") if label != 'none' else None
+        filesystem['uuid'] = id
+        filesystem['devices'] = []
+        filesystem['mountpoints'] = []
+        filesystem['subvolumes'] = []
+        filesystem['default_subvolid'] = None
+        return filesystem
+
+    def __parse_filesystem_device(self, line):
+        return re.sub(r'^.*path\s', '', line)
+
+    def subvolumes_list(self, filesystem_path):
+        command = f"{self.__btrfs} subvolume list -tap {filesystem_path}"
+        result = self.__module.run_command(command, check_rc=True)
+        stdout = [x.split('\t') for x in result[1].splitlines()]
+        subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
+        if len(stdout) > 2:
+            subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
+        return subvolumes
+
+    def __parse_subvolume_list_record(self, item):
+        return {
+            'id': int(item[0]),
+            'parent': int(item[2]),
+            'path': normalize_subvolume_path(item[5]),
+        }
+
+    def subvolume_get_default(self, filesystem_path):
+        command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)]
+        result = self.__module.run_command(command, check_rc=True)
+        # ID [n] ...
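+        # Typical output: "ID 256 gen 30 top level 5 path sub"; the second
+        # whitespace-separated token is the numeric ID of the default subvolume.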
+        return int(result[1].strip().split()[1])
+
+    def subvolume_set_default(self, filesystem_path, subvolume_id):
+        command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)]
+        result = self.__module.run_command(command, check_rc=True)
+
+    def subvolume_create(self, subvolume_path):
+        command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)]
+        result = self.__module.run_command(command, check_rc=True)
+
+    def subvolume_snapshot(self, snapshot_source, snapshot_destination):
+        command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)]
+        result = self.__module.run_command(command, check_rc=True)
+
+    def subvolume_delete(self, subvolume_path):
+        command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)]
+        result = self.__module.run_command(command, check_rc=True)
+
+
+class BtrfsInfoProvider(object):
+
+    """
+    Utility providing details of the currently available btrfs filesystems
+    """
+
+    def __init__(self, module):
+        self.__module = module
+        self.__btrfs_api = BtrfsCommands(module)
+        self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True)
+
+    def get_filesystems(self):
+        filesystems = self.__btrfs_api.filesystem_show()
+        mountpoints = self.__find_mountpoints()
+        for filesystem in filesystems:
+            device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
+            filesystem['mountpoints'] = device_mountpoints
+
+            if len(device_mountpoints) > 0:
+
+                # any path within the filesystem can be used to query metadata
+                mountpoint = device_mountpoints[0]['mountpoint']
+                filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
+                filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)
+
+        return filesystems
+
+    def get_mountpoints(self, filesystem_devices):
+        mountpoints = self.__find_mountpoints()
+        return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices)
+
+    def get_subvolumes(self, filesystem_path):
+        return self.__btrfs_api.subvolumes_list(filesystem_path)
+
+    def get_default_subvolume_id(self, filesystem_path):
+        return self.__btrfs_api.subvolume_get_default(filesystem_path)
+
+    def __filter_mountpoints_for_devices(self, mountpoints, devices):
+        return [m for m in mountpoints if (m['device'] in devices)]
+
+    def __find_mountpoints(self):
+        command = f"{self.__findmnt_path} -t btrfs -nvP"
+        result = self.__module.run_command(command)
+        mountpoints = []
+        if result[0] == 0:
+            lines = result[1].splitlines()
+            for line in lines:
+                mountpoint = self.__parse_mountpoint_pairs(line)
+                mountpoints.append(mountpoint)
+        return mountpoints
+
+    def __parse_mountpoint_pairs(self, line):
+        pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
+        match = pattern.search(line)
+        if match is not None:
+            groups = match.groupdict()
+
+            return {
+                'mountpoint': groups['target'],
+                'device': groups['source'],
+                'subvolid': self.__extract_mount_subvolid(groups['options']),
+            }
+        else:
+            raise BtrfsModuleException(f"Failed to parse findmnt result for line: '{line}'")
+
+    def __extract_mount_subvolid(self, mount_options):
+        for option in mount_options.split(','):
+            if option.startswith('subvolid='):
+                return int(option[len('subvolid='):])
+        raise BtrfsModuleException(f"Failed to find subvolid for mountpoint in options '{mount_options}'")
+
+
+class BtrfsSubvolume(object):
+
+    """
+    Wrapper class providing convenience methods for inspection of a btrfs subvolume
+    """
+
+    def 
__init__(self, filesystem, subvolume_id): + self.__filesystem = filesystem + self.__subvolume_id = subvolume_id + + def get_filesystem(self): + return self.__filesystem + + def is_mounted(self): + mountpoints = self.get_mountpoints() + return mountpoints is not None and len(mountpoints) > 0 + + def is_filesystem_root(self): + return 5 == self.__subvolume_id + + def is_filesystem_default(self): + return self.__filesystem.default_subvolid == self.__subvolume_id + + def get_mounted_path(self): + mountpoints = self.get_mountpoints() + if mountpoints is not None and len(mountpoints) > 0: + return mountpoints[0] + elif self.parent is not None: + parent = self.__filesystem.get_subvolume_by_id(self.parent) + parent_path = parent.get_mounted_path() + if parent_path is not None: + return parent_path + os.path.sep + self.name + else: + return None + + def get_mountpoints(self): + return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id) + + def get_child_relative_path(self, absolute_child_path): + """ + Get the relative path from this subvolume to the named child subvolume. + The provided parameter is expected to be normalized as by normalize_subvolume_path. + """ + path = self.path + if absolute_child_path.startswith(path): + relative = absolute_child_path[len(path):] + return re.sub(r'^/*', '', relative) + else: + raise BtrfsModuleException(f"Path '{absolute_child_path}' doesn't start with '{path}'") + + def get_parent_subvolume(self): + parent_id = self.parent + return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None + + def get_child_subvolumes(self): + return self.__filesystem.get_subvolume_children(self.__subvolume_id) + + @property + def __info(self): + return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id) + + @property + def id(self): + return self.__subvolume_id + + @property + def name(self): + return self.path.split('/').pop() + + @property + def path(self): + return self.__info['path'] + + @property + def parent(self): + return self.__info['parent'] + + +class BtrfsFilesystem(object): + + """ + Wrapper class providing convenience methods for inspection of a btrfs filesystem + """ + + def __init__(self, info, provider, module): + self.__provider = provider + + # constant for module execution + self.__uuid = info['uuid'] + self.__label = info['label'] + self.__devices = info['devices'] + + # refreshable + self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None + self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else []) + self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else []) + + @property + def uuid(self): + return self.__uuid + + @property + def label(self): + return self.__label + + @property + def default_subvolid(self): + return self.__default_subvolid + + @property + def devices(self): + return list(self.__devices) + + def refresh(self): + self.refresh_mountpoints() + self.refresh_subvolumes() + self.refresh_default_subvolume() + + def refresh_mountpoints(self): + mountpoints = self.__provider.get_mountpoints(list(self.__devices)) + self.__update_mountpoints(mountpoints) + + def __update_mountpoints(self, mountpoints): + self.__mountpoints = dict() + for i in mountpoints: + subvolid = i['subvolid'] + mountpoint = i['mountpoint'] + if subvolid not in self.__mountpoints: + self.__mountpoints[subvolid] = [] + self.__mountpoints[subvolid].append(mountpoint) + + def refresh_subvolumes(self): + filesystem_path = self.get_any_mountpoint() + if 
filesystem_path is not None: + subvolumes = self.__provider.get_subvolumes(filesystem_path) + self.__update_subvolumes(subvolumes) + + def __update_subvolumes(self, subvolumes): + # TODO strategy for retaining information on deleted subvolumes? + self.__subvolumes = dict() + for subvolume in subvolumes: + self.__subvolumes[subvolume['id']] = subvolume + + def refresh_default_subvolume(self): + filesystem_path = self.get_any_mountpoint() + if filesystem_path is not None: + self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path) + + def contains_device(self, device): + return device in self.__devices + + def contains_subvolume(self, subvolume): + return self.get_subvolume_by_name(subvolume) is not None + + def get_subvolume_by_id(self, subvolume_id): + return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None + + def get_subvolume_info_for_id(self, subvolume_id): + return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None + + def get_subvolume_by_name(self, subvolume): + for subvolume_info in self.__subvolumes.values(): + if subvolume_info['path'] == subvolume: + return BtrfsSubvolume(self, subvolume_info['id']) + return None + + def get_any_mountpoint(self): + for subvol_mountpoints in self.__mountpoints.values(): + if len(subvol_mountpoints) > 0: + return subvol_mountpoints[0] + # maybe error? + return None + + def get_any_mounted_subvolume(self): + for subvolid, subvol_mountpoints in self.__mountpoints.items(): + if len(subvol_mountpoints) > 0: + return self.get_subvolume_by_id(subvolid) + return None + + def get_mountpoints_by_subvolume_id(self, subvolume_id): + return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else [] + + def get_nearest_subvolume(self, subvolume): + """Return the identified subvolume if existing, else the closest matching parent""" + subvolumes_by_path = self.__get_subvolumes_by_path() + while len(subvolume) > 1: + if subvolume in subvolumes_by_path: + return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id']) + else: + subvolume = re.sub(r'/[^/]+$', '', subvolume) + + return BtrfsSubvolume(self, 5) + + def get_mountpath_as_child(self, subvolume_name): + """Find a path to the target subvolume through a mounted ancestor""" + nearest = self.get_nearest_subvolume(subvolume_name) + if nearest.path == subvolume_name: + nearest = nearest.get_parent_subvolume() + if nearest is None or nearest.get_mounted_path() is None: + raise BtrfsModuleException(f"Failed to find a path '{subvolume_name}' through a mounted parent subvolume") + else: + return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name) + + def get_subvolume_children(self, subvolume_id): + return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id] + + def __get_subvolumes_by_path(self): + result = {} + for s in self.__subvolumes.values(): + path = s['path'] + result[path] = s + return result + + def is_mounted(self): + return self.__mountpoints is not None and len(self.__mountpoints) > 0 + + def get_summary(self): + subvolumes = [] + sources = self.__subvolumes.values() if self.__subvolumes is not None else [] + for subvolume in sources: + id = subvolume['id'] + subvolumes.append({ + 'id': id, + 'path': subvolume['path'], + 'parent': subvolume['parent'], + 'mountpoints': self.get_mountpoints_by_subvolume_id(id), + }) + + return { + 'default_subvolume': self.__default_subvolid, + 'devices': self.__devices, + 
'label': self.__label, + 'uuid': self.__uuid, + 'subvolumes': subvolumes, + } + + +class BtrfsFilesystemsProvider(object): + + """ + Provides methods to query available btrfs filesystems + """ + + def __init__(self, module): + self.__module = module + self.__provider = BtrfsInfoProvider(module) + self.__filesystems = None + + def get_matching_filesystem(self, criteria): + if criteria['device'] is not None: + criteria['device'] = os.path.realpath(criteria['device']) + + self.__check_init() + matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)] + if len(matching) == 1: + return matching[0] + else: + raise BtrfsModuleException( + f"Found {len(matching)} filesystems matching criteria uuid={criteria['uuid']} label={criteria['label']} device={criteria['device']}" + ) + + def __filesystem_matches_criteria(self, filesystem, criteria): + return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and + (criteria['label'] is None or filesystem.label == criteria['label']) and + (criteria['device'] is None or filesystem.contains_device(criteria['device']))) + + def get_filesystem_for_device(self, device): + real_device = os.path.realpath(device) + self.__check_init() + for fs in self.__filesystems.values(): + if fs.contains_device(real_device): + return fs + return None + + def get_filesystems(self): + self.__check_init() + return list(self.__filesystems.values()) + + def __check_init(self): + if self.__filesystems is None: + self.__filesystems = dict() + for f in self.__provider.get_filesystems(): + uuid = f['uuid'] + self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module) diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index 7619023a3c..c8043a8d9e 100644 --- a/plugins/module_utils/cloud.py +++ b/plugins/module_utils/cloud.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # -# (c) 2016 Allen Sanabria, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016 Allen Sanabria, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations """ @@ -134,7 +132,7 @@ class CloudRetry(object): if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type response_code = cls.status_code_from_exception(e) if cls.found(response_code, catch_extra_error_codes): - msg = "{0}: Retrying in {1} seconds...".format(str(e), delay) + msg = f"{e}: Retrying in {delay} seconds..." 
syslog.syslog(syslog.LOG_INFO, msg) time.sleep(delay) else: diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py new file mode 100644 index 0000000000..b4903e1452 --- /dev/null +++ b/plugins/module_utils/cmd_runner.py @@ -0,0 +1,207 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import os + +from ansible.module_utils.common.collections import is_sequence +from ansible.module_utils.common.locale import get_best_parsable_locale +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt + + +def _ensure_list(value): + return list(value) if is_sequence(value) else [value] + + +def _process_as_is(rc, out, err): + return rc, out, err + + +class CmdRunnerException(Exception): + pass + + +class MissingArgumentFormat(CmdRunnerException): + def __init__(self, arg, args_order, args_formats): + self.args_order = args_order + self.arg = arg + self.args_formats = args_formats + + def __repr__(self): + return f"MissingArgumentFormat({self.arg!r}, {self.args_order!r}, {self.args_formats!r})" + + def __str__(self): + return f"Cannot find format for parameter {self.arg} {self.args_order} in: {self.args_formats}" + + +class MissingArgumentValue(CmdRunnerException): + def __init__(self, args_order, arg): + self.args_order = args_order + self.arg = arg + + def __repr__(self): + return f"MissingArgumentValue({self.args_order!r}, {self.arg!r})" + + def __str__(self): + return f"Cannot find value for parameter {self.arg} in {self.args_order}" + + +class FormatError(CmdRunnerException): + def __init__(self, name, value, args_formats, exc): + self.name = name + self.value = value + self.args_formats = args_formats + self.exc = exc + super(FormatError, self).__init__() + + def __repr__(self): + return f"FormatError({self.name!r}, {self.value!r}, {self.args_formats!r}, {self.exc!r})" + + def __str__(self): + return f"Failed to format parameter {self.name} with value {self.value}: {self.exc}" + + +class CmdRunner(object): + """ + Wrapper for ``AnsibleModule.run_command()``. + + It aims to provide a reusable runner with consistent argument formatting + and sensible defaults. 
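+
+    A minimal usage sketch (the ``echo`` command and the ``name`` parameter are
+    illustrative only, not part of this API):
+
+        runner = CmdRunner(module, command="echo", arg_formats=dict(
+            name=cmd_runner_fmt.as_list(),
+        ))
+        with runner("name") as ctx:
+            rc, out, err = ctx.run()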
+ """ + + @staticmethod + def _prepare_args_order(order): + return tuple(order) if is_sequence(order) else tuple(order.split()) + + def __init__(self, module, command, arg_formats=None, default_args_order=(), + check_rc=False, force_lang="C", path_prefix=None, environ_update=None): + self.module = module + self.command = _ensure_list(command) + self.default_args_order = self._prepare_args_order(default_args_order) + if arg_formats is None: + arg_formats = {} + self.arg_formats = {} + for fmt_name, fmt in arg_formats.items(): + if not cmd_runner_fmt.is_argformat(fmt): + fmt = cmd_runner_fmt.as_func(func=fmt, ignore_none=True) + self.arg_formats[fmt_name] = fmt + self.check_rc = check_rc + if force_lang == "auto": + try: + self.force_lang = get_best_parsable_locale(module) + except RuntimeWarning: + self.force_lang = "C" + else: + self.force_lang = force_lang + self.path_prefix = path_prefix + if environ_update is None: + environ_update = {} + self.environ_update = environ_update + + _cmd = self.command[0] + self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True) + + @property + def binary(self): + return self.command[0] + + def __call__(self, args_order=None, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): + if output_process is None: + output_process = _process_as_is + if args_order is None: + args_order = self.default_args_order + args_order = self._prepare_args_order(args_order) + for p in args_order: + if p not in self.arg_formats: + raise MissingArgumentFormat(p, args_order, tuple(self.arg_formats.keys())) + return _CmdRunnerContext(runner=self, + args_order=args_order, + output_process=output_process, + check_mode_skip=check_mode_skip, + check_mode_return=check_mode_return, **kwargs) + + def has_arg_format(self, arg): + return arg in self.arg_formats + + # not decided whether to keep it or not, but if deprecating it will happen in a farther future. 
+    context = __call__
+
+
+class _CmdRunnerContext(object):
+    def __init__(self, runner, args_order, output_process, check_mode_skip, check_mode_return, **kwargs):
+        self.runner = runner
+        self.args_order = tuple(args_order)
+        self.output_process = output_process
+        self.check_mode_skip = check_mode_skip
+        self.check_mode_return = check_mode_return
+        self.run_command_args = dict(kwargs)
+
+        self.environ_update = runner.environ_update
+        self.environ_update.update(self.run_command_args.get('environ_update', {}))
+        if runner.force_lang:
+            self.environ_update.update({
+                'LANGUAGE': runner.force_lang,
+                'LC_ALL': runner.force_lang,
+            })
+        self.run_command_args['environ_update'] = self.environ_update
+
+        if 'check_rc' not in self.run_command_args:
+            self.run_command_args['check_rc'] = runner.check_rc
+        self.check_rc = self.run_command_args['check_rc']
+
+        self.cmd = None
+        self.results_rc = None
+        self.results_out = None
+        self.results_err = None
+        self.results_processed = None
+
+    def run(self, **kwargs):
+        runner = self.runner
+        module = self.runner.module
+        self.cmd = list(runner.command)
+        self.context_run_args = dict(kwargs)
+
+        named_args = dict(module.params)
+        named_args.update(kwargs)
+        for arg_name in self.args_order:
+            value = None
+            try:
+                if arg_name in named_args:
+                    value = named_args[arg_name]
+                elif not runner.arg_formats[arg_name].ignore_missing_value:
+                    raise MissingArgumentValue(self.args_order, arg_name)
+                self.cmd.extend(runner.arg_formats[arg_name](value))
+            except MissingArgumentValue:
+                raise
+            except Exception as e:
+                raise FormatError(arg_name, value, runner.arg_formats[arg_name], e)
+
+        if self.check_mode_skip and module.check_mode:
+            return self.check_mode_return
+        results = module.run_command(self.cmd, **self.run_command_args)
+        self.results_rc, self.results_out, self.results_err = results
+        self.results_processed = self.output_process(*results)
+        return self.results_processed
+
+    @property
+    def run_info(self):
+        return dict(
+            check_rc=self.check_rc,
+            environ_update=self.environ_update,
+            args_order=self.args_order,
+            cmd=self.cmd,
+            run_command_args=self.run_command_args,
+            context_run_args=self.context_run_args,
+            results_rc=self.results_rc,
+            results_out=self.results_out,
+            results_err=self.results_err,
+            results_processed=self.results_processed,
+        )
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return False
diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py
new file mode 100644
index 0000000000..dcb9fc8e20
--- /dev/null
+++ b/plugins/module_utils/cmd_runner_fmt.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+from functools import wraps
+
+from ansible.module_utils.common.collections import is_sequence
+
+
+def _ensure_list(value):
+    return list(value) if is_sequence(value) else [value]
+
+
+class _ArgFormat(object):
+    def __init__(self, func, ignore_none=True, ignore_missing_value=False):
+        self.func = func
+        self.ignore_none = ignore_none
+        self.ignore_missing_value = ignore_missing_value
+
+    def __call__(self, value):
+        ignore_none = self.ignore_none if self.ignore_none is not None else True
+        if value is None and ignore_none:
+            return []
+        f = self.func
+        return [str(x) for x in f(value)]
+
+    def __str__(self):
+        return f"<ArgFormat: func={self.func}, ignore_none={self.ignore_none}, ignore_missing_value={self.ignore_missing_value}>"
+
+    def __repr__(self):
+        return str(self)
+
+
+def as_bool(args_true, args_false=None, ignore_none=None):
+    if args_false is not None:
+        if ignore_none is None:
+            ignore_none = False
+    else:
+        args_false = []
+    return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
+
+
+def as_bool_not(args):
+    return as_bool([], args, ignore_none=False)
+
+
+def as_optval(arg, ignore_none=None):
+    return _ArgFormat(lambda value: [f"{arg}{value}"], ignore_none=ignore_none)
+
+
+def as_opt_val(arg, ignore_none=None):
+    return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none)
+
+
+def as_opt_eq_val(arg, ignore_none=None):
+    return _ArgFormat(lambda value: [f"{arg}={value}"], ignore_none=ignore_none)
+
+
+def as_list(ignore_none=None, min_len=0, max_len=None):
+    def func(value):
+        value = _ensure_list(value)
+        if len(value) < min_len:
+            raise ValueError(f"Parameter must have at least {min_len} element(s)")
+        if max_len is not None and len(value) > max_len:
+            raise ValueError(f"Parameter must have at most {max_len} element(s)")
+        return value
+    return _ArgFormat(func, ignore_none=ignore_none)
+
+
+def as_fixed(*args):
+    if len(args) == 1 and is_sequence(args[0]):
+        args = args[0]
+    return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
+
+
+def as_func(func, ignore_none=None):
+    return _ArgFormat(func, ignore_none=ignore_none)
+
+
+def as_map(_map, default=None, ignore_none=None):
+    if default is None:
+        default = []
+    return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none)
+
+
+def unpack_args(func):
+    @wraps(func)
+    def wrapper(v):
+        return func(*v)
+    return wrapper
+
+
+def unpack_kwargs(func):
+    @wraps(func)
+    def wrapper(v):
+        return func(**v)
+    return wrapper
+
+
+def stack(fmt):
+    @wraps(fmt)
+    def wrapper(*args, **kwargs):
+        new_func = fmt(ignore_none=True, *args, **kwargs)
+
+        def stacking(value):
+            stack = [new_func(v) for v in value if v]
+            stack = [x for args in stack for x in args]
+            return stack
+
+        return _ArgFormat(stacking, ignore_none=True)
+    return wrapper
+
+
+def is_argformat(fmt):
+    return isinstance(fmt, _ArgFormat)
diff --git a/plugins/module_utils/consul.py b/plugins/module_utils/consul.py
new file mode 100644
index 0000000000..b814485c55
--- /dev/null
+++ b/plugins/module_utils/consul.py
@@ -0,0 +1,349 @@
+
+# Copyright (c) 2022, Håkon Lerring
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+import copy
+import json
+import re
+from urllib import error as urllib_error
+from urllib.parse import urlencode
+
+from ansible.module_utils.urls import open_url
+
+
+def get_consul_url(configuration):
+    return f"{configuration.scheme}://{configuration.host}:{configuration.port}/v1"
+
+
+def get_auth_headers(configuration):
+    if configuration.token is None:
+        return {}
+    else:
+        return {"X-Consul-Token": configuration.token}
+
+
+class RequestError(Exception):
+    def __init__(self, status, response_data=None):
+        self.status = status
+        self.response_data = response_data
+
+    def __str__(self):
+        if self.response_data is None:
+            # self.status is already the message (backwards compat)
+            return self.status
+        return f"HTTP {self.status}: {self.response_data}"
+
+
+def handle_consul_response_error(response):
+    if 400 <= response.status_code < 600:
+        raise RequestError(f"{response.status_code} {response.content}")
+
+
+AUTH_ARGUMENTS_SPEC = dict(
+    host=dict(default="localhost"),
+    port=dict(type="int", default=8500),
+    scheme=dict(default="http"),
+    validate_certs=dict(type="bool", default=True),
+    token=dict(no_log=True),
+    ca_path=dict(),
+)
+
+
+def camel_case_key(key):
+    parts = []
+    for part in key.split("_"):
+        if part in {"id", "ttl", "jwks", "jwt", "oidc", "iam", "sts"}:
+            parts.append(part.upper())
+        else:
+            parts.append(part.capitalize())
+    return "".join(parts)
+
+
+def validate_check(check):
+    validate_duration_keys = ['Interval', 'Ttl', 'Timeout']
+    validate_tcp_regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
+    if check.get('Tcp') is not None:
+        match = re.match(validate_tcp_regex, check['Tcp'])
+        if not match:
+            raise Exception('tcp check must be in host:port format')
+    for duration in validate_duration_keys:
+        if duration in check and check[duration] is not None:
+            check[duration] = validate_duration(check[duration])
+
+
+def validate_duration(duration):
+    if duration:
+        if not re.search(r"\d+(?:ns|us|ms|s|m|h)", duration):
+            duration = f"{duration}s"
+    return duration
+
+
+STATE_PARAMETER = "state"
+STATE_PRESENT = "present"
+STATE_ABSENT = "absent"
+
+OPERATION_READ = "read"
+OPERATION_CREATE = "create"
+OPERATION_UPDATE = "update"
+OPERATION_DELETE = "remove"
+
+
+def _normalize_params(params, arg_spec):
+    final_params = {}
+    for k, v in params.items():
+        if k not in arg_spec or v is None:  # Alias
+            continue
+        spec = arg_spec[k]
+        if (
+            spec.get("type") == "list"
+            and spec.get("elements") == "dict"
+            and spec.get("options")
+            and v
+        ):
+            v = [_normalize_params(d, spec["options"]) for d in v]
+        elif spec.get("type") == "dict" and spec.get("options") and v:
+            v = _normalize_params(v, spec["options"])
+        final_params[k] = v
+    return final_params
+
+
+class _ConsulModule:
+    """Base class for Consul modules.
+
+    This class is considered private, till the API is fully fleshed out.
+    As such backwards incompatible changes can occur even in bugfix releases.
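+
+    Subclasses are expected to set the ``api_endpoint``, ``unique_identifiers``
+    and ``result_key`` attributes; ``execute()`` then drives the
+    read/create/update/delete flow against the Consul HTTP API.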
+ """ + + api_endpoint = None # type: str + unique_identifiers = None # type: list + result_key = None # type: str + create_only_fields = set() + operational_attributes = set() + params = {} + + def __init__(self, module): + self._module = module + self.params = _normalize_params(module.params, module.argument_spec) + self.api_params = { + k: camel_case_key(k) + for k in self.params + if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC + } + + self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}) + + def execute(self): + obj = self.read_object() + + changed = False + diff = {} + if self.params[STATE_PARAMETER] == STATE_PRESENT: + obj_from_module = self.module_to_obj(obj is not None) + if obj is None: + operation = OPERATION_CREATE + new_obj = self.create_object(obj_from_module) + diff = {"before": {}, "after": new_obj} + changed = True + else: + operation = OPERATION_UPDATE + if self._needs_update(obj, obj_from_module): + new_obj = self.update_object(obj, obj_from_module) + diff = {"before": obj, "after": new_obj} + changed = True + else: + new_obj = obj + elif self.params[STATE_PARAMETER] == STATE_ABSENT: + operation = OPERATION_DELETE + if obj is not None: + self.delete_object(obj) + changed = True + diff = {"before": obj, "after": {}} + else: + diff = {"before": {}, "after": {}} + new_obj = None + else: + raise RuntimeError("Unknown state supplied.") + + result = {"changed": changed} + if changed: + result["operation"] = operation + if self._module._diff: + result["diff"] = diff + if self.result_key: + result[self.result_key] = new_obj + self._module.exit_json(**result) + + def module_to_obj(self, is_update): + obj = {} + for k, v in self.params.items(): + result = self.map_param(k, v, is_update) + if result: + obj[result[0]] = result[1] + return obj + + def map_param(self, k, v, is_update): + def helper(item): + return {camel_case_key(k): v for k, v in item.items()} + + def needs_camel_case(k): + spec = self._module.argument_spec[k] + return ( + spec.get("type") == "list" + and spec.get("elements") == "dict" + and spec.get("options") + ) or (spec.get("type") == "dict" and spec.get("options")) + + if k in self.api_params and v is not None: + if isinstance(v, dict) and needs_camel_case(k): + v = helper(v) + elif isinstance(v, (list, tuple)) and needs_camel_case(k): + v = [helper(i) for i in v] + if is_update and k in self.create_only_fields: + return + return camel_case_key(k), v + + def _needs_update(self, api_obj, module_obj): + api_obj = copy.deepcopy(api_obj) + module_obj = copy.deepcopy(module_obj) + return self.needs_update(api_obj, module_obj) + + def needs_update(self, api_obj, module_obj): + for k, v in module_obj.items(): + if k not in api_obj: + return True + if api_obj[k] != v: + return True + return False + + def prepare_object(self, existing, obj): + existing = { + k: v for k, v in existing.items() if k not in self.operational_attributes + } + for k, v in obj.items(): + existing[k] = v + return existing + + def id_from_obj(self, obj, camel_case=False): + def key_func(key): + return camel_case_key(key) if camel_case else key + + if self.unique_identifiers: + for identifier in self.unique_identifiers: + identifier = key_func(identifier) + if identifier in obj: + return obj[identifier] + return None + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_CREATE: + return self.api_endpoint + elif identifier: + return f"{self.api_endpoint}/{identifier}" + raise RuntimeError("invalid arguments passed") + + 
def read_object(self): + identifier = self.id_from_obj(self.params) + url = self.endpoint_url(OPERATION_READ, identifier) + try: + return self.get(url) + except RequestError as e: + if e.status == 404: + return + elif e.status == 403 and b"ACL not found" in e.response_data: + return + raise + + def create_object(self, obj): + if self._module.check_mode: + return obj + else: + url = self.endpoint_url(OPERATION_CREATE) + created_obj = self.put(url, data=self.prepare_object({}, obj)) + if created_obj is None: + created_obj = self.read_object() + return created_obj + + def update_object(self, existing, obj): + merged_object = self.prepare_object(existing, obj) + if self._module.check_mode: + return merged_object + else: + url = self.endpoint_url(OPERATION_UPDATE, self.id_from_obj(existing, camel_case=True)) + updated_obj = self.put(url, data=merged_object) + if updated_obj is None: + updated_obj = self.read_object() + return updated_obj + + def delete_object(self, obj): + if self._module.check_mode: + return {} + else: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + return self.delete(url) + + def _request(self, method, url_parts, data=None, params=None): + module_params = self.params + + if not isinstance(url_parts, (tuple, list)): + url_parts = [url_parts] + if params: + # Remove values that are None + params = {k: v for k, v in params.items() if v is not None} + + ca_path = module_params.get("ca_path") + base_url = f"{module_params['scheme']}://{module_params['host']}:{module_params['port']}/v1" + url = "/".join([base_url] + list(url_parts)) + + headers = {} + token = self.params.get("token") + if token: + headers["X-Consul-Token"] = token + + try: + if data is not None: + data = json.dumps(data) + headers["Content-Type"] = "application/json" + if params: + url = f"{url}?{urlencode(params)}" + response = open_url( + url, + method=method, + data=data, + headers=headers, + validate_certs=module_params["validate_certs"], + ca_path=ca_path, + ) + response_data = response.read() + status = ( + response.status if hasattr(response, "status") else response.getcode() + ) + + except urllib_error.URLError as e: + if isinstance(e, urllib_error.HTTPError): + status = e.code + response_data = e.fp.read() + else: + self._module.fail_json( + msg=f"Could not connect to consul agent at {module_params['host']}:{module_params['port']}, error was {e}" + ) + raise + + if 400 <= status < 600: + raise RequestError(status, response_data) + + if response_data: + return json.loads(response_data) + return None + + def get(self, url_parts, **kwargs): + return self._request("GET", url_parts, **kwargs) + + def put(self, url_parts, **kwargs): + return self._request("PUT", url_parts, **kwargs) + + def delete(self, url_parts, **kwargs): + return self._request("DELETE", url_parts, **kwargs) diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 86c4694524..3003875c09 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -1,17 +1,15 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import csv -from io import BytesIO, StringIO +from io import StringIO from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six import PY3 class CustomDialectFailureError(Exception): @@ -39,28 +37,27 @@ def initialize_dialect(dialect, **kwargs): csv.register_dialect("unix", unix_dialect) if dialect not in csv.list_dialects(): - raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect) + raise DialectNotAvailableError(f"Dialect '{dialect}' is not supported by your version of python.") # Create a dictionary from only set options - dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None) + dialect_params = {k: v for k, v in kwargs.items() if v is not None} if dialect_params: try: csv.register_dialect('custom', dialect, **dialect_params) except TypeError as e: - raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e)) + raise CustomDialectFailureError(f"Unable to create custom dialect: {e}") dialect = 'custom' return dialect def read_csv(data, dialect, fieldnames=None): - + BOM = to_native('\ufeff') data = to_native(data, errors='surrogate_or_strict') + if data.startswith(BOM): + data = data[len(BOM):] - if PY3: - fake_fh = StringIO(data) - else: - fake_fh = BytesIO(data) + fake_fh = StringIO(data) reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect) diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py index 825d3a2be9..bb4c0efcee 100644 --- a/plugins/module_utils/database.py +++ b/plugins/module_utils/database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,10 +6,10 @@ # # Copyright (c) 2014, Toshio Kuratomi # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -103,19 +102,19 @@ def _identifier_parse(identifier, quote_char): dot = identifier.index('.') except ValueError: identifier = identifier.replace(quote_char, quote_char * 2) - identifier = ''.join((quote_char, identifier, quote_char)) + identifier = f"{quote_char}{identifier}{quote_char}" further_identifiers = [identifier] else: if dot == 0 or dot >= len(identifier) - 1: identifier = identifier.replace(quote_char, quote_char * 2) - identifier = ''.join((quote_char, identifier, quote_char)) + identifier = f"{quote_char}{identifier}{quote_char}" further_identifiers = [identifier] else: first_identifier = identifier[:dot] next_identifier = identifier[dot + 1:] further_identifiers = _identifier_parse(next_identifier, quote_char) first_identifier = first_identifier.replace(quote_char, quote_char * 2) - first_identifier = ''.join((quote_char, first_identifier, quote_char)) + first_identifier = f"{quote_char}{first_identifier}{quote_char}" further_identifiers.insert(0, first_identifier) return further_identifiers @@ -124,14 +123,14 @@ def _identifier_parse(identifier, quote_char): def pg_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='"') if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError(f'PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') return '.'.join(identifier_fragments) def mysql_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='`') if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError(f'MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') special_cased_fragments = [] for fragment in identifier_fragments: @@ -186,5 +185,4 @@ def check_input(module, *args): dangerous_elements.append(elem) if dangerous_elements: - module.fail_json(msg="Passed input '%s' is " - "potentially dangerous" % ', '.join(dangerous_elements)) + module.fail_json(msg=f"Passed input '{', '.join(dangerous_elements)}' is potentially dangerous") diff --git a/plugins/module_utils/datetime.py b/plugins/module_utils/datetime.py new file mode 100644 index 0000000000..f11375f0eb --- /dev/null +++ b/plugins/module_utils/datetime.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2023 Felix Fontein +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import datetime as _datetime +import sys + + +_USE_TIMEZONE = sys.version_info >= (3, 6) + + +def ensure_timezone_info(value): + if not _USE_TIMEZONE or value.tzinfo is not None: + return value + return 
value.astimezone(_datetime.timezone.utc)
+
+
+def fromtimestamp(value):
+    if _USE_TIMEZONE:
+        return _datetime.datetime.fromtimestamp(value, tz=_datetime.timezone.utc)
+    return _datetime.datetime.utcfromtimestamp(value)
+
+
+def now():
+    if _USE_TIMEZONE:
+        return _datetime.datetime.now(tz=_datetime.timezone.utc)
+    return _datetime.datetime.utcnow()
diff --git a/plugins/module_utils/deps.py b/plugins/module_utils/deps.py
new file mode 100644
index 0000000000..a24cd63838
--- /dev/null
+++ b/plugins/module_utils/deps.py
@@ -0,0 +1,100 @@
+# (c) 2022, Alexei Znamensky
+# Copyright (c) 2022, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import annotations
+
+
+import traceback
+from contextlib import contextmanager
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+
+_deps = dict()
+
+
+class _Dependency(object):
+    _states = ["pending", "failure", "success"]
+
+    def __init__(self, name, reason=None, url=None, msg=None):
+        self.name = name
+        self.reason = reason
+        self.url = url
+        self.msg = msg
+
+        self.state = 0
+        self.trace = None
+        self.exc = None
+
+    def succeed(self):
+        self.state = 2
+
+    def fail(self, exc, trace):
+        self.state = 1
+        self.exc = exc
+        self.trace = trace
+
+    @property
+    def message(self):
+        if self.msg:
+            return to_native(self.msg)
+        else:
+            return missing_required_lib(self.name, reason=self.reason, url=self.url)
+
+    @property
+    def failed(self):
+        return self.state == 1
+
+    def validate(self, module):
+        if self.failed:
+            module.fail_json(msg=self.message, exception=self.trace)
+
+    def __str__(self):
+        return f"<dependency: {self.name} [{self._states[self.state]}]>"
+
+
+@contextmanager
+def declare(name, *args, **kwargs):
+    dep = _Dependency(name, *args, **kwargs)
+    try:
+        yield dep
+    except Exception as e:
+        dep.fail(e, traceback.format_exc())
+    else:
+        dep.succeed()
+    finally:
+        _deps[name] = dep
+
+
+def _select_names(spec):
+    dep_names = sorted(_deps)
+
+    if spec:
+        if spec.startswith("-"):
+            spec_split = spec[1:].split(":")
+            for d in spec_split:
+                dep_names.remove(d)
+        else:
+            spec_split = spec.split(":")
+            dep_names = []
+            for d in spec_split:
+                _deps[d]  # ensure it exists
+                dep_names.append(d)
+
+    return dep_names
+
+
+def validate(module, spec=None):
+    for dep in _select_names(spec):
+        _deps[dep].validate(module)
+
+
+def failed(spec=None):
+    return any(_deps[d].failed for d in _select_names(spec))
+
+
+def clear():
+    _deps.clear()
diff --git a/plugins/module_utils/dimensiondata.py b/plugins/module_utils/dimensiondata.py
index bcb02e8476..a0430b445e 100644
--- a/plugins/module_utils/dimensiondata.py
+++ b/plugins/module_utils/dimensiondata.py
@@ -1,8 +1,8 @@
-# -*- coding: utf-8 -*-
 #
 # Copyright (c) 2016 Dimension Data
 #
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
 #
 # Authors:
 #   - Aimon Bustardo
@@ -11,22 +11,22 @@
 #
 # Common functionality to be used by various module components
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
+import configparser
 import os
 import re
 import traceback
 
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.six.moves import configparser
+# (TODO: remove
AnsibleModule from next line!) +from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: disable=unused-import from os.path import expanduser from uuid import UUID LIBCLOUD_IMP_ERR = None try: - from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus - from libcloud.compute.base import Node, NodeLocation + from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus # noqa: F401, pylint: disable=unused-import + from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import from libcloud.compute.providers import get_driver from libcloud.compute.types import Provider @@ -37,7 +37,7 @@ except ImportError: LIBCLOUD_IMP_ERR = traceback.format_exc() HAS_LIBCLOUD = False -# MCP 2.x version patten for location (datacenter) names. +# MCP 2.x version pattern for location (datacenter) names. # # Note that this is not a totally reliable way of determining MCP version. # Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties. @@ -73,7 +73,7 @@ class DimensionDataModule(object): # Region and location are common to all Dimension Data modules. region = self.module.params['region'] - self.region = 'dd-{0}'.format(region) + self.region = f'dd-{region}' self.location = self.module.params['location'] libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs'] @@ -140,7 +140,7 @@ class DimensionDataModule(object): if not user_id or not key: home = expanduser('~') config = configparser.RawConfigParser() - config.read("%s/.dimensiondata" % home) + config.read(f"{home}/.dimensiondata") try: user_id = config.get("dimensiondatacloud", "MCP_USER") @@ -190,7 +190,7 @@ class DimensionDataModule(object): if network_domain: return network_domain - raise UnknownNetworkError("Network '%s' could not be found" % locator) + raise UnknownNetworkError(f"Network '{locator}' could not be found") def get_vlan(self, locator, location, network_domain): """ @@ -212,7 +212,7 @@ class DimensionDataModule(object): if vlan: return vlan - raise UnknownVLANError("VLAN '%s' could not be found" % locator) + raise UnknownVLANError(f"VLAN '{locator}' could not be found") @staticmethod def argument_spec(**additional_argument_spec): diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py new file mode 100644 index 0000000000..4c052a1d6e --- /dev/null +++ b/plugins/module_utils/django.py @@ -0,0 +1,150 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +django_std_args = dict( + # environmental options + venv=dict(type="path"), + # default options of django-admin + settings=dict(type="str", required=True), + pythonpath=dict(type="path"), + traceback=dict(type="bool"), + verbosity=dict(type="int", choices=[0, 1, 2, 3]), + skip_checks=dict(type="bool"), +) +_database_dash = dict( + database=dict(type="str", default="default"), +) +_data = dict( + 
excludes=dict(type="list", elements="str"), + format=dict(type="str", default="json", choices=["xml", "json", "jsonl", "yaml"]), +) +_pks = dict( + primary_keys=dict(type="list", elements="str"), +) + +_django_std_arg_fmts = dict( + all=cmd_runner_fmt.as_bool("--all"), + app=cmd_runner_fmt.as_opt_val("--app"), + apps=cmd_runner_fmt.as_list(), + apps_models=cmd_runner_fmt.as_list(), + check=cmd_runner_fmt.as_bool("--check"), + command=cmd_runner_fmt.as_list(), + database_dash=cmd_runner_fmt.as_opt_eq_val("--database"), + database_stacked_dash=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + deploy=cmd_runner_fmt.as_bool("--deploy"), + dry_run=cmd_runner_fmt.as_bool("--dry-run"), + excludes=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--exclude"), + fail_level=cmd_runner_fmt.as_opt_val("--fail-level"), + fixture=cmd_runner_fmt.as_opt_val("--output"), + fixtures=cmd_runner_fmt.as_list(), + format=cmd_runner_fmt.as_opt_val("--format"), + ignore_non_existent=cmd_runner_fmt.as_bool("--ignorenonexistent"), + indent=cmd_runner_fmt.as_opt_val("--indent"), + natural_foreign=cmd_runner_fmt.as_bool("--natural-foreign"), + natural_primary=cmd_runner_fmt.as_bool("--natural-primary"), + no_color=cmd_runner_fmt.as_fixed("--no-color"), + noinput=cmd_runner_fmt.as_fixed("--noinput"), + primary_keys=lambda v: ["--pks", ",".join(v)], + pythonpath=cmd_runner_fmt.as_opt_eq_val("--pythonpath"), + settings=cmd_runner_fmt.as_opt_eq_val("--settings"), + skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), + tags=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--tag"), + traceback=cmd_runner_fmt.as_bool("--traceback"), + verbosity=cmd_runner_fmt.as_opt_val("--verbosity"), + version=cmd_runner_fmt.as_fixed("--version"), +) + +# keys can be used in _django_args +_args_menu = dict( + std=(django_std_args, _django_std_arg_fmts), + database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0 + noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0 + dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0 + check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0 + database_dash=(_database_dash, {}), + data=(_data, {}), +) + + +class _DjangoRunner(PythonRunner): + def __init__(self, module, arg_formats=None, **kwargs): + arg_fmts = dict(arg_formats) if arg_formats else {} + arg_fmts.update(_django_std_arg_fmts) + + super(_DjangoRunner, self).__init__(module, ["-m", "django"], arg_formats=arg_fmts, **kwargs) + + def __call__(self, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): + args_order = ( + ("command", "no_color", "settings", "pythonpath", "traceback", "verbosity", "skip_checks") + self._prepare_args_order(self.default_args_order) + ) + return super(_DjangoRunner, self).__call__(args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) + + def bare_context(self, *args, **kwargs): + return super(_DjangoRunner, self).__call__(*args, **kwargs) + + +class DjangoModuleHelper(ModuleHelper): + module = {} + django_admin_cmd = None + arg_formats = {} + django_admin_arg_order = () + _django_args = [] + _check_mode_arg = "" + + def __init__(self): + self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}), + self.arg_formats, + *(["std"] + self._django_args)) + super(DjangoModuleHelper, self).__init__(self.module) + if 
self.django_admin_cmd is not None: + self.vars.command = self.django_admin_cmd + + @staticmethod + def _build_args(arg_spec, arg_format, *names): + res_arg_spec = {} + res_arg_fmts = {} + for name in names: + args, fmts = _args_menu[name] + res_arg_spec = dict_merge(res_arg_spec, args) + res_arg_fmts = dict_merge(res_arg_fmts, fmts) + res_arg_spec = dict_merge(res_arg_spec, arg_spec) + res_arg_fmts = dict_merge(res_arg_fmts, arg_format) + + return res_arg_spec, res_arg_fmts + + def __run__(self): + runner = _DjangoRunner(self.module, + default_args_order=self.django_admin_arg_order, + arg_formats=self.arg_formats, + venv=self.vars.venv, + check_rc=True) + + run_params = self.vars.as_dict() + if self._check_mode_arg: + run_params.update({self._check_mode_arg: self.check_mode}) + + rc, out, err = runner.bare_context("version").run() + self.vars.version = out.strip() + + with runner() as ctx: + results = ctx.run(**run_params) + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + self.vars.set("run_info", ctx.run_info, verbosity=3) + + return results + + @classmethod + def execute(cls): + cls().run() diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 2c785353ad..135fc6188c 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -1,13 +1,12 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019 Gregory Thiemonge -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2019 Gregory Thiemonge +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import fetch_url @@ -32,6 +31,7 @@ class GandiLiveDNSAPI(object): def __init__(self, module): self.module = module self.api_key = module.params['api_key'] + self.personal_access_token = module.params['personal_access_token'] def _build_error_message(self, module, info): s = '' @@ -42,21 +42,26 @@ class GandiLiveDNSAPI(object): error = errors[0] name = error.get('name') if name: - s += '{0} :'.format(name) + s += f'{name} :' description = error.get('description') if description: s += description return s def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True): - headers = {'Authorization': 'Apikey {0}'.format(self.api_key), + authorization_header = ( + f'Bearer {self.personal_access_token}' + if self.personal_access_token + else f'Apikey {self.api_key}' + ) + headers = {'Authorization': authorization_header, 'Content-Type': 'application/json'} data = None if payload: try: data = json.dumps(payload) except Exception as e: - self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + self.module.fail_json(msg=f"Failed to encode payload as JSON: {e} ") resp, info = fetch_url(self.module, self.api_endpoint + api_call, @@ -68,7 +73,7 @@ class GandiLiveDNSAPI(object): if info['status'] >= 400 and (info['status'] != 404 or error_on_404): err_s = self.error_strings.get(info['status'], '') - error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info)) + error_msg = f"API Error 
{err_s}: {self._build_error_message(self.module, info)}" result = None try: @@ -80,7 +85,7 @@ class GandiLiveDNSAPI(object): try: result = json.loads(to_text(content, errors='surrogate_or_strict')) except (getattr(json, 'JSONDecodeError', ValueError)) as e: - error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + error_msg += f"; Failed to parse API response with error {e}: {content}" if error_msg: self.module.fail_json(msg=error_msg) @@ -109,11 +114,11 @@ class GandiLiveDNSAPI(object): return [self.build_result(r, domain) for r in results] def get_records(self, record, type, domain): - url = '/domains/%s/records' % (domain) + url = f'/domains/{domain}/records' if record: - url += '/%s' % (record) + url += f'/{record}' if type: - url += '/%s' % (type) + url += f'/{type}' records, status = self._gandi_api_call(url, error_on_404=False) @@ -132,7 +137,7 @@ class GandiLiveDNSAPI(object): return records def create_record(self, record, type, values, ttl, domain): - url = '/domains/%s/records' % (domain) + url = f'/domains/{domain}/records' new_record = { 'rrset_name': record, 'rrset_type': type, @@ -147,7 +152,7 @@ class GandiLiveDNSAPI(object): return None def update_record(self, record, type, values, ttl, domain): - url = '/domains/%s/records/%s/%s' % (domain, record, type) + url = f'/domains/{domain}/records/{record}/{type}' new_record = { 'rrset_values': values, 'rrset_ttl': ttl, @@ -156,7 +161,7 @@ class GandiLiveDNSAPI(object): return record def delete_record(self, record, type, domain): - url = '/domains/%s/records/%s/%s' % (domain, record, type) + url = f'/domains/{domain}/records/{record}/{type}' self._gandi_api_call(url, method='DELETE') diff --git a/plugins/module_utils/gconftool2.py b/plugins/module_utils/gconftool2.py new file mode 100644 index 0000000000..7d11078edf --- /dev/null +++ b/plugins/module_utils/gconftool2.py @@ -0,0 +1,31 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_state_map = { + "present": "--set", + "absent": "--unset", + "get": "--get", +} + + +def gconftool2_runner(module, **kwargs): + return CmdRunner( + module, + command='gconftool-2', + arg_formats=dict( + state=cmd_runner_fmt.as_map(_state_map), + key=cmd_runner_fmt.as_list(), + value_type=cmd_runner_fmt.as_opt_val("--type"), + value=cmd_runner_fmt.as_list(), + direct=cmd_runner_fmt.as_bool("--direct"), + config_source=cmd_runner_fmt.as_opt_val("--config-source"), + version=cmd_runner_fmt.as_fixed("--version"), + ), + **kwargs + ) diff --git a/plugins/module_utils/gio_mime.py b/plugins/module_utils/gio_mime.py new file mode 100644 index 0000000000..15122b1ef1 --- /dev/null +++ b/plugins/module_utils/gio_mime.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def gio_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['gio'], + arg_formats=dict( + mime=cmd_runner_fmt.as_fixed('mime'), + mime_type=cmd_runner_fmt.as_list(), + 
handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def gio_mime_get(runner, mime_type): + def process(rc, out, err): + if err.startswith("No default applications for"): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("mime mime_type", output_process=process) as ctx: + return ctx.run(mime_type=mime_type) diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index 5ddafa2b42..7ad11ab5a2 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -1,106 +1,177 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -from distutils.version import StrictVersion +from __future__ import annotations from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_native -try: - from urllib import quote_plus # Python 2.X -except ImportError: - from urllib.parse import quote_plus # Python 3+ +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from urllib.parse import urljoin import traceback + +def _determine_list_all_kwargs(version): + gitlab_version = LooseVersion(version) + if gitlab_version >= LooseVersion('4.0.0'): + # 4.0.0 removed 'as_list' + return {'iterator': True, 'per_page': 100} + elif gitlab_version >= LooseVersion('3.7.0'): + # 3.7.0 added 'get_all' + return {'as_list': False, 'get_all': True, 'per_page': 100} + else: + return {'as_list': False, 'all': True, 'per_page': 100} + + GITLAB_IMP_ERR = None try: import gitlab + import requests HAS_GITLAB_PACKAGE = True + list_all_kwargs = _determine_list_all_kwargs(gitlab.__version__) except Exception: + gitlab = None GITLAB_IMP_ERR = traceback.format_exc() HAS_GITLAB_PACKAGE = False + list_all_kwargs = {} -def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'): - url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path) - headers = {} - if access_token: - headers['Authorization'] = "Bearer %s" % access_token - else: - headers['Private-Token'] = private_token - - headers['Accept'] = "application/json" - headers['Content-Type'] = "application/json" - - response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method) - status = info['status'] - content = "" - if response: - content = response.read() - if status == 204: - return True, content - elif status == 200 or status == 201: - return True, json.loads(content) - else: - return False, str(status) + ": " + content +def auth_argument_spec(spec=None): + arg_spec = (dict( + ca_path=dict(type='str'), + api_token=dict(type='str', no_log=True), + api_oauth_token=dict(type='str', no_log=True), + api_job_token=dict(type='str', no_log=True), + )) + if spec: + arg_spec.update(spec) + return arg_spec -def findProject(gitlab_instance, identifier): +def find_project(gitlab_instance, identifier): try: project = 
gitlab_instance.projects.get(identifier) except Exception as e: current_user = gitlab_instance.user try: - project = gitlab_instance.projects.get(current_user.username + '/' + identifier) + project = gitlab_instance.projects.get(f"{current_user.username}/{identifier}") except Exception as e: return None return project -def findGroup(gitlab_instance, identifier): +def find_group(gitlab_instance, identifier): try: - project = gitlab_instance.groups.get(identifier) + group = gitlab_instance.groups.get(identifier) except Exception as e: return None - return project + return group -def gitlabAuthentication(module): +def ensure_gitlab_package(module, min_version=None): + if not HAS_GITLAB_PACKAGE: + module.fail_json( + msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'), + exception=GITLAB_IMP_ERR + ) + gitlab_version = gitlab.__version__ + if min_version is not None and LooseVersion(gitlab_version) < LooseVersion(min_version): + module.fail_json(msg=( + f"This module requires python-gitlab Python module >= {min_version} (installed version: " + f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above." + )) + + +def gitlab_authentication(module, min_version=None): + ensure_gitlab_package(module, min_version=min_version) + gitlab_url = module.params['api_url'] validate_certs = module.params['validate_certs'] + ca_path = module.params['ca_path'] gitlab_user = module.params['api_username'] gitlab_password = module.params['api_password'] gitlab_token = module.params['api_token'] + gitlab_oauth_token = module.params['api_oauth_token'] + gitlab_job_token = module.params['api_job_token'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + verify = ca_path if validate_certs and ca_path else validate_certs try: - # python-gitlab library remove support for username/password authentication since 1.13.0 - # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0 - # This condition allow to still support older version of the python-gitlab library - if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"): - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password, - private_token=gitlab_token, api_version=4) - else: - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4) + # We can create an oauth_token using a username and password + # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow + if gitlab_user: + data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} + resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify) + resp_data = resp.json() + gitlab_oauth_token = resp_data["access_token"] + gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token, + oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) gitlab_instance.auth() except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e: - module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e)) + module.fail_json(msg=f"Failed to connect to GitLab server: {e}") except (gitlab.exceptions.GitlabHttpError) as e: - module.fail_json(msg="Failed to connect to GitLab server: %s. \ - GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." 
% to_native(e)) + module.fail_json(msg=( + f"Failed to connect to GitLab server: {e}. GitLab remove Session API now " + "that private tokens are removed from user API endpoints since version 10.2." + )) return gitlab_instance + + +def filter_returned_variables(gitlab_variables): + # pop properties we don't know + existing_variables = [dict(x.attributes) for x in gitlab_variables] + KNOWN = ['key', 'value', 'description', 'masked', 'hidden', 'protected', 'variable_type', 'environment_scope', 'raw'] + for item in existing_variables: + for key in list(item.keys()): + if key not in KNOWN: + item.pop(key) + return existing_variables + + +def vars_to_variables(vars, module): + # transform old vars to new variables structure + variables = list() + for item, value in vars.items(): + if isinstance(value, (str, int, float)): + variables.append( + { + "name": item, + "value": str(value), + "description": None, + "masked": False, + "protected": False, + "hidden": False, + "raw": False, + "variable_type": "env_var", + } + ) + + elif isinstance(value, dict): + new_item = { + "name": item, + "value": value.get('value'), + "description": value.get('description'), + "masked": value.get('masked'), + "hidden": value.get('hidden'), + "protected": value.get('protected'), + "raw": value.get('raw'), + "variable_type": value.get('variable_type'), + } + + if value.get('environment_scope'): + new_item['environment_scope'] = value.get('environment_scope') + + variables.append(new_item) + + else: + module.fail_json(msg="value must be of type string, integer, float or dict") + + return variables diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py index 70b144c077..149e11162e 100644 --- a/plugins/module_utils/heroku.py +++ b/plugins/module_utils/heroku.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2018, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py new file mode 100644 index 0000000000..88e92461c3 --- /dev/null +++ b/plugins/module_utils/homebrew.py @@ -0,0 +1,135 @@ +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + + +import os +import re + + +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split("\n") if line.strip()) + chars = [_f for _f in (line.split("#")[0].strip() for line in lines) if _f] + group = rf"[^{''.join(chars)}]" + return re.compile(group) + + +class HomebrewValidate(object): + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = rf""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {os.path.sep} # the OS-specific path separator + . # dots + \- # dashes + """ + + VALID_BREW_PATH_CHARS = rf""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {os.path.sep} # the OS-specific path separator + . # dots + \- # dashes + """ + + VALID_PACKAGE_CHARS = r""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots
+        /                   # slash (for taps)
+        \+                  # plusses
+        \-                  # dashes
+        :                   # colons (for URLs)
+        @                   # at-sign
+    """
+
+    INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+    INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+    INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
+    # /class regexes ----------------------------------------------- }}}
+
+    # class validations -------------------------------------------- {{{
+    @classmethod
+    def valid_path(cls, path):
+        """
+        `path` must be one of:
+         - list of paths
+         - a string containing only:
+             - alphanumeric characters
+             - dashes
+             - dots
+             - spaces
+             - colons
+             - os.path.sep
+        """
+
+        if isinstance(path, str):
+            return not cls.INVALID_PATH_REGEX.search(path)
+
+        try:
+            iter(path)
+        except TypeError:
+            return False
+        else:
+            paths = path
+            return all(cls.valid_brew_path(path_) for path_ in paths)
+
+    @classmethod
+    def valid_brew_path(cls, brew_path):
+        """
+        `brew_path` must be one of:
+         - None
+         - a string containing only:
+             - alphanumeric characters
+             - dashes
+             - dots
+             - spaces
+             - os.path.sep
+        """
+
+        if brew_path is None:
+            return True
+
+        return isinstance(
+            brew_path, str
+        ) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+
+    @classmethod
+    def valid_package(cls, package):
+        """A valid package is either None or alphanumeric."""
+
+        if package is None:
+            return True
+
+        return isinstance(
+            package, str
+        ) and not cls.INVALID_PACKAGE_REGEX.search(package)
+
+
+def parse_brew_path(module):
+    # type: (...) -> str
+    """Attempt to find the Homebrew executable path.
+
+    Requires:
+        - module has a `path` parameter
+        - path is a valid path string for the target OS. Otherwise, module.fail_json()
+          is called with msg="Invalid path: <path>".
+ """ + path = module.params["path"] + if not HomebrewValidate.valid_path(path): + module.fail_json(msg=f"Invalid path: {path}") + + if isinstance(path, str): + paths = path.split(":") + elif isinstance(path, list): + paths = path + else: + module.fail_json(msg=f"Invalid path: {path}") + + brew_path = module.get_bin_path("brew", required=True, opt_dirs=paths) + if not HomebrewValidate.valid_brew_path(brew_path): + module.fail_json(msg=f"Invalid brew path: {brew_path}") + + return brew_path diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index 489e90dd3c..dee53cd787 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c), Google Inc, 2017 -# Simplified BSD License (see licenses/simplified_bsd.txt or -# https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import time @@ -32,7 +30,7 @@ class HwcModuleException(Exception): self._message = message def __str__(self): - return "[HwcClientException] message=%s" % self._message + return f"[HwcClientException] message={self._message}" class HwcClientException(Exception): @@ -43,9 +41,8 @@ class HwcClientException(Exception): self._message = message def __str__(self): - msg = " code=%s," % str(self._code) if self._code != 0 else "" - return "[HwcClientException]%s message=%s" % ( - msg, self._message) + msg = f" code={self._code!s}," if self._code != 0 else "" + return f"[HwcClientException]{msg} message={self._message}" class HwcClientException404(HwcClientException): @@ -53,7 +50,7 @@ class HwcClientException404(HwcClientException): super(HwcClientException404, self).__init__(404, message) def __str__(self): - return "[HwcClientException404] message=%s" % self._message + return f"[HwcClientException404] message={self._message}" def session_method_wrapper(f): @@ -63,7 +60,7 @@ def session_method_wrapper(f): r = f(self, url, *args, **kwargs) except Exception as ex: raise HwcClientException( - 0, "Sending request failed, error=%s" % ex) + 0, f"Sending request failed, error={ex}") result = None if r.content: @@ -71,7 +68,7 @@ def session_method_wrapper(f): result = r.json() except Exception as ex: raise HwcClientException( - 0, "Parsing response to json failed, error: %s" % ex) + 0, f"Parsing response to json failed, error: {ex}") code = r.status_code if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]: @@ -100,7 +97,7 @@ class _ServiceClient(object): self._client = client self._endpoint = endpoint self._default_header = { - 'User-Agent': "Huawei-Ansible-MM-%s" % product, + 'User-Agent': f"Huawei-Ansible-MM-{product}", 'Accept': 'application/json', } @@ -188,7 +185,7 @@ class Config(object): raise_exc=False) def _get_service_endpoint(self, client, service_type, region): - k = "%s.%s" % (service_type, region if region else "") + k = f"{service_type}.{region if region else ''}" if k in self._endpoints: return self._endpoints.get(k) @@ -199,11 +196,11 @@ class Config(object): region_name=region, interface="public") except Exception as ex: raise HwcClientException( - 0, "Getting endpoint failed, error=%s" % ex) + 0, f"Getting endpoint failed, error={ex}") if url == "": raise HwcClientException( - 0, "Can not find the enpoint for %s" % service_type) + 
0, f"Cannot find the endpoint for {service_type}") if url[-1] != "/": url += "/" @@ -342,7 +339,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): if not_found_times > 10: raise HwcModuleException( - "not found the object for %d times" % not_found_times) + f"not found the object for {not_found_times} times") else: not_found_times = 0 @@ -351,7 +348,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): if pending and status not in pending: raise HwcModuleException( - "unexpect status(%s) occured" % status) + f"unexpected status({status}) occurred") if not is_last_time: wait *= 2 @@ -362,7 +359,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): time.sleep(wait) - raise HwcModuleException("asycn wait timeout after %d seconds" % timeout) + raise HwcModuleException(f"async wait timeout after {timeout} seconds") def navigate_value(data, index, array_index=None): @@ -381,7 +378,7 @@ def navigate_value(data, index, array_index=None): i = index[n] if i not in d: raise HwcModuleException( - "navigate value failed: key(%s) is not exist in dict" % i) + f"navigate value failed: key({i}) is not exist in dict") d = d[i] if not array_index: diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index 4f70f844cd..0c8f3d274d 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -1,11 +1,10 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 0ede2dc0ba..ad07f27b1b 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -1,45 +1,23 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost -# -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD 2-Clause license (see LICENSES/BSD-2-Clause.txt) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type import json import traceback +import copy +from urllib.parse import urlencode, quote +from urllib.error import HTTPError from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.parse import urlencode, quote -from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.common.text.converters import to_native, to_text +URL_REALM_INFO = "{url}/realms/{realm}" URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" +URL_REALM_KEYS_METADATA = "{url}/admin/realms/{realm}/keys" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" @@ -49,23 +27,62 @@ URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" +URL_CLIENT_ROLE_SCOPE_CLIENTS = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/clients/{scopeid}" +URL_CLIENT_ROLE_SCOPE_REALM = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/realm" + URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" +URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" +URL_REALM_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/available" +URL_REALM_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/composite" URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites" +URL_ROLES_BY_ID = "{url}/admin/realms/{realm}/roles-by-id/{id}" +URL_ROLES_BY_ID_COMPOSITES_CLIENTS = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites/clients/{cid}" +URL_ROLES_BY_ID_COMPOSITES = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites" + URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}" +URL_GROUP_CHILDREN = "{url}/admin/realms/{realm}/groups/{groupid}/children" URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes" URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" -URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" -URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" 
-URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" +URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes" +URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}" +URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes" +URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}" + +URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes" +URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}" +URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes" +URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}" + +URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" +URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" +URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" + +URL_USERS = "{url}/admin/realms/{realm}/users" +URL_USER = "{url}/admin/realms/{realm}/users/{id}" +URL_USER_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings" +URL_USER_REALM_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" +URL_USER_CLIENTS_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients" +URL_USER_CLIENT_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client_id}" +URL_USER_GROUPS = "{url}/admin/realms/{realm}/users/{id}/groups" +URL_USER_GROUP = "{url}/admin/realms/{realm}/users/{id}/groups/{group_id}" + +URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user" +URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}" +URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available" +URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite" + +URL_REALM_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{group}/role-mappings/realm" + +URL_CLIENTSECRET = "{url}/admin/realms/{realm}/clients/{id}/client-secret" URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" @@ -77,15 +94,36 @@ URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority" URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" +URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION = "{url}/admin/realms/{realm}/authentication/register-required-action" +URL_AUTHENTICATION_REQUIRED_ACTIONS = "{url}/admin/realms/{realm}/authentication/required-actions" +URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS = "{url}/admin/realms/{realm}/authentication/required-actions/{alias}" URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances" URL_IDENTITY_PROVIDER = 
"{url}/admin/realms/{realm}/identity-provider/instances/{alias}" URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers" URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}" +URL_IDENTITY_PROVIDER_IMPORT = "{url}/admin/realms/{realm}/identity-provider/import-config" URL_COMPONENTS = "{url}/admin/realms/{realm}/components" URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}" +URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}" +URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope" + +# This URL is used for: +# - Querying client authorization permissions +# - Removing client authorization permissions +URL_AUTHZ_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" +URL_AUTHZ_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{id}" + +URL_AUTHZ_PERMISSION = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}/{id}" +URL_AUTHZ_PERMISSIONS = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}" + +URL_AUTHZ_RESOURCES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/resource" + +URL_AUTHZ_CUSTOM_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{policy_type}" +URL_AUTHZ_CUSTOM_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" + def keycloak_argument_spec(): """ @@ -101,7 +139,10 @@ def keycloak_argument_spec(): auth_username=dict(type='str', aliases=['username']), auth_password=dict(type='str', aliases=['password'], no_log=True), validate_certs=dict(type='bool', default=True), + connection_timeout=dict(type='int', default=10), token=dict(type='str', no_log=True), + refresh_token=dict(type='str', no_log=True), + http_agent=dict(type='str', default='Ansible'), ) @@ -110,57 +151,142 @@ def camel(words): class KeycloakError(Exception): - pass + def __init__(self, msg, authError=None): + self.msg = msg + self.authError = authError + + def __str__(self): + return str(self.msg) + + +def _token_request(module_params, payload): + """ Obtains connection header with token for the authentication, + using the provided auth_username/auth_password + :param module_params: parameters of the module + :param payload: + type: + dict + description: + Authentication request payload. Must contain at least + 'grant_type' and 'client_id', optionally 'client_secret', + along with parameters based on 'grant_type'; e.g., + 'username'/'password' for type 'password', + 'refresh_token' for type 'refresh_token'. + :return: access token + """ + base_url = module_params.get('auth_keycloak_url') + if not base_url.lower().startswith(('http', 'https')): + raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." 
% base_url) + auth_realm = module_params.get('auth_realm') + auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) + http_agent = module_params.get('http_agent') + validate_certs = module_params.get('validate_certs') + connection_timeout = module_params.get('connection_timeout') + + try: + r = json.loads(to_native(open_url(auth_url, method='POST', + validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, + data=urlencode(payload)).read())) + + return r['access_token'] + except ValueError as e: + raise KeycloakError( + 'API returned invalid JSON when trying to obtain access token from %s: %s' + % (auth_url, str(e))) + except KeyError: + raise KeycloakError( + 'API did not include access_token field in response from %s' % auth_url) + except Exception as e: + raise KeycloakError('Could not obtain access token from %s: %s' + % (auth_url, str(e)), authError=e) + + +def _request_token_using_credentials(module_params): + """ Obtains connection header with token for the authentication, + using the provided auth_username/auth_password + :param module_params: parameters of the module. Must include 'auth_username' and 'auth_password'. + :return: connection header + """ + client_id = module_params.get('auth_client_id') + auth_username = module_params.get('auth_username') + auth_password = module_params.get('auth_password') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'password', + 'client_id': client_id, + 'client_secret': client_secret, + 'username': auth_username, + 'password': auth_password, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) + + +def _request_token_using_refresh_token(module_params): + """ Obtains connection header with token for the authentication, + using the provided refresh_token + :param module_params: parameters of the module. Must include 'refresh_token'. + :return: connection header + """ + client_id = module_params.get('auth_client_id') + refresh_token = module_params.get('refresh_token') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'refresh_token', + 'client_id': client_id, + 'client_secret': client_secret, + 'refresh_token': refresh_token, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) + + +def _request_token_using_client_credentials(module_params): + """ Obtains connection header with token for the authentication, + using the provided auth_client_id and auth_client_secret by grant_type + client_credentials. Ensure that the used client uses client authorization + with service account roles enabled and required service roles assigned. + :param module_params: parameters of the module. Must include 'auth_client_id' + and 'auth_client_secret'.. 
+def _request_token_using_client_credentials(module_params):
+    """ Obtains an access token, using the provided auth_client_id and
+        auth_client_secret with grant_type client_credentials. The client
+        must have service accounts enabled and the required service account
+        roles assigned.
+    :param module_params: parameters of the module. Must include 'auth_client_id'
+        and 'auth_client_secret'.
+    :return: access token
+    """
+    client_id = module_params.get('auth_client_id')
+    client_secret = module_params.get('auth_client_secret')
+
+    temp_payload = {
+        'grant_type': 'client_credentials',
+        'client_id': client_id,
+        'client_secret': client_secret,
+    }
+    # Remove empty items, for instance missing client_secret
+    payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+    return _token_request(module_params, payload)


 def get_token(module_params):
     """ Obtains connection header with token for the authentication,
-        token already given or obtained from credentials
-        :param module_params: parameters of the module
-        :return: connection header
+    token already given or obtained from credentials
+    :param module_params: parameters of the module
+    :return: connection header
     """
     token = module_params.get('token')
-    base_url = module_params.get('auth_keycloak_url')
-
-    if not base_url.lower().startswith(('http', 'https')):
-        raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)

     if token is None:
-        base_url = module_params.get('auth_keycloak_url')
-        validate_certs = module_params.get('validate_certs')
-        auth_realm = module_params.get('auth_realm')
-        client_id = module_params.get('auth_client_id')
+        auth_client_id = module_params.get('auth_client_id')
+        auth_client_secret = module_params.get('auth_client_secret')
         auth_username = module_params.get('auth_username')
-        auth_password = module_params.get('auth_password')
-        client_secret = module_params.get('auth_client_secret')
-        auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
-        temp_payload = {
-            'grant_type': 'password',
-            'client_id': client_id,
-            'client_secret': client_secret,
-            'username': auth_username,
-            'password': auth_password,
-        }
-        # Remove empty items, for instance missing client_secret
-        payload = dict(
-            (k, v) for k, v in temp_payload.items() if v is not None)
-        try:
-            r = json.loads(to_native(open_url(auth_url, method='POST',
-                                              validate_certs=validate_certs,
-                                              data=urlencode(payload)).read()))
-        except ValueError as e:
-            raise KeycloakError(
-                'API returned invalid JSON when trying to obtain access token from %s: %s'
-                % (auth_url, str(e)))
-        except Exception as e:
-            raise KeycloakError('Could not obtain access token from %s: %s'
-                                % (auth_url, str(e)))
+        if auth_client_id is not None and auth_client_secret is not None and auth_username is None:
+            token = _request_token_using_client_credentials(module_params)
+        else:
+            token = _request_token_using_credentials(module_params)

-    try:
-        token = r['access_token']
-    except KeyError:
-        raise KeycloakError(
-            'Could not obtain access token from %s' % auth_url)
     return {
         'Authorization': 'Bearer ' + token,
         'Content-Type': 'application/json'
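A hedged usage sketch of `get_token` outside of a module follows; the parameter values are placeholders shaped like `AnsibleModule.params` for the argument spec above.

```python
# Hypothetical parameter dict, mirroring keycloak_argument_spec() defaults.
params = {
    'auth_keycloak_url': 'https://keycloak.example.com',  # placeholder
    'auth_realm': 'master',
    'auth_client_id': 'admin-cli',
    'auth_client_secret': None,
    'auth_username': 'admin',
    'auth_password': 'secret',
    'validate_certs': True,
    'connection_timeout': 10,
    'http_agent': 'Ansible',
    'token': None,
    'refresh_token': None,
}
headers = get_token(params)
# -> {'Authorization': 'Bearer <jwt>', 'Content-Type': 'application/json'}
```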
""" if isinstance(struct1, list) and isinstance(struct2, list): + if not struct1 and not struct2: + return True for item1 in struct1: if isinstance(item1, (list, dict)): for item2 in struct2: - if not is_struct_included(item1, item2, exclude): - return False + if is_struct_included(item1, item2, exclude): + break + else: + return False else: if item1 not in struct2: return False return True elif isinstance(struct1, dict) and isinstance(struct2, dict): + if not struct1 and not struct2: + return True try: for key in struct1: if not (exclude and key in exclude): if not is_struct_included(struct1[key], struct2[key], exclude): return False - return True except KeyError: return False + return True elif isinstance(struct1, bool) and isinstance(struct2, bool): return struct1 == struct2 else: @@ -224,12 +356,151 @@ class KeycloakAPI(object): """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which is obtained through OpenID connect """ + def __init__(self, module, connection_header): self.module = module self.baseurl = self.module.params.get('auth_keycloak_url') self.validate_certs = self.module.params.get('validate_certs') + self.connection_timeout = self.module.params.get('connection_timeout') self.restheaders = connection_header + self.http_agent = self.module.params.get('http_agent') + def _request(self, url, method, data=None): + """ Makes a request to Keycloak and returns the raw response. + If a 401 is returned, attempts to re-authenticate + using first the module's refresh_token (if provided) + and then the module's username/password (if provided). + On successful re-authentication, the new token is stored + in the restheaders for future requests. + + :param url: request path + :param method: request method (e.g., 'GET', 'POST', etc.) 
@@ -224,12 +356,151 @@ class KeycloakAPI(object):
     """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
         is obtained through OpenID connect
     """
+
     def __init__(self, module, connection_header):
         self.module = module
         self.baseurl = self.module.params.get('auth_keycloak_url')
         self.validate_certs = self.module.params.get('validate_certs')
+        self.connection_timeout = self.module.params.get('connection_timeout')
         self.restheaders = connection_header
+        self.http_agent = self.module.params.get('http_agent')
+
+    def _request(self, url, method, data=None):
+        """ Makes a request to Keycloak and returns the raw response.
+            If a 401 is returned, attempts to re-authenticate, trying first the
+            module's refresh_token, then its auth_username/auth_password, and
+            finally its auth_client_id/auth_client_secret (each if provided).
+            On successful re-authentication, the new token is stored
+            in the restheaders for future requests.
+
+        :param url: request path
+        :param method: request method (e.g., 'GET', 'POST', etc.)
+        :param data: (optional) data for request
+        :return: raw API response
+        """
+        def make_request_catching_401():
+            try:
+                return open_url(url, method=method, data=data,
+                                http_agent=self.http_agent, headers=self.restheaders,
+                                timeout=self.connection_timeout,
+                                validate_certs=self.validate_certs)
+            except HTTPError as e:
+                if e.code != 401:
+                    raise e
+                return e
+
+        r = make_request_catching_401()
+
+        if isinstance(r, Exception):
+            # Try to refresh token and retry, if available
+            refresh_token = self.module.params.get('refresh_token')
+            if refresh_token is not None:
+                try:
+                    token = _request_token_using_refresh_token(self.module.params)
+                    self.restheaders['Authorization'] = 'Bearer ' + token
+
+                    r = make_request_catching_401()
+                except KeycloakError as e:
+                    # Token refresh returns 400 if the token is expired/invalid, so continue on if we get a 400
+                    if e.authError is not None and e.authError.code != 400:
+                        raise e
+
+        if isinstance(r, Exception):
+            # Try to re-auth with username/password, if available
+            auth_username = self.module.params.get('auth_username')
+            auth_password = self.module.params.get('auth_password')
+            if auth_username is not None and auth_password is not None:
+                token = _request_token_using_credentials(self.module.params)
+                self.restheaders['Authorization'] = 'Bearer ' + token
+
+                r = make_request_catching_401()
+
+        if isinstance(r, Exception):
+            # Try to re-auth with client_id and client_secret, if available
+            auth_client_id = self.module.params.get('auth_client_id')
+            auth_client_secret = self.module.params.get('auth_client_secret')
+            if auth_client_id is not None and auth_client_secret is not None:
+                try:
+                    token = _request_token_using_client_credentials(self.module.params)
+                    self.restheaders['Authorization'] = 'Bearer ' + token
+
+                    r = make_request_catching_401()
+                except KeycloakError as e:
+                    # The token endpoint also returns 400 for an invalid request, so continue on if we get a 400
+                    if e.authError is not None and e.authError.code != 400:
+                        raise e
+
+        if isinstance(r, Exception):
+            # Either no re-auth options were available, or they all failed
+            raise r
+
+        return r
+
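The retry logic above reduces to a small pattern: swallow only 401s, walk the available re-authentication strategies in order, and retry after each one. A standalone sketch, with `fetch` and the refreshers as stand-ins for `open_url` and the token helpers:

```python
from urllib.error import HTTPError

def request_with_reauth(fetch, refreshers):
    """Try fetch(); on a 401, walk the re-auth strategies in order and retry."""
    def attempt():
        try:
            return fetch()
        except HTTPError as e:
            if e.code != 401:
                raise
            return e                      # swallow only 401s, keep the error

    r = attempt()
    for refresh in refreshers:            # e.g. refresh_token, password, client_credentials
        if not isinstance(r, Exception):
            break
        refresh()                         # updates the Authorization header
        r = attempt()
    if isinstance(r, Exception):
        raise r                           # all strategies failed
    return r
```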
+    def _request_and_deserialize(self, url, method, data=None):
+        """ Wraps the _request method with JSON deserialization of the response.
+
+        :param url: request path
+        :param method: request method (e.g., 'GET', 'POST', etc.)
+        :param data: (optional) data for request
+        :return: JSON-deserialized API response
+        """
+        return json.loads(to_native(self._request(url, method, data).read()))
+
+    def get_realm_info_by_id(self, realm='master'):
+        """ Obtain realm public info by id
+
+        :param realm: realm id
+        :return: dict of the realm public info representation, or None if no matching realm exists
+        """
+        realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
+
+        try:
+            return self._request_and_deserialize(realm_info_url, method='GET')
+
+        except HTTPError as e:
+            if e.code == 404:
+                return None
+            else:
+                self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
+        except ValueError as e:
+            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
+        except Exception as e:
+            self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
+
+    def get_realm_keys_metadata_by_id(self, realm='master'):
+        """Obtain realm keys metadata by id
+
+        :param realm: realm id
+
+        :return: None, or a 'KeysMetadataRepresentation'
+            (https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)
+            -- a dict containing the keys 'active' and 'keys', the former containing a mapping
+            from algorithms to key-ids, the latter containing a list of dicts with key
+            information.
+        """
+        realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm)
+
+        try:
+            return self._request_and_deserialize(realm_keys_metadata_url, method="GET")
+
+        except HTTPError as e:
+            if e.code == 404:
+                return None
+            else:
+                self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
+        except ValueError as e:
+            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
+        except Exception as e:
+            self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
+
+    # The Keycloak API expects the realm name (like `master`), not the ID, when fetching the realm data.
+ # See the Keycloak API docs: https://www.keycloak.org/docs-api/latest/rest-api/#_realms_admin def get_realm_by_id(self, realm='master'): """ Obtain realm representation by id @@ -239,15 +510,14 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(realm_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) @@ -264,11 +534,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='PUT', headers=self.restheaders, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='PUT', data=json.dumps(realmrep)) except Exception as e: - self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not update realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) def create_realm(self, realmrep): """ Create a realm in keycloak @@ -278,11 +547,10 @@ class KeycloakAPI(object): realm_url = URL_REALMS.format(url=self.baseurl) try: - return open_url(realm_url, method='POST', headers=self.restheaders, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='POST', data=json.dumps(realmrep)) except Exception as e: - self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), + exception=traceback.format_exc()) def delete_realm(self, realm="master"): """ Delete a realm from Keycloak @@ -293,11 +561,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(realm_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not delete realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) def get_clients(self, realm='master', filter=None): """ Obtains client representations for clients in a realm @@ -311,14 +578,13 @@ class KeycloakAPI(object): clientlist_url += '?clientId=%s' % filter try: - return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientlist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of clients for realm %s: %s' + % 
(realm, str(e))) def get_client_by_clientid(self, client_id, realm='master'): """ Get client representation by clientId @@ -342,15 +608,14 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not obtain client %s for realm %s: %s' + % (id, realm, str(e))) except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s' % (id, realm, str(e))) @@ -381,11 +646,10 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(client_url, method='PUT', headers=self.restheaders, - data=json.dumps(clientrep), validate_certs=self.validate_certs) + return self._request(client_url, method='PUT', data=json.dumps(clientrep)) except Exception as e: - self.module.fail_json(msg='Could not update client %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not update client %s in realm %s: %s' + % (id, realm, str(e))) def create_client(self, clientrep, realm="master"): """ Create a client in keycloak @@ -396,11 +660,10 @@ class KeycloakAPI(object): client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) try: - return open_url(client_url, method='POST', headers=self.restheaders, - data=json.dumps(clientrep), validate_certs=self.validate_certs) + return self._request(client_url, method='POST', data=json.dumps(clientrep)) except Exception as e: - self.module.fail_json(msg='Could not create client %s in realm %s: %s' - % (clientrep['clientId'], realm, str(e))) + self.fail_request(e, msg='Could not create client %s in realm %s: %s' + % (clientrep['clientId'], realm, str(e))) def delete_client(self, id, realm="master"): """ Delete a client from Keycloak @@ -412,11 +675,10 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(client_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(client_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete client %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not delete client %s in realm %s: %s' + % (id, realm, str(e))) def get_client_roles_by_id(self, cid, realm="master"): """ Fetch the roles of the a client on the Keycloak server. @@ -427,16 +689,14 @@ class KeycloakAPI(object): """ client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(client_roles_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_roles_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s" - % (cid, realm, str(e))) + self.fail_request(e, msg="Could not fetch rolemappings for client %s in realm %s: %s" + % (cid, realm, str(e))) - def get_client_role_by_name(self, gid, cid, name, realm="master"): + def get_client_role_id_by_name(self, cid, name, realm="master"): """ Get the role ID of a client. 
- :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param name: Name of the role. :param realm: Realm from which to obtain the rolemappings. @@ -448,7 +708,7 @@ class KeycloakAPI(object): return role['id'] return None - def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'): + def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'): """ Obtain client representation by id :param gid: ID of the group from which to obtain the rolemappings. @@ -457,35 +717,33 @@ class KeycloakAPI(object): :param realm: client from this realm :return: dict of rolemapping representation or None if none matching exist """ - rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: if rid == role['id']: return role except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) return None - def get_client_available_rolemappings(self, gid, cid, realm="master"): - """ Fetch the available role of a client in a specified goup on the Keycloak server. + def get_client_group_available_rolemappings(self, gid, cid, realm="master"): + """ Fetch the available role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). """ - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) - def get_client_composite_rolemappings(self, gid, cid, realm="master"): + def get_client_group_composite_rolemappings(self, gid, cid, realm="master"): """ Fetch the composite role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. @@ -493,16 +751,89 @@ class KeycloakAPI(object): :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). 
""" - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def get_role_by_id(self, rid, realm="master"): + """ Fetch a role by its id on the Keycloak server. + + :param rid: ID of the role. + :param realm: Realm from which to obtain the rolemappings. + :return: The role. + """ + client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid) + try: + return self._request_and_deserialize(client_roles_url, method="GET") + except Exception as e: + self.fail_request(e, msg="Could not fetch role for id %s in realm %s: %s" + % (rid, realm, str(e))) + + def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"): + """ Fetch a role by its id on the Keycloak server. + + :param rid: ID of the composite role. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The role. + """ + client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid) + try: + return self._request_and_deserialize(client_roles_url, method="GET") + except Exception as e: + self.fail_request(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s" + % (rid, cid, realm, str(e))) + + def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"): + """ Assign roles to composite role + + :param rid: ID of the composite role. + :param roles_rep: Representation of the roles to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid) + try: + self._request(available_rolemappings_url, method="POST", data=json.dumps(roles_rep)) + except Exception as e: + self.fail_request(e, msg="Could not assign roles to composite role %s and realm %s: %s" + % (rid, realm, str(e))) + + def add_group_realm_rolemapping(self, gid, role_rep, realm="master"): + """ Add the specified realm role to specified group on the Keycloak server. + + :param gid: ID of the group to add the role mapping. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid) + try: + self._request(url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could add realm role mappings for group %s, realm %s: %s" + % (gid, realm, str(e))) + + def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"): + """ Delete the specified realm role from the specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. 
+    def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"):
+        """ Delete the specified realm role from the specified group on the Keycloak server.
+
+        :param gid: ID of the group from which to remove the role mapping.
+        :param role_rep: Representation of the role to remove.
+        :param realm: Realm in which the group resides.
+        :return: None.
+        """
+        url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
+        try:
+            self._request(url, method="DELETE", data=json.dumps(role_rep))
+        except Exception as e:
+            self.fail_request(e, msg="Could not delete realm role mappings for group %s, realm %s: %s"
+                              % (gid, realm, str(e)))

     def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
-        """ Fetch the composite role of a client in a specified goup on the Keycloak server.
+        """ Add a client role mapping to a specified group on the Keycloak server.

         :param gid: ID of the group to which to add the rolemapping.
         :param cid: ID of the client from which the role is taken.
@@ -510,12 +841,12 @@ class KeycloakAPI(object):
         :param realm: Realm from which to obtain the rolemappings.
         :return: None.
         """
-        available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+        available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            open_url(available_rolemappings_url, method="POST", headers=self.restheaders, data=json.dumps(role_rep), validate_certs=self.validate_certs)
+            self._request(available_rolemappings_url, method="POST", data=json.dumps(role_rep))
         except Exception as e:
-            self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
-                                  % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not add rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))

     def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"):
         """ Delete the rolemapping of a client in a specified group on the Keycloak server.
@@ -526,12 +857,201 @@ class KeycloakAPI(object):
         :param realm: Realm from which to obtain the rolemappings.
         :return: None.
         """
-        available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+        available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            open_url(available_rolemappings_url, method="DELETE", headers=self.restheaders, validate_certs=self.validate_certs)
+            self._request(available_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
         except Exception as e:
-            self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
-                                  % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not delete rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))
+
+    def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
+        """ Obtain a client role mapping of a user by role id
+
+        :param uid: ID of the user from which to obtain the rolemappings.
+        :param cid: ID of the client from which to obtain the rolemappings.
+        :param rid: ID of the role.
+        :param realm: realm from which to obtain the rolemappings
+        :return: dict of rolemapping representation or None if none matching exist
+        """
+        rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+        try:
+            rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
+            for role in rolemappings:
+                if rid == role['id']:
+                    return role
+        except Exception as e:
+            self.fail_request(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
+                              % (cid, uid, realm, str(e)))
+        return None
+
+    def get_client_user_available_rolemappings(self, uid, cid, realm="master"):
+        """ Fetch the available roles of a client for a specified user on the Keycloak server.
+
+        :param uid: ID of the user from which to obtain the rolemappings.
+        :param cid: ID of the client from which to obtain the rolemappings.
+        :param realm: Realm from which to obtain the rolemappings.
+        :return: The available rolemappings of the specified client and user of the realm (default "master").
+        """
+        available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+        try:
+            return self._request_and_deserialize(available_rolemappings_url, method="GET")
+        except Exception as e:
+            self.fail_request(e, msg="Could not fetch available rolemappings for client %s and user %s, realm %s: %s"
+                              % (cid, uid, realm, str(e)))
+
+    def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
+        """ Fetch the composite roles of a client for a specified user on the Keycloak server.
+
+        :param uid: ID of the user from which to obtain the rolemappings.
+        :param cid: ID of the client from which to obtain the rolemappings.
+        :param realm: Realm from which to obtain the rolemappings.
+        :return: The composite rolemappings of the specified client and user of the realm (default "master").
+        """
+        composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+        try:
+            return self._request_and_deserialize(composite_rolemappings_url, method="GET")
+        except Exception as e:
+            self.fail_request(e, msg="Could not fetch composite rolemappings for user %s of realm %s: %s"
+                              % (uid, realm, str(e)))
+
+    def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
+        """ Obtain a realm role mapping of a user by role id
+
+        :param uid: ID of the user from which to obtain the rolemappings.
+        :param rid: ID of the role.
+        :param realm: realm from which to obtain the rolemappings
+        :return: dict of rolemapping representation or None if none matching exist
+        """
+        rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
+        try:
+            rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
+            for role in rolemappings:
+                if rid == role['id']:
+                    return role
+        except Exception as e:
+            self.fail_request(e, msg="Could not fetch rolemappings for user %s, realm %s: %s"
+                              % (uid, realm, str(e)))
+        return None
+
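These lookup helpers make idempotent role assignment straightforward; a hedged sketch, with `kc` a `KeycloakAPI` instance, placeholder IDs, and `add_user_rolemapping` as defined further below:

```python
uid = "11111111-user-uuid"                                    # placeholder
role = kc.get_role_by_id("22222222-role-uuid", realm="demo")  # placeholder
if kc.get_realm_user_rolemapping_by_id(uid, role["id"], realm="demo") is None:
    kc.add_user_rolemapping(uid, None, [role], realm="demo")  # cid=None -> realm-level mapping
```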
+ """ + available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid) + try: + return self._request_and_deserialize(available_rolemappings_url, method="GET") + except Exception as e: + self.fail_request(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s" + % (uid, realm, str(e))) + + def get_realm_user_composite_rolemappings(self, uid, realm="master"): + """ Fetch the composite role of a realm for a specified user on the Keycloak server. + + :param uid: ID of the user from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The effective rollemappings of specified client and user of the realm (default "master"). + """ + composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid) + try: + return self._request_and_deserialize(composite_rolemappings_url, method="GET") + except Exception as e: + self.fail_request(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s" + % (uid, realm, str(e))) + + def get_user_by_username(self, username, realm="master"): + """ Fetch a keycloak user within a realm based on its username. + + If the user does not exist, None is returned. + :param username: Username of the user to fetch. + :param realm: Realm in which the user resides; default 'master' + """ + users_url = URL_USERS.format(url=self.baseurl, realm=realm) + users_url += '?username=%s&exact=true' % username + try: + userrep = None + users = self._request_and_deserialize(users_url, method='GET') + for user in users: + if user['username'] == username: + userrep = user + break + return userrep + + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s' + % (realm, username, str(e))) + except Exception as e: + self.fail_request(e, msg='Could not obtain the user for realm %s and username %s: %s' + % (realm, username, str(e))) + + def get_service_account_user_by_client_id(self, client_id, realm="master"): + """ Fetch a keycloak service account user within a realm based on its client_id. + + If the user does not exist, None is returned. + :param client_id: clientId of the service account user to fetch. + :param realm: Realm in which the user resides; default 'master' + """ + cid = self.get_client_id(client_id, realm=realm) + + service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid) + try: + return self._request_and_deserialize(service_account_user_url, method='GET') + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s' + % (realm, client_id, str(e))) + except Exception as e: + self.fail_request(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s' + % (realm, client_id, str(e))) + + def add_user_rolemapping(self, uid, cid, role_rep, realm="master"): + """ Assign a realm or client role to a specified user on the Keycloak server. + + :param uid: ID of the user roles are assigned to. + :param cid: ID of the client from which to obtain the rolemappings. If empty, roles are from the realm + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. 
+ """ + if cid is None: + user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) + try: + self._request(user_realm_rolemappings_url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s" + % (uid, realm, json.dumps(role_rep), str(e))) + else: + user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + self._request(user_client_rolemappings_url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s" + % (cid, uid, realm, json.dumps(role_rep), str(e))) + + def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"): + """ Delete the rolemapping of a client in a specified user on the Keycloak server. + + :param uid: ID of the user from which to remove the rolemappings. + :param cid: ID of the client from which to remove the rolemappings. + :param role_rep: Representation of the role to remove from rolemappings. + :param realm: Realm from which to remove the rolemappings. + :return: None. + """ + if cid is None: + user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) + try: + self._request(user_realm_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could not remove roles %s from userId %s, realm %s: %s" + % (json.dumps(role_rep), uid, realm, str(e))) + else: + user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + self._request(user_client_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s" + % (json.dumps(role_rep), cid, uid, realm, str(e))) def get_client_templates(self, realm='master'): """ Obtains client template representations for client templates in a realm @@ -542,14 +1062,13 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of client templates for realm %s: %s' + % (realm, str(e))) def get_client_template_by_id(self, id, realm='master'): """ Obtain client template representation by id @@ -561,14 +1080,13 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm) try: - return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s' % (id, realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s' - % (id, 
realm, str(e))) + self.fail_request(e, msg='Could not obtain client template %s for realm %s: %s' + % (id, realm, str(e))) def get_client_template_by_name(self, name, realm='master'): """ Obtain client template representation by name @@ -607,11 +1125,10 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(url, method='PUT', headers=self.restheaders, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) + return self._request(url, method='PUT', data=json.dumps(clienttrep)) except Exception as e: - self.module.fail_json(msg='Could not update client template %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not update client template %s in realm %s: %s' + % (id, realm, str(e))) def create_client_template(self, clienttrep, realm="master"): """ Create a client in keycloak @@ -622,11 +1139,10 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) try: - return open_url(url, method='POST', headers=self.restheaders, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) + return self._request(url, method='POST', data=json.dumps(clienttrep)) except Exception as e: - self.module.fail_json(msg='Could not create client template %s in realm %s: %s' - % (clienttrep['clientId'], realm, str(e))) + self.fail_request(e, msg='Could not create client template %s in realm %s: %s' + % (clienttrep['clientId'], realm, str(e))) def delete_client_template(self, id, realm="master"): """ Delete a client template from Keycloak @@ -638,11 +1154,10 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete client template %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not delete client template %s in realm %s: %s' + % (id, realm, str(e))) def get_clientscopes(self, realm="master"): """ Fetch the name and ID of all clientscopes on the Keycloak server. @@ -655,11 +1170,10 @@ class KeycloakAPI(object): """ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(clientscopes_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientscopes_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg="Could not fetch list of clientscopes in realm %s: %s" + % (realm, str(e))) def get_clientscope_by_clientscopeid(self, cid, realm="master"): """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. 
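A hedged sketch of the two-step clientscope read pattern the docstrings describe (list, then fetch by ID); `kc` is a `KeycloakAPI` instance, and the `protocolMappers` key follows Keycloak's ClientScopeRepresentation:

```python
for scope in kc.get_clientscopes(realm="demo"):
    full = kc.get_clientscope_by_clientscopeid(scope["id"], realm="demo")
    if full is not None:
        print(full["name"], [m["name"] for m in full.get("protocolMappers", [])])
```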
@@ -672,15 +1186,14 @@ class KeycloakAPI(object): """ clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(clientscope_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientscope_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" - % (cid, realm, str(e))) + self.fail_request(e, msg="Could not fetch clientscope %s in realm %s: %s" + % (cid, realm, str(e))) except Exception as e: self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s" % (cid, realm, str(e))) @@ -717,11 +1230,10 @@ class KeycloakAPI(object): """ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) try: - return open_url(clientscopes_url, method='POST', headers=self.restheaders, - data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + return self._request(clientscopes_url, method='POST', data=json.dumps(clientscoperep)) except Exception as e: - self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s" - % (clientscoperep['name'], realm, str(e))) + self.fail_request(e, msg="Could not create clientscope %s in realm %s: %s" + % (clientscoperep['name'], realm, str(e))) def update_clientscope(self, clientscoperep, realm="master"): """ Update an existing clientscope. @@ -732,12 +1244,11 @@ class KeycloakAPI(object): clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id']) try: - return open_url(clientscope_url, method='PUT', headers=self.restheaders, - data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + return self._request(clientscope_url, method='PUT', data=json.dumps(clientscoperep)) except Exception as e: - self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s' - % (clientscoperep['name'], realm, str(e))) + self.fail_request(e, msg='Could not update clientscope %s in realm %s: %s' + % (clientscoperep['name'], realm, str(e))) def delete_clientscope(self, name=None, cid=None, realm="master"): """ Delete a clientscope. One of name or cid must be provided. @@ -754,8 +1265,8 @@ class KeycloakAPI(object): # prefer an exception since this is almost certainly a programming error in the module itself. raise Exception("Unable to delete group - one of group ID or name must be provided.") - # only lookup the name if cid isn't provided. - # in the case that both are provided, prefer the ID, since it's one + # only lookup the name if cid is not provided. + # in the case that both are provided, prefer the ID, since it is one # less lookup. if cid is None and name is not None: for clientscope in self.get_clientscopes(realm=realm): @@ -770,11 +1281,10 @@ class KeycloakAPI(object): # should have a good cid by here. clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl) try: - return open_url(clientscope_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(clientscope_url, method='DELETE') except Exception as e: - self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e))) + self.fail_request(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e))) def get_clientscope_protocolmappers(self, cid, realm="master"): """ Fetch the name and ID of all clientscopes on the Keycloak server. 
@@ -788,11 +1298,10 @@
         """
         protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm)
         try:
-            return json.loads(to_native(open_url(protocolmappers_url, method="GET", headers=self.restheaders,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(protocolmappers_url, method="GET")
         except Exception as e:
-            self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s"
-                                  % (realm, str(e)))
+            self.fail_request(e, msg="Could not fetch list of protocolmappers in realm %s: %s"
+                              % (realm, str(e)))

     def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
         """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
@@ -807,15 +1316,14 @@
         """
         protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid)
         try:
-            return json.loads(to_native(open_url(protocolmapper_url, method="GET", headers=self.restheaders,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(protocolmapper_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
-                                      % (pid, realm, str(e)))
+                self.fail_request(e, msg="Could not fetch protocolmapper %s in realm %s: %s"
+                                  % (pid, realm, str(e)))
         except Exception as e:
             self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
                                   % (cid, realm, str(e)))
@@ -854,11 +1362,10 @@
         """
         protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm)
         try:
-            return open_url(protocolmappers_url, method='POST', headers=self.restheaders,
-                            data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+            return self._request(protocolmappers_url, method='POST', data=json.dumps(mapper_rep))
         except Exception as e:
-            self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s"
-                                  % (mapper_rep['name'], realm, str(e)))
+            self.fail_request(e, msg="Could not create protocolmapper %s in realm %s: %s"
+                              % (mapper_rep['name'], realm, str(e)))

     def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
         """ Update an existing clientscope.
@@ -870,12 +1377,177 @@
         protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id'])

         try:
-            return open_url(protocolmapper_url, method='PUT', headers=self.restheaders,
-                            data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+            return self._request(protocolmapper_url, method='PUT', data=json.dumps(mapper_rep))
         except Exception as e:
-            self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
-                                  % (mapper_rep, realm, str(e)))
+            self.fail_request(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
+                              % (mapper_rep, realm, str(e)))
+
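A hedged usage sketch of the scope-type getters defined below; `kc` is a `KeycloakAPI` instance and the realm and client names are placeholders:

```python
realm_defaults = kc.get_default_clientscopes(realm="demo")
client_optionals = kc.get_optional_clientscopes(realm="demo", client_id="my-app")
print(sorted(s["name"] for s in realm_defaults))
print(sorted(s["name"] for s in client_optionals))
```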
+    def get_default_clientscopes(self, realm, client_id=None):
+        """Fetch the name and ID of the default clientscopes of a realm or client.
+
+        To fetch the full data of the client scope, make a subsequent call to
+        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
+
+        :param realm: Realm in which the clientscope resides.
+        :param client_id: The client in which the clientscope resides.
+        :return: The default clientscopes of this realm or client
+        """
+        url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
+        return self._get_clientscopes_of_type(realm, url, 'default', client_id)
+
+    def get_optional_clientscopes(self, realm, client_id=None):
+        """Fetch the name and ID of the optional clientscopes of a realm or client.
+
+        To fetch the full data of the client scope, make a subsequent call to
+        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
+
+        :param realm: Realm in which the clientscope resides.
+        :param client_id: The client in which the clientscope resides.
+        :return: The optional clientscopes of this realm or client
+        """
+        url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
+        return self._get_clientscopes_of_type(realm, url, 'optional', client_id)
+
+    def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
+        """Fetch the name and ID of the clientscopes of the given type on the Keycloak server.
+
+        To fetch the full data of the client scope, make a subsequent call to
+        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
+
+        :param realm: Realm in which the clientscope resides.
+        :param url_template: The URL template for the given scope type.
+        :param scope_type: This can be either 'optional' or 'default'.
+        :param client_id: The client in which the clientscope resides.
+        :return: The clientscopes of the specified type of this realm
+        """
+        if client_id is None:
+            clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
+            try:
+                return self._request_and_deserialize(clientscopes_url, method="GET")
+            except Exception as e:
+                self.fail_request(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
+        else:
+            cid = self.get_client_id(client_id=client_id, realm=realm)
+            clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
+            try:
+                return self._request_and_deserialize(clientscopes_url, method="GET")
+            except Exception as e:
+                self.fail_request(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, str(e)))
+
+    def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
+        """Decides which url to use.
+        :param scope_type: This can be either 'optional' or 'default'.
+        :param client_id: The client in which the clientscope resides.
+        """
+        if client_id is None:
+            if scope_type == "default":
+                return URL_DEFAULT_CLIENTSCOPE
+            if scope_type == "optional":
+                return URL_OPTIONAL_CLIENTSCOPE
+        else:
+            if scope_type == "default":
+                return URL_CLIENT_DEFAULT_CLIENTSCOPE
+            if scope_type == "optional":
+                return URL_CLIENT_OPTIONAL_CLIENTSCOPE
+
+    def add_default_clientscope(self, id, realm="master", client_id=None):
+        """Add a client scope as default either on realm or client level.
+
+        :param id: Client scope Id.
+        :param realm: Realm in which the clientscope resides.
+        :param client_id: The client in which the clientscope resides.
+        """
+        self._action_type_clientscope(id, client_id, "default", realm, 'add')
+
+    def add_optional_clientscope(self, id, realm="master", client_id=None):
+        """Add a client scope as optional either on realm or client level.
+
+        :param id: Client scope Id.
+        :param realm: Realm in which the clientscope resides.
+        :param client_id: The client in which the clientscope resides.
+ """ + self._action_type_clientscope(id, client_id, "optional", realm, 'add') + + def delete_default_clientscope(self, id, realm="master", client_id=None): + """Remove a client scope as default either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "default", realm, 'delete') + + def delete_optional_clientscope(self, id, realm="master", client_id=None): + """Remove a client scope as optional either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "optional", realm, 'delete') + + def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'): + """ Delete or add a clientscope of type. + :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID. + :param client_id: The ID of the clientscope (preferred to name). + :param scope_type 'default' or 'optional' + :param realm: The realm in which this group resides, default "master". + """ + cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm) + # should have a good cid by here. + clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl) + try: + method = 'PUT' if action == "add" else 'DELETE' + return self._request(clientscope_type_url, method=method) + + except Exception as e: + place = 'realm' if client_id is None else 'client ' + client_id + self.fail_request(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e))) + + def create_clientsecret(self, id, realm="master"): + """ Generate a new client secret by id + + :param id: id (not clientId) of client to be queried + :param realm: client from this realm + :return: dict of credential representation + """ + clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id) + + try: + return self._request_and_deserialize(clientsecret_url, method='POST') + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) + + def get_clientsecret(self, id, realm="master"): + """ Obtain client secret by id + + :param id: id (not clientId) of client to be queried + :param realm: client from this realm + :return: dict of credential representation + """ + clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id) + + try: + return self._request_and_deserialize(clientsecret_url, method='GET') + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) def get_groups(self, realm="master"): """ Fetch the name and ID of all groups on the Keycloak server. 

     def get_groups(self, realm="master"):
         """ Fetch the name and ID of all groups on the Keycloak server.
@@ -887,11 +1559,10 @@
         """
         groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
         try:
-            return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(groups_url, method="GET")
         except Exception as e:
-            self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
-                                  % (realm, str(e)))
+            self.fail_request(e, msg="Could not fetch list of groups in realm %s: %s"
+                              % (realm, str(e)))

     def get_group_by_groupid(self, gid, realm="master"):
         """ Fetch a keycloak group from the provided realm using the group's unique ID.
@@ -904,20 +1575,33 @@
         """
         groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
         try:
-            return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
-                                                 validate_certs=self.validate_certs).read()))
-
+            return self._request_and_deserialize(groups_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
-                                      % (gid, realm, str(e)))
+                self.fail_request(e, msg="Could not fetch group %s in realm %s: %s"
+                                  % (gid, realm, str(e)))
         except Exception as e:
             self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
                                   % (gid, realm, str(e)))

-    def get_group_by_name(self, name, realm="master"):
+    def get_subgroups(self, parent, realm="master"):
+        if 'subGroupCount' in parent:
+            # Since version 23, when GETting a group Keycloak does not
+            # return subGroups but only a subGroupCount.
+            # Children must be fetched in a second request.
+            if parent['subGroupCount'] == 0:
+                group_children = []
+            else:
+                group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + "?max=" + str(parent['subGroupCount'])
+                group_children = self._request_and_deserialize(group_children_url, method="GET")
+            subgroups = group_children
+        else:
+            subgroups = parent['subGroups']
+        return subgroups
+
+    def get_group_by_name(self, name, realm="master", parents=None):
         """ Fetch a keycloak group within a realm based on its name.

         The Keycloak API does not allow filtering of the Groups resource by name.
@@ -927,10 +1611,18 @@
         If the group does not exist, None is returned.
         :param name: Name of the group to fetch.
         :param realm: Realm in which the group resides; default 'master'
+        :param parents: Optional list of parents when the group to look for is a subgroup
         """
-        groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
         try:
-            all_groups = self.get_groups(realm=realm)
+            if parents:
+                parent = self.get_subgroup_direct_parent(parents, realm)
+
+                if not parent:
+                    return None
+
+                all_groups = self.get_subgroups(parent, realm)
+            else:
+                all_groups = self.get_groups(realm=realm)

             for group in all_groups:
                 if group['name'] == name:
@@ -942,6 +1634,102 @@ class KeycloakAPI(object):
             self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
                                   % (name, realm, str(e)))

+    def _get_normed_group_parent(self, parent):
+        """ Converts parent dict information into an easier-to-use form.
+
+        :param parent: parent describing dict
+        """
+        if parent['id']:
+            return (parent['id'], True)
+
+        return (parent['name'], False)
+
+    def get_subgroup_by_chain(self, name_chain, realm="master"):
+        """ Access a subgroup API object by walking down a given name/id chain.
+
+ Groups can be given either by name or by ID; the first element
+ must either be a top-level group or given as an ID, and all parents must exist.
+
+ If the group cannot be found, None is returned.
+ :param name_chain: Top-down ordered list of subgroup parents (IDs or names) plus its own name at the end
+ :param realm: Realm in which the group resides; default 'master'
+ """
+ cp = name_chain[0]
+
+ # for the 1st parent in the chain we must query the server
+ cp, is_id = self._get_normed_group_parent(cp)
+
+ if is_id:
+ tmp = self.get_group_by_groupid(cp, realm=realm)
+ else:
+ # given as name, assume top-level group
+ tmp = self.get_group_by_name(cp, realm=realm)
+
+ if not tmp:
+ return None
+
+ for p in name_chain[1:]:
+ for sg in self.get_subgroups(tmp, realm):
+ pv, is_id = self._get_normed_group_parent(p)
+
+ if is_id:
+ cmpkey = "id"
+ else:
+ cmpkey = "name"
+
+ if pv == sg[cmpkey]:
+ tmp = sg
+ break
+
+ if not tmp:
+ return None
+
+ return tmp
+
+ def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None):
+ """ Get the Keycloak direct parent group API object for a given chain of parents.
+
+ To work with the API for subgroups we do not actually need
+ to "walk the whole tree" for nested groups; we only need to know
+ the ID of the direct predecessor of the current subgroup. This
+ method guarantees us this information while getting there with
+ as little work as possible.
+
+ Note that the given parent list may be incomplete at the
+ upper levels as long as it starts with an ID instead of a name.
+
+ If the group does not exist, None is returned.
+ :param parents: Top-down ordered list of subgroup parents
+ :param realm: Realm in which the group resides; default 'master'
+ """
+ if children_to_resolve is None:
+ # start recursion by reversing parents (in optimal cases
+ # we don't need to walk the whole tree upwards)
+ parents = list(reversed(parents))
+ children_to_resolve = []
+
+ if not parents:
+ # walked the complete parents list to the top, all names, no IDs;
+ # try to resolve it assuming the list is complete and the 1st
+ # element is a top-level group
+ return self.get_subgroup_by_chain(list(reversed(children_to_resolve)), realm=realm)
+
+ cp = parents[0]
+ unused, is_id = self._get_normed_group_parent(cp)
+
+ if is_id:
+ # current parent is given as ID, we can stop walking
+ # upwards searching for an entry point
+ return self.get_subgroup_by_chain([cp] + list(reversed(children_to_resolve)), realm=realm)
+ else:
+ # current parent is given as name, it must be resolved
+ # later, try next parent (recurse)
+ children_to_resolve.append(cp)
+ return self.get_subgroup_direct_parent(
+ parents[1:],
+ realm=realm, children_to_resolve=children_to_resolve
+ )
+
 def create_group(self, grouprep, realm="master"):
 """ Create a Keycloak group.
@@ -950,11 +1738,37 @@ class KeycloakAPI(object):
 """
 groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
 try:
- return open_url(groups_url, method='POST', headers=self.restheaders,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ return self._request(groups_url, method='POST', data=json.dumps(grouprep))
 except Exception as e:
- self.module.fail_json(msg="Could not create group %s in realm %s: %s"
- % (grouprep['name'], realm, str(e)))
+ self.fail_request(e, msg="Could not create group %s in realm %s: %s"
+ % (grouprep['name'], realm, str(e)))
+
+ def create_subgroup(self, parents, grouprep, realm="master"):
+ """ Create a Keycloak subgroup.
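[Editor's note, not part of the patch: the recursion in `get_subgroup_direct_parent` can be hard to follow. Its net effect is a bottom-up scan for the first chain element that carries an ID (the "entry point"); only the elements below it still need resolving. A pure-Python, runnable illustration with invented values.]

```python
# Illustration only; the chain entries and the ID value are made up.
chain = [
    {"name": "org", "id": None},          # may never be looked up if an ID appears later
    {"name": None, "id": "b51f-0c8"},     # entry point: resolution can start here
    {"name": "team", "id": None},         # still needs resolving below the entry point
]

def find_entry_point(parents):
    walked = []
    for p in reversed(parents):           # walk upwards from the direct parent
        if p["id"]:
            return p, list(reversed(walked))
        walked.append(p)
    return None, list(reversed(walked))   # all names: the full chain is needed

anchor, below = find_entry_point(chain)
print(anchor["id"], [p["name"] for p in below])   # b51f-0c8 ['team']
```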
+ + :param parents: list of one or more parent groups + :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name. + :return: HTTPResponse object on success + """ + parent_id = "---UNDETERMINED---" + try: + parent_id = self.get_subgroup_direct_parent(parents, realm) + + if not parent_id: + raise Exception( + "Could not determine subgroup parent ID for given" + " parent chain {0}. Assure that all parents exist" + " already and the list is complete and properly" + " ordered, starts with an ID or starts at the" + " top level".format(parents) + ) + + parent_id = parent_id["id"] + url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id) + return self._request(url, method='POST', data=json.dumps(grouprep)) + except Exception as e: + self.fail_request(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s" + % (grouprep['name'], parent_id, realm, str(e))) def update_group(self, grouprep, realm="master"): """ Update an existing group. @@ -965,11 +1779,10 @@ class KeycloakAPI(object): group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id']) try: - return open_url(group_url, method='PUT', headers=self.restheaders, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(group_url, method='PUT', data=json.dumps(grouprep)) except Exception as e: - self.module.fail_json(msg='Could not update group %s in realm %s: %s' - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg='Could not update group %s in realm %s: %s' + % (grouprep['name'], realm, str(e))) def delete_group(self, name=None, groupid=None, realm="master"): """ Delete a group. One of name or groupid must be provided. @@ -987,7 +1800,7 @@ class KeycloakAPI(object): raise Exception("Unable to delete group - one of group ID or name must be provided.") # only lookup the name if groupid isn't provided. - # in the case that both are provided, prefer the ID, since it's one + # in the case that both are provided, prefer the ID, since it is one # less lookup. if groupid is None and name is not None: for group in self.get_groups(realm=realm): @@ -1002,10 +1815,9 @@ class KeycloakAPI(object): # should have a good groupid by here. 
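[Editor's note, not part of the patch: a hypothetical ensure-style use of `create_subgroup` together with the `parents`-aware `get_group_by_name` from this PR. `kc` is assumed to be an initialized KeycloakAPI instance; group names are invented. Parent entries follow the module's parent dict shape with both `name` and `id` keys.]

```python
# Hypothetical sketch; `kc` and all names are assumptions.
def ensure_nested_group(kc, realm="master"):
    parents = [{"name": "engineering", "id": None}]   # top-level parent given by name
    rep = {"name": "backend"}                         # minimal GroupRepresentation
    if not kc.get_group_by_name("backend", realm=realm, parents=parents):
        kc.create_subgroup(parents, rep, realm=realm)
```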
group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl) try: - return open_url(group_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(group_url, method='DELETE') except Exception as e: - self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + self.fail_request(e, msg="Unable to delete group %s: %s" % (groupid, str(e))) def get_realm_roles(self, realm='master'): """ Obtains role representations for roles in a realm @@ -1015,14 +1827,13 @@ class KeycloakAPI(object): """ rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of roles for realm %s: %s' + % (realm, str(e))) def get_realm_role(self, name, realm='master'): """ Fetch a keycloak role from the provided realm using the role's name. @@ -1031,16 +1842,15 @@ class KeycloakAPI(object): :param name: Name of the role to fetch. :param realm: Realm in which the role resides; default 'master'. """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' % (name, realm, str(e))) @@ -1053,11 +1863,13 @@ class KeycloakAPI(object): """ roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return open_url(roles_url, method='POST', headers=self.restheaders, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + if "composites" in rolerep: + keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) + rolerep["composites"] = keycloak_compatible_composites + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.module.fail_json(msg='Could not create role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg='Could not create role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) def update_realm_role(self, rolerep, realm='master'): """ Update an existing realm role. @@ -1065,13 +1877,116 @@ class KeycloakAPI(object): :param rolerep: A RoleRepresentation of the updated role. 
:return HTTPResponse object on success
 """
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name'], safe=''))
 try:
- return open_url(role_url, method='PUT', headers=self.restheaders,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ composites = None
+ if "composites" in rolerep:
+ composites = copy.deepcopy(rolerep["composites"])
+ del rolerep["composites"]
+ role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep))
+ if composites is not None:
+ self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm)
+ return role_response
 except Exception as e:
- self.module.fail_json(msg='Could not update role %s in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not update role %s in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def get_role_composites(self, rolerep, clientid=None, realm='master'):
+ composite_url = ''
+ try:
+ if clientid is not None:
+ client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+ cid = client['id']
+ composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+ else:
+ composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+ # Get existing composites
+ return self._request_and_deserialize(composite_url, method='GET')
+ except Exception as e:
+ self.fail_request(e, msg='Could not get role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def create_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+ composite_url = ''
+ try:
+ if clientid is not None:
+ client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+ cid = client['id']
+ composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+ else:
+ composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+ # create new composites
+ return self._request(composite_url, method='POST', data=json.dumps(composites))
+ except Exception as e:
+ self.fail_request(e, msg='Could not create role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+ composite_url = ''
+ try:
+ if clientid is not None:
+ client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+ cid = client['id']
+ composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+ else:
+ composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+ # delete existing composites
+ return self._request(composite_url, method='DELETE', data=json.dumps(composites))
+ except Exception as e:
+ self.fail_request(e, msg='Could not delete role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def update_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+ # Get existing composites
+ existing_composites = self.get_role_composites(rolerep=rolerep, clientid=clientid, realm=realm)
+ composites_to_be_created = []
+ composites_to_be_deleted = []
+ for composite in composites:
+ composite_found = False
+ existing_composite_client = None
+ for existing_composite in existing_composites:
+ if existing_composite["clientRole"]:
+ existing_composite_client = self.get_client_by_id(existing_composite["containerId"], realm=realm)
+ if ("client_id" in composite
+ and composite['client_id'] is not None
+ and existing_composite_client["clientId"] == composite["client_id"]
+ and composite["name"] == existing_composite["name"]):
+ composite_found = True
+ break
+ else:
+ if (("client_id" not in composite or composite['client_id'] is None)
+ and composite["name"] == existing_composite["name"]):
+ composite_found = True
+ break
+ if not composite_found and ('state' not in composite or composite['state'] == 'present'):
+ if "client_id" in composite and composite['client_id'] is not None:
+ client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
+ for client_role in client_roles:
+ if client_role['name'] == composite['name']:
+ composites_to_be_created.append(client_role)
+ break
+ else:
+ realm_role = self.get_realm_role(name=composite["name"], realm=realm)
+ composites_to_be_created.append(realm_role)
+ elif composite_found and 'state' in composite and composite['state'] == 'absent':
+ if "client_id" in composite and composite['client_id'] is not None:
+ client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
+ for client_role in client_roles:
+ if client_role['name'] == composite['name']:
+ composites_to_be_deleted.append(client_role)
+ break
+ else:
+ realm_role = self.get_realm_role(name=composite["name"], realm=realm)
+ composites_to_be_deleted.append(realm_role)
+
+ if len(composites_to_be_created) > 0:
+ # create new composites
+ self.create_role_composites(rolerep=rolerep, composites=composites_to_be_created, clientid=clientid, realm=realm)
+ if len(composites_to_be_deleted) > 0:
+ # delete obsolete composites
+ self.delete_role_composites(rolerep=rolerep, composites=composites_to_be_deleted, clientid=clientid, realm=realm)

 def delete_realm_role(self, name, realm='master'):
 """ Delete a realm role.
@@ -1079,13 +1994,12 @@ class KeycloakAPI(object):
 :param name: The name of the role.
 :param realm: The realm in which this role resides, default "master".
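[Editor's note, not part of the patch: an invented example of the `composites` input the matching loop above consumes. Existing composites are matched by `name` (plus `client_id` for client roles), and entries without a `state` key default to `'present'`.]

```python
# Illustrative data only; role and client names are made up.
desired_composites = [
    {"name": "offline_access"},                                           # realm role, keep
    {"name": "manage-users", "client_id": "realm-management"},            # client role, keep
    {"name": "view-profile", "client_id": "account", "state": "absent"},  # remove if present
]
```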
""" - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return open_url(role_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(role_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg='Unable to delete role %s in realm %s: %s' + % (name, realm, str(e))) def get_client_roles(self, clientid, realm='master'): """ Obtains role representations for client roles in a specific client @@ -1100,14 +2014,13 @@ class KeycloakAPI(object): % (clientid, realm)) rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s' % (clientid, realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s' - % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not obtain list of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) def get_client_role(self, name, clientid, realm='master'): """ Fetch a keycloak client role from the provided realm using the role's name. @@ -1122,16 +2035,15 @@ class KeycloakAPI(object): if cid is None: self.module.fail_json(msg='Could not find client %s in realm %s' % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg='Could not fetch role %s in client %s of realm %s: %s' + % (name, clientid, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' % (name, clientid, realm, str(e))) @@ -1150,11 +2062,28 @@ class KeycloakAPI(object): % (clientid, realm)) roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(roles_url, method='POST', headers=self.restheaders, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + if "composites" in rolerep: + keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) + rolerep["composites"] = keycloak_compatible_composites + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg='Could not create role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) + + def convert_role_composites(self, composites): + keycloak_compatible_composites = { + 'client': {}, + 
'realm': [] + } + for composite in composites: + if 'state' not in composite or composite['state'] == 'present': + if "client_id" in composite and composite["client_id"] is not None: + if composite["client_id"] not in keycloak_compatible_composites["client"]: + keycloak_compatible_composites["client"][composite["client_id"]] = [] + keycloak_compatible_composites["client"][composite["client_id"]].append(composite["name"]) + else: + keycloak_compatible_composites["realm"].append(composite["name"]) + return keycloak_compatible_composites def update_client_role(self, rolerep, clientid, realm="master"): """ Update an existing client role. @@ -1168,13 +2097,19 @@ class KeycloakAPI(object): if cid is None: self.module.fail_json(msg='Could not find client %s in realm %s' % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'])) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'], safe='')) try: - return open_url(role_url, method='PUT', headers=self.restheaders, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + composites = None + if "composites" in rolerep: + composites = copy.deepcopy(rolerep["composites"]) + del rolerep['composites'] + update_role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep)) + if composites is not None: + self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm) + return update_role_response except Exception as e: - self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg='Could not update role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) def delete_client_role(self, name, clientid, realm="master"): """ Delete a role. One of name or roleid must be provided. @@ -1187,17 +2122,16 @@ class KeycloakAPI(object): if cid is None: self.module.fail_json(msg='Could not find client %s in realm %s' % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return open_url(role_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(role_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg='Unable to delete role %s for client %s in realm %s: %s' + % (name, clientid, realm, str(e))) def get_authentication_flow_by_alias(self, alias, realm='master'): """ - Get an authentication flow by it's alias + Get an authentication flow by its alias :param alias: Alias of the authentication flow to get. :param realm: Realm. :return: Authentication flow representation. 
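[Editor's note, not part of the patch: a standalone, runnable replica of the `convert_role_composites` transformation above, showing the wire format Keycloak expects (`client` keyed by clientId, `realm` as a flat list). Input values are invented.]

```python
def to_keycloak_composites(composites):
    out = {"client": {}, "realm": []}
    for c in composites:
        if c.get("state", "present") != "present":
            continue                                   # only 'present' entries are created
        if c.get("client_id") is not None:
            out["client"].setdefault(c["client_id"], []).append(c["name"])
        else:
            out["realm"].append(c["name"])
    return out

print(to_keycloak_composites([
    {"name": "offline_access"},
    {"name": "manage-users", "client_id": "realm-management"},
]))
# {'client': {'realm-management': ['manage-users']}, 'realm': ['offline_access']}
```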
@@ -1205,14 +2139,14 @@ class KeycloakAPI(object):
 try:
 authentication_flow = {}
 # Check if the authentication flow exists on the Keycloak serveraders
- authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET', headers=self.restheaders))
+ authentications = json.load(self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET'))
 for authentication in authentications:
 if authentication["alias"] == alias:
 authentication_flow = authentication
 break
 return authentication_flow
 except Exception as e:
- self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e)))
+ self.fail_request(e, msg="Unable to get authentication flow %s: %s" % (alias, str(e)))

 def delete_authentication_flow_by_id(self, id, realm='master'):
 """
@@ -1224,11 +2158,10 @@ class KeycloakAPI(object):
 flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id)

 try:
- return open_url(flow_url, method='DELETE', headers=self.restheaders,
- validate_certs=self.validate_certs)
+ return self._request(flow_url, method='DELETE')
 except Exception as e:
- self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete authentication flow %s in realm %s: %s'
+ % (id, realm, str(e)))

 def copy_auth_flow(self, config, realm='master'):
 """
@@ -1241,27 +2174,25 @@ class KeycloakAPI(object):
 new_name = dict(
 newName=config["alias"]
 )
- open_url(
+ self._request(
 URL_AUTHENTICATION_FLOW_COPY.format(
 url=self.baseurl,
 realm=realm,
- copyfrom=quote(config["copyFrom"])),
+ copyfrom=quote(config["copyFrom"], safe='')),
 method='POST',
- headers=self.restheaders,
 data=json.dumps(new_name))
 flow_list = json.load(
- open_url(
+ self._request(
 URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm),
- method='GET',
- headers=self.restheaders))
+ method='GET'))
 for flow in flow_list:
 if flow["alias"] == config["alias"]:
 return flow
 return None
 except Exception as e:
- self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_request(e, msg='Could not copy authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))

 def create_empty_auth_flow(self, config, realm='master'):
 """
@@ -1277,27 +2208,25 @@ class KeycloakAPI(object):
 description=config["description"],
 topLevel=True
 )
- open_url(
+ self._request(
 URL_AUTHENTICATION_FLOWS.format(
 url=self.baseurl,
 realm=realm),
 method='POST',
- headers=self.restheaders,
 data=json.dumps(new_flow))
 flow_list = json.load(
- open_url(
+ self._request(
 URL_AUTHENTICATION_FLOWS.format(
 url=self.baseurl,
 realm=realm),
- method='GET',
- headers=self.restheaders))
+ method='GET'))
 for flow in flow_list:
 if flow["alias"] == config["alias"]:
 return flow
 return None
 except Exception as e:
- self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_request(e, msg='Could not create empty authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))

 def update_authentication_executions(self, flowAlias, updatedExec, realm='master'):
 """ Update authentication executions
@@ -1307,14 +2236,16 @@ class KeycloakAPI(object):
 :return: HTTPResponse object on success
 """
 try:
- open_url(
+ self._request(
 URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
 url=self.baseurl,
 realm=realm,
- flowalias=quote(flowAlias)),
+ flowalias=quote(flowAlias,
safe='')), method='PUT', - headers=self.restheaders, data=json.dumps(updatedExec)) + except HTTPError as e: + self.fail_request(e, msg="Unable to update execution '%s': %s: %s %s" + % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec))) except Exception as e: self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) @@ -1326,18 +2257,34 @@ class KeycloakAPI(object): :return: HTTPResponse object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_CONFIG.format( url=self.baseurl, realm=realm, id=executionId), method='POST', - headers=self.restheaders, data=json.dumps(authenticationConfig)) except Exception as e: - self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + self.fail_request(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) - def create_subflow(self, subflowName, flowAlias, realm='master'): + def delete_authentication_config(self, configId, realm='master'): + """ Delete authenticator config + + :param configId: id of authentication config + :param realm: realm of authentication config to be deleted + """ + try: + # Send a DELETE request to remove the specified authentication config from the Keycloak server. + self._request( + URL_AUTHENTICATION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=configId), + method='DELETE') + except Exception as e: + self.fail_request(e, msg="Unable to delete authentication config %s: %s" % (configId, str(e))) + + def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'): """ Create new sublow on the flow :param subflowName: name of the subflow to create @@ -1348,17 +2295,16 @@ class KeycloakAPI(object): newSubFlow = {} newSubFlow["alias"] = subflowName newSubFlow["provider"] = "registration-page-form" - newSubFlow["type"] = "basic-flow" - open_url( + newSubFlow["type"] = flowType + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='POST', - headers=self.restheaders, data=json.dumps(newSubFlow)) except Exception as e: - self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) + self.fail_request(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) def create_execution(self, execution, flowAlias, realm='master'): """ Create new execution on the flow @@ -1371,16 +2317,18 @@ class KeycloakAPI(object): newExec = {} newExec["provider"] = execution["providerId"] newExec["requirement"] = execution["requirement"] - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='POST', - headers=self.restheaders, data=json.dumps(newExec)) + except HTTPError as e: + self.fail_request(e, msg="Unable to create new execution '%s' %s: %s: %s %s" + % (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec))) except Exception as e: - self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e))) + self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e))) def change_execution_priority(self, executionId, diff, realm='master'): """ Raise or lower execution priority of diff time @@ -1393,24 +2341,22 @@ class KeycloakAPI(object): 
try: if diff > 0: for i in range(diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - headers=self.restheaders) + method='POST') elif diff < 0: for i in range(-diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - headers=self.restheaders) + method='POST') except Exception as e: - self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e))) + self.fail_request(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e))) def get_executions_representation(self, config, realm='master'): """ @@ -1422,29 +2368,128 @@ class KeycloakAPI(object): try: # Get executions created executions = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, - flowalias=quote(config["alias"])), - method='GET', - headers=self.restheaders)) + flowalias=quote(config["alias"], safe='')), + method='GET')) for execution in executions: if "authenticationConfig" in execution: execConfigId = execution["authenticationConfig"] execConfig = json.load( - open_url( + self._request( URL_AUTHENTICATION_CONFIG.format( url=self.baseurl, realm=realm, id=execConfigId), - method='GET', - headers=self.restheaders)) + method='GET')) execution["authenticationConfig"] = execConfig return executions except Exception as e: - self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg='Could not get executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + def get_required_actions(self, realm='master'): + """ + Get required actions. + :param realm: Realm name (not id). + :return: List of representations of the required actions. + """ + + try: + required_actions = json.load( + self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS.format( + url=self.baseurl, + realm=realm + ), + method='GET' + ) + ) + + return required_actions + except Exception: + return None + + def register_required_action(self, rep, realm='master'): + """ + Register required action. + :param rep: JSON containing 'providerId', and 'name' attributes. + :param realm: Realm name (not id). + :return: Representation of the required action. + """ + + data = { + 'name': rep['name'], + 'providerId': rep['providerId'] + } + + try: + return self._request( + URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format( + url=self.baseurl, + realm=realm + ), + method='POST', + data=json.dumps(data), + ) + except Exception as e: + self.fail_request( + e, + msg='Unable to register required action %s in realm %s: %s' + % (rep["name"], realm, str(e)) + ) + + def update_required_action(self, alias, rep, realm='master'): + """ + Update required action. + :param alias: Alias of required action. + :param rep: JSON describing new state of required action. + :param realm: Realm name (not id). + :return: HTTPResponse object on success. 
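[Editor's note, not part of the patch: a hypothetical end-to-end use of the required-action helpers above: list the current actions, register one if absent, then adjust it via its alias. `kc`, the provider ID, and the alias-equals-providerId shortcut are all assumptions of this sketch.]

```python
# Hypothetical sketch; `kc` and the provider ID are assumptions.
def ensure_totp_required_action(kc, realm="master"):
    rep = {"providerId": "CONFIGURE_TOTP", "name": "Configure OTP"}
    existing = kc.get_required_actions(realm=realm) or []   # helper returns None on failure
    if not any(a.get("providerId") == rep["providerId"] for a in existing):
        kc.register_required_action(rep, realm=realm)
    # for built-in actions the alias typically equals the provider id
    kc.update_required_action(rep["providerId"], dict(rep, enabled=True), realm=realm)
```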
+ """ + + try: + return self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( + url=self.baseurl, + alias=quote(alias, safe=''), + realm=realm + ), + method='PUT', + data=json.dumps(rep), + ) + except Exception as e: + self.fail_request( + e, + msg='Unable to update required action %s in realm %s: %s' + % (alias, realm, str(e)) + ) + + def delete_required_action(self, alias, realm='master'): + """ + Delete required action. + :param alias: Alias of required action. + :param realm: Realm name (not id). + :return: HTTPResponse object on success. + """ + + try: + return self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( + url=self.baseurl, + alias=quote(alias, safe=''), + realm=realm + ), + method='DELETE', + ) + except Exception as e: + self.fail_request( + e, + msg='Unable to delete required action %s in realm %s: %s' + % (alias, realm, str(e)) + ) def get_identity_providers(self, realm='master'): """ Fetch representations for identity providers in a realm @@ -1453,14 +2498,13 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idps_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of identity providers for realm %s: %s' + % (realm, str(e))) def get_identity_provider(self, alias, realm='master'): """ Fetch identity provider representation from a realm using the idp's alias. @@ -1470,14 +2514,13 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' % (alias, realm, str(e))) @@ -1490,11 +2533,10 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return open_url(idps_url, method='POST', headers=self.restheaders, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idps_url, method='POST', data=json.dumps(idprep)) except Exception as e: - self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg='Could not create identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) def update_identity_provider(self, idprep, realm='master'): """ Update an existing identity provider. 
@@ -1504,11 +2546,10 @@ class KeycloakAPI(object):
 """
 idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias'])
 try:
- return open_url(idp_url, method='PUT', headers=self.restheaders,
- data=json.dumps(idprep), validate_certs=self.validate_certs)
+ return self._request(idp_url, method='PUT', data=json.dumps(idprep))
 except Exception as e:
- self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s'
- % (idprep['alias'], realm, str(e)))
+ self.fail_request(e, msg='Could not update identity provider %s in realm %s: %s'
+ % (idprep['alias'], realm, str(e)))

 def delete_identity_provider(self, alias, realm='master'):
 """ Delete an identity provider.
@@ -1517,11 +2558,10 @@ class KeycloakAPI(object):
 """
 idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
 try:
- return open_url(idp_url, method='DELETE', headers=self.restheaders,
- validate_certs=self.validate_certs)
+ return self._request(idp_url, method='DELETE')
 except Exception as e:
- self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s'
- % (alias, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete identity provider %s in realm %s: %s'
+ % (alias, realm, str(e)))

 def get_identity_provider_mappers(self, alias, realm='master'):
 """ Fetch representations for identity provider mappers
@@ -1531,14 +2571,30 @@ class KeycloakAPI(object):
 """
 mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
 try:
- return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(mappers_url, method='GET')
 except ValueError as e:
 self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
 % (alias, realm, str(e)))
 except Exception as e:
- self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
- % (alias, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
+ % (alias, realm, str(e)))
+
+ def fetch_idp_endpoints_import_config_url(self, fromUrl, providerId='oidc', realm='master'):
+ """ Have the Keycloak server import an identity provider configuration from a well-known URL.
+ :param fromUrl: URL to import the identity provider configuration from.
+ :param providerId: Provider ID of the identity provider to import, default 'oidc'.
+ :param realm: Realm
+ :return: IdP endpoints.
+ """
+ try:
+ payload = {
+ "providerId": providerId,
+ "fromUrl": fromUrl
+ }
+ idps_url = URL_IDENTITY_PROVIDER_IMPORT.format(url=self.baseurl, realm=realm)
+ return self._request_and_deserialize(idps_url, method='POST', data=json.dumps(payload))
+ except Exception as e:
+ self.fail_request(e, msg='Could not import the IdP config in realm %s: %s' % (realm, str(e)))

 def get_identity_provider_mapper(self, mid, alias, realm='master'):
 """ Fetch identity provider representation from a realm using the idp's alias.
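[Editor's note, not part of the patch: a hypothetical use of the import-config helper above, discovering OIDC endpoints from a well-known URL before creating the identity provider. `kc`, the URL, and the alias are assumptions; the exact shape of the returned endpoints dict depends on the Keycloak version.]

```python
# Hypothetical sketch; `kc` and all literal values are assumptions.
def discover_and_create_idp(kc, realm="master"):
    endpoints = kc.fetch_idp_endpoints_import_config_url(
        "https://login.example.com/.well-known/openid-configuration",
        providerId="oidc", realm=realm)
    idp = {"alias": "example-oidc", "providerId": "oidc", "config": endpoints}
    kc.create_identity_provider(idp, realm=realm)
```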
@@ -1549,14 +2605,13 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mapper_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' % (mid, alias, realm, str(e))) @@ -1570,11 +2625,10 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(mappers_url, method='POST', headers=self.restheaders, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mappers_url, method='POST', data=json.dumps(mapper)) except Exception as e: - self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' - % (mapper['name'], alias, realm, str(e))) + self.fail_request(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' + % (mapper['name'], alias, realm, str(e))) def update_identity_provider_mapper(self, mapper, alias, realm='master'): """ Update an existing identity provider. @@ -1585,11 +2639,10 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) try: - return open_url(mapper_url, method='PUT', headers=self.restheaders, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mapper_url, method='PUT', data=json.dumps(mapper)) except Exception as e: - self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s' - % (mapper['id'], alias, realm, str(e))) + self.fail_request(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s' + % (mapper['id'], alias, realm, str(e))) def delete_identity_provider_mapper(self, mid, alias, realm='master'): """ Delete an identity provider. 
@@ -1599,11 +2652,10 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return open_url(mapper_url, method='DELETE', headers=self.restheaders, - validate_certs=self.validate_certs) + return self._request(mapper_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) def get_components(self, filter=None, realm='master'): """ Fetch representations for components in a realm @@ -1616,14 +2668,13 @@ class KeycloakAPI(object): comps_url += '?%s' % filter try: - return json.loads(to_native(open_url(comps_url, method='GET', headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comps_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of components for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of components for realm %s: %s' + % (realm, str(e))) def get_component(self, cid, realm='master'): """ Fetch component representation from a realm using its cid. @@ -1633,14 +2684,13 @@ class KeycloakAPI(object): """ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg='Could not fetch component %s in realm %s: %s' + % (cid, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' % (cid, realm, str(e))) @@ -1653,17 +2703,15 @@ class KeycloakAPI(object): """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) try: - resp = open_url(comps_url, method='POST', headers=self.restheaders, - data=json.dumps(comprep), validate_certs=self.validate_certs) + resp = self._request(comps_url, method='POST', data=json.dumps(comprep)) comp_url = resp.getheader('Location') if comp_url is None: self.module.fail_json(msg='Could not create component in realm %s: %s' % (realm, 'unexpected response')) - return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comp_url, method="GET") except Exception as e: - self.module.fail_json(msg='Could not create component in realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not create component in realm %s: %s' + % (realm, str(e))) def update_component(self, comprep, realm='master'): """ Update an existing component. 
@@ -1676,11 +2724,10 @@ class KeycloakAPI(object):
 self.module.fail_json(msg='Cannot update component without id')
 comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
 try:
- return open_url(comp_url, method='PUT', headers=self.restheaders,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
+ return self._request(comp_url, method='PUT', data=json.dumps(comprep))
 except Exception as e:
- self.module.fail_json(msg='Could not update component %s in realm %s: %s'
- % (cid, realm, str(e)))
+ self.fail_request(e, msg='Could not update component %s in realm %s: %s'
+ % (cid, realm, str(e)))

 def delete_component(self, cid, realm='master'):
 """ Delete an component.
@@ -1689,8 +2736,505 @@ class KeycloakAPI(object):
 """
 comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
 try:
- return open_url(comp_url, method='DELETE', headers=self.restheaders,
- validate_certs=self.validate_certs)
+ return self._request(comp_url, method='DELETE')
 except Exception as e:
- self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
- % (cid, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete component %s in realm %s: %s'
+ % (cid, realm, str(e)))
+
+ def get_authz_authorization_scope_by_name(self, name, client_id, realm):
+ url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
+ search_url = "%s/search?name=%s" % (url, quote(name, safe=''))
+
+ try:
+ return self._request_and_deserialize(search_url, method='GET')
+ except Exception:
+ return False
+
+ def create_authz_authorization_scope(self, payload, client_id, realm):
+ """Create an authorization scope for a Keycloak client"""
+ url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
+
+ try:
+ return self._request(url, method='POST', data=json.dumps(payload))
+ except Exception as e:
+ self.fail_request(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def update_authz_authorization_scope(self, payload, id, client_id, realm):
+ """Update an authorization scope for a Keycloak client"""
+ url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
+
+ try:
+ return self._request(url, method='PUT', data=json.dumps(payload))
+ except Exception as e:
+ self.fail_request(e, msg='Could not update authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def remove_authz_authorization_scope(self, id, client_id, realm):
+ """Remove an authorization scope from a Keycloak client"""
+ url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
+
+ try:
+ return self._request(url, method='DELETE')
+ except Exception as e:
+ self.fail_request(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+
+ def get_user_by_id(self, user_id, realm='master'):
+ """
+ Get a User by its ID.
+ :param user_id: ID of the user.
+ :param realm: Realm
+ :return: Representation of the user.
+ """
+ try:
+ user_url = URL_USER.format(
+ url=self.baseurl,
+ realm=realm,
+ id=user_id)
+ userrep = json.load(
+ self._request(
+ user_url,
+ method='GET'))
+ return userrep
+ except Exception as e:
+ self.fail_request(e, msg='Could not get user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
+
+ def create_user(self, userrep, realm='master'):
+ """
+ Create a new User.
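[Editor's note, not part of the patch: a hypothetical ensure-present flow built on the authorization-scope helpers above. `kc` and the scope values are assumptions; note the lookup helper returns False when the search request fails, so a falsy result covers both "absent" and "lookup failed".]

```python
# Hypothetical sketch; `kc` and the payload values are assumptions.
def ensure_authz_scope(kc, client_id, realm="master"):
    payload = {"name": "read:documents", "displayName": "Read documents"}
    found = kc.get_authz_authorization_scope_by_name(payload["name"], client_id, realm)
    if not found:
        return kc.create_authz_authorization_scope(payload, client_id, realm)
    if isinstance(found, dict) and found.get("displayName") != payload["displayName"]:
        return kc.update_authz_authorization_scope(payload, found["id"], client_id, realm)
```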
+ :param userrep: Representation of the user to create + :param realm: Realm + :return: Representation of the user created. + """ + try: + if 'attributes' in userrep and isinstance(userrep['attributes'], list): + attributes = copy.deepcopy(userrep['attributes']) + userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + users_url = URL_USERS.format( + url=self.baseurl, + realm=realm) + self._request(users_url, + method='POST', + data=json.dumps(userrep)) + created_user = self.get_user_by_username( + username=userrep['username'], + realm=realm) + return created_user + except Exception as e: + self.fail_request(e, msg='Could not create user %s in realm %s: %s' + % (userrep['username'], realm, str(e))) + + def convert_user_attributes_to_keycloak_dict(self, attributes): + keycloak_user_attributes_dict = {} + for attribute in attributes: + if ('state' not in attribute or attribute['state'] == 'present') and 'name' in attribute: + keycloak_user_attributes_dict[attribute['name']] = attribute['values'] if 'values' in attribute else [] + return keycloak_user_attributes_dict + + def convert_keycloak_user_attributes_dict_to_module_list(self, attributes): + module_attributes_list = [] + for key in attributes: + attr = {} + attr['name'] = key + attr['values'] = attributes[key] + module_attributes_list.append(attr) + return module_attributes_list + + def update_user(self, userrep, realm='master'): + """ + Update a User. + :param userrep: Representation of the user to update. This representation must include the ID of the user. + :param realm: Realm + :return: Representation of the updated user. + """ + try: + if 'attributes' in userrep and isinstance(userrep['attributes'], list): + attributes = copy.deepcopy(userrep['attributes']) + userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=userrep["id"]) + self._request( + user_url, + method='PUT', + data=json.dumps(userrep)) + updated_user = self.get_user_by_id( + user_id=userrep['id'], + realm=realm) + return updated_user + except Exception as e: + self.fail_request(e, msg='Could not update user %s in realm %s: %s' + % (userrep['username'], realm, str(e))) + + def delete_user(self, user_id, realm='master'): + """ + Delete a User. + :param user_id: ID of the user to be deleted + :param realm: Realm + :return: HTTP response. + """ + try: + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=user_id) + return self._request( + user_url, + method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete user %s in realm %s: %s' + % (user_id, realm, str(e))) + + def get_user_groups(self, user_id, realm='master'): + """ + Get the group names for a user. + :param user_id: User ID + :param realm: Realm + :return: The client group names as a list of strings. + """ + user_groups = self.get_user_group_details(user_id, realm) + return [user_group['name'] for user_group in user_groups if 'name' in user_group] + + def get_user_group_details(self, user_id, realm='master'): + """ + Get the group details for a user. + :param user_id: User ID + :param realm: Realm + :return: The client group details as a list of dictionaries. 
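[Editor's note, not part of the patch: a standalone, runnable illustration of the attribute conversions above. The module accepts a list of `{'name', 'values', 'state'}` dicts, while Keycloak stores a plain name-to-list-of-values mapping; values below are invented.]

```python
module_attrs = [
    {"name": "department", "values": ["R&D"]},
    {"name": "obsolete", "values": ["x"], "state": "absent"},   # skipped by the conversion
]

keycloak_attrs = {
    a["name"]: a.get("values", [])
    for a in module_attrs
    if a.get("state", "present") == "present" and "name" in a
}
print(keycloak_attrs)   # {'department': ['R&D']}
```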
+ """ + try: + user_groups_url = URL_USER_GROUPS.format(url=self.baseurl, realm=realm, id=user_id) + return self._request_and_deserialize(user_groups_url, method='GET') + except Exception as e: + self.fail_request(e, msg='Could not get groups for user %s in realm %s: %s' + % (user_id, realm, str(e))) + + def add_user_in_group(self, user_id, group_id, realm='master'): + """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0.""" + return self.add_user_to_group(user_id, group_id, realm) + + def add_user_to_group(self, user_id, group_id, realm='master'): + """ + Add a user to a group. + :param user_id: User ID + :param group_id: Group Id to add the user to. + :param realm: Realm + :return: HTTP Response + """ + try: + user_group_url = URL_USER_GROUP.format( + url=self.baseurl, + realm=realm, + id=user_id, + group_id=group_id) + return self._request( + user_group_url, + method='PUT') + except Exception as e: + self.fail_request(e, msg='Could not add user %s to group %s in realm %s: %s' + % (user_id, group_id, realm, str(e))) + + def remove_user_from_group(self, user_id, group_id, realm='master'): + """ + Remove a user from a group for a user. + :param user_id: User ID + :param group_id: Group Id to add the user to. + :param realm: Realm + :return: HTTP response + """ + try: + user_group_url = URL_USER_GROUP.format( + url=self.baseurl, + realm=realm, + id=user_id, + group_id=group_id) + return self._request( + user_group_url, + method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not remove user %s from group %s in realm %s: %s' + % (user_id, group_id, realm, str(e))) + + def update_user_groups_membership(self, userrep, groups, realm='master'): + """ + Update user's group membership + :param userrep: Representation of the user. This representation must include the ID. + :param realm: Realm + :return: True if group membership has been changed. False Otherwise. 
+ """ + try: + groups_to_add, groups_to_remove = self.extract_groups_to_add_to_and_remove_from_user(groups) + if not groups_to_add and not groups_to_remove: + return False + + user_groups = self.get_user_group_details(user_id=userrep['id'], realm=realm) + user_group_names = [user_group['name'] for user_group in user_groups if 'name' in user_group] + user_group_paths = [user_group['path'] for user_group in user_groups if 'path' in user_group] + + groups_to_add = [group_to_add for group_to_add in groups_to_add + if group_to_add not in user_group_names and group_to_add not in user_group_paths] + groups_to_remove = [group_to_remove for group_to_remove in groups_to_remove + if group_to_remove in user_group_names or group_to_remove in user_group_paths] + if not groups_to_add and not groups_to_remove: + return False + + for group_to_add in groups_to_add: + realm_group = self.find_group_by_path(group_to_add, realm=realm) + if realm_group: + self.add_user_to_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + for group_to_remove in groups_to_remove: + realm_group = self.find_group_by_path(group_to_remove, realm=realm) + if realm_group: + self.remove_user_from_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + return True + except Exception as e: + self.module.fail_json(msg='Could not update group membership for user %s in realm %s: %s' + % (userrep['username'], realm, e)) + + def extract_groups_to_add_to_and_remove_from_user(self, groups): + groups_to_add = [] + groups_to_remove = [] + if isinstance(groups, list): + for group in groups: + group_name = group['name'] if isinstance(group, dict) and 'name' in group else group + if isinstance(group, dict): + if 'state' not in group or group['state'] == 'present': + groups_to_add.append(group_name) + else: + groups_to_remove.append(group_name) + return groups_to_add, groups_to_remove + + def find_group_by_path(self, target, realm='master'): + """ + Finds a realm group by path, e.g. '/my/group'. + The path is formed by prepending a '/' character to `target` unless it's already present. + This adds support for finding top level groups by name and subgroups by path. 
+ """ + groups = self.get_groups(realm=realm) + path = target if target.startswith('/') else '/' + target + for segment in path.split('/'): + if not segment: + continue + abort = True + for group in groups: + if group['path'] == path: + return self.get_group_by_groupid(group['id'], realm=realm) + if group['name'] == segment: + groups = self.get_subgroups(group, realm=realm) + abort = False + break + if abort: + break + return None + + def convert_user_group_list_of_str_to_list_of_dict(self, groups): + list_of_groups = [] + if isinstance(groups, list) and len(groups) > 0: + for group in groups: + if isinstance(group, str): + group_dict = {} + group_dict['name'] = group + list_of_groups.append(group_dict) + return list_of_groups + + def create_authz_custom_policy(self, policy_type, payload, client_id, realm): + """Create a custom policy for a Keycloak client""" + url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def remove_authz_custom_policy(self, policy_id, client_id, realm): + """Remove a custom policy from a Keycloak client""" + url = URL_AUTHZ_CUSTOM_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + delete_url = "%s/%s" % (url, policy_id) + + try: + return self._request(delete_url, method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + + def get_authz_permission_by_name(self, name, client_id, realm): + """Get authorization permission by name""" + url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def create_authz_permission(self, payload, permission_type, client_id, realm): + """Create an authorization permission for a Keycloak client""" + url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def remove_authz_permission(self, id, client_id, realm): + """Create an authorization permission for a Keycloak client""" + url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + + def update_authz_permission(self, payload, permission_type, id, client_id, realm): + """Update a permission for a Keycloak client""" + url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='PUT', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def 
+    def get_authz_resource_by_name(self, name, client_id, realm):
+        """Get authorization resource by name"""
+        url = URL_AUTHZ_RESOURCES.format(url=self.baseurl, client_id=client_id, realm=realm)
+        search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20'))
+
+        try:
+            return self._request_and_deserialize(search_url, method='GET')
+        except Exception:
+            return False
+
+    def get_authz_policy_by_name(self, name, client_id, realm):
+        """Get authorization policy by name"""
+        url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm)
+        search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20'))
+
+        try:
+            return self._request_and_deserialize(search_url, method='GET')
+        except Exception:
+            return False
+
+    def get_client_role_scope_from_client(self, clientid, clientscopeid, realm="master"):
+        """ Fetch the roles associated with the client's scope for a specific client on the Keycloak server.
+        :param clientid: ID of the client from which to obtain the associated roles.
+        :param clientscopeid: ID of the client that owns the roles.
+        :param realm: Realm from which to obtain the scope.
+        :return: The client scope of roles from the specified client.
+        """
+        client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid)
+        try:
+            return self._request_and_deserialize(client_role_scope_url, method='GET')
+        except Exception as e:
+            self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+
+    def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"):
+        """ Update and fetch the roles associated with the client's scope on the Keycloak server.
+        :param payload: List of roles to be added to the scope.
+        :param clientid: ID of the client whose scope is updated.
+        :param clientscopeid: ID of the client that owns the roles.
+        :param realm: Realm from which to obtain the clients.
+        :return: The client scope of roles from the specified client.
+        """
+        client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid)
+        try:
+            self._request(client_role_scope_url, method='POST', data=json.dumps(payload))
+
+        except Exception as e:
+            self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+
+        return self.get_client_role_scope_from_client(clientid, clientscopeid, realm)
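# ---- editor's example (illustrative sketch, not part of this patch) ----
# How the client role scope helpers above might be called. `kc` (a KeycloakAPI
# instance) and the UUIDs are placeholders; the payload shape, a list of role
# representations with 'id' and 'name', is an assumption based on the Keycloak
# scope-mappings REST API, not something this patch defines.
roles_to_grant = [
    {"id": "<role-uuid>", "name": "report-viewer"},  # role owned by the source client
]
scope = kc.update_client_role_scope_from_client(
    payload=roles_to_grant,
    clientid="<client-uuid>",           # client whose scope is updated
    clientscopeid="<role-owner-uuid>",  # client that owns the roles
    realm="master",
)
# the helper POSTs the payload, then re-reads and returns the resulting scope
# ---- end example ----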
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def get_client_role_scope_from_realm(self, clientid, realm="master"): + """ Fetch the realm roles from the client's scope on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated realm roles. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + return self._request_and_deserialize(client_role_scope_url, method='GET') + except Exception as e: + self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + def update_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Update and fetch the realm roles from the client's scope on the Keycloak server. + :param payload: List of realm roles to add. + :param clientid: ID of the client to update scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_realm(clientid, realm) + + def delete_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Delete the realm roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of realm roles to delete. + :param clientid: ID of the client to delete roles from scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_realm(clientid, realm) + + def fail_request(self, e, msg, **kwargs): + """ Triggers a module failure. This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. + + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ + try: + if isinstance(e, HTTPError): + msg = "%s: %s" % (msg, to_native(e.read())) + except Exception: + pass + self.module.fail_json(msg, **kwargs) + + def fail_open_url(self, e, msg, **kwargs): + """ DEPRECATED: Use fail_request instead. + + Triggers a module failure. This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. 
+    def fail_open_url(self, e, msg, **kwargs):
+        """ DEPRECATED: Use fail_request instead.
+
+        Triggers a module failure. This should be called
+        when an exception occurs during/after a request.
+        Attempts to parse the exception e as an HTTP error
+        and append it to msg.
+
+        :param e: exception which triggered the failure
+        :param msg: error message to display to the user
+        :param kwargs: additional arguments to pass to module.fail_json
+        :return: None
+        """
+        return self.fail_request(e, msg, **kwargs)
diff --git a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
new file mode 100644
index 0000000000..2118e8f6e2
--- /dev/null
+++ b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# Copyright (c) 2022, John Cant
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
+    keycloak_argument_spec
+
+
+def keycloak_clientsecret_module():
+    """
+    Returns an AnsibleModule definition for modules that interact with a client
+    secret.
+
+    :return: an AnsibleModule instance
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        realm=dict(default='master'),
+        id=dict(type='str'),
+        client_id=dict(type='str', aliases=['clientId']),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=([['id', 'client_id'],
+                          ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+        required_together=([['auth_username', 'auth_password']]),
+        mutually_exclusive=[
+            ['token', 'auth_realm'],
+            ['token', 'auth_username'],
+            ['token', 'auth_password']
+        ])
+
+    return module
+
+
+def keycloak_clientsecret_module_resolve_params(module, kc):
+    """
+    Given an AnsibleModule definition for keycloak_clientsecret_*, and a
+    KeycloakAPI client, resolve the params needed to interact with the Keycloak
+    client secret, looking up the client by clientId if necessary via an API
+    call.
+
+    :return: tuple of id, realm
+    """
+
+    realm = module.params.get('realm')
+    id = module.params.get('id')
+    client_id = module.params.get('client_id')
+
+    # only look up the client by client_id if id isn't provided.
+    # in the case that both are provided, prefer the ID, since it is one
+    # less lookup.
+    if id is None:
+        # Due to the required_one_of spec, client_id is guaranteed to not be None
+        client = kc.get_client_by_clientid(client_id, realm=realm)
+
+        if client is None:
+            module.fail_json(
+                msg=f'Client {client_id} does not exist'
+            )
+
+        id = client['id']
+
+    return id, realm
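# ---- editor's example (illustrative sketch, not part of this patch) ----
# How the two helpers above are meant to compose inside a
# keycloak_clientsecret_* module. KeycloakAPI and get_token are assumed to be
# the existing helpers from keycloak.py in this module_utils package.
module = keycloak_clientsecret_module()
connection_header = get_token(module.params)
kc = KeycloakAPI(module, connection_header)
id, realm = keycloak_clientsecret_module_resolve_params(module, kc)
# `id` now holds the client UUID, whether the caller passed `id` or `client_id`
# ---- end example ----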
diff --git a/plugins/module_utils/ilo_redfish_utils.py b/plugins/module_utils/ilo_redfish_utils.py
new file mode 100644
index 0000000000..fd5b7fe64d
--- /dev/null
+++ b/plugins/module_utils/ilo_redfish_utils.py
@@ -0,0 +1,305 @@
+
+# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+import time
+
+
+class iLORedfishUtils(RedfishUtils):
+
+    def get_ilo_sessions(self):
+        result = {}
+        # Listing all sessions has historically been slower than other operations.
+        session_list = []
+        sessions_results = []
+        # Get these entries, but do not fail if they are not found
+        properties = ['Description', 'Id', 'Name', 'UserName']
+
+        # Use a hardcoded sessions path instead of self.sessions_uri.
+        response = self.get_request(f"{self.root_uri}{self.service_root}SessionService/Sessions/")
+        if not response['ret']:
+            return response
+        result['ret'] = True
+        data = response['data']
+
+        current_session = None
+        if 'Oem' in data:
+            if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
+                current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]
+
+        for sessions in data['Members']:
+            # session_list[] are URIs
+            session_list.append(sessions['@odata.id'])
+        # for each session, get details
+        for uri in session_list:
+            session = {}
+            if uri != current_session:
+                response = self.get_request(self.root_uri + uri)
+                if not response['ret']:
+                    return response
+                data = response['data']
+                for property in properties:
+                    if property in data:
+                        session[property] = data[property]
+                sessions_results.append(session)
+        result["msg"] = sessions_results
+        result["ret"] = True
+        return result
+
+    def set_ntp_server(self, mgr_attributes):
+        result = {}
+        setkey = mgr_attributes['mgr_attr_name']
+
+        nic_info = self.get_manager_ethernet_uri()
+        ethuri = nic_info["nic_addr"]
+
+        response = self.get_request(self.root_uri + ethuri)
+        if not response['ret']:
+            return response
+        result['ret'] = True
+        data = response['data']
+        payload = {"DHCPv4": {
+            "UseNTPServers": ""
+        }}
+
+        if data["DHCPv4"]["UseNTPServers"]:
+            payload["DHCPv4"]["UseNTPServers"] = False
+            res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
+            if not res_dhv4['ret']:
+                return res_dhv4
+
+        payload = {"DHCPv6": {
+            "UseNTPServers": ""
+        }}
+
+        if data["DHCPv6"]["UseNTPServers"]:
+            payload["DHCPv6"]["UseNTPServers"] = False
+            res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
+            if not res_dhv6['ret']:
+                return res_dhv6
+
+        datetime_uri = f"{self.manager_uri}DateTime"
+
+        listofips = mgr_attributes['mgr_attr_value'].split(" ")
+        if len(listofips) > 2:
+            return {'ret': False, 'changed': False, 'msg': "More than 2 NTP servers specified"}
+
+        ntp_list = []
+        for ips in listofips:
+            ntp_list.append(ips)
+
+        while len(ntp_list) < 2:
+            ntp_list.append("0.0.0.0")
+
+        payload = {setkey: ntp_list}
+
+        response1 = self.patch_request(self.root_uri + datetime_uri, payload)
+        if not response1['ret']:
+            return response1
+
+        return {'ret': True, 'changed': True, 'msg': f"Modified {mgr_attributes['mgr_attr_name']}"}
+
+    def set_time_zone(self, attr):
+        key = attr['mgr_attr_name']
+
+        uri = f"{self.manager_uri}DateTime/"
+        response = self.get_request(self.root_uri + uri)
+        if not response['ret']:
+            return response
+
+        data = response["data"]
+
+        if key not in data:
+            return {'ret': False, 'changed': False, 'msg': f"Key {key} not found"}
+
+        timezones = data["TimeZoneList"]
+        index = ""
+        for tz in timezones:
+            if attr['mgr_attr_value'] in tz["Name"]:
+                index = tz["Index"]
+                break
+
+        payload = {key: {"Index": index}}
+        response = self.patch_request(self.root_uri + uri, payload)
+        if not response['ret']:
+            return response
+
+        return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"}
+
+    def set_dns_server(self, attr):
+        key = attr['mgr_attr_name']
+        nic_info = self.get_manager_ethernet_uri()
+        uri = nic_info["nic_addr"]
+
+        listofips = attr['mgr_attr_value'].split(" ")
+        if len(listofips) > 3:
+            return {'ret': False, 'changed': False, 'msg': "More than 3 DNS servers specified"}
+
+        dns_list = []
+        for 
ips in listofips: + dns_list.append(ips) + + while len(dns_list) < 3: + dns_list.append("0.0.0.0") + + payload = { + "Oem": { + "Hpe": { + "IPv4": { + key: dns_list + } + } + } + } + + response = self.patch_request(self.root_uri + uri, payload) + if not response['ret']: + return response + + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} + + def set_domain_name(self, attr): + key = attr['mgr_attr_name'] + + nic_info = self.get_manager_ethernet_uri() + ethuri = nic_info["nic_addr"] + + response = self.get_request(self.root_uri + ethuri) + if not response['ret']: + return response + + data = response['data'] + + payload = {"DHCPv4": { + "UseDomainName": "" + }} + + if data["DHCPv4"]["UseDomainName"]: + payload["DHCPv4"]["UseDomainName"] = False + res_dhv4 = self.patch_request(self.root_uri + ethuri, payload) + if not res_dhv4['ret']: + return res_dhv4 + + payload = {"DHCPv6": { + "UseDomainName": "" + }} + + if data["DHCPv6"]["UseDomainName"]: + payload["DHCPv6"]["UseDomainName"] = False + res_dhv6 = self.patch_request(self.root_uri + ethuri, payload) + if not res_dhv6['ret']: + return res_dhv6 + + domain_name = attr['mgr_attr_value'] + + payload = {"Oem": { + "Hpe": { + key: domain_name + } + }} + + response = self.patch_request(self.root_uri + ethuri, payload) + if not response['ret']: + return response + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} + + def set_wins_registration(self, mgrattr): + Key = mgrattr['mgr_attr_name'] + + nic_info = self.get_manager_ethernet_uri() + ethuri = nic_info["nic_addr"] + + payload = { + "Oem": { + "Hpe": { + "IPv4": { + Key: False + } + } + } + } + + response = self.patch_request(self.root_uri + ethuri, payload) + if not response['ret']: + return response + return {'ret': True, 'changed': True, 'msg': f"Modified {mgrattr['mgr_attr_name']}"} + + def get_server_poststate(self): + # Get server details + response = self.get_request(self.root_uri + self.systems_uri) + if not response["ret"]: + return response + server_data = response["data"] + + if "Hpe" in server_data["Oem"]: + return { + "ret": True, + "server_poststate": server_data["Oem"]["Hpe"]["PostState"] + } + else: + return { + "ret": True, + "server_poststate": server_data["Oem"]["Hp"]["PostState"] + } + + def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800): + # This method checks if OOB controller reboot is completed + time.sleep(10) + + # Check server poststate + state = self.get_server_poststate() + if not state["ret"]: + return state + + count = int(max_polling_time / polling_interval) + times = 0 + + # When server is powered OFF + pcount = 0 + while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5: + time.sleep(10) + state = self.get_server_poststate() + if not state["ret"]: + return state + + if state["server_poststate"] not in ["PowerOff", "Off"]: + break + pcount = pcount + 1 + if state["server_poststate"] in ["PowerOff", "Off"]: + return { + "ret": False, + "changed": False, + "msg": "Server is powered OFF" + } + + # When server is not rebooting + if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: + return { + "ret": True, + "changed": False, + "msg": "Server is not rebooting" + } + + while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times: + state = self.get_server_poststate() + if not state["ret"]: + return state + + if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: + return 
{ + "ret": True, + "changed": True, + "msg": "Server reboot is completed" + } + time.sleep(polling_interval) + times = times + 1 + + return { + "ret": False, + "changed": False, + "msg": f"Server Reboot has failed, server state: {state} " + } diff --git a/plugins/module_utils/influxdb.py b/plugins/module_utils/influxdb.py index 92c7802335..9eed90cfda 100644 --- a/plugins/module_utils/influxdb.py +++ b/plugins/module_utils/influxdb.py @@ -1,19 +1,19 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2017, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback from ansible.module_utils.basic import missing_required_lib -from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion REQUESTS_IMP_ERR = None try: - import requests.exceptions + import requests.exceptions # noqa: F401, pylint: disable=unused-import HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -23,7 +23,7 @@ INFLUXDB_IMP_ERR = None try: from influxdb import InfluxDBClient from influxdb import __version__ as influxdb_version - from influxdb import exceptions + from influxdb import exceptions # noqa: F401, pylint: disable=unused-import HAS_INFLUXDB = True except ImportError: INFLUXDB_IMP_ERR = traceback.format_exc() diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index 3d8c2580d8..96010d503b 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,10 +6,10 @@ # # Copyright (c) 2016 Thomas Krahn (@Nosmoht) # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import os @@ -19,10 +18,9 @@ import uuid import re from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.six import PY3 -from ansible.module_utils.six.moves.urllib.parse import quote from ansible.module_utils.urls import fetch_url, HAS_GSSAPI from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound +from urllib.parse import quote def _env_then_dns_fallback(*args, **kwargs): @@ -53,16 +51,16 @@ class IPAClient(object): self.use_gssapi = False def get_base_url(self): - return '%s://%s/ipa' % (self.protocol, self.host) + return f'{self.protocol}://{self.host}/ipa' def get_json_url(self): - return '%s/session/json' % self.get_base_url() + return f'{self.get_base_url()}/session/json' def login(self, username, password): if 'KRB5CCNAME' in os.environ and HAS_GSSAPI: self.use_gssapi = True elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI: - ccache = "MEMORY:" + str(uuid.uuid4()) + ccache = f"MEMORY:{uuid.uuid4()!s}" os.environ['KRB5CCNAME'] = ccache self.use_gssapi = True else: @@ -73,8 +71,8 @@ class IPAClient(object): 'GSSAPI. To use GSSAPI, please set the ' 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) ' ' environment variables.') - url = '%s/session/login_password' % self.get_base_url() - data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe='')) + url = f'{self.get_base_url()}/session/login_password' + data = f"user={quote(username, safe='')}&password={quote(password, safe='')}" headers = {'referer': self.get_base_url(), 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'} @@ -99,11 +97,11 @@ class IPAClient(object): err_string = e.get('message') else: err_string = e - self.module.fail_json(msg='%s: %s' % (msg, err_string)) + self.module.fail_json(msg=f'{msg}: {err_string}') def get_ipa_version(self): response = self.ping()['summary'] - ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*') + ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*') version_match = ipa_ver_regex.match(response) ipa_version = None if version_match: @@ -116,7 +114,7 @@ class IPAClient(object): def _post_json(self, method, name, item=None): if item is None: item = {} - url = '%s/session/json' % self.get_base_url() + url = f'{self.get_base_url()}/session/json' data = dict(method=method) # TODO: We should probably handle this a little better. 
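# ---- editor's example (illustrative sketch, not part of this patch) ----
# Why the version regex above was widened from (\d\.\d\.\d) to
# (\d+\.\d+\.\d+): the old pattern cannot match multi-digit components.
import re

old = re.compile(r'IPA server version (\d\.\d\.\d).*')
new = re.compile(r'IPA server version (\d+\.\d+\.\d+).*')
summary = 'IPA server version 4.10.2. API version 2.251'
assert old.match(summary) is None                 # 4.10.2 is missed
assert new.match(summary).group(1) == '4.10.2'    # captured correctly
# ---- end example ----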
@@ -134,20 +132,13 @@ class IPAClient(object): if status_code not in [200, 201, 204]: self._fail(method, info['msg']) except Exception as e: - self._fail('post %s' % method, to_native(e)) + self._fail(f'post {method}', to_native(e)) - if PY3: - charset = resp.headers.get_content_charset('latin-1') - else: - response_charset = resp.headers.getparam('charset') - if response_charset: - charset = response_charset - else: - charset = 'latin-1' + charset = resp.headers.get_content_charset('latin-1') resp = json.loads(to_text(resp.read(), encoding=charset)) err = resp.get('error') if err is not None: - self._fail('response %s' % method, err) + self._fail(f'response {method}', err) if 'result' in resp: result = resp.get('result') diff --git a/plugins/module_utils/jenkins.py b/plugins/module_utils/jenkins.py new file mode 100644 index 0000000000..26334f89b8 --- /dev/null +++ b/plugins/module_utils/jenkins.py @@ -0,0 +1,33 @@ + +# Copyright (c) 2022, Alexei Znamensky +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import os +import time + + +def download_updates_file(updates_expiration): + updates_filename = 'jenkins-plugin-cache.json' + updates_dir = os.path.expanduser('~/.ansible/tmp') + updates_file = os.path.join(updates_dir, updates_filename) + download_updates = True + + # Make sure the destination directory exists + if not os.path.isdir(updates_dir): + os.makedirs(updates_dir, 0o700) + + # Check if we need to download new updates file + if os.path.isfile(updates_file): + # Get timestamp when the file was changed last time + ts_file = os.stat(updates_file).st_mtime + ts_now = time.time() + + if ts_now - ts_file < updates_expiration: + download_updates = False + + return updates_file, download_updates diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index ea6c95b6e2..ec20b8d88b 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,16 +6,16 @@ # # Copyright (c), Michael DeHaan , 2012-2013 # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import hmac import re -from ansible.module_utils.six.moves.urllib.parse import urlparse +from urllib.parse import urlparse try: from hashlib import sha1 @@ -59,17 +58,14 @@ def get_fqdn_and_port(repo_url): elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse(repo_url) - # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so - # ensure we actually have a parts[1] before continuing. 
- if parts[1] != '': - fqdn = parts[1] - if "@" in fqdn: - fqdn = fqdn.split("@", 1)[1] - match = ipv6_re.match(fqdn) - if match: - fqdn, port = match.groups() - elif ":" in fqdn: - fqdn, port = fqdn.split(":")[0:2] + fqdn = parts[1] + if "@" in fqdn: + fqdn = fqdn.split("@", 1)[1] + match = ipv6_re.match(fqdn) + if match: + fqdn, port = match.groups() + elif ":" in fqdn: + fqdn, port = fqdn.split(":")[0:2] return fqdn, port @@ -102,13 +98,11 @@ def not_in_host_file(self, host): continue try: - host_fh = open(hf) + with open(hf) as host_fh: + data = host_fh.read() except IOError: hfiles_not_found += 1 continue - else: - data = host_fh.read() - host_fh.close() for line in data.split("\n"): if line is None or " " not in line: @@ -152,28 +146,28 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): try: os.makedirs(user_ssh_dir, int('700', 8)) except Exception: - module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) + module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}") else: - module.fail_json(msg="%s does not exist" % user_ssh_dir) + module.fail_json(msg=f"{user_ssh_dir} does not exist") elif not os.path.isdir(user_ssh_dir): - module.fail_json(msg="%s is not a directory" % user_ssh_dir) + module.fail_json(msg=f"{user_ssh_dir} is not a directory") if port: - this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn) + this_cmd = f"{keyscan_cmd} -t {key_type} -p {port} {fqdn}" else: - this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) + this_cmd = f"{keyscan_cmd} -t {key_type} {fqdn}" rc, out, err = module.run_command(this_cmd) # ssh-keyscan gives a 0 exit code and prints nothing on timeout if rc != 0 or not out: msg = 'failed to retrieve hostkey' if not out: - msg += '. "%s" returned no matches.' % this_cmd + msg += f'. "{this_cmd}" returned no matches.' else: - msg += ' using command "%s". [stdout]: %s' % (this_cmd, out) + msg += f' using command "{this_cmd}". 
[stdout]: {out}' if err: - msg += ' [stderr]: %s' % err + msg += f' [stderr]: {err}' module.fail_json(msg=msg) diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py index 30dbaf7640..e0ee5940e2 100644 --- a/plugins/module_utils/ldap.py +++ b/plugins/module_utils/ldap.py @@ -1,19 +1,21 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import re import traceback from ansible.module_utils.common.text.converters import to_native try: import ldap + import ldap.dn + import ldap.filter import ldap.sasl HAS_LDAP = True @@ -30,33 +32,50 @@ def gen_specs(**specs): specs.update({ 'bind_dn': dict(), 'bind_pw': dict(default='', no_log=True), + 'ca_path': dict(type='path'), 'dn': dict(required=True), 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']), 'server_uri': dict(default='ldapi:///'), 'start_tls': dict(default=False, type='bool'), 'validate_certs': dict(default=True, type='bool'), 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'), + 'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'), + 'client_cert': dict(default=None, type='path'), + 'client_key': dict(default=None, type='path'), }) return specs +def ldap_required_together(): + return [['client_cert', 'client_key']] + + class LdapGeneric(object): def __init__(self, module): # Shortcuts self.module = module self.bind_dn = self.module.params['bind_dn'] self.bind_pw = self.module.params['bind_pw'] - self.dn = self.module.params['dn'] + self.ca_path = self.module.params['ca_path'] self.referrals_chasing = self.module.params['referrals_chasing'] self.server_uri = self.module.params['server_uri'] self.start_tls = self.module.params['start_tls'] self.verify_cert = self.module.params['validate_certs'] self.sasl_class = self.module.params['sasl_class'] + self.xorder_discovery = self.module.params['xorder_discovery'] + self.client_cert = self.module.params['client_cert'] + self.client_key = self.module.params['client_key'] # Establish connection self.connection = self._connect_to_ldap() + if self.xorder_discovery == "enable" or (self.xorder_discovery == "auto" and not self._xorder_dn()): + # Try to find the X_ORDERed version of the DN + self.dn = self._find_dn() + else: + self.dn = self.module.params['dn'] + def fail(self, msg, exn): self.module.fail_json( msg=msg, @@ -64,10 +83,35 @@ class LdapGeneric(object): exception=traceback.format_exc() ) + def _find_dn(self): + dn = self.module.params['dn'] + + explode_dn = ldap.dn.explode_dn(dn) + + if len(explode_dn) > 1: + try: + escaped_value = ldap.filter.escape_filter_chars(explode_dn[0]) + filterstr = f"({escaped_value})" + dns = self.connection.search_s(','.join(explode_dn[1:]), + ldap.SCOPE_ONELEVEL, filterstr) + if len(dns) == 1: + dn, dummy = dns[0] + except Exception: + pass + + return dn + def _connect_to_ldap(self): if not self.verify_cert: 
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) + if self.ca_path: + ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path) + + if self.client_cert and self.client_key: + ldap.set_option(ldap.OPT_X_TLS_CERTFILE, self.client_cert) + ldap.set_option(ldap.OPT_X_TLS_KEYFILE, self.client_key) + connection = ldap.initialize(self.server_uri) if self.referrals_chasing == 'disabled': @@ -90,3 +134,10 @@ class LdapGeneric(object): self.fail("Cannot bind to the server.", e) return connection + + def _xorder_dn(self): + # match X_ORDERed DNs + regex = r".+\{\d+\}.+" + explode_dn = ldap.dn.explode_dn(self.module.params['dn']) + + return re.match(regex, explode_dn[0]) is not None diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py index 9d7c37e68d..3700082bd8 100644 --- a/plugins/module_utils/linode.py +++ b/plugins/module_utils/linode.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,16 +6,14 @@ # # Copyright (c), Luke Murphy @decentral1se # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +from ansible.module_utils.ansible_release import __version__ as ansible_version def get_user_agent(module): """Retrieve a user-agent to send with LinodeClient requests.""" - try: - from ansible.module_utils.ansible_release import __version__ as ansible_version - except ImportError: - ansible_version = 'unknown' - return 'Ansible-%s/%s' % (module, ansible_version) + return f'Ansible-{module}/{ansible_version}' diff --git a/plugins/module_utils/locale_gen.py b/plugins/module_utils/locale_gen.py new file mode 100644 index 0000000000..b8a48d320b --- /dev/null +++ b/plugins/module_utils/locale_gen.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def locale_runner(module): + runner = CmdRunner( + module, + command=["locale", "-a"], + check_rc=True, + ) + return runner + + +def locale_gen_runner(module): + runner = CmdRunner( + module, + command="locale-gen", + arg_formats=dict( + name=cmd_runner_fmt.as_list(), + purge=cmd_runner_fmt.as_fixed('--purge'), + ), + check_rc=True, + ) + return runner diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py index e393090799..cc8e05c0f0 100644 --- a/plugins/module_utils/lxd.py +++ b/plugins/module_utils/lxd.py @@ -1,33 +1,25 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Hiroaki Nakamura -# -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2016, Hiroaki Nakamura +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations +import http.client as http_client +import os import socket import ssl +import json +from urllib.parse import urlparse from ansible.module_utils.urls import generic_urlparse -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.six.moves import http_client from ansible.module_utils.common.text.converters import to_text # httplib/http.client connection using unix domain socket HTTPConnection = http_client.HTTPConnection HTTPSConnection = http_client.HTTPSConnection -import json - class UnixHTTPConnection(HTTPConnection): def __init__(self, path): @@ -47,7 +39,7 @@ class LXDClientException(Exception): class LXDClient(object): - def __init__(self, url, key_file=None, cert_file=None, debug=False): + def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True): """LXD Client. :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1) @@ -58,6 +50,10 @@ class LXDClient(object): :type cert_file: ``str`` :param debug: The debug flag. The request and response are stored in logs when debug is true. :type debug: ``bool`` + :param server_cert_file: The path of the server certificate file. + :type server_cert_file: ``str`` + :param server_check_hostname: Whether to check the server's hostname as part of TLS verification. 
+        :type server_check_hostname: ``bool``
         """
         self.url = url
         self.debug = debug
@@ -66,7 +62,11 @@ class LXDClient(object):
             self.cert_file = cert_file
             self.key_file = key_file
             parts = generic_urlparse(urlparse(self.url))
-            ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+            ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
+            if server_cert_file:
+                # Check that the received cert is signed by the provided server_cert_file
+                ctx.load_verify_locations(cafile=server_cert_file)
+            ctx.check_hostname = server_check_hostname
             ctx.load_cert_chain(cert_file, keyfile=key_file)
             self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
         elif url.startswith('unix:'):
@@ -75,11 +75,14 @@ class LXDClient(object):
         else:
             raise LXDClientException('URL scheme must be unix: or https:')

-    def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
+    def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None):
         resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
         if resp_json['type'] == 'async':
-            url = '{0}/wait'.format(resp_json['operation'])
+            url = f"{resp_json['operation']}/wait"
             resp_json = self._send_request('GET', url)
+            if wait_for_container:
+                while resp_json['metadata']['status'] == 'Running':
+                    resp_json = self._send_request('GET', url)
             if resp_json['metadata']['status'] != 'Success':
                 self._raise_err_from_json(resp_json)
         return resp_json
@@ -127,3 +130,11 @@ class LXDClient(object):
         if err is None:
             err = resp_json.get('error', None)
         return err
+
+
+def default_key_file():
+    return os.path.expanduser('~/.config/lxc/client.key')
+
+
+def default_cert_file():
+    return os.path.expanduser('~/.config/lxc/client.crt')
diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py
index 98e5590cc6..477fc9a326 100644
--- a/plugins/module_utils/manageiq.py
+++ b/plugins/module_utils/manageiq.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # Copyright (c) 2017, Daniel Korn
 #
@@ -8,10 +7,10 @@
 # still belong to the author of the module, and may assign their own license
 # to the complete work.
 #
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

 import os

@@ -94,12 +93,12 @@ class ManageIQ(object):
         ca_bundle_path = params['ca_cert']

         self._module = module
-        self._api_url = url + '/api'
+        self._api_url = f"{url}/api"
         self._auth = dict(user=username, password=password, token=token)
         try:
             self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
         except Exception as e:
-            self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))
+            self.module.fail_json(msg=f"failed to open connection ({url}): {e}")

     @property
     def module(self):
@@ -139,7 +138,7 @@ class ManageIQ(object):
         except ValueError:
             return None
         except Exception as e:
-            self.module.fail_json(msg="failed to find resource {error}".format(error=e))
+            self.module.fail_json(msg=f"failed to find resource {e}")
         return vars(entity)

     def find_collection_resource_or_fail(self, collection_name, **params):
@@ -152,6 +151,290 @@
         if resource:
             return resource
         else:
-            msg = "{collection_name} where {params} does not exist in manageiq".format(
-                collection_name=collection_name, params=str(params))
+            msg = f"{collection_name} where {params!s} does not exist in manageiq"
             self.module.fail_json(msg=msg)
+
+    def policies(self, resource_id, resource_type, resource_name):
+        manageiq = ManageIQ(self.module)
+
+        # query resource id, fail if resource does not exist
+        if resource_id is None:
+            resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
+
+        return ManageIQPolicies(manageiq, resource_type, resource_id)
+
+    def query_resource_id(self, resource_type, resource_name):
+        """ Query the resource name in ManageIQ.
+
+        Returns:
+            the resource ID if it exists in ManageIQ; fails the module otherwise.
+        """
+        resource = self.find_collection_resource_by(resource_type, name=resource_name)
+        if resource:
+            return resource["id"]
+        else:
+            msg = f"{resource_name} {resource_type} does not exist in manageiq"
+            self.module.fail_json(msg=msg)
+
+
+class ManageIQPolicies(object):
+    """
+    Object to execute policy management operations on manageiq resources.
+ """ + + def __init__(self, manageiq, resource_type, resource_id): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + self.resource_type = resource_type + self.resource_id = resource_id + self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' + + def query_profile_href(self, profile): + """ Add or Update the policy_profile href field + + Example: + {name: STR, ...} => {name: STR, href: STR} + """ + resource = self.manageiq.find_collection_resource_or_fail( + "policy_profiles", **profile) + return dict(name=profile['name'], href=resource['href']) + + def query_resource_profiles(self): + """ Returns a set of the profile objects objects assigned to the resource + """ + url = '{resource_url}/policy_profiles?expand=resources' + try: + response = self.client.get(url.format(resource_url=self.resource_url)) + except Exception as e: + msg = f"Failed to query {self.resource_type} policies: {e}" + self.module.fail_json(msg=msg) + + resources = response.get('resources', []) + + # clean the returned rest api profile object to look like: + # {profile_name: STR, profile_description: STR, policies: ARR} + profiles = [self.clean_profile_object(profile) for profile in resources] + + return profiles + + def query_profile_policies(self, profile_id): + """ Returns a set of the policy objects assigned to the resource + """ + url = '{api_url}/policy_profiles/{profile_id}?expand=policies' + try: + response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) + except Exception as e: + msg = f"Failed to query {self.resource_type} policies: {e}" + self.module.fail_json(msg=msg) + + resources = response.get('policies', []) + + # clean the returned rest api policy object to look like: + # {name: STR, description: STR, active: BOOL} + policies = [self.clean_policy_object(policy) for policy in resources] + + return policies + + def clean_policy_object(self, policy): + """ Clean a policy object to have human readable form of: + { + name: STR, + description: STR, + active: BOOL + } + """ + name = policy.get('name') + description = policy.get('description') + active = policy.get('active') + + return dict( + name=name, + description=description, + active=active) + + def clean_profile_object(self, profile): + """ Clean a profile object to have human readable form of: + { + profile_name: STR, + profile_description: STR, + policies: ARR + } + """ + profile_id = profile['id'] + name = profile.get('name') + description = profile.get('description') + policies = self.query_profile_policies(profile_id) + + return dict( + profile_name=name, + profile_description=description, + policies=policies) + + def profiles_to_update(self, profiles, action): + """ Create a list of policies we need to update in ManageIQ. + + Returns: + Whether or not a change took place and a message describing the + operation executed. + """ + profiles_to_post = [] + assigned_profiles = self.query_resource_profiles() + + # make a list of assigned full profile names strings + # e.g. ['openscap profile', ...] 
+        assigned_profiles_set = set(profile['profile_name'] for profile in assigned_profiles)
+
+        for profile in profiles:
+            assigned = profile.get('name') in assigned_profiles_set
+
+            if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
+                # add/update the policy profile href field
+                # {name: STR, ...} => {name: STR, href: STR}
+                profile = self.query_profile_href(profile)
+                profiles_to_post.append(profile)
+
+        return profiles_to_post
+
+    def assign_or_unassign_profiles(self, profiles, action):
+        """ Perform assign/unassign action
+        """
+        # get a list of profiles needed to be changed
+        profiles_to_post = self.profiles_to_update(profiles, action)
+        if not profiles_to_post:
+            return dict(
+                changed=False,
+                msg=f"Profiles {profiles} already {action}ed, nothing to do")
+
+        # try to assign or unassign profiles to resource
+        url = f'{self.resource_url}/policy_profiles'
+        try:
+            response = self.client.post(url, action=action, resources=profiles_to_post)
+        except Exception as e:
+            msg = f"Failed to {action} profile: {e}"
+            self.module.fail_json(msg=msg)
+
+        # check all entities in result to be successful
+        for result in response['results']:
+            if not result['success']:
+                msg = f"Failed to {action}: {result['message']}"
+                self.module.fail_json(msg=msg)
+
+        # successfully changed all needed profiles
+        return dict(
+            changed=True,
+            msg=f"Successfully {action}ed profiles: {profiles}")
+
+
+class ManageIQTags(object):
+    """
+    Object to execute tag management operations on manageiq resources.
+    """
+
+    def __init__(self, manageiq, resource_type, resource_id):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+
+        self.resource_type = resource_type
+        self.resource_id = resource_id
+        self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}'
+
+    def full_tag_name(self, tag):
+        """ Returns the full tag name in manageiq
+        """
+        return f"/managed/{tag['category']}/{tag['name']}"
+
+    def clean_tag_object(self, tag):
+        """ Clean a tag object to have human readable form of:
+        {
+            full_name: STR,
+            name: STR,
+            display_name: STR,
+            category: STR
+        }
+        """
+        full_name = tag.get('name')
+        categorization = tag.get('categorization', {})
+
+        return dict(
+            full_name=full_name,
+            name=categorization.get('name'),
+            display_name=categorization.get('display_name'),
+            category=categorization.get('category', {}).get('name'))
+
+    def query_resource_tags(self):
+        """ Returns a list of the tag objects assigned to the resource
+        """
+        url = '{resource_url}/tags?expand=resources&attributes=categorization'
+        try:
+            response = self.client.get(url.format(resource_url=self.resource_url))
+        except Exception as e:
+            msg = f"Failed to query {self.resource_type} tags: {e}"
+            self.module.fail_json(msg=msg)
+
+        resources = response.get('resources', [])
+
+        # clean the returned rest api tag object to look like:
+        # {full_name: STR, name: STR, display_name: STR, category: STR}
+        tags = [self.clean_tag_object(tag) for tag in resources]
+
+        return tags
+
+    def tags_to_update(self, tags, action):
+        """ Create a list of tags we need to update in ManageIQ.
+
+        Returns:
+            a list of tags to be posted to ManageIQ.
+        """
+        tags_to_post = []
+        assigned_tags = self.query_resource_tags()
+
+        # make a set of assigned full tag name strings
+        # e.g. ['/managed/environment/prod', ...]
+ assigned_tags_set = set(tag['full_name'] for tag in assigned_tags) + + for tag in tags: + assigned = self.full_tag_name(tag) in assigned_tags_set + + if assigned and action == 'unassign': + tags_to_post.append(tag) + elif (not assigned) and action == 'assign': + tags_to_post.append(tag) + + return tags_to_post + + def assign_or_unassign_tags(self, tags, action): + """ Perform assign/unassign action + """ + # get a list of tags needed to be changed + tags_to_post = self.tags_to_update(tags, action) + if not tags_to_post: + return dict( + changed=False, + msg=f"Tags already {action}ed, nothing to do") + + # try to assign or unassign tags to resource + url = f'{self.resource_url}/tags' + try: + response = self.client.post(url, action=action, resources=tags) + except Exception as e: + msg = f"Failed to {action} tag: {e}" + self.module.fail_json(msg=msg) + + # check all entities in result to be successful + for result in response['results']: + if not result['success']: + msg = f"Failed to {action}: {result['message']}" + self.module.fail_json(msg=msg) + + # successfully changed all needed tags + return dict( + changed=True, + msg=f"Successfully {action}ed tags") diff --git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py index 7813290a72..cbfbc9108a 100644 --- a/plugins/module_utils/memset.py +++ b/plugins/module_utils/memset.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,14 +6,15 @@ # # Copyright (c) 2018, Simon Weald # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url, urllib_error +from urllib.parse import urlencode +from ansible.module_utils.urls import open_url from ansible.module_utils.basic import json +import urllib.error as urllib_error class Response(object): @@ -25,6 +25,7 @@ class Response(object): def __init__(self): self.content = None self.status_code = None + self.stderr = None def json(self): return json.loads(self.content) @@ -54,7 +55,7 @@ def memset_api_call(api_key, api_method, payload=None): data = urlencode(payload) headers = {'Content-Type': 'application/x-www-form-urlencoded'} api_uri_base = 'https://api.memset.com/v1/json/' - api_uri = '{0}{1}/' . format(api_uri_base, api_method) + api_uri = f'{api_uri_base}{api_method}/' try: resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key) @@ -71,14 +72,18 @@ def memset_api_call(api_key, api_method, payload=None): response.status_code = errorcode if response.status_code is not None: - msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error']) + msg = f"Memset API returned a {response.status_code} response ({response.json()['error_type']}, {response.json()['error']})." else: - msg = "Memset API returned an error ({0}, {1})." . 
format(response.json()['error_type'], response.json()['error']) + msg = f"Memset API returned an error ({response.json()['error_type']}, {response.json()['error']})." + except urllib_error.URLError as e: + has_failed = True + msg = f"An URLError occurred ({type(e)})." + response.stderr = f"{e}" if msg is None: msg = response.json() - return(has_failed, msg, response) + return has_failed, msg, response def check_zone_domain(data, domain): @@ -92,7 +97,7 @@ def check_zone_domain(data, domain): if zone_domain['domain'] == domain: exists = True - return(exists) + return exists def check_zone(data, name): @@ -109,7 +114,7 @@ def check_zone(data, name): if counter == 1: exists = True - return(exists, counter) + return exists, counter def get_zone_id(zone_name, current_zones): @@ -135,4 +140,4 @@ def get_zone_id(zone_name, current_zones): zone_id = None msg = 'Zone ID could not be returned as duplicate zone names were detected' - return(zone_exists, msg, counter, zone_id) + return zone_exists, msg, counter, zone_id diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index 90c228b306..688d65fc35 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE @@ -14,6 +13,9 @@ from ansible_collections.community.general.plugins.module_utils.mh.deco import m class ModuleHelperBase(object): module = None ModuleHelperException = _MHE + _delegated_to_module = ( + 'check_mode', 'get_bin_path', 'warn', 'deprecate', 'debug', + ) def __init__(self, module=None): self._changed = False @@ -24,6 +26,22 @@ class ModuleHelperBase(object): if not isinstance(self.module, AnsibleModule): self.module = AnsibleModule(**self.module) + @property + def diff_mode(self): + return self.module._diff + + @property + def verbosity(self): + return self.module._verbosity + + def do_raise(self, *args, **kwargs): + raise _MHE(*args, **kwargs) + + def __getattr__(self, attr): + if attr in self._delegated_to_module: + return getattr(self.module, attr) + raise AttributeError(f"ModuleHelperBase has no attribute '{attr}'") + def __init_module__(self): pass diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index 91f0d97744..0be576ccfa 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback from functools import wraps 
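# ---- editor's example (illustrative sketch, not part of this patch) ----
# The hunk below rewrites cause_changes() to take a single `when` argument.
# MyHelper is a hypothetical ModuleHelperBase subclass showing the intended
# usage of the three supported values.
class MyHelper(ModuleHelperBase):
    @cause_changes(when="success")
    def state_present(self):
        ...  # changed=True only if this returns without raising

    @cause_changes(when="failure")
    def state_checked(self):
        ...  # changed=True only if this raises

    @cause_changes(when="always")
    def state_absent(self):
        ...  # changed=True whether or not this raises
# ---- end example ----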
@@ -12,23 +11,21 @@ from functools import wraps from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -def cause_changes(on_success=None, on_failure=None): - +def cause_changes(when=None): def deco(func): - if on_success is None and on_failure is None: - return func - @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(self, *args, **kwargs): try: - self = args[0] - func(*args, **kwargs) - if on_success is not None: - self.changed = on_success + func(self, *args, **kwargs) + if when == "success": + self.changed = True except Exception: - if on_failure is not None: - self.changed = on_failure + if when == "failure": + self.changed = True raise + finally: + if when == "always": + self.changed = True return wrapper @@ -36,19 +33,61 @@ def cause_changes(on_success=None, on_failure=None): def module_fails_on_exception(func): + conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') + @wraps(func) def wrapper(self, *args, **kwargs): + def fix_key(k): + return k if k not in conflict_list else f"_{k}" + + def fix_var_conflicts(output): + result = {fix_key(k): v for k, v in output.items()} + return result + try: func(self, *args, **kwargs) - except SystemExit: - raise except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) + # patchy solution to resolve conflict with output variables + output = fix_var_conflicts(self.output) self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) + output=self.output, vars=self.vars.output(), **output) except Exception as e: - msg = "Module failed with exception: {0}".format(str(e).strip()) + # patchy solution to resolve conflict with output variables + output = fix_var_conflicts(self.output) + msg = f"Module failed with exception: {str(e).strip()}" self.module.fail_json(msg=msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) + output=self.output, vars=self.vars.output(), **output) return wrapper + + +def check_mode_skip(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + if not self.module.check_mode: + return func(self, *args, **kwargs) + + return wrapper + + +def check_mode_skip_returns(callable=None, value=None): + + def deco(func): + if callable is not None: + @wraps(func) + def wrapper_callable(self, *args, **kwargs): + if self.module.check_mode: + return callable(self, *args, **kwargs) + return func(self, *args, **kwargs) + return wrapper_callable + + else: + @wraps(func) + def wrapper_value(self, *args, **kwargs): + if self.module.check_mode: + return value + return func(self, *args, **kwargs) + return wrapper_value + + return deco diff --git a/plugins/module_utils/mh/exceptions.py b/plugins/module_utils/mh/exceptions.py index 558dcca05f..94bb7d7fff 100644 --- a/plugins/module_utils/mh/exceptions.py +++ b/plugins/module_utils/mh/exceptions.py @@ -1,22 +1,17 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations + +from ansible.module_utils.common.text.converters import 
to_native class ModuleHelperException(Exception): - @staticmethod - def _get_remove(key, kwargs): - if key in kwargs: - result = kwargs[key] - del kwargs[key] - return result - return None - - def __init__(self, *args, **kwargs): - self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) - self.update_output = self._get_remove('update_output', kwargs) or {} + def __init__(self, msg, update_output=None, *args, **kwargs): + self.msg = to_native(msg or f"Module failed with exception: {self}") + if update_output is None: + update_output = {} + self.update_output = update_output super(ModuleHelperException, self).__init__(*args) diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py deleted file mode 100644 index 88b89d159b..0000000000 --- a/plugins/module_utils/mh/mixins/cmd.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from functools import partial - - -class ArgFormat(object): - """ - Argument formatter for use as a command line parameter. Used in CmdMixin. - """ - BOOLEAN = 0 - PRINTF = 1 - FORMAT = 2 - BOOLEAN_NOT = 3 - - @staticmethod - def stars_deco(num): - if num == 1: - def deco(f): - return lambda v: f(*v) - return deco - elif num == 2: - def deco(f): - return lambda v: f(**v) - return deco - - return lambda f: f - - def __init__(self, name, fmt=None, style=FORMAT, stars=0): - """ - Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for - the CLI command execution. - :param name: Name of the argument to be formatted - :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that - :param style: Whether arg_format (as str) should use printf-style formatting. - Ignored if arg_format is None or not a str (should be callable). - :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value - """ - def printf_fmt(_fmt, v): - try: - return [_fmt % v] - except TypeError as e: - if e.args[0] != 'not all arguments converted during string formatting': - raise - return [_fmt] - - _fmts = { - ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), - ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]), - ArgFormat.PRINTF: printf_fmt, - ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], - } - - self.name = name - self.stars = stars - self.style = style - - if fmt is None: - fmt = "{0}" - style = ArgFormat.FORMAT - - if isinstance(fmt, str): - func = _fmts[style] - self.arg_format = partial(func, fmt) - elif isinstance(fmt, list) or isinstance(fmt, tuple): - self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] - elif hasattr(fmt, '__call__'): - self.arg_format = fmt - else: - raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' - 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) - - if stars: - self.arg_format = (self.stars_deco(stars))(self.arg_format) - - def to_text(self, value): - if value is None and self.style != ArgFormat.BOOLEAN_NOT: - return [] - func = self.arg_format - return [str(p) for p in func(value)] - - -class CmdMixin(object): - """ - Mixin for mapping module options to running a CLI command with its arguments. 
- """ - command = None - command_args_formats = {} - run_command_fixed_options = {} - check_rc = False - force_lang = "C" - - @property - def module_formats(self): - result = {} - for param in self.module.params.keys(): - result[param] = ArgFormat(param) - return result - - @property - def custom_formats(self): - result = {} - for param, fmt_spec in self.command_args_formats.items(): - result[param] = ArgFormat(param, **fmt_spec) - return result - - def _calculate_args(self, extra_params=None, params=None): - def add_arg_formatted_param(_cmd_args, arg_format, _value): - args = list(arg_format.to_text(_value)) - return _cmd_args + args - - def find_format(_param): - return self.custom_formats.get(_param, self.module_formats.get(_param)) - - extra_params = extra_params or dict() - cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) - try: - cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) - except ValueError: - pass - param_list = params if params else self.vars.keys() - - for param in param_list: - if isinstance(param, dict): - if len(param) != 1: - raise self.ModuleHelperException("run_command parameter as a dict must " - "contain only one key: {0}".format(param)) - _param = list(param.keys())[0] - fmt = find_format(_param) - value = param[_param] - elif isinstance(param, str): - if param in self.vars.keys(): - fmt = find_format(param) - value = self.vars[param] - elif param in extra_params: - fmt = find_format(param) - value = extra_params[param] - else: - raise self.ModuleHelperException('Cannot determine value for parameter: {0}'.format(param)) - else: - raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param)) - cmd_args = add_arg_formatted_param(cmd_args, fmt, value) - - return cmd_args - - def process_command_output(self, rc, out, err): - return rc, out, err - - def run_command(self, - extra_params=None, - params=None, - process_output=None, - publish_rc=True, - publish_out=True, - publish_err=True, - publish_cmd=True, - *args, **kwargs): - cmd_args = self._calculate_args(extra_params, params) - options = dict(self.run_command_fixed_options) - options['check_rc'] = options.get('check_rc', self.check_rc) - options.update(kwargs) - env_update = dict(options.get('environ_update', {})) - if self.force_lang: - env_update.update({ - 'LANGUAGE': self.force_lang, - 'LC_ALL': self.force_lang, - }) - self.update_output(force_lang=self.force_lang) - options['environ_update'] = env_update - rc, out, err = self.module.run_command(cmd_args, *args, **options) - if publish_rc: - self.update_output(rc=rc) - if publish_out: - self.update_output(stdout=out) - if publish_err: - self.update_output(stderr=err) - if publish_cmd: - self.update_output(cmd_args=cmd_args) - if process_output is None: - _process = self.process_command_output - else: - _process = process_output - - return _process(rc, out, err) diff --git a/plugins/module_utils/mh/mixins/deprecate_attrs.py b/plugins/module_utils/mh/mixins/deprecate_attrs.py new file mode 100644 index 0000000000..166e365782 --- /dev/null +++ b/plugins/module_utils/mh/mixins/deprecate_attrs.py @@ -0,0 +1,60 @@ +# (c) 2020, Alexei Znamensky +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + + +from ansible.module_utils.basic import AnsibleModule + + +class DeprecateAttrsMixin(object): + + 
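    # In broad strokes: _deprecate_setup() below works out which object owns
    # the attribute and which AnsibleModule instance should emit the warning,
    # creating the bookkeeping dicts on first use; _deprecate_attr() then
    # replaces the attribute with a property whose getter and setter trigger
    # module.deprecate() at most once per attribute.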
def _deprecate_setup(self, attr, target, module): + if target is None: + target = self + if not hasattr(target, attr): + raise ValueError(f"Target {target} has no attribute {attr}") + if module is None: + if isinstance(target, AnsibleModule): + module = target + elif hasattr(target, "module") and isinstance(target.module, AnsibleModule): + module = target.module + else: + raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.") + + # setup internal state dicts + value_attr = "__deprecated_attr_value" + trigger_attr = "__deprecated_attr_trigger" + if not hasattr(target, value_attr): + setattr(target, value_attr, {}) + if not hasattr(target, trigger_attr): + setattr(target, trigger_attr, {}) + value_dict = getattr(target, value_attr) + trigger_dict = getattr(target, trigger_attr) + return target, module, value_dict, trigger_dict + + def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None): + target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module) + + value_dict[attr] = getattr(target, attr, value) + trigger_dict[attr] = False + + def _trigger(): + if not trigger_dict[attr]: + module.deprecate(msg, version=version, date=date, collection_name=collection_name) + trigger_dict[attr] = True + + def _getter(_self): + _trigger() + return value_dict[attr] + + def _setter(_self, new_value): + _trigger() + value_dict[attr] = new_value + + # override attribute + prop = property(_getter) + setattr(target, attr, prop) + setattr(target, f"_{attr}_setter", prop.setter(_setter)) diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py deleted file mode 100644 index 1c6c9ae484..0000000000 --- a/plugins/module_utils/mh/mixins/deps.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import traceback - -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase -from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception - - -class DependencyCtxMgr(object): - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) - - -class DependencyMixin(ModuleHelperBase): - _dependencies = [] - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return cls._dependencies[-1] - - def fail_on_missing_deps(self): - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - **self.output) - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - super(DependencyMixin, self).run() diff --git a/plugins/module_utils/mh/mixins/state.py b/plugins/module_utils/mh/mixins/state.py index 
b946090ac9..a04c3b1386 100644 --- a/plugins/module_utils/mh/mixins/state.py +++ b/plugins/module_utils/mh/mixins/state.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class StateMixin(object): @@ -16,7 +15,7 @@ return self.default_state if state is None else state def _method(self, state): - return "{0}_{1}".format(self.state_param, state) + return f"{self.state_param}_{state}" def __run__(self): state = self._state() @@ -36,4 +35,4 @@ return func() def __state_fallback__(self): - raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) + raise ValueError(f"Cannot find method: {self._method(self._state())}") diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py deleted file mode 100644 index a11110ed60..0000000000 --- a/plugins/module_utils/mh/mixins/vars.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import copy - - -class VarMeta(object): - NOTHING = object() - - def __init__(self, diff=False, output=True, change=None, fact=False): - self.init = False - self.initial_value = None - self.value = None - - self.diff = diff - self.change = diff if change is None else change - self.output = output - self.fact = fact - - def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): - if diff is not None: - self.diff = diff - if output is not None: - self.output = output - if change is not None: - self.change = change - if fact is not None: - self.fact = fact - if initial_value is not self.NOTHING: - self.initial_value = copy.deepcopy(initial_value) - - def set_value(self, value): - if not self.init: - self.initial_value = copy.deepcopy(value) - self.init = True - self.value = value - return self - - @property - def has_changed(self): - return self.change and (self.initial_value != self.value) - - @property - def diff_result(self): - return None if not (self.diff and self.has_changed) else { - 'before': self.initial_value, - 'after': self.value, - } - - def __str__(self): - return "<VarMeta: value={0}, initial={1}, diff={2}, output={3}, change={4}>".format( - self.value, self.initial_value, self.diff, self.output, self.change - ) - - -class VarDict(object): - def __init__(self): - self._data = dict() - self._meta = dict() - - def __getitem__(self, item): - return self._data[item] - - def __setitem__(self, key, value): - self.set(key, value) - - def __getattr__(self, item): - try: - return self._data[item] - except KeyError: - return getattr(self._data, item) - - def __setattr__(self, key, value): - if key in ('_data', '_meta'): - super(VarDict, self).__setattr__(key, value) - else: - self.set(key, value) - - def meta(self, name): - return self._meta[name] - - def set_meta(self, name, **kwargs): - self.meta(name).set(**kwargs) - - def set(self, name, value, **kwargs): - if
name in ('_data', '_meta'): - raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") - self._data[name] = value - if name in self._meta: - meta = self.meta(name) - else: - meta = VarMeta(**kwargs) - meta.set_value(value) - self._meta[name] = meta - - def output(self): - return dict((k, v) for k, v in self._data.items() if self.meta(k).output) - - def diff(self): - diff_results = [(k, self.meta(k).diff_result) for k in self._data] - diff_results = [dr for dr in diff_results if dr[1] is not None] - if diff_results: - before = dict((dr[0], dr[1]['before']) for dr in diff_results) - after = dict((dr[0], dr[1]['after']) for dr in diff_results) - return {'before': before, 'after': after} - return None - - def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) - return facts_result if facts_result else None - - def change_vars(self): - return [v for v in self._data if self.meta(v).change] - - def has_changed(self, v): - return self._meta[v].has_changed - - -class VarsMixin(object): - - def __init__(self, module=None): - self.vars = VarDict() - super(VarsMixin, self).__init__(module) - - def update_vars(self, meta=None, **kwargs): - if meta is None: - meta = {} - for k, v in kwargs.items(): - self.vars.set(k, v, **meta) diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index b27b60df9a..fdce99045c 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -1,32 +1,30 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# (c) 2020-2024, Alexei Znamensky +# Copyright (c) 2020-2024, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations -from __future__ import absolute_import, division, print_function -__metaclass__ = type from ansible.module_utils.common.dict_transformations import dict_merge -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin +from ansible_collections.community.general.plugins.module_utils.vardict import VarDict +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin -class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase): - _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') +class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase): facts_name = None output_params = () diff_params = () change_params = () facts_params = () - VarDict = _VD # for backward compatibility, will be deprecated at some point - def __init__(self, module=None): super(ModuleHelper, self).__init__(module) + + self.vars = VarDict() for name, value in self.module.params.items(): self.vars.set( name, value, @@ 
-36,6 +34,12 @@ class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase): fact=name in self.facts_params, ) + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) + def update_output(self, **kwargs): self.update_vars(meta={"output": True}, **kwargs) @@ -43,7 +47,7 @@ class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase): self.update_vars(meta={"fact": True}, **kwargs) def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) + return self.vars.has_changed def has_changed(self): return self.changed or self._vars_changed() @@ -55,25 +59,13 @@ class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase): facts = self.vars.facts() if facts is not None: result['ansible_facts'] = {self.facts_name: facts} - if self.module._diff: + if self.diff_mode: diff = result.get('diff', {}) vars_diff = self.vars.diff() or {} result['diff'] = dict_merge(dict(diff), vars_diff) - for varname in result: - if varname in self._output_conflict_list: - result["_" + varname] = result[varname] - del result[varname] return result class StateModuleHelper(StateMixin, ModuleHelper): pass - - -class CmdModuleHelper(CmdMixin, ModuleHelper): - pass - - -class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): - pass diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index a6b35bdd33..f5c6275741 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -1,18 +1,16 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +# pylint: disable=unused-import from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( - ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule + ModuleHelper, StateModuleHelper, +) +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.deco import ( + cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr -from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict diff --git a/plugins/module_utils/net_tools/pritunl/api.py b/plugins/module_utils/net_tools/pritunl/api.py index 4dffe2b626..7d6bd7fe86 100644 --- a/plugins/module_utils/net_tools/pritunl/api.py +++ b/plugins/module_utils/net_tools/pritunl/api.py @@ -1,12 +1,12 @@ -# -*- 
coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later """ Pritunl API that offers CRUD operations on Pritunl Organizations and Users """ -from __future__ import absolute_import, division, print_function +from __future__ import annotations import base64 import hashlib @@ -15,11 +15,8 @@ import json import time import uuid -from ansible.module_utils.six import iteritems from ansible.module_utils.urls import open_url -__metaclass__ = type - class PritunlException(Exception): pass @@ -65,7 +62,7 @@ def _delete_pritunl_organization( api_token=api_token, api_secret=api_secret, method="DELETE", - path="/organization/%s" % (organization_id), + path=f"/organization/{organization_id}", validate_certs=validate_certs, ) @@ -78,7 +75,7 @@ def _post_pritunl_organization( api_secret=api_secret, base_url=base_url, method="POST", - path="/organization/%s", + path="/organization", headers={"Content-Type": "application/json"}, data=json.dumps(organization_data), validate_certs=validate_certs, @@ -93,7 +90,7 @@ def _get_pritunl_users( api_secret=api_secret, base_url=base_url, method="GET", - path="/user/%s" % organization_id, + path=f"/user/{organization_id}", validate_certs=validate_certs, ) @@ -106,7 +103,7 @@ def _delete_pritunl_user( api_secret=api_secret, base_url=base_url, method="DELETE", - path="/user/%s/%s" % (organization_id, user_id), + path=f"/user/{organization_id}/{user_id}", validate_certs=validate_certs, ) @@ -119,7 +116,7 @@ def _post_pritunl_user( api_secret=api_secret, base_url=base_url, method="POST", - path="/user/%s" % organization_id, + path=f"/user/{organization_id}", headers={"Content-Type": "application/json"}, data=json.dumps(user_data), validate_certs=validate_certs, @@ -140,7 +137,7 @@ def _put_pritunl_user( api_secret=api_secret, base_url=base_url, method="PUT", - path="/user/%s/%s" % (organization_id, user_id), + path=f"/user/{organization_id}/{user_id}", headers={"Content-Type": "application/json"}, data=json.dumps(user_data), validate_certs=validate_certs, @@ -169,7 +166,7 @@ def list_pritunl_organizations( else: if not any( filter_val != org[filter_key] - for filter_key, filter_val in iteritems(filters) + for filter_key, filter_val in filters.items() ): orgs.append(org) @@ -200,7 +197,7 @@ def list_pritunl_users( else: if not any( filter_val != user[filter_key] - for filter_key, filter_val in iteritems(filters) + for filter_key, filter_val in filters.items() ): users.append(user) @@ -219,12 +216,12 @@ def post_pritunl_organization( api_secret=api_secret, base_url=base_url, organization_data={"name": organization_name}, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not add organization %s to Pritunl" % (organization_name) + f"Could not add organization {organization_name} to Pritunl" ) # The user PUT request returns the updated user object return json.loads(response.read()) @@ -247,13 +244,12 @@ def post_pritunl_user( base_url=base_url, organization_id=organization_id, user_data=user_data, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not 
add user to organization {organization_id} in Pritunl" ) # user POST request returns an array of a single item, # so return this item instead of the list @@ -266,13 +262,12 @@ organization_id=organization_id, user_data=user_data, user_id=user_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not update user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not update user {user_id} from organization {organization_id} from Pritunl" ) # The user PUT request returns the updated user object return json.loads(response.read()) @@ -286,12 +281,12 @@ def delete_pritunl_organization( api_secret=api_secret, base_url=base_url, organization_id=organization_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove organization %s from Pritunl" % (organization_id) + f"Could not remove organization {organization_id} from Pritunl" ) return json.loads(response.read()) @@ -306,13 +301,12 @@ def delete_pritunl_user( base_url=base_url, organization_id=organization_id, user_id=user_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not remove user {user_id} from organization {organization_id} from Pritunl" ) return json.loads(response.read()) @@ -330,15 +324,12 @@ def pritunl_auth_request( ): """ Send an API call to a Pritunl server. - Taken from https://pritunl.com/api and adaped work with Ansible open_url + Taken from https://pritunl.com/api and adapted to work with Ansible open_url """ auth_timestamp = str(int(time.time())) auth_nonce = uuid.uuid4().hex - auth_string = "&".join( - [api_token, auth_timestamp, auth_nonce, method.upper(), path] - + ([data] if data else []) - ) + auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}" auth_signature = base64.b64encode( hmac.new( @@ -357,7 +348,7 @@ auth_headers.update(headers) try: - uri = "%s%s" % (base_url, path) + uri = f"{base_url}{path}" return open_url( uri, diff --git a/plugins/module_utils/ocapi_utils.py b/plugins/module_utils/ocapi_utils.py new file mode 100644 index 0000000000..fd606d9bcc --- /dev/null +++ b/plugins/module_utils/ocapi_utils.py @@ -0,0 +1,491 @@ +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import json +import os +import uuid +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse + +from ansible.module_utils.urls import open_url +from ansible.module_utils.common.text.converters import to_native + + +GET_HEADERS = {'accept': 'application/json'} +PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'} +POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'} +DELETE_HEADERS = {'accept': 'application/json'} + +HEALTH_OK = 5 + + +class OcapiUtils(object): + + def __init__(self, creds, base_uri, proxy_slot_number, timeout, module): + self.root_uri = base_uri + self.proxy_slot_number = proxy_slot_number + self.creds = creds +
self.timeout = timeout + self.module = module + + def _auth_params(self): + """ + Return tuple of required authentication params based on the username and password. + + :return: tuple of username, password + """ + username = self.creds['user'] + password = self.creds['pswd'] + force_basic_auth = True + return username, password, force_basic_auth + + def get_request(self, uri): + req_headers = dict(GET_HEADERS) + username, password, basic_auth = self._auth_params() + try: + resp = open_url(uri, method="GET", headers=req_headers, + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + data = json.loads(to_native(resp.read())) + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on GET request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on GET request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed GET request to '{uri}': '{e}'"} + return {'ret': True, 'data': data, 'headers': headers} + + def delete_request(self, uri, etag=None): + req_headers = dict(DELETE_HEADERS) + if etag is not None: + req_headers['If-Match'] = etag + username, password, basic_auth = self._auth_params() + try: + resp = open_url(uri, method="DELETE", headers=req_headers, + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + if resp.status != 204: + data = json.loads(to_native(resp.read())) + else: + data = "" + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on DELETE request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on DELETE request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed DELETE request to '{uri}': '{e}'"} + return {'ret': True, 'data': data, 'headers': headers} + + def put_request(self, uri, payload, etag=None): + req_headers = dict(PUT_HEADERS) + if etag is not None: + req_headers['If-Match'] = etag + username, password, basic_auth = self._auth_params() + try: + resp = open_url(uri, data=json.dumps(payload), + headers=req_headers, method="PUT", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on PUT request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on PUT request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed PUT request to '{uri}': '{e}'"} + return {'ret': True, 'headers': headers, 'resp': resp} + + def post_request(self, uri, payload, content_type="application/json", timeout=None): + req_headers = dict(POST_HEADERS) + if content_type != "application/json": + req_headers["content-type"] = content_type + username, password, basic_auth = self._auth_params() + if 
content_type == "application/json": + request_data = json.dumps(payload) + else: + request_data = payload + try: + resp = open_url(uri, data=request_data, + headers=req_headers, method="POST", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout if timeout is None else timeout) + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on POST request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on POST request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed POST request to '{uri}': '{e}'"} + return {'ret': True, 'headers': headers, 'resp': resp} + + def get_uri_with_slot_number_query_param(self, uri): + """Return the URI with proxy slot number added as a query param, if there is one. + + If a proxy slot number is provided, to access it, we must append it as a query parameter. + This method returns the given URI with the slotnumber query param added, if there is one. + If there is not a proxy slot number, it just returns the URI as it was passed in. + """ + if self.proxy_slot_number is not None: + parsed_url = urlparse(uri) + return parsed_url._replace(query=f"slotnumber={self.proxy_slot_number}").geturl() + else: + return uri + + def manage_system_power(self, command): + """Process a command to manage the system power. + + :param str command: The Ansible command being processed. + """ + if command == "PowerGracefulRestart": + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + + # Get the resource so that we have the Etag + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + + # Issue the PUT to do the reboot (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'Reboot': True} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + elif command.startswith("PowerMode"): + return self.manage_power_mode(command) + else: + return {'ret': False, 'msg': f"Invalid command: {command}"} + + return {'ret': True} + + def manage_chassis_indicator_led(self, command): + """Process a command to manage the chassis indicator LED. + + :param string command: The Ansible command being processed. + """ + return self.manage_indicator_led(command, self.root_uri) + + def manage_indicator_led(self, command, resource_uri=None): + """Process a command to manage an indicator LED. + + :param string command: The Ansible command being processed. + :param string resource_uri: URI of the resource whose indicator LED is being managed. 
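+        Supported commands are "IndicatorLedOn" and "IndicatorLedOff", which map
+        to the OCAPI IndicatorLED IDs 2 and 4 below; any other command produces
+        a ret=False result with an "Invalid command" message.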
+ """ + key = "IndicatorLED" + if resource_uri is None: + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + + payloads = { + 'IndicatorLedOn': { + 'ID': 2 + }, + 'IndicatorLedOff': { + 'ID': 4 + } + } + + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + data = response['data'] + if key not in data: + return {'ret': False, 'msg': f"Key {key} not found"} + if 'ID' not in data[key]: + return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'} + + if command in payloads.keys(): + # See if the LED is already set as requested. + current_led_status = data[key]['ID'] + if current_led_status == payloads[command]['ID']: + return {'ret': True, 'changed': False} + + # Set the LED (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'IndicatorLED': payloads[command]} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return {'ret': True} + + def manage_power_mode(self, command): + key = "PowerState" + resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri) + + payloads = { + "PowerModeNormal": 2, + "PowerModeLow": 4 + } + + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + data = response['data'] + if key not in data: + return {'ret': False, 'msg': f"Key {key} not found"} + if 'ID' not in data[key]: + return {'ret': False, 'msg': 'PowerState for resource has no ID.'} + + if command in payloads.keys(): + # See if the PowerState is already set as requested. + current_power_state = data[key]['ID'] + if current_power_state == payloads[command]: + return {'ret': True, 'changed': False} + + # Set the Power State (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'PowerState': {"ID": payloads[command]}} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': f"Invalid command: {command}"} + + return {'ret': True} + + def prepare_multipart_firmware_upload(self, filename): + """Prepare a multipart/form-data body for OCAPI firmware upload. + + :arg filename: The name of the file to upload. + :returns: tuple of (content_type, body) where ``content_type`` is + the ``multipart/form-data`` ``Content-Type`` header including + ``boundary`` and ``body`` is the prepared bytestring body + + Prepares the body to include "FirmwareFile" field with the contents of the file. + Because some OCAPI targets do not support Base-64 encoding for multipart/form-data, + this method sends the file as binary. 
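+        The assembled body has this layout (CRLF line endings):
+
+            --<boundary>
+            Content-Disposition: form-data; name="FirmwareFile"; filename="<basename>"
+            Content-Type: application/octet-stream
+
+            <raw file bytes>
+            --<boundary>--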
+ """ + boundary = str(uuid.uuid4()) # Generate a random boundary + body = f"--{boundary}\r\n" + body += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{to_native(os.path.basename(filename))}"\r\n' + body += 'Content-Type: application/octet-stream\r\n\r\n' + body_bytes = bytearray(body, 'utf-8') + with open(filename, 'rb') as f: + body_bytes += f.read() + body_bytes += bytearray(f"\r\n--{boundary}--", 'utf-8') + return (f"multipart/form-data; boundary={boundary}", + body_bytes) + + def upload_firmware_image(self, update_image_path): + """Perform Firmware Upload to the OCAPI storage device. + + :param str update_image_path: The path/filename of the firmware image, on the local filesystem. + """ + if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)): + return {'ret': False, 'msg': 'File does not exist.'} + url = f"{self.root_uri}OperatingSystem" + url = self.get_uri_with_slot_number_query_param(url) + content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path) + + # Post the firmware (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + result = self.post_request(url, b_form_data, content_type=content_type, timeout=300) + if result['ret'] is False: + return result + return {'ret': True} + + def update_firmware_image(self): + """Perform a Firmware Update on the OCAPI storage device.""" + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + # We have to do a GET to obtain the Etag. It's required on the PUT. + response = self.get_request(resource_uri) + if response['ret'] is False: + return response + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + + # Issue the PUT (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'FirmwareUpdate': True} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + + return {'ret': True, 'jobUri': response["headers"]["location"]} + + def activate_firmware_image(self): + """Perform a Firmware Activate on the OCAPI storage device.""" + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + # We have to do a GET to obtain the Etag. It's required on the PUT. + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + + # Issue the PUT (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'FirmwareActivate': True} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + + return {'ret': True, 'jobUri': response["headers"]["location"]} + + def get_job_status(self, job_uri): + """Get the status of a job. + + :param str job_uri: The URI of the job's status monitor. 
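+        :return: dict with keys ret, percentComplete, operationStatus,
+            operationStatusId, operationHealth, operationHealthId, details and
+            jobExists. A 404 from the status monitor is translated into a
+            synthetic "Not Available" result with percentComplete 0 and
+            jobExists False instead of a failure.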
+ """ + job_uri = self.get_uri_with_slot_number_query_param(job_uri) + response = self.get_request(job_uri) + if response['ret'] is False: + if response.get('status') == 404: + # Job not found -- assume 0% + return { + "ret": True, + "percentComplete": 0, + "operationStatus": "Not Available", + "operationStatusId": 1, + "operationHealth": None, + "operationHealthId": None, + "details": "Job does not exist.", + "jobExists": False + } + else: + return response + details = response["data"]["Status"].get("Details") + if isinstance(details, str): + details = [details] + health_list = response["data"]["Status"]["Health"] + return_value = { + "ret": True, + "percentComplete": response["data"]["PercentComplete"], + "operationStatus": response["data"]["Status"]["State"]["Name"], + "operationStatusId": response["data"]["Status"]["State"]["ID"], + "operationHealth": health_list[0]["Name"] if len(health_list) > 0 else None, + "operationHealthId": health_list[0]["ID"] if len(health_list) > 0 else None, + "details": details, + "jobExists": True + } + return return_value + + def delete_job(self, job_uri): + """Delete the OCAPI job referenced by the specified job_uri.""" + job_uri = self.get_uri_with_slot_number_query_param(job_uri) + # We have to do a GET to obtain the Etag. It's required on the DELETE. + response = self.get_request(job_uri) + + if response['ret'] is True: + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + else: + etag = response['headers']['etag'] + + if response['data']['PercentComplete'] != 100: + return { + 'ret': False, + 'changed': False, + 'msg': 'Cannot delete job because it is in progress.' + } + + if response['ret'] is False: + if response['status'] == 404: + return { + 'ret': True, + 'changed': False, + 'msg': 'Job already deleted.' + } + return response + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + + # Do the DELETE (unless we are in check mode) + response = self.delete_request(job_uri, etag) + if response['ret'] is False: + if response['status'] == 404: + return { + 'ret': True, + 'changed': False + } + elif response['status'] == 409: + return { + 'ret': False, + 'changed': False, + 'msg': 'Cannot delete job because it is in progress.' + } + return response + return { + 'ret': True, + 'changed': True + } diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py index 5f65b670f3..1c9cb73d73 100644 --- a/plugins/module_utils/oneandone.py +++ b/plugins/module_utils/oneandone.py @@ -1,14 +1,8 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import time @@ -215,7 +209,7 @@ def wait_for_resource_creation_completion(oneandone_conn, (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')): return elif resource_state.lower() == 'failed': - raise Exception('%s creation failed for %s' % (resource_type, resource_id)) + raise Exception(f'{resource_type} creation failed for {resource_id}') elif resource_state.lower() in ('active', 'enabled', 'deploying', @@ -223,10 +217,10 @@ def wait_for_resource_creation_completion(oneandone_conn, continue else: raise Exception( - 'Unknown %s state %s' % (resource_type, resource_state)) + f'Unknown {resource_type} state {resource_state}') raise Exception( - 'Timed out waiting for %s completion for %s' % (resource_type, resource_id)) + f'Timed out waiting for {resource_type} completion for {resource_id}') def wait_for_resource_deletion_completion(oneandone_conn, @@ -252,7 +246,7 @@ def wait_for_resource_deletion_completion(oneandone_conn, _type = 'PRIVATENETWORK' else: raise Exception( - 'Unsupported wait_for delete operation for %s resource' % resource_type) + f'Unsupported wait_for delete operation for {resource_type} resource') for log in logs: if (log['resource']['id'] == resource_id and @@ -261,4 +255,4 @@ def wait_for_resource_deletion_completion(oneandone_conn, log['status']['state'] == 'OK'): return raise Exception( - 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id)) + f'Timed out waiting for {resource_type} deletion for {resource_id}') diff --git a/plugins/module_utils/onepassword.py b/plugins/module_utils/onepassword.py new file mode 100644 index 0000000000..5e52a9af41 --- /dev/null +++ b/plugins/module_utils/onepassword.py @@ -0,0 +1,29 @@ +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import os + + +class OnePasswordConfig(object): + _config_file_paths = ( + "~/.op/config", + "~/.config/op/config", + "~/.config/.op/config", + ) + + def __init__(self): + self._config_file_path = "" + + @property + def config_file_path(self): + if self._config_file_path: + return self._config_file_path + + for path in self._config_file_paths: + realpath = os.path.expanduser(path) + if os.path.exists(realpath): + self._config_file_path = realpath + return self._config_file_path diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 6d786b0b80..1f57355f58 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,16 +6,16 @@ # # Copyright (2016-2017) Hewlett Packard Enterprise Development LP # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import abc import collections import json -import os import traceback +from collections.abc import Mapping HPE_ONEVIEW_IMP_ERR = None try: @@ -26,10 +25,8 @@ except ImportError: HPE_ONEVIEW_IMP_ERR = traceback.format_exc() HAS_HPE_ONEVIEW = False -from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common._collections_compat import Mapping def transform_list_to_dict(list_): @@ -130,7 +127,7 @@ class OneViewModuleException(Exception): self.msg = None self.oneview_response = None - if isinstance(data, six.string_types): + if isinstance(data, str): self.msg = data else: self.oneview_response = data @@ -180,8 +177,7 @@ class OneViewModuleResourceNotFound(OneViewModuleException): pass -@six.add_metaclass(abc.ABCMeta) -class OneViewModuleBase(object): +class OneViewModuleBase(object, metaclass=abc.ABCMeta): MSG_CREATED = 'Resource created successfully.' MSG_UPDATED = 'Resource updated successfully.' MSG_DELETED = 'Resource deleted successfully.' @@ -399,11 +395,11 @@ class OneViewModuleBase(object): resource1 = first_resource resource2 = second_resource - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + debug_resources = f"resource1 = {resource1}, resource2 = {resource2}" # The first resource is True / Not Null and the second resource is False / Null if resource1 and not resource2: - self.module.log("resource1 and not resource2. " + debug_resources) + self.module.log(f"resource1 and not resource2. {debug_resources}") return False # Checks all keys in first dict against the second dict @@ -453,15 +449,15 @@ class OneViewModuleBase(object): resource1 = first_resource resource2 = second_resource - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + debug_resources = f"resource1 = {resource1}, resource2 = {resource2}" # The second list is null / empty / False if not resource2: - self.module.log("resource 2 is null. " + debug_resources) + self.module.log(f"resource 2 is null. {debug_resources}") return False if len(resource1) != len(resource2): - self.module.log("resources have different length. " + debug_resources) + self.module.log(f"resources have different length. {debug_resources}") return False resource1 = sorted(resource1, key=_str_sorted) @@ -471,15 +467,15 @@ class OneViewModuleBase(object): if isinstance(val, Mapping): # change comparison function to compare dictionaries if not self.compare(val, resource2[i]): - self.module.log("resources are different. " + debug_resources) + self.module.log(f"resources are different. {debug_resources}") return False elif isinstance(val, list): # recursive call if not self.compare_list(val, resource2[i]): - self.module.log("lists are different. " + debug_resources) + self.module.log(f"lists are different. 
{debug_resources}") return False elif _standardize_value(val) != _standardize_value(resource2[i]): - self.module.log("values are different. " + debug_resources) + self.module.log(f"values are different. {debug_resources}") return False # no differences found diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index b5acbcc017..303abffab2 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -1,8 +1,8 @@ -# -*- coding: utf-8 -*- -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import sys diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index c896a9c6fa..ce9ec76b0d 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -1,20 +1,19 @@ -# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import time import ssl from os import environ -from ansible.module_utils.six import string_types from ansible.module_utils.basic import AnsibleModule +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] HAS_PYONE = True try: @@ -25,6 +24,41 @@ except ImportError: HAS_PYONE = False +# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064. +# It allows for easily handling lists like "NIC" or "DISK" in the JSON-like template representation. +# There are either lists of dictionaries (length > 1) or just dictionaries. +def flatten(to_flatten, extract=False): + """Flattens nested lists (with optional value extraction).""" + def recurse(to_flatten): + return sum(map(recurse, to_flatten), []) if isinstance(to_flatten, list) else [to_flatten] + value = recurse(to_flatten) + if extract and len(value) == 1: + return value[0] + return value + + +# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064. +# It renders JSON-like template representation into OpenNebula's template syntax (string). +def render(to_render): + """Converts dictionary to OpenNebula template.""" + def recurse(to_render): + for key, value in sorted(to_render.items()): + if value is None: + continue + if isinstance(value, dict): + yield f"{key}=[{','.join(recurse(value))}]" + continue + if isinstance(value, list): + for item in value: + yield f"{key}=[{','.join(recurse(item))}]" + continue + if isinstance(value, str): + yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"')) + continue + yield f'{key}="{value}"' + return '\n'.join(recurse(to_render)) + + class OpenNebulaModule: """ Base class for all OpenNebula Ansible Modules. 
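# A quick sketch of the two helpers above (values are illustrative only):
# flatten() normalises a template entry whether the API returned a dict or a
# list of dicts, and render() emits OpenNebula template syntax.
#
#   template = {"NAME": "vm1", "NIC": [{"NETWORK": "private"}, {"NETWORK": "public"}]}
#   flatten(template["NIC"])                       # [{'NETWORK': 'private'}, {'NETWORK': 'public'}]
#   flatten({"NETWORK": "private"}, extract=True)  # {'NETWORK': 'private'}
#   render(template)
#   # NAME="vm1"
#   # NIC=[NETWORK="private"]
#   # NIC=[NETWORK="public"]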
@@ -83,14 +117,14 @@ class OpenNebulaModule: if self.module.params.get("api_username"): username = self.module.params.get("api_username") else: - self.fail("Either api_username or the environment vairable ONE_USERNAME must be provided") + self.fail("Either api_username or the environment variable ONE_USERNAME must be provided") if self.module.params.get("api_password"): password = self.module.params.get("api_password") else: - self.fail("Either api_password or the environment vairable ONE_PASSWORD must be provided") + self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided") - session = "%s:%s" % (username, password) + session = f"{username}:{password}" if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ: return OneServer(url, session=session, context=no_ssl_validation_context) @@ -228,7 +262,7 @@ class OpenNebulaModule: self.cast_template(template[key]) elif isinstance(value, list): template[key] = ', '.join(value) - elif not isinstance(value, string_types): + elif not isinstance(value, str): template[key] = str(value) def requires_template_update(self, current, desired): @@ -278,11 +312,11 @@ class OpenNebulaModule: current_state = state() if current_state in invalid_states: - self.fail('invalid %s state %s' % (element_name, state_name(current_state))) + self.fail(f'invalid {element_name} state {state_name(current_state)}') if transition_states: if current_state not in transition_states: - self.fail('invalid %s transition state %s' % (element_name, state_name(current_state))) + self.fail(f'invalid {element_name} transition state {state_name(current_state)}') if current_state in target_states: return True @@ -300,7 +334,7 @@ class OpenNebulaModule: try: self.run(self.one, self.module, self.result) except OneException as e: - self.fail(msg="OpenNebula Exception: %s" % e) + self.fail(msg=f"OpenNebula Exception: {e}") def run(self, one, module, result): """ @@ -311,3 +345,90 @@ class OpenNebulaModule: result: the Ansible result """ raise NotImplementedError("Method requires implementation") + + def get_image_list_id(self, image, element): + """ + This is a helper function for get_image_info to iterate over a simple list of objects + """ + list_of_id = [] + + if element == 'VMS': + image_list = image.VMS + if element == 'CLONES': + image_list = image.CLONES + if element == 'APP_CLONES': + image_list = image.APP_CLONES + + for iter in image_list.ID: + list_of_id.append( + # These are optional so firstly check for presence + getattr(iter, 'ID', 'Null'), + ) + return list_of_id + + def get_image_snapshots_list(self, image): + """ + This is a helper function for get_image_info to iterate over a dictionary + """ + list_of_snapshots = [] + + for iter in image.SNAPSHOTS.SNAPSHOT: + list_of_snapshots.append({ + 'date': iter['DATE'], + 'parent': iter['PARENT'], + 'size': iter['SIZE'], + # These are optional so firstly check for presence + 'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'), + 'children': getattr(iter, 'CHILDREN', 'Null'), + 'active': getattr(iter, 'ACTIVE', 'Null'), + 'name': getattr(iter, 'NAME', 'Null'), + }) + return list_of_snapshots + + def get_image_info(self, image): + """ + This method is used by one_image and one_image_info modules to retrieve + information from XSD scheme of an image + Returns: a copy of the parameters that includes the resolved parameters. 
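+        The dict includes, among others: id, name, state (resolved through
+        IMAGE_STATES), running_vms, user and group ownership, the permissions
+        block, size, datastore, the VMS/CLONES/APP_CLONES ID lists and the
+        snapshot list assembled by the two helper methods above.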
+ """ + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + 'permissions': { + 'owner_u': image.PERMISSIONS.OWNER_U, + 'owner_m': image.PERMISSIONS.OWNER_M, + 'owner_a': image.PERMISSIONS.OWNER_A, + 'group_u': image.PERMISSIONS.GROUP_U, + 'group_m': image.PERMISSIONS.GROUP_M, + 'group_a': image.PERMISSIONS.GROUP_A, + 'other_u': image.PERMISSIONS.OTHER_U, + 'other_m': image.PERMISSIONS.OTHER_M, + 'other_a': image.PERMISSIONS.OTHER_A + }, + 'type': image.TYPE, + 'disk_type': image.DISK_TYPE, + 'persistent': image.PERSISTENT, + 'regtime': image.REGTIME, + 'source': image.SOURCE, + 'path': image.PATH, + 'fstype': getattr(image, 'FSTYPE', 'Null'), + 'size': image.SIZE, + 'cloning_ops': image.CLONING_OPS, + 'cloning_id': image.CLONING_ID, + 'target_snapshot': image.TARGET_SNAPSHOT, + 'datastore_id': image.DATASTORE_ID, + 'datastore': image.DATASTORE, + 'vms': self.get_image_list_id(image, 'VMS'), + 'clones': self.get_image_list_id(image, 'CLONES'), + 'app_clones': self.get_image_list_id(image, 'APP_CLONES'), + 'snapshots': self.get_image_snapshots_list(image), + 'template': image.TEMPLATE, + } + return info diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 88e577af5c..0910d24cae 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -1,21 +1,27 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This module utils is deprecated and will be removed in community.general 13.0.0 +# import logging import logging.config import os import tempfile -from datetime import datetime +# (TODO: remove next line!) +from datetime import datetime # noqa: F401, pylint: disable=unused-import from operator import eq import time try: - import yaml + import yaml # noqa: F401, pylint: disable=unused-import import oci from oci.constants import HEADER_NEXT_PAGE @@ -40,7 +46,6 @@ except ImportError: from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.six import iteritems __version__ = "1.6.0-dev" @@ -432,7 +437,7 @@ def check_and_update_attributes( target_instance, attr_name, input_value, existing_value, changed ): """ - This function checks the difference between two resource attributes of literal types and sets the attrbute + This function checks the difference between two resource attributes of literal types and sets the attribute value in the target instance type holding the attribute. 
:param target_instance: The instance which contains the attribute whose values to be compared :param attr_name: Name of the attribute whose value required to be compared @@ -559,7 +564,7 @@ def are_lists_equal(s, t): if s is None and t is None: return True - if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)): + if s is None or t is None or (len(s) != len(t)): return False if len(s) == 0: @@ -568,7 +573,7 @@ s = to_dict(s) t = to_dict(t) - if type(s[0]) == dict: + if isinstance(s[0], dict): # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key # `service_name` which is not provided in the list of `services` by a user while making an update call; only @@ -602,9 +607,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes): user_provided_attr_value = module.params.get(attr, None) unequal_list_attr = ( - type(resources_attr_value) == list or type(user_provided_attr_value) == list + isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list) ) and not are_lists_equal(user_provided_attr_value, resources_attr_value) - unequal_attr = type(resources_attr_value) != list and to_dict( + unequal_attr = not isinstance(resources_attr_value, list) and to_dict( resources_attr_value ) != to_dict(user_provided_attr_value) if unequal_list_attr or unequal_attr: @@ -691,7 +696,7 @@ def check_and_create_resource( :param model: Model used to create a resource. :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name, dns_label. - :param dead_states: List of states which can't transition to any of the usable states of the resource. This deafults + :param dead_states: List of states which can't transition to any of the usable states of the resource. This defaults to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"] :param default_attribute_values: A dictionary containing default values for attributes. :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} @@ -783,7 +788,7 @@ def _get_attributes_to_consider(exclude_attributes, model, module): attributes_to_consider = list(model.attribute_map) if "freeform_tags" in attributes_to_consider: attributes_to_consider.remove("freeform_tags") - # Temporarily removing node_count as the exisiting resource does not reflect it + # Temporarily removing node_count as the existing resource does not reflect it if "node_count" in attributes_to_consider: attributes_to_consider.remove("node_count") _debug("attributes to consider: {0}".format(attributes_to_consider)) @@ -814,7 +819,7 @@ def is_attr_assigned_default(default_attribute_values, attr, assigned_value): # this is to ensure forward compatibility when the API returns new keys that are not known during # the time when the module author provided default values for the attribute keys = {} - for k, v in iteritems(assigned_value.items()): + for k, v in assigned_value.items(): if k in default_val_for_attr: keys[k] = v @@ -934,9 +939,9 @@ def tuplize(d): list_of_tuples = [] key_list = sorted(list(d.keys())) for key in key_list: - if type(d[key]) == list: + if isinstance(d[key], list): # Convert a value which is itself a list of dict to a list of tuples.
- if d[key] and type(d[key][0]) == dict: + if d[key] and isinstance(d[key][0], dict): sub_tuples = [] for sub_dict in d[key]: sub_tuples.append(tuplize(sub_dict)) @@ -946,7 +951,7 @@ def tuplize(d): list_of_tuples.append((sub_tuples is None, key, sub_tuples)) else: list_of_tuples.append((d[key] is None, key, d[key])) - elif type(d[key]) == dict: + elif isinstance(d[key], dict): tupled_value = tuplize(d[key]) list_of_tuples.append((tupled_value is None, key, tupled_value)) else: @@ -967,13 +972,13 @@ def sort_dictionary(d): """ sorted_d = {} for key in d: - if type(d[key]) == list: - if d[key] and type(d[key][0]) == dict: + if isinstance(d[key], list): + if d[key] and isinstance(d[key][0], dict): sorted_value = sort_list_of_dictionary(d[key]) sorted_d[key] = sorted_value else: sorted_d[key] = sorted(d[key]) - elif type(d[key]) == dict: + elif isinstance(d[key], dict): sorted_d[key] = sort_dictionary(d[key]) else: sorted_d[key] = d[key] @@ -1024,10 +1029,7 @@ def check_if_user_value_matches_resources_attr( return if ( - resources_value_for_attr is None - and len(user_provided_value_for_attr) >= 0 - or user_provided_value_for_attr is None - and len(resources_value_for_attr) >= 0 + resources_value_for_attr is None or user_provided_value_for_attr is None ): res[0] = False return @@ -1042,7 +1044,7 @@ def check_if_user_value_matches_resources_attr( if ( user_provided_value_for_attr - and type(user_provided_value_for_attr[0]) == dict + and isinstance(user_provided_value_for_attr[0], dict) ): # Process a list of dict sorted_user_provided_value_for_attr = sort_list_of_dictionary( @@ -1189,7 +1191,7 @@ def are_dicts_equal( def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list): - """An entry for the Exclude list for excluding a map's key is specifed as a dict with the map option name as the + """An entry for the Exclude list for excluding a map's key is specified as a dict with the map option name as the key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """ for exclude_item in exclude_list: @@ -1530,7 +1532,7 @@ def delete_and_wait( result[resource_type] = resource return result # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found. - if type(wait_response) is not Sentinel: + if not isinstance(wait_response, Sentinel): resource = to_dict(wait_response.data) else: resource["lifecycle_state"] = "DELETED" @@ -1545,7 +1547,7 @@ def delete_and_wait( except ServiceError as ex: # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone # resource is not available, instead of the expected 404. So working around this for now. - if type(client) == oci.dns.DnsClient: + if isinstance(client, oci.dns.DnsClient): if ex.status == 400 and ex.code == "InvalidParameter": _debug( "Resource {0} with {1} already deleted. So returning changed=False".format( @@ -1772,7 +1774,7 @@ def update_class_type_attr_difference( ): """ Checks the difference and updates an attribute which is represented by a class - instance. Not aplicable if the attribute type is a primitive value. + instance. Not applicable if the attribute type is a primitive value. For example, if a class name is A with an attribute x, then if A.x = X(), then only this method works. 
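# Two small illustrations of the oci_utils changes above (a sketch). First,
# why `type(x) == dict` becomes `isinstance(x, dict)`: isinstance also
# matches dict subclasses, which to_dict()-style helpers may return.
from collections import OrderedDict
od = OrderedDict(a=1)
type(od) == dict       # False: exact-type comparison misses subclasses
isinstance(od, dict)   # True
# Second, sort_dictionary() normalizes nested values so dictionaries can be
# compared regardless of list ordering:
d = {"names": ["b", "a"], "nested": {"vals": [2, 1]}, "count": 5}
sort_dictionary(d)     # {'names': ['a', 'b'], 'nested': {'vals': [1, 2]}, 'count': 5}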
:param update_class_details The instance which should be updated if there is change in @@ -1934,7 +1936,7 @@ def get_target_resource_from_list( module, list_resource_fn, target_resource_id=None, **kwargs ): """ - Returns a resource filtered by identifer from a list of resources. This method should be + Returns a resource filtered by identifier from a list of resources. This method should be used as an alternative of 'get resource' method when 'get resource' is nor provided by resource api. This method returns a wrapper of response object but that should not be used as an input to 'wait_until' utility as this is only a partial wrapper of response object. diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py new file mode 100644 index 0000000000..355fd55cc2 --- /dev/null +++ b/plugins/module_utils/pacemaker.py @@ -0,0 +1,79 @@ +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +_state_map = { + "present": "create", + "absent": "remove", + "cloned": "clone", + "status": "status", + "enabled": "enable", + "disabled": "disable", + "online": "start", + "offline": "stop", + "maintenance": "set", + "config": "config", + "cleanup": "cleanup", +} + + +def fmt_resource_type(value): + return [":".join(value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None)] + + +def fmt_resource_operation(value): + cmd = [] + for op in value: + cmd.append("op") + cmd.append(op.get('operation_action')) + for operation_option in op.get('operation_option'): + cmd.append(operation_option) + + return cmd + + +def fmt_resource_argument(value): + return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option'] + + +def get_pacemaker_maintenance_mode(runner): + with runner("cli_action config") as ctx: + rc, out, err = ctx.run(cli_action="property") + maint_mode_re = re.compile(r"maintenance-mode.*true", re.IGNORECASE) + maintenance_mode_output = [line for line in out.splitlines() if maint_mode_re.search(line)] + return bool(maintenance_mode_output) + + +def pacemaker_runner(module, **kwargs): + runner_command = ['pcs'] + runner = CmdRunner( + module, + command=runner_command, + arg_formats=dict( + cli_action=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + resource_type=cmd_runner_fmt.as_func(fmt_resource_type), + resource_option=cmd_runner_fmt.as_list(), + resource_operation=cmd_runner_fmt.as_func(fmt_resource_operation), + resource_meta=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("meta"), + resource_argument=cmd_runner_fmt.as_func(fmt_resource_argument), + resource_clone_ids=cmd_runner_fmt.as_list(), + resource_clone_meta=cmd_runner_fmt.as_list(), + apply_all=cmd_runner_fmt.as_bool("--all"), + agent_validation=cmd_runner_fmt.as_bool("--agent-validation"), + wait=cmd_runner_fmt.as_opt_eq_val("--wait"), + config=cmd_runner_fmt.as_fixed("config"), + force=cmd_runner_fmt.as_bool("--force"), + version=cmd_runner_fmt.as_fixed("--version"), + output_format=cmd_runner_fmt.as_opt_eq_val("--output-format"), + ), + **kwargs + ) + return runner diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py new file mode 100644 index 
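# A hypothetical invocation of pacemaker_runner() above (module is an
# AnsibleModule instance; the resource name is a placeholder). The context
# mirrors how get_pacemaker_maintenance_mode() drives the runner:
runner = pacemaker_runner(module)
with runner("cli_action state name") as ctx:
    rc, out, err = ctx.run(cli_action="resource", state="status", name="my-resource")
# effectively executes: pcs resource status my-resource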
0000000000..3d81a6c5f2 --- /dev/null +++ b/plugins/module_utils/pipx.py @@ -0,0 +1,119 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import json + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +pipx_common_argspec = { + "global": dict(type='bool', default=False), + "executable": dict(type='path'), +} + + +_state_map = dict( + install='install', + install_all='install-all', + present='install', + uninstall='uninstall', + absent='uninstall', + uninstall_all='uninstall-all', + inject='inject', + uninject='uninject', + upgrade='upgrade', + upgrade_shared='upgrade-shared', + upgrade_all='upgrade-all', + reinstall='reinstall', + reinstall_all='reinstall-all', + pin='pin', + unpin='unpin', +) + + +def pipx_runner(module, command, **kwargs): + arg_formats = dict( + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + name_source=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda n, s: [s] if s else [n])), + install_apps=cmd_runner_fmt.as_bool("--include-apps"), + install_deps=cmd_runner_fmt.as_bool("--include-deps"), + inject_packages=cmd_runner_fmt.as_list(), + force=cmd_runner_fmt.as_bool("--force"), + include_injected=cmd_runner_fmt.as_bool("--include-injected"), + index_url=cmd_runner_fmt.as_opt_val('--index-url'), + python=cmd_runner_fmt.as_opt_val('--python'), + system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"), + _list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']), + editable=cmd_runner_fmt.as_bool("--editable"), + pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'), + suffix=cmd_runner_fmt.as_opt_val('--suffix'), + spec_metadata=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ) + arg_formats["global"] = cmd_runner_fmt.as_bool("--global") + + runner = CmdRunner( + module, + command=command, + arg_formats=arg_formats, + environ_update={'USE_EMOJI': '0', 'PIPX_USE_EMOJI': '0'}, + check_rc=True, + **kwargs + ) + return runner + + +def _make_entry(venv_name, venv, include_injected, include_deps): + entry = { + 'name': venv_name, + 'version': venv['metadata']['main_package']['package_version'], + 'pinned': venv['metadata']['main_package'].get('pinned'), + } + if include_injected: + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} + if include_deps: + entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + return entry + + +def make_process_dict(include_injected, include_deps=False): + def process_dict(rc, out, err): + if not out: + return {} + + results = {} + raw_data = json.loads(out) + for venv_name, venv in raw_data['venvs'].items(): + results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps) + + return results, raw_data + + return process_dict + + +def make_process_list(mod_helper, **kwargs): + # + # ATTENTION! 
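# A minimal sketch of how make_process_dict() above post-processes
# `pipx list --include-injected --json` output (JSON trimmed to the
# fields actually read by _make_entry()):
out = ('{"venvs": {"black": {"metadata": {"main_package": '
       '{"package_version": "24.1.0", "pinned": false, '
       '"app_paths_of_dependencies": {}}, "injected_packages": {}}}}}')
process = make_process_dict(include_injected=True)
results, raw = process(0, out, "")
# results == {'black': {'name': 'black', 'version': '24.1.0',
#                       'pinned': False, 'injected': {}}}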
+ # + # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0 + # + process_dict = make_process_dict(mod_helper, **kwargs) + + def process_list(rc, out, err): + res_dict, raw_data = process_dict(rc, out, err) + + if kwargs.get("include_raw"): + mod_helper.vars.raw_output = raw_data + + return [ + entry + for name, entry in res_dict.items() + if name == kwargs.get("name") + ] + return process_list diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py new file mode 100644 index 0000000000..13c824440f --- /dev/null +++ b/plugins/module_utils/pkg_req.py @@ -0,0 +1,71 @@ +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils import deps + + +with deps.declare("packaging"): + from packaging.requirements import Requirement + from packaging.version import parse as parse_version, InvalidVersion + + +class PackageRequirement: + def __init__(self, module, name): + self.module = module + self.parsed_name, self.requirement = self._parse_spec(name) + + def _parse_spec(self, name): + """ + Parse a package name that may include version specifiers using PEP 508. + Returns a tuple of (name, requirement) where requirement is of type packaging.requirements.Requirement and it may be None. + + Example inputs: + "package" + "package>=1.0" + "package>=1.0,<2.0" + "package[extra]>=1.0" + "package[foo,bar]>=1.0,!=1.5" + + :param name: Package name with optional version specifiers and extras + :return: Tuple of (name, requirement) + :raises ValueError: If the package specification is invalid + """ + if not name: + return name, None + + # Quick check for simple package names + if not any(c in name for c in '>= -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import atexit -import time -import re -import traceback - -PROXMOXER_IMP_ERR = None -try: - from proxmoxer import ProxmoxAPI - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - PROXMOXER_IMP_ERR = traceback.format_exc() - - -from ansible.module_utils.basic import env_fallback, missing_required_lib - - -def proxmox_auth_argument_spec(): - return dict( - api_host=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_HOST']) - ), - api_user=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_USER']) - ), - api_password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['PROXMOX_PASSWORD']) - ), - api_token_id=dict(type='str', - no_log=False - ), - api_token_secret=dict(type='str', - no_log=True - ), - validate_certs=dict(type='bool', - default=False - ), - ) - - -def proxmox_to_ansible_bool(value): - '''Convert Proxmox representation of a boolean to be ansible-friendly''' - return True if value == 1 else False - - -class ProxmoxAnsible(object): - """Base class for Proxmox modules""" - def __init__(self, module): - self.module = module - self.proxmox_api = self._connect() - # Test token validity - try: - self.proxmox_api.version.get() - except Exception as e: - module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def _connect(self): - api_host = self.module.params['api_host'] - api_user = self.module.params['api_user'] - api_password = 
self.module.params['api_password'] - api_token_id = self.module.params['api_token_id'] - api_token_secret = self.module.params['api_token_secret'] - validate_certs = self.module.params['validate_certs'] - - auth_args = {'user': api_user} - if api_password: - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - except Exception as e: - self.module.fail_json(msg='%s' % e, exception=traceback.format_exc()) diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py new file mode 100644 index 0000000000..3b093d8c9d --- /dev/null +++ b/plugins/module_utils/puppet.py @@ -0,0 +1,108 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_PUPPET_PATH_PREFIX = ["/opt/puppetlabs/bin"] + + +def get_facter_dir(): + if os.getuid() == 0: + return '/etc/facter/facts.d' + else: + return os.path.expanduser('~/.facter/facts.d') + + +def _puppet_cmd(module): + return module.get_bin_path("puppet", False, _PUPPET_PATH_PREFIX) + + +# If the `timeout` CLI command feature is removed, +# Then we could add this as a fixed param to `puppet_runner` +def ensure_agent_enabled(module): + runner = CmdRunner( + module, + command="puppet", + path_prefix=_PUPPET_PATH_PREFIX, + arg_formats=dict( + _agent_disabled=cmd_runner_fmt.as_fixed(['config', 'print', 'agent_disabled_lockfile']), + ), + check_rc=False, + ) + + rc, stdout, stderr = runner("_agent_disabled").run() + if os.path.exists(stdout.strip()): + module.fail_json( + msg="Puppet agent is administratively disabled.", + disabled=True) + elif rc != 0: + module.fail_json( + msg="Puppet agent state could not be determined.") + + +def puppet_runner(module): + + # Keeping backward compatibility, allow for running with the `timeout` CLI command. + # If this can be replaced with ansible `timeout` parameter in playbook, + # then this function could be removed. 
+ def _prepare_base_cmd(): + _tout_cmd = module.get_bin_path("timeout", False) + if _tout_cmd: + cmd = ["timeout", "-s", "9", module.params["timeout"], _puppet_cmd(module)] + else: + cmd = ["puppet"] + return cmd + + def noop_func(v): + return ["--noop"] if module.check_mode or v else ["--no-noop"] + + _logdest_map = { + "syslog": ["--logdest", "syslog"], + "all": ["--logdest", "syslog", "--logdest", "console"], + } + + @cmd_runner_fmt.unpack_args + def execute_func(execute, manifest): + if execute: + return ["--execute", execute] + else: + return [manifest] + + runner = CmdRunner( + module, + command=_prepare_base_cmd(), + path_prefix=_PUPPET_PATH_PREFIX, + arg_formats=dict( + _agent_fixed=cmd_runner_fmt.as_fixed([ + "agent", "--onetime", "--no-daemonize", "--no-usecacheonfailure", + "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", + ]), + _apply_fixed=cmd_runner_fmt.as_fixed(["apply", "--detailed-exitcodes"]), + puppetmaster=cmd_runner_fmt.as_opt_val("--server"), + show_diff=cmd_runner_fmt.as_bool("--show-diff"), + confdir=cmd_runner_fmt.as_opt_val("--confdir"), + environment=cmd_runner_fmt.as_opt_val("--environment"), + tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]), + skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]), + certname=cmd_runner_fmt.as_opt_eq_val("--certname"), + noop=cmd_runner_fmt.as_func(noop_func), + use_srv_records=cmd_runner_fmt.as_bool("--use_srv_records", "--no-use_srv_records", ignore_none=True), + logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]), + modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"), + _execute=cmd_runner_fmt.as_func(execute_func), + summarize=cmd_runner_fmt.as_bool("--summarize"), + waitforlock=cmd_runner_fmt.as_opt_val("--waitforlock"), + debug=cmd_runner_fmt.as_bool("--debug"), + verbose=cmd_runner_fmt.as_bool("--verbose"), + ), + check_rc=False, + force_lang=module.params["environment_lang"], + ) + return runner diff --git a/plugins/module_utils/pure.py b/plugins/module_utils/pure.py deleted file mode 100644 index ebd41b1ce5..0000000000 --- a/plugins/module_utils/pure.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- - -# This code is part of Ansible, but is an independent component. - # This particular file snippet, and this file snippet only, is BSD licensed. - # Modules you write using this snippet, which is embedded dynamically by Ansible - # still belong to the author of the module, and may assign their own license - # to the complete work.
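# A hypothetical one-shot agent run through puppet_runner() above (module is
# an AnsibleModule instance; the server name is a placeholder):
runner = puppet_runner(module)
with runner("_agent_fixed puppetmaster environment noop") as ctx:
    rc, out, err = ctx.run(puppetmaster="puppet.example.com",
                           environment="production", noop=False)
# expands roughly to: puppet agent --onetime --no-daemonize ...
#   --server puppet.example.com --environment production --no-noop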
-# -# Copyright (c), Simon Dodsley ,2017 -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -HAS_PURESTORAGE = True -try: - from purestorage import purestorage -except ImportError: - HAS_PURESTORAGE = False - -HAS_PURITY_FB = True -try: - from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest -except ImportError: - HAS_PURITY_FB = False - -from functools import wraps -from os import environ -from os import path -import platform - -VERSION = 1.2 -USER_AGENT_BASE = 'Ansible' -API_AGENT_VERSION = 1.5 - - -def get_system(module): - """Return System Object or Fail""" - user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': __name__, - 'version': VERSION, - 'platform': platform.platform() - } - array_name = module.params['fa_url'] - api = module.params['api_token'] - - if array_name and api: - system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent) - elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'): - system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent) - else: - module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments") - try: - system.get() - except Exception: - module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials") - return system - - -def get_blade(module): - """Return System Object or Fail""" - user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': __name__, - 'version': VERSION, - 'platform': platform.platform() - } - blade_name = module.params['fb_url'] - api = module.params['api_token'] - - if blade_name and api: - blade = PurityFb(blade_name) - blade.disable_verify_ssl() - try: - blade.login(api) - versions = blade.api_version.list_versions().versions - if API_AGENT_VERSION in versions: - blade._api_client.user_agent = user_agent - except rest.ApiException as e: - module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials") - elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'): - blade = PurityFb(environ.get('PUREFB_URL')) - blade.disable_verify_ssl() - try: - blade.login(environ.get('PUREFB_API')) - versions = blade.api_version.list_versions().versions - if API_AGENT_VERSION in versions: - blade._api_client.user_agent = user_agent - except rest.ApiException as e: - module.fail_json(msg="Pure Storage FlashBlade authentication failed. 
Check your credentials") - else: - module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments") - return blade - - -def purefa_argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - - return dict( - fa_url=dict(), - api_token=dict(no_log=True), - ) - - -def purefb_argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - - return dict( - fb_url=dict(), - api_token=dict(no_log=True), - ) diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py new file mode 100644 index 0000000000..7d9b94f50e --- /dev/null +++ b/plugins/module_utils/python_runner.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, _ensure_list + + +class PythonRunner(CmdRunner): + def __init__(self, module, command, arg_formats=None, default_args_order=(), + check_rc=False, force_lang="C", path_prefix=None, environ_update=None, + python="python", venv=None): + self.python = python + self.venv = venv + self.has_venv = venv is not None + + if os.path.isabs(python) or '/' in python: + self.python = python + elif self.has_venv: + if path_prefix is None: + path_prefix = [] + path_prefix.append(os.path.join(venv, "bin")) + if environ_update is None: + environ_update = {} + environ_update["PATH"] = f"{':'.join(path_prefix)}:{os.environ['PATH']}" + environ_update["VIRTUAL_ENV"] = venv + + python_cmd = [self.python] + _ensure_list(command) + + super(PythonRunner, self).__init__(module, python_cmd, arg_formats, default_args_order, + check_rc, force_lang, path_prefix, environ_update) diff --git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py deleted file mode 100644 index 84effee97c..0000000000 --- a/plugins/module_utils/rax.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by -# Ansible still belong to the author of the module, and may assign their own -# license to the complete work. 
-# -# Copyright (c), Michael DeHaan , 2012-2013 -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -from uuid import UUID - -from ansible.module_utils.six import text_type, binary_type - -FINAL_STATUSES = ('ACTIVE', 'ERROR') -VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', - 'error', 'error_deleting') - -CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', - 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] -CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', - 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', - 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] - -NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None)) -PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" -SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" - - -def rax_slugify(value): - """Prepend a key with rax_ and normalize the key name""" - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def rax_clb_node_to_dict(obj): - """Function to convert a CLB Node object to a dict""" - if not obj: - return {} - node = obj.to_dict() - node['id'] = obj.id - node['weight'] = obj.weight - return node - - -def rax_to_dict(obj, obj_type='standard'): - """Generic function to convert a pyrax object to a dict - - obj_type values: - standard - clb - server - - """ - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if obj_type == 'clb' and key == 'nodes': - instance[key] = [] - for node in value: - instance[key].append(rax_clb_node_to_dict(node)) - elif (isinstance(value, list) and len(value) > 0 and - not isinstance(value[0], NON_CALLABLES)): - instance[key] = [] - for item in value: - instance[key].append(rax_to_dict(item)) - elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): - if obj_type == 'server': - if key == 'image': - if not value: - instance['rax_boot_source'] = 'volume' - else: - instance['rax_boot_source'] = 'local' - key = rax_slugify(key) - instance[key] = value - - if obj_type == 'server': - for attr in ['id', 'accessIPv4', 'name', 'status']: - instance[attr] = instance.get(rax_slugify(attr)) - - return instance - - -def rax_find_bootable_volume(module, rax_module, server, exit=True): - """Find a servers bootable volume""" - cs = rax_module.cloudservers - cbs = rax_module.cloud_blockstorage - server_id = rax_module.utils.get_id(server) - volumes = cs.volumes.get_server_volumes(server_id) - bootable_volumes = [] - for volume in volumes: - vol = cbs.get(volume) - if module.boolean(vol.bootable): - bootable_volumes.append(vol) - if not bootable_volumes: - if exit: - module.fail_json(msg='No bootable volumes could be found for ' - 'server %s' % server_id) - else: - return False - elif len(bootable_volumes) > 1: - if exit: - module.fail_json(msg='Multiple bootable volumes found for server ' - '%s' % server_id) - else: - return False - - return bootable_volumes[0] - - -def rax_find_image(module, rax_module, image, exit=True): - """Find a server image by ID or Name""" - cs = rax_module.cloudservers - try: - UUID(image) - except ValueError: - try: - image = cs.images.find(human_id=image) - except(cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - try: - image = cs.images.find(name=image) - except (cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - if exit: - 
module.fail_json(msg='No matching image found (%s)' % - image) - else: - return False - - return rax_module.utils.get_id(image) - - -def rax_find_volume(module, rax_module, name): - """Find a Block storage volume by ID or name""" - cbs = rax_module.cloud_blockstorage - try: - UUID(name) - volume = cbs.get(name) - except ValueError: - try: - volume = cbs.find(name=name) - except rax_module.exc.NotFound: - volume = None - except Exception as e: - module.fail_json(msg='%s' % e) - return volume - - -def rax_find_network(module, rax_module, network): - """Find a cloud network by ID or name""" - cnw = rax_module.cloud_networks - try: - UUID(network) - except ValueError: - if network.lower() == 'public': - return cnw.get_server_networks(PUBLIC_NET_ID) - elif network.lower() == 'private': - return cnw.get_server_networks(SERVICE_NET_ID) - else: - try: - network_obj = cnw.find_network_by_label(network) - except (rax_module.exceptions.NetworkNotFound, - rax_module.exceptions.NetworkLabelNotUnique): - module.fail_json(msg='No matching network found (%s)' % - network) - else: - return cnw.get_server_networks(network_obj) - else: - return cnw.get_server_networks(network) - - -def rax_find_server(module, rax_module, server): - """Find a Cloud Server by ID or name""" - cs = rax_module.cloudservers - try: - UUID(server) - server = cs.servers.get(server) - except ValueError: - servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) - if not servers: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') - if len(servers) > 1: - module.fail_json(msg='Multiple servers matched by name, ' - 'try using the Server ID instead') - - # We made it this far, grab the first and hopefully only server - # in the list - server = servers[0] - return server - - -def rax_find_loadbalancer(module, rax_module, loadbalancer): - """Find a Cloud Load Balancer by ID or name""" - clb = rax_module.cloud_loadbalancers - try: - found = clb.get(loadbalancer) - except Exception: - found = [] - for lb in clb.list(): - if loadbalancer == lb.name: - found.append(lb) - - if not found: - module.fail_json(msg='No loadbalancer was matched') - - if len(found) > 1: - module.fail_json(msg='Multiple loadbalancers matched') - - # We made it this far, grab the first and hopefully only item - # in the list - found = found[0] - - return found - - -def rax_argument_spec(): - """Return standard base dictionary used for the argument_spec - argument in AnsibleModule - - """ - return dict( - api_key=dict(type='str', aliases=['password'], no_log=True), - auth_endpoint=dict(type='str'), - credentials=dict(type='path', aliases=['creds_file']), - env=dict(type='str'), - identity_type=dict(type='str', default='rackspace'), - region=dict(type='str'), - tenant_id=dict(type='str'), - tenant_name=dict(type='str'), - username=dict(type='str'), - validate_certs=dict(type='bool', aliases=['verify_ssl']), - ) - - -def rax_required_together(): - """Return the default list used for the required_together argument to - AnsibleModule""" - return [['api_key', 'username']] - - -def setup_rax_module(module, rax_module, region_required=True): - """Set up pyrax in a standard way for all modules""" - rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version, - rax_module.USER_AGENT) - - api_key = module.params.get('api_key') - auth_endpoint = module.params.get('auth_endpoint') - credentials = module.params.get('credentials') - env = module.params.get('env') - identity_type = module.params.get('identity_type') - region = 
module.params.get('region') - tenant_id = module.params.get('tenant_id') - tenant_name = module.params.get('tenant_name') - username = module.params.get('username') - verify_ssl = module.params.get('validate_certs') - - if env is not None: - rax_module.set_environment(env) - - rax_module.set_setting('identity_type', identity_type) - if verify_ssl is not None: - rax_module.set_setting('verify_ssl', verify_ssl) - if auth_endpoint is not None: - rax_module.set_setting('auth_endpoint', auth_endpoint) - if tenant_id is not None: - rax_module.set_setting('tenant_id', tenant_id) - if tenant_name is not None: - rax_module.set_setting('tenant_name', tenant_name) - - try: - username = username or os.environ.get('RAX_USERNAME') - if not username: - username = rax_module.get_setting('keyring_username') - if username: - api_key = 'USE_KEYRING' - if not api_key: - api_key = os.environ.get('RAX_API_KEY') - credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or - os.environ.get('RAX_CREDS_FILE')) - region = (region or os.environ.get('RAX_REGION') or - rax_module.get_setting('region')) - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) - - try: - if api_key and username: - if api_key == 'USE_KEYRING': - rax_module.keyring_auth(username, region=region) - else: - rax_module.set_credentials(username, api_key=api_key, - region=region) - elif credentials: - credentials = os.path.expanduser(credentials) - rax_module.set_credential_file(credentials, region=region) - else: - raise Exception('No credentials supplied!') - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - - if region_required and region not in rax_module.regions: - module.fail_json(msg='%s is not a valid region, must be one of: %s' % - (region, ','.join(rax_module.regions))) - - return rax_module diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c2f17e03fb..ab551b44c5 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1,23 +1,29 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-2018 Dell EMC Inc. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import http.client as http_client import json +import os +import random +import string +import time from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six.moves import http_client -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.common.text.converters import to_bytes +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'} PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'} +PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', + 'OData-Version': '4.0'} DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} FAIL_MSG = 'Issuing a data modification command without specifying the '\ @@ -25,20 +31,40 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ 'than one %(resource)s is no longer allowed. Use the `resource_id` '\ 'option to specify the target %(resource)s ID.' +# Use together with the community.general.redfish docs fragment +REDFISH_COMMON_ARGUMENT_SPEC = { + "validate_certs": { + "type": "bool", + "default": False, + }, + "ca_path": { + "type": "path", + }, + "ciphers": { + "type": "list", + "elements": "str", + }, +} + class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False, strip_etag_quotes=False): + data_modification=False, strip_etag_quotes=False, ciphers=None): self.root_uri = root_uri self.creds = creds self.timeout = timeout self.module = module self.service_root = '/redfish/v1/' + self.session_service_uri = '/redfish/v1/SessionService' + self.sessions_uri = '/redfish/v1/SessionService/Sessions' self.resource_id = resource_id self.data_modification = data_modification self.strip_etag_quotes = strip_etag_quotes - self._init_session() + self.ciphers = ciphers if ciphers is not None else module.params.get("ciphers") + self._vendor = None + self.validate_certs = module.params.get("validate_certs", False) + self.ca_path = module.params.get("ca_path") def _auth_params(self, headers): """ @@ -60,24 +86,108 @@ class RedfishUtils(object): force_basic_auth = True return username, password, force_basic_auth + def _check_request_payload(self, req_pyld, cur_pyld, uri): + """ + Checks the request payload with the values currently held by the + service. Will check if changes are needed and if properties are + supported by the service. 
+ + :param req_pyld: dict containing the properties to apply + :param cur_pyld: dict containing the properties currently set + :param uri: string containing the URI being modified + :return: dict containing response information + """ + + change_required = False + for prop in req_pyld: + # Check if the property is supported by the service + if prop not in cur_pyld: + return {'ret': False, + 'changed': False, + 'msg': '%s does not support the property %s' % (uri, prop), + 'changes_required': False} + + # Perform additional checks based on the type of property + if isinstance(req_pyld[prop], dict) and isinstance(cur_pyld[prop], dict): + # If the property is a dictionary, check the nested properties + sub_resp = self._check_request_payload(req_pyld[prop], cur_pyld[prop], uri) + if not sub_resp['ret']: + # Unsupported property or other error condition; no change + return sub_resp + if sub_resp['changes_required']: + # Subordinate dictionary requires changes + change_required = True + + else: + # For other properties, just compare the values + + # Note: This is also a fallthrough for cases where the request + # payload and current settings do not match in their data type. + # There are cases where this can be expected, such as when a + # property is always 'null' in responses, so we want to attempt + # the PATCH request. + + # Note: This is also a fallthrough for properties that are + # arrays of objects. Some services erroneously omit properties + # within arrays of objects when not configured, and it is + # expecting the client to provide them anyway. + + if req_pyld[prop] != cur_pyld[prop]: + change_required = True + + resp = {'ret': True, 'changes_required': change_required} + if not change_required: + # No changes required; all properties set + resp['changed'] = False + resp['msg'] = 'Properties in %s are already set' % uri + return resp + + def _request(self, uri, **kwargs): + kwargs.setdefault("validate_certs", self.validate_certs) + kwargs.setdefault("follow_redirects", "all") + kwargs.setdefault("use_proxy", True) + kwargs.setdefault("timeout", self.timeout) + kwargs.setdefault("ciphers", self.ciphers) + kwargs.setdefault("ca_path", self.ca_path) + resp = open_url(uri, **kwargs) + headers = {k.lower(): v for (k, v) in resp.info().items()} + return resp, headers + # The following functions are to send GET/POST/PATCH/DELETE requests - def get_request(self, uri): + def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None): req_headers = dict(GET_HEADERS) + if override_headers: + req_headers.update(override_headers) username, password, basic_auth = self._auth_params(req_headers) + if timeout is None: + timeout = self.timeout try: - resp = open_url(uri, method="GET", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + # Service root is an unauthenticated resource; remove credentials + # in case the caller will be using sessions later. 
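# A sketch of _check_request_payload() semantics above, using literal dicts
# (utils is a RedfishUtils instance; uri is a placeholder string):
req = {"Attributes": {"BootMode": "Uefi"}}
cur = {"Attributes": {"BootMode": "Bios"}, "Id": "Bios.Settings"}
# utils._check_request_payload(req, cur, uri)
#   -> {'ret': True, 'changes_required': True}
# utils._check_request_payload({"NoSuchProp": 1}, cur, uri)
#   -> {'ret': False, 'changed': False, 'changes_required': False,
#       'msg': "<uri> does not support the property NoSuchProp"}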
+ if uri == (self.root_uri + self.service_root): + basic_auth = False + resp, headers = self._request( + uri, + method="GET", + headers=req_headers, + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + timeout=timeout, + ) + try: + data = json.loads(to_native(resp.read())) + except Exception as e: + # No response data; this is okay in certain cases + data = None + if not allow_no_resp: + raise except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" % (uri, e.reason)} @@ -85,25 +195,44 @@ class RedfishUtils(object): except Exception as e: return {'ret': False, 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'data': data, 'headers': headers} + return {'ret': True, 'data': data, 'headers': headers, 'resp': resp} - def post_request(self, uri, pyld): + def post_request(self, uri, pyld, multipart=False): req_headers = dict(POST_HEADERS) username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="POST", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + # When performing a POST to the session collection, credentials are + # provided in the request body. Do not provide the basic auth + # header since this can cause conflicts with some services + if self.sessions_uri is not None and uri == (self.root_uri + self.sessions_uri): + basic_auth = False + if multipart: + # Multipart requests require special handling to encode the request body + multipart_encoder = self._prepare_multipart(pyld) + data = multipart_encoder[0] + req_headers['content-type'] = multipart_encoder[1] + else: + data = json.dumps(pyld) + resp, headers = self._request( + uri, + data=data, + headers=req_headers, + method="POST", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) + try: + data = json.loads(to_native(resp.read())) + except Exception as e: + # No response data; this is okay in many cases + data = None except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" % (uri, e.reason)} @@ -111,11 +240,62 @@ class RedfishUtils(object): except Exception as e: return {'ret': False, 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'headers': headers, 'resp': resp} + return {'ret': True, 'data': data, 'headers': headers, 'resp': resp} - def patch_request(self, uri, pyld): + def patch_request(self, uri, pyld, check_pyld=False): req_headers = dict(PATCH_HEADERS) r = self.get_request(uri) + if r['ret']: + # Get etag from etag header or @odata.etag property + etag = r['headers'].get('etag') + if not etag: + etag = r['data'].get('@odata.etag') + if etag: + if self.strip_etag_quotes: + etag = etag.strip('"') + 
req_headers['If-Match'] = etag + + if check_pyld: + # Check the payload with the current settings to see if changes + # are needed or if there are unsupported properties + if r['ret']: + check_resp = self._check_request_payload(pyld, r['data'], uri) + if not check_resp.pop('changes_required'): + check_resp['changed'] = False + return check_resp + else: + r['changed'] = False + return r + + username, password, basic_auth = self._auth_params(req_headers) + try: + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PATCH", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) + except HTTPError as e: + msg, data = self._get_extended_message(e) + return {'ret': False, 'changed': False, + 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" + % (e.code, uri, msg), + 'status': e.code, 'data': data} + except URLError as e: + return {'ret': False, 'changed': False, + 'msg': "URL Error on PATCH request to '%s': '%s'" % (uri, e.reason)} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, 'changed': False, + 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))} + return {'ret': True, 'changed': True, 'resp': resp, 'msg': 'Modified %s' % uri} + + def put_request(self, uri, pyld): + req_headers = dict(PUT_HEADERS) + r = self.get_request(uri) if r['ret']: # Get etag from etag header or @odata.etag property etag = r['headers'].get('etag') @@ -127,25 +307,28 @@ class RedfishUtils(object): req_headers['If-Match'] = etag username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PATCH", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PUT", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, - 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" + 'msg': "HTTP Error %s on PUT request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: - return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'" + return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'" % (uri, e.reason)} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))} + 'msg': "Failed PUT request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} def delete_request(self, uri, pyld=None): @@ -153,18 +336,21 @@ class RedfishUtils(object): username, password, basic_auth = self._auth_params(req_headers) try: data = json.dumps(pyld) if pyld else None - resp = open_url(uri, data=data, - headers=req_headers, method="DELETE", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + data=data, + headers=req_headers, + method="DELETE", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - 
msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" % (uri, e.reason)} @@ -174,6 +360,59 @@ 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} + @staticmethod + def _prepare_multipart(fields): + """Prepares a multipart body based on a set of fields provided. + + Ideally it would have been good to use the existing 'prepare_multipart' + found in ansible.module_utils.urls, but it takes files and encodes them + as Base64 strings, which is not expected by Redfish services. It also + adds escaping of certain bytes in the payload, such as inserting '\r' + any time it finds a standalone '\n', which corrupts the image payload + sent to the service. This implementation is simplified to Redfish's + usage and doesn't necessarily represent an exhaustive method of + building multipart requests. + """ + + def write_buffer(body, line): + # Adds to the multipart body based on the provided data type + # At this time there is only support for strings, dictionaries, and bytes (default) + if isinstance(line, str): + body.append(to_bytes(line, encoding='utf-8')) + elif isinstance(line, dict): + body.append(to_bytes(json.dumps(line), encoding='utf-8')) + else: + body.append(line) + return + + # Generate a random boundary marker; may need to consider probing the + # payload for potential conflicts in the future + boundary = ''.join(random.choice(string.digits + string.ascii_letters) for i in range(30)) + body = [] + for form in fields: + # Fill in the form details + write_buffer(body, '--' + boundary) + + # Insert the headers (Content-Disposition and Content-Type) + if 'filename' in fields[form]: + name = os.path.basename(fields[form]['filename']).replace('"', '\\"') + write_buffer(body, 'Content-Disposition: form-data; name="%s"; filename="%s"' % (to_text(form), to_text(name))) + else: + write_buffer(body, 'Content-Disposition: form-data; name="%s"' % form) + write_buffer(body, 'Content-Type: %s' % fields[form]['mime_type']) + write_buffer(body, '') + + # Insert the payload; read from the file if not given by the caller + if 'content' not in fields[form]: + with open(to_bytes(fields[form]['filename'], errors='surrogate_or_strict'), 'rb') as f: + fields[form]['content'] = f.read() + write_buffer(body, fields[form]['content']) + + # Finalize the entire request + write_buffer(body, '--' + boundary + '--') + write_buffer(body, '') + return (b'\r\n'.join(body), 'multipart/form-data; boundary=' + boundary) + @staticmethod def _get_extended_message(error): """ @@ -181,20 +420,53 @@ :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error + :return: the JSON data of the response if present """ msg = http_client.responses.get(error.code, '') + data = None if error.code >= 400: try: body = error.read().decode('utf-8') data = json.loads(body) ext_info = data['error']['@Message.ExtendedInfo'] - msg = ext_info[0]['Message'] + # if the ExtendedInfo contains a user friendly message send it + # otherwise try to send the entire contents of ExtendedInfo + try: + msg = ext_info[0]['Message'] + except Exception: + msg = str(data['error']['@Message.ExtendedInfo'])
            except Exception:
                 pass
-        return msg
+        return msg, data

-    def _init_session(self):
-        pass
+    def _get_vendor(self):
+        # If we got the vendor info once, don't get it again
+        if self._vendor is not None:
+            return {'ret': True, 'Vendor': self._vendor}
+
+        # Find the vendor info from the service root
+        response = self.get_request(self.root_uri + self.service_root)
+        if response['ret'] is False:
+            return {'ret': False, 'Vendor': ''}
+        data = response['data']
+
+        if 'Vendor' in data:
+            # Extract the vendor string from the Vendor property
+            self._vendor = data["Vendor"]
+            return {'ret': True, 'Vendor': data["Vendor"]}
+        elif 'Oem' in data and len(data['Oem']) > 0:
+            # Determine the vendor from the OEM object if needed
+            vendor = list(data['Oem'].keys())[0]
+            if vendor == 'Hpe' or vendor == 'Hp':
+                # HPE uses Pascal-casing for their OEM object
+                # Older systems reported 'Hp' (pre-split)
+                vendor = 'HPE'
+            self._vendor = vendor
+            return {'ret': True, 'Vendor': vendor}
+        else:
+            # Could not determine; use an empty string
+            self._vendor = ''
+            return {'ret': True, 'Vendor': ''}

     def _find_accountservice_resource(self):
         response = self.get_request(self.root_uri + self.service_root)
@@ -216,22 +488,23 @@ class RedfishUtils(object):
         return {'ret': True}

     def _find_sessionservice_resource(self):
+        # Get the service root
         response = self.get_request(self.root_uri + self.service_root)
         if response['ret'] is False:
             return response
         data = response['data']
-        if 'SessionService' not in data:
+
+        # Check for the session service and session collection. Well-known
+        # defaults are provided in the constructor, but services that predate
+        # Redfish 1.6.0 might contain different values.
+        self.session_service_uri = data.get('SessionService', {}).get('@odata.id')
+        self.sessions_uri = data.get('Links', {}).get('Sessions', {}).get('@odata.id')
+
+        # If one isn't found, return an error
+        if self.session_service_uri is None:
             return {'ret': False, 'msg': "SessionService resource not found"}
-        else:
-            session_service = data["SessionService"]["@odata.id"]
-            response = self.get_request(self.root_uri + session_service)
-            if response['ret'] is False:
-                return response
-            data = response['data']
-            sessions = data['Sessions']['@odata.id']
-            if sessions[-1:] == '/':
-                sessions = sessions[:-1]
-            self.sessions_uri = sessions
+        if self.sessions_uri is None:
+            return {'ret': False, 'msg': "SessionCollection resource not found"}
         return {'ret': True}

     def _get_resource_uri_by_id(self, uris, id_prop):
@@ -289,9 +562,9 @@ class RedfishUtils(object):
         data = response['data']
         self.firmware_uri = self.software_uri = None
         if 'FirmwareInventory' in data:
-            self.firmware_uri = data['FirmwareInventory'][u'@odata.id']
+            self.firmware_uri = data['FirmwareInventory']['@odata.id']
         if 'SoftwareInventory' in data:
-            self.software_uri = data['SoftwareInventory'][u'@odata.id']
+            self.software_uri = data['SoftwareInventory']['@odata.id']
         return {'ret': True}

     def _find_chassis_resource(self):
@@ -365,12 +638,13 @@ class RedfishUtils(object):
         data = response['data']
         if 'Parameters' in data:
             params = data['Parameters']
-            ai = dict((p['Name'], p)
-                      for p in params if 'Name' in p)
+            ai = {p['Name']: p for p in params if 'Name' in p}
         if not ai:
-            ai = dict((k[:-24],
-                       {'AllowableValues': v}) for k, v in action.items()
-                      if k.endswith('@Redfish.AllowableValues'))
+            ai = {
+                k[:-24]: {'AllowableValues': v}
+                for k, v in action.items()
+                if k.endswith('@Redfish.AllowableValues')
+            }
         return ai

     def _get_allowable_values(self, action, name, default_values=None):
@@ -383,6 +657,24 @@ class RedfishUtils(object):
             allowable_values = default_values
         return allowable_values

+    def check_service_availability(self):
+        """
+        Checks if the service is accessible.
+
+        :return: dict containing the status of the service
+        """
+
+        # Get the service root
+        # Override the timeout since the service root is expected to be readily
+        # available.
+        service_root = self.get_request(self.root_uri + self.service_root, timeout=10)
+        if service_root['ret'] is False:
+            # Failed, either due to a timeout or HTTP error; not available
+            return {'ret': True, 'available': False}
+
+        # Successfully accessed the service root; available
+        return {'ret': True, 'available': True}
+
     def get_logs(self):
         log_svcs_uri_list = []
         list_of_logs = []
@@ -404,12 +696,12 @@ class RedfishUtils(object):
             return response
         data = response['data']
         for log_svcs_entry in data.get('Members', []):
-            response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id'])
+            response = self.get_request(self.root_uri + log_svcs_entry['@odata.id'])
             if response['ret'] is False:
                 return response
             _data = response['data']
             if 'Entries' in _data:
-                log_svcs_uri_list.append(_data['Entries'][u'@odata.id'])
+                log_svcs_uri_list.append(_data['Entries']['@odata.id'])

         # For each entry in LogServices, get log name and all log entries
         for log_svcs_uri in log_svcs_uri_list:
@@ -429,7 +721,7 @@ class RedfishUtils(object):
                     entry[prop] = logEntry.get(prop)
                 if entry:
                     list_of_log_entries.append(entry)
-            log_name = log_svcs_uri.split('/')[-1]
+            log_name = log_svcs_uri.rstrip('/').split('/')[-1]
             logs[log_name] = list_of_log_entries
             list_of_logs.append(logs)
@@ -452,15 +744,15 @@ class RedfishUtils(object):
             return response
         data = response['data']

-        for log_svcs_entry in data[u'Members']:
+        for log_svcs_entry in data['Members']:
             response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"])
             if response['ret'] is False:
                 return response
             _data = response['data']
             # Check to make sure option is available, otherwise error is ugly
             if "Actions" in _data:
-                if "#LogService.ClearLog" in _data[u"Actions"]:
-                    self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {})
+                if "#LogService.ClearLog" in _data["Actions"]:
+                    response = self.post_request(self.root_uri + _data["Actions"]["#LogService.ClearLog"]["target"], {})
             if response['ret'] is False:
                 return response
         return {'ret': True}
@@ -493,7 +785,8 @@ class RedfishUtils(object):
         properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
                       'Location', 'Manufacturer', 'Model', 'Name', 'Id',
                       'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
-        key = "StorageControllers"
+        key = "Controllers"
+        deprecated_key = "StorageControllers"

         # Find Storage service
         response = self.get_request(self.root_uri + systems_uri)
@@ -514,14 +807,37 @@ class RedfishUtils(object):

         # Loop through Members and their StorageControllers
         # and gather properties from each StorageController
-        if data[u'Members']:
-            for storage_member in data[u'Members']:
-                storage_member_uri = storage_member[u'@odata.id']
+        if data['Members']:
+            for storage_member in data['Members']:
+                storage_member_uri = storage_member['@odata.id']
                 response = self.get_request(self.root_uri + storage_member_uri)
                 data = response['data']

                 if key in data:
-                    controller_list = data[key]
+                    controllers_uri = data[key]['@odata.id']
+
+                    response = self.get_request(self.root_uri + controllers_uri)
+                    if response['ret'] is False:
+                        return response
+                    result['ret'] = True
+                    data = response['data']
+
+                    if data['Members']:
+                        for controller_member in 
data['Members']: + controller_member_uri = controller_member['@odata.id'] + response = self.get_request(self.root_uri + controller_member_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + controller_result = {} + for property in properties: + if property in data: + controller_result[property] = data[property] + controller_results.append(controller_result) + elif deprecated_key in data: + controller_list = data[deprecated_key] for controller in controller_list: controller_result = {} for property in properties: @@ -543,7 +859,7 @@ class RedfishUtils(object): properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', 'EncryptionAbility', 'EncryptionStatus', 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', - 'Manufacturer', 'MediaType', 'Model', 'Name', + 'Links', 'Manufacturer', 'MediaType', 'Model', 'Name', 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', 'RotationSpeedRPM', 'SerialNumber', 'Status'] @@ -559,16 +875,16 @@ class RedfishUtils(object): if 'Storage' in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] + storage_uri = data['Storage']['@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] - if data[u'Members']: - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) + if data['Members']: + for controller in data['Members']: + controller_list.append(controller['@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) @@ -576,7 +892,26 @@ class RedfishUtils(object): return response data = response['data'] controller_name = 'Controller 1' - if 'StorageControllers' in data: + storage_id = data['Id'] + if 'Controllers' in data: + controllers_uri = data['Controllers']['@odata.id'] + + response = self.get_request(self.root_uri + controllers_uri) + if response['ret'] is False: + return response + result['ret'] = True + cdata = response['data'] + + if cdata['Members']: + controller_member_uri = cdata['Members'][0]['@odata.id'] + + response = self.get_request(self.root_uri + controller_member_uri) + if response['ret'] is False: + return response + result['ret'] = True + cdata = response['data'] + controller_name = cdata['Name'] + elif 'StorageControllers' in data: sc = data['StorageControllers'] if sc: if 'Name' in sc[0]: @@ -586,18 +921,25 @@ class RedfishUtils(object): controller_name = 'Controller %s' % sc_id drive_results = [] if 'Drives' in data: - for device in data[u'Drives']: - disk_uri = self.root_uri + device[u'@odata.id'] + for device in data['Drives']: + disk_uri = self.root_uri + device['@odata.id'] response = self.get_request(disk_uri) data = response['data'] drive_result = {} + drive_result['RedfishURI'] = data['@odata.id'] for property in properties: if property in data: if data[property] is not None: - drive_result[property] = data[property] + if property == "Links": + if "Volumes" in data["Links"].keys(): + volumes = [v["@odata.id"] for v in data["Links"]["Volumes"]] + drive_result["Volumes"] = volumes + else: + drive_result[property] = data[property] drive_results.append(drive_result) drives = {'Controller': controller_name, + 'StorageId': storage_id, 'Drives': drive_results} result["entries"].append(drives) @@ -610,8 +952,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for controller in data[u'Members']: - 
controller_list.append(controller[u'@odata.id']) + for controller in data['Members']: + controller_list.append(controller['@odata.id']) for c in controller_list: uri = self.root_uri + c @@ -625,7 +967,7 @@ class RedfishUtils(object): sc_id = data.get('Id', '1') controller_name = 'Controller %s' % sc_id drive_results = [] - for device in data[u'Devices']: + for device in data['Devices']: drive_result = {} for property in properties: if property in device: @@ -663,7 +1005,7 @@ class RedfishUtils(object): if 'Storage' in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] + storage_uri = data['Storage']['@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response @@ -671,16 +1013,34 @@ class RedfishUtils(object): data = response['data'] if data.get('Members'): - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) - for c in controller_list: + for controller in data['Members']: + controller_list.append(controller['@odata.id']) + for idx, c in enumerate(controller_list): uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] - controller_name = 'Controller 1' - if 'StorageControllers' in data: + controller_name = 'Controller %s' % str(idx) + if 'Controllers' in data: + response = self.get_request(self.root_uri + data['Controllers']['@odata.id']) + if response['ret'] is False: + return response + c_data = response['data'] + + if c_data.get('Members') and c_data['Members']: + response = self.get_request(self.root_uri + c_data['Members'][0]['@odata.id']) + if response['ret'] is False: + return response + member_data = response['data'] + + if member_data: + if 'Name' in member_data: + controller_name = member_data['Name'] + else: + controller_id = member_data.get('Id', '1') + controller_name = 'Controller %s' % controller_id + elif 'StorageControllers' in data: sc = data['StorageControllers'] if sc: if 'Name' in sc[0]: @@ -689,15 +1049,16 @@ class RedfishUtils(object): sc_id = sc[0].get('Id', '1') controller_name = 'Controller %s' % sc_id volume_results = [] + volume_list = [] if 'Volumes' in data: # Get a list of all volumes and build respective URIs - volumes_uri = data[u'Volumes'][u'@odata.id'] + volumes_uri = data['Volumes']['@odata.id'] response = self.get_request(self.root_uri + volumes_uri) data = response['data'] if data.get('Members'): - for volume in data[u'Members']: - volume_list.append(volume[u'@odata.id']) + for volume in data['Members']: + volume_list.append(volume['@odata.id']) for v in volume_list: uri = self.root_uri + v response = self.get_request(uri) @@ -714,10 +1075,10 @@ class RedfishUtils(object): # Get related Drives Id drive_id_list = [] if 'Links' in data: - if 'Drives' in data[u'Links']: - for link in data[u'Links'][u'Drives']: - drive_id_link = link[u'@odata.id'] - drive_id = drive_id_link.split("/")[-1] + if 'Drives' in data['Links']: + for link in data['Links']['Drives']: + drive_id_link = link['@odata.id'] + drive_id = drive_id_link.rstrip('/').split('/')[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list volume_results.append(volume_result) @@ -732,30 +1093,26 @@ class RedfishUtils(object): def get_multi_volume_inventory(self): return self.aggregate_systems(self.get_volume_inventory) - def manage_indicator_led(self, command): - result = {} - key = 'IndicatorLED' + def manage_system_indicator_led(self, 
command): + return self.manage_indicator_led(command, self.systems_uri) + def manage_chassis_indicator_led(self, command): + return self.manage_indicator_led(command, self.chassis_uri) + + def manage_indicator_led(self, command, resource_uri=None): + # If no resource is specified; default to the Chassis resource + if resource_uri is None: + resource_uri = self.chassis_uri + + # Perform a PATCH on the IndicatorLED property based on the requested command payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} - - result = {} - response = self.get_request(self.root_uri + self.chassis_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - if command in payloads.keys(): - payload = {'IndicatorLED': payloads[command]} - response = self.patch_request(self.root_uri + self.chassis_uri, payload) - if response['ret'] is False: - return response - else: - return {'ret': False, 'msg': 'Invalid command'} - - return result + if command not in payloads.keys(): + return {'ret': False, 'msg': 'Invalid command (%s)' % command} + payload = {'IndicatorLED': payloads[command]} + resp = self.patch_request(self.root_uri + resource_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Set IndicatorLED to %s' % payloads[command] + return resp def _map_reset_type(self, reset_type, allowable_values): equiv_types = { @@ -780,20 +1137,27 @@ class RedfishUtils(object): return self.manage_power(command, self.systems_uri, '#ComputerSystem.Reset') - def manage_manager_power(self, command): + def manage_manager_power(self, command, wait=False, wait_timeout=120): return self.manage_power(command, self.manager_uri, - '#Manager.Reset') + '#Manager.Reset', wait, wait_timeout) - def manage_power(self, command, resource_uri, action_name): + def manage_power(self, command, resource_uri, action_name, wait=False, + wait_timeout=120): key = "Actions" reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', 'GracefulRestart', 'ForceRestart', 'Nmi', - 'ForceOn', 'PushPowerButton', 'PowerCycle'] + 'ForceOn', 'PushPowerButton', 'PowerCycle', + 'FullPowerCycle'] # command should be PowerOn, PowerForceOff, etc. if not command.startswith('Power'): return {'ret': False, 'msg': 'Invalid Command (%s)' % command} - reset_type = command[5:] + + # Commands (except PowerCycle) will be stripped of the 'Power' prefix + if command == 'PowerCycle': + reset_type = command + else: + reset_type = command[5:] # map Reboot to a ResetType that does a reboot if reset_type == 'Reboot': @@ -839,34 +1203,123 @@ class RedfishUtils(object): response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response + + # If requested to wait for the service to be available again, block + # until it is ready + if wait: + elapsed_time = 0 + start_time = time.time() + # Start with a large enough sleep. Some services will process new + # requests while in the middle of shutting down, thus breaking out + # early. + time.sleep(30) + + # Periodically check for the service's availability. 
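The wait block that follows is a plain poll-with-timeout loop. The same pattern in isolation, as a hedged sketch where `probe` is a stand-in for `check_service_availability` (any callable returning True once the service answers again):

```python
import time

# Sketch of the poll-with-timeout pattern used below; 'probe' is any
# callable returning True once the service is reachable again.
def wait_until(probe, wait_timeout=120, initial_sleep=30, interval=5):
    start_time = time.time()
    time.sleep(initial_sleep)   # let the shutdown actually begin first
    elapsed_time = 0
    while elapsed_time <= wait_timeout:
        if probe():
            return True         # service came back within the window
        time.sleep(interval)
        elapsed_time = time.time() - start_time
    return False                # timer exhausted without a response
```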
+            while elapsed_time <= wait_timeout:
+                status = self.check_service_availability()
+                if status['available']:
+                    # It is available; we are done
+                    break
+                time.sleep(5)
+                elapsed_time = time.time() - start_time
+
+            if elapsed_time > wait_timeout:
+                # Exhausted the wait timer; error
+                return {'ret': False, 'changed': True,
+                        'msg': 'The service did not become available after %d seconds' % wait_timeout}
         return {'ret': True, 'changed': True}

-    def _find_account_uri(self, username=None, acct_id=None):
-        if not any((username, acct_id)):
-            return {'ret': False, 'msg':
-                    'Must provide either account_id or account_username'}
+    def manager_reset_to_defaults(self, command):
+        return self.reset_to_defaults(command, self.manager_uri,
+                                      '#Manager.ResetToDefaults')

-        response = self.get_request(self.root_uri + self.accounts_uri)
+    def reset_to_defaults(self, command, resource_uri, action_name):
+        key = "Actions"
+        reset_type_values = ['ResetAll',
+                             'PreserveNetworkAndUsers',
+                             'PreserveNetwork']
+
+        if command not in reset_type_values:
+            return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+
+        # read the resource to find the reset action
+        response = self.get_request(self.root_uri + resource_uri)
         if response['ret'] is False:
             return response
         data = response['data']
-        uris = [a.get('@odata.id') for a in data.get('Members', []) if
-                a.get('@odata.id')]
-        for uri in uris:
-            response = self.get_request(self.root_uri + uri)
+        # get the reset Action and target URI
+        if key not in data or action_name not in data[key]:
+            return {'ret': False, 'msg': 'Action %s not found' % action_name}
+        reset_action = data[key][action_name]
+        if 'target' not in reset_action:
+            return {'ret': False,
+                    'msg': 'target URI missing from Action %s' % action_name}
+        action_uri = reset_action['target']
+
+        # get AllowableValues
+        ai = self._get_all_action_info_values(reset_action)
+        allowable_values = ai.get('ResetType', {}).get('AllowableValues', [])
+
+        # verify the requested reset type is supported by the service
+        if allowable_values and command not in allowable_values:
+            return {'ret': False,
+                    'msg': 'Specified reset type (%s) not supported '
+                           'by service. 
Supported types: %s' % + (command, allowable_values)} + + # define payload + payload = {'ResetType': command} + + # POST to Action URI + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + def _find_account_uri(self, username=None, acct_id=None, password_change_uri=None): + if not any((username, acct_id)): + return {'ret': False, 'msg': + 'Must provide either account_id or account_username'} + + if password_change_uri: + # Password change required; go directly to the specified URI + response = self.get_request(self.root_uri + password_change_uri) if response['ret'] is False: - continue + return response data = response['data'] headers = response['headers'] if username: if username == data.get('UserName'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} if acct_id: if acct_id == data.get('Id'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} + else: + # Walk the accounts collection to find the desired user + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if username: + if username == data.get('UserName'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + if acct_id: + if acct_id == data.get('Id'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No account with the given account_id or account_username found'} @@ -901,7 +1354,8 @@ class RedfishUtils(object): user_list = [] users_results = [] # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] + properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled', + 'AccountTypes', 'OEMAccountTypes'] response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: @@ -910,7 +1364,7 @@ class RedfishUtils(object): data = response['data'] for users in data.get('Members', []): - user_list.append(users[u'@odata.id']) # user_list[] are URIs + user_list.append(users['@odata.id']) # user_list[] are URIs # for each user, get details for uri in user_list: @@ -924,6 +1378,12 @@ class RedfishUtils(object): if property in data: user[property] = data[property] + # Filter out empty account slots + # An empty account slot can be detected if the username is an empty + # string and if the account is disabled + if user.get('UserName', '') == '' and not user.get('Enabled', False): + continue + users_results.append(user) result["entries"] = users_results return result @@ -946,10 +1406,11 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') + 
return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def add_user(self, user): if not user.get('account_username'): @@ -979,6 +1440,10 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') if user.get('account_id'): payload['Id'] = user.get('account_id') @@ -997,17 +1462,9 @@ class RedfishUtils(object): if not response['ret']: return response uri = response['uri'] - data = response['data'] - - if data.get('Enabled', True): - # account already enabled, nothing to do - return {'ret': True, 'changed': False} payload = {'Enabled': True} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def delete_user_via_patch(self, user, uri=None, data=None): if not uri: @@ -1018,17 +1475,10 @@ class RedfishUtils(object): uri = response['uri'] data = response['data'] - if data and data.get('UserName') == '' and not data.get('Enabled', False): - # account UserName already cleared, nothing to do - return {'ret': True, 'changed': False} - payload = {'UserName': ''} if data.get('Enabled', False): payload['Enabled'] = False - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def delete_user(self, user): response = self._find_account_uri(username=user.get('account_username'), @@ -1065,18 +1515,10 @@ class RedfishUtils(object): acct_id=user.get('account_id')) if not response['ret']: return response + uri = response['uri'] - data = response['data'] - - if not data.get('Enabled'): - # account already disabled, nothing to do - return {'ret': True, 'changed': False} - payload = {'Enabled': False} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_role(self, user): if not user.get('account_roleid'): @@ -1087,30 +1529,25 @@ class RedfishUtils(object): acct_id=user.get('account_id')) if not response['ret']: return response + uri = response['uri'] - data = response['data'] - - if data.get('RoleId') == user.get('account_roleid'): - # account already has RoleId , nothing to do - return {'ret': True, 'changed': False} - - payload = {'RoleId': user.get('account_roleid')} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + payload = {'RoleId': user['account_roleid']} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_password(self, user): + if not user.get('account_password'): + return {'ret': False, 'msg': + 'Must provide account_password for UpdateUserPassword command'} + response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) + acct_id=user.get('account_id'), + password_change_uri=user.get('account_passwordchangerequired')) if not response['ret']: return response + uri = response['uri'] payload = {'Password': user['account_password']} - 
response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_name(self, user): if not user.get('account_updatename'): @@ -1121,53 +1558,77 @@ class RedfishUtils(object): acct_id=user.get('account_id')) if not response['ret']: return response + uri = response['uri'] payload = {'UserName': user['account_updatename']} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_accountservice_properties(self, user): - if user.get('account_properties') is None: + account_properties = user.get('account_properties') + if account_properties is None: return {'ret': False, 'msg': 'Must provide account_properties for UpdateAccountServiceProperties command'} - account_properties = user.get('account_properties') - # Find AccountService + # Find the AccountService resource response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] - if 'AccountService' not in data: + accountservice_uri = data.get("AccountService", {}).get("@odata.id") + if accountservice_uri is None: return {'ret': False, 'msg': "AccountService resource not found"} - accountservice_uri = data["AccountService"]["@odata.id"] - # Check support or not - response = self.get_request(self.root_uri + accountservice_uri) - if response['ret'] is False: + # Perform a PATCH on the AccountService resource with the requested properties + resp = self.patch_request(self.root_uri + accountservice_uri, account_properties, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified account service' + return resp + + def update_user_accounttypes(self, user): + account_types = user.get('account_accounttypes') + oemaccount_types = user.get('account_oemaccounttypes') + if account_types is None and oemaccount_types is None: + return {'ret': False, 'msg': + 'Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command'} + + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: return response - data = response['data'] - for property_name in account_properties.keys(): - if property_name not in data: - return {'ret': False, 'msg': - 'property %s not supported' % property_name} - # if properties is already matched, nothing to do - need_change = False - for property_name in account_properties.keys(): - if account_properties[property_name] != data[property_name]: - need_change = True - break + uri = response['uri'] + payload = {} + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') - if not need_change: - return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) - payload = account_properties - response = self.patch_request(self.root_uri + accountservice_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"} + def check_password_change_required(self, return_data): + """ + Checks a response if a user 
needs to change their password + + :param return_data: The return data for a failed request + :return: None or the URI of the account to update + """ + uri = None + if 'data' in return_data: + # Find the extended messages in the response payload + extended_messages = return_data['data'].get('error', {}).get('@Message.ExtendedInfo', []) + if len(extended_messages) == 0: + extended_messages = return_data['data'].get('@Message.ExtendedInfo', []) + # Go through each message and look for Base.1.X.PasswordChangeRequired + for message in extended_messages: + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue + if message_id.startswith('Base.1.') and message_id.endswith('.PasswordChangeRequired'): + # Password change required; get the URI of the user account + uri = message['MessageArgs'][0] + break + return uri def get_sessions(self): result = {} @@ -1183,8 +1644,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for sessions in data[u'Members']: - session_list.append(sessions[u'@odata.id']) # session_list[] are URIs + for sessions in data['Members']: + session_list.append(sessions['@odata.id']) # session_list[] are URIs # for each session, get details for uri in session_list: @@ -1210,15 +1671,15 @@ class RedfishUtils(object): # if no active sessions, return as success if data['Members@odata.count'] == 0: - return {'ret': True, 'changed': False, 'msg': "There is no active sessions"} + return {'ret': True, 'changed': False, 'msg': "There are no active sessions"} # loop to delete every active session - for session in data[u'Members']: - response = self.delete_request(self.root_uri + session[u'@odata.id']) + for session in data['Members']: + response = self.delete_request(self.root_uri + session['@odata.id']) if response['ret'] is False: return response - return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"} + return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"} def create_session(self): if not self.creds.get('user') or not self.creds.get('pswd'): @@ -1280,6 +1741,8 @@ class RedfishUtils(object): data = response['data'] + result['multipart_supported'] = 'MultipartHttpPushUri' in data + if "Actions" in data: actions = data['Actions'] if len(actions) > 0: @@ -1299,29 +1762,37 @@ class RedfishUtils(object): def _software_inventory(self, uri): result = {} - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - result['entries'] = [] - for member in data[u'Members']: - uri = self.root_uri + member[u'@odata.id'] - # Get details for each software or firmware member - response = self.get_request(uri) + + while uri: + response = self.get_request(self.root_uri + uri) if response['ret'] is False: return response result['ret'] = True + data = response['data'] - software = {} - # Get these standard properties if present - for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', - 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', - 'ReleaseDate']: - if key in data: - software[key] = data.get(key) - result['entries'].append(software) + if data.get('Members@odata.nextLink'): + uri = data.get('Members@odata.nextLink') + else: + uri = None + + for member in data['Members']: + fw_uri = self.root_uri + member['@odata.id'] + # Get details for each software or firmware member + response = self.get_request(fw_uri) + if response['ret'] is 
False: + return response + result['ret'] = True + data = response['data'] + software = {} + # Get these standard properties if present + for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', + 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', + 'ReleaseDate']: + if key in data: + software[key] = data.get(key) + result['entries'].append(software) + return result def get_firmware_inventory(self): @@ -1336,11 +1807,85 @@ class RedfishUtils(object): else: return self._software_inventory(self.software_uri) + def _operation_results(self, response, data, handle=None): + """ + Builds the results for an operation from task, job, or action response. + + :param response: HTTP response object + :param data: HTTP response data + :param handle: The task or job handle that was last used + :return: dict containing operation results + """ + + operation_results = {'status': None, 'messages': [], 'handle': None, 'ret': True, + 'resets_requested': []} + + if response.status == 204: + # No content; successful, but nothing to return + # Use the Redfish "Completed" enum from TaskState for the operation status + operation_results['status'] = 'Completed' + else: + # Parse the response body for details + + # Determine the next handle, if any + operation_results['handle'] = handle + if response.status == 202: + # Task generated; get the task monitor URI + operation_results['handle'] = response.getheader('Location', handle) + + # Pull out the status and messages based on the body format + if data is not None: + response_type = data.get('@odata.type', '') + if response_type.startswith('#Task.') or response_type.startswith('#Job.'): + # Task and Job have similar enough structures to treat the same + operation_results['status'] = data.get('TaskState', data.get('JobState')) + operation_results['messages'] = data.get('Messages', []) + else: + # Error response body, which is a bit of a misnomer since it is used in successful action responses + operation_results['status'] = 'Completed' + if response.status >= 400: + operation_results['status'] = 'Exception' + operation_results['messages'] = data.get('error', {}).get('@Message.ExtendedInfo', []) + else: + # No response body (or malformed); build based on status code + operation_results['status'] = 'Completed' + if response.status == 202: + operation_results['status'] = 'New' + elif response.status >= 400: + operation_results['status'] = 'Exception' + + # Clear out the handle if the operation is complete + if operation_results['status'] in ['Completed', 'Cancelled', 'Exception', 'Killed']: + operation_results['handle'] = None + + # Scan the messages to see if next steps are needed + for message in operation_results['messages']: + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue + + if message_id.startswith('Update.1.') and message_id.endswith('.OperationTransitionedToJob'): + # Operation rerouted to a job; update the status and handle + operation_results['status'] = 'New' + operation_results['handle'] = message['MessageArgs'][0] + operation_results['resets_requested'] = [] + # No need to process other messages in this case + break + + if message_id.startswith('Base.1.') and message_id.endswith('.ResetRequired'): + # A reset to some device is needed to continue the update + reset = {'uri': message['MessageArgs'][0], 'type': message['MessageArgs'][1]} + operation_results['resets_requested'].append(reset) + + return operation_results + def simple_update(self, 
update_opts): image_uri = update_opts.get('update_image_uri') protocol = update_opts.get('update_protocol') targets = update_opts.get('update_targets') creds = update_opts.get('update_creds') + apply_time = update_opts.get('update_apply_time') if not image_uri: return {'ret': False, 'msg': @@ -1391,11 +1936,131 @@ class RedfishUtils(object): payload["Username"] = creds.get('username') if creds.get('password'): payload["Password"] = creds.get('password') + if apply_time: + payload["@Redfish.OperationApplyTime"] = apply_time response = self.post_request(self.root_uri + update_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, - 'msg': "SimpleUpdate requested"} + 'msg': "SimpleUpdate requested", + 'update_status': self._operation_results(response['resp'], response['data'])} + + def multipath_http_push_update(self, update_opts): + """ + Provides a software update via the URI specified by the + MultipartHttpPushUri property. Callers should adjust the 'timeout' + variable in the base object to accommodate the size of the image and + speed of the transfer. For example, a 200MB image will likely take + more than the default 10 second timeout. + + :param update_opts: The parameters for the update operation + :return: dict containing the response of the update request + """ + image_file = update_opts.get('update_image_file') + targets = update_opts.get('update_targets') + apply_time = update_opts.get('update_apply_time') + oem_params = update_opts.get('update_oem_params') + custom_oem_header = update_opts.get('update_custom_oem_header') + custom_oem_mime_type = update_opts.get('update_custom_oem_mime_type') + custom_oem_params = update_opts.get('update_custom_oem_params') + + # Ensure the image file is provided + if not image_file: + return {'ret': False, 'msg': + 'Must specify update_image_file for the MultipartHTTPPushUpdate command'} + if not os.path.isfile(image_file): + return {'ret': False, 'msg': + 'Must specify a valid file for the MultipartHTTPPushUpdate command'} + try: + with open(image_file, 'rb') as f: + image_payload = f.read() + except Exception as e: + return {'ret': False, 'msg': + 'Could not read file %s' % image_file} + + # Check that multipart HTTP push updates are supported + response = self.get_request(self.root_uri + self.update_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'MultipartHttpPushUri' not in data: + return {'ret': False, 'msg': 'Service does not support MultipartHttpPushUri'} + update_uri = data['MultipartHttpPushUri'] + + # Assemble the JSON payload portion of the request + payload = {} + if targets: + payload["Targets"] = targets + if apply_time: + payload["@Redfish.OperationApplyTime"] = apply_time + if oem_params: + payload["Oem"] = oem_params + multipart_payload = { + 'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'}, + 'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'} + } + if custom_oem_params: + multipart_payload[custom_oem_header] = {'content': custom_oem_params} + if custom_oem_mime_type: + multipart_payload[custom_oem_header]['mime_type'] = custom_oem_mime_type + + response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "MultipartHTTPPushUpdate requested", + 'update_status': self._operation_results(response['resp'], response['data'])} + + def 
get_update_status(self, update_handle):
+        """
+        Gets the status of an update operation.
+
+        :param update_handle: The task or job handle tracking the update
+        :return: dict containing the response of the update status
+        """
+
+        if not update_handle:
+            return {'ret': False, 'msg': 'Must provide a handle tracking the update.'}
+
+        # Get the task or job tracking the update
+        response = self.get_request(self.root_uri + update_handle, allow_no_resp=True)
+        if response['ret'] is False:
+            return response
+
+        # Inspect the response to build the update status
+        return self._operation_results(response['resp'], response['data'], update_handle)
+
+    def perform_requested_update_operations(self, update_handle):
+        """
+        Performs requested operations to allow the update to continue.
+
+        :param update_handle: The task or job handle tracking the update
+        :return: dict containing the result of the operations
+        """
+
+        # Get the current update status
+        update_status = self.get_update_status(update_handle)
+        if update_status['ret'] is False:
+            return update_status
+
+        changed = False
+
+        # Perform any requested resets
+        for reset in update_status['resets_requested']:
+            resp = self.post_request(self.root_uri + reset['uri'], {'ResetType': reset['type']})
+            if resp['ret'] is False:
+                # Override the 'changed' indicator since other resets may have
+                # been successful
+                resp['changed'] = changed
+                return resp
+            changed = True
+
+        msg = 'No operations required for the update'
+        if changed:
+            # May need to fine-tune this message if the scope of the
+            # requested operations grows over time
+            msg = 'One or more components reset to continue the update'
+        return {'ret': True, 'changed': changed, 'msg': msg}

     def get_bios_attributes(self, systems_uri):
         result = {}
@@ -1419,7 +2084,7 @@ class RedfishUtils(object):
             return response
         result['ret'] = True
         data = response['data']
-        for attribute in data[u'Attributes'].items():
+        for attribute in data['Attributes'].items():
             bios_attributes[attribute[0]] = attribute[1]
         result["entries"] = bios_attributes
         return result
@@ -1535,60 +2200,52 @@ class RedfishUtils(object):
         return self.aggregate_systems(self.get_boot_override)

     def set_bios_default_settings(self):
-        result = {}
-        key = "Bios"
-
-        # Search for 'key' entry and extract URI from it
+        # Find the Bios resource from the requested ComputerSystem resource
         response = self.get_request(self.root_uri + self.systems_uri)
         if response['ret'] is False:
             return response
-        result['ret'] = True
         data = response['data']
+        bios_uri = data.get('Bios', {}).get('@odata.id')
+        if bios_uri is None:
+            return {'ret': False, 'msg': 'Bios resource not found'}

-        if key not in data:
-            return {'ret': False, 'msg': "Key %s not found" % key}
-
-        bios_uri = data[key]["@odata.id"]
-
-        # Extract proper URI
+        # Find the URI of the ResetBios action
         response = self.get_request(self.root_uri + bios_uri)
         if response['ret'] is False:
             return response
-        result['ret'] = True
         data = response['data']
-        reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"]
+        reset_bios_uri = data.get('Actions', {}).get('#Bios.ResetBios', {}).get('target')
+        if reset_bios_uri is None:
+            return {'ret': False, 'msg': 'ResetBios action not found'}

-        response = self.post_request(self.root_uri + reset_bios_settings_uri, {})
+        # Perform the ResetBios action
+        response = self.post_request(self.root_uri + reset_bios_uri, {})
         if response['ret'] is False:
             return response
-        return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"}
+        return {'ret': True, 'changed': True, 
'msg': "BIOS set to default settings"} def set_boot_override(self, boot_opts): - result = {} - key = "Boot" - + # Extract the requested boot override options bootdevice = boot_opts.get('bootdevice') uefi_target = boot_opts.get('uefi_target') boot_next = boot_opts.get('boot_next') override_enabled = boot_opts.get('override_enabled') boot_override_mode = boot_opts.get('boot_override_mode') - if not bootdevice and override_enabled != 'Disabled': return {'ret': False, 'msg': "bootdevice option required for temporary boot override"} - # Search for 'key' entry and extract URI from it + # Get the current boot override options from the Boot property response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] + boot = data.get('Boot') + if boot is None: + return {'ret': False, 'msg': "Boot property not found"} + cur_override_mode = boot.get('BootSourceOverrideMode') - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - boot = data[key] - + # Check if the requested target is supported by the system if override_enabled != 'Disabled': annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' if annotation in boot: @@ -1598,26 +2255,18 @@ class RedfishUtils(object): 'msg': "Boot device %s not in list of allowable values (%s)" % (bootdevice, allowable_values)} - # read existing values - cur_enabled = boot.get('BootSourceOverrideEnabled') - target = boot.get('BootSourceOverrideTarget') - cur_uefi_target = boot.get('UefiTargetBootSourceOverride') - cur_boot_next = boot.get('BootNext') - cur_override_mode = boot.get('BootSourceOverrideMode') - + # Build the request payload based on the desired boot override options if override_enabled == 'Disabled': payload = { 'Boot': { - 'BootSourceOverrideEnabled': override_enabled + 'BootSourceOverrideEnabled': override_enabled, + 'BootSourceOverrideTarget': 'None' } } elif bootdevice == 'UefiTarget': if not uefi_target: return {'ret': False, 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} - if override_enabled == cur_enabled and target == bootdevice and uefi_target == cur_uefi_target: - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, @@ -1625,13 +2274,13 @@ class RedfishUtils(object): 'UefiTargetBootSourceOverride': uefi_target } } + # If needed, also specify UEFI mode + if cur_override_mode == 'Legacy': + payload['Boot']['BootSourceOverrideMode'] = 'UEFI' elif bootdevice == 'UefiBootNext': if not boot_next: return {'ret': False, 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} - if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next: - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, @@ -1639,11 +2288,10 @@ class RedfishUtils(object): 'BootNext': boot_next } } + # If needed, also specify UEFI mode + if cur_override_mode == 'Legacy': + payload['Boot']['BootSourceOverrideMode'] = 'UEFI' else: - if (cur_enabled == override_enabled and target == bootdevice and - (cur_override_mode == boot_override_mode or not boot_override_mode)): - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, @@ -1653,32 +2301,35 @@ class RedfishUtils(object): if 
boot_override_mode: payload['Boot']['BootSourceOverrideMode'] = boot_override_mode - response = self.patch_request(self.root_uri + self.systems_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True} + # Apply the requested boot override request + resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True) + if resp['ret'] is False: + # WORKAROUND + # Older Dell systems do not allow BootSourceOverrideEnabled to be + # specified with UefiTarget as the target device + vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + if bootdevice == 'UefiTarget' and override_enabled != 'Disabled': + payload['Boot'].pop('BootSourceOverrideEnabled', None) + resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Updated the boot override settings' + return resp def set_bios_attributes(self, attributes): - result = {} - key = "Bios" - - # Search for 'key' entry and extract URI from it + # Find the Bios resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] + bios_uri = data.get('Bios', {}).get('@odata.id') + if bios_uri is None: + return {'ret': False, 'msg': 'Bios resource not found'} - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI + # Get the current BIOS settings response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] # Make a copy of the attributes dict @@ -1689,19 +2340,19 @@ class RedfishUtils(object): # Check the attributes for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data[u'Attributes']: + if attr_name not in data['Attributes']: # Remove and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) del attrs_to_patch[attr_name] continue # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr_name] == attributes[attr_name]: + if data['Attributes'][attr_name] == attr_value: del attrs_to_patch[attr_name] warning = "" if attrs_bad: - warning = "Incorrect attributes %s" % (attrs_bad) + warning = "Unsupported attributes %s" % (attrs_bad) # Return success w/ changed=False if no attrs need to be changed if not attrs_to_patch: @@ -1709,16 +2360,26 @@ class RedfishUtils(object): 'msg': "BIOS attributes already set", 'warning': warning} - # Get the SettingsObject URI - set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] + # Get the SettingsObject URI to apply the attributes + set_bios_attr_uri = data.get("@Redfish.Settings", {}).get("SettingsObject", {}).get("@odata.id") + if set_bios_attr_uri is None: + return {'ret': False, 'msg': "Settings resource for BIOS attributes not found."} # Construct payload and issue PATCH command payload = {"Attributes": attrs_to_patch} + + # WORKAROUND + # Dell systems require manually setting the apply time to "OnReset" + # to spawn a proprietary job to apply the BIOS settings + vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + payload.update({"@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}) + response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return 
{'ret': True, 'changed': True, - 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'msg': "Modified BIOS attributes %s. A reboot is required" % (attrs_to_patch), 'warning': warning} def set_boot_order(self, boot_list): @@ -1740,7 +2401,7 @@ class RedfishUtils(object): boot_order = boot['BootOrder'] boot_options_dict = self._get_boot_options_dict(boot) - # validate boot_list against BootOptionReferences if available + # Verify the requested boot options are valid if boot_options_dict: boot_option_references = boot_options_dict.keys() for ref in boot_list: @@ -1748,20 +2409,16 @@ class RedfishUtils(object): return {'ret': False, 'msg': "BootOptionReference %s not found in BootOptions" % ref} - # If requested BootOrder is already set, nothing to do - if boot_order == boot_list: - return {'ret': True, 'changed': False, - 'msg': "BootOrder already set to %s" % boot_list} - + # Apply the boot order payload = { 'Boot': { 'BootOrder': boot_list } } - response = self.patch_request(self.root_uri + systems_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "BootOrder set"} + resp = self.patch_request(self.root_uri + systems_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified the boot order' + return resp def set_default_boot_order(self): systems_uri = self.systems_uri @@ -1834,12 +2491,16 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for device in data[u'Fans']: - fan = {} - for property in properties: - if property in device: - fan[property] = device[property] - fan_results.append(fan) + # Checking if fans are present + if 'Fans' in data: + for device in data['Fans']: + fan = {} + for property in properties: + if property in device: + fan[property] = device[property] + fan_results.append(fan) + else: + return {'ret': False, 'msg': "No Fans present"} result["entries"] = fan_results return result @@ -1871,14 +2532,13 @@ class RedfishUtils(object): for property in properties: if property in data: chassis_power_result[property] = data[property] - else: - return {'ret': False, 'msg': 'Key PowerControl not found.'} chassis_power_results.append(chassis_power_result) - else: - return {'ret': False, 'msg': 'Key Power not found.'} - result['entries'] = chassis_power_results - return result + if len(chassis_power_results) > 0: + result['entries'] = chassis_power_results + return result + else: + return {'ret': False, 'msg': 'Power information not found.'} def get_chassis_thermals(self): result = {} @@ -1908,7 +2568,7 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] if "Temperatures" in data: - for sensor in data[u'Temperatures']: + for sensor in data['Temperatures']: sensor_result = {} for property in properties: if property in sensor: @@ -1929,7 +2589,7 @@ class RedfishUtils(object): key = "Processors" # Get these entries, but does not fail if not found properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz', - 'TotalCores', 'TotalThreads', 'Status'] + 'ProcessorArchitecture', 'TotalCores', 'TotalThreads', 'Status'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) @@ -1950,8 +2610,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for cpu in data[u'Members']: - cpu_list.append(cpu[u'@odata.id']) + for cpu in data['Members']: + cpu_list.append(cpu['@odata.id']) for c in cpu_list: cpu = {} @@ -2000,8 +2660,8 @@ class RedfishUtils(object): 
result['ret'] = True
         data = response['data']

-        for dimm in data[u'Members']:
-            memory_list.append(dimm[u'@odata.id'])
+        for dimm in data['Members']:
+            memory_list.append(dimm['@odata.id'])

         for m in memory_list:
             dimm = {}
@@ -2029,15 +2689,28 @@ class RedfishUtils(object):
     def get_multi_memory_inventory(self):
         return self.aggregate_systems(self.get_memory_inventory)

+    def get_nic(self, resource_uri):
+        result = {}
+        properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
+                      'NameServers', 'MACAddress', 'PermanentMACAddress',
+                      'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status', 'LinkStatus']
+        response = self.get_request(self.root_uri + resource_uri)
+        if response['ret'] is False:
+            return response
+        result['ret'] = True
+        data = response['data']
+        nic = {}
+        for property in properties:
+            if property in data:
+                nic[property] = data[property]
+        result['entries'] = nic
+        return result
+
     def get_nic_inventory(self, resource_uri):
         result = {}
         nic_list = []
         nic_results = []
         key = "EthernetInterfaces"
-        # Get these entries, but does not fail if not found
-        properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
-                      'NameServers', 'MACAddress', 'PermanentMACAddress',
-                      'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']

         response = self.get_request(self.root_uri + resource_uri)
         if response['ret'] is False:
@@ -2057,22 +2730,13 @@ class RedfishUtils(object):
         result['ret'] = True
         data = response['data']

-        for nic in data[u'Members']:
-            nic_list.append(nic[u'@odata.id'])
+        for nic in data['Members']:
+            nic_list.append(nic['@odata.id'])

         for n in nic_list:
-            nic = {}
-            uri = self.root_uri + n
-            response = self.get_request(uri)
-            if response['ret'] is False:
-                return response
-            data = response['data']
-
-            for property in properties:
-                if property in data:
-                    nic[property] = data[property]
-
-            nic_results.append(nic)
+            nic = self.get_nic(n)
+            if nic['ret']:
+                nic_results.append(nic['entries'])
         result["entries"] = nic_results
         return result

@@ -2122,8 +2786,8 @@ class RedfishUtils(object):
         result['ret'] = True
         data = response['data']

-        for virtualmedia in data[u'Members']:
-            virtualmedia_list.append(virtualmedia[u'@odata.id'])
+        for virtualmedia in data['Members']:
+            virtualmedia_list.append(virtualmedia['@odata.id'])

         for n in virtualmedia_list:
             virtualmedia = {}
@@ -2141,11 +2805,15 @@ class RedfishUtils(object):
         result["entries"] = virtualmedia_results
         return result

-    def get_multi_virtualmedia(self):
+    def get_multi_virtualmedia(self, resource_type='Manager'):
         ret = True
         entries = []

-        resource_uris = self.manager_uris
+        # Given resource_type, use the proper URI
+        if resource_type == 'Systems':
+            resource_uris = self.systems_uris
+        elif resource_type == 'Manager':
+            resource_uris = self.manager_uris

         for resource_uri in resource_uris:
             virtualmedia = self.get_virtualmedia(resource_uri)
@@ -2157,7 +2825,7 @@ class RedfishUtils(object):

     @staticmethod
     def _find_empty_virt_media_slot(resources, media_types,
-                                    media_match_strict=True):
+                                    media_match_strict=True, vendor=''):
         for uri, data in resources.items():
             # check MediaTypes
             if 'MediaTypes' in data and media_types:
@@ -2166,6 +2834,9 @@ class RedfishUtils(object):
             else:
                 if media_match_strict:
                     continue
+            # Based on current Lenovo server capabilities, filter out the RDOC1/2 and Remote1/2/3/4 slots, which do not support Insert/Eject. 
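To illustrate the vendor check that follows, here are hypothetical Lenovo virtual media slot URIs; the paths are made up, and only the 'RDOC'/'Remote' substring test mirrors the code:

```python
# Illustrative URIs for the Lenovo slot filter; only the substring
# membership check below reflects the actual logic.
candidates = [
    '/redfish/v1/Managers/1/VirtualMedia/RDOC1',     # skipped
    '/redfish/v1/Managers/1/VirtualMedia/Remote3',   # skipped
    '/redfish/v1/Managers/1/VirtualMedia/EXT1',      # usable slot
]
for uri in candidates:
    skip = 'RDOC' in uri or 'Remote' in uri
    print(uri, '->', 'skip' if skip else 'candidate')
```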
+ if vendor == 'Lenovo' and ('RDOC' in uri or 'Remote' in uri): + continue # if ejected, 'Inserted' should be False and 'ImageName' cleared if (not data.get('Inserted', False) and not data.get('ImageName')): @@ -2220,22 +2891,35 @@ class RedfishUtils(object): payload[param] = options.get(option) return payload - def virtual_media_insert_via_patch(self, options, param_map, uri, data): + def virtual_media_insert_via_patch(self, options, param_map, uri, data, image_only=False): # get AllowableValues - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in data.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in data.items() + if k.endswith('@Redfish.AllowableValues') + } # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) - if 'Inserted' not in payload: + if 'Inserted' not in payload and not image_only: + # Add Inserted to the payload if needed payload['Inserted'] = True - # PATCH the resource - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} - def virtual_media_insert(self, options): + # PATCH the resource + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] is False: + # WORKAROUND + # Older HPE systems with iLO 4 and Supermicro do not support + # specifying Inserted or WriteProtected + vendor = self._get_vendor()['Vendor'] + if vendor == 'HPE' or vendor == 'Supermicro': + payload.pop('Inserted', None) + payload.pop('WriteProtected', None) + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'VirtualMedia inserted' + return resp + + def virtual_media_insert(self, options, resource_type='Manager'): param_map = { 'Inserted': 'inserted', 'WriteProtected': 'write_protected', @@ -2251,20 +2935,26 @@ class RedfishUtils(object): media_types = options.get('media_types') # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) + # Given resource_type, use the proper URI + if resource_type == 'Systems': + resource_uri = self.systems_uri + elif resource_type == 'Manager': + resource_uri = self.manager_uri + response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response data = response['data'] if 'VirtualMedia' not in data: return {'ret': False, 'msg': "VirtualMedia resource not found"} + virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) if response['ret'] is False: return response data = response['data'] virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) + for member in data['Members']: + virt_media_list.append(member['@odata.id']) resources, headers = self._read_virt_media_resources(virt_media_list) # see if image already inserted; if so, nothing to do @@ -2274,12 +2964,13 @@ class RedfishUtils(object): # find an empty slot to insert the media # try first with strict media_type matching + vendor = self._get_vendor()['Vendor'] uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=True) + resources, media_types, media_match_strict=True, vendor=vendor) if not uri: # if not found, try without strict media_type matching uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=False) + 
resources, media_types, media_match_strict=False, vendor=vendor) if not uri: return {'ret': False, 'msg': "Unable to find an available VirtualMedia resource " @@ -2314,44 +3005,71 @@ class RedfishUtils(object): payload = self._insert_virt_media_payload(options, param_map, data, ai) # POST to action response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False and ('Inserted' in payload or 'WriteProtected' in payload): + # WORKAROUND + # Older HPE systems with iLO 4 and Supermicro do not support + # specifying Inserted or WriteProtected + vendor = self._get_vendor()['Vendor'] + if vendor == 'HPE' or vendor == 'Supermicro': + payload.pop('Inserted', None) + payload.pop('WriteProtected', None) + response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} - def virtual_media_eject_via_patch(self, uri): + def virtual_media_eject_via_patch(self, uri, image_only=False): # construct payload payload = { 'Inserted': False, 'Image': None } - # PATCH resource - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} - def virtual_media_eject(self, options): + # Inserted is not writable + if image_only: + del payload['Inserted'] + + # PATCH resource + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] is False and 'Inserted' in payload: + # WORKAROUND + # Older HPE systems with iLO 4 and Supermicro do not support + # specifying Inserted + vendor = self._get_vendor()['Vendor'] + if vendor == 'HPE' or vendor == 'Supermicro': + payload.pop('Inserted', None) + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'VirtualMedia ejected' + return resp + + def virtual_media_eject(self, options, resource_type='Manager'): image_url = options.get('image_url') if not image_url: return {'ret': False, 'msg': "image_url option required for VirtualMediaEject"} # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) + # Given resource_type, use the proper URI + if resource_type == 'Systems': + resource_uri = self.systems_uri + elif resource_type == 'Manager': + resource_uri = self.manager_uri + response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response data = response['data'] if 'VirtualMedia' not in data: return {'ret': False, 'msg': "VirtualMedia resource not found"} + virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) if response['ret'] is False: return response data = response['data'] virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) + for member in data['Members']: + virt_media_list.append(member['@odata.id']) resources, headers = self._read_virt_media_resources(virt_media_list) # find the VirtualMedia resource to eject @@ -2409,8 +3127,7 @@ class RedfishUtils(object): # Get a list of all Chassis and build URIs, then get all PowerSupplies # from each Power entry in the Chassis - chassis_uri_list = self.chassis_uris - for chassis_uri in chassis_uri_list: + for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response @@ -2419,7 +3136,7 @@ class 
RedfishUtils(object): data = response['data'] if 'Power' in data: - power_uri = data[u'Power'][u'@odata.id'] + power_uri = data['Power']['@odata.id'] else: continue @@ -2457,7 +3174,7 @@ class RedfishUtils(object): result = {} inventory = {} # Get these entries, but does not fail if not found - properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', + properties = ['Status', 'HostName', 'PowerState', 'BootProgress', 'Model', 'Manufacturer', 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', 'ProcessorSummary', 'TrustedModules', 'Name', 'Id'] @@ -2536,43 +3253,20 @@ class RedfishUtils(object): else: payload[service_name][service_property] = value - # Find NetworkProtocol + # Find the ManagerNetworkProtocol resource response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] - if 'NetworkProtocol' not in data: + networkprotocol_uri = data.get("NetworkProtocol", {}).get("@odata.id") + if networkprotocol_uri is None: return {'ret': False, 'msg': "NetworkProtocol resource not found"} - networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] - # Check service property support or not - response = self.get_request(self.root_uri + networkprotocol_uri) - if response['ret'] is False: - return response - data = response['data'] - for service_name in payload.keys(): - if service_name not in data: - return {'ret': False, 'msg': "%s service not supported" % service_name} - for service_property in payload[service_name].keys(): - if service_property not in data[service_name]: - return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)} - - # if the protocol is already set, nothing to do - need_change = False - for service_name in payload.keys(): - for service_property in payload[service_name].keys(): - value = payload[service_name][service_property] - if value != data[service_name][service_property]: - need_change = True - break - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"} - - response = self.patch_request(self.root_uri + networkprotocol_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"} + # Modify the ManagerNetworkProtocol resource + resp = self.patch_request(self.root_uri + networkprotocol_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified manager network protocol settings' + return resp @staticmethod def to_singular(resource_name): @@ -2697,16 +3391,50 @@ class RedfishUtils(object): return self.aggregate_managers(self.get_manager_health_report) def set_manager_nic(self, nic_addr, nic_config): + # Get the manager ethernet interface uri + nic_info = self.get_manager_ethernet_uri(nic_addr) + + if nic_info.get('nic_addr') is None: + return nic_info + else: + target_ethernet_uri = nic_info['nic_addr'] + target_ethernet_current_setting = nic_info['ethernet_setting'] + + # Convert input to payload and check validity + # Note: Some properties in the EthernetInterface resource are arrays of + # objects. The call into this module expects a flattened view, meaning + # the user specifies exactly one object for an array property. For + # example, if a user provides IPv4StaticAddresses in the request to this + # module, it will turn that into an array of one member. 
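+ # For example (editor's illustration with hypothetical values), a nic_config of {'IPv4StaticAddresses': {'Address': '192.0.2.10', 'SubnetMask': '255.255.255.0', 'Gateway': '192.0.2.1'}} is sent as the payload {'IPv4StaticAddresses': [{'Address': '192.0.2.10', 'SubnetMask': '255.255.255.0', 'Gateway': '192.0.2.1'}]}.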
This pattern + # should be avoided for future commands in this module, but needs to be + # preserved here for backwards compatibility. + payload = {} + for property in nic_config.keys(): + value = nic_config[property] + if property in target_ethernet_current_setting and isinstance(value, dict) and isinstance(target_ethernet_current_setting[property], list): + payload[property] = list() + payload[property].append(value) + else: + payload[property] = value + + # Modify the EthernetInterface resource + resp = self.patch_request(self.root_uri + target_ethernet_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified manager NIC' + return resp + + # A helper function to get the EthernetInterface URI + def get_manager_ethernet_uri(self, nic_addr='null'): # Get EthernetInterface collection response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if not response['ret']: return response data = response['data'] if 'EthernetInterfaces' not in data: return {'ret': False, 'msg': "EthernetInterfaces resource not found"} ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"] response = self.get_request(self.root_uri + ethernetinterfaces_uri) - if response['ret'] is False: + if not response['ret']: return response data = response['data'] uris = [a.get('@odata.id') for a in data.get('Members', []) if @@ -2721,74 +3449,568 @@ class RedfishUtils(object): nic_addr = nic_addr.split(':')[0] # split port if existing for uri in uris: response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if not response['ret']: return response data = response['data'] - if '"' + nic_addr.lower() + '"' in str(data).lower() or "'" + nic_addr.lower() + "'" in str(data).lower(): + data_string = json.dumps(data) + if nic_addr.lower() in data_string.lower(): target_ethernet_uri = uri target_ethernet_current_setting = data break + + nic_info = {} + nic_info['nic_addr'] = target_ethernet_uri + nic_info['ethernet_setting'] = target_ethernet_current_setting + if target_ethernet_uri is None: - return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"} + return {} + else: + return nic_info - # Convert input to payload and check validity - payload = {} - for property in nic_config.keys(): - value = nic_config[property] - if property not in target_ethernet_current_setting: - return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property} - if isinstance(value, dict): - if isinstance(target_ethernet_current_setting[property], dict): - payload[property] = value - elif isinstance(target_ethernet_current_setting[property], list): - payload[property] = list() - payload[property].append(value) - else: - return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property} - else: - payload[property] = value + def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None): + if hostinterface_config is None: + return {'ret': False, 'msg': + 'Must provide hostinterface_config for SetHostInterface command'} - # If no need change, nothing to do. 
If error detected, report it - need_change = False - for property in payload.keys(): - set_value = payload[property] - cur_value = target_ethernet_current_setting[property] - # type is simple(not dict/list) - if not isinstance(set_value, dict) and not isinstance(set_value, list): - if set_value != cur_value: - need_change = True - # type is dict - if isinstance(set_value, dict): - for subprop in payload[property].keys(): - if subprop not in target_ethernet_current_setting[property]: - # Not configured already; need to apply the request - need_change = True - break - sub_set_value = payload[property][subprop] - sub_cur_value = target_ethernet_current_setting[property][subprop] - if sub_set_value != sub_cur_value: - need_change = True - # type is list - if isinstance(set_value, list): - if len(set_value) != len(cur_value): - # if arrays are not the same len, no need to check each element - need_change = True - continue - for i in range(len(set_value)): - for subprop in payload[property][i].keys(): - if subprop not in target_ethernet_current_setting[property][i]: - # Not configured already; need to apply the request - need_change = True - break - sub_set_value = payload[property][i][subprop] - sub_cur_value = target_ethernet_current_setting[property][i][subprop] - if sub_set_value != sub_cur_value: - need_change = True - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"} - - response = self.patch_request(self.root_uri + target_ethernet_uri, payload) + # Find the HostInterfaceCollection resource + response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response - return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"} + data = response['data'] + hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id") + if hostinterfaces_uri is None: + return {'ret': False, 'msg': "HostInterface resource not found"} + response = self.get_request(self.root_uri + hostinterfaces_uri) + if response['ret'] is False: + return response + data = response['data'] + uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')] + + # Capture list of URIs that match a specified HostInterface resource Id + if hostinterface_id: + matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip('/').split('/')[-1]] + if hostinterface_id and matching_hostinterface_uris: + hostinterface_uri = list.pop(matching_hostinterface_uris) + elif hostinterface_id and not matching_hostinterface_uris: + return {'ret': False, 'msg': "HostInterface ID %s not present." 
% hostinterface_id} + elif len(uris) == 1: + hostinterface_uri = list.pop(uris) + else: + return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."} + + # Modify the HostInterface resource + resp = self.patch_request(self.root_uri + hostinterface_uri, hostinterface_config, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified host interface' + return resp + + def get_hostinterfaces(self): + result = {} + hostinterface_results = [] + properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status', + 'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes', + 'AuthNoneRoleId', 'CredentialBootstrapping'] + manager_uri_list = self.manager_uris + for manager_uri in manager_uri_list: + response = self.get_request(self.root_uri + manager_uri) + if response['ret'] is False: + return response + + result['ret'] = True + data = response['data'] + hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id") + if hostinterfaces_uri is None: + continue + + response = self.get_request(self.root_uri + hostinterfaces_uri) + data = response['data'] + + if 'Members' in data: + for hostinterface in data['Members']: + hostinterface_uri = hostinterface['@odata.id'] + hostinterface_response = self.get_request(self.root_uri + hostinterface_uri) + # dictionary for capturing individual HostInterface properties + hostinterface_data_temp = {} + if hostinterface_response['ret'] is False: + return hostinterface_response + hostinterface_data = hostinterface_response['data'] + for property in properties: + if property in hostinterface_data: + if hostinterface_data[property] is not None: + hostinterface_data_temp[property] = hostinterface_data[property] + # Check for the presence of a ManagerEthernetInterface + # object, a link to a _single_ EthernetInterface that the + # BMC uses to communicate with the host. + if 'ManagerEthernetInterface' in hostinterface_data: + interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id'] + interface_response = self.get_nic(interface_uri) + if interface_response['ret'] is False: + return interface_response + hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries'] + + # Check for the presence of a HostEthernetInterfaces + # object, a link to a _collection_ of EthernetInterfaces + # that the host uses to communicate with the BMC. + if 'HostEthernetInterfaces' in hostinterface_data: + interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id'] + interfaces_response = self.get_request(self.root_uri + interfaces_uri) + if interfaces_response['ret'] is False: + return interfaces_response + interfaces_data = interfaces_response['data'] + if 'Members' in interfaces_data: + for interface in interfaces_data['Members']: + interface_uri = interface['@odata.id'] + interface_response = self.get_nic(interface_uri) + if interface_response['ret'] is False: + return interface_response + # Check if this is the first + # HostEthernetInterfaces item and create empty + # list if so. 
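+ # For illustration, a dict.setdefault() call would collapse this check-and-create into one step: hostinterface_data_temp.setdefault('HostEthernetInterfaces', []).append(interface_response['entries'])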
+ if 'HostEthernetInterfaces' not in hostinterface_data_temp: + hostinterface_data_temp['HostEthernetInterfaces'] = [] + + hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries']) + + hostinterface_results.append(hostinterface_data_temp) + else: + continue + result["entries"] = hostinterface_results + if not result["entries"]: + return {'ret': False, 'msg': "No HostInterface objects found"} + return result + + def get_manager_inventory(self, manager_uri): + result = {} + inventory = {} + # Get these entries, but does not fail if not found + properties = ['Id', 'FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model', + 'PartNumber', 'PowerState', 'SerialNumber', 'ServiceIdentification', + 'Status', 'UUID'] + + response = self.get_request(self.root_uri + manager_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for property in properties: + if property in data: + inventory[property] = data[property] + + result["entries"] = inventory + return result + + def get_multi_manager_inventory(self): + return self.aggregate_managers(self.get_manager_inventory) + + def get_service_identification(self, manager): + result = {} + if manager is None: + if len(self.manager_uris) == 1: + manager = self.manager_uris[0].rstrip('/').split('/')[-1] + elif len(self.manager_uris) > 1: + entries = self.get_multi_manager_inventory()['entries'] + managers = [m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')] + if len(managers) == 1: + manager = managers[0].rstrip('/').split('/')[-1] + else: + self.module.fail_json(msg=[ + "Multiple managers with ServiceIdentification were found: %s" % str(managers), + "Please specify by using the 'manager' parameter in your playbook"]) + elif len(self.manager_uris) == 0: + self.module.fail_json(msg="No manager identities were found") + response = self.get_request(self.root_uri + '/redfish/v1/Managers/' + manager, override_headers=None) + try: + result['service_identification'] = response['data']['ServiceIdentification'] + except Exception as e: + self.module.fail_json(msg="Service ID not found for manager %s" % manager) + result['ret'] = True + return result + + def set_service_identification(self, service_id): + data = {"ServiceIdentification": service_id} + resp = self.patch_request(self.root_uri + '/redfish/v1/Managers/' + self.resource_id, data, check_pyld=True) + return resp + + def set_session_service(self, sessions_config): + if sessions_config is None: + return {'ret': False, 'msg': + 'Must provide sessions_config for SetSessionService command'} + + resp = self.patch_request(self.root_uri + self.session_service_uri, sessions_config, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified session service' + return resp + + def verify_bios_attributes(self, bios_attributes): + # This method verifies BIOS attributes against the provided input + server_bios = self.get_bios_attributes(self.systems_uri) + if server_bios["ret"] is False: + return server_bios + + bios_dict = {} + wrong_param = {} + + # Verify bios_attributes with BIOS settings available in the server + for key, value in bios_attributes.items(): + if key in server_bios["entries"]: + if server_bios["entries"][key] != value: + bios_dict.update({key: value}) + else: + wrong_param.update({key: value}) + + if wrong_param: + return { + "ret": False, + "msg": "Wrong parameters are provided: %s" % wrong_param + } + + if bios_dict: + return { + "ret": False, + "msg": "BIOS parameters are not 
matching: %s" % bios_dict + } + + return { + "ret": True, + "changed": False, + "msg": "BIOS verification completed" + } + + def enable_secure_boot(self): + # This function enables Secure Boot on an OOB controller + + response = self.get_request(self.root_uri + self.systems_uri) + if response["ret"] is False: + return response + + server_details = response["data"] + secure_boot_url = server_details["SecureBoot"]["@odata.id"] + + response = self.get_request(self.root_uri + secure_boot_url) + if response["ret"] is False: + return response + + body = {} + body["SecureBootEnable"] = True + + return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True) + + def set_secure_boot(self, secure_boot_enable): + # This function sets the Secure Boot state on an OOB controller + + response = self.get_request(self.root_uri + self.systems_uri) + if response["ret"] is False: + return response + + server_details = response["data"] + secure_boot_url = server_details["SecureBoot"]["@odata.id"] + + response = self.get_request(self.root_uri + secure_boot_url) + if response["ret"] is False: + return response + + body = {} + body["SecureBootEnable"] = secure_boot_enable + + return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True) + + def get_hpe_thermal_config(self): + result = {} + key = "Thermal" + # Go through the list of chassis + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + val = data.get('Oem', {}).get('Hpe', {}).get('ThermalConfiguration') + if val is not None: + return {"ret": True, "current_thermal_config": val} + return {"ret": False} + + def get_hpe_fan_percent_min(self): + result = {} + key = "Thermal" + # Go through the list of chassis + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + data = response['data'] + val = data.get('Oem', {}).get('Hpe', {}).get('FanPercentMinimum') + if val is not None: + return {"ret": True, "fan_percent_min": val} + return {"ret": False} + + def delete_volumes(self, storage_subsystem_id, volume_ids): + # Find the Storage resource from the requested ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + storage_uri = data.get('Storage', {}).get('@odata.id') + if storage_uri is None: + return {'ret': False, 'msg': 'Storage resource not found'} + + # Get Storage Collection + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Storage Subsystems + self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.storage_subsystems_uris: + return { + 'ret': False, + 'msg': "StorageCollection's Members array is either empty or missing"} + + # Match the Storage Subsystem ID against user input + self.storage_subsystem_uri = "" + for storage_subsystem_uri in self.storage_subsystems_uris: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: + self.storage_subsystem_uri = storage_subsystem_uri + + if not self.storage_subsystem_uri: + return { + 'ret': False, + 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id} + + # Get Volume Collection + response = self.get_request(self.root_uri + self.storage_subsystem_uri) + if
response['ret'] is False: + return response + data = response['data'] + + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Volumes + self.volume_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.volume_uris: + return { + 'ret': True, 'changed': False, + 'msg': "VolumeCollection's Members array is either empty or missing"} + + # Delete each volume + for volume in self.volume_uris: + if volume.rstrip('/').split('/')[-1] in volume_ids: + response = self.delete_request(self.root_uri + volume) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "The following volumes were deleted: %s" % str(volume_ids)} + + def create_volume(self, volume_details, storage_subsystem_id, storage_none_volume_deletion=False): + # Find the Storage resource from the requested ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + storage_uri = data.get('Storage', {}).get('@odata.id') + if storage_uri is None: + return {'ret': False, 'msg': 'Storage resource not found'} + + # Get Storage Collection + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Storage Subsystems + self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.storage_subsystems_uris: + return { + 'ret': False, + 'msg': "StorageCollection's Members array is either empty or missing"} + + # Match the Storage Subsystem ID against user input + self.storage_subsystem_uri = "" + for storage_subsystem_uri in self.storage_subsystems_uris: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: + self.storage_subsystem_uri = storage_subsystem_uri + + if not self.storage_subsystem_uri: + return { + 'ret': False, + 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id} + + # Validate input parameters + required_parameters = ['RAIDType', 'Drives'] + allowed_parameters = ['CapacityBytes', 'DisplayName', 'InitializeMethod', 'MediaSpanCount', + 'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy'] + + for parameter in required_parameters: + if not volume_details.get(parameter): + return { + 'ret': False, + 'msg': "%s are required parameters to create a volume" % str(required_parameters)} + + # Navigate to the volume URI of the correct storage subsystem + response = self.get_request(self.root_uri + self.storage_subsystem_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Delete any volumes of RAIDType None present on the Storage Subsystem + if storage_none_volume_deletion: + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + volume_data = response['data'] + + if "Members" in volume_data: + for member in volume_data["Members"]: + response = self.get_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response + member_data = response['data'] + + if member_data["RAIDType"] == "None": + response = self.delete_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response + + # Construct payload and issue POST command to create volume + volume_details["Links"] = {} +
volume_details["Links"]["Drives"] = [] + for drive in volume_details["Drives"]: + volume_details["Links"]["Drives"].append({"@odata.id": drive}) + del volume_details["Drives"] + payload = volume_details + response = self.post_request(self.root_uri + data['Volumes']['@odata.id'], payload) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "Volume Created"} + + def get_bios_registries(self): + # Get the ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if not response["ret"]: + return response + + server_details = response["data"] + + # Get Registries URI + if "Bios" not in server_details: + msg = "Getting BIOS URI failed, Key 'Bios' not found in /redfish/v1/Systems/1/ response: %s" + return { + "ret": False, + "msg": msg % str(server_details) + } + + bios_uri = server_details["Bios"]["@odata.id"] + bios_resp = self.get_request(self.root_uri + bios_uri) + if not bios_resp["ret"]: + return bios_resp + + bios_data = bios_resp["data"] + attribute_registry = bios_data["AttributeRegistry"] + + reg_uri = self.root_uri + self.service_root + "Registries/" + attribute_registry + reg_resp = self.get_request(reg_uri) + if not reg_resp["ret"]: + return reg_resp + + reg_data = reg_resp["data"] + + # Get BIOS attribute registry URI + lst = [] + + # Get the location URI + response = self.check_location_uri(reg_data, reg_uri) + if not response["ret"]: + return response + + rsp_data, rsp_uri = response["rsp_data"], response["rsp_uri"] + + if "RegistryEntries" not in rsp_data: + return { + "msg": "'RegistryEntries' not present in %s response, %s" % (rsp_uri, str(rsp_data)), + "ret": False + } + + return { + "bios_registry": rsp_data, + "bios_registry_uri": rsp_uri, + "ret": True + } + + def check_location_uri(self, resp_data, resp_uri): + # Get the location URI response + # return {"msg": self.creds, "ret": False} + vendor = self._get_vendor()['Vendor'] + rsp_uri = "" + for loc in resp_data['Location']: + if loc['Language'].startswith("en"): + rsp_uri = loc['Uri'] + if vendor == 'HPE': + # WORKAROUND + # HPE systems with iLO 4 expose the BIOS Attribute Registries location URI as a dictionary with the key 'extref'; + # fetch the URI from that key + if isinstance(loc['Uri'], dict) and "extref" in loc['Uri'].keys(): + rsp_uri = loc['Uri']['extref'] + if not rsp_uri: + msg = "Language 'en' not found in BIOS Attribute Registries location, URI: %s, response: %s" + return { + "ret": False, + "msg": msg % (resp_uri, str(resp_data)) + } + + res = self.get_request(self.root_uri + rsp_uri) + if res['ret'] is False: + # WORKAROUND + # HPE systems with iLO 4 or iLO 5 gzip-compress responses for some URIs; + # retry with an Accept-Encoding header + if vendor == 'HPE': + override_headers = {"Accept-Encoding": "gzip"} + res = self.get_request(self.root_uri + rsp_uri, override_headers=override_headers) + if res['ret']: + return { + "ret": True, + "rsp_data": res["data"], + "rsp_uri": rsp_uri + } + return res + + def get_accountservice_properties(self): + # Find the AccountService resource + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + accountservice_uri = data.get("AccountService", {}).get("@odata.id") + if accountservice_uri is None: + return {'ret': False, 'msg': "AccountService resource not found"} + + response = self.get_request(self.root_uri + accountservice_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries':
response['data'] + } + + def get_power_restore_policy(self, systems_uri): + # Retrieve System resource + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data']['PowerRestorePolicy'] + } + + def get_multi_power_restore_policy(self): + return self.aggregate_systems(self.get_power_restore_policy) + + def set_power_restore_policy(self, policy): + body = {'PowerRestorePolicy': policy} + return self.patch_request(self.root_uri + self.systems_uri, body, check_pyld=True) diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py deleted file mode 100644 index 85f4a6aab2..0000000000 --- a/plugins/module_utils/redhat.py +++ /dev/null @@ -1,271 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c), James Laska -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -import shutil -import tempfile -import types - -from ansible.module_utils.six.moves import configparser - - -class RegistrationBase(object): - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - redhat_repo = '/etc/yum.repos.d/redhat.repo' - if os.path.isfile(redhat_repo): - os.unlink(redhat_repo) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - - if os.path.isfile(plugin_conf): - tmpfd, tmpfile = tempfile.mkstemp() - shutil.copy2(plugin_conf, tmpfile) - cfg = configparser.ConfigParser() - cfg.read([tmpfile]) - - if enabled: - cfg.set('main', 'enabled', 1) - else: - cfg.set('main', 'enabled', 0) - - fd = open(tmpfile, 'w+') - cfg.write(fd) - fd.close() - self.module.atomic_move(tmpfile, plugin_conf) - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.config = self._read_config() - self.module = module - - def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): - ''' - Load RHSM configuration from /etc/rhsm/rhsm.conf. - Returns: - * ConfigParser object - ''' - - # Read RHSM defaults ... - cp = configparser.ConfigParser() - cp.read(rhsm_conf) - - # Add support for specifying a default value w/o having to standup some configuration - # Yeah, I know this should be subclassed ... 
but, oh well - def get_option_default(self, key, default=''): - sect, opt = key.split('.', 1) - if self.has_section(sect) and self.has_option(sect, opt): - return self.get(sect, opt) - else: - return default - - cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser) - - return cp - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHN - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--system.hostname'. - for k, v in kwargs.items(): - if re.search(r'^(system|rhsm)_', k): - args.append('--%s=%s' % (k.replace('_', '.'), v)) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHN. - ''' - args = ['subscription-manager', 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, autosubscribe, activationkey): - ''' - Register the current system to the provided RHN server - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'register'] - - # Generate command arguments - if activationkey: - args.append('--activationkey "%s"' % activationkey) - else: - if autosubscribe: - args.append('--autosubscribe') - if username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - - # Do the needful... 
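# For illustration (hypothetical key): the list built above for an activation key is ['subscription-manager', 'register', '--activationkey "mykey"']; the '%s' formatting embeds literal quotes, so '--activationkey "mykey"' is passed to run_command() as a single argv element.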
- rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unsubscribe(self): - ''' - Unsubscribe a system from all subscribed channels - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - for pool in available_pools.filter(regexp): - pool.subscribe() - - -class RhsmPool(object): - ''' - Convenience class for housing subscription information - ''' - - def __init__(self, module, **kwargs): - self.module = module - for k, v in kwargs.items(): - setattr(self, k, v) - - def __str__(self): - return str(self.__getattribute__('_name')) - - def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - """ - def __init__(self, module): - self.module = module - self.products = self._load_product_list() - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self): - """ - Loads list of all available pools for system in data structure - """ - args = "subscription-manager list --available" - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of an output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - products[-1].__setattr__(key, value) - # FIXME - log some warning? 
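# For illustration, hypothetical `subscription-manager list --available` output of "SubscriptionName: Example Premium" followed by "PoolId: 8a85f9843abcdef" is parsed as follows: 'SubscriptionName' starts a new RhsmPool with _name='Example Premium', and each later 'key: value' line becomes an attribute on that pool (keys have their spaces removed), so pool.PoolId == '8a85f9843abcdef' and subscribe() can pass it to 'subscription-manager subscribe --pool'.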
- # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter(self, regexp='^$'): - ''' - Return a list of RhsmPools whose name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py index 9d55aecad0..d3de8e63e9 100644 --- a/plugins/module_utils/redis.py +++ b/plugins/module_utils/redis.py @@ -1,12 +1,11 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations from ansible.module_utils.basic import missing_required_lib -__metaclass__ = type import traceback @@ -15,6 +14,7 @@ try: from redis import Redis from redis import __version__ as redis_version HAS_REDIS_PACKAGE = True + REDIS_IMP_ERR = None except ImportError: REDIS_IMP_ERR = traceback.format_exc() HAS_REDIS_PACKAGE = False @@ -22,25 +22,26 @@ except ImportError: try: import certifi HAS_CERTIFI_PACKAGE = True + CERTIFI_IMPORT_ERROR = None except ImportError: CERTIFI_IMPORT_ERROR = traceback.format_exc() HAS_CERTIFI_PACKAGE = False -def fail_imports(module): +def fail_imports(module, needs_certifi=True): errors = [] traceback = [] if not HAS_REDIS_PACKAGE: errors.append(missing_required_lib('redis')) traceback.append(REDIS_IMP_ERR) - if not HAS_CERTIFI_PACKAGE: + if not HAS_CERTIFI_PACKAGE and needs_certifi: errors.append(missing_required_lib('certifi')) traceback.append(CERTIFI_IMPORT_ERROR) if errors: - module.fail_json(errors=errors, traceback='\n'.join(traceback)) + module.fail_json(msg='\n'.join(errors), traceback='\n'.join(traceback)) -def redis_auth_argument_spec(): +def redis_auth_argument_spec(tls_default=True): return dict( login_host=dict(type='str', default='localhost',), @@ -50,14 +51,44 @@ def redis_auth_argument_spec(): ), login_port=dict(type='int', default=6379), tls=dict(type='bool', - default=True), + default=tls_default), validate_certs=dict(type='bool', default=True ), - ca_certs=dict(type='str') + ca_certs=dict(type='str'), + client_cert_file=dict(type='str'), + client_key_file=dict(type='str'), ) +def redis_auth_params(module): + login_host = module.params['login_host'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_port = module.params['login_port'] + tls = module.params['tls'] + validate_certs = 'required' if module.params['validate_certs'] else None + ca_certs = module.params['ca_certs'] + if tls and ca_certs is None: + ca_certs = str(certifi.where()) + client_cert_file = module.params['client_cert_file'] + client_key_file = module.params['client_key_file'] + if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: + module.fail_json( + msg='The option `username` is only supported with redis >= 3.4.0.') + params = {'host': login_host, + 'port': login_port, + 'password': login_password, + 'ssl_ca_certs': ca_certs, + 'ssl_certfile': client_cert_file, + 'ssl_keyfile': client_key_file, + 'ssl_cert_reqs': validate_certs, + 'ssl': tls} + if login_user is not None: + params['username'] = login_user + return
params + + class RedisAnsible(object): '''Base class for Redis module''' @@ -66,28 +97,8 @@ class RedisAnsible(object): self.connection = self._connect() def _connect(self): - login_host = self.module.params['login_host'] - login_user = self.module.params['login_user'] - login_password = self.module.params['login_password'] - login_port = self.module.params['login_port'] - tls = self.module.params['tls'] - validate_certs = 'required' if self.module.params['validate_certs'] else None - ca_certs = self.module.params['ca_certs'] - if tls and ca_certs is None: - ca_certs = str(certifi.where()) - if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: - self.module.fail_json( - msg='The option `username` in only supported with redis >= 3.4.0.') - params = {'host': login_host, - 'port': login_port, - 'password': login_password, - 'ssl_ca_certs': ca_certs, - 'ssl_cert_reqs': validate_certs, - 'ssl': tls} - if login_user is not None: - params['username'] = login_user try: - return Redis(**params) + return Redis(**redis_auth_params(self.module)) except Exception as e: - self.module.fail_json(msg='{0}'.format(str(e))) + self.module.fail_json(msg=f'{e}') return None diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py index 07092b9642..1f06839d39 100644 --- a/plugins/module_utils/remote_management/lxca/common.py +++ b/plugins/module_utils/remote_management/lxca/common.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by @@ -6,13 +5,13 @@ # own license to the complete work. # # Copyright (C) 2017 Lenovo, Inc. -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause # # Contains LXCA common class # Lenovo xClarity Administrator (LXCA) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import traceback try: diff --git a/plugins/module_utils/rundeck.py b/plugins/module_utils/rundeck.py index afbbb48108..7b9f56339a 100644 --- a/plugins/module_utils/rundeck.py +++ b/plugins/module_utils/rundeck.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json @@ -27,7 +26,7 @@ def api_argument_spec(): return api_argument_spec -def api_request(module, endpoint, data=None, method="GET"): +def api_request(module, endpoint, data=None, method="GET", content_type="application/json"): """Manages Rundeck API requests via HTTP(S) :arg module: The AnsibleModule (used to get url, api_version, api_token, etc). 
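A quick usage sketch (editor's example; the endpoint and job_id value are hypothetical): with the 404 handling added in the hunk below, api_request() returns (None, info) instead of failing the module, so callers can probe for existence:

    response, info = api_request(module, endpoint="job/%s/info" % job_id)
    if response is None and info["status"] == 404:
        module.exit_json(changed=False, msg="Job not found")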
@@ -54,15 +53,11 @@ def api_request(module, endpoint, data=None, method="GET"): response, info = fetch_url( module=module, - url="%s/api/%s/%s" % ( - module.params["url"], - module.params["api_version"], - endpoint - ), + url=f"{module.params['url']}/api/{module.params['api_version']}/{endpoint}", data=json.dumps(data), method=method, headers={ - "Content-Type": "application/json", + "Content-Type": content_type, "Accept": "application/json", "X-Rundeck-Auth-Token": module.params["api_token"] } @@ -71,7 +66,9 @@ def api_request(module, endpoint, data=None, method="GET"): if info["status"] == 403: module.fail_json(msg="Token authorization failed", execution_info=json.loads(info["body"])) - if info["status"] == 409: + elif info["status"] == 404: + return None, info + elif info["status"] == 409: module.fail_json(msg="Job executions limit reached", execution_info=json.loads(info["body"])) elif info["status"] >= 500: @@ -80,12 +77,18 @@ def api_request(module, endpoint, data=None, method="GET"): try: content = response.read() - json_response = json.loads(content) - return json_response, info + + if not content: + return None, info + else: + json_response = json.loads(content) + return json_response, info except AttributeError as error: - module.fail_json(msg="Rundeck API request error", - exception=to_native(error), - execution_info=info) + module.fail_json( + msg="Rundeck API request error", + exception=to_native(error), + execution_info=info + ) except ValueError as error: module.fail_json( msg="No valid JSON response", diff --git a/plugins/module_utils/saslprep.py b/plugins/module_utils/saslprep.py index 3e16c7169e..b02cedd874 100644 --- a/plugins/module_utils/saslprep.py +++ b/plugins/module_utils/saslprep.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. @@ -6,12 +5,12 @@ # still belong to the author of the module, and may assign their own license # to the complete work. -# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) +# Copyright (c) 2020, Andrew Klychkov (@Andersson007) # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from stringprep import ( in_table_a1, @@ -30,11 +29,9 @@ from stringprep import ( ) from unicodedata import normalize -from ansible.module_utils.six import text_type - def is_unicode_str(string): - return True if isinstance(string, text_type) else False + return True if isinstance(string, str) else False def mapping_profile(string): @@ -53,11 +50,11 @@ def mapping_profile(string): if in_table_c12(c): # map non-ASCII space characters # (that can be mapped) to Unicode space - tmp.append(u' ') + tmp.append(' ') else: tmp.append(c) - return u"".join(tmp) + return "".join(tmp) def is_ral_string(string): @@ -108,35 +105,31 @@ def prohibited_output_profile(string): for c in string: # RFC4013 2.3. Prohibited Output: if in_table_c12(c): - raise ValueError('%s: prohibited non-ASCII space characters ' - 'that cannot be replaced (C.1.2).' 
% RFC) + raise ValueError(f'{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).') if in_table_c21_c22(c): - raise ValueError('%s: prohibited control characters (C.2.1).' % RFC) + raise ValueError(f'{RFC}: prohibited control characters (C.2.1).') if in_table_c3(c): - raise ValueError('%s: prohibited private Use characters (C.3).' % RFC) + raise ValueError(f'{RFC}: prohibited private Use characters (C.3).') if in_table_c4(c): - raise ValueError('%s: prohibited non-character code points (C.4).' % RFC) + raise ValueError(f'{RFC}: prohibited non-character code points (C.4).') if in_table_c5(c): - raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC) + raise ValueError(f'{RFC}: prohibited surrogate code points (C.5).') if in_table_c6(c): - raise ValueError('%s: prohibited inappropriate for plain text ' - 'characters (C.6).' % RFC) + raise ValueError(f'{RFC}: prohibited inappropriate for plain text characters (C.6).') if in_table_c7(c): - raise ValueError('%s: prohibited inappropriate for canonical ' - 'representation characters (C.7).' % RFC) + raise ValueError(f'{RFC}: prohibited inappropriate for canonical representation characters (C.7).') if in_table_c8(c): - raise ValueError('%s: prohibited change display properties / ' - 'deprecated characters (C.8).' % RFC) + raise ValueError(f'{RFC}: prohibited change display properties / deprecated characters (C.8).') if in_table_c9(c): - raise ValueError('%s: prohibited tagging characters (C.9).' % RFC) + raise ValueError(f'{RFC}: prohibited tagging characters (C.9).') # RFC4013, 2.4. Bidirectional Characters: if is_prohibited_bidi_ch(c): - raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table)) + raise ValueError(f'{RFC}: prohibited bidi characters ({bidi_table}).') # RFC4013, 2.5. Unassigned Code Points: if in_table_a1(c): - raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC) + raise ValueError(f'{RFC}: prohibited unassigned code points (A.1).') def saslprep(string): @@ -157,9 +150,8 @@ def saslprep(string): # RFC4013: "The algorithm assumes all strings are # comprised of characters from the Unicode [Unicode] character set." # Validate the string is a Unicode string - # (text_type is the string type if PY3 and unicode otherwise): if not is_unicode_str(string): - raise TypeError('input must be of type %s, not %s' % (text_type, type(string))) + raise TypeError(f'input must be of type str, not {type(string)}') # RFC4013: 2.1. Mapping. string = mapping_profile(string) @@ -168,7 +160,7 @@ def saslprep(string): # "This profile specifies using Unicode normalization form KC." string = normalize('NFKC', string) if not string: - return u'' + return '' # RFC4013: 2.3. Prohibited Output. # RFC4013: 2.4. Bidirectional Characters. 
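A short usage sketch of the modernized helper (editor's example; the inputs are hypothetical): saslprep() maps non-ASCII spaces to ' ', applies NFKC normalization, and raises ValueError on prohibited output:

    from ansible_collections.community.general.plugins.module_utils.saslprep import saslprep

    assert saslprep('pass\u2003word') == 'pass word'   # U+2003 (em space) is mapped to ' ' (C.1.2)
    try:
        saslprep('bad\u0007value')                     # U+0007 is a prohibited control character (C.2.1)
    except ValueError as exc:
        print(exc)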
diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index bcada5fcb9..0798e61317 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -1,14 +1,32 @@ -# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations import json import re import sys +import datetime +import time +import traceback +from urllib.parse import urlencode -from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import env_fallback, missing_required_lib from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + +SCALEWAY_SECRET_IMP_ERR = None +try: + from passlib.hash import argon2 + HAS_SCALEWAY_SECRET_PACKAGE = True +except Exception: + argon2 = None + SCALEWAY_SECRET_IMP_ERR = traceback.format_exc() + HAS_SCALEWAY_SECRET_PACKAGE = False def scaleway_argument_spec(): @@ -22,12 +40,20 @@ def scaleway_argument_spec(): ) -def payload_from_object(scw_object): +def scaleway_waitable_resource_argument_spec(): return dict( - (k, v) + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + ) + + +def payload_from_object(scw_object): + return { + k: v for k, v in scw_object.items() if k != 'id' and v is not None - ) + } class ScalewayException(Exception): @@ -59,6 +85,61 @@ def parse_pagination_link(header): return parsed_relations +def filter_sensitive_attributes(container, attributes): + ''' + WARNING: This function is effectively private, **do not use it**! + It will be removed or renamed once changing its name no longer triggers a pylint bug. 
+ ''' + for attr in attributes: + container[attr] = "SENSITIVE_VALUE" + + return container + + +class SecretVariables(object): + @staticmethod + def ensure_scaleway_secret_package(module): + if not HAS_SCALEWAY_SECRET_PACKAGE: + module.fail_json( + msg=missing_required_lib("passlib[argon2]", url='https://passlib.readthedocs.io/en/stable/'), + exception=SCALEWAY_SECRET_IMP_ERR + ) + + @staticmethod + def dict_to_list(source_dict): + return [dict(key=k, value=v) for k, v in source_dict.items()] + + @staticmethod + def list_to_dict(source_list, hashed=False): + key_value = 'hashed_value' if hashed else 'value' + return {var['key']: var[key_value] for var in source_list} + + @classmethod + def decode(cls, secrets_list, values_list): + secrets_dict = cls.list_to_dict(secrets_list, hashed=True) + values_dict = cls.list_to_dict(values_list, hashed=False) + for key in values_dict: + if key in secrets_dict: + if argon2.verify(values_dict[key], secrets_dict[key]): + secrets_dict[key] = values_dict[key] + else: + secrets_dict[key] = secrets_dict[key] + + return cls.dict_to_list(secrets_dict) + + +def resource_attributes_should_be_changed(target, wished, verifiable_mutable_attributes, mutable_attributes): + diff = dict() + for attr in verifiable_mutable_attributes: + if wished[attr] is not None and target[attr] != wished[attr]: + diff[attr] = wished[attr] + + if diff: + return {attr: wished[attr] for attr in mutable_attributes} + else: + return diff + + class Response(object): def __init__(self, resp, info): @@ -165,19 +246,170 @@ class Scaleway(object): def warn(self, x): self.module.warn(str(x)) + def fetch_state(self, resource): + self.module.debug("fetch_state of resource: %s" % resource["id"]) + response = self.get(path=self.api_path + "/%s" % resource["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + self.module.fail_json(msg=msg) + + try: + self.module.debug("Resource %s in state: %s" % (resource["id"], response.json["status"])) + return response.json["status"] + except KeyError: + self.module.fail_json(msg="Could not fetch state in %s" % response.json) + + def fetch_paginated_resources(self, resource_key, **pagination_kwargs): + response = self.get( + path=self.api_path, + params=pagination_kwargs) + + status_code = response.status_code + if not response.ok: + self.module.fail_json(msg='Error getting {0} [{1}: {2}]'.format( + resource_key, + response.status_code, response.json['message'])) + + return response.json[resource_key] + + def fetch_all_resources(self, resource_key, **pagination_kwargs): + resources = [] + + result = [None] + while len(result) != 0: + result = self.fetch_paginated_resources(resource_key, **pagination_kwargs) + resources += result + if 'page' in pagination_kwargs: + pagination_kwargs['page'] += 1 + else: + pagination_kwargs['page'] = 2 + + return resources + + def wait_to_complete_state_transition(self, resource, stable_states, force_wait=False): + wait = self.module.params["wait"] + + if not (wait or force_wait): + return + + wait_timeout = self.module.params["wait_timeout"] + wait_sleep_time = self.module.params["wait_sleep_time"] + + # Prevent requesting the resource status too soon + time.sleep(wait_sleep_time) + + start = now() + end = start + datetime.timedelta(seconds=wait_timeout) + + while now() < end: + self.module.debug("We are going to wait for the resource to finish its transition") + + state = self.fetch_state(resource) + if state in 
stable_states: + self.module.debug("It seems that the resource is not in transition anymore.") + self.module.debug("load-balancer in state: %s" % self.fetch_state(resource)) + break + + time.sleep(wait_sleep_time) + else: + self.module.fail_json(msg="Server takes too long to finish its transition") + SCALEWAY_LOCATION = { - 'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'}, - 'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'}, + 'par1': { + 'name': 'Paris 1', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1' + }, - 'par2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'}, - 'EMEA-FR-PAR2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'}, + 'EMEA-FR-PAR1': { + 'name': 'Paris 1', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1' + }, - 'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'}, - 'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'}, + 'par2': { + 'name': 'Paris 2', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2' + }, - 'waw1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'}, - 'EMEA-PL-WAW1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'}, + 'EMEA-FR-PAR2': { + 'name': 'Paris 2', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2' + }, + + 'par3': { + 'name': 'Paris 3', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-3' + }, + + 'ams1': { + 'name': 'Amsterdam 1', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' + }, + + 'EMEA-NL-EVS': { + 'name': 'Amsterdam 1', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' + }, + + 'ams2': { + 'name': 'Amsterdam 2', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-2' + }, + + 'ams3': { + 'name': 'Amsterdam 3', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-3' + }, + + 'waw1': { + 'name': 'Warsaw 1', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1' + }, + + 'EMEA-PL-WAW1': { + 'name': 'Warsaw 1', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1', + 'api_endpoint_vpc': 
'https://api.scaleway.com/vpc/v1/zones/pl-waw-1' + }, + + 'waw2': { + 'name': 'Warsaw 2', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-2' + }, + + 'waw3': { + 'name': 'Warsaw 3', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-3' + }, } SCALEWAY_ENDPOINT = "https://api.scaleway.com" @@ -191,6 +423,11 @@ SCALEWAY_REGIONS = [ SCALEWAY_ZONES = [ "fr-par-1", "fr-par-2", + "fr-par-3", "nl-ams-1", + "nl-ams-2", + "nl-ams-3", "pl-waw-1", + "pl-waw-2", + "pl-waw-3", ] diff --git a/plugins/module_utils/snap.py b/plugins/module_utils/snap.py new file mode 100644 index 0000000000..d672a7b519 --- /dev/null +++ b/plugins/module_utils/snap.py @@ -0,0 +1,53 @@ +# Copyright (c) 2023, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_alias_state_map = dict( + present='alias', + absent='unalias', + info='aliases', +) + +_state_map = dict( + present='install', + absent='remove', + enabled='enable', + disabled='disable', + refresh='refresh', +) + + +def snap_runner(module, **kwargs): + runner = CmdRunner( + module, + "snap", + arg_formats=dict( + state_alias=cmd_runner_fmt.as_map(_alias_state_map), # snap_alias only + name=cmd_runner_fmt.as_list(), + alias=cmd_runner_fmt.as_list(), # snap_alias only + state=cmd_runner_fmt.as_map(_state_map), + _list=cmd_runner_fmt.as_fixed("list"), + _set=cmd_runner_fmt.as_fixed("set"), + get=cmd_runner_fmt.as_fixed(["get", "-d"]), + classic=cmd_runner_fmt.as_bool("--classic"), + channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', f'{v}']), + options=cmd_runner_fmt.as_list(), + info=cmd_runner_fmt.as_fixed("info"), + dangerous=cmd_runner_fmt.as_bool("--dangerous"), + version=cmd_runner_fmt.as_fixed("version"), + ), + check_rc=False, + **kwargs + ) + return runner + + +def get_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return dict(x.split() for x in out.splitlines() if len(x.split()) == 2) diff --git a/plugins/module_utils/source_control/bitbucket.py b/plugins/module_utils/source_control/bitbucket.py index 1d584391d9..a3d3fa5f2f 100644 --- a/plugins/module_utils/source_control/bitbucket.py +++ b/plugins/module_utils/source_control/bitbucket.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -27,7 +26,7 @@ class BitbucketHelper: # TODO: # - Rename user to username once current usage of username is removed # - Alias user to username and deprecate it - user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])), + user=dict(type='str', aliases=['username'], fallback=(env_fallback, ['BITBUCKET_USERNAME'])), password=dict(type='str', no_log=True, fallback=(env_fallback, 
['BITBUCKET_PASSWORD'])), ) @@ -55,14 +54,14 @@ class BitbucketHelper: if info['status'] == 200: self.access_token = content['access_token'] else: - self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info)) + self.module.fail_json(msg=f'Failed to retrieve access token: {info}') def request(self, api_url, method, data=None, headers=None): headers = headers or {} if self.access_token: headers.update({ - 'Authorization': 'Bearer {0}'.format(self.access_token), + 'Authorization': f'Bearer {self.access_token}', }) elif self.module.params['user'] and self.module.params['password']: headers.update({ diff --git a/plugins/module_utils/ssh.py b/plugins/module_utils/ssh.py new file mode 100644 index 0000000000..851efcbe86 --- /dev/null +++ b/plugins/module_utils/ssh.py @@ -0,0 +1,19 @@ +# Copyright (c) 2015, Björn Andersson +# Copyright (c) 2021, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import os + + +def determine_config_file(user, config_file): + if user: + config_file = os.path.join(os.path.expanduser(f'~{user}'), '.ssh', 'config') + elif config_file is None: + config_file = '/etc/ssh/ssh_config' + return config_file diff --git a/plugins/module_utils/storage/emc/emc_vnx.py b/plugins/module_utils/storage/emc/emc_vnx.py index 5922512676..b6a4d30463 100644 --- a/plugins/module_utils/storage/emc/emc_vnx.py +++ b/plugins/module_utils/storage/emc/emc_vnx.py @@ -1,16 +1,8 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# (c) 2018 Luca 'remix_tj' Lorenzetto -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2018 Luca 'remix_tj' Lorenzetto +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations emc_vnx_argument_spec = { diff --git a/plugins/module_utils/storage/hpe3par/hpe3par.py b/plugins/module_utils/storage/hpe3par/hpe3par.py index b7734444dd..da88db1ce6 100644 --- a/plugins/module_utils/storage/hpe3par/hpe3par.py +++ b/plugins/module_utils/storage/hpe3par/hpe3par.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2018, Hewlett Packard Enterprise Development LP +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils import basic @@ -21,7 +20,7 @@ def convert_to_binary_multiple(size_with_unit): if float(size) < 0: return -1 if not valid_unit: - raise ValueError("%s does not have a valid unit. 
The unit must be one of %s" % (size_with_unit, valid_units)) + raise ValueError(f"{size_with_unit} does not have a valid unit. The unit must be one of {valid_units}") size = size_with_unit.replace(" ", "").split('iB')[0] size_kib = basic.human_to_bytes(size) diff --git a/plugins/module_utils/systemd.py b/plugins/module_utils/systemd.py new file mode 100644 index 0000000000..00ce292feb --- /dev/null +++ b/plugins/module_utils/systemd.py @@ -0,0 +1,32 @@ +# Copyright (c) 2025, Marco Noce +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def systemd_runner(module, command, **kwargs): + arg_formats = dict( + version=cmd_runner_fmt.as_fixed("--version"), + list_units=cmd_runner_fmt.as_fixed(["list-units", "--no-pager"]), + types=cmd_runner_fmt.as_func(lambda v: [] if not v else ["--type", ",".join(v)]), + all=cmd_runner_fmt.as_fixed("--all"), + plain=cmd_runner_fmt.as_fixed("--plain"), + no_legend=cmd_runner_fmt.as_fixed("--no-legend"), + show=cmd_runner_fmt.as_fixed("show"), + props=cmd_runner_fmt.as_func(lambda v: [] if not v else ["-p", ",".join(v)]), + dashdash=cmd_runner_fmt.as_fixed("--"), + unit=cmd_runner_fmt.as_list(), + ) + + runner = CmdRunner( + module, + command=command, + arg_formats=arg_formats, + check_rc=True, + **kwargs + ) + return runner diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py index a44a0052a9..1475a91542 100644 --- a/plugins/module_utils/univention_umc.py +++ b/plugins/module_utils/univention_umc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. @@ -9,10 +8,10 @@ # Copyright (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations """Univention Corporate Server (UCS) access module. @@ -90,7 +89,7 @@ def uldap(): def construct(): try: secret_file = open('/etc/ldap.secret', 'r') - bind_dn = 'cn=admin,{0}'.format(base_dn()) + bind_dn = f'cn=admin,{base_dn()}' except IOError: # pragma: no cover secret_file = open('/etc/machine.secret', 'r') bind_dn = config_registry()["ldap/hostdn"] @@ -187,7 +186,7 @@ def module_by_name(module_name_): univention.admin.modules.init(uldap(), position_base_dn(), module) return module - return _singleton('module/%s' % module_name_, construct) + return _singleton(f'module/{module_name_}', construct) def get_umc_admin_objects(): diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 7e6ff3093e..2e7432fb38 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # -# Copyright: (c) 2018, Johannes Brunswicker +# Copyright (c) 2018, Johannes Brunswicker # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -72,8 +71,9 @@ class UTM: """ self.info_only = info_only self.module = module - self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native( - module.params.get('utm_port')) + "/api/objects/" + endpoint + "/" + self.request_url = ( + f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/" + ) """ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated @@ -83,9 +83,8 @@ class UTM: self.module.params['url_password'] = module.params.get('utm_token') if all(elem in self.change_relevant_keys for elem in module.params.keys()): raise UTMModuleConfigurationError( - "The keys " + to_native( - self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native( - list(module.params.keys()))) + f"The keys {self.change_relevant_keys} to check are not in the modules keys:\n{list(module.params.keys())}" + ) def execute(self): try: @@ -184,7 +183,7 @@ class UTM: result = None if response is not None: results = json.loads(response.read()) - result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None) + result = next((d for d in results if d['name'] == module.params.get('name')), None) return info, result def _clean_result(self, result): diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py new file mode 100644 index 0000000000..ccea7d5bb6 --- /dev/null +++ b/plugins/module_utils/vardict.py @@ -0,0 +1,196 @@ +# (c) 2023, Alexei Znamensky +# Copyright (c) 2023, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import copy + + +class _Variable(object): + NOTHING = object() + + def __init__(self, diff=False, output=True, change=None, fact=False, verbosity=0): + self.init = False + self.initial_value = None + self.value = None + + self.diff = None + self._change = None + self.output = None + self.fact = None + self._verbosity = None + self.set_meta(output=output, diff=diff, change=change, fact=fact, verbosity=verbosity) + + def getchange(self): + return self.diff if self._change is None else self._change + + def setchange(self, value): + self._change = value + + def getverbosity(self): + return self._verbosity + + def setverbosity(self, v): + if not (0 <= v <= 4): + raise ValueError("verbosity must be an int in the range 0 to 4") + self._verbosity = v + + change = property(getchange, setchange) + verbosity = property(getverbosity, setverbosity) + + def set_meta(self, output=None, diff=None, change=None, fact=None, initial_value=NOTHING, verbosity=None): + """Set the metadata for the variable + + Args: + output (bool, optional): flag indicating whether the 
variable should be in the output of the module. Defaults to None. + diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None. + change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None. + fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None. + initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING. + verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None. + """ + if output is not None: + self.output = output + if change is not None: + self.change = change + if diff is not None: + self.diff = diff + if fact is not None: + self.fact = fact + if initial_value is not _Variable.NOTHING: + self.initial_value = copy.deepcopy(initial_value) + if verbosity is not None: + self.verbosity = verbosity + + def as_dict(self, meta_only=False): + d = { + "diff": self.diff, + "change": self.change, + "output": self.output, + "fact": self.fact, + "verbosity": self.verbosity, + } + if not meta_only: + d["initial_value"] = copy.deepcopy(self.initial_value) + d["value"] = self.value + return d + + def set_value(self, value): + if not self.init: + self.initial_value = copy.deepcopy(value) + self.init = True + self.value = value + return self + + def is_visible(self, verbosity): + return self.verbosity <= verbosity + + @property + def has_changed(self): + return self.change and (self.initial_value != self.value) + + @property + def diff_result(self): + if self.diff and self.has_changed: + return {'before': self.initial_value, 'after': self.value} + return + + def __str__(self): + return ( + f"<_Variable: value={self.value!r}, initial={self.initial_value!r}, diff={self.diff}, " + f"output={self.output}, change={self.change}, verbosity={self.verbosity}>" + ) + + +class VarDict(object): + reserved_names = ('__vars__', '_var', 'var', 'set_meta', 'get_meta', 'set', 'output', 'diff', 'facts', 'has_changed', 'as_dict') + + def __init__(self): + self.__vars__ = dict() + + def __getitem__(self, item): + return self.__vars__[item].value + + def __setitem__(self, key, value): + self.set(key, value) + + def __getattr__(self, item): + try: + return self.__vars__[item].value + except KeyError: + return getattr(super(VarDict, self), item) + + def __setattr__(self, key, value): + if key == '__vars__': + super(VarDict, self).__setattr__(key, value) + else: + self.set(key, value) + + def _var(self, name): + return self.__vars__[name] + + def var(self, name): + return self._var(name).as_dict() + + def set_meta(self, name, **kwargs): + """Set the metadata for the variable + + Args: + name (str): name of the variable having its metadata changed + output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None. + diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None. + change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None. + fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None. + initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING. + verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None.
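+ + Example (an illustrative sketch only; the variable names used here are made up): + + vd = VarDict() + vd.set("tmpdir", "/tmp/work") # a variable must be set before set_meta() can touch it + vd.set_meta("tmpdir", output=True, verbosity=2) # include in output only at -vv or higher + vd.set_meta("tmpdir", fact=True) # also expose it as a fact of the module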
+ """ + self._var(name).set_meta(**kwargs) + + def get_meta(self, name): + return self._var(name).as_dict(meta_only=True) + + def set(self, name, value, **kwargs): + """Set the value and optionally metadata for a variable. The variable is not required to exist prior to calling `set`. + + For details on the accepted metada see the documentation for method `set_meta`. + + Args: + name (str): name of the variable being changed + value (any): the value of the variable, it can be of any type + + Raises: + ValueError: Raised if trying to set a variable with a reserved name. + """ + if name in self.reserved_names: + raise ValueError(f"Name {name} is reserved") + if name in self.__vars__: + var = self._var(name) + var.set_meta(**kwargs) + else: + var = _Variable(**kwargs) + var.set_value(value) + self.__vars__[name] = var + + def output(self, verbosity=0): + return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)} + + def diff(self, verbosity=0): + diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)] + if diff_results: + before = {n: dr['before'] for n, dr in diff_results} + after = {n: dr['after'] for n, dr in diff_results} + return {'before': before, 'after': after} + return None + + def facts(self, verbosity=0): + facts_result = {n: v.value for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)} + return facts_result if facts_result else None + + @property + def has_changed(self): + return any(var.has_changed for var in self.__vars__.values()) + + def as_dict(self): + return {name: var.value for name, var in self.__vars__.items()} diff --git a/plugins/module_utils/version.py b/plugins/module_utils/version.py new file mode 100644 index 0000000000..18cd6d12fe --- /dev/null +++ b/plugins/module_utils/version.py @@ -0,0 +1,11 @@ + +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Provide version object to compare version numbers.""" + +from __future__ import annotations + + +from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py index 3d6fb7aaca..ed0b11480c 100644 --- a/plugins/module_utils/vexata.py +++ b/plugins/module_utils/vexata.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2019, Sandeep Kasargod -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2019, Sandeep Kasargod +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations HAS_VEXATAPI = True @@ -13,7 +12,6 @@ try: except ImportError: HAS_VEXATAPI = False -from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import env_fallback VXOS_VERSION = None @@ -22,10 +20,9 @@ VXOS_VERSION = None def get_version(iocs_json): if not iocs_json: raise Exception('Invalid IOC json') - active = filter(lambda x: x['mgmtRole'], iocs_json) - if not active: + active = next((x for x in iocs_json if x['mgmtRole']), None) + if active is None: raise Exception('Unable to detect active IOC') - active = active[0] ver = 
active['swVersion'] if ver[0] != 'v': raise Exception('Illegal version string') @@ -59,7 +56,7 @@ def get_array(module): else: module.fail_json(msg='Test connection to array failed.') except Exception as e: - module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e))) + module.fail_json(msg=f'Vexata API access failed: {e}') def argument_spec(): diff --git a/plugins/module_utils/wdc_redfish_utils.py b/plugins/module_utils/wdc_redfish_utils.py new file mode 100644 index 0000000000..564be3829e --- /dev/null +++ b/plugins/module_utils/wdc_redfish_utils.py @@ -0,0 +1,553 @@ + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import datetime +import re +import time +import tarfile +import os +from urllib.parse import urlparse, urlunparse + +from ansible.module_utils.urls import fetch_file +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils + + +class WdcRedfishUtils(RedfishUtils): + """Extension to RedfishUtils to support WDC enclosures.""" + # Status codes returned by WDC FW Update Status + UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE = 0 + UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS = 1 + UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION = 2 + UPDATE_STATUS_CODE_FW_UPDATE_FAILED = 3 + + # Status messages returned by WDC FW Update Status + UPDATE_STATUS_MESSAGE_READY_FOR_FW_UPDATE = "Ready for FW update" + UPDATE_STATUS_MESSAGE_FW_UPDATE_IN_PROGRESS = "FW update in progress" + UPDATE_STATUS_MESSAGE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION = "FW update completed. Waiting for activation." + UPDATE_STATUS_MESSAGE_FW_UPDATE_FAILED = "FW update failed." + + # Dict keys for resource bodies + # Standard keys + ACTIONS = "Actions" + OEM = "Oem" + WDC = "WDC" + TARGET = "target" + + # Keys for specific operations + CHASSIS_LOCATE = "#Chassis.Locate" + CHASSIS_POWER_MODE = "#Chassis.PowerMode" + + def __init__(self, + creds, + root_uris, + timeout, + module, + resource_id, + data_modification): + super(WdcRedfishUtils, self).__init__(creds=creds, + root_uri=root_uris[0], + timeout=timeout, + module=module, + resource_id=resource_id, + data_modification=data_modification) + # Update the root URI if we cannot perform a Redfish GET to the first one + self._set_root_uri(root_uris) + + def _set_root_uri(self, root_uris): + """Set the root URI from a list of options. + + If the current root URI is good, just keep it. Else cycle through our options until we find a good one. + A URI is considered good if we can GET uri/redfish/v1. + """ + for root_uri in root_uris: + uri = f"{root_uri}/redfish/v1" + response = self.get_request(uri) + if response['ret']: + self.root_uri = root_uri + break + + def _find_updateservice_resource(self): + """Find the update service resource as well as additional WDC-specific resources.""" + response = super(WdcRedfishUtils, self)._find_updateservice_resource() + if not response['ret']: + return response + return self._find_updateservice_additional_uris() + + def _is_enclosure_multi_tenant_and_fetch_gen(self): + """Determine if the enclosure is multi-tenant. + + The serial number of a multi-tenant enclosure will end in "-A" or "-B". + Also fetches the enclosure generation. + + :return: True/False if the enclosure is multi-tenant or not and return enclosure generation; + None if unable to determine.
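+ :rtype: tuple(bool, str), or (None, None) when the chassis resource cannot be read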
+ """ + response = self.get_request(f"{self.root_uri}{self.service_root}Chassis/Enclosure") + if response['ret'] is False: + return None + pattern = r".*-[A,B]" + data = response['data'] + if 'EnclVersion' not in data: + enc_version = 'G1' + else: + enc_version = data['EnclVersion'] + return re.match(pattern, data['SerialNumber']) is not None, enc_version + + def _find_updateservice_additional_uris(self): + """Find & set WDC-specific update service URIs""" + response = self.get_request(self.root_uri + self._update_uri()) + if response['ret'] is False: + return response + data = response['data'] + if 'Actions' not in data: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + if '#UpdateService.SimpleUpdate' not in data['Actions']: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + action = data['Actions']['#UpdateService.SimpleUpdate'] + if 'target' not in action: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + self.simple_update_uri = action['target'] + + # Simple update status URI is not provided via GET /redfish/v1/UpdateService + # So we have to hard code it. + self.simple_update_status_uri = f"{self.simple_update_uri}/Status" + + # FWActivate URI + if 'Oem' not in data['Actions']: + return {'ret': False, 'msg': 'Service does not support OEM operations'} + if 'WDC' not in data['Actions']['Oem']: + return {'ret': False, 'msg': 'Service does not support WDC operations'} + if '#UpdateService.FWActivate' not in data['Actions']['Oem']['WDC']: + return {'ret': False, 'msg': 'Service does not support FWActivate'} + action = data['Actions']['Oem']['WDC']['#UpdateService.FWActivate'] + if 'target' not in action: + return {'ret': False, 'msg': 'Service does not support FWActivate'} + self.firmware_activate_uri = action['target'] + return {'ret': True} + + def _simple_update_status_uri(self): + return self.simple_update_status_uri + + def _firmware_activate_uri(self): + return self.firmware_activate_uri + + def _update_uri(self): + return self.update_uri + + def get_simple_update_status(self): + """Issue Redfish HTTP GET to return the simple update status""" + result = {} + response = self.get_request(self.root_uri + self._simple_update_status_uri()) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + result['entries'] = data + return result + + def firmware_activate(self, update_opts): + """Perform FWActivate using Redfish HTTP API.""" + creds = update_opts.get('update_creds') + payload = {} + if creds: + if creds.get('username'): + payload["Username"] = creds.get('username') + if creds.get('password'): + payload["Password"] = creds.get('password') + + # Make sure the service supports FWActivate + response = self.get_request(self.root_uri + self._update_uri()) + if response['ret'] is False: + return response + data = response['data'] + if 'Actions' not in data: + return {'ret': False, 'msg': 'Service does not support FWActivate'} + + response = self.post_request(self.root_uri + self._firmware_activate_uri(), payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "FWActivate requested"} + + def _get_bundle_version(self, + bundle_uri): + """Get the firmware version from a bundle file, and whether or not it is multi-tenant. + + Only supports HTTP at this time. Assumes URI exists and is a tarfile. + Looks for a file oobm-[version].pkg, such as 'oobm-4.0.13.pkg`. 
Extracts the version number + from that filename (in the above example, the version number is "4.0.13"). + + To determine if the bundle is multi-tenant or not, it looks inside the .bin file within the tarfile, + and checks the appropriate byte in the file. + + If it is not a tarfile, the 2048th byte of the bundle is checked to determine whether it is a Gen2 bundle. + Gen2 is always single tenant at this time. + + :param str bundle_uri: HTTP URI of the firmware bundle. + :return: Firmware version number contained in the bundle, whether or not the bundle is multi-tenant + and bundle generation. Either value will be None if unable to determine. + :rtype: str or None, bool or None, str or None + """ + bundle_temp_filename = fetch_file(module=self.module, + url=bundle_uri) + bundle_version = None + is_multi_tenant = None + gen = None + + # If not tarfile, then if the file has "MMG2" or "DPG2" at 2048th byte + # then the bundle is for MM or DP G2 + if not tarfile.is_tarfile(bundle_temp_filename): + cookie1 = None + with open(bundle_temp_filename, "rb") as bundle_file: + file_size = os.path.getsize(bundle_temp_filename) + if file_size >= 2052: + bundle_file.seek(2048) + cookie1 = bundle_file.read(4) + # It is anticipated that a DP firmware bundle will have the value "DPG2" + # for cookie1 in the header + if cookie1 and cookie1.decode("utf8") in ("MMG2", "DPG2"): + file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1])) + # G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12 + parsedFileName = file_name.split('_') + if len(parsedFileName) == 5: + bundle_version = parsedFileName[4] + # MM G2 is always single tenant + is_multi_tenant = False + gen = "G2" + + return bundle_version, is_multi_tenant, gen + + # Bundle is for MM or DP G1 + tf = tarfile.open(bundle_temp_filename) + pattern_pkg = r"oobm-(.+)\.pkg" + pattern_bin = r"(.*\.bin)" + bundle_version = None + is_multi_tenant = None + for filename in tf.getnames(): + match_pkg = re.match(pattern_pkg, filename) + if match_pkg is not None: + bundle_version = match_pkg.group(1) + match_bin = re.match(pattern_bin, filename) + if match_bin is not None: + bin_filename = match_bin.group(1) + bin_file = tf.extractfile(bin_filename) + bin_file.seek(11) + byte_11 = bin_file.read(1) + is_multi_tenant = byte_11 == b'\x80' + gen = "G1" + + return bundle_version, is_multi_tenant, gen + + @staticmethod + def uri_is_http(uri): + """Return True if the specified URI is http or https. + + :param str uri: A URI. + :return: True if the URI is http or https, else False + :rtype: bool + """ + parsed_bundle_uri = urlparse(uri) + return parsed_bundle_uri.scheme.lower() in ['http', 'https'] + + def update_and_activate(self, update_opts): + """Update and activate the firmware in a single action. + + Orchestrates the firmware update so that everything can be done in a single command. + Compares the update version with the already-installed version -- skips update if they are the same. + Performs retries, handles timeouts as needed.
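+ + :param dict update_opts: as used below, must contain 'update_image_uri' and may contain + 'update_creds', a dict with 'username' and 'password'. + :return: dict with 'ret' (bool), 'msg' (str) and, on success, 'changed' (bool).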
+ + """ + # Convert credentials to standard HTTP format + if update_opts.get("update_creds") is not None and "username" in update_opts["update_creds"] and "password" in update_opts["update_creds"]: + update_creds = update_opts["update_creds"] + parsed_url = urlparse(update_opts["update_image_uri"]) + if update_creds: + original_netloc = parsed_url.netloc + parsed_url = parsed_url._replace(netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}") + update_opts["update_image_uri"] = urlunparse(parsed_url) + del update_opts["update_creds"] + + # Make sure bundle URI is HTTP(s) + bundle_uri = update_opts["update_image_uri"] + + if not self.uri_is_http(bundle_uri): + return { + 'ret': False, + 'msg': 'Bundle URI must be HTTP or HTTPS' + } + # Make sure IOM is ready for update + result = self.get_simple_update_status() + if result['ret'] is False: + return result + update_status = result['entries'] + status_code = update_status['StatusCode'] + status_description = update_status['Description'] + if status_code not in [ + self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE, + self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED + ]: + return { + 'ret': False, + 'msg': f'Target is not ready for FW update. Current status: {status_code} ({status_description})'} + + # Check the FW version in the bundle file, and compare it to what is already on the IOMs + + # Bundle version number + bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri) + if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None: + return { + 'ret': False, + 'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file' + } + + is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen() + + # Verify that the bundle is correctly multi-tenant or not + if is_enclosure_multi_tenant != is_bundle_multi_tenant: + return { + 'ret': False, + 'msg': f'Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}' + } + + # Verify that the bundle is compliant with the target enclosure + if enclosure_gen != bundle_gen: + return { + 'ret': False, + 'msg': f'Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}' + } + + # Version number installed on IOMs + firmware_inventory = self.get_firmware_inventory() + if not firmware_inventory["ret"]: + return firmware_inventory + firmware_inventory_dict = {} + for entry in firmware_inventory["entries"]: + firmware_inventory_dict[entry["Id"]] = entry + iom_a_firmware_version = firmware_inventory_dict.get("IOModuleA_OOBM", {}).get("Version") + iom_b_firmware_version = firmware_inventory_dict.get("IOModuleB_OOBM", {}).get("Version") + # If version is None, we will proceed with the update, because we cannot tell + # for sure that we have a full version match. + if is_enclosure_multi_tenant: + # For multi-tenant, only one of the IOMs will be affected by the firmware update, + # so see if that IOM already has the same firmware version as the bundle. + firmware_already_installed = bundle_firmware_version == self._get_installed_firmware_version_of_multi_tenant_system( + iom_a_firmware_version, + iom_b_firmware_version) + else: + # For single-tenant, see if both IOMs already have the same firmware version as the bundle. 
+ firmware_already_installed = bundle_firmware_version == iom_a_firmware_version == iom_b_firmware_version + # If this FW already installed, return changed: False, and do not update the firmware. + if firmware_already_installed: + return { + 'ret': True, + 'changed': False, + 'msg': f'Version {bundle_firmware_version} already installed' + } + + # Version numbers don't match the bundle -- proceed with update (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + update_successful = False + retry_interval_seconds = 5 + max_number_of_retries = 5 + retry_number = 0 + while retry_number < max_number_of_retries and not update_successful: + if retry_number != 0: + time.sleep(retry_interval_seconds) + retry_number += 1 + + result = self.simple_update(update_opts) + if result['ret'] is not True: + # Sometimes a timeout error is returned even though the update actually was requested. + # Check the update status to see if the update is in progress. + status_result = self.get_simple_update_status() + if status_result['ret'] is False: + continue + update_status = status_result['entries'] + status_code = update_status['StatusCode'] + if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS: + # Update is not in progress -- retry until max number of retries + continue + else: + update_successful = True + else: + update_successful = True + if not update_successful: + # Unable to get SimpleUpdate to work. Return the failure from the SimpleUpdate + return result + + # Wait for "ready to activate" + max_wait_minutes = 30 + polling_interval_seconds = 30 + status_code = self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE + start_time = datetime.datetime.now() + # For a short time, target will still say "ready for firmware update" before it transitions + # to "update in progress" + status_codes_for_update_incomplete = [ + self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS, + self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE + ] + iteration = 0 + while status_code in status_codes_for_update_incomplete \ + and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes): + if iteration != 0: + time.sleep(polling_interval_seconds) + iteration += 1 + result = self.get_simple_update_status() + if result['ret'] is False: + continue # We may get timeouts, just keep trying until we give up + update_status = result['entries'] + status_code = update_status['StatusCode'] + status_description = update_status['Description'] + if status_code == self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS: + # Once it says update in progress, "ready for update" is no longer a valid status code + status_codes_for_update_incomplete = [self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS] + + # Update no longer in progress -- verify that it finished + if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION: + return { + 'ret': False, + 'msg': f'Target is not ready for FW activation after update. Current status: {status_code} ({status_description})'} + + self.firmware_activate(update_opts) + return {'ret': True, 'changed': True, + 'msg': "Firmware updated and activation initiated."} + + def _get_installed_firmware_version_of_multi_tenant_system(self, + iom_a_firmware_version, + iom_b_firmware_version): + """Return the version for the active IOM on a multi-tenant system. + + Only call this on a multi-tenant system. 
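+ (On a single-tenant system both IOMs are expected to run the same firmware, so there is no active/standby selection to make there.)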
Given the installed firmware versions for IOM A, B, this method will determine which IOM is active + for this tenant, and return that IOM's firmware version. + """ + # To determine which IOM we are on, try to GET each IOM resource + # The one we are on will return valid data. + # The other will return an error with message "IOM Module A/B cannot be read" + which_iom_is_this = None + for iom_letter in ['A', 'B']: + iom_uri = f"Chassis/IOModule{iom_letter}FRU" + response = self.get_request(self.root_uri + self.service_root + iom_uri) + if response['ret'] is False: + continue + data = response['data'] + if "Id" in data:  # Assume if there is an "Id", it is valid + which_iom_is_this = iom_letter + break + if which_iom_is_this == 'A': + return iom_a_firmware_version + elif which_iom_is_this == 'B': + return iom_b_firmware_version + else: + return None + + @staticmethod + def _get_led_locate_uri(data): + """Get the LED locate URI given a resource body.""" + if WdcRedfishUtils.ACTIONS not in data: + return None + if WdcRedfishUtils.OEM not in data[WdcRedfishUtils.ACTIONS]: + return None + if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]: + return None + if WdcRedfishUtils.CHASSIS_LOCATE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + return None + if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE]: + return None + return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][WdcRedfishUtils.TARGET] + + @staticmethod + def _get_power_mode_uri(data): + """Get the Power Mode URI given a resource body.""" + if WdcRedfishUtils.ACTIONS not in data: + return None + if WdcRedfishUtils.OEM not in data[WdcRedfishUtils.ACTIONS]: + return None + if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]: + return None + if WdcRedfishUtils.CHASSIS_POWER_MODE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + return None + if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE]: + return None + return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE][WdcRedfishUtils.TARGET] + + def manage_indicator_led(self, command, resource_uri): + key = 'IndicatorLED' + + payloads = {'IndicatorLedOn': 'On', 'IndicatorLedOff': 'Off'} + current_led_status_map = {'IndicatorLedOn': 'Blinking', 'IndicatorLedOff': 'Off'} + + result = {} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if key not in data: + return {'ret': False, 'msg': f"Key {key} not found"} + current_led_status = data[key] + if current_led_status == current_led_status_map[command]: + return {'ret': True, 'changed': False} + + led_locate_uri = self._get_led_locate_uri(data) + if led_locate_uri is None: + return {'ret': False, 'msg': 'LED locate URI not found.'} + + if command in payloads.keys(): + payload = {'LocateState': payloads[command]} + response = self.post_request(self.root_uri + led_locate_uri, payload) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return result + + def manage_chassis_power_mode(self, command): + return self.manage_power_mode(command, self.chassis_uri) + + def
manage_power_mode(self, command, resource_uri=None): + if resource_uri is None: + resource_uri = self.chassis_uri + + payloads = {'PowerModeNormal': 'Normal', 'PowerModeLow': 'Low'} + requested_power_mode = payloads[command] + + result = {} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + # Make sure the response includes Oem.WDC.PowerMode, and get current power mode + power_mode = 'PowerMode' + if WdcRedfishUtils.OEM not in data or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM] or\ + power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + return {'ret': False, 'msg': 'Resource does not support Oem.WDC.PowerMode'} + current_power_mode = data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][power_mode] + if current_power_mode == requested_power_mode: + return {'ret': True, 'changed': False} + + power_mode_uri = self._get_power_mode_uri(data) + if power_mode_uri is None: + return {'ret': False, 'msg': 'Power Mode URI not found.'} + + if command in payloads.keys(): + payload = {'PowerMode': payloads[command]} + response = self.post_request(self.root_uri + power_mode_uri, payload) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return result diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py new file mode 100644 index 0000000000..d02002737b --- /dev/null +++ b/plugins/module_utils/xdg_mime.py @@ -0,0 +1,34 @@ +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def xdg_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['xdg-mime'], + arg_formats=dict( + default=cmd_runner_fmt.as_fixed('default'), + query=cmd_runner_fmt.as_fixed('query'), + mime_types=cmd_runner_fmt.as_list(), + handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def xdg_mime_get(runner, mime_type): + def process(rc, out, err): + if not out.strip(): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("query default mime_types", output_process=process) as ctx: + return ctx.run(mime_types=mime_type) diff --git a/plugins/module_utils/xenserver.py b/plugins/module_utils/xenserver.py index 015b10215e..32576000cc 100644 --- a/plugins/module_utils/xenserver.py +++ b/plugins/module_utils/xenserver.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import atexit import time @@ -27,22 +26,18 @@ def xenserver_common_argument_spec(): return dict( hostname=dict(type='str', aliases=['host', 'pool'], - required=False, default='localhost', fallback=(env_fallback, ['XENSERVER_HOST']), ), username=dict(type='str', aliases=['user',
'admin'], - required=False, default='root', fallback=(env_fallback, ['XENSERVER_USER'])), password=dict(type='str', aliases=['pass', 'pwd'], - required=False, no_log=True, fallback=(env_fallback, ['XENSERVER_PASSWORD'])), validate_certs=dict(type='bool', - required=False, default=True, fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])), ) @@ -293,29 +288,29 @@ def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix try: # Find object by UUID. If no object is found using given UUID, # an exception will be generated. - obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,)) + obj_ref = xapi_session.xenapi_request(f"{real_obj_type}.get_by_uuid", (uuid,)) except XenAPI.Failure as f: if fail: - module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid)) + module.fail_json(msg=f"{msg_prefix}{obj_type} with UUID '{uuid}' not found!") elif name: try: # Find object by name (name_label). - obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,)) + obj_ref_list = xapi_session.xenapi_request(f"{real_obj_type}.get_by_name_label", (name,)) except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") # If obj_ref_list is empty. if not obj_ref_list: if fail: - module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name)) + module.fail_json(msg=f"{msg_prefix}{obj_type} with name '{name}' not found!") # If obj_ref_list contains multiple object references. elif len(obj_ref_list) > 1: - module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name)) + module.fail_json(msg=f"{msg_prefix}multiple {obj_type}s with name '{name}' found! Please use UUID.") # The obj_ref_list contains only one object reference. else: obj_ref = obj_ref_list[0] else: - module.fail_json(msg="%sno valid name or UUID supplied for %s!" 
% (msg_prefix, obj_type)) + module.fail_json(msg=f"{msg_prefix}no valid name or UUID supplied for {obj_type}!") return obj_ref @@ -401,7 +396,7 @@ def gather_vm_params(module, vm_ref): vm_params['customization_agent'] = "custom" except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return vm_params @@ -477,12 +472,13 @@ def gather_vm_facts(module, vm_params): "mac": vm_vif_params['MAC'], "vif_device": vm_vif_params['device'], "mtu": vm_vif_params['MTU'], - "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''), + "ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ''), "prefix": "", "netmask": "", "gateway": "", - "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" % - vm_vif_params['device'])], + "ip6": [vm_guest_metrics_networks[ipv6] + for ipv6 in sorted(vm_guest_metrics_networks.keys()) + if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/")], "prefix6": "", "gateway6": "", } @@ -503,7 +499,7 @@ def gather_vm_facts(module, vm_params): vm_xenstore_data = vm_params['xenstore_data'] for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']: - vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "") + vm_network_params[f] = vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/{f}", "") vm_facts['networks'].append(vm_network_params) @@ -570,14 +566,14 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): if not module.check_mode: xapi_session.xenapi.VM.hard_reboot(vm_ref) else: - module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot restart VM in state '{vm_power_state_current}'!") elif power_state == "suspended": # running state is required for suspend. if vm_power_state_current == "poweredon": if not module.check_mode: xapi_session.xenapi.VM.suspend(vm_ref) else: - module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot suspend VM in state '{vm_power_state_current}'!") elif power_state == "shutdownguest": # running state is required for guest shutdown. if vm_power_state_current == "poweredon": @@ -589,9 +585,9 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): task_result = wait_for_task(module, task_ref, timeout) if task_result: - module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result) + module.fail_json(msg=f"Guest shutdown task failed: '{task_result}'!") else: - module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot shutdown guest when VM is in state '{vm_power_state_current}'!") elif power_state == "rebootguest": # running state is required for guest reboot. if vm_power_state_current == "poweredon": @@ -603,15 +599,15 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): task_result = wait_for_task(module, task_ref, timeout) if task_result: - module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result) + module.fail_json(msg=f"Guest reboot task failed: '{task_result}'!") else: - module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot reboot guest when VM is in state '{vm_power_state_current}'!") else: - module.fail_json(msg="Requested VM power state '%s' is unsupported!" 
% power_state) + module.fail_json(msg=f"Requested VM power state '{power_state}' is unsupported!") state_changed = True except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return (state_changed, vm_power_state_resulting) @@ -670,7 +666,7 @@ def wait_for_task(module, task_ref, timeout=300): xapi_session.xenapi.task.destroy(task_ref) except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return result @@ -702,7 +698,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) if vm_power_state != 'poweredon': - module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state) + module.fail_json(msg=f"Cannot wait for VM IP address when VM is in state '{vm_power_state}'!") interval = 2 @@ -733,7 +729,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): module.fail_json(msg="Timed out waiting for VM IP address!") except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return vm_guest_metrics @@ -796,7 +792,7 @@ class XAPI(object): # If scheme is not specified we default to http:// because https:// # is problematic in most setups. if not hostname.startswith("http://") and not hostname.startswith("https://"): - hostname = "http://%s" % hostname + hostname = f"http://{hostname}" try: # ignore_ssl is supported in XenAPI library from XenServer 7.2 @@ -815,7 +811,7 @@ class XAPI(object): try: cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible') except XenAPI.Failure as f: - module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details)) + module.fail_json(msg=f"Unable to log on to XenServer at {hostname} as {username}: {f.details}") # Disabling atexit should be used in special cases only. 
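+ # When it is disabled, nothing logs the XAPI session out automatically, so the caller becomes responsible for closing the session itself.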
if disconnect_atexit: @@ -858,4 +854,4 @@ class XenServerObject(object): self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref) self.xenserver_version = get_xenserver_version(module) except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + self.module.fail_json(msg=f"XAPI ERROR: {f.details}") diff --git a/plugins/module_utils/xfconf.py b/plugins/module_utils/xfconf.py new file mode 100644 index 0000000000..8febbf450d --- /dev/null +++ b/plugins/module_utils/xfconf.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible.module_utils.parsing.convert_bool import boolean +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +@cmd_runner_fmt.unpack_args +def _values_fmt(values, value_types): + result = [] + for value, value_type in zip(values, value_types): + if value_type == 'bool': + value = 'true' if boolean(value) else 'false' + result.extend(['--type', f'{value_type}', '--set', f'{value}']) + return result + + +def xfconf_runner(module, **kwargs): + runner = CmdRunner( + module, + command='xfconf-query', + arg_formats=dict( + channel=cmd_runner_fmt.as_opt_val("--channel"), + property=cmd_runner_fmt.as_opt_val("--property"), + force_array=cmd_runner_fmt.as_bool("--force-array"), + reset=cmd_runner_fmt.as_bool("--reset"), + create=cmd_runner_fmt.as_bool("--create"), + list_arg=cmd_runner_fmt.as_bool("--list"), + values_and_types=_values_fmt, + version=cmd_runner_fmt.as_fixed("--version"), + ), + **kwargs + ) + return runner + + +def get_xfconf_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return out.splitlines()[0].split()[1] diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py deleted file mode 120000 index fcf716e187..0000000000 --- a/plugins/modules/aerospike_migrations.py +++ /dev/null @@ -1 +0,0 @@ -./database/aerospike/aerospike_migrations.py \ No newline at end of file diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py new file mode 100644 index 0000000000..02b8bd7730 --- /dev/null +++ b/plugins/modules/aerospike_migrations.py @@ -0,0 +1,517 @@ +#!/usr/bin/python +"""short_description: Check or wait for migrations between nodes""" + +# Copyright (c) 2018, Albert Autin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: aerospike_migrations +short_description: Check or wait for migrations between nodes +description: + - This can be used to check for migrations in a cluster. This makes it easy to do a rolling upgrade/update on Aerospike + nodes. + - If waiting for migrations is not desired, simply poll until port 3000 is available or C(asinfo -v status) returns + ok. +author: "Albert Autin (@Alb0t)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + host: + description: + - Which host to use as the seed for the info connection. + type: str + default: localhost + port: + description: + - Which port to connect to Aerospike on (service port).
+ required: false + type: int + default: 3000 + connect_timeout: + description: + - How long to try to connect before giving up (milliseconds). + required: false + type: int + default: 1000 + consecutive_good_checks: + description: + - How many times should the cluster report "no migrations" consecutively before returning OK back to ansible? + required: false + type: int + default: 3 + sleep_between_checks: + description: + - How long to sleep between each check (seconds). + required: false + type: int + default: 60 + tries_limit: + description: + - How many times do we poll before giving up and failing? + default: 300 + required: false + type: int + local_only: + description: + - Do you wish to only check for migrations on the local node before returning, or do you want all nodes in the cluster + to finish before returning? + required: true + type: bool + min_cluster_size: + description: + - The check fails until the cluster size is met or until the tries limit is exhausted. + required: false + type: int + default: 1 + fail_on_cluster_change: + description: + - Fail if the cluster key changes. If something else is changing the cluster, we may want to fail. + required: false + type: bool + default: true + migrate_tx_key: + description: + - The metric key used to determine if we have tx migrations remaining. Changeable due to backwards compatibility. + required: false + type: str + default: migrate_tx_partitions_remaining + migrate_rx_key: + description: + - The metric key used to determine if we have rx migrations remaining. Changeable due to backwards compatibility. + required: false + type: str + default: migrate_rx_partitions_remaining + target_cluster_size: + description: + - When all aerospike builds in the cluster are version 4.3 or greater, the C(cluster-stable) info command is + used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary. + You can still rely on O(min_cluster_size) if you do not want to use this option. + - If this option is specified on a cluster that has at least one host <4.3 then it is ignored until the min version + reaches 4.3. + required: false + type: int +""" + +EXAMPLES = r""" +# check for migrations on local node +- name: Wait for migrations on local node before proceeding + community.general.aerospike_migrations: + host: "localhost" + connect_timeout: 2000 + consecutive_good_checks: 5 + sleep_between_checks: 15 + tries_limit: 600 + local_only: false + +# example playbook: +- name: Upgrade aerospike + hosts: all + become: true + serial: 1 + tasks: + - name: Install dependencies + ansible.builtin.apt: + name: + - python + - python-pip + - python-setuptools + state: latest + - name: Setup aerospike + ansible.builtin.pip: + name: aerospike +# check for migrations every (sleep_between_checks) +# If at least (consecutive_good_checks) checks come back OK in a row, then return OK. +# Will exit if any exception, which can be caused by bad nodes, +# nodes not returning data, or other reasons.
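+# Note: the until/retries loop on the task below re-invokes the module, so the +# module's internal polling budget (tries_limit * sleep_between_checks seconds) +# multiplies with the task-level delay * retries.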
+# Maximum runtime before giving up in this case will be: +# Tries Limit * Sleep Between Checks * delay * retries + - name: Wait for aerospike migrations + community.general.aerospike_migrations: + local_only: true + sleep_between_checks: 1 + tries_limit: 5 + consecutive_good_checks: 3 + fail_on_cluster_change: true + min_cluster_size: 3 + target_cluster_size: 4 + register: migrations_check + until: migrations_check is succeeded + changed_when: false + delay: 60 + retries: 120 + - name: Another thing + ansible.builtin.shell: | + echo foo + - name: Reboot + ansible.builtin.reboot: +""" + +RETURN = r""" +# Returns only a success/failure result. Changed is always false. +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +LIB_FOUND_ERR = None +try: + import aerospike + from time import sleep + import re +except ImportError as ie: + LIB_FOUND = False + LIB_FOUND_ERR = traceback.format_exc() +else: + LIB_FOUND = True + + +def run_module(): + """run ansible module""" + module_args = dict( + host=dict(type='str', default='localhost'), + port=dict(type='int', default=3000), + connect_timeout=dict(type='int', default=1000), + consecutive_good_checks=dict(type='int', default=3), + sleep_between_checks=dict(type='int', default=60), + tries_limit=dict(type='int', default=300), + local_only=dict(type='bool', required=True), + min_cluster_size=dict(type='int', default=1), + target_cluster_size=dict(type='int'), + fail_on_cluster_change=dict(type='bool', default=True), + migrate_tx_key=dict(type='str', no_log=False, + default="migrate_tx_partitions_remaining"), + migrate_rx_key=dict(type='str', no_log=False, + default="migrate_rx_partitions_remaining") + ) + + result = dict( + changed=False, + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + if not LIB_FOUND: + module.fail_json(msg=missing_required_lib('aerospike'), + exception=LIB_FOUND_ERR) + + try: + if module.check_mode: + has_migrations, skip_reason = False, None + else: + migrations = Migrations(module) + has_migrations, skip_reason = migrations.has_migs( + module.params['local_only'] + ) + + if has_migrations: + module.fail_json(msg="Failed.", skip_reason=skip_reason) + except Exception as e: + module.fail_json(msg="Error: {0}".format(e)) + + module.exit_json(**result) + + +class Migrations: + """ Check or wait for migrations between nodes """ + + def __init__(self, module): + self.module = module + self._client = self._create_client().connect() + self._nodes = {} + self._update_nodes_list() + self._cluster_statistics = {} + self._update_cluster_statistics() + self._namespaces = set() + self._update_cluster_namespace_list() + self._build_list = set() + self._update_build_list() + self._start_cluster_key = \ + self._cluster_statistics[self._nodes[0]]['cluster_key'] + + def _create_client(self): + """ TODO: add support for auth, tls, and other special features + I won't use those features, so I'll wait until somebody complains + or does it for me (Cross fingers) + create the client object""" + config = { + 'hosts': [ + (self.module.params['host'], self.module.params['port']) + ], + 'policies': { + 'timeout': self.module.params['connect_timeout'] + } + } + return aerospike.client(config) + + def _info_cmd_helper(self, cmd, node=None, delimiter=';'): + """delimiter is for separate stats that come back, NOT for kv + separation which is =""" + if node is None: # If no node passed, use the first one (local) + node = self._nodes[0] + data = 
self._client.info_node(cmd, node) + data = data.split("\t") + if len(data) != 1 and len(data) != 2: + self.module.fail_json( + msg="Unexpected number of values returned in info command: " + + str(len(data)) + ) + # data will be in format 'command\toutput' + data = data[-1] + data = data.rstrip("\n\r") + data_arr = data.split(delimiter) + + # some commands don't return in kv format + # so we don't want a dict from those. + if '=' in data: + retval = dict( + metric.split("=", 1) for metric in data_arr + ) + else: + # if only 1 element found, and not kv, return just the value. + if len(data_arr) == 1: + retval = data_arr[0] + else: + retval = data_arr + return retval + + def _update_build_list(self): + """creates self._build_list which is a unique list + of build versions.""" + self._build_list = set() + for node in self._nodes: + build = self._info_cmd_helper('build', node) + self._build_list.add(build) + + # just checks to see if the version is 4.3 or greater + def _can_use_cluster_stable(self): + # if version <4.3 we can't use cluster-stable info cmd + # regex hack to check for versions beginning with 0-3 or + # beginning with 4.0,4.1,4.2 + if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)): + return False + return True + + def _update_cluster_namespace_list(self): + """ make a unique list of namespaces + TODO: does this work on a rolling namespace add/deletion? + thankfully if it doesn't, we don't need this on builds >=4.3""" + self._namespaces = set() + for node in self._nodes: + namespaces = self._info_cmd_helper('namespaces', node) + for namespace in namespaces: + self._namespaces.add(namespace) + + def _update_cluster_statistics(self): + """create a dict of nodes with their related stats """ + self._cluster_statistics = {} + for node in self._nodes: + self._cluster_statistics[node] = \ + self._info_cmd_helper('statistics', node) + + def _update_nodes_list(self): + """get a fresh list of all the nodes""" + self._nodes = self._client.get_nodes() + if not self._nodes: + self.module.fail_json(msg="Failed to retrieve at least 1 node.") + + def _namespace_has_migs(self, namespace, node=None): + """returns True or False. + Does the namespace have migrations for the node passed? + If no node passed, uses the local node or the first one in the list""" + namespace_stats = self._info_cmd_helper("namespace/" + namespace, node) + try: + namespace_tx = \ + int(namespace_stats[self.module.params['migrate_tx_key']]) + namespace_rx = \ + int(namespace_stats[self.module.params['migrate_rx_key']]) + except KeyError: + self.module.fail_json( + msg="Did not find partition remaining key:" + + self.module.params['migrate_tx_key'] + + " or key:" + + self.module.params['migrate_rx_key'] + + " in 'namespace/" + + namespace + + "' output." + ) + except TypeError: + self.module.fail_json( + msg="namespace stat returned was not numerical" + ) + return namespace_tx != 0 or namespace_rx != 0 + + def _node_has_migs(self, node=None): + """just calls namespace_has_migs and + if any namespace has migs returns true""" + migs = 0 + self._update_cluster_namespace_list() + for namespace in self._namespaces: + if self._namespace_has_migs(namespace, node): + migs += 1 + return migs != 0 + + def _cluster_key_consistent(self): + """create a dictionary to store what each node + returns the cluster key as.
we should end up with only 1 dict key, + with the key being the cluster key.""" + cluster_keys = {} + for node in self._nodes: + cluster_key = self._cluster_statistics[node][ + 'cluster_key'] + if cluster_key not in cluster_keys: + cluster_keys[cluster_key] = 1 + else: + cluster_keys[cluster_key] += 1 + if len(cluster_keys.keys()) == 1 and \ + self._start_cluster_key in cluster_keys: + return True + return False + + def _cluster_migrates_allowed(self): + """ensure all nodes have 'migrate_allowed' in their stats output""" + for node in self._nodes: + node_stats = self._info_cmd_helper('statistics', node) + allowed = node_stats['migrate_allowed'] + if allowed == "false": + return False + return True + + def _cluster_has_migs(self): + """calls node_has_migs for each node""" + migs = 0 + for node in self._nodes: + if self._node_has_migs(node): + migs += 1 + if migs == 0: + return False + return True + + def _has_migs(self, local): + if local: + return self._local_node_has_migs() + return self._cluster_has_migs() + + def _local_node_has_migs(self): + return self._node_has_migs(None) + + def _is_min_cluster_size(self): + """checks that all nodes in the cluster are returning the + minimum cluster size specified in their statistics output""" + sizes = set() + for node in self._cluster_statistics: + sizes.add(int(self._cluster_statistics[node]['cluster_size'])) + + if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no + return False + if (min(sizes)) >= self.module.params['min_cluster_size']: + return True + return False + + def _cluster_stable(self): + """Added 4.3: + cluster-stable:size=;ignore-migrations=;namespace= + Returns the current 'cluster_key' when the following are satisfied: + + If 'size' is specified then the target node's 'cluster-size' + must match size. + If 'ignore-migrations' is either unspecified or 'false' then + the target node's migrations counts must be zero for the provided + 'namespace' or all namespaces if 'namespace' is not provided.""" + cluster_key = set() + cluster_key.add(self._info_cmd_helper('statistics')['cluster_key']) + cmd = "cluster-stable:" + target_cluster_size = self.module.params['target_cluster_size'] + if target_cluster_size is not None: + cmd = cmd + "size=" + str(target_cluster_size) + ";" + for node in self._nodes: + try: + cluster_key.add(self._info_cmd_helper(cmd, node)) + except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception + if 'unstable-cluster' in e.msg: + return False + raise e + if len(cluster_key) == 1: + return True + return False + + def _cluster_good_state(self): + """checks a few things to make sure we're OK to say the cluster + has no migs. It could be in a unhealthy condition that does not allow + migs, or a split brain""" + if self._cluster_key_consistent() is not True: + return False, "Cluster key inconsistent." + if self._is_min_cluster_size() is not True: + return False, "Cluster min size not reached." + if self._cluster_migrates_allowed() is not True: + return False, "migrate_allowed is false somewhere." + return True, "OK." 
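The tab-and-delimiter convention that _info_cmd_helper normalizes above is easier to see in isolation. Here is a minimal, self-contained sketch of the same parsing rules; the payloads are fabricated samples for illustration, not real cluster output:

```python
# Standalone sketch of the info-response parsing used by _info_cmd_helper.
# Sample payloads below are fabricated for illustration.
def parse_info(raw, delimiter=";"):
    # A response may echo the command back, e.g. "statistics\tk1=v1;k2=v2\n".
    data = raw.split("\t")[-1].rstrip("\n\r")
    parts = data.split(delimiter)
    if "=" in data:
        # key=value stats become a dict
        return dict(part.split("=", 1) for part in parts)
    # bare values: a single value is unwrapped, several come back as a list
    return parts[0] if len(parts) == 1 else parts


print(parse_info("statistics\tcluster_key=AB12;cluster_size=3"))
# -> {'cluster_key': 'AB12', 'cluster_size': '3'}
print(parse_info("build\t4.5.0.4"))
# -> '4.5.0.4'
```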
+ + def has_migs(self, local=True): + """returns a boolean, False if no migrations otherwise True""" + consecutive_good = 0 + try_num = 0 + skip_reason = list() + while \ + try_num < int(self.module.params['tries_limit']) and \ + consecutive_good < \ + int(self.module.params['consecutive_good_checks']): + + self._update_nodes_list() + self._update_cluster_statistics() + + # These checks do not fail the run outright because + # we probably want to skip & sleep instead of failing entirely + stable, reason = self._cluster_good_state() + if stable is not True: + skip_reason.append( + "Skipping on try#" + str(try_num) + + " for reason:" + reason + ) + else: + if self._can_use_cluster_stable(): + if self._cluster_stable(): + consecutive_good += 1 + else: + consecutive_good = 0 + skip_reason.append( + "Skipping on try#" + str(try_num) + + " for reason:" + " cluster_stable" + ) + elif self._has_migs(local): + # print("_has_migs") + skip_reason.append( + "Skipping on try#" + str(try_num) + + " for reason:" + " migrations" + ) + consecutive_good = 0 + else: + consecutive_good += 1 + if consecutive_good == self.module.params[ + 'consecutive_good_checks']: + break + try_num += 1 + sleep(self.module.params['sleep_between_checks']) + # print(skip_reason) + if consecutive_good == self.module.params['consecutive_good_checks']: + return False, None + return True, skip_reason + + +def main(): + """main method for ansible module""" + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py deleted file mode 120000 index d6f85ffb7b..0000000000 --- a/plugins/modules/airbrake_deployment.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/airbrake_deployment.py \ No newline at end of file diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py new file mode 100644 index 0000000000..d55c04fa52 --- /dev/null +++ b/plugins/modules/airbrake_deployment.py @@ -0,0 +1,166 @@ +#!/usr/bin/python + +# Copyright 2013 Bruce Pennypacker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: airbrake_deployment +author: + - "Bruce Pennypacker (@bpennypacker)" + - "Patrick Humpal (@phumpal)" +short_description: Notify airbrake about app deployments +description: + - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + project_id: + description: + - Airbrake PROJECT_ID. + required: true + type: str + version_added: '0.2.0' + project_key: + description: + - Airbrake PROJECT_KEY. + required: true + type: str + version_added: '0.2.0' + environment: + description: + - The airbrake environment name, typically V(production), V(staging), and so on. + required: true + type: str + user: + description: + - The username of the person doing the deployment. + required: false + type: str + repo: + description: + - URL of the project repository. + required: false + type: str + revision: + description: + - A hash, number, tag, or other identifier showing what revision from version control was deployed. + required: false + type: str + version: + description: + - A string identifying what version was deployed.
+ required: false + type: str + version_added: '1.0.0' + url: + description: + - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. + required: false + default: "https://api.airbrake.io/api/v4/projects/" + type: str + validate_certs: + description: + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. + required: false + default: true + type: bool + +requirements: [] +""" + +EXAMPLES = r""" +- name: Notify airbrake about an app deployment + community.general.airbrake_deployment: + project_id: '12345' + project_key: 'AAAAAA' + environment: staging + user: ansible + revision: '4.2' + +- name: Notify airbrake about an app deployment, using git hash as revision + community.general.airbrake_deployment: + project_id: '12345' + project_key: 'AAAAAA' + environment: staging + user: ansible + revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15' + version: '0.2.0' +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +# =========================================== +# Module execution. +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + project_id=dict(required=True, no_log=True, type='str'), + project_key=dict(required=True, no_log=True, type='str'), + environment=dict(required=True, type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + version=dict(type='str'), + url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'), + validate_certs=dict(default=True, type='bool'), + ), + supports_check_mode=True, + ) + + # Build list of params + params = {} + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4 + if module.params["environment"]: + params["environment"] = module.params["environment"] + + if module.params["user"]: + params["username"] = module.params["user"] + + if module.params["repo"]: + params["repository"] = module.params["repo"] + + if module.params["revision"]: + params["revision"] = module.params["revision"] + + if module.params["version"]: + params["version"] = module.params["version"] + + # Build deploy url + url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"] + json_body = module.jsonify(params) + + # Build header + headers = {'Content-Type': 'application/json'} + + # Notify Airbrake of deploy + response, info = fetch_url(module, url, data=json_body, + headers=headers, method='POST') + + if info['status'] == 200 or info['status'] == 201: + module.exit_json(changed=True) + else: + module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/aix_devices.py b/plugins/modules/aix_devices.py deleted file mode 120000 index 4256b40426..0000000000 --- a/plugins/modules/aix_devices.py +++ /dev/null @@ -1 +0,0 @@ -./system/aix_devices.py \ No newline at end of file diff --git a/plugins/modules/aix_devices.py b/plugins/modules/aix_devices.py new file mode 100644 index 0000000000..a525f6fe05 --- /dev/null +++ b/plugins/modules/aix_devices.py @@ -0,0 +1,374 @@ +#!/usr/bin/python + +# Copyright (c) 2017, 2018 Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Kairo Araujo (@kairoaraujo) +module: aix_devices +short_description: Manages AIX devices +description: + - This module discovers, defines, removes and modifies attributes of AIX devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + attributes: + description: + - A list of device attributes. + type: dict + device: + description: + - The name of the device. + - V(all) is valid to rescan all C(available) devices (AIX C(cfgmgr) command). + type: str + force: + description: + - Forces action. + type: bool + default: false + recursive: + description: + - Removes or defines a device and children devices. + type: bool + default: false + state: + description: + - Controls the device state. + - V(available) (alias V(present)) rescans a specific device or all devices (when O(device) is not specified). + - V(removed) (alias V(absent)) removes a device. + - V(defined) changes device to Defined state. + type: str + choices: [available, defined, removed] + default: available +""" + +EXAMPLES = r""" +- name: Scan new devices + community.general.aix_devices: + device: all + state: available + +- name: Scan new virtual devices (vio0) + community.general.aix_devices: + device: vio0 + state: available + +- name: Removing IP alias to en0 + community.general.aix_devices: + device: en0 + attributes: + delalias4: 10.0.0.100,255.255.255.0 + +- name: Removes ent2 + community.general.aix_devices: + device: ent2 + state: removed + +- name: Put device en2 in Defined + community.general.aix_devices: + device: en2 + state: defined + +- name: Removes ent4 (non-existent). + community.general.aix_devices: + device: ent4 + state: removed + +- name: Put device en4 in Defined (non-existent) + community.general.aix_devices: + device: en4 + state: defined + +- name: Put vscsi1 and children devices in Defined state. + community.general.aix_devices: + device: vscsi1 + recursive: true + state: defined + +- name: Removes vscsi1 and children devices. + community.general.aix_devices: + device: vscsi1 + recursive: true + state: removed + +- name: Changes en1 mtu to 9000 and disables arp. + community.general.aix_devices: + device: en1 + attributes: + mtu: 9000 + arp: 'off' + state: available + +- name: Configure IP, netmask and set en1 up. + community.general.aix_devices: + device: en1 + attributes: + netaddr: 192.168.0.100 + netmask: 255.255.255.0 + state: up + state: available + +- name: Adding IP alias to en0 + community.general.aix_devices: + device: en0 + attributes: + alias4: 10.0.0.100,255.255.255.0 + state: available +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule + + +def _check_device(module, device): + """ + Check if the device already exists and return its state. + Args: + module: Ansible module. + device: device to be checked. + + Returns: bool, device state + + """ + lsdev_cmd = module.get_bin_path('lsdev', True) + rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device]) + + if rc != 0: + module.fail_json(msg="Failed to run lsdev", rc=rc, err=err) + + if lsdev_out: + device_state = lsdev_out.split()[1] + return True, device_state + + device_state = None + return False, device_state + + +def _check_device_attr(module, device, attr): + """ + + Args: + module: Ansible module. + device: device to check attributes.
+ attr: attribute to be checked. + + Returns: the current attribute value ('' for hidden attributes, None if the attribute is invalid). + + """ + lsattr_cmd = module.get_bin_path('lsattr', True) + rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr]) + + hidden_attrs = ['delalias4', 'delalias6'] + + if rc == 255: + + if attr in hidden_attrs: + current_param = '' + else: + current_param = None + + return current_param + + elif rc != 0: + module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err) + + current_param = lsattr_out.split()[1] + return current_param + + +def discover_device(module, device): + """ Discover AIX devices.""" + cfgmgr_cmd = module.get_bin_path('cfgmgr', True) + + if device is not None: + device = "-l %s" % device + + else: + device = '' + + changed = True + msg = '' + if not module.check_mode: + rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device]) + changed = True + msg = cfgmgr_out + + return changed, msg + + +def change_device_attr(module, attributes, device, force): + """ Change AIX device attribute. """ + + attr_changed = [] + attr_not_changed = [] + attr_invalid = [] + chdev_cmd = module.get_bin_path('chdev', True) + + for attr in list(attributes.keys()): + new_param = attributes[attr] + current_param = _check_device_attr(module, device, attr) + + if current_param is None: + attr_invalid.append(attr) + + elif current_param != new_param: + if force: + cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force] + else: + cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])] + + if not module.check_mode: + rc, chdev_out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to run chdev.", rc=rc, err=err) + + attr_changed.append(attributes[attr]) + else: + attr_not_changed.append(attributes[attr]) + + if len(attr_changed) > 0: + changed = True + attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed) + else: + changed = False + attr_changed_msg = '' + + if len(attr_not_changed) > 0: + attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed) + else: + attr_not_changed_msg = '' + + if len(attr_invalid) > 0: + attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid) + else: + attr_invalid_msg = '' + + msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg) + + return changed, msg + + +def remove_device(module, device, force, recursive, state): + """ Puts device in defined state or removes device.
""" + + state_opt = { + 'removed': '-d', + 'absent': '-d', + 'defined': '' + } + + recursive_opt = { + True: '-R', + False: '' + } + + recursive = recursive_opt[recursive] + state = state_opt[state] + + changed = True + msg = '' + rmdev_cmd = module.get_bin_path('rmdev', True) + + if not module.check_mode: + if state: + rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force]) + else: + rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive]) + + if rc != 0: + module.fail_json(msg="Failed to run rmdev", rc=rc, err=err) + + msg = rmdev_out + + return changed, msg + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + attributes=dict(type='dict'), + device=dict(type='str'), + force=dict(type='bool', default=False), + recursive=dict(type='bool', default=False), + state=dict(type='str', default='available', choices=['available', 'defined', 'removed']), + ), + supports_check_mode=True, + ) + + force_opt = { + True: '-f', + False: '', + } + + attributes = module.params['attributes'] + device = module.params['device'] + force = force_opt[module.params['force']] + recursive = module.params['recursive'] + state = module.params['state'] + + result = dict( + changed=False, + msg='', + ) + + if state == 'available' or state == 'present': + if attributes: + # change attributes on device + device_status, device_state = _check_device(module, device) + if device_status: + result['changed'], result['msg'] = change_device_attr(module, attributes, device, force) + else: + result['msg'] = "Device %s does not exist." % device + + else: + # discovery devices (cfgmgr) + if device and device != 'all': + device_status, device_state = _check_device(module, device) + if device_status: + # run cfgmgr on specific device + result['changed'], result['msg'] = discover_device(module, device) + + else: + result['msg'] = "Device %s does not exist." % device + + else: + result['changed'], result['msg'] = discover_device(module, device) + + elif state == 'removed' or state == 'absent' or state == 'defined': + if not device: + result['msg'] = "device is required to removed or defined state." + + else: + # Remove device + check_device, device_state = _check_device(module, device) + if check_device: + if state == 'defined' and device_state == 'Defined': + result['changed'] = False + result['msg'] = 'Device %s already in Defined' % device + + else: + result['changed'], result['msg'] = remove_device(module, device, force, recursive, state) + + else: + result['msg'] = "Device %s does not exist." % device + + else: + result['msg'] = "Unexpected state %s." 
% state + module.fail_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py deleted file mode 120000 index 4c5a1761e8..0000000000 --- a/plugins/modules/aix_filesystem.py +++ /dev/null @@ -1 +0,0 @@ -./system/aix_filesystem.py \ No newline at end of file diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py new file mode 100644 index 0000000000..58d49c0252 --- /dev/null +++ b/plugins/modules/aix_filesystem.py @@ -0,0 +1,604 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Kairo Araujo (@kairoaraujo) +module: aix_filesystem +short_description: Configure LVM and NFS file systems for AIX +description: + - This module creates, removes, mount and unmount LVM and NFS file system for AIX using C(/etc/filesystems). + - For LVM file systems is possible to resize a file system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + account_subsystem: + description: + - Specifies whether the file system is to be processed by the accounting subsystem. + type: bool + default: false + attributes: + description: + - Specifies attributes for files system separated by comma. + type: list + elements: str + default: + - agblksize=4096 + - isnapshot=no + auto_mount: + description: + - File system is automatically mounted at system restart. + type: bool + default: true + device: + description: + - Logical volume (LV) device name or remote export device to create a NFS file system. + - It is used to create a file system on an already existing logical volume or the exported NFS file system. + - If not mentioned a new logical volume name is created following AIX standards (LVM). + type: str + fs_type: + description: + - Specifies the virtual file system type. + type: str + default: jfs2 + permissions: + description: + - Set file system permissions. V(rw) (read-write) or V(ro) (read-only). + type: str + choices: [ro, rw] + default: rw + mount_group: + description: + - Specifies the mount group. + type: str + filesystem: + description: + - Specifies the mount point, which is the directory where the file system will be mounted. + type: str + required: true + nfs_server: + description: + - Specifies a Network File System (NFS) server. + type: str + rm_mount_point: + description: + - Removes the mount point directory when used with state V(absent). + type: bool + default: false + size: + description: + - Specifies the file system size. + - For already present it resizes the filesystem. + - 512-byte blocks, megabytes or gigabytes. If the value has M specified it is in megabytes. If the value has G specified + it is in gigabytes. + - If no M or G the value is 512-byte blocks. + - If V(+) is specified in begin of value, the value is added. + - If V(-) is specified in begin of value, the value is removed. + - If neither V(+) nor V(-) is specified, then the total value is the specified. + - Size respects the LVM AIX standards. + type: str + state: + description: + - Controls the file system state. + - V(present) check if file system exists, creates or resize. + - V(absent) removes existing file system if already V(unmounted). 
+ - V(mounted) checks if the file system is mounted or mount the file system. + - V(unmounted) check if the file system is unmounted or unmount the file system. + type: str + choices: [absent, mounted, present, unmounted] + default: present + vg: + description: + - Specifies an existing volume group (VG). + type: str +notes: + - For more O(attributes), please check "crfs" AIX manual. +""" + +EXAMPLES = r""" +- name: Create filesystem in a previously defined logical volume. + community.general.aix_filesystem: + device: testlv + filesystem: /testfs + state: present + +- name: Creating NFS filesystem from nfshost. + community.general.aix_filesystem: + device: /home/ftp + nfs_server: nfshost + filesystem: /home/ftp + state: present + +- name: Creating a new file system without a previously logical volume. + community.general.aix_filesystem: + filesystem: /newfs + size: 1G + state: present + vg: datavg + +- name: Unmounting /testfs. + community.general.aix_filesystem: + filesystem: /testfs + state: unmounted + +- name: Resizing /mksysb to +512M. + community.general.aix_filesystem: + filesystem: /mksysb + size: +512M + state: present + +- name: Resizing /mksysb to 11G. + community.general.aix_filesystem: + filesystem: /mksysb + size: 11G + state: present + +- name: Resizing /mksysb to -2G. + community.general.aix_filesystem: + filesystem: /mksysb + size: -2G + state: present + +- name: Remove NFS filesystem /home/ftp. + community.general.aix_filesystem: + filesystem: /home/ftp + rm_mount_point: true + state: absent + +- name: Remove /newfs. + community.general.aix_filesystem: + filesystem: /newfs + rm_mount_point: true + state: absent +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils._mount import ismount +import re + + +def _fs_exists(module, filesystem): + """ + Check if file system already exists on /etc/filesystems. + + :param module: Ansible module. + :param community.general.filesystem: filesystem name. + :return: True or False. + """ + lsfs_cmd = module.get_bin_path('lsfs', True) + rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem]) + if rc == 1: + if re.findall("No record matching", err): + return False + + else: + module.fail_json(msg="Failed to run lsfs. Error message: %s" % err) + + else: + + return True + + +def _check_nfs_device(module, nfs_host, device): + """ + Validate if NFS server is exporting the device (remote export). + + :param module: Ansible module. + :param nfs_host: nfs_host parameter, NFS server. + :param device: device parameter, remote export. + :return: True or False. + """ + showmount_cmd = module.get_bin_path('showmount', True) + rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host]) + if rc != 0: + module.fail_json(msg="Failed to run showmount. Error message: %s" % err) + else: + showmount_data = showmount_out.splitlines() + for line in showmount_data: + if line.split(':')[1] == device: + return True + + return False + + +def _validate_vg(module, vg): + """ + Check the current state of volume group. + + :param module: Ansible module argument spec. + :param vg: Volume Group name. + :return: True (VG in varyon state) or False (VG in varyoff state) or + None (VG does not exist), message. + """ + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) + if rc != 0: + module.fail_json(msg="Failed executing %s command." 
% lsvg_cmd) + + rc, current_all_vgs, err = module.run_command([lsvg_cmd]) + if rc != 0: + module.fail_json(msg="Failed executing %s command." % lsvg_cmd) + + if vg in current_all_vgs and vg not in current_active_vgs: + msg = "Volume group %s is in varyoff state." % vg + return False, msg + elif vg in current_active_vgs: + msg = "Volume group %s is in varyon state." % vg + return True, msg + else: + msg = "Volume group %s does not exist." % vg + return None, msg + + +def resize_fs(module, filesystem, size): + """ Resize LVM file system. """ + + chfs_cmd = module.get_bin_path('chfs', True) + if not module.check_mode: + rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem]) + + if rc == 28: + changed = False + return changed, chfs_out + elif rc != 0: + if re.findall('Maximum allocation for logical', err): + changed = False + return changed, err + else: + module.fail_json(msg="Failed to run chfs. Error message: %s" % err) + + else: + if re.findall('The filesystem size is already', chfs_out): + changed = False + else: + changed = True + + return changed, chfs_out + else: + changed = True + msg = '' + + return changed, msg + + +def create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, + account_subsystem, permissions, nfs_server, attributes): + """ Create LVM file system or NFS remote mount point. """ + + attributes = ' -a '.join(attributes) + + # Parameters definition. + account_subsys_opt = { + True: '-t yes', + False: '-t no' + } + + if nfs_server is not None: + auto_mount_opt = { + True: '-A', + False: '-a' + } + + else: + auto_mount_opt = { + True: '-A yes', + False: '-A no' + } + + if size is None: + size = '' + else: + size = "-a size=%s" % size + + if device is None: + device = '' + else: + device = "-d %s" % device + + if vg is None: + vg = '' + else: + vg_state, msg = _validate_vg(module, vg) + if vg_state: + vg = "-g %s" % vg + else: + changed = False + + return changed, msg + + if mount_group is None: + mount_group = '' + + else: + mount_group = "-u %s" % mount_group + + auto_mount = auto_mount_opt[auto_mount] + account_subsystem = account_subsys_opt[account_subsystem] + + if nfs_server is not None: + # Creates a NFS file system. + mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True) + if not module.check_mode: + rc, mknfsmnt_out, err = module.run_command([mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"]) + if rc != 0: + module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err) + else: + changed = True + msg = "NFS file system %s created." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + else: + # Creates a LVM file system. 
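+ # Each '-flag value' option string prepared above (for example '-g datavg') is + # split into separate argv elements below, because run_command receives a list + # rather than a shell string.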
+ crfs_cmd = module.get_bin_path('crfs', True) + if not module.check_mode: + cmd = [crfs_cmd] + + cmd.append("-v") + cmd.append(fs_type) + + if vg: + (flag, value) = vg.split() + cmd.append(flag) + cmd.append(value) + + if device: + (flag, value) = device.split() + cmd.append(flag) + cmd.append(value) + + cmd.append("-m") + cmd.append(filesystem) + + if mount_group: + (flag, value) = mount_group.split() + cmd.append(flag) + cmd.append(value) + + if auto_mount: + (flag, value) = auto_mount.split() + cmd.append(flag) + cmd.append(value) + + if account_subsystem: + (flag, value) = account_subsystem.split() + cmd.append(flag) + cmd.append(value) + + cmd.append("-p") + cmd.append(permissions) + + if size: + (flag, value) = size.split() + cmd.append(flag) + cmd.append(value) + + if attributes: + splitted_attributes = attributes.split() + cmd.append("-a") + for value in splitted_attributes: + cmd.append(value) + + rc, crfs_out, err = module.run_command(cmd) + + if rc == 10: + module.exit_json( + msg="Using a existent previously defined logical volume, " + "volume group needs to be empty. %s" % err) + + elif rc != 0: + module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + + else: + changed = True + return changed, crfs_out + else: + changed = True + msg = '' + + return changed, msg + + +def remove_fs(module, filesystem, rm_mount_point): + """ Remove an LVM file system or NFS entry. """ + + # Command parameters. + rm_mount_point_opt = { + True: '-r', + False: '' + } + + rm_mount_point = rm_mount_point_opt[rm_mount_point] + + rmfs_cmd = module.get_bin_path('rmfs', True) + if not module.check_mode: + cmd = [rmfs_cmd, "-r", rm_mount_point, filesystem] + rc, rmfs_out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + else: + changed = True + msg = rmfs_out + if not rmfs_out: + msg = "File system %s removed." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def mount_fs(module, filesystem): + """ Mount a file system. """ + mount_cmd = module.get_bin_path('mount', True) + + if not module.check_mode: + rc, mount_out, err = module.run_command([mount_cmd, filesystem]) + if rc != 0: + module.fail_json(msg="Failed to run mount. Error message: %s" % err) + else: + changed = True + msg = "File system %s mounted." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def unmount_fs(module, filesystem): + """ Unmount a file system.""" + unmount_cmd = module.get_bin_path('unmount', True) + + if not module.check_mode: + rc, unmount_out, err = module.run_command([unmount_cmd, filesystem]) + if rc != 0: + module.fail_json(msg="Failed to run unmount. Error message: %s" % err) + else: + changed = True + msg = "File system %s unmounted." 
% filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + account_subsystem=dict(type='bool', default=False), + attributes=dict(type='list', elements='str', default=["agblksize=4096", "isnapshot=no"]), + auto_mount=dict(type='bool', default=True), + device=dict(type='str'), + filesystem=dict(type='str', required=True), + fs_type=dict(type='str', default='jfs2'), + permissions=dict(type='str', default='rw', choices=['rw', 'ro']), + mount_group=dict(type='str'), + nfs_server=dict(type='str'), + rm_mount_point=dict(type='bool', default=False), + size=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']), + vg=dict(type='str'), + ), + supports_check_mode=True, + ) + + account_subsystem = module.params['account_subsystem'] + attributes = module.params['attributes'] + auto_mount = module.params['auto_mount'] + device = module.params['device'] + fs_type = module.params['fs_type'] + permissions = module.params['permissions'] + mount_group = module.params['mount_group'] + filesystem = module.params['filesystem'] + nfs_server = module.params['nfs_server'] + rm_mount_point = module.params['rm_mount_point'] + size = module.params['size'] + state = module.params['state'] + vg = module.params['vg'] + + result = dict( + changed=False, + msg='', + ) + + if state == 'present': + fs_mounted = ismount(filesystem) + fs_exists = _fs_exists(module, filesystem) + + # Check if fs is mounted or exists. + if fs_mounted or fs_exists: + result['msg'] = "File system %s already exists." % filesystem + result['changed'] = False + + # If parameter size was passed, resize fs. + if size is not None: + result['changed'], result['msg'] = resize_fs(module, filesystem, size) + + # If fs doesn't exist, create it. + else: + # Check if fs will be a NFS device. + if nfs_server is not None: + if device is None: + result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.' + module.fail_json(**result) + else: + # Create a fs from NFS export. + if _check_nfs_device(module, nfs_server, device): + result['changed'], result['msg'] = create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + + if device is None: + if vg is None: + result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.' + module.fail_json(**result) + else: + # Create a fs from + result['changed'], result['msg'] = create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + + if device is not None and nfs_server is None: + # Create a fs from a previously lv device. + result['changed'], result['msg'] = create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + + elif state == 'absent': + if ismount(filesystem): + result['msg'] = "File system %s mounted." % filesystem + + else: + fs_status = _fs_exists(module, filesystem) + if not fs_status: + result['msg'] = "File system %s does not exist." % filesystem + else: + result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point) + + elif state == 'mounted': + if ismount(filesystem): + result['changed'] = False + result['msg'] = "File system %s already mounted." 
% filesystem + else: + result['changed'], result['msg'] = mount_fs(module, filesystem) + + elif state == 'unmounted': + if not ismount(filesystem): + result['changed'] = False + result['msg'] = "File system %s already unmounted." % filesystem + else: + result['changed'], result['msg'] = unmount_fs(module, filesystem) + + else: + # Unreachable codeblock + result['msg'] = "Unexpected state %s." % state + module.fail_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py deleted file mode 120000 index 7d7865a8f6..0000000000 --- a/plugins/modules/aix_inittab.py +++ /dev/null @@ -1 +0,0 @@ -./system/aix_inittab.py \ No newline at end of file diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py new file mode 100644 index 0000000000..407992ceba --- /dev/null +++ b/plugins/modules/aix_inittab.py @@ -0,0 +1,237 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Joris Weijters +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Joris Weijters (@molekuul) +module: aix_inittab +short_description: Manages the C(inittab) on AIX +description: + - Manages the C(inittab) on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the C(inittab) entry. + type: str + required: true + aliases: [service] + runlevel: + description: + - Runlevel of the entry. + type: str + required: true + action: + description: + - The action that init has to take for this entry. + type: str + choices: + - boot + - bootwait + - hold + - initdefault + - 'off' + - once + - ondemand + - powerfail + - powerwait + - respawn + - sysinit + - wait + command: + description: + - The command that has to run. + type: str + required: true + insertafter: + description: + - The inittab entry after which the new entry is inserted. + type: str + state: + description: + - Whether the entry should be present or absent in the inittab file. + type: str + choices: [absent, present] + default: present +notes: + - The changes are persistent across reboots. + - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands. + - Tested on AIX 7.1. +requirements: + - itertools +""" + +EXAMPLES = r""" +# Add service startmyservice to the inittab, directly after service existingservice. +- name: Add startmyservice to inittab + community.general.aix_inittab: + name: startmyservice + runlevel: 4 + action: once + command: echo hello + insertafter: existingservice + state: present + become: true + +# Change inittab entry startmyservice to runlevel "2" and process action "wait". +- name: Change startmyservice to inittab + community.general.aix_inittab: + name: startmyservice + runlevel: 2 + action: wait + command: echo hello + state: present + become: true + +- name: Remove startmyservice from inittab + community.general.aix_inittab: + name: startmyservice + runlevel: 2 + action: wait + command: echo hello + state: absent + become: true +""" + +RETURN = r""" +name: + description: Name of the adjusted C(inittab) entry. + returned: always + type: str + sample: startmyservice +""" + + +from ansible.module_utils.basic import AnsibleModule + +# end import modules +# start defining the functions + + +def check_current_entry(module): + # Check if the entry exists. If it does not, return exist=False in the + # return dict; if it does, return exist=True plus the entry fields. + existsdict = {'exist': False} + lsitab = module.get_bin_path('lsitab') + (rc, out, err) = module.run_command([lsitab, module.params['name']]) + if rc == 0: + keys = ('name', 'runlevel', 'action', 'command') + values = out.split(":") + # strip non-readable characters such as \n + values = map(lambda s: s.strip(), values) + existsdict = dict(zip(keys, values)) + existsdict.update({'exist': True}) + return existsdict + + +def main(): + # initialize + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, aliases=['service']), + runlevel=dict(type='str', required=True), + action=dict(type='str', choices=[ + 'boot', + 'bootwait', + 'hold', + 'initdefault', + 'off', + 'once', + 'ondemand', + 'powerfail', + 'powerwait', + 'respawn', + 'sysinit', + 'wait', + ]), + command=dict(type='str', required=True), + insertafter=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + result = { + 'name': module.params['name'], + 'changed': False, + 'msg': "" + } + + # Find command line tool paths + mkitab = module.get_bin_path('mkitab') + rmitab = module.get_bin_path('rmitab') + chitab = module.get_bin_path('chitab') + rc = 0 + err = None + + # check if the new entry exists + current_entry = check_current_entry(module) + + # if action is install or change, + if module.params['state'] == 'present': + + # create new entry string + new_entry = module.params['name'] + ":" + module.params['runlevel'] + \ + ":" + module.params['action'] + ":" + module.params['command'] + + # If the entry does not exist or the fields differ, the entry will be + # created or changed + if (not current_entry['exist']) or ( + module.params['runlevel'] != current_entry['runlevel'] or + module.params['action'] != current_entry['action'] or + module.params['command'] != current_entry['command']): + + # If the entry does exist then change the entry + if current_entry['exist']: + if not module.check_mode: + (rc, out, err) = module.run_command([chitab, new_entry]) + if rc != 0: + module.fail_json( + msg="could not change inittab", rc=rc, err=err) + result['msg'] = "changed inittab entry" + " " + current_entry['name'] + result['changed'] = True + + # If the entry does not exist create the entry + elif not current_entry['exist']: + if module.params['insertafter']: + if not module.check_mode: + (rc, out, err) = module.run_command( + [mkitab, '-i', module.params['insertafter'], new_entry]) + else: + if not module.check_mode: + (rc, out, err) = module.run_command( + [mkitab, new_entry]) + + if rc != 0: + module.fail_json(msg="could not adjust inittab", rc=rc, err=err) + result['msg'] = "added inittab entry" + " " + module.params['name'] + result['changed'] = True + + elif module.params['state'] == 'absent': + # If the action is remove and the entry exists then remove the entry + if current_entry['exist']: + if not module.check_mode: + (rc, out, err) = module.run_command( + [rmitab, module.params['name']]) + if rc != 0: + module.fail_json( + msg="could not remove entry from inittab", rc=rc, err=err) + result['msg'] = "removed inittab entry" + " " + current_entry['name'] + result['changed'] =
True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py deleted file mode 120000 index d60c39d3a8..0000000000 --- a/plugins/modules/aix_lvg.py +++ /dev/null @@ -1 +0,0 @@ -./system/aix_lvg.py \ No newline at end of file diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py new file mode 100644 index 0000000000..7afc58e2f7 --- /dev/null +++ b/plugins/modules/aix_lvg.py @@ -0,0 +1,368 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Kairo Araujo (@kairoaraujo) +module: aix_lvg +short_description: Manage LVM volume groups on AIX +description: + - This module creates, removes or resize volume groups on AIX LVM. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + force: + description: + - Force volume group creation. + type: bool + default: false + pp_size: + description: + - The size of the physical partition in megabytes. + type: int + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or extending (V(present) state) the volume group. + - If not informed reducing (V(absent) state) the volume group is removed. + type: list + elements: str + state: + description: + - Control if the volume group exists and volume group AIX state varyonvg V(varyon) or varyoffvg V(varyoff). + type: str + choices: [absent, present, varyoff, varyon] + default: present + vg: + description: + - The name of the volume group. + type: str + required: true + vg_type: + description: + - The type of the volume group. + type: str + choices: [big, normal, scalable] + default: normal +notes: + - AIX allows removing VG only if all LV/Filesystems are not busy. + - Module does not modify PP size for already present volume group. +""" + +EXAMPLES = r""" +- name: Create a volume group datavg + community.general.aix_lvg: + vg: datavg + pp_size: 128 + vg_type: scalable + state: present + +- name: Removing a volume group datavg + community.general.aix_lvg: + vg: datavg + state: absent + +- name: Extending rootvg + community.general.aix_lvg: + vg: rootvg + pvs: hdisk1 + state: present + +- name: Reducing rootvg + community.general.aix_lvg: + vg: rootvg + pvs: hdisk1 + state: absent +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule + + +def _validate_pv(module, vg, pvs): + """ + Function to validate if the physical volume (PV) is not already in use by + another volume group or Oracle ASM. + + :param module: Ansible module argument spec. + :param vg: Volume group name. + :param pvs: Physical volume list. + :return: [bool, message] or module.fail_json for errors. + """ + + lspv_cmd = module.get_bin_path('lspv', True) + rc, current_lspv, stderr = module.run_command([lspv_cmd]) + if rc != 0: + module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr) + + for pv in pvs: + # Get pv list. + lspv_list = {} + for line in current_lspv.splitlines(): + pv_data = line.split() + lspv_list[pv_data[0]] = pv_data[2] + + # Check if pv exists and is free. + if pv not in lspv_list.keys(): + module.fail_json(msg="Physical volume '%s' doesn't exist." 
% pv) + + if lspv_list[pv] == 'None': + # Disk None, looks free. + # Check if PV is not already in use by Oracle ASM. + lquerypv_cmd = module.get_bin_path('lquerypv', True) + rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"]) + if rc != 0: + module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) + + if 'ORCLDISK' in current_lquerypv: + module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv) + + msg = "Physical volume '%s' is ok to be used." % pv + return True, msg + + # Check if PV is already in use for the same vg. + elif vg != lspv_list[pv]: + module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv])) + + msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv]) + return False, msg + + +def _validate_vg(module, vg): + """ + Check the current state of volume group. + + :param module: Ansible module argument spec. + :param vg: Volume Group name. + :return: True (VG in varyon state) or False (VG in varyoff state) or + None (VG does not exist), message. + """ + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) + if rc != 0: + module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + + rc, current_all_vgs, err = module.run_command([lsvg_cmd]) + if rc != 0: + module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + + if vg in current_all_vgs and vg not in current_active_vgs: + msg = "Volume group '%s' is in varyoff state." % vg + return False, msg + + if vg in current_active_vgs: + msg = "Volume group '%s' is in varyon state." % vg + return True, msg + + msg = "Volume group '%s' does not exist." % vg + return None, msg + + +def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): + """ Creates or extend a volume group. """ + + # Command option parameters. + force_opt = { + True: '-f', + False: '' + } + + vg_opt = { + 'normal': '', + 'big': '-B', + 'scalable': '-S', + } + + # Validate if PV are not already in use. + pv_state, msg = _validate_pv(module, vg, pvs) + if not pv_state: + changed = False + return changed, msg + + vg_state, msg = vg_validation + if vg_state is False: + changed = False + return changed, msg + + elif vg_state is True: + # Volume group extension. + changed = True + msg = "" + + if not module.check_mode: + extendvg_cmd = module.get_bin_path('extendvg', True) + rc, output, err = module.run_command([extendvg_cmd, vg] + pvs) + if rc != 0: + changed = False + msg = "Extending volume group '%s' has failed." % vg + return changed, msg + + msg = "Volume group '%s' extended." % vg + return changed, msg + + elif vg_state is None: + # Volume group creation. + changed = True + msg = '' + + if not module.check_mode: + mkvg_cmd = module.get_bin_path('mkvg', True) + rc, output, err = module.run_command([mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], "-y", vg] + pvs) + if rc != 0: + changed = False + msg = "Creating volume group '%s' failed." % vg + return changed, msg + + msg = "Volume group '%s' created." % vg + return changed, msg + + +def reduce_vg(module, vg, pvs, vg_validation): + vg_state, msg = vg_validation + + if vg_state is False: + changed = False + return changed, msg + + elif vg_state is None: + changed = False + return changed, msg + + # Define pvs_to_remove (list of physical volumes to be removed). + if pvs is None: + # Remove VG if pvs are note informed. 
+        lsvg_cmd = module.get_bin_path('lsvg', True)
+        rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg])
+        if rc != 0:
+            module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+        pvs_to_remove = []
+        for line in current_pvs.splitlines()[2:]:
+            pvs_to_remove.append(line.split()[0])
+
+        reduce_msg = "Volume group '%s' removed." % vg
+    else:
+        pvs_to_remove = pvs
+        reduce_msg = ("Physical volume(s) '%s' removed from volume group '%s'." % (' '.join(pvs_to_remove), vg))
+
+    # Reduce the volume group.
+    if not pvs_to_remove:
+        changed = False
+        msg = "No physical volumes to remove."
+        return changed, msg
+
+    changed = True
+    msg = ''
+
+    if not module.check_mode:
+        reducevg_cmd = module.get_bin_path('reducevg', True)
+        rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove)
+        if rc != 0:
+            module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
+
+    msg = reduce_msg
+    return changed, msg
+
+
+def state_vg(module, vg, state, vg_validation):
+    vg_state, msg = vg_validation
+
+    if vg_state is None:
+        module.fail_json(msg=msg)
+
+    if state == 'varyon':
+        if vg_state is True:
+            changed = False
+            return changed, msg
+
+        changed = True
+        msg = ''
+        if not module.check_mode:
+            varyonvg_cmd = module.get_bin_path('varyonvg', True)
+            rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg])
+            if rc != 0:
+                module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
+
+        msg = "Varyon volume group %s completed." % vg
+        return changed, msg
+
+    elif state == 'varyoff':
+        if vg_state is False:
+            changed = False
+            return changed, msg
+
+        changed = True
+        msg = ''
+
+        if not module.check_mode:
+            varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
+            rc, varyoffvg_out, stderr = module.run_command([varyoffvg_cmd, vg])
+            if rc != 0:
+                module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)
+
+        msg = "Varyoff volume group %s completed." % vg
+        return changed, msg
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            force=dict(type='bool', default=False),
+            pp_size=dict(type='int'),
+            pvs=dict(type='list', elements='str'),
+            state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
+            vg=dict(type='str', required=True),
+            vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
+        ),
+        supports_check_mode=True,
+    )
+
+    force = module.params['force']
+    pp_size = module.params['pp_size']
+    pvs = module.params['pvs']
+    state = module.params['state']
+    vg = module.params['vg']
+    vg_type = module.params['vg_type']
+
+    if pp_size is None:
+        pp_size = []
+    else:
+        # Keep '-s' and its value as separate argv elements for run_command().
+        pp_size = ["-s", str(pp_size)]
+
+    vg_validation = _validate_vg(module, vg)
+
+    if state == 'present':
+        if not pvs:
+            changed = False
+            msg = "pvs is required for state 'present'."
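+            # Creating or extending always needs at least one physical volume;
+            # reducing (state=absent) without 'pvs' removes the whole VG instead.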
+ module.fail_json(msg=msg) + else: + changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation) + + elif state == 'absent': + changed, msg = reduce_vg(module, vg, pvs, vg_validation) + + elif state == 'varyon' or state == 'varyoff': + changed, msg = state_vg(module, vg, state, vg_validation) + + else: + changed = False + msg = "Unexpected state" + + module.exit_json(changed=changed, msg=msg, state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/aix_lvol.py b/plugins/modules/aix_lvol.py deleted file mode 120000 index 8ceecdc244..0000000000 --- a/plugins/modules/aix_lvol.py +++ /dev/null @@ -1 +0,0 @@ -./system/aix_lvol.py \ No newline at end of file diff --git a/plugins/modules/aix_lvol.py b/plugins/modules/aix_lvol.py new file mode 100644 index 0000000000..53679fb48d --- /dev/null +++ b/plugins/modules/aix_lvol.py @@ -0,0 +1,341 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Alain Dejoux +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Alain Dejoux (@adejoux) +module: aix_lvol +short_description: Configure AIX LVM logical volumes +description: + - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + vg: + description: + - The volume group this logical volume is part of. + type: str + required: true + lv: + description: + - The name of the logical volume. + type: str + required: true + lv_type: + description: + - The type of the logical volume. + type: str + default: jfs2 + size: + description: + - The size of the logical volume with one of the [MGT] units. + type: str + copies: + description: + - The number of copies of the logical volume. + - Maximum copies are 3. + type: int + default: 1 + policy: + description: + - Sets the interphysical volume allocation policy. + - V(maximum) allocates logical partitions across the maximum number of physical volumes. + - V(minimum) allocates logical partitions across the minimum number of physical volumes. + type: str + choices: [maximum, minimum] + default: maximum + state: + description: + - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option + is required. + type: str + choices: [absent, present] + default: present + opts: + description: + - Free-form options to be passed to the mklv command. + type: str + default: '' + pvs: + description: + - A list of physical volumes, for example V(hdisk1,hdisk2). 
+ type: list + elements: str + default: [] +""" + +EXAMPLES = r""" +- name: Create a logical volume of 512M + community.general.aix_lvol: + vg: testvg + lv: testlv + size: 512M + +- name: Create a logical volume of 512M with disks hdisk1 and hdisk2 + community.general.aix_lvol: + vg: testvg + lv: test2lv + size: 512M + pvs: [hdisk1, hdisk2] + +- name: Create a logical volume of 512M mirrored + community.general.aix_lvol: + vg: testvg + lv: test3lv + size: 512M + copies: 2 + +- name: Create a logical volume of 1G with a minimum placement policy + community.general.aix_lvol: + vg: rootvg + lv: test4lv + size: 1G + policy: minimum + +- name: Create a logical volume with special options like mirror pool + community.general.aix_lvol: + vg: testvg + lv: testlv + size: 512M + opts: -p copy1=poolA -p copy2=poolB + +- name: Extend the logical volume to 1200M + community.general.aix_lvol: + vg: testvg + lv: test4lv + size: 1200M + +- name: Remove the logical volume + community.general.aix_lvol: + vg: testvg + lv: testlv + state: absent +""" + +RETURN = r""" +msg: + type: str + description: A friendly message describing the task result. + returned: always + sample: Logical volume testlv created. +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def convert_size(module, size): + unit = size[-1].upper() + units = ['M', 'G', 'T'] + try: + multiplier = 1024 ** units.index(unit) + except ValueError: + module.fail_json(msg="No valid size unit specified.") + + return int(size[:-1]) * multiplier + + +def round_ppsize(x, base=16): + new_size = int(base * round(float(x) / base)) + if new_size < x: + new_size += base + return new_size + + +def parse_lv(data): + name = None + + for line in data.splitlines(): + match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line) + if match is not None: + name = match.group(1) + vg = match.group(2) + continue + match = re.search(r"LPs:\s+(\d+).*PPs", line) + if match is not None: + lps = int(match.group(1)) + continue + match = re.search(r"PP SIZE:\s+(\d+)", line) + if match is not None: + pp_size = int(match.group(1)) + continue + match = re.search(r"INTER-POLICY:\s+(\w+)", line) + if match is not None: + policy = match.group(1) + continue + + if not name: + return None + + size = lps * pp_size + + return {'name': name, 'vg': vg, 'size': size, 'policy': policy} + + +def parse_vg(data): + + for line in data.splitlines(): + + match = re.search(r"VOLUME GROUP:\s+(\w+)", line) + if match is not None: + name = match.group(1) + continue + + match = re.search(r"TOTAL PP.*\((\d+)", line) + if match is not None: + size = int(match.group(1)) + continue + + match = re.search(r"PP SIZE:\s+(\d+)", line) + if match is not None: + pp_size = int(match.group(1)) + continue + + match = re.search(r"FREE PP.*\((\d+)", line) + if match is not None: + free = int(match.group(1)) + continue + + return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + lv=dict(type='str', required=True), + lv_type=dict(type='str', default='jfs2'), + size=dict(type='str'), + opts=dict(type='str', default=''), + copies=dict(type='int', default=1), + state=dict(type='str', default='present', choices=['absent', 'present']), + policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']), + pvs=dict(type='list', elements='str', default=list()) + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + lv = module.params['lv'] + lv_type = 
module.params['lv_type']
+    size = module.params['size']
+    opts = module.params['opts']
+    copies = module.params['copies']
+    policy = module.params['policy']
+    state = module.params['state']
+    pvs = module.params['pvs']
+
+    if policy == 'maximum':
+        lv_policy = 'x'
+    else:
+        lv_policy = 'm'
+
+    # Prepend an echo command when running in check mode, so the real commands
+    # are only printed instead of executed.
+    if module.check_mode:
+        test_opt = [module.get_bin_path("echo", required=True)]
+    else:
+        test_opt = []
+
+    # Check that the system commands are available.
+    lsvg_cmd = module.get_bin_path("lsvg", required=True)
+    lslv_cmd = module.get_bin_path("lslv", required=True)
+
+    # Get information on the requested volume group.
+    rc, vg_info, err = module.run_command([lsvg_cmd, vg])
+
+    if rc != 0:
+        if state == 'absent':
+            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
+        else:
+            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
+
+    this_vg = parse_vg(vg_info)
+
+    if size is not None:
+        # Calculate the LV size and round it up based on the PP size.
+        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+    # Get information on the requested logical volume.
+    rc, lv_info, err = module.run_command([lslv_cmd, lv])
+
+    if rc != 0:
+        if state == 'absent':
+            module.exit_json(changed=False, msg="Logical volume %s does not exist." % lv)
+
+    changed = False
+
+    this_lv = parse_lv(lv_info)
+
+    if state == 'present' and not size:
+        if this_lv is None:
+            module.fail_json(msg="No size given.")
+
+    if this_lv is None:
+        if state == 'present':
+            if lv_size > this_vg['free']:
+                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+            # Create the LV. Every argv element is passed as a plain string, and
+            # the free-form opts string is split into individual arguments.
+            mklv_cmd = module.get_bin_path("mklv", required=True)
+
+            cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", str(copies), "-e", lv_policy] + opts.split() + [vg, "%sM" % (lv_size, )] + pvs
+            rc, out, err = module.run_command(cmd)
+            if rc == 0:
+                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+            else:
+                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+    else:
+        if state == 'absent':
+            # Remove the LV.
+            rmlv_cmd = module.get_bin_path("rmlv", required=True)
+            rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']])
+            if rc == 0:
+                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+            else:
+                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+        else:
+            if this_lv['policy'] != policy:
+                # Change the LV allocation policy.
+                chlv_cmd = module.get_bin_path("chlv", required=True)
+                rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']])
+                if rc == 0:
+                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+                else:
+                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+            if vg != this_lv['vg']:
+                module.fail_json(msg="Logical volume %s already exists in volume group %s." % (lv, this_lv['vg']))
+
+            # From here on the only remaining action is resizing; if no size
+            # parameter was passed, there is nothing to do.
+            if not size:
+                module.exit_json(changed=False, msg="Logical volume %s already exists."
% (lv)) + + # resize LV based on absolute values + if int(lv_size) > this_lv['size']: + extendlv_cmd = module.get_bin_path("extendlv", required=True) + cmd = test_opt + [extendlv_cmd, lv, "%sM" % (lv_size - this_lv['size'], )] + rc, out, err = module.run_command(cmd) + if rc == 0: + module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size)) + else: + module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err) + elif lv_size < this_lv['size']: + module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size'])) + else: + module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/alerta_customer.py b/plugins/modules/alerta_customer.py new file mode 100644 index 0000000000..aec3923206 --- /dev/null +++ b/plugins/modules/alerta_customer.py @@ -0,0 +1,204 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Christian Wollinger <@cwollinger> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: alerta_customer +short_description: Manage customers in Alerta +version_added: 4.8.0 +description: + - Create or delete customers in Alerta with the REST API. +author: Christian Wollinger (@cwollinger) +seealso: + - name: API documentation + description: Documentation for Alerta API. + link: https://docs.alerta.io/api/reference.html#customers +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + customer: + description: + - Name of the customer. + required: true + type: str + match: + description: + - The matching logged in user for the customer. + required: true + type: str + alerta_url: + description: + - The Alerta API endpoint. + required: true + type: str + api_username: + description: + - The username for the API using basic auth. + type: str + api_password: + description: + - The password for the API using basic auth. + type: str + api_key: + description: + - The access token for the API. + type: str + state: + description: + - Whether the customer should exist or not. + - Both O(customer) and O(match) identify a customer that should be added or removed. + type: str + choices: [absent, present] + default: present +""" + +EXAMPLES = r""" +- name: Create customer + community.general.alerta_customer: + alerta_url: https://alerta.example.com + api_username: admin@example.com + api_password: password + customer: Developer + match: dev@example.com + +- name: Delete customer + community.general.alerta_customer: + alerta_url: https://alerta.example.com + api_username: admin@example.com + api_password: password + customer: Developer + match: dev@example.com + state: absent +""" + +RETURN = r""" +msg: + description: + - Success or failure message. + returned: always + type: str + sample: Customer customer1 created +response: + description: + - The response from the API. 
+ returned: always + type: dict +""" + +from ansible.module_utils.urls import fetch_url, basic_auth_header +from ansible.module_utils.basic import AnsibleModule + + +class AlertaInterface(object): + + def __init__(self, module): + self.module = module + self.state = module.params['state'] + self.customer = module.params['customer'] + self.match = module.params['match'] + self.alerta_url = module.params['alerta_url'] + self.headers = {"Content-Type": "application/json"} + + if module.params.get('api_key', None): + self.headers["Authorization"] = "Key %s" % module.params['api_key'] + else: + self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password']) + + def send_request(self, url, data=None, method="GET"): + response, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method) + + status_code = info["status"] + if status_code == 401: + self.module.fail_json(failed=True, response=info, msg="Unauthorized to request '%s' on '%s'" % (method, url)) + elif status_code == 403: + self.module.fail_json(failed=True, response=info, msg="Permission Denied for '%s' on '%s'" % (method, url)) + elif status_code == 404: + self.module.fail_json(failed=True, response=info, msg="Not found for request '%s' on '%s'" % (method, url)) + elif status_code in (200, 201): + return self.module.from_json(response.read()) + self.module.fail_json(failed=True, response=info, msg="Alerta API error with HTTP %d for %s" % (status_code, url)) + + def get_customers(self): + url = "%s/api/customers" % self.alerta_url + response = self.send_request(url) + pages = response["pages"] + if pages > 1: + for page in range(2, pages + 1): + page_url = url + '?page=' + str(page) + new_results = self.send_request(page_url) + response.update(new_results) + return response + + def create_customer(self): + url = "%s/api/customer" % self.alerta_url + + payload = { + 'customer': self.customer, + 'match': self.match, + } + + payload = self.module.jsonify(payload) + response = self.send_request(url, payload, 'POST') + return response + + def delete_customer(self, id): + url = "%s/api/customer/%s" % (self.alerta_url, id) + + response = self.send_request(url, None, 'DELETE') + return response + + def find_customer_id(self, customer): + for i in customer['customers']: + if self.customer == i['customer'] and self.match == i['match']: + return i['id'] + return None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + customer=dict(type='str', required=True), + match=dict(type='str', required=True), + alerta_url=dict(type='str', required=True), + api_username=dict(type='str'), + api_password=dict(type='str', no_log=True), + api_key=dict(type='str', no_log=True), + ), + required_together=[['api_username', 'api_password']], + mutually_exclusive=[['api_username', 'api_key']], + supports_check_mode=True + ) + + alerta_iface = AlertaInterface(module) + + if alerta_iface.state == 'present': + response = alerta_iface.get_customers() + if alerta_iface.find_customer_id(response): + module.exit_json(changed=False, response=response, msg="Customer %s already exists" % alerta_iface.customer) + else: + if not module.check_mode: + response = alerta_iface.create_customer() + module.exit_json(changed=True, response=response, msg="Customer %s created" % alerta_iface.customer) + else: + response = alerta_iface.get_customers() + id = alerta_iface.find_customer_id(response) + if id: + if not module.check_mode: + 
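+                # In check mode the DELETE request is skipped; the module still
+                # reports changed=True because the customer would be removed.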
alerta_iface.delete_customer(id)
+            module.exit_json(changed=True, response=response, msg="Customer %s with id %s deleted" % (alerta_iface.customer, id))
+        else:
+            module.exit_json(changed=False, response=response, msg="Customer %s does not exist" % alerta_iface.customer)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py
deleted file mode 120000
index f5e473b232..0000000000
--- a/plugins/modules/ali_instance.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/alicloud/ali_instance.py
\ No newline at end of file
diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py
new file mode 100644
index 0000000000..0434f0d79f
--- /dev/null
+++ b/plugins/modules/ali_instance.py
@@ -0,0 +1,1005 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: ali_instance
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security
+  Group
+description:
+  - Create, start, stop, restart, modify, or terminate ECS instances.
+  - Add or remove ECS instances to/from a security group.
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - The state of the instance after the operation.
+    default: 'present'
+    choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+    type: str
+  availability_zone:
+    description:
+      - Aliyun availability zone ID in which to launch the instance. If it is not specified, it is allocated by the system
+        automatically.
+    aliases: ['alicloud_zone', 'zone_id']
+    type: str
+  image_id:
+    description:
+      - Image ID used to launch instances. Required when O(state=present) and creating new ECS instances.
+    aliases: ['image']
+    type: str
+  instance_type:
+    description:
+      - Instance type used to launch instances. Required when O(state=present) and creating new ECS instances.
+    aliases: ['type']
+    type: str
+  security_groups:
+    description:
+      - A list of security group IDs.
+    aliases: ['group_ids']
+    type: list
+    elements: str
+  vswitch_id:
+    description:
+      - The subnet ID in which to launch the instances (VPC).
+    aliases: ['subnet_id']
+    type: str
+  instance_name:
+    description:
+      - The name of the ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
+        uppercase/lowercase letter or a Chinese character and can contain numerals, V(.), V(_) or V(-). It cannot begin with
+        V(http://) or V(https://).
+    aliases: ['name']
+    type: str
+  description:
+    description:
+      - The description of the ECS instance, which is a string of 2 to 256 characters.
It cannot begin with V(http://) or V(https://).
+    type: str
+  internet_charge_type:
+    description:
+      - Internet charge type of the ECS instance.
+    default: 'PayByBandwidth'
+    choices: ['PayByBandwidth', 'PayByTraffic']
+    type: str
+  max_bandwidth_in:
+    description:
+      - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+    default: 200
+    type: int
+  max_bandwidth_out:
+    description:
+      - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). Required when O(allocate_public_ip=true).
+        Ignored when O(allocate_public_ip=false).
+    default: 0
+    type: int
+  host_name:
+    description:
+      - Instance host name. Ordered hostnames are not supported.
+    type: str
+  unique_suffix:
+    description:
+      - Specifies whether to add sequential suffixes to the host_name. The sequential suffix ranges from 001 to 999.
+    default: false
+    type: bool
+    version_added: '0.2.0'
+  password:
+    description:
+      - The password used to log in to the instance. A modified password only takes effect after the instance is rebooted.
+    type: str
+  system_disk_category:
+    description:
+      - Category of the system disk.
+    default: 'cloud_efficiency'
+    choices: ['cloud_efficiency', 'cloud_ssd']
+    type: str
+  system_disk_size:
+    description:
+      - Size of the system disk, in GB. The valid values are V(40)~V(500).
+    default: 40
+    type: int
+  system_disk_name:
+    description:
+      - Name of the system disk.
+    type: str
+  system_disk_description:
+    description:
+      - Description of the system disk.
+    type: str
+  count:
+    description:
+      - The number of instances to launch. An integer value which indicates how many instances that match O(count_tag) should
+        be running. Instances are either created or terminated based on this value.
+    default: 1
+    type: int
+  count_tag:
+    description:
+      - O(count) determines how many instances matching a specific tag criteria should be present. This can be expressed
+        in multiple ways and is shown in the EXAMPLES section. The specified count_tag must already exist or be passed in
+        as the O(tags) option. If it is not specified, it is replaced by O(instance_name).
+    type: str
+  allocate_public_ip:
+    description:
+      - Whether to allocate a public IP for the new instance.
+    default: false
+    aliases: ['assign_public_ip']
+    type: bool
+  instance_charge_type:
+    description:
+      - The charge type of the instance.
+    choices: ['PrePaid', 'PostPaid']
+    default: 'PostPaid'
+    type: str
+  period:
+    description:
+      - The charge duration of the instance, in months. Required when O(instance_charge_type=PrePaid).
+      - The valid values are [V(1-9), V(12), V(24), V(36)].
+    default: 1
+    type: int
+  auto_renew:
+    description:
+      - Whether to automatically renew the charge of the instance.
+    type: bool
+    default: false
+  auto_renew_period:
+    description:
+      - The duration of the automatic charge renewal of the instance. Required when O(auto_renew=true).
+    choices: [1, 2, 3, 6, 12]
+    type: int
+  instance_ids:
+    description:
+      - A list of instance IDs. It is required when operating on existing instances. If it is specified, O(count) is ignored.
+    type: list
+    elements: str
+  force:
+    description:
+      - Whether the current operation needs to be executed forcibly.
+    default: false
+    type: bool
+  tags:
+    description:
+      - A hash/dictionary of instance tags, to add to the new instance or for starting/stopping instances by tag. V({"key":"value"}).
+    aliases: ["instance_tags"]
+    type: dict
+    version_added: '0.2.0'
+  purge_tags:
+    description:
+      - Delete any tags not specified in the task that are on the instance.
If V(true), it means you have to specify all the + desired tags on each task affecting an instance. + default: false + type: bool + version_added: '0.2.0' + key_name: + description: + - The name of key pair which is used to access ECS instance in SSH. + required: false + type: str + aliases: ['keypair'] + user_data: + description: + - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. It + only takes effect when launching the new ECS instances. + required: false + type: str + ram_role_name: + description: + - The name of the instance RAM role. + type: str + version_added: '0.2.0' + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal places and + takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. + type: float + version_added: '0.2.0' + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. This parameter is valid when O(instance_charge_type=PostPaid). + choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] + default: 'NoSpot' + type: str + version_added: '0.2.0' + period_unit: + description: + - The duration unit that you are buying the resource. It is valid when O(instance_charge_type=PrePaid). + choices: ['Month', 'Week'] + default: 'Month' + type: str + version_added: '0.2.0' + dry_run: + description: + - Specifies whether to send a dry-run request. + - If O(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the required + parameters are set, and validates the request format, service permissions, and available ECS instances. If the validation + fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. + - If O(dry_run=false), a request is sent. If the validation succeeds, the instance is created. + default: false + type: bool + version_added: '0.2.0' + include_data_disks: + description: + - Whether to change instance disks charge type when changing instance charge type. 
+ default: true + type: bool + version_added: '0.2.0' +author: + - "He Guimin (@xiaozhu36)" +requirements: + - "Python >= 3.6" + - "footmark >= 1.19.0" +extends_documentation_fragment: + - community.general.alicloud + - community.general.attributes +""" + +EXAMPLES = r""" +# basic provisioning example vpc network +- name: Basic provisioning example + hosts: localhost + vars: + alicloud_access_key: + alicloud_secret_key: + alicloud_region: cn-beijing + image: ubuntu1404_64_40G_cloudinit_20160727.raw + instance_type: ecs.n4.small + vswitch_id: vsw-abcd1234 + assign_public_ip: true + max_bandwidth_out: 10 + host_name: myhost + password: mypassword + system_disk_category: cloud_efficiency + system_disk_size: 100 + internet_charge_type: PayByBandwidth + security_groups: ["sg-f2rwnfh23r"] + + instance_ids: ["i-abcd12346", "i-abcd12345"] + force: true + + tasks: + - name: Launch ECS instance in VPC network + community.general.ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + image: '{{ image }}' + system_disk_category: '{{ system_disk_category }}' + system_disk_size: '{{ system_disk_size }}' + instance_type: '{{ instance_type }}' + vswitch_id: '{{ vswitch_id }}' + assign_public_ip: '{{ assign_public_ip }}' + internet_charge_type: '{{ internet_charge_type }}' + max_bandwidth_out: '{{ max_bandwidth_out }}' + tags: + Name: created_one + host_name: '{{ host_name }}' + password: '{{ password }}' + + - name: With count and count_tag to create a number of instances + community.general.ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + image: '{{ image }}' + system_disk_category: '{{ system_disk_category }}' + system_disk_size: '{{ system_disk_size }}' + instance_type: '{{ instance_type }}' + assign_public_ip: '{{ assign_public_ip }}' + security_groups: '{{ security_groups }}' + internet_charge_type: '{{ internet_charge_type }}' + max_bandwidth_out: '{{ max_bandwidth_out }}' + tags: + Name: created_one + Version: 0.1 + count: 2 + count_tag: + Name: created_one + host_name: '{{ host_name }}' + password: '{{ password }}' + + - name: Start instance + community.general.ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + state: 'running' + + - name: Reboot instance forcibly + ecs: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + state: 'restarted' + force: '{{ force }}' + + - name: Add instances to an security group + ecs: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + security_groups: '{{ security_groups }}' +""" + +RETURN = r""" +instances: + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance. 
+ returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance expires. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. 
+ returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. + returned: always + type: str + sample: CentOS + ostype: + description: The operation system type of the instance owned. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or eip address. + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + user_data: + description: User-defined data. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. + returned: always + type: float + sample: 0.97 + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. + returned: always + type: str + sample: NoSpot +ids: + description: List of ECS instance IDs. 
+ returned: always + type: list + sample: ["i-12345er", "i-3245fs"] +""" + +import re +import time +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ( + ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK +) + + +def get_instances_info(connection, ids): + result = [] + instances = connection.describe_instances(instance_ids=ids) + if len(instances) > 0: + for inst in instances: + volumes = connection.describe_disks(instance_id=inst.id) + setattr(inst, 'block_device_mappings', volumes) + setattr(inst, 'user_data', inst.describe_user_data()) + result.append(inst.read()) + return result + + +def run_instance(module, ecs, exact_count): + if exact_count <= 0: + return None + zone_id = module.params['availability_zone'] + image_id = module.params['image_id'] + instance_type = module.params['instance_type'] + security_groups = module.params['security_groups'] + vswitch_id = module.params['vswitch_id'] + instance_name = module.params['instance_name'] + description = module.params['description'] + internet_charge_type = module.params['internet_charge_type'] + max_bandwidth_out = module.params['max_bandwidth_out'] + max_bandwidth_in = module.params['max_bandwidth_in'] + host_name = module.params['host_name'] + password = module.params['password'] + system_disk_category = module.params['system_disk_category'] + system_disk_size = module.params['system_disk_size'] + system_disk_name = module.params['system_disk_name'] + system_disk_description = module.params['system_disk_description'] + allocate_public_ip = module.params['allocate_public_ip'] + period = module.params['period'] + auto_renew = module.params['auto_renew'] + instance_charge_type = module.params['instance_charge_type'] + auto_renew_period = module.params['auto_renew_period'] + user_data = module.params['user_data'] + key_name = module.params['key_name'] + ram_role_name = module.params['ram_role_name'] + spot_price_limit = module.params['spot_price_limit'] + spot_strategy = module.params['spot_strategy'] + unique_suffix = module.params['unique_suffix'] + # check whether the required parameter passed or not + if not image_id: + module.fail_json(msg='image_id is required for new instance') + if not instance_type: + module.fail_json(msg='instance_type is required for new instance') + if not isinstance(security_groups, list): + module.fail_json(msg='The parameter security_groups should be a list, aborting') + if len(security_groups) <= 0: + module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting') + + client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time())) + + try: + # call to create_instance method from footmark + instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0], + zone_id=zone_id, instance_name=instance_name, description=description, + internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out, + internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password, + io_optimized='optimized', system_disk_category=system_disk_category, + system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name, + system_disk_description=system_disk_description, vswitch_id=vswitch_id, + amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month", + auto_renew=auto_renew, 
auto_renew_period=auto_renew_period, key_pair_name=key_name, + user_data=user_data, client_token=client_token, ram_role_name=ram_role_name, + spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix) + + except Exception as e: + module.fail_json(msg='Unable to create instance, error: {0}'.format(e)) + + return instances + + +def modify_instance(module, instance): + # According to state to modify instance's some special attribute + state = module.params["state"] + name = module.params['instance_name'] + unique_suffix = module.params['unique_suffix'] + if not name: + name = instance.name + + description = module.params['description'] + if not description: + description = instance.description + + host_name = module.params['host_name'] + if unique_suffix and host_name: + suffix = instance.host_name[-3:] + host_name = host_name + suffix + + if not host_name: + host_name = instance.host_name + + # password can be modified only when restart instance + password = "" + if state == "restarted": + password = module.params['password'] + + # userdata can be modified only when instance is stopped + setattr(instance, "user_data", instance.describe_user_data()) + user_data = instance.user_data + if state == "stopped": + user_data = module.params['user_data'].encode() + + try: + return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data) + except Exception as e: + module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e)) + + +def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300): + """ + To verify instance charge type has become expected after modify instance charge type + """ + try: + while True: + instances = ecs.describe_instances(instance_ids=instance_ids) + flag = True + for inst in instances: + if inst and inst.instance_charge_type != charge_type: + flag = False + if flag: + return + timeout -= delay + time.sleep(delay) + if timeout <= 0: + raise Exception("Timeout Error: Waiting for instance to {0}. 
".format(charge_type)) + except Exception as e: + raise e + + +def main(): + argument_spec = ecs_argument_spec() + argument_spec.update(dict( + security_groups=dict(type='list', elements='str', aliases=['group_ids']), + availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']), + instance_type=dict(type='str', aliases=['type']), + image_id=dict(type='str', aliases=['image']), + count=dict(type='int', default=1), + count_tag=dict(type='str'), + vswitch_id=dict(type='str', aliases=['subnet_id']), + instance_name=dict(type='str', aliases=['name']), + host_name=dict(type='str'), + password=dict(type='str', no_log=True), + internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']), + max_bandwidth_in=dict(type='int', default=200), + max_bandwidth_out=dict(type='int', default=0), + system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']), + system_disk_size=dict(type='int', default=40), + system_disk_name=dict(type='str'), + system_disk_description=dict(type='str'), + force=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['instance_tags']), + purge_tags=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']), + description=dict(type='str'), + allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False), + instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']), + period=dict(type='int', default=1), + auto_renew=dict(type='bool', default=False), + instance_ids=dict(type='list', elements='str'), + auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]), + key_name=dict(type='str', aliases=['keypair']), + user_data=dict(type='str'), + ram_role_name=dict(type='str'), + spot_price_limit=dict(type='float'), + spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']), + unique_suffix=dict(type='bool', default=False), + period_unit=dict(type='str', default='Month', choices=['Month', 'Week']), + dry_run=dict(type='bool', default=False), + include_data_disks=dict(type='bool', default=True) + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if HAS_FOOTMARK is False: + module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) + + ecs = ecs_connect(module) + host_name = module.params['host_name'] + state = module.params['state'] + instance_ids = module.params['instance_ids'] + count_tag = module.params['count_tag'] + count = module.params['count'] + instance_name = module.params['instance_name'] + force = module.params['force'] + zone_id = module.params['availability_zone'] + key_name = module.params['key_name'] + tags = module.params['tags'] + max_bandwidth_out = module.params['max_bandwidth_out'] + instance_charge_type = module.params['instance_charge_type'] + if instance_charge_type == "PrePaid": + module.params['spot_strategy'] = '' + changed = False + + instances = [] + if instance_ids: + if not isinstance(instance_ids, list): + module.fail_json(msg='The parameter instance_ids should be a list, aborting') + instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids) + if not instances: + module.fail_json(msg="There are no instances in our record based on instance_ids {0}. 
" + "Please check it and try again.".format(instance_ids)) + elif count_tag: + instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag)) + elif instance_name: + instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name) + + ids = [] + if state == 'absent': + if len(instances) < 1: + module.fail_json(msg='Please specify ECS instances that you want to operate by using ' + 'parameters instance_ids, tags or instance_name, aborting') + try: + targets = [] + for inst in instances: + if inst.status != 'stopped' and not force: + module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.") + targets.append(inst.id) + if ecs.delete_instances(instance_ids=targets, force=force): + changed = True + ids.extend(targets) + + module.exit_json(changed=changed, ids=ids, instances=[]) + except Exception as e: + module.fail_json(msg='Delete instance got an error: {0}'.format(e)) + + if module.params['allocate_public_ip'] and max_bandwidth_out < 0: + module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.") + if not module.params['allocate_public_ip']: + module.params['max_bandwidth_out'] = 0 + + if state == 'present': + if not instance_ids: + if len(instances) > count: + for i in range(0, len(instances) - count): + inst = instances[len(instances) - 1] + if inst.status != 'stopped' and not force: + module.fail_json(msg="That to delete instance {0} is failed results from it is running, " + "and please stop it or set 'force' as True.".format(inst.id)) + try: + if inst.terminate(force=force): + changed = True + except Exception as e: + module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e)) + instances.pop(len(instances) - 1) + else: + try: + if re.search(r"-\[\d+,\d+\]-", host_name): + module.fail_json(msg='Ordered hostname is not supported, If you want to add an ordered ' + 'suffix to the hostname, you can set unique_suffix to True') + new_instances = run_instance(module, ecs, count - len(instances)) + if new_instances: + changed = True + instances.extend(new_instances) + except Exception as e: + module.fail_json(msg="Create new instances got an error: {0}".format(e)) + + # Security Group join/leave begin + security_groups = module.params['security_groups'] + if security_groups: + if not isinstance(security_groups, list): + module.fail_json(msg='The parameter security_groups should be a list, aborting') + for inst in instances: + existing = inst.security_group_ids['security_group_id'] + remove = list(set(existing).difference(set(security_groups))) + add = list(set(security_groups).difference(set(existing))) + for sg in remove: + if inst.leave_security_group(sg): + changed = True + for sg in add: + if inst.join_security_group(sg): + changed = True + # Security Group join/leave ends here + + # Attach/Detach key pair + inst_ids = [] + for inst in instances: + if key_name is not None and key_name != inst.key_name: + if key_name == "": + if inst.detach_key_pair(): + changed = True + else: + inst_ids.append(inst.id) + if inst_ids: + changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name) + + # Modify instance attribute + for inst in instances: + if modify_instance(module, inst): + changed = True + if inst.id not in ids: + ids.append(inst.id) + + # Modify instance charge type + ids = [] + for inst in instances: + if inst.instance_charge_type != instance_charge_type: + ids.append(inst.id) + if ids: + params = {"instance_ids": ids, "instance_charge_type": 
instance_charge_type, + "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'], + "auto_pay": True} + if instance_charge_type == 'PrePaid': + params['period'] = module.params['period'] + params['period_unit'] = module.params['period_unit'] + + if ecs.modify_instance_charge_type(**params): + changed = True + wait_for_instance_modify_charge(ecs, ids, instance_charge_type) + + else: + if len(instances) < 1: + module.fail_json(msg='Please specify ECS instances that you want to operate by using ' + 'parameters instance_ids, tags or instance_name, aborting') + if state == 'running': + try: + targets = [] + for inst in instances: + if modify_instance(module, inst): + changed = True + if inst.status != "running": + targets.append(inst.id) + ids.append(inst.id) + if targets and ecs.start_instances(instance_ids=targets): + changed = True + ids.extend(targets) + except Exception as e: + module.fail_json(msg='Start instances got an error: {0}'.format(e)) + elif state == 'stopped': + try: + targets = [] + for inst in instances: + if inst.status != "stopped": + targets.append(inst.id) + if targets and ecs.stop_instances(instance_ids=targets, force_stop=force): + changed = True + ids.extend(targets) + for inst in instances: + if modify_instance(module, inst): + changed = True + except Exception as e: + module.fail_json(msg='Stop instances got an error: {0}'.format(e)) + elif state == 'restarted': + try: + targets = [] + for inst in instances: + if modify_instance(module, inst): + changed = True + targets.append(inst.id) + if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']): + changed = True + ids.extend(targets) + except Exception as e: + module.fail_json(msg='Reboot instances got an error: {0}'.format(e)) + + tags = module.params['tags'] + if module.params['purge_tags']: + for inst in instances: + if not tags: + tags = inst.tags + try: + if inst.remove_tags(tags): + changed = True + except Exception as e: + module.fail_json(msg="{0}".format(e)) + module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) + + if tags: + for inst in instances: + try: + if inst.add_tags(tags): + changed = True + except Exception as e: + module.fail_json(msg="{0}".format(e)) + module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py deleted file mode 120000 index e8aa83401f..0000000000 --- a/plugins/modules/ali_instance_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/alicloud/ali_instance_info.py \ No newline at end of file diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py new file mode 100644 index 0000000000..31550c4d0a --- /dev/null +++ b/plugins/modules/ali_instance_info.py @@ -0,0 +1,401 @@ +#!/usr/bin/python + +# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see http://www.gnu.org/licenses/. + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ali_instance_info +short_description: Gather information on instances of Alibaba Cloud ECS +description: + - This module fetches data from the Open API in Alicloud. The module must be called from within the ECS instance itself. +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + +options: + name_prefix: + description: + - Use a instance name prefix to filter ECS instances. + type: str + version_added: '0.2.0' + tags: + description: + - A hash/dictionaries of instance tags. C({"key":"value"}). + aliases: ["instance_tags"] + type: dict + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be all + of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. Filter + keys can be same as request parameter name or be lower case and use underscore (V("_")) or dash (V("-")) to connect + different words in one parameter. C(InstanceIds) should be a list. C(Tag.n.Key) and C(Tag.n.Value) should be a dict + and using O(tags) instead. + type: dict + version_added: '0.2.0' +author: + - "He Guimin (@xiaozhu36)" +requirements: + - "Python >= 3.6" + - "footmark >= 1.13.0" +extends_documentation_fragment: + - community.general.alicloud + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +# Fetch instances details according to setting different filters + +- name: Find all instances in the specified region + community.general.ali_instance_info: + register: all_instances + +- name: Find all instances based on the specified ids + community.general.ali_instance_info: + instance_ids: + - "i-35b333d9" + - "i-ddav43kd" + register: instances_by_ids + +- name: Find all instances based on the specified name_prefix + community.general.ali_instance_info: + name_prefix: "ecs_instance_" + register: instances_by_name_prefix + +- name: Find instances based on tags + community.general.ali_instance_info: + tags: + Test: "add" +""" + +RETURN = r""" +instances: + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/xvda). + returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. 
+ returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attributes of the EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance expires. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attributes of the instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type_family: + description: The instance type family the instance belongs to. + returned: always + type: str + sample: ecs.sn1ne + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum outgoing bandwidth to the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance, in MiB. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operating system name of the instance. + returned: always + type: str + sample: CentOS + ostype: + description: The operating system type of the instance. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or EIP address. + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 +ids: + description: List of ECS instance IDs. + returned: always + type: list + sample: ["i-12345er", "i-3245fs"] +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ( + ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK +) + + +def main(): + argument_spec = ecs_argument_spec() + argument_spec.update(dict( + name_prefix=dict(type='str'), + tags=dict(type='dict', aliases=['instance_tags']), + filters=dict(type='dict') + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + if HAS_FOOTMARK is False: + module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) + + ecs = ecs_connect(module) + + instances = [] + instance_ids = [] + ids = [] + name_prefix = module.params['name_prefix'] + + filters = module.params['filters'] + if not filters: + filters = {} + for key, value in list(filters.items()): + if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(value, list): + for id in value: + if id not in ids: + ids.append(id) + if ids: + filters['instance_ids'] = ids + if module.params['tags']: + filters['tags'] = module.params['tags'] + + for inst in ecs.describe_instances(**filters): + if name_prefix: + if not str(inst.instance_name).startswith(name_prefix): + continue + volumes = ecs.describe_disks(instance_id=inst.id) + setattr(inst, 'block_device_mappings', volumes) + setattr(inst, 'user_data', inst.describe_user_data()) + instances.append(inst.read()) + instance_ids.append(inst.id) + + module.exit_json(changed=False, ids=instance_ids, instances=instances) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py deleted file mode 120000 index f1c909e245..0000000000 --- a/plugins/modules/alternatives.py +++ /dev/null @@ -1 +0,0 @@ -./system/alternatives.py \ No
newline at end of file diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py new file mode 100644 index 0000000000..ad26d04578 --- /dev/null +++ b/plugins/modules/alternatives.py @@ -0,0 +1,437 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Gabe Mulley +# Copyright (c) 2015, David Wittman +# Copyright (c) 2022, Marius Rieder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: alternatives +short_description: Manages alternative programs for common commands +description: + - Manages symbolic links using the C(update-alternatives) tool. + - Useful when multiple programs are installed but provide similar functionality (for example, different editors). +author: + - Marius Rieder (@jiuka) + - David Wittman (@DavidWittman) + - Gabe Mulley (@mulby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + description: + - The generic name of the link. + type: str + required: true + path: + description: + - The path to the real executable that the link should point to. + type: path + family: + description: + - The family groups similar alternatives. This option is available only on RHEL-based distributions. + type: str + version_added: 10.1.0 + link: + description: + - The path to the symbolic link that should point to the real executable. + - This option is always required on RHEL-based distributions. On Debian-based distributions this option is required + when the alternative O(name) is unknown to the system. + type: path + priority: + description: + - The priority of the alternative. If no priority is given for creation V(50) is used as a fallback. + type: int + state: + description: + - V(present) - install the alternative (if not already installed), but do not set it as the currently selected alternative + for the group. + - V(selected) - install the alternative (if not already installed), and set it as the currently selected alternative + for the group. + - V(auto) - install the alternative (if not already installed), and set the group to auto mode. Added in community.general + 5.1.0. + - V(absent) - removes the alternative. Added in community.general 5.1.0. + choices: [present, selected, auto, absent] + default: selected + type: str + version_added: 4.8.0 + subcommands: + description: + - A list of subcommands. + - Each subcommand needs a name, a link and a path parameter. + - Subcommands are also named C(slaves) or C(followers), depending on the version of C(alternatives). + type: list + elements: dict + aliases: ['slaves'] + suboptions: + name: + description: + - The generic name of the subcommand. + type: str + required: true + path: + description: + - The path to the real executable that the subcommand should point to. + type: path + required: true + link: + description: + - The path to the symbolic link that should point to the real subcommand executable. 
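+ # for instance, V(/usr/bin/keytool) when the subcommand is keytool, as in the java example below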
+ type: path + required: true + version_added: 5.1.0 +requirements: [update-alternatives] +""" + +EXAMPLES = r""" +- name: Correct java version selected + community.general.alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + +- name: Select java-11-openjdk.x86_64 family + community.general.alternatives: + name: java + family: java-11-openjdk.x86_64 + when: ansible_os_family == 'RedHat' + +- name: Alternatives link created + community.general.alternatives: + name: hadoop-conf + link: /etc/hadoop/conf + path: /etc/hadoop/conf.ansible + +- name: Make java 32 bit an alternative with low priority + community.general.alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java + priority: -10 + +- name: Install Python 3.5 but do not select it + community.general.alternatives: + name: python + path: /usr/bin/python3.5 + link: /usr/bin/python + state: present + +- name: Install Python 3.5 and reset selection to auto + community.general.alternatives: + name: python + path: /usr/bin/python3.5 + link: /usr/bin/python + state: auto + +- name: keytool is a subcommand of java + community.general.alternatives: + name: java + link: /usr/bin/java + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + subcommands: + - name: keytool + link: /usr/bin/keytool + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool +""" + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +class AlternativeState: + PRESENT = "present" + SELECTED = "selected" + ABSENT = "absent" + AUTO = "auto" + + @classmethod + def to_list(cls): + return [cls.PRESENT, cls.SELECTED, cls.ABSENT, cls.AUTO] + + +class AlternativesModule(object): + _UPDATE_ALTERNATIVES = None + + def __init__(self, module): + self.module = module + self.result = dict(changed=False, diff=dict(before=dict(), after=dict())) + self.module.run_command_environ_update = {'LC_ALL': 'C'} + self.messages = [] + self.run() + + @property + def mode_present(self): + return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO] + + @property + def mode_selected(self): + return self.module.params.get('state') == AlternativeState.SELECTED + + @property + def mode_auto(self): + return self.module.params.get('state') == AlternativeState.AUTO + + def run(self): + self.parse() + + if self.mode_present: + # Check if we need to (re)install + subcommands_parameter = self.module.params['subcommands'] + priority_parameter = self.module.params['priority'] + if ( + self.path is not None and ( + self.path not in self.current_alternatives or + (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or + (subcommands_parameter is not None and ( + not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or + not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter) + )) + ) + ): + self.install() + + # Check if we need to set the preference + is_same_path = self.path is not None and self.current_path == self.path + is_same_family = False + if self.current_path is not None and self.current_path in self.current_alternatives: + current_alternative = self.current_alternatives[self.current_path] + is_same_family = current_alternative.get('family') == self.family + + if self.mode_selected and not (is_same_path or is_same_family): + self.set() + + # Check if we need to reset to auto + if self.mode_auto and 
self.current_mode == 'manual': + self.auto() + else: + # Check if we need to uninstall + if self.path in self.current_alternatives: + self.remove() + + self.result['msg'] = ' '.join(self.messages) + self.module.exit_json(**self.result) + + def install(self): + if not os.path.exists(self.path): + self.module.fail_json(msg="Specified path %s does not exist" % self.path) + if not self.link: + self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link') + + cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)] + if self.family is not None: + cmd.extend(["--family", self.family]) + + if self.module.params['subcommands'] is not None: + subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands] + cmd += [item for sublist in subcommands for item in sublist] + + self.result['changed'] = True + self.messages.append("Install alternative '%s' for '%s'." % (self.path, self.name)) + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after'] = dict( + state=AlternativeState.PRESENT, + path=self.path, + family=self.family, + priority=self.priority, + link=self.link, + ) + if self.subcommands: + self.result['diff']['after'].update(dict( + subcommands=self.subcommands + )) + + def remove(self): + cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path] + self.result['changed'] = True + self.messages.append("Remove alternative '%s' from '%s'." % (self.path, self.name)) + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after'] = dict(state=AlternativeState.ABSENT) + + def set(self): + # Path takes precedence over family as it is more specific + if self.path is None: + arg = self.family + else: + arg = self.path + + cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, arg] + self.result['changed'] = True + self.messages.append("Set alternative '%s' for '%s'." % (arg, self.name)) + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after']['state'] = AlternativeState.SELECTED + + def auto(self): + cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name] + self.messages.append("Set alternative to auto for '%s'." 
% (self.name)) + self.result['changed'] = True + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after']['state'] = AlternativeState.PRESENT + + @property + def name(self): + return self.module.params.get('name') + + @property + def path(self): + return self.module.params.get('path') + + @property + def family(self): + return self.module.params.get('family') + + @property + def link(self): + return self.module.params.get('link') or self.current_link + + @property + def priority(self): + if self.module.params.get('priority') is not None: + return self.module.params.get('priority') + return self.current_alternatives.get(self.path, {}).get('priority', 50) + + @property + def subcommands(self): + if self.module.params.get('subcommands') is not None: + return self.module.params.get('subcommands') + elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'): + return self.current_alternatives[self.path].get('subcommands') + return None + + @property + def UPDATE_ALTERNATIVES(self): + if self._UPDATE_ALTERNATIVES is None: + self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True) + return self._UPDATE_ALTERNATIVES + + def parse(self): + self.current_mode = None + self.current_path = None + self.current_link = None + self.current_alternatives = {} + + # Run `update-alternatives --display <name>` to find existing alternatives + (rc, display_output, dummy) = self.module.run_command( + [self.UPDATE_ALTERNATIVES, '--display', self.name] + ) + + if rc != 0: + self.module.debug("No current alternative found. '%s' exited with %s" % (self.UPDATE_ALTERNATIVES, rc)) + return + + current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE) + current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE) + current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE) + subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE) + + alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE) + subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE) + + match = current_mode_regex.search(display_output) + if not match: + self.module.debug("No current mode found in output") + return + self.current_mode = match.group(1) + + match = current_path_regex.search(display_output) + if not match: + self.module.debug("No current path found in output") + else: + self.current_path = match.group(1) + + match = current_link_regex.search(display_output) + if not match: + self.module.debug("No current link found in output") + else: + self.current_link = match.group(1) + + subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output)) + if not subcmd_path_map and self.subcommands: + subcmd_path_map = {s['name']: s['link'] for s in self.subcommands} + + for path, family, prio, subcmd in alternative_regex.findall(display_output): + self.current_alternatives[path] = dict( + priority=int(prio), + family=family, + subcommands=[dict( + name=name, + path=spath, + link=subcmd_path_map.get(name) + ) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)'] + ) + + if self.module._diff: + if self.path in self.current_alternatives: + self.result['diff']['before'].update(dict( + state=AlternativeState.PRESENT, + path=self.path, + priority=self.current_alternatives[self.path].get('priority'),
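+ # 'before' snapshot of the currently installed alternative; current_link was resolved by the regexes above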
link=self.current_link, + )) + if self.current_alternatives[self.path].get('subcommands'): + self.result['diff']['before'].update(dict( + subcommands=self.current_alternatives[self.path].get('subcommands') + )) + if self.current_mode == 'manual' and self.current_path != self.path: + self.result['diff']['before'].update(dict( + state=AlternativeState.SELECTED + )) + else: + self.result['diff']['before'].update(dict( + state=AlternativeState.ABSENT + )) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + path=dict(type='path'), + family=dict(type='str'), + link=dict(type='path'), + priority=dict(type='int'), + state=dict( + type='str', + choices=AlternativeState.to_list(), + default=AlternativeState.SELECTED, + ), + subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict( + name=dict(type='str', required=True), + path=dict(type='path', required=True), + link=dict(type='path', required=True), + )), + ), + supports_check_mode=True, + required_one_of=[('path', 'family')] + ) + + AlternativesModule(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py new file mode 100644 index 0000000000..35900f39a5 --- /dev/null +++ b/plugins/modules/android_sdk.py @@ -0,0 +1,207 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: android_sdk +short_description: Manages Android SDK packages +description: + - Manages Android SDK packages. + - Allows installation from different channels (stable, beta, dev, canary). + - Allows installation of packages to a non-default SDK root directory. +author: Stanislav Shamilov (@shamilovstas) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +version_added: 10.2.0 +options: + accept_licenses: + description: + - If this is set to V(true), the module attempts to accept license prompts generated by C(sdkmanager) during package + installation. Otherwise, every license prompt is rejected. + type: bool + default: false + name: + description: + - The name of an Android SDK package (for instance, V(build-tools;34.0.0)). + aliases: ['package', 'pkg'] + type: list + elements: str + state: + description: + - Indicates the desired package(s) state. + - V(present) ensures that package(s) is/are present. + - V(absent) ensures that package(s) is/are absent. + - V(latest) ensures that package(s) is/are installed and updated to the latest version(s). + choices: ['present', 'absent', 'latest'] + default: present + type: str + sdk_root: + description: + - Provides the path of an alternative directory to install Android SDK packages to. By default, all packages are installed + to the directory where C(sdkmanager) is installed. + type: path + channel: + description: + - Indicates which channel C(sdkmanager) must use for installing packages. + choices: ['stable', 'beta', 'dev', 'canary'] + default: stable + type: str +requirements: + - C(java) >= 17 + - C(sdkmanager) command line tool for installing Android SDK packages. +notes: + - For some of the packages installed by C(sdkmanager), it is necessary to accept licenses.
Usually this is done through a command + line prompt, in the form of a Y/N question, when a licensed package is requested to be installed. If there are several packages + requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool prompts for these + licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently, + it is only possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a + single invocation of the C(sdkmanager --install) command, the command is executed independently for each package. + This makes sure that at most one license prompt needs to be answered at a time. At the time of writing this module, a C(sdkmanager) + package may belong to at most one license type that needs to be accepted. However, if this changes in the future, the + module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module is unable to answer. + If this becomes the case, file an issue and, in the meantime, consider accepting all the licenses in advance, as described + in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance, + using the M(ansible.builtin.command) module. +seealso: + - name: sdkmanager tool documentation + description: Detailed information on how to install and use the sdkmanager command line tool. + link: https://developer.android.com/tools/sdkmanager +""" + +EXAMPLES = r""" +- name: Install build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + +- name: Install build-tools;34.0.0 and platform-tools + community.general.android_sdk: + name: + - build-tools;34.0.0 + - platform-tools + accept_licenses: true + state: present + +- name: Delete build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + state: absent + +- name: Install platform-tools or update if installed + community.general.android_sdk: + name: platform-tools + accept_licenses: true + state: latest + +- name: Install build-tools;34.0.0 to a different SDK root + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + sdk_root: "/path/to/new/root" + +- name: Install a package from another channel + community.general.android_sdk: + name: some-package-present-in-canary-channel + accept_licenses: true + state: present + channel: canary +""" + +RETURN = r""" +installed: + description: A list of packages that have been installed. + returned: when packages have changed + type: list + sample: ["build-tools;34.0.0", "platform-tools"] + +removed: + description: A list of packages that have been removed.
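+ # like the installed list, this is computed before changes are applied, so it is also filled in check mode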
+ returned: when packages have changed + type: list + sample: ["build-tools;34.0.0", "platform-tools"] +""" + +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.android_sdkmanager import Package, AndroidSdkManager + + +class AndroidSdk(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent', 'latest']), + package=dict(type='list', elements='str', aliases=['pkg', 'name']), + sdk_root=dict(type='path'), + channel=dict(type='str', default='stable', choices=['stable', 'beta', 'dev', 'canary']), + accept_licenses=dict(type='bool', default=False) + ), + supports_check_mode=True + ) + + def __init_module__(self): + self.sdkmanager = AndroidSdkManager(self.module) + self.vars.set('installed', [], change=True) + self.vars.set('removed', [], change=True) + + def _parse_packages(self): + arg_pkgs = set(self.vars.package) + if len(arg_pkgs) < len(self.vars.package): + self.do_raise("Packages may not repeat") + return set(Package(p) for p in arg_pkgs) + + def state_present(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + pending_installation = packages.difference(installed) + + self.vars.installed = AndroidSdk._map_packages_to_names(pending_installation) + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(pending_installation, self.vars.accept_licenses) + if rc != 0: + self.do_raise("Could not install packages: %s" % stderr) + + def state_absent(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + to_be_deleted = packages.intersection(installed) + self.vars.removed = AndroidSdk._map_packages_to_names(to_be_deleted) + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_deleted) + if rc != 0: + self.do_raise("Could not uninstall packages: %s" % stderr) + + def state_latest(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + updatable = self.sdkmanager.get_updatable_packages() + not_installed = packages.difference(installed) + to_be_installed = not_installed.union(updatable) + self.vars.installed = AndroidSdk._map_packages_to_names(to_be_installed) + + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_installed, self.vars.accept_licenses) + if rc != 0: + self.do_raise("Could not install packages: %s" % stderr) + + @staticmethod + def _map_packages_to_names(packages): + return [x.name for x in packages] + + +def main(): + AndroidSdk.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py deleted file mode 120000 index 369d39dbe1..0000000000 --- a/plugins/modules/ansible_galaxy_install.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/ansible_galaxy_install.py \ No newline at end of file diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py new file mode 100644 index 0000000000..919dadcd9a --- /dev/null +++ b/plugins/modules/ansible_galaxy_install.py @@ -0,0 +1,330 @@ +#!/usr/bin/python +# Copyright (c) 2021, Alexei Znamensky +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import 
annotations + +DOCUMENTATION = r""" +module: ansible_galaxy_install +author: + - "Alexei Znamensky (@russoz)" +short_description: Install Ansible roles or collections using ansible-galaxy +version_added: 3.5.0 +description: + - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). +notes: + - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. + - The module tries to run using the C(C.UTF-8) locale. If that fails, it tries C(en_US.UTF-8). If that one also fails, the + module fails. +seealso: + - name: C(ansible-galaxy) command manual page + description: Manual page for the command. + link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html + +requirements: + - ansible-core 2.11 or newer +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - If O(state=present) then the collection or role is installed. Note that the collections and roles are not updated + with this option. + - Currently the O(state=latest) is ignored unless O(type=collection), and it ensures the collection is installed and + updated to the latest available version. + - Please note that O(force=true) can be used to perform upgrade regardless of O(type). + type: str + choices: [present, latest] + default: present + version_added: 9.1.0 + type: + description: + - The type of installation performed by C(ansible-galaxy). + - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. + - 'Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three + choices.' + type: str + choices: [collection, role, both] + required: true + name: + description: + - Name of the collection or role being installed. + - Versions can be specified with C(ansible-galaxy) usual formats. For example, the collection V(community.docker:1.6.1) + or the role V(ansistrano.deploy,3.8.0). + - O(name) and O(requirements_file) are mutually exclusive. + type: str + requirements_file: + description: + - Path to a file containing a list of requirements to be installed. + - It works for O(type) equals to V(collection) and V(role). + - O(name) and O(requirements_file) are mutually exclusive. + type: path + dest: + description: + - The path to the directory containing your collections or roles, according to the value of O(type). + - Please notice that C(ansible-galaxy) does not install collections with O(type=both), when O(requirements_file) contains + both roles and collections and O(dest) is specified. + type: path + no_deps: + description: + - Refrain from installing dependencies. + version_added: 4.5.0 + type: bool + default: false + force: + description: + - Force overwriting existing roles and/or collections. + - It can be used for upgrading, but the module output always reports C(changed=true). + - Using O(force=true) is mandatory when downgrading. 
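+ # for example (illustrative versions): going from community.network 5.0.0 back to 3.0.2 requires force=true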
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: community.network + +- name: Install role at specific path + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + dest: /ansible/roles + +- name: Install collections and roles together + community.general.ansible_galaxy_install: + type: both + requirements_file: requirements.yml + +- name: Force-install collection community.network at specific version + community.general.ansible_galaxy_install: + type: collection + name: community.network:3.0.2 + force: true +""" + +RETURN = r""" +type: + description: The value of the O(type) parameter. + type: str + returned: always +name: + description: The value of the O(name) parameter. + type: str + returned: always +dest: + description: The value of the O(dest) parameter. + type: str + returned: always +requirements_file: + description: The value of the O(requirements_file) parameter. + type: str + returned: always +force: + description: The value of the O(force) parameter. + type: bool + returned: always +installed_roles: + description: + - If O(requirements_file) is specified instead, returns a dictionary with all the roles installed per path. + - If O(name) is specified, returns that role name and the version installed per path. + type: dict + returned: always when installing roles + contains: + "<path>": + description: Roles and versions for that path. + type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 + baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 +installed_collections: + description: + - If O(requirements_file) is specified instead, returns a dictionary with all the collections installed per path. + - If O(name) is specified, returns that collection name and the version installed per path. + type: dict + returned: always when installing collections + contains: + "<path>": + description: Collections and versions for that path. + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 +new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 +new_roles: + description: New roles installed by this module. + returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 +version: + description: Version of ansible-core for ansible-galaxy. + type: str + returned: always + sample: 2.17.4 + version_added: 10.0.0 +""" + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException + + +class AnsibleGalaxyInstall(ModuleHelper): + _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)?
(?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?') + _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$') + _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$') + _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$') + _RE_INSTALL_OUTPUT = re.compile( + r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$' + ) + ansible_version = None + + output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps') + module = dict( + argument_spec=dict( + state=dict(type='str', choices=['present', 'latest'], default='present'), + type=dict(type='str', choices=('collection', 'role', 'both'), required=True), + name=dict(type='str'), + requirements_file=dict(type='path'), + dest=dict(type='path'), + force=dict(type='bool', default=False), + no_deps=dict(type='bool', default=False), + ), + mutually_exclusive=[('name', 'requirements_file')], + required_one_of=[('name', 'requirements_file')], + required_if=[('type', 'both', ['requirements_file'])], + supports_check_mode=False, + ) + + command = 'ansible-galaxy' + command_args_formats = dict( + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), + ) + + def _make_runner(self, lang): + return CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True) + + def _get_ansible_galaxy_version(self): + class UnsupportedLocale(ModuleHelperException): + pass + + def process(rc, out, err): + if (rc != 0 and "unsupported locale setting" in err) or (rc == 0 and "cannot change locale" in err): + raise UnsupportedLocale(msg=err) + line = out.splitlines()[0] + match = self._RE_GALAXY_VERSION.match(line) + if not match: + self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line)) + version = match.group("version") + return version + + try: + runner = self._make_runner("C.UTF-8") + with runner("version", check_rc=False, output_process=process) as ctx: + return runner, ctx.run() + except UnsupportedLocale: + runner = self._make_runner("en_US.UTF-8") + with runner("version", check_rc=True, output_process=process) as ctx: + return runner, ctx.run() + + def __init_module__(self): + self.runner, self.vars.version = self._get_ansible_galaxy_version() + self.ansible_version = tuple(int(x) for x in self.vars.version.split('.')[:3]) + if self.ansible_version < (2, 11): + self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.") + self.vars.set("new_collections", {}, change=True) + self.vars.set("new_roles", {}, change=True) + if self.vars.type != "collection": + self.vars.installed_roles = self._list_roles() + if self.vars.type != "role": + self.vars.installed_collections = self._list_collections() + + def _list_element(self, _type, path_re, elem_re): + def process(rc, out, err): + return [] if "None of the provided paths were usable" in out else out.splitlines() + + with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx: + elems = ctx.run(type=_type, galaxy_cmd='list') + + elems_dict = {} + current_path = None + for line in elems: + if line.startswith("#"): + match = path_re.match(line) + if not match: + continue + if
self.vars.dest is not None and match.group('path') != self.vars.dest: + current_path = None + continue + current_path = match.group('path') if match else None + elems_dict[current_path] = {} + + elif current_path is not None: + match = elem_re.match(line) + if not match or (self.vars.name is not None and match.group('elem') != self.vars.name): + continue + elems_dict[current_path][match.group('elem')] = match.group('version') + return elems_dict + + def _list_collections(self): + return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL) + + def _list_roles(self): + return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE) + + def __run__(self): + + def process(rc, out, err): + for line in out.splitlines(): + match = self._RE_INSTALL_OUTPUT.match(line) + if not match: + continue + if match.group("collection"): + self.vars.new_collections[match.group("collection")] = match.group("cversion") + elif match.group("role"): + self.vars.new_roles[match.group("role")] = match.group("rversion") + + upgrade = (self.vars.type == "collection" and self.vars.state == "latest") + with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) + if self.verbosity > 2: + self.vars.set("run_info", ctx.run_info) + + +def main(): + AnsibleGalaxyInstall.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py deleted file mode 120000 index ac2b8d61ae..0000000000 --- a/plugins/modules/apache2_mod_proxy.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/apache2_mod_proxy.py \ No newline at end of file diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py new file mode 100644 index 0000000000..3b06736898 --- /dev/null +++ b/plugins/modules/apache2_mod_proxy.py @@ -0,0 +1,424 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Olivier Boukili +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: apache2_mod_proxy +author: Olivier Boukili (@oboukili) +short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool +description: + - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool, using HTTP POST and GET requests. The + httpd mod_proxy balancer-member status page has to be enabled and accessible, as this module relies on parsing this page. +extends_documentation_fragment: + - community.general.attributes +requirements: + - Python package C(beautifulsoup4) +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + balancer_url_suffix: + type: str + description: + - Suffix of the balancer pool URL required to access the balancer pool status page (for example V(balancer_vhost[:port]/balancer_url_suffix)). + default: /balancer-manager/ + balancer_vhost: + type: str + description: + - (IPv4|IPv6|FQDN):port of the Apache httpd 2.4 mod_proxy balancer pool. + required: true + member_host: + type: str + description: + - (IPv4|IPv6|FQDN) of the balancer member to get or to set attributes to. Port number is autodetected and should not + be specified here. 
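+ - For example, use V(10.10.0.20) rather than V(10.10.0.20:8080).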
- If undefined, the M(community.general.apache2_mod_proxy) module returns a list of dictionaries of all the + current balancer pool members' attributes. + state: + type: list + elements: str + choices: [present, absent, enabled, disabled, drained, hot_standby, ignore_errors] + description: + - Desired state of the member host. + - States can be simultaneously invoked by separating them with a comma (for example V(state=drained,ignore_errors)), + but it is recommended to specify them as a proper YAML list. + - States V(present) and V(absent) must be used without any other state. + tls: + description: + - Use HTTPS to access the balancer management page. + type: bool + default: false + validate_certs: + description: + - Validate SSL/TLS certificates. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: Get all current balancer pool members' attributes + community.general.apache2_mod_proxy: + balancer_vhost: 10.0.0.2 + +- name: Get a specific member's attributes + community.general.apache2_mod_proxy: + balancer_vhost: myws.mydomain.org + balancer_url_suffix: /lb/ + member_host: node1.myws.mydomain.org + +# Enable all balancer pool members: +- name: Get attributes + community.general.apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + register: result + +- name: Enable all balancer pool members + community.general.apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + member_host: '{{ item.host }}' + state: present + with_items: '{{ result.members }}' + +# Gracefully disable a member from a loadbalancer node: +- name: Step 1 + community.general.apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: drained + delegate_to: myloadbalancernode + +- name: Step 2 + ansible.builtin.wait_for: + host: '{{ member.host }}' + port: '{{ member.port }}' + state: drained + delegate_to: myloadbalancernode + +- name: Step 3 + community.general.apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: absent + delegate_to: myloadbalancernode +""" + +RETURN = r""" +member: + description: Specific balancer member information dictionary, returned when the module is invoked with the O(member_host) parameter. + type: dict + returned: success + sample: + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + } +members: + description: List of member (defined above) dictionaries, returned when the module is invoked with no O(member_host) and + O(state) args.
+ returned: success + type: list + sample: + [ + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + }, + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.21", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + } + ] +""" + +import re + +from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException + +from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.urls import fetch_url + +with deps.declare("beautifulsoup4"): + from bs4 import BeautifulSoup + +# balancer member attributes extraction regexp: +EXPRESSION = re.compile(to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)")) +# Apache2 server version extraction regexp: +APACHE_VERSION_EXPRESSION = re.compile(to_text(r"SERVER VERSION: APACHE/([\d.]+)")) + + +def find_all(where, what): + return where.find_all(what) + + +def regexp_extraction(string, _regexp, groups=1): + """ Returns the capture group (default=1) specified in the regexp, applied to the string """ + regexp_search = _regexp.search(string) + if regexp_search: + if regexp_search.group(groups) != '': + return regexp_search.group(groups) + return None + + +class BalancerMember(object): + """ Apache 2.4 mod_proxy LB balancer member. + attributes: + read-only: + host -> member host (string), + management_url -> member management url (string), + protocol -> member protocol (string) + port -> member port (string), + path -> member location (string), + balancer_url -> url of this member's parent balancer (string), + attributes -> whole member attributes (dictionary) + module -> ansible module instance (AnsibleModule object). 
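+ (reading attributes triggers an HTTP GET of the member management page; see get_member_attributes below)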
+ writable: + status -> status of the member (dictionary) + """ + + def __init__(self, management_url, balancer_url, module): + self.host = regexp_extraction(management_url, EXPRESSION, 4) + self.management_url = management_url + self.protocol = regexp_extraction(management_url, EXPRESSION, 3) + self.port = regexp_extraction(management_url, EXPRESSION, 5) + self.path = regexp_extraction(management_url, EXPRESSION, 6) + self.balancer_url = balancer_url + self.module = module + + def get_member_attributes(self): + """ Returns a dictionary of a balancer member's attributes.""" + + resp, info = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url}) + + if info['status'] != 200: + raise ModuleHelperException("Could not get balancer_member_page, check for connectivity! {0}".format(info)) + + try: + soup = BeautifulSoup(resp) + except TypeError as exc: + raise ModuleHelperException("Cannot parse balancer_member_page HTML! {0}".format(exc)) from exc + + subsoup = find_all(find_all(soup, 'table')[1], 'tr') + keys = find_all(subsoup[0], 'th') + for valuesset in subsoup[1::1]: + if re.search(pattern=self.host, string=str(valuesset)): + values = find_all(valuesset, 'td') + return {keys[x].string: values[x].string for x in range(0, len(keys))} + + def get_member_status(self): + """ Returns a dictionary of a balancer member's status attributes.""" + status_mapping = {'disabled': 'Dis', + 'drained': 'Drn', + 'hot_standby': 'Stby', + 'ignore_errors': 'Ign'} + actual_status = self.attributes['Status'] + status = {mode: patt in actual_status for mode, patt in status_mapping.items()} + return status + + def set_member_status(self, values): + """ Sets a balancer member's status attributes amongst pre-mapped values.""" + values_mapping = {'disabled': '&w_status_D', + 'drained': '&w_status_N', + 'hot_standby': '&w_status_H', + 'ignore_errors': '&w_status_I'} + + request_body = regexp_extraction(self.management_url, EXPRESSION, 1) + values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in values_mapping.items()) + request_body = "{0}{1}".format(request_body, values_url) + + response, info = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url}) + if info['status'] != 200: + raise ModuleHelperException("Could not set the member status! {0} {1}".format(self.host, info['status'])) + + attributes = property(get_member_attributes) + status = property(get_member_status, set_member_status) + + def as_dict(self): + return { + "host": self.host, + "status": self.status, + "protocol": self.protocol, + "port": self.port, + "path": self.path, + "attributes": self.attributes, + "management_url": self.management_url, + "balancer_url": self.balancer_url + } + + +class Balancer(object): + """ Apache httpd 2.4 mod_proxy balancer object""" + + def __init__(self, module, host, suffix, tls=False): + proto = "https" if tls else "http" + self.base_url = '{0}://{1}'.format(proto, host) + self.url = '{0}://{1}{2}'.format(proto, host, suffix) + self.module = module + self.page = self.fetch_balancer_page() + + def fetch_balancer_page(self): + """ Returns the balancer management html page as a string for later parsing.""" + resp, info = fetch_url(self.module, self.url) + if info['status'] != 200: + raise ModuleHelperException("Could not get balancer page! 
HTTP status response: {0}".format(info['status'])) + + content = to_text(resp.read()) + apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) + if not apache_version: + raise ModuleHelperException("Could not get the Apache server version from the balancer-manager") + + if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): + raise ModuleHelperException("This module only acts on an Apache2 2.4+ instance, current Apache2 version: {0}".format(apache_version)) + return content + + def get_balancer_members(self): + """ Returns members of the balancer as a generator object for later iteration.""" + try: + soup = BeautifulSoup(self.page) + except TypeError as e: + raise ModuleHelperException("Cannot parse balancer page HTML! {0}".format(self.page)) from e + + elements = find_all(soup, 'a') + for element in elements[1::1]: + balancer_member_suffix = element.get('href') + if not balancer_member_suffix: + raise ModuleHelperException("Argument 'balancer_member_suffix' is empty!") + + yield BalancerMember(self.base_url + balancer_member_suffix, self.url, self.module) + + members = property(get_balancer_members) + + +class ApacheModProxy(ModuleHelper): + """ Initiates module.""" + module = dict( + argument_spec=dict( + balancer_vhost=dict(required=True, type='str'), + balancer_url_suffix=dict(default="/balancer-manager/", type='str'), + member_host=dict(type='str'), + state=dict(type='list', elements='str', choices=['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']), + tls=dict(default=False, type='bool'), + validate_certs=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + def __init_module__(self): + deps.validate(self.module) + + if len(self.vars.state or []) > 1 and ("present" in self.vars.state or "enabled" in self.vars.state): + self.do_raise(msg="states present/enabled are mutually exclusive with other states!") + + self.mybalancer = Balancer(self.module, self.vars.balancer_vhost, self.vars.balancer_url_suffix, tls=self.vars.tls) + + def __run__(self): + if self.vars.member_host is None: + self.vars.members = [member.as_dict() for member in self.mybalancer.members] + else: + member_exists = False + member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} + for mode in member_status: + for state in self.vars.state or []: + if mode == state: + member_status[mode] = True + elif mode == 'disabled' and state == 'absent': + member_status[mode] = True + + for member in self.mybalancer.members: + if str(member.host) == self.vars.member_host: + member_exists = True + if self.vars.state is not None: + member_status_before = member.status + if not self.check_mode: + member_status_after = member.status = member_status + else: + member_status_after = member_status + self.changed |= (member_status_before != member_status_after) + self.vars.member = member.as_dict() + + if not member_exists: + self.do_raise(msg='{0} is not a member of the balancer {1}!'.format(self.vars.member_host, self.vars.balancer_vhost)) + + +def main(): + ApacheModProxy.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py deleted file mode 120000 index 512bff4efa..0000000000 --- a/plugins/modules/apache2_module.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/apache2_module.py \ No newline at end of file diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py new file mode 100644 index 
0000000000..2421708262 --- /dev/null +++ b/plugins/modules/apache2_module.py @@ -0,0 +1,270 @@ +#!/usr/bin/python + +# Copyright (c) 2013-2014, Christian Berendt +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: apache2_module +author: + - Christian Berendt (@berendt) + - Ralf Hertel (@n0trax) + - Robin Roth (@robinro) +short_description: Enables/disables a module of the Apache2 webserver +description: + - Enables or disables a specified module of the Apache2 webserver. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the module to enable/disable as given to C(a2enmod)/C(a2dismod). + required: true + identifier: + type: str + description: + - Identifier of the module as listed by C(apache2ctl -M). This is optional and usually determined automatically by the + common convention of appending V(_module) to O(name) as well as custom exceptions for popular modules. + required: false + force: + description: + - Force disabling of default modules and override Debian warnings. + required: false + type: bool + default: false + state: + type: str + description: + - Desired state of the module. + choices: ['present', 'absent'] + default: present + ignore_configcheck: + description: + - Ignore configuration checks about inconsistent module configuration, especially for mpm_* modules. + type: bool + default: false + warn_mpm_absent: + description: + - Control the behavior of the warning process for MPM modules. + type: bool + default: true + version_added: 6.3.0 +requirements: ["a2enmod", "a2dismod"] +notes: + - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. Whether it works + on others depends on whether the C(a2enmod) and C(a2dismod) tools are available or not. +""" + +EXAMPLES = r""" +- name: Enable the Apache2 module wsgi + community.general.apache2_module: + state: present + name: wsgi + +- name: Disable the Apache2 module wsgi + community.general.apache2_module: + state: absent + name: wsgi + +- name: Disable default modules for Debian + community.general.apache2_module: + state: absent + name: autoindex + force: true + +- name: Disable mpm_worker and ignore warnings about missing mpm module + community.general.apache2_module: + state: absent + name: mpm_worker + ignore_configcheck: true + +- name: Disable mpm_event, enable mpm_prefork and ignore warnings about missing mpm module + community.general.apache2_module: + name: "{{ item.module }}" + state: "{{ item.state }}" + warn_mpm_absent: false + ignore_configcheck: true + loop: + - module: mpm_event + state: absent + - module: mpm_prefork + state: present + +- name: Enable dump_io module, which is identified as dumpio_module inside apache2 + community.general.apache2_module: + state: present + name: dump_io + identifier: dumpio_module +""" + +RETURN = r""" +result: + description: Message about action taken.
+ returned: always + type: str +""" + +import re + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + +_re_threaded = re.compile(r'threaded: *yes') + + +def _run_threaded(module): + control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command([control_binary, "-V"]) + + return bool(_re_threaded.search(stdout)) + + +def _get_ctl_binary(module): + for command in ['apache2ctl', 'apachectl']: + ctl_binary = module.get_bin_path(command) + if ctl_binary is not None: + return ctl_binary + + module.fail_json(msg="Neither of apache2ctl nor apachectl found. At least one apache control binary is necessary.") + + +def _module_is_enabled(module): + control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command([control_binary, "-M"]) + + if result != 0: + error_msg = "Error executing %s: %s" % (control_binary, stderr) + if module.params['ignore_configcheck']: + if 'AH00534' in stderr and 'mpm_' in module.params['name']: + if module.params['warn_mpm_absent']: + module.warn( + "No MPM module loaded! apache2 reload AND other module actions" + " will fail if no MPM module is loaded immediately." + ) + else: + module.warn(error_msg) + return False + else: + module.fail_json(msg=error_msg) + + searchstring = ' ' + module.params['identifier'] + return searchstring in stdout + + +def create_apache_identifier(name): + """ + By convention if a module is loaded via name, it appears in apache2ctl -M as + name_module. + + Some modules don't follow this convention and we use replacements for those.""" + + # a2enmod name replacement to apache2ctl -M names + text_workarounds = [ + ('shib', 'mod_shib'), + ('shib2', 'mod_shib'), + ('evasive', 'evasive20_module'), + ] + + # re expressions to extract subparts of names + re_workarounds = [ + ('php8', re.compile(r'^(php)[\d\.]+')), + ('php', re.compile(r'^(php\d)\.')), + ] + + for a2enmod_spelling, module_name in text_workarounds: + if a2enmod_spelling in name: + return module_name + + for search, reexpr in re_workarounds: + if search in name: + try: + rematch = reexpr.search(name) + return rematch.group(1) + '_module' + except AttributeError: + pass + + return name + '_module' + + +def _set_state(module, state): + name = module.params['name'] + force = module.params['force'] + + want_enabled = state == 'present' + state_string = {'present': 'enabled', 'absent': 'disabled'}[state] + a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state] + success_msg = "Module %s %s" % (name, state_string) + + if _module_is_enabled(module) != want_enabled: + if module.check_mode: + module.exit_json(changed=True, result=success_msg) + + a2mod_binary_path = module.get_bin_path(a2mod_binary) + if a2mod_binary_path is None: + module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + + a2mod_binary_cmd = [a2mod_binary_path] + + if not want_enabled and force: + # force exists only for a2dismod on debian + a2mod_binary_cmd.append('-f') + + result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) + + if _module_is_enabled(module) == want_enabled: + module.exit_json(changed=True, result=success_msg) + else: + msg = ( + 'Failed to set module {name} to {state}:\n' + '{stdout}\n' + 'Maybe the module identifier ({identifier}) was guessed incorrectly.' + 'Consider setting the "identifier" option.' 
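+            # A wrong identifier guess from create_apache_identifier() makes apache2ctl -M
+            # miss the module, so the state change above appears to have failed even when
+            # a2enmod/a2dismod itself succeeded.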
+ ).format( + name=name, + state=state_string, + stdout=stdout, + identifier=module.params['identifier'] + ) + module.fail_json(msg=msg, + rc=result, + stdout=stdout, + stderr=stderr) + else: + module.exit_json(changed=False, result=success_msg) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + identifier=dict(type='str'), + force=dict(type='bool', default=False), + state=dict(default='present', choices=['absent', 'present']), + ignore_configcheck=dict(type='bool', default=False), + warn_mpm_absent=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + name = module.params['name'] + if name == 'cgi' and module.params['state'] == 'present' and _run_threaded(module): + module.fail_json(msg="Your MPM seems to be threaded, therefore enabling cgi module is not allowed.") + + if not module.params['identifier']: + module.params['identifier'] = create_apache_identifier(module.params['name']) + + if module.params['state'] in ['present', 'absent']: + _set_state(module, module.params['state']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py deleted file mode 120000 index edf7717fbe..0000000000 --- a/plugins/modules/apk.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/apk.py \ No newline at end of file diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py new file mode 100644 index 0000000000..d16635e3b4 --- /dev/null +++ b/plugins/modules/apk.py @@ -0,0 +1,382 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Kevin Brebanov +# Based on pacman (Afterburn , Aaron Bull Schaefer ) +# and apt (Matthew Williams ) modules. +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: apk +short_description: Manages apk packages +description: + - Manages C(apk) packages for Alpine Linux. +author: "Kevin Brebanov (@kbrebanov)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + available: + description: + - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead + of holding them) if the currently installed package is no longer available from any repository. + type: bool + default: false + name: + description: + - A package name, like V(foo), or multiple packages, like V(foo,bar). + - Do not include additional whitespace when specifying multiple packages as a string. Prefer YAML lists over comma-separating + multiple package names. + type: list + elements: str + no_cache: + description: + - Do not use any local cache path. + type: bool + default: false + version_added: 1.0.0 + repository: + description: + - A package repository or multiple repositories. Unlike with the underlying apk command, this list overrides the system + repositories rather than supplement them. + type: list + elements: str + state: + description: + - Indicates the desired package(s) state. + - V(present) ensures the package(s) is/are present. V(installed) can be used as an alias. + - V(absent) ensures the package(s) is/are absent. V(removed) can be used as an alias. + - V(latest) ensures the package(s) is/are present and the latest version(s). 
+ default: present + choices: ["present", "absent", "latest", "installed", "removed"] + type: str + update_cache: + description: + - Update repository indexes. Can be run with other steps or on its own. + type: bool + default: false + upgrade: + description: + - Upgrade all installed packages to their latest version. + type: bool + default: false + world: + description: + - Use a custom world file when checking for explicitly installed packages. The file is used only when a value is provided + for O(name), and O(state) is set to V(present) or V(latest). + type: str + default: /etc/apk/world + version_added: 5.4.0 +notes: + - O(name) and O(upgrade) are mutually exclusive. + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. +""" + +EXAMPLES = r""" +- name: Update repositories and install foo package + community.general.apk: + name: foo + update_cache: true + +- name: Update repositories and install foo and bar packages + community.general.apk: + name: foo,bar + update_cache: true + +- name: Remove foo package + community.general.apk: + name: foo + state: absent + +- name: Remove foo and bar packages + community.general.apk: + name: foo,bar + state: absent + +- name: Install the package foo + community.general.apk: + name: foo + state: present + +- name: Install the packages foo and bar + community.general.apk: + name: foo,bar + state: present + +- name: Update repositories and update package foo to latest version + community.general.apk: + name: foo + state: latest + update_cache: true + +- name: Update repositories and update packages foo and bar to latest versions + community.general.apk: + name: foo,bar + state: latest + update_cache: true + +- name: Update all installed packages to the latest versions + community.general.apk: + upgrade: true + +- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available + community.general.apk: + available: true + upgrade: true + +- name: Update repositories as a separate step + community.general.apk: + update_cache: true + +- name: Install package from a specific repository + community.general.apk: + name: foo + state: latest + update_cache: true + repository: http://dl-3.alpinelinux.org/alpine/edge/main + +- name: Install package without using cache + community.general.apk: + name: foo + state: latest + no_cache: true + +- name: Install package checking a custom world + community.general.apk: + name: foo + state: latest + world: /etc/apk/world.custom +""" + +RETURN = r""" +packages: + description: A list of packages that have been changed. + returned: when packages have changed + type: list + sample: ["package", "other-package"] +""" + +import re +# Import module snippets. 
+from ansible.module_utils.basic import AnsibleModule + + +def parse_for_packages(stdout): + packages = [] + data = stdout.split('\n') + regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)') + for l in data: + p = regex.search(l) + if p: + packages.append(p.group(1)) + return packages + + +def update_package_db(module, exit): + cmd = APK_PATH + ["update"] + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc != 0: + module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) + elif exit: + module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr) + else: + return True + + +def query_toplevel(module, name, world): + # world contains a list of top-level packages separated by ' ' or \n + # packages may contain repository (@) or version (=<>~) separator characters or start with negation ! + regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$') + with open(world) as f: + content = f.read().split() + for p in content: + if regex.search(p): + return True + return False + + +def query_package(module, name): + cmd = APK_PATH + ["-v", "info", "--installed", name] + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + return True + else: + return False + + +def query_latest(module, name): + cmd = APK_PATH + ["version", name] + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name)) + match = re.search(search_pattern, stdout) + if match and match.group(2) == "<": + return False + return True + + +def query_virtual(module, name): + cmd = APK_PATH + ["-v", "info", "--description", name] + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = r"^%s: virtual meta package" % (re.escape(name)) + if re.search(search_pattern, stdout): + return True + return False + + +def get_dependencies(module, name): + cmd = APK_PATH + ["-v", "info", "--depends", name] + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + dependencies = stdout.split() + if len(dependencies) > 1: + return dependencies[1:] + else: + return [] + + +def upgrade_packages(module, available): + if module.check_mode: + cmd = APK_PATH + ["upgrade", "--simulate"] + else: + cmd = APK_PATH + ["upgrade"] + if available: + cmd.append("--available") + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + if rc != 0: + module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist) + if re.search(r'^OK', stdout): + module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist) + + +def install_packages(module, names, state, world): + upgrade = False + to_install = [] + to_upgrade = [] + for name in names: + # Check if virtual package + if query_virtual(module, name): + # Get virtual package dependencies + dependencies = get_dependencies(module, name) + for dependency in dependencies: + if state == 'latest' and not query_latest(module, dependency): + to_upgrade.append(dependency) + else: + if not query_toplevel(module, name, world): + to_install.append(name) + elif state == 'latest' and not query_latest(module, name): + to_upgrade.append(name) + if to_upgrade: + upgrade = True + if not to_install and not upgrade: + module.exit_json(changed=False, msg="package(s) already 
installed") + packages = to_install + to_upgrade + if upgrade: + if module.check_mode: + cmd = APK_PATH + ["add", "--upgrade", "--simulate"] + packages + else: + cmd = APK_PATH + ["add", "--upgrade"] + packages + else: + if module.check_mode: + cmd = APK_PATH + ["add", "--simulate"] + packages + else: + cmd = APK_PATH + ["add"] + packages + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + if rc != 0: + module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) + + +def remove_packages(module, names): + installed = [] + for name in names: + if query_package(module, name): + installed.append(name) + if not installed: + module.exit_json(changed=False, msg="package(s) already removed") + names = installed + if module.check_mode: + cmd = APK_PATH + ["del", "--purge", "--simulate"] + names + else: + cmd = APK_PATH + ["del", "--purge"] + names + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + # Check to see if packages are still present because of dependencies + for name in installed: + if query_package(module, name): + rc = 1 + break + if rc != 0: + module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) + +# ========================================== +# Main control flow. + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']), + name=dict(type='list', elements='str'), + no_cache=dict(default=False, type='bool'), + repository=dict(type='list', elements='str'), + update_cache=dict(default=False, type='bool'), + upgrade=dict(default=False, type='bool'), + available=dict(default=False, type='bool'), + world=dict(default='/etc/apk/world', type='str'), + ), + required_one_of=[['name', 'update_cache', 'upgrade']], + mutually_exclusive=[['name', 'upgrade']], + supports_check_mode=True + ) + + # Set LANG env since we parse stdout + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + global APK_PATH + APK_PATH = [module.get_bin_path('apk', required=True)] + + p = module.params + + if p['name'] and any(not name.strip() for name in p['name']): + module.fail_json(msg="Package name(s) cannot be empty or whitespace-only") + + if p['no_cache']: + APK_PATH.append("--no-cache") + + # add repositories to the APK_PATH + if p['repository']: + for r in p['repository']: + APK_PATH.extend(["--repository", r, "--repositories-file", "/dev/null"]) + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + if p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p['update_cache']: + update_package_db(module, not p['name'] and not p['upgrade']) + + if p['upgrade']: + upgrade_packages(module, p['available']) + + if p['state'] in ['present', 'latest']: + install_packages(module, p['name'], p['state'], p['world']) + elif p['state'] == 'absent': + remove_packages(module, p['name']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/apt_repo.py b/plugins/modules/apt_repo.py deleted file mode 120000 index 28dbfb9c5f..0000000000 
--- a/plugins/modules/apt_repo.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/apt_repo.py \ No newline at end of file diff --git a/plugins/modules/apt_repo.py b/plugins/modules/apt_repo.py new file mode 100644 index 0000000000..3d6da796b6 --- /dev/null +++ b/plugins/modules/apt_repo.py @@ -0,0 +1,151 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Mikhail Gordeev + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: apt_repo +short_description: Manage APT repositories using C(apt-repo) +description: + - Manages APT repositories using the C(apt-repo) tool. + - See U(https://www.altlinux.org/Apt-repo) for details about C(apt-repo). +notes: + - This module works on ALT-based distros. + - Does NOT support check mode, due to a limitation in the C(apt-repo) tool. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + repo: + description: + - Name of the repository to add or remove. + required: true + type: str + state: + description: + - Indicates the desired repository state. + choices: [absent, present] + default: present + type: str + remove_others: + description: + - Remove all repositories other than the added one. + - Used if O(state=present). + type: bool + default: false + update: + description: + - Update the package database after changing repositories. + type: bool + default: false +author: + - Mikhail Gordeev (@obirvalger) +""" + +EXAMPLES = r""" +- name: Remove all repositories + community.general.apt_repo: + repo: all + state: absent + +- name: Add repository `Sisyphus` and remove other repositories + community.general.apt_repo: + repo: Sisyphus + state: present + remove_others: true + +- name: Add local repository `/space/ALT/Sisyphus` and update package cache + community.general.apt_repo: + repo: copy:///space/ALT/Sisyphus + state: present + update: true +""" + +RETURN = """ # """ + +import os + +from ansible.module_utils.basic import AnsibleModule + +APT_REPO_PATH = "/usr/bin/apt-repo" + + +def apt_repo(module, *args): + """run apt-repo with args and return its output""" + # make args list to use in concatenation + args = list(args) + rc, out, err = module.run_command([APT_REPO_PATH] + args) + + if rc != 0: + module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err)) + + return out + + +def add_repo(module, repo): + """add a repository""" + apt_repo(module, 'add', repo) + + +def rm_repo(module, repo): + """remove a repository""" + apt_repo(module, 'rm', repo) + + +def set_repo(module, repo): + """add a repository and remove other repositories""" + # first add to validate repository + apt_repo(module, 'add', repo) + apt_repo(module, 'rm', 'all') + apt_repo(module, 'add', repo) + + +def update(module): + """update package cache""" + apt_repo(module, 'update') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repo=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + remove_others=dict(type='bool', default=False), + update=dict(type='bool', default=False), + ), + ) + + if not os.path.exists(APT_REPO_PATH): + module.fail_json(msg='cannot find /usr/bin/apt-repo') + + params = module.params + repo = params['repo'] + state = params['state'] + old_repositories = apt_repo(module) + + if state == 'present': + if params['remove_others']:
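+            # set_repo() adds the repository first so an invalid one fails before
+            # anything is removed, then drops all repositories and re-adds the new one.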
set_repo(module, repo) + else: + add_repo(module, repo) + elif state == 'absent': + rm_repo(module, repo) + + if params['update']: + update(module) + + new_repositories = apt_repo(module) + changed = old_repositories != new_repositories + module.exit_json(changed=changed, repo=repo, state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py deleted file mode 120000 index e4524e574c..0000000000 --- a/plugins/modules/apt_rpm.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/apt_rpm.py \ No newline at end of file diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py new file mode 100644 index 0000000000..0c64385b1d --- /dev/null +++ b/plugins/modules/apt_rpm.py @@ -0,0 +1,345 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Evgenii Terechkov +# Written by Evgenii Terechkov +# Based on urpmi module written by Philippe Makowski + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: apt_rpm +short_description: APT-RPM package manager +description: + - Manages packages with C(apt-rpm). Both low-level (C(rpm)) and high-level (C(apt-get)) package manager binaries required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + package: + description: + - List of packages to install, upgrade, or remove. + - Since community.general 8.0.0, may include paths to local C(.rpm) files if O(state=installed) or O(state=present), + requires C(rpm) Python module. + aliases: [name, pkg] + type: list + elements: str + state: + description: + - Indicates the desired package state. + - The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0. + - Please note before community.general 11.0.0, V(present) and V(installed) were equivalent to V(latest). This changed + in community.general 11.0.0. Now they are equivalent to V(present_not_latest). + choices: + - absent + - present + - present_not_latest + - installed + - removed + - latest + default: present + type: str + update_cache: + description: + - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as + a separate step. + - Default is not to update the cache. + type: bool + default: false + clean: + description: + - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything + but the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/). + - Can be run as part of the package installation (clean runs before install) or as a separate step. + type: bool + default: false + version_added: 6.5.0 + dist_upgrade: + description: + - If true performs an C(apt-get dist-upgrade) to upgrade system. + type: bool + default: false + version_added: 6.5.0 + update_kernel: + description: + - If true performs an C(update-kernel) to upgrade kernel packages. + type: bool + default: false + version_added: 6.5.0 +requirements: + - C(rpm) Python package (rpm bindings), optional. Required if O(package) option includes local files. 
+author: + - Evgenii Terechkov (@evgkrsk) +""" + +EXAMPLES = r""" +- name: Install package foo + community.general.apt_rpm: + pkg: foo + state: present + +- name: Install packages foo and bar + community.general.apt_rpm: + pkg: + - foo + - bar + state: present + +- name: Remove package foo + community.general.apt_rpm: + pkg: foo + state: absent + +- name: Remove packages foo and bar + community.general.apt_rpm: + pkg: foo,bar + state: absent + +# bar will be updated if a newer version exists +- name: Update the package database and install bar + community.general.apt_rpm: + name: bar + state: present + update_cache: true + +- name: Run the equivalent of "apt-get clean" as a separate step + community.general.apt_rpm: + clean: true + +- name: Perform cache update and complete system upgrade (includes kernel) + community.general.apt_rpm: + update_cache: true + dist_upgrade: true + update_kernel: true +""" + +import os +import re +import traceback + +from ansible.module_utils.basic import ( + AnsibleModule, + missing_required_lib, +) +from ansible.module_utils.common.text.converters import to_native + +try: + import rpm +except ImportError: + HAS_RPM_PYTHON = False + RPM_PYTHON_IMPORT_ERROR = traceback.format_exc() +else: + HAS_RPM_PYTHON = True + RPM_PYTHON_IMPORT_ERROR = None + +APT_CACHE = "/usr/bin/apt-cache" +APT_PATH = "/usr/bin/apt-get" +RPM_PATH = "/usr/bin/rpm" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed" +UPDATE_KERNEL_ZERO = "\nTry to install new kernel " + + +def local_rpm_package_name(path): + """return package name of a local rpm passed in. + Inspired by ansible.builtin.yum""" + + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) + fd = os.open(path, os.O_RDONLY) + try: + header = ts.hdrFromFdno(fd) + except rpm.error as e: + return None + finally: + os.close(fd) + + return to_native(header[rpm.RPMTAG_NAME]) + + +def query_package(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rc, out, err = module.run_command([RPM_PATH, "-q", name]) + if rc == 0: + return True + else: + return False + + +def check_package_version(module, name): + # compare installed and candidate versions + # if the newest version is already installed return True + # otherwise return False + + rc, out, err = module.run_command([APT_CACHE, "policy", name], environ_update={"LANG": "C"}) + installed = re.split("\n |: ", out)[2] + candidate = re.split("\n |: ", out)[4] + if installed >= candidate: + return True + return False + + +def query_package_provides(module, name, allow_upgrade=False): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + if name.endswith('.rpm'): + # Likely a local RPM file + if not HAS_RPM_PYTHON: + module.fail_json( + msg=missing_required_lib('rpm'), + exception=RPM_PYTHON_IMPORT_ERROR, + ) + + name = local_rpm_package_name(name) + + rc, out, err = module.run_command([RPM_PATH, "-q", "--provides", name]) + if rc == 0: + if not allow_upgrade: + return True + if check_package_version(module, name): + return True + return False + + +def update_package_db(module): + rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"}) + return (False, update_out) + + +def dir_size(module, path): + total_size = 0 + for path, dirs, files in os.walk(path): + for f in files: + total_size += os.path.getsize(os.path.join(path, f)) + return total_size + + +def clean(module): + t = dir_size(module, "/var/cache/apt/archives") + rc, out, err = module.run_command([APT_PATH,
"clean"], check_rc=True) + return (t != dir_size(module, "/var/cache/apt/archives"), out) + + +def dist_upgrade(module): + rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"}) + return (APT_GET_ZERO not in out, out) + + +def update_kernel(module): + rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"}) + return (UPDATE_KERNEL_ZERO not in out, out) + + +def remove_packages(module, packages): + + if packages is None: + return (False, "Empty package list") + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc, out, err = module.run_command([APT_PATH, "-y", "remove", package], environ_update={"LANG": "C"}) + + if rc != 0: + module.fail_json(msg="failed to remove %s: %s" % (package, err)) + + remove_c += 1 + + if remove_c > 0: + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgspec, allow_upgrade=False): + + if pkgspec is None: + return (False, "Empty package list") + + packages = [] + for package in pkgspec: + if not query_package_provides(module, package, allow_upgrade=allow_upgrade): + packages.append(package) + + if packages: + command = [APT_PATH, "-y", "install"] + packages + rc, out, err = module.run_command(command, environ_update={"LANG": "C"}) + + installed = True + for package in pkgspec: + if not query_package_provides(module, package, allow_upgrade=False): + installed = False + + # apt-rpm always have 0 for exit code if --force is used + if rc or not installed: + module.fail_json(msg="'%s' failed: %s" % (" ".join(command), err)) + else: + return (True, "%s present(s)" % packages) + else: + return (False, "Nothing to install") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed', 'present_not_latest', 'latest']), + update_cache=dict(type='bool', default=False), + clean=dict(type='bool', default=False), + dist_upgrade=dict(type='bool', default=False), + update_kernel=dict(type='bool', default=False), + package=dict(type='list', elements='str', aliases=['name', 'pkg']), + ), + ) + + if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): + module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") + + p = module.params + + modified = False + output = "" + + if p['update_cache']: + update_package_db(module) + + if p['clean']: + (m, out) = clean(module) + modified = modified or m + + if p['dist_upgrade']: + (m, out) = dist_upgrade(module) + modified = modified or m + output += out + + if p['update_kernel']: + (m, out) = update_kernel(module) + modified = modified or m + output += out + + packages = p['package'] + if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']: + (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest') + modified = modified or m + output += out + + if p['state'] in ['absent', 'removed']: + (m, out) = remove_packages(module, packages) + modified = modified or m + output += out + + # Return total modification status and output of all commands + module.exit_json(changed=modified, msg=output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py 
deleted file mode 120000 index 9b04727966..0000000000 --- a/plugins/modules/archive.py +++ /dev/null @@ -1 +0,0 @@ -./files/archive.py \ No newline at end of file diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py new file mode 100644 index 0000000000..fe850391c8 --- /dev/null +++ b/plugins/modules/archive.py @@ -0,0 +1,644 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Ben Doherty +# Sponsored by Oomph, Inc. http://www.oomphinc.com +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: archive +short_description: Creates a compressed archive of one or more files or trees +extends_documentation_fragment: + - files + - community.general.attributes +description: + - Creates or extends an archive. + - The source and archive are on the target host, and the archive I(is not) copied to the controller host. + - Source files can be deleted after archival by specifying O(remove=True). +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. + type: list + elements: path + required: true + format: + description: + - The type of compression to use. + type: str + choices: [bz2, gz, tar, xz, zip] + default: gz + dest: + description: + - The file name of the destination archive. The parent directory must exist on the remote host. + - This is required when O(path) refers to multiple files by either specifying a glob, a directory, or multiple paths + in a list. + - If the destination archive already exists, it is truncated and overwritten. + type: path + exclude_path: + description: + - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the O(path) list and glob + expansion. + - Use O(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the O(path) list. + type: list + elements: path + default: [] + exclusion_patterns: + description: + - Glob-style patterns to exclude files or directories from the resulting archive. + - This differs from O(exclude_path), which applies only to the source paths from O(path). + type: list + elements: path + version_added: 3.2.0 + force_archive: + description: + - Allows you to force the module to treat this as an archive even if only a single file is specified. + - By default, when a single file is specified, it is compressed only (not archived). + - Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module. + type: bool + default: false + remove: + description: + - Remove any added source files and trees after adding to archive. + type: bool + default: false +notes: + - Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives. + - This module uses the C(tarfile), C(zipfile), C(gzip), C(bz2), and C(lzma) packages on the target host to create archives. These are + part of the Python standard library.
+seealso: + - module: ansible.builtin.unarchive +author: + - Ben Doherty (@bendoh) +""" + +EXAMPLES = r""" +- name: Compress directory /path/to/foo/ into /path/to/foo.tgz + community.general.archive: + path: /path/to/foo + dest: /path/to/foo.tgz + +- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it + community.general.archive: + path: /path/to/foo + remove: true + +- name: Create a zip archive of /path/to/foo + community.general.archive: + path: /path/to/foo + format: zip + +- name: Create a bz2 archive of multiple files, rooted at /path + community.general.archive: + path: + - /path/to/foo + - /path/wong/foo + dest: /path/file.tar.bz2 + format: bz2 + +- name: Create a bz2 archive of a globbed path, while excluding specific dirnames + community.general.archive: + path: + - /path/to/foo/* + dest: /path/file.tar.bz2 + exclude_path: + - /path/to/foo/bar + - /path/to/foo/baz + format: bz2 + +- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames + community.general.archive: + path: + - /path/to/foo/* + dest: /path/file.tar.bz2 + exclude_path: + - /path/to/foo/ba* + format: bz2 + +- name: Use gzip to compress a single archive (i.e don't archive it first with tar) + community.general.archive: + path: /path/to/foo/single.file + dest: /path/file.gz + format: gz + +- name: Create a tar.gz archive of a single file. + community.general.archive: + path: /path/to/foo/single.file + dest: /path/file.tar.gz + format: gz + force_archive: true +""" + +RETURN = r""" +state: + description: The state of the input O(path). + type: str + returned: always +dest_state: + description: + - The state of the O(dest) file. + - V(absent) when the file does not exist. + - V(archive) when the file is an archive. + - V(compress) when the file is compressed, but not an archive. + - V(incomplete) when the file is an archive, but some files under O(path) were not found. + type: str + returned: success + version_added: 3.4.0 +missing: + description: Any files that were missing from the source. + type: list + returned: success +archived: + description: Any files that were compressed or added to the archive. + type: list + returned: success +arcroot: + description: The archive root. + type: str + returned: always +expanded_paths: + description: The list of matching paths from paths argument. + type: list + returned: always +expanded_exclude_paths: + description: The list of matching exclude paths from the exclude_path argument. + type: list + returned: always +""" + +import abc +import bz2 +import glob +import gzip +import io +import lzma +import os +import re +import shutil +import tarfile +import zipfile +from fnmatch import fnmatch +from traceback import format_exc +from zipfile import BadZipFile +from zlib import crc32 + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +STATE_ABSENT = 'absent' +STATE_ARCHIVED = 'archive' +STATE_COMPRESSED = 'compress' +STATE_INCOMPLETE = 'incomplete' + + +def common_path(paths): + empty = b'' if paths and isinstance(paths[0], bytes) else '' + + return os.path.join( + os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty + ) + + +def expand_paths(paths): + expanded_path = [] + is_globby = False + for path in paths: + b_path = _to_bytes(path) + if b'*' in b_path or b'?' 
in b_path: + e_paths = glob.glob(b_path) + is_globby = True + else: + e_paths = [b_path] + expanded_path.extend(e_paths) + return expanded_path, is_globby + + +def matches_exclusion_patterns(path, exclusion_patterns): + return any(fnmatch(path, p) for p in exclusion_patterns) + + +def is_archive(path): + return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + + +def strip_prefix(prefix, string): + return string[len(prefix):] if string.startswith(prefix) else string + + +def _to_bytes(s): + return to_bytes(s, errors='surrogate_or_strict') + + +def _to_native(s): + return to_native(s, errors='surrogate_or_strict') + + +def _to_native_ascii(s): + return to_native(s, errors='surrogate_or_strict', encoding='ascii') + + +class Archive(object, metaclass=abc.ABCMeta): + def __init__(self, module): + self.module = module + + self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None + self.exclusion_patterns = module.params['exclusion_patterns'] or [] + self.format = module.params['format'] + self.must_archive = module.params['force_archive'] + self.remove = module.params['remove'] + + self.changed = False + self.destination_state = STATE_ABSENT + self.errors = [] + self.file = None + self.successes = [] + self.targets = [] + self.not_found = [] + + paths = module.params['path'] + self.expanded_paths, has_globs = expand_paths(paths) + self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] + + self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) + + if not self.paths: + module.fail_json( + path=', '.join(paths), + expanded_paths=_to_native(b', '.join(self.expanded_paths)), + expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), + msg='Error, no source paths were found' + ) + + self.root = common_path(self.paths) + + if not self.must_archive: + self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) + + if not self.destination and not self.must_archive: + self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format)) + + if self.must_archive and not self.destination: + module.fail_json( + dest=_to_native(self.destination), + path=', '.join(paths), + msg='Error, must specify "dest" when archiving multiple files or trees' + ) + + if self.remove: + self._check_removal_safety() + + self.original_checksums = self.destination_checksums() + self.original_size = self.destination_size() + + def add(self, path, archive_name): + try: + self._add(_to_native_ascii(path), _to_native(archive_name)) + if self.contains(_to_native(archive_name)): + self.successes.append(path) + except Exception as e: + self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) + + def add_single_target(self, path): + if self.format in ('zip', 'tar'): + self.open() + self.add(path, strip_prefix(self.root, path)) + self.close() + self.destination_state = STATE_ARCHIVED + else: + try: + f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') + with open(path, 'rb') as f_in: + shutil.copyfileobj(f_in, f_out) + f_out.close() + self.successes.append(path) + self.destination_state = STATE_COMPRESSED + except (IOError, OSError) as e: + self.module.fail_json( + path=_to_native(path), + dest=_to_native(self.destination), + msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() + ) + + def add_targets(self): + self.open() + try: + for target in self.targets: + if os.path.isdir(target): + for 
directory_path, directory_names, file_names in os.walk(target, topdown=True): + for directory_name in directory_names: + full_path = os.path.join(directory_path, directory_name) + self.add(full_path, strip_prefix(self.root, full_path)) + + for file_name in file_names: + full_path = os.path.join(directory_path, file_name) + self.add(full_path, strip_prefix(self.root, full_path)) + else: + self.add(target, strip_prefix(self.root, target)) + except Exception as e: + if self.format in ('zip', 'tar'): + archive_format = self.format + else: + archive_format = 'tar.' + self.format + self.module.fail_json( + msg='Error when writing %s archive at %s: %s' % ( + archive_format, _to_native(self.destination), _to_native(e) + ), + exception=format_exc() + ) + self.close() + + if self.errors: + self.module.fail_json( + msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) + ) + + def is_different_from_original(self): + if self.original_checksums is None: + return self.original_size != self.destination_size() + else: + return self.original_checksums != self.destination_checksums() + + def destination_checksums(self): + if self.destination_exists() and self.destination_readable(): + return self._get_checksums(self.destination) + return None + + def destination_exists(self): + return self.destination and os.path.exists(self.destination) + + def destination_readable(self): + return self.destination and os.access(self.destination, os.R_OK) + + def destination_size(self): + return os.path.getsize(self.destination) if self.destination_exists() else 0 + + def find_targets(self): + for path in self.paths: + if not os.path.lexists(path): + self.not_found.append(path) + else: + self.targets.append(path) + + def has_targets(self): + return bool(self.targets) + + def has_unfound_targets(self): + return bool(self.not_found) + + def remove_single_target(self, path): + try: + os.remove(path) + except OSError as e: + self.module.fail_json( + path=_to_native(path), + msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() + ) + + def remove_targets(self): + for path in self.successes: + if os.path.exists(path): + try: + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + except OSError: + self.errors.append(_to_native(path)) + for path in self.paths: + try: + if os.path.isdir(path): + shutil.rmtree(path) + except OSError: + self.errors.append(_to_native(path)) + + if self.errors: + self.module.fail_json( + dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors + ) + + def update_permissions(self): + file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination) + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + @property + def result(self): + return { + 'archived': [_to_native(p) for p in self.successes], + 'dest': _to_native(self.destination), + 'dest_state': self.destination_state, + 'changed': self.changed, + 'arcroot': _to_native(self.root), + 'missing': [_to_native(p) for p in self.not_found], + 'expanded_paths': [_to_native(p) for p in self.expanded_paths], + 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], + } + + def _check_removal_safety(self): + for path in self.paths: + if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): + self.module.fail_json( + path=b', '.join(self.paths), + msg='Error, created archive can not be contained in source paths when 
remove=true' + ) + + def _open_compressed_file(self, path, mode): + f = None + if self.format == 'gz': + f = gzip.open(path, mode) + elif self.format == 'bz2': + f = bz2.BZ2File(path, mode) + elif self.format == 'xz': + f = lzma.LZMAFile(path, mode) + else: + self.module.fail_json(msg="%s is not a valid format" % self.format) + + return f + + @abc.abstractmethod + def close(self): + pass + + @abc.abstractmethod + def contains(self, name): + pass + + @abc.abstractmethod + def open(self): + pass + + @abc.abstractmethod + def _add(self, path, archive_name): + pass + + @abc.abstractmethod + def _get_checksums(self, path): + pass + + +class ZipArchive(Archive): + def __init__(self, module): + super(ZipArchive, self).__init__(module) + + def close(self): + self.file.close() + + def contains(self, name): + try: + self.file.getinfo(name) + except KeyError: + return False + return True + + def open(self): + self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) + + def _add(self, path, archive_name): + if not matches_exclusion_patterns(path, self.exclusion_patterns): + self.file.write(path, archive_name) + + def _get_checksums(self, path): + try: + archive = zipfile.ZipFile(_to_native_ascii(path), 'r') + checksums = set((info.filename, info.CRC) for info in archive.infolist()) + archive.close() + except BadZipFile: + checksums = set() + return checksums + + +class TarArchive(Archive): + def __init__(self, module): + super(TarArchive, self).__init__(module) + self.fileIO = None + + def close(self): + self.file.close() + if self.format == 'xz': + with lzma.open(_to_native(self.destination), 'wb') as f: + f.write(self.fileIO.getvalue()) + self.fileIO.close() + + def contains(self, name): + try: + self.file.getmember(name) + except KeyError: + return False + return True + + def open(self): + if self.format in ('gz', 'bz2'): + self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) + # python3 tarfile module allows xz format but for python2 we have to create the tarfile + # in memory and then compress it with lzma. 
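+            # The BytesIO buffer created for xz below is flushed to disk in close(),
+            # where the plain tar stream is compressed with lzma.open().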
+ elif self.format == 'xz': + self.fileIO = io.BytesIO() + self.file = tarfile.open(fileobj=self.fileIO, mode='w') + elif self.format == 'tar': + self.file = tarfile.open(_to_native_ascii(self.destination), 'w') + else: + self.module.fail_json(msg="%s is not a valid archive format" % self.format) + + def _add(self, path, archive_name): + def filter(tarinfo): + return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo + + self.file.add(path, archive_name, recursive=False, filter=filter) + + def _get_checksums(self, path): + LZMAError = lzma.LZMAError + + try: + if self.format == 'xz': + with lzma.open(_to_native_ascii(path), 'r') as f: + archive = tarfile.open(fileobj=f) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + else: + archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + except (LZMAError, tarfile.ReadError, tarfile.CompressionError): + try: + # The python implementations of gzip, bz2, and lzma do not support restoring compressed files + # to their original names so only file checksum is returned + f = self._open_compressed_file(_to_native_ascii(path), 'r') + checksum = 0 + while True: + chunk = f.read(16 * 1024 * 1024) + if not chunk: + break + checksum = crc32(chunk, checksum) + checksums = set([(b'', checksum)]) + f.close() + except Exception: + checksums = set() + return checksums + + +def get_archive(module): + if module.params['format'] == 'zip': + return ZipArchive(module) + else: + return TarArchive(module) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='list', elements='path', required=True), + format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), + dest=dict(type='path'), + exclude_path=dict(type='list', elements='path', default=[]), + exclusion_patterns=dict(type='list', elements='path'), + force_archive=dict(type='bool', default=False), + remove=dict(type='bool', default=False), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + check_mode = module.check_mode + + archive = get_archive(module) + archive.find_targets() + + if not archive.has_targets(): + if archive.destination_exists(): + archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED + elif archive.has_targets() and archive.must_archive: + if check_mode: + archive.changed = True + else: + archive.add_targets() + archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED + archive.changed |= archive.is_different_from_original() + if archive.remove: + archive.remove_targets() + else: + if check_mode: + if not archive.destination_exists(): + archive.changed = True + else: + path = archive.paths[0] + archive.add_single_target(path) + archive.changed |= archive.is_different_from_original() + if archive.remove: + archive.remove_single_target(path) + + if archive.destination_exists(): + archive.update_permissions() + + module.exit_json(**archive.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/atomic_container.py b/plugins/modules/atomic_container.py deleted file mode 120000 index a749104276..0000000000 --- a/plugins/modules/atomic_container.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/atomic/atomic_container.py \ No newline at end of file diff --git a/plugins/modules/atomic_container.py b/plugins/modules/atomic_container.py new file mode 
100644 index 0000000000..9051705f12 --- /dev/null +++ b/plugins/modules/atomic_container.py @@ -0,0 +1,216 @@ +#!/usr/bin/python + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: atomic_container +short_description: Manage the containers on the atomic host platform +description: + - Manage the containers on the atomic host platform. + - Allows managing the lifecycle of a container on the atomic host platform. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. +author: "Giuseppe Scrivano (@giuseppe)" +requirements: + - atomic +notes: + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + backend: + description: + - Define the backend to use for the container. + required: true + choices: ["docker", "ostree"] + type: str + name: + description: + - Name of the container. + required: true + type: str + image: + description: + - The image to use to install the container. + required: true + type: str + rootfs: + description: + - Define the rootfs of the image. + type: str + state: + description: + - State of the container. + choices: ["absent", "latest", "present", "rollback"] + default: "latest" + type: str + mode: + description: + - Define whether it is a user or a system container. + choices: ["user", "system"] + type: str + values: + description: + - Values for the installation of the container. + - This option is permitted only with mode 'user' or 'system'. + - The values specified here are used at installation time as C(--set) arguments for C(atomic install). + type: list + elements: str + default: [] +""" + +EXAMPLES = r""" +- name: Install the etcd system container + community.general.atomic_container: + name: etcd + image: rhel/etcd + backend: ostree + state: latest + mode: system + values: + - ETCD_NAME=etcd.server + +- name: Uninstall the etcd system container + community.general.atomic_container: + name: etcd + image: rhel/etcd + backend: ostree + state: absent + mode: system +""" + +RETURN = r""" +msg: + description: The command standard output. + returned: always + type: str + sample: 'Using default tag: latest ...'
+""" + +# import module snippets +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def do_install(module, mode, rootfs, container, image, values_list, backend): + system_list = ["--system"] if mode == 'system' else [] + user_list = ["--user"] if mode == 'user' else [] + rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else [] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + changed = "Extracting" in out or "Copying blob" in out + module.exit_json(msg=out, changed=changed) + + +def do_update(module, container, image, values_list): + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + changed = "Extracting" in out or "Copying blob" in out + module.exit_json(msg=out, changed=changed) + + +def do_uninstall(module, name, backend): + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + module.exit_json(msg=out, changed=True) + + +def do_rollback(module, name): + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'containers', 'rollback', name] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + changed = "Rolling back" in out + module.exit_json(msg=out, changed=changed) + + +def core(module): + mode = module.params['mode'] + name = module.params['name'] + image = module.params['image'] + rootfs = module.params['rootfs'] + values = module.params['values'] + backend = module.params['backend'] + state = module.params['state'] + + atomic_bin = module.get_bin_path('atomic') + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + values_list = ["--set=%s" % x for x in values] if values else [] + + args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + return + present = name in out + + if state == 'present' and present: + module.exit_json(msg=out, changed=False) + elif (state in ['latest', 'present']) and not present: + do_install(module, mode, rootfs, name, image, values_list, backend) + elif state == 'latest': + do_update(module, name, image, values_list) + elif state == 'absent': + if not present: + module.exit_json(msg="The container is not present", changed=False) + else: + do_uninstall(module, name, backend) + elif state == 'rollback': + do_rollback(module, name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + mode=dict(choices=['user', 'system']), + name=dict(required=True), + image=dict(required=True), + rootfs=dict(), + state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']), + backend=dict(required=True, choices=['docker', 'ostree']), + values=dict(type='list', default=[], elements='str'), + ), + ) + + if module.params['values'] is not None and 
module.params['mode'] == 'default': + module.fail_json(msg="values is supported only with user or system mode") + + # Verify that the platform supports atomic command + dummy = module.get_bin_path('atomic', required=True) + + try: + core(module) + except Exception as e: + module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/atomic_host.py b/plugins/modules/atomic_host.py deleted file mode 120000 index 589f0d8412..0000000000 --- a/plugins/modules/atomic_host.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/atomic/atomic_host.py \ No newline at end of file diff --git a/plugins/modules/atomic_host.py b/plugins/modules/atomic_host.py new file mode 100644 index 0000000000..470e65c919 --- /dev/null +++ b/plugins/modules/atomic_host.py @@ -0,0 +1,106 @@ +#!/usr/bin/python + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: atomic_host +short_description: Manage the atomic host platform +description: + - Manage the atomic host platform. + - Rebooting of Atomic host platform should be done outside this module. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. +author: + - Saravanan KR (@krsacme) +notes: + - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. +requirements: + - atomic +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + revision: + description: + - The version number of the atomic host to be deployed. + - Providing V(latest) will upgrade to the latest available version. + default: 'latest' + aliases: [version] + type: str +""" + +EXAMPLES = r""" +- name: Upgrade the atomic host platform to the latest version (atomic host upgrade) + community.general.atomic_host: + revision: latest + +- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) + community.general.atomic_host: + revision: 23.130 +""" + +RETURN = r""" +msg: + description: The command standard output. 
+ returned: always
+ type: str
+ sample: 'Already on latest'
+"""
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def core(module):
+ revision = module.params['revision']
+ atomic_bin = module.get_bin_path('atomic', required=True)
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = [atomic_bin, 'host', 'upgrade']
+ else:
+ args = [atomic_bin, 'host', 'deploy', revision]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ revision=dict(type='str', default='latest', aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable only to Atomic Host platforms")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/atomic_image.py b/plugins/modules/atomic_image.py
deleted file mode 120000
index 854414b070..0000000000
--- a/plugins/modules/atomic_image.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/atomic/atomic_image.py
\ No newline at end of file
diff --git a/plugins/modules/atomic_image.py b/plugins/modules/atomic_image.py
new file mode 100644
index 0000000000..0c3025b75f
--- /dev/null
+++ b/plugins/modules/atomic_image.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform.
+ - Allows executing the commands specified by the RUN label in the container image, when present.
+deprecated:
+ removed_in: 13.0.0
+ why: Project Atomic was sunset by the end of 2019.
+ alternative: There is none.
+author:
+ - Saravanan KR (@krsacme)
+notes:
+ - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
+requirements:
+ - atomic
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: ['docker', 'ostree']
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of the container image.
+ - The state V(latest) ensures that the container image is upgraded to the latest version and forcefully restarts the container,
+ if running.
+ choices: ['absent', 'latest', 'present']
+ default: 'latest'
+ type: str
+ started:
+ description:
+ - Start or stop the container.
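+ - If V(false), the image is only pulled or installed, and the commands in the RUN label are not executed.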
+ type: bool
+ default: true
+"""
+
+EXAMPLES = r"""
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+ community.general.atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+- name: Pull busybox to the OSTree backend
+ community.general.atomic_image:
+ name: busybox
+ state: latest
+ backend: ostree
+"""
+
+RETURN = r"""
+msg:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Using default tag: latest ...'
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def do_upgrade(module, image):
+ atomic_bin = module.get_bin_path('atomic')
+ args = [atomic_bin, 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ backend = module.params['backend']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ atomic_bin = module.get_bin_path('atomic')
+ out = {}
+ err = {}
+ rc = 0
+
+ if backend:
+ if state == 'present' or state == 'latest':
+ args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ out_run = ""
+ if started:
+ args = [atomic_bin, 'run', "--storage=%s" % backend, image]
+ rc, out_run, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=(out + out_run), changed=changed)
+ elif state == 'absent':
+ args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Unable to find" not in out
+ module.exit_json(msg=out, changed=changed)
+ return
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = [atomic_bin, 'run', image]
+ else:
+ args = [atomic_bin, 'install', image]
+ elif state == 'absent':
+ args = [atomic_bin, 'uninstall', image]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ elif rc == 1 and 'already present' in err:
+ module.exit_json(result=err, changed=is_upgraded)
+ elif started and 'Container is running' in out:
+ module.exit_json(result=out, changed=is_upgraded)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backend=dict(type='str', choices=['docker', 'ostree']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+ started=dict(type='bool', default=True),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ dummy = module.get_bin_path('atomic', required=True)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py
deleted file mode 120000
index ddcfc26883..0000000000
--- a/plugins/modules/awall.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/awall.py
\ No newline at end of file
diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py
new file mode 100644
index 0000000000..37e1e87a1c
--- /dev/null
+++ b/plugins/modules/awall.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Ted Trask
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask)
+description:
+ - This module allows for enabling/disabling/activating C(awall) policies.
+ - Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files and activates the configuration
+ on the system.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - One or more policy names.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the policies should be enabled or disabled.
+ type: str
+ choices: [disabled, enabled]
+ default: enabled
+ activate:
+ description:
+ - Activate the new firewall rules.
+ - Can be run with other steps or on its own.
+ - Idempotency is affected if O(activate=true), as the module always reports a changed state.
+ type: bool
+ default: false
+notes:
+ - At least one of O(name) and O(activate) is required.
+"""
+
+EXAMPLES = r"""
+- name: Enable "foo" and "bar" policy
+ community.general.awall:
+ name: [foo, bar]
+ state: enabled
+
+- name: Disable "foo" and "bar" policy and activate new rules
+ community.general.awall:
+ name:
+ - foo
+ - bar
+ state: disabled
+ activate: false
+
+- name: Activate currently enabled firewall rules
+ community.general.awall:
+ activate: true
+"""
+
+RETURN = """ # """
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+ cmd = "%s activate --force" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+ cmd = "%s list" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+ return True
+ return False
+
+
+def enable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if not is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already enabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s enable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already disabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s disable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+ name=dict(type='list', elements='str'),
+ activate=dict(type='bool', default=False),
+ ),
+ required_one_of=[['name', 'activate']],
+ supports_check_mode=True,
+ )
+
+ global AWALL_PATH
+ AWALL_PATH = module.get_bin_path('awall', required=True)
+
+ p = module.params
+
+ if p['name']:
+ if p['state'] == 'enabled':
+ enable_policy(module, p['name'], p['activate'])
+ elif p['state'] == 'disabled':
+ disable_policy(module, p['name'], p['activate'])
+
+ if p['activate']:
+ if not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="activated awall rules")
+
+ module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py
deleted file mode 120000
index ced6e463b7..0000000000
--- a/plugins/modules/beadm.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/beadm.py
\ No newline at end of file
diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py
new file mode 100644
index 0000000000..f285616ca7
--- /dev/null
+++ b/plugins/modules/beadm.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016, Adam Števko
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
+description:
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
+author: Adam Števko (@xen0l)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - ZFS boot environment name.
+ type: str
+ required: true
+ aliases: ["be"]
+ snapshot:
+ description:
+ - If specified, the new boot environment is cloned from the given snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is available only on Solarish platforms.
+ type: str
+ options:
+ description:
+ - Create the datasets for new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [absent, activated, mounted, present, unmounted]
+ default: present
+ force:
+ description:
+ - Specifies if the unmount should be forced.
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Create ZFS boot environment + community.general.beadm: + name: upgrade-be + state: present + +- name: Create ZFS boot environment from existing inactive boot environment + community.general.beadm: + name: upgrade-be + snapshot: be@old + state: present + +- name: Create ZFS boot environment with compression enabled and description "upgrade" + community.general.beadm: + name: upgrade-be + options: "compression=on" + description: upgrade + state: present + +- name: Delete ZFS boot environment + community.general.beadm: + name: old-be + state: absent + +- name: Mount ZFS boot environment on /tmp/be + community.general.beadm: + name: BE + mountpoint: /tmp/be + state: mounted + +- name: Unmount ZFS boot environment + community.general.beadm: + name: BE + state: unmounted + +- name: Activate ZFS boot environment + community.general.beadm: + name: upgrade-be + state: activated +""" + +RETURN = r""" +name: + description: BE name. + returned: always + type: str + sample: pre-upgrade +snapshot: + description: ZFS snapshot to create BE from. + returned: always + type: str + sample: rpool/ROOT/oi-hipster@fresh +description: + description: BE description. + returned: always + type: str + sample: Upgrade from 9.0 to 10.0 +options: + description: BE additional options. + returned: always + type: str + sample: compression=on +mountpoint: + description: BE mountpoint. + returned: always + type: str + sample: /mnt/be +state: + description: State of the target. + returned: always + type: str + sample: present +force: + description: If forced action is wanted. + returned: always + type: bool + sample: false +""" + +import os +from ansible.module_utils.basic import AnsibleModule + + +class BE(object): + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.snapshot = module.params['snapshot'] + self.description = module.params['description'] + self.options = module.params['options'] + self.mountpoint = module.params['mountpoint'] + self.state = module.params['state'] + self.force = module.params['force'] + self.is_freebsd = os.uname()[0] == 'FreeBSD' + + def _beadm_list(self): + cmd = [self.module.get_bin_path('beadm'), 'list', '-H'] + if '@' in self.name: + cmd.append('-s') + return self.module.run_command(cmd) + + def _find_be_by_name(self, out): + if '@' in self.name: + for line in out.splitlines(): + if self.is_freebsd: + check = line.split() + if check == []: + continue + full_name = check[0].split('/') + if full_name == []: + continue + check[0] = full_name[len(full_name) - 1] + if check[0] == self.name: + return check + else: + check = line.split(';') + if check[0] == self.name: + return check + else: + for line in out.splitlines(): + if self.is_freebsd: + check = line.split() + if check[0] == self.name: + return check + else: + check = line.split(';') + if check[0] == self.name: + return check + return None + + def exists(self): + (rc, out, dummy) = self._beadm_list() + + if rc == 0: + if self._find_be_by_name(out): + return True + else: + return False + else: + return False + + def is_activated(self): + (rc, out, dummy) = self._beadm_list() + + if rc == 0: + line = self._find_be_by_name(out) + if line is None: + return False + if self.is_freebsd: + if 'R' in line[1]: + return True + else: + if 'R' in line[2]: + return True + + return False + + def activate_be(self): + cmd = [self.module.get_bin_path('beadm'), 'activate', self.name] + return self.module.run_command(cmd) + + def create_be(self): + cmd 
= [self.module.get_bin_path('beadm'), 'create'] + + if self.snapshot: + cmd.extend(['-e', self.snapshot]) + if not self.is_freebsd: + if self.description: + cmd.extend(['-d', self.description]) + if self.options: + cmd.extend(['-o', self.options]) + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def destroy_be(self): + cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name] + return self.module.run_command(cmd) + + def is_mounted(self): + (rc, out, dummy) = self._beadm_list() + + if rc == 0: + line = self._find_be_by_name(out) + if line is None: + return False + if self.is_freebsd: + # On FreeBSD, we exclude currently mounted BE on /, as it is + # special and can be activated even if it is mounted. That is not + # possible with non-root BEs. + if line[2] != '-' and line[2] != '/': + return True + else: + if line[3]: + return True + + return False + + def mount_be(self): + cmd = [self.module.get_bin_path('beadm'), 'mount', self.name] + + if self.mountpoint: + cmd.append(self.mountpoint) + + return self.module.run_command(cmd) + + def unmount_be(self): + cmd = [self.module.get_bin_path('beadm'), 'unmount'] + if self.force: + cmd.append('-f') + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, aliases=['be']), + snapshot=dict(type='str'), + description=dict(type='str'), + options=dict(type='str'), + mountpoint=dict(type='path'), + state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + be = BE(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = be.name + result['state'] = be.state + + if be.snapshot: + result['snapshot'] = be.snapshot + + if be.description: + result['description'] = be.description + + if be.options: + result['options'] = be.options + + if be.mountpoint: + result['mountpoint'] = be.mountpoint + + if be.state == 'absent': + # beadm on FreeBSD and Solarish systems differs in delete behaviour in + # that we are not allowed to delete activated BE on FreeBSD while on + # Solarish systems we cannot delete BE if it is mounted. We add mount + # check for both platforms as BE should be explicitly unmounted before + # being deleted. On FreeBSD, we also check if the BE is activated. + if be.exists(): + if not be.is_mounted(): + if module.check_mode: + module.exit_json(changed=True) + + if be.is_freebsd: + if be.is_activated(): + module.fail_json(msg='Unable to remove active BE!') + + (rc, out, err) = be.destroy_be() + + if rc != 0: + module.fail_json(msg='Error while destroying BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + else: + module.fail_json(msg='Unable to remove BE as it is mounted!') + + elif be.state == 'present': + if not be.exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = be.create_be() + + if rc != 0: + module.fail_json(msg='Error while creating BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + + elif be.state == 'activated': + if not be.is_activated(): + if module.check_mode: + module.exit_json(changed=True) + + # On FreeBSD, beadm is unable to activate mounted BEs, so we add + # an explicit check for that case. 
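+ # On Solarish systems, activating a mounted BE is allowed, so the check applies to FreeBSD only.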
+ if be.is_freebsd:
+ if be.is_mounted():
+ module.fail_json(msg='Unable to activate mounted BE!')
+
+ (rc, out, err) = be.activate_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while activating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ elif be.state == 'mounted':
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.mount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while mounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'unmounted':
+ if be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.unmount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while unmounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/bearychat.py b/plugins/modules/bearychat.py
deleted file mode 120000
index b2fd0c1c0c..0000000000
--- a/plugins/modules/bearychat.py
+++ /dev/null
@@ -1 +0,0 @@
-./notification/bearychat.py
\ No newline at end of file
diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py
deleted file mode 120000
index 2bdcf5e34e..0000000000
--- a/plugins/modules/bigpanda.py
+++ /dev/null
@@ -1 +0,0 @@
-./monitoring/bigpanda.py
\ No newline at end of file
diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py
new file mode 100644
index 0000000000..1bdd79d548
--- /dev/null
+++ b/plugins/modules/bigpanda.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters
+ for future module calls.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ component:
+ type: str
+ description:
+ - 'The name of the component being deployed. Ex: V(billing).'
+ required: true
+ aliases: ['name']
+ version:
+ type: str
+ description:
+ - The deployment version.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ state:
+ type: str
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ type: str
+ description:
+ - Name of the affected host. Can be a list.
+ - If not specified, it defaults to the remote system's hostname.
+ required: false
+ aliases: ['host']
+ env:
+ type: str
+ description:
+ - The environment name, typically V(production), V(staging), and so on.
+ required: false
+ owner:
+ type: str
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ type: str
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ type: str
+ description:
+ - Base URL of the API server.
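+ - The deployment start and end API paths are appended to this URL by the module.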
+ required: false + default: "https://api.bigpanda.io" + validate_certs: + description: + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. + required: false + default: true + type: bool + deployment_message: + type: str + description: + - Message about the deployment. + version_added: '0.2.0' + source_system: + type: str + description: + - Source system used in the requests to the API. + default: ansible + +# informational: requirements for nodes +requirements: [] +""" + +EXAMPLES = r""" +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + state: started + +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + state: finished + +# If outside servers aren't reachable from your machine, use delegate_to and override hosts: +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + hosts: '{{ ansible_hostname }}' + state: started + delegate_to: localhost + register: deployment + +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: '{{ deployment.component }}' + version: '{{ deployment.version }}' + token: '{{ deployment.token }}' + state: finished + delegate_to: localhost +""" + +# =========================================== +# Module execution. +# +import json +import socket +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + component=dict(required=True, aliases=['name']), + version=dict(required=True), + token=dict(required=True, no_log=True), + state=dict(required=True, choices=['started', 'finished', 'failed']), + hosts=dict(aliases=['host']), + env=dict(), + owner=dict(), + description=dict(), + deployment_message=dict(), + source_system=dict(default='ansible'), + validate_certs=dict(default=True, type='bool'), + url=dict(default='https://api.bigpanda.io'), + ), + supports_check_mode=True, + ) + + token = module.params['token'] + state = module.params['state'] + url = module.params['url'] + + # Build the common request body + body = dict() + for k in ('component', 'version', 'hosts'): + v = module.params[k] + if v is not None: + body[k] = v + if body.get('hosts') is None: + body['hosts'] = [socket.gethostname()] + + if not isinstance(body['hosts'], list): + body['hosts'] = [body['hosts']] + + # Insert state-specific attributes to body + if state == 'started': + for k in ('source_system', 'env', 'owner', 'description'): + v = module.params[k] + if v is not None: + body[k] = v + + request_url = url + '/data/events/deployments/start' + else: + message = module.params['deployment_message'] + if message is not None: + body['errorMessage'] = message + + if state == 'finished': + body['status'] = 'success' + else: + body['status'] = 'failure' + + request_url = url + '/data/events/deployments/end' + + # Build the deployment object we return + deployment = dict(token=token, url=url) + deployment.update(body) + if 'errorMessage' in deployment: + message = deployment.pop('errorMessage') + deployment['message'] = message + + # If we're in check mode, just exit pretending like we succeeded + if 
module.check_mode: + module.exit_json(changed=True, **deployment) + + # Send the data to bigpanda + data = json.dumps(body) + headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} + try: + response, info = fetch_url(module, request_url, data=data, headers=headers) + if info['status'] == 200: + module.exit_json(changed=True, **deployment) + else: + module.fail_json(msg=json.dumps(info)) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bitbucket_access_key.py b/plugins/modules/bitbucket_access_key.py deleted file mode 120000 index e5e45258a3..0000000000 --- a/plugins/modules/bitbucket_access_key.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/bitbucket/bitbucket_access_key.py \ No newline at end of file diff --git a/plugins/modules/bitbucket_access_key.py b/plugins/modules/bitbucket_access_key.py new file mode 100644 index 0000000000..2b2bf9b8c5 --- /dev/null +++ b/plugins/modules/bitbucket_access_key.py @@ -0,0 +1,278 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bitbucket_access_key +short_description: Manages Bitbucket repository access keys +description: + - Manages Bitbucket repository access keys (also called deploy keys). +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). + type: str + required: true + key: + description: + - The SSH public key. + type: str + label: + description: + - The key label. + type: str + required: true + state: + description: + - Indicates desired state of the access key. + type: str + required: true + choices: [absent, present] +notes: + - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories. + - Check mode is supported. 
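+ - Bitbucket does not support updating an access key in place; when the key changes, the module deletes the old key and creates a new one with the same label.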
+""" + +EXAMPLES = r""" +- name: Create access key + community.general.bitbucket_access_key: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + key: '{{lookup("file", "bitbucket.pub") }}' + label: 'Bitbucket' + state: present + +- name: Delete access key + community.general.bitbucket_access_key: + repository: bitbucket-repo + workspace: bitbucket_workspace + label: Bitbucket + state: absent +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'required_key': '`key` is required when the `state` is `present`', + 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', + 'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`', + 'invalid_key': 'Invalid SSH key or key is already in use', +} + +BITBUCKET_API_ENDPOINTS = { + 'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, + 'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_deploy_key(module, bitbucket): + """ + Search for an existing deploy key on Bitbucket + with the label specified in module param `label` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing deploy key or None if not found + :rtype: dict or None + + Return example:: + + { + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" + }, + "html": { + "href": "https://bitbucket.org/mleu/test" + }, + "avatar": { + "href": "..." 
+ } + }, + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" + }, + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" + } + }, + } + """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + } + + # Look through the all response pages in search of deploy key we need + while 'next' in content: + info, content = bitbucket.request( + api_url=content['next'], + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) + + res = next((v for v in content['values'] if v['label'] == module.params['label']), None) + + if res is not None: + return res + + return None + + +def create_deploy_key(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='POST', + data={ + 'key': module.params['key'], + 'label': module.params['label'], + }, + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] == 400: + module.fail_json(msg=error_messages['invalid_key']) + + if info['status'] != 200: + module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( + label=module.params['label'], + info=info, + )) + + +def delete_deploy_key(module, bitbucket, key_id): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + key_id=key_id, + ), + method='DELETE', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( + label=module.params['label'], + info=info, + )) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + workspace=dict( + type='str', required=True, + ), + key=dict(type='str', no_log=False), + label=dict(type='str', required=True), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=BitbucketHelper.bitbucket_required_one_of(), + required_together=BitbucketHelper.bitbucket_required_together(), + ) + + bitbucket = BitbucketHelper(module) + + key = module.params['key'] + state = module.params['state'] + + # Check parameters + if (key is None) and (state == 'present'): + module.fail_json(msg=error_messages['required_key']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing deploy key (if any) + existing_deploy_key = get_existing_deploy_key(module, bitbucket) + changed = False + + # Create new deploy key in case it doesn't exists + if not 
existing_deploy_key and (state == 'present'): + if not module.check_mode: + create_deploy_key(module, bitbucket) + changed = True + + # Update deploy key if the old value does not match the new one + elif existing_deploy_key and (state == 'present'): + if not key.startswith(existing_deploy_key.get('key')): + if not module.check_mode: + # Bitbucket doesn't support update key for the same label, + # so we need to delete the old one first + delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + create_deploy_key(module, bitbucket) + changed = True + + # Delete deploy key + elif existing_deploy_key and (state == 'absent'): + if not module.check_mode: + delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bitbucket_pipeline_key_pair.py b/plugins/modules/bitbucket_pipeline_key_pair.py deleted file mode 120000 index 06801844cd..0000000000 --- a/plugins/modules/bitbucket_pipeline_key_pair.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/bitbucket/bitbucket_pipeline_key_pair.py \ No newline at end of file diff --git a/plugins/modules/bitbucket_pipeline_key_pair.py b/plugins/modules/bitbucket_pipeline_key_pair.py new file mode 100644 index 0000000000..28d837c914 --- /dev/null +++ b/plugins/modules/bitbucket_pipeline_key_pair.py @@ -0,0 +1,204 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bitbucket_pipeline_key_pair +short_description: Manages Bitbucket pipeline SSH key pair +description: + - Manages Bitbucket pipeline SSH key pair. +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). + type: str + required: true + public_key: + description: + - The public key. + type: str + private_key: + description: + - The private key. + type: str + state: + description: + - Indicates desired state of the key pair. + type: str + required: true + choices: [absent, present] +notes: + - Check mode is supported. 
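+ - The Bitbucket API never returns the private key, so idempotency is determined by comparing the public key only.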
+""" + +EXAMPLES = r""" +- name: Create or update SSH key pair + community.general.bitbucket_pipeline_key_pair: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + public_key: '{{lookup("file", "bitbucket.pub") }}' + private_key: '{{lookup("file", "bitbucket") }}' + state: present + +- name: Remove SSH key pair + community.general.bitbucket_pipeline_key_pair: + repository: bitbucket-repo + workspace: bitbucket_workspace + state: absent +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'invalid_params': 'Account, repository or SSH key pair was not found', + 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`', +} + +BITBUCKET_API_ENDPOINTS = { + 'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_ssh_key_pair(module, bitbucket): + """ + Retrieves an existing ssh key pair from repository + specified in module param `repository` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing key pair or None if not found + :rtype: dict or None + + Return example:: + + { + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT", + "type": "pipeline_ssh_key_pair" + } + """ + api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + + info, content = bitbucket.request( + api_url=api_url, + method='GET', + ) + + if info['status'] == 404: + # Account, repository or SSH key pair was not found. 
+ return None + + return content + + +def update_ssh_key_pair(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='PUT', + data={ + 'private_key': module.params['private_key'], + 'public_key': module.params['public_key'], + }, + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_params']) + + if info['status'] != 200: + module.fail_json(msg='Failed to create or update pipeline ssh key pair : {0}'.format(info)) + + +def delete_ssh_key_pair(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='DELETE', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_params']) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info)) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + workspace=dict(type='str', required=True), + public_key=dict(type='str'), + private_key=dict(type='str', no_log=True), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=BitbucketHelper.bitbucket_required_one_of(), + required_together=BitbucketHelper.bitbucket_required_together(), + ) + + bitbucket = BitbucketHelper(module) + + state = module.params['state'] + public_key = module.params['public_key'] + private_key = module.params['private_key'] + + # Check parameters + if ((public_key is None) or (private_key is None)) and (state == 'present'): + module.fail_json(msg=error_messages['required_keys']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing ssh key + key_pair = get_existing_ssh_key_pair(module, bitbucket) + changed = False + + # Create or update key pair + if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'): + if not module.check_mode: + update_ssh_key_pair(module, bitbucket) + changed = True + + # Delete key pair + elif key_pair and (state == 'absent'): + if not module.check_mode: + delete_ssh_key_pair(module, bitbucket) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py deleted file mode 120000 index 012dc76e85..0000000000 --- a/plugins/modules/bitbucket_pipeline_known_host.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/bitbucket/bitbucket_pipeline_known_host.py \ No newline at end of file diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py new file mode 100644 index 0000000000..fb382c8afb --- /dev/null +++ b/plugins/modules/bitbucket_pipeline_known_host.py @@ -0,0 +1,301 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bitbucket_pipeline_known_host +short_description: Manages Bitbucket pipeline known hosts 
+description: + - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. + - The host fingerprint is retrieved automatically, but in case of an error, one can use O(key) field to specify it manually. +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +requirements: + - paramiko +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). + type: str + required: true + name: + description: + - The FQDN of the known host. + type: str + required: true + key: + description: + - The public key. + type: str + state: + description: + - Indicates desired state of the record. + type: str + required: true + choices: [absent, present] +notes: + - Check mode is supported. +""" + +EXAMPLES = r""" +- name: Create known hosts from the list + community.general.bitbucket_pipeline_known_host: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + name: '{{ item }}' + state: present + with_items: + - bitbucket.org + - example.com + +- name: Remove known host + community.general.bitbucket_pipeline_known_host: + repository: bitbucket-repo + workspace: bitbucket_workspace + name: bitbucket.org + state: absent + +- name: Specify public key file + community.general.bitbucket_pipeline_known_host: + repository: bitbucket-repo + workspace: bitbucket_workspace + name: bitbucket.org + key: '{{lookup("file", "bitbucket.pub") }}' + state: absent +""" + +RETURN = r""" # """ + +import socket + +try: + import paramiko + HAS_PARAMIKO = True +except ImportError: + HAS_PARAMIKO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'invalid_params': 'Account or repository was not found', + 'unknown_key_type': 'Public key type is unknown', +} + +BITBUCKET_API_ENDPOINTS = { + 'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL, + 'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_known_host(module, bitbucket): + """ + Search for a host in Bitbucket pipelines known hosts + with the name specified in module param `name` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing host or None if not found + :rtype: dict or None + + Return example:: + + { + 'type': 'pipeline_known_host', + 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}' + 'hostname': 'bitbucket.org', + 'public_key': { + 'type': 'pipeline_ssh_public_key', + 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', + 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', + 'key_type': 'ssh-rsa', + 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' + }, + } + """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + } + + # Look through all response pages in search of hostname 
we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `workspace`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+ host = next((v for v in content['values'] if v['hostname'] == module.params['name']), None)
+
+ if host is not None:
+ return host
+
+ return None
+
+
+def get_host_key(module, hostname):
+ """
+ Fetches public key for specified host
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param hostname: host name
+ :return: key type and key content
+ :rtype: tuple
+
+ Return example::
+
+ (
+ 'ssh-rsa',
+ 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+ )
+ """
+ try:
+ sock = socket.socket()
+ sock.connect((hostname, 22))
+ except socket.error:
+ module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
+ try:
+ trans = paramiko.transport.Transport(sock)
+ trans.start_client()
+ host_key = trans.get_remote_server_key()
+ except paramiko.SSHException:
+ module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+ trans.close()
+ sock.close()
+
+ key_type = host_key.get_name()
+ key = host_key.get_base64()
+
+ return key_type, key
+
+
+def create_known_host(module, bitbucket):
+ hostname = module.params['name']
+ key_param = module.params['key']
+
+ if key_param is None:
+ key_type, key = get_host_key(module, hostname)
+ elif ' ' in key_param:
+ key_type, key = key_param.split(' ', 1)
+ else:
+ module.fail_json(msg=error_messages['unknown_key_type'])
+
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'hostname': hostname,
+ 'public_key': {
+ 'key_type': key_type,
+ 'key': key,
+ }
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ known_host_uuid=known_host_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ workspace=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ key=dict(type='str', no_log=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+ required_together=BitbucketHelper.bitbucket_required_together(),
+ )
+
+ if (module.params['key'] is None) and (not HAS_PARAMIKO):
+ module.fail_json(msg='`paramiko` package not found, please install it.')
+
+ bitbucket = BitbucketHelper(module)
+
+ # Retrieve access token for authorized 
API requests + bitbucket.fetch_access_token() + + # Retrieve existing known host + existing_host = get_existing_known_host(module, bitbucket) + state = module.params['state'] + changed = False + + # Create new host in case it doesn't exists + if not existing_host and (state == 'present'): + if not module.check_mode: + create_known_host(module, bitbucket) + changed = True + + # Delete host + elif existing_host and (state == 'absent'): + if not module.check_mode: + delete_known_host(module, bitbucket, existing_host['uuid']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bitbucket_pipeline_variable.py b/plugins/modules/bitbucket_pipeline_variable.py deleted file mode 120000 index 70402de8f0..0000000000 --- a/plugins/modules/bitbucket_pipeline_variable.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/bitbucket/bitbucket_pipeline_variable.py \ No newline at end of file diff --git a/plugins/modules/bitbucket_pipeline_variable.py b/plugins/modules/bitbucket_pipeline_variable.py new file mode 100644 index 0000000000..ea43beba55 --- /dev/null +++ b/plugins/modules/bitbucket_pipeline_variable.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bitbucket_pipeline_variable +short_description: Manages Bitbucket pipeline variables +description: + - Manages Bitbucket pipeline variables. +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). + type: str + required: true + name: + description: + - The pipeline variable name. + type: str + required: true + value: + description: + - The pipeline variable value. + type: str + secured: + description: + - Whether to encrypt the variable value. + type: bool + default: false + state: + description: + - Indicates desired state of the variable. + type: str + required: true + choices: [absent, present] +notes: + - Check mode is supported. + - For secured values return parameter C(changed) is always V(true). 
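+ - This is because the Bitbucket API never returns secured values, so the module cannot compare them with the requested value.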
+""" + +EXAMPLES = r""" +- name: Create or update pipeline variables from the list + community.general.bitbucket_pipeline_variable: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + name: '{{ item.name }}' + value: '{{ item.value }}' + secured: '{{ item.secured }}' + state: present + with_items: + - {name: AWS_ACCESS_KEY, value: ABCD1234, secured: false} + - {name: AWS_SECRET, value: qwe789poi123vbn0, secured: true} + +- name: Remove pipeline variable + community.general.bitbucket_pipeline_variable: + repository: bitbucket-repo + workspace: bitbucket_workspace + name: AWS_ACCESS_KEY + state: absent +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule, _load_params +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'required_value': '`value` is required when the `state` is `present`', +} + +BITBUCKET_API_ENDPOINTS = { + 'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL, + 'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_pipeline_variable(module, bitbucket): + """ + Search for a pipeline variable + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing variable or None if not found + :rtype: dict or None + + Return example:: + + { + 'name': 'AWS_ACCESS_OBKEY_ID', + 'value': 'x7HU80-a2', + 'type': 'pipeline_variable', + 'secured': False, + 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}' + } + + The `value` key in dict is absent in case of secured variable. 
+ """ + variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + # Look through the all response pages in search of variable we need + page = 1 + while True: + next_url = "%s?page=%s" % (variables_base_url, page) + info, content = bitbucket.request( + api_url=next_url, + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg='Invalid `repository` or `workspace`.') + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info)) + + # We are at the end of list + if 'pagelen' in content and content['pagelen'] == 0: + return None + + page += 1 + var = next((v for v in content['values'] if v['key'] == module.params['name']), None) + + if var is not None: + var['name'] = var.pop('key') + return var + + +def create_pipeline_variable(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='POST', + data={ + 'key': module.params['name'], + 'value': module.params['value'], + 'secured': module.params['secured'], + }, + ) + + if info['status'] != 201: + module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def update_pipeline_variable(module, bitbucket, variable_uuid): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + variable_uuid=variable_uuid, + ), + method='PUT', + data={ + 'value': module.params['value'], + 'secured': module.params['secured'], + }, + ) + + if info['status'] != 200: + module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def delete_pipeline_variable(module, bitbucket, variable_uuid): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + variable_uuid=variable_uuid, + ), + method='DELETE', + ) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +class BitBucketPipelineVariable(AnsibleModule): + def __init__(self, *args, **kwargs): + params = _load_params() or {} + if params.get('secured'): + kwargs['argument_spec']['value'].update({'no_log': True}) + super(BitBucketPipelineVariable, self).__init__(*args, **kwargs) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + workspace=dict(type='str', required=True), + name=dict(type='str', required=True), + value=dict(type='str'), + secured=dict(type='bool', default=False), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = BitBucketPipelineVariable( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=BitbucketHelper.bitbucket_required_one_of(), + required_together=BitbucketHelper.bitbucket_required_together(), + ) + + bitbucket = BitbucketHelper(module) + + value = module.params['value'] + state = module.params['state'] + secured = module.params['secured'] + + # Check parameters + if (value is 
None) and (state == 'present'): + module.fail_json(msg=error_messages['required_value']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing pipeline variable (if any) + existing_variable = get_existing_pipeline_variable(module, bitbucket) + changed = False + + # Create new variable in case it doesn't exists + if not existing_variable and (state == 'present'): + if not module.check_mode: + create_pipeline_variable(module, bitbucket) + changed = True + + # Update variable if it is secured or the old value does not match the new one + elif existing_variable and (state == 'present'): + if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value): + if not module.check_mode: + update_pipeline_variable(module, bitbucket, existing_variable['uuid']) + changed = True + + # Delete variable + elif existing_variable and (state == 'absent'): + if not module.check_mode: + delete_pipeline_variable(module, bitbucket, existing_variable['uuid']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py new file mode 100644 index 0000000000..d854f866bf --- /dev/null +++ b/plugins/modules/bootc_manage.py @@ -0,0 +1,92 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Ryan Cook +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt +# or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bootc_manage +version_added: 9.3.0 +author: + - Ryan Cook (@cooktheryan) +short_description: Bootc Switch and Upgrade +description: + - This module manages the switching and upgrading of C(bootc). +options: + state: + description: + - Control whether to apply the latest image or switch the image. + - B(Note:) This does not reboot the system. + - Please use M(ansible.builtin.reboot) to reboot the system. + required: true + type: str + choices: ['switch', 'latest'] + image: + description: + - The image to switch to. + - This is required when O(state=switch). 
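+      - This value is ignored when O(state=latest); the upgrade always applies to the currently running image.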
+ required: false + type: str +""" + +EXAMPLES = r""" +# Switch to a different image +- name: Provide image to switch to a different image and retain the current running image + community.general.bootc_manage: + state: switch + image: "example.com/image:latest" + +# Apply updates of the current running image +- name: Apply updates of the current running image + community.general.bootc_manage: + state: latest +""" + +RETURN = r""" +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.locale import get_best_parsable_locale + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['switch', 'latest']), + image=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'switch', ['image']), + ], + ) + + state = module.params['state'] + image = module.params['image'] + + if state == 'switch': + command = ['bootc', 'switch', image, '--retain'] + elif state == 'latest': + command = ['bootc', 'upgrade'] + + locale = get_best_parsable_locale(module) + module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + rc, stdout, err = module.run_command(command, check_rc=True) + + if 'Queued for next boot: ' in stdout: + result = {'changed': True, 'stdout': stdout} + module.exit_json(**result) + elif 'No changes in ' in stdout or 'Image specification is unchanged.' in stdout: + result = {'changed': False, 'stdout': stdout} + module.exit_json(**result) + else: + result = {'changed': False, 'stderr': err} + module.fail_json(msg='ERROR: Command execution failed.', **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py deleted file mode 120000 index aeb413aa9a..0000000000 --- a/plugins/modules/bower.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/bower.py \ No newline at end of file diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py new file mode 100644 index 0000000000..fd4e2c4920 --- /dev/null +++ b/plugins/modules/bower.py @@ -0,0 +1,234 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Michael Warkentin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: bower +short_description: Manage bower packages with C(bower) +description: + - Manage bower packages with C(bower). +author: "Michael Warkentin (@mwarkentin)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - The name of a bower package to install. + offline: + description: + - Install packages from local cache, if the packages were installed before. + type: bool + default: false + production: + description: + - Install with C(--production) flag. + type: bool + default: false + path: + type: path + description: + - The base path where to install the bower packages. + required: true + relative_execpath: + type: path + description: + - Relative path to bower executable from install path. + state: + type: str + description: + - The state of the bower package. + default: present + choices: ["present", "absent", "latest"] + version: + type: str + description: + - The version to be installed. +""" + +EXAMPLES = r""" +- name: Install "bootstrap" bower package. 
+ community.general.bower: + name: bootstrap + +- name: Install "bootstrap" bower package on version 3.1.1. + community.general.bower: + name: bootstrap + version: '3.1.1' + +- name: Remove the "bootstrap" bower package. + community.general.bower: + name: bootstrap + state: absent + +- name: Install packages based on bower.json. + community.general.bower: + path: /app/location + +- name: Update packages based on bower.json to their latest version. + community.general.bower: + path: /app/location + state: latest + +# install bower locally and run from there +- npm: + path: /app/location + name: bower + global: false +- community.general.bower: + path: /app/location + relative_execpath: node_modules/.bin +""" + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +class Bower(object): + def __init__(self, module, **kwargs): + self.module = module + self.name = kwargs['name'] + self.offline = kwargs['offline'] + self.production = kwargs['production'] + self.path = kwargs['path'] + self.relative_execpath = kwargs['relative_execpath'] + self.version = kwargs['version'] + + if kwargs['version']: + self.name_version = self.name + '#' + self.version + else: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [] + + if self.relative_execpath: + cmd.append(os.path.join(self.path, self.relative_execpath, "bower")) + if not os.path.isfile(cmd[-1]): + self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath) + else: + cmd.append("bower") + + cmd.extend(args) + cmd.extend(['--config.interactive=false', '--allow-root']) + + if self.name: + cmd.append(self.name_version) + + if self.offline: + cmd.append('--offline') + + if self.production: + cmd.append('--production') + + # If path is specified, cd into that path and run the command. 
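+            # Using run_command(cwd=...) rather than os.chdir() keeps the directory
+            # change scoped to this single bower invocation.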
+ cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out + return '' + + def list(self): + cmd = ['list', '--json'] + + installed = list() + missing = list() + outdated = list() + data = json.loads(self._exec(cmd, True, False)) + if 'dependencies' in data: + for dep in data['dependencies']: + dep_data = data['dependencies'][dep] + if dep_data.get('missing', False): + missing.append(dep) + elif ('version' in dep_data['pkgMeta'] and + 'update' in dep_data and + dep_data['pkgMeta']['version'] != dep_data['update']['latest']): + outdated.append(dep) + elif dep_data.get('incompatible', False): + outdated.append(dep) + else: + installed.append(dep) + # Named dependency not installed + else: + missing.append(self.name) + + return installed, missing, outdated + + def install(self): + return self._exec(['install']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + +def main(): + arg_spec = dict( + name=dict(), + offline=dict(default=False, type='bool'), + production=dict(default=False, type='bool'), + path=dict(required=True, type='path'), + relative_execpath=dict(type='path'), + state=dict(default='present', choices=['present', 'absent', 'latest', ]), + version=dict(), + ) + module = AnsibleModule( + argument_spec=arg_spec + ) + + name = module.params['name'] + offline = module.params['offline'] + production = module.params['production'] + path = module.params['path'] + relative_execpath = module.params['relative_execpath'] + state = module.params['state'] + version = module.params['version'] + + if state == 'absent' and not name: + module.fail_json(msg='uninstalling a package is only available for named packages') + + bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version) + + changed = False + if state == 'present': + installed, missing, outdated = bower.list() + if missing: + changed = True + bower.install() + elif state == 'latest': + installed, missing, outdated = bower.list() + if missing or outdated: + changed = True + bower.update() + else: # Absent + installed, missing, outdated = bower.list() + if name in installed: + changed = True + bower.uninstall() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py new file mode 100644 index 0000000000..e05b6e6c6d --- /dev/null +++ b/plugins/modules/btrfs_info.py @@ -0,0 +1,103 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: btrfs_info +short_description: Query btrfs filesystem info +version_added: "6.6.0" +description: Query status of available btrfs filesystems, including UUID, label, subvolumes and mountpoints. 
+
+author:
+  - Gregory Furlong (@gnfzdz)
+
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+"""
+
+EXAMPLES = r"""
+- name: Query information about mounted btrfs filesystems
+  community.general.btrfs_info:
+  register: my_btrfs_info
+"""
+
+RETURN = r"""
+filesystems:
+  description: Summaries of the current state for all btrfs filesystems found on the target host.
+  type: list
+  elements: dict
+  returned: success
+  contains:
+    uuid:
+      description: A unique identifier assigned to the filesystem.
+      type: str
+      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+    label:
+      description: An optional label assigned to the filesystem.
+      type: str
+      sample: Tank
+    devices:
+      description: A list of devices assigned to the filesystem.
+      type: list
+      sample:
+        - /dev/sda1
+        - /dev/sdb1
+    default_subvolume:
+      description: The ID of the filesystem's default subvolume.
+      type: int
+      sample: 5
+    subvolumes:
+      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+      type: list
+      elements: dict
+      contains:
+        id:
+          description: An identifier assigned to the subvolume, unique within the containing filesystem.
+          type: int
+          sample: 256
+        mountpoints:
+          description: Paths where the subvolume is mounted on the targeted host.
+          type: list
+          sample: ["/home"]
+        parent:
+          description: The identifier of this subvolume's parent.
+          type: int
+          sample: 5
+        path:
+          description: The full path of the subvolume relative to the btrfs filesystem's root.
+          type: str
+          sample: /@home
+"""
+
+
+from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+    module_args = dict()
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True
+    )
+
+    provider = BtrfsFilesystemsProvider(module)
+    filesystems = [x.get_summary() for x in provider.get_filesystems()]
+    result = {
+        "filesystems": filesystems,
+    }
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py
new file mode 100644
index 0000000000..92c3c99c02
--- /dev/null
+++ b/plugins/modules/btrfs_subvolume.py
@@ -0,0 +1,676 @@
+#!/usr/bin/python
+
+# Copyright (c) 2022, Gregory Furlong
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: btrfs_subvolume
+short_description: Manage btrfs subvolumes
+version_added: "6.6.0"
+
+description: Creates, updates and deletes btrfs subvolumes and snapshots.
+
+options:
+  automount:
+    description:
+      - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make
+        any required changes.
+    type: bool
+    default: false
+  default:
+    description:
+      - Make the subvolume specified by O(name) the filesystem's default subvolume.
+    type: bool
+    default: false
+  filesystem_device:
+    description:
+      - A block device contained within the btrfs filesystem to be targeted.
+      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+    type: path
+  filesystem_label:
+    description:
+      - A descriptive label assigned to the btrfs filesystem to be targeted.
+      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+    type: str
+  filesystem_uuid:
+    description:
+      - A unique identifier assigned to the btrfs filesystem to be targeted.
+      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+    type: str
+  name:
+    description:
+      - Name of the subvolume/snapshot to be targeted.
+    required: true
+    type: str
+  recursive:
+    description:
+      - When V(true), indicates that parent/child subvolumes should be created/removed as necessary to complete the operation
+        (for O(state=present) and O(state=absent) respectively).
+    type: bool
+    default: false
+  snapshot_source:
+    description:
+      - Identifies the source subvolume for the created snapshot.
+      - Infers that the created subvolume is a snapshot.
+    type: str
+  snapshot_conflict:
+    description:
+      - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
+      - V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that
+        no change is required. Warning: this option does not yet verify that the target subvolume was generated from a snapshot
+        of the requested source.
+      - V(clobber) - If a subvolume already exists at the requested location, delete it first. This option is not idempotent
+        and results in a new snapshot being generated on every execution.
+      - V(error) - If a subvolume already exists at the requested location, return an error. This option is not idempotent
+        and results in an error on replay of the module.
+    type: str
+    choices: [skip, clobber, error]
+    default: skip
+  state:
+    description:
+      - Indicates the desired state of the targeted subvolume.
+    type: str
+    choices: [absent, present]
+    default: present
+
+notes:
+  - If any or all of the O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) options are provided, there is
+    expected to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a
+    single btrfs filesystem is mounted, that filesystem is used; otherwise, the module takes no action and returns an
+    error.
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - In some scenarios it may erroneously report intermediate subvolumes being created. After mounting, if a directory-like
+        file is found where the subvolume would have been created, the operation is skipped.
+  diff_mode:
+    support: none
+
+author:
+  - Gregory Furlong (@gnfzdz)
+"""
+
+EXAMPLES = r"""
+- name: Create a @home subvolume under the root subvolume
+  community.general.btrfs_subvolume:
+    name: /@home
+    filesystem_device: /dev/vda2
+
+- name: Remove the @home subvolume if it exists
+  community.general.btrfs_subvolume:
+    name: /@home
+    state: absent
+    filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the root subvolume named @
+  community.general.btrfs_subvolume:
+    name: /@
+    snapshot_source: /
+    filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the root subvolume and make it the new default subvolume
+  community.general.btrfs_subvolume:
+    name: /@
+    snapshot_source: /
+    default: true
+    filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the /@ subvolume, recursively creating intermediate subvolumes as required
+  community.general.btrfs_subvolume:
+    name: /@snapshots/@2022_06_09
+    snapshot_source: /@
+    recursive: true
+    filesystem_device: /dev/vda2
+
+- name: Remove the /@snapshots/@2022_06_09 subvolume, recursively deleting child subvolumes as required
+  community.general.btrfs_subvolume:
+    name: /@snapshots/@2022_06_09
+    state: absent
+    recursive: true
+    filesystem_device: /dev/vda2
+"""
+
+RETURN = r"""
+filesystem:
+  description:
+    - A summary of the final state of the targeted btrfs filesystem.
+  type: dict
+  returned: success
+  contains:
+    uuid:
+      description: A unique identifier assigned to the filesystem.
+      returned: success
+      type: str
+      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+    label:
+      description: An optional label assigned to the filesystem.
+      returned: success
+      type: str
+      sample: Tank
+    devices:
+      description: A list of devices assigned to the filesystem.
+      returned: success
+      type: list
+      sample:
+        - /dev/sda1
+        - /dev/sdb1
+    default_subvolume:
+      description: The ID of the filesystem's default subvolume.
+      returned: success and if filesystem is mounted
+      type: int
+      sample: 5
+    subvolumes:
+      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+      returned: success and if filesystem is mounted
+      type: list
+      elements: dict
+      contains:
+        id:
+          description: An identifier assigned to the subvolume, unique within the containing filesystem.
+          type: int
+          sample: 256
+        mountpoints:
+          description: Paths where the subvolume is mounted on the targeted host.
+          type: list
+          sample: ["/home"]
+        parent:
+          description: The identifier of this subvolume's parent.
+          type: int
+          sample: 5
+        path:
+          description: The full path of the subvolume relative to the btrfs filesystem's root.
+          type: str
+          sample: /@home
+
+modifications:
+  description:
+    - A list where each element describes a change made to the target btrfs filesystem.
+  type: list
+  returned: success
+  elements: str
+
+target_subvolume_id:
+  description:
+    - The ID of the subvolume specified with the O(name) parameter, either pre-existing or created as part of module execution.
+  type: int
+  sample: 257
+  returned: success and subvolume exists after module execution
+"""
+
+from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
+from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
+from ansible.module_utils.basic import AnsibleModule
+import os
+import tempfile
+
+
+class BtrfsSubvolumeModule(object):
+
+    __BTRFS_ROOT_SUBVOLUME = '/'
+    __BTRFS_ROOT_SUBVOLUME_ID = 5
+    __BTRFS_SUBVOLUME_INODE_NUMBER = 256
+
+    __CREATE_SUBVOLUME_OPERATION = 'create'
+    __CREATE_SNAPSHOT_OPERATION = 'snapshot'
+    __DELETE_SUBVOLUME_OPERATION = 'delete'
+    __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'
+
+    __UNKNOWN_SUBVOLUME_ID = '?'
+
+    def __init__(self, module):
+        self.module = module
+        self.__btrfs_api = BtrfsCommands(module)
+        self.__provider = BtrfsFilesystemsProvider(module)
+
+        # module parameters
+        name = self.module.params['name']
+        self.__name = normalize_subvolume_path(name) if name is not None else None
+        self.__state = self.module.params['state']
+
+        self.__automount = self.module.params['automount']
+        self.__default = self.module.params['default']
+        self.__filesystem_device = self.module.params['filesystem_device']
+        self.__filesystem_label = self.module.params['filesystem_label']
+        self.__filesystem_uuid = self.module.params['filesystem_uuid']
+        self.__recursive = self.module.params['recursive']
+        self.__snapshot_conflict = self.module.params['snapshot_conflict']
+        snapshot_source = self.module.params['snapshot_source']
+        self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None
+
+        # execution state
+        self.__filesystem = None
+        self.__required_mounts = []
+        self.__unit_of_work = []
+        self.__completed_work = []
+        self.__temporary_mounts = dict()
+
+    def run(self):
+        error = None
+        try:
+            self.__load_filesystem()
+            self.__prepare_unit_of_work()
+
+            if not self.module.check_mode:
+                # check required mounts & mount
+                if len(self.__unit_of_work) > 0:
+                    self.__execute_unit_of_work()
+                    self.__filesystem.refresh()
+            else:
+                # check required mounts
+                self.__completed_work.extend(self.__unit_of_work)
+        except Exception as e:
+            error = e
+        finally:
+            self.__cleanup_mounts()
+            if self.__filesystem is not None:
+                self.__filesystem.refresh_mountpoints()
+
+        return (error, self.get_results())
+
+    # Identify the targeted filesystem and obtain the current state
+    def __load_filesystem(self):
+        if self.__has_filesystem_criteria():
+            filesystem = self.__find_matching_filesytem()
+        else:
+            filesystem = self.__find_default_filesystem()
+
+        # The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
+        if not filesystem.is_mounted():
+            if not self.__automount:
+                raise BtrfsModuleException(
+                    "Target filesystem uuid=%s is not currently mounted and automount=False. "
+                    "Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
+            elif self.module.check_mode:
+                # TODO is failing the module an appropriate outcome in this scenario?
+                raise BtrfsModuleException(
+                    "Target filesystem uuid=%s is not currently mounted. "
+                    "Unable to validate the current state while running with check_mode=True" % filesystem.uuid)
+            else:
+                self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
+                filesystem.refresh()
+        self.__filesystem = filesystem
+
+    def __has_filesystem_criteria(self):
+        return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None
+
+    def __find_matching_filesytem(self):
+        criteria = {
+            'uuid': self.__filesystem_uuid,
+            'label': self.__filesystem_label,
+            'device': self.__filesystem_device,
+        }
+        return self.__provider.get_matching_filesystem(criteria)
+
+    def __find_default_filesystem(self):
+        filesystems = self.__provider.get_filesystems()
+        filesystem = None
+
+        if len(filesystems) == 1:
+            filesystem = filesystems[0]
+        else:
+            mounted_filesystems = [x for x in filesystems if x.is_mounted()]
+            if len(mounted_filesystems) == 1:
+                filesystem = mounted_filesystems[0]
+
+        if filesystem is not None:
+            return filesystem
+        else:
+            raise BtrfsModuleException(
+                "Failed to automatically identify targeted filesystem. "
+                "No explicit device indicated and found %d available filesystems." % len(filesystems)
+            )
+
+    # Prepare unit of work
+    def __prepare_unit_of_work(self):
+        if self.__state == "present":
+            if self.__snapshot_source is None:
+                self.__prepare_subvolume_present()
+            else:
+                self.__prepare_snapshot_present()
+
+            if self.__default:
+                self.__prepare_set_default()
+        elif self.__state == "absent":
+            self.__prepare_subvolume_absent()
+
+    def __prepare_subvolume_present(self):
+        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+        if subvolume is None:
+            self.__prepare_before_create_subvolume(self.__name)
+            self.__stage_create_subvolume(self.__name)
+
+    def __prepare_before_create_subvolume(self, subvolume_name):
+        closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
+        self.__stage_required_mount(closest_parent)
+        if self.__recursive:
+            self.__prepare_create_intermediates(closest_parent, subvolume_name)
+
+    def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
+        relative_path = closest_subvolume.get_child_relative_path(self.__name)
+        missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
+        if len(missing_subvolumes) > 1:
+            current = closest_subvolume.path
+            for s in missing_subvolumes[:-1]:
+                separator = os.path.sep if current[-1] != os.path.sep else ""
+                current = current + separator + s
+                self.__stage_create_subvolume(current, True)
+
+    def __prepare_snapshot_present(self):
+        source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
+        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+        subvolume_exists = subvolume is not None
+
+        if subvolume_exists:
+            if self.__snapshot_conflict == "skip":
+                # No change required
+                return
+            elif self.__snapshot_conflict == "error":
+                raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)
+
+        if source_subvolume is None:
+            raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
+        elif subvolume is not None and source_subvolume.id == subvolume.id:
+            raise BtrfsModuleException("Snapshot source and target are the same.")
+        else:
+            self.__stage_required_mount(source_subvolume)
+
+        if subvolume_exists and self.__snapshot_conflict == "clobber":
+            self.__prepare_delete_subvolume_tree(subvolume)
+        elif not subvolume_exists:
+            self.__prepare_before_create_subvolume(self.__name)
+
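+        # Whichever conflict policy applied above, the snapshot is only staged
+        # here; the filesystem is not touched until the unit of work executes.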
+        self.__stage_create_snapshot(source_subvolume, self.__name)
+
+    def __prepare_subvolume_absent(self):
+        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+        if subvolume is not None:
+            self.__prepare_delete_subvolume_tree(subvolume)
+
+    def __prepare_delete_subvolume_tree(self, subvolume):
+        if subvolume.is_filesystem_root():
+            raise BtrfsModuleException("Cannot delete the filesystem's root subvolume")
+        if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
+            raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False. "
+                                       "Either explicitly delete the child subvolumes first or pass "
+                                       "parameter recursive=True." % subvolume.path)
+
+        self.__stage_required_mount(subvolume.get_parent_subvolume())
+        queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
+        # prepare unit of work
+        for s in queue:
+            if s.is_mounted():
+                # TODO potentially unmount the subvolume if automount=True ?
+                raise BtrfsModuleException("Cannot delete mounted subvolume=%s" % s.path)
+            if s.is_filesystem_default():
+                self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
+            self.__stage_delete_subvolume(s)
+
+    def __prepare_recursive_delete_order(self, subvolume):
+        """Return the subvolume and all descendants as a list, ordered so that descendants always occur before their ancestors"""
+        pending = [subvolume]
+        ordered = []
+        while len(pending) > 0:
+            next = pending.pop()
+            ordered.append(next)
+            pending.extend(next.get_child_subvolumes())
+        ordered.reverse()  # reverse to ensure children are deleted before their parent
+        return ordered
+
+    def __prepare_set_default(self):
+        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+        subvolume_id = subvolume.id if subvolume is not None else None
+
+        if self.__filesystem.default_subvolid != subvolume_id:
+            self.__stage_set_default_subvolume(self.__name, subvolume_id)
+
+    # Stage operations to the unit of work
+    def __stage_required_mount(self, subvolume):
+        if subvolume.get_mounted_path() is None:
+            if self.__automount:
+                self.__required_mounts.append(subvolume)
+            else:
+                raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)
+
+    def __stage_create_subvolume(self, subvolume_path, intermediate=False):
+        """
+        Add creation of a subvolume to the unit of work.
+        If intermediate is true, the action is skipped if a directory-like file is found at the target
+        after mounting a parent subvolume.
+        """
+        self.__unit_of_work.append({
+            'action': self.__CREATE_SUBVOLUME_OPERATION,
+            'target': subvolume_path,
+            'intermediate': intermediate,
+        })
+
+    def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
+        """Add creation of a snapshot from source to target to the unit of work"""
+        self.__unit_of_work.append({
+            'action': self.__CREATE_SNAPSHOT_OPERATION,
+            'source': source_subvolume.path,
+            'source_id': source_subvolume.id,
+            'target': target_subvolume_path,
+        })
+
+    def __stage_delete_subvolume(self, subvolume):
+        """Add deletion of the target subvolume to the unit of work"""
+        self.__unit_of_work.append({
+            'action': self.__DELETE_SUBVOLUME_OPERATION,
+            'target': subvolume.path,
+            'target_id': subvolume.id,
+        })
+
+    def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
+        """Add update of the filesystem's default subvolume to the unit of work"""
+        self.__unit_of_work.append({
+            'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
+            'target': subvolume_path,
+            'target_id': subvolume_id,
+        })
+
+    # Execute the unit of work
+    def __execute_unit_of_work(self):
+        self.__check_required_mounts()
+        for op in self.__unit_of_work:
+            if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
+                self.__execute_create_subvolume(op)
+            elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
+                self.__execute_create_snapshot(op)
+            elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
+                self.__execute_delete_subvolume(op)
+            elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
+                self.__execute_set_default_subvolume(op)
+            else:
+                raise ValueError("Unknown operation type '%s'" % op['action'])
+
+    def __execute_create_subvolume(self, operation):
+        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
+        if not self.__is_existing_directory_like(target_mounted_path):
+            self.__btrfs_api.subvolume_create(target_mounted_path)
+            self.__completed_work.append(operation)
+
+    def __execute_create_snapshot(self, operation):
+        source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
+        source_mounted_path = source_subvolume.get_mounted_path()
+        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
+
+        self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
+        self.__completed_work.append(operation)
+
+    def __execute_delete_subvolume(self, operation):
+        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
+        self.__btrfs_api.subvolume_delete(target_mounted_path)
+        self.__completed_work.append(operation)
+
+    def __execute_set_default_subvolume(self, operation):
+        target = operation['target']
+        target_id = operation['target_id']
+
+        if target_id is None:
+            target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+
+            if target_subvolume is None:
+                self.__filesystem.refresh()  # the target may have been created earlier in module execution
+                target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+
+            if target_subvolume is None:
+                raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
+            else:
+                target_id = target_subvolume.id
+
+        self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
+        self.__completed_work.append(operation)
+
+    def __is_existing_directory_like(self, path):
+        return os.path.exists(path) and (
+            os.path.isdir(path) or
+            os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
+        )
+
+    def __check_required_mounts(self):
+        filtered = self.__filter_child_subvolumes(self.__required_mounts)
+        if len(filtered) > 0:
+            for subvolume in filtered:
+                self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
+            self.__filesystem.refresh_mountpoints()
+
+    def __filter_child_subvolumes(self, subvolumes):
+        """Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
+        filtered = []
+        last = None
+        ordered = sorted(subvolumes, key=lambda x: x.path)
+        for next in ordered:
+            if last is None or not next.path[0:len(last)] == last:
+                filtered.append(next)
+                last = next.path
+        return filtered
+
+    # Create/cleanup temporary mountpoints
+    def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
+        # this check should be redundant
+        if self.module.check_mode or not self.__automount:
+            raise BtrfsModuleException("Unable to temporarily mount required subvolumes "
+                                       "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))
+
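+        # Temporary mounts are cached per (filesystem uuid, subvolume id) pair, so
+        # a subvolume required by several operations is only mounted once and the
+        # same mountpoint is reused until cleanup.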
cache_key = "%s:%d" % (filesystem.uuid, subvolid) + # The subvolume was already mounted, so return the current path + if cache_key in self.__temporary_mounts: + return self.__temporary_mounts[cache_key] + + device = filesystem.devices[0] + mountpoint = tempfile.mkdtemp(dir="/tmp") + self.__temporary_mounts[cache_key] = mountpoint + + mount = self.module.get_bin_path("mount", required=True) + command = [mount, "-o", "noatime,subvolid=%d" % subvolid, device, mountpoint] + result = self.module.run_command(command, check_rc=True) + + return mountpoint + + def __cleanup_mounts(self): + for key in self.__temporary_mounts.keys(): + self.__cleanup_mount(self.__temporary_mounts[key]) + + def __cleanup_mount(self, mountpoint): + umount = self.module.get_bin_path("umount", required=True) + result = self.module.run_command([umount, mountpoint]) + if result[0] == 0: + rmdir = self.module.get_bin_path("rmdir", required=True) + self.module.run_command([rmdir, mountpoint]) + + # Format and return results + def get_results(self): + target = self.__filesystem.get_subvolume_by_name(self.__name) + return dict( + changed=len(self.__completed_work) > 0, + filesystem=self.__filesystem.get_summary(), + modifications=self.__get_formatted_modifications(), + target_subvolume_id=(target.id if target is not None else None) + ) + + def __get_formatted_modifications(self): + return [self.__format_operation_result(op) for op in self.__completed_work] + + def __format_operation_result(self, operation): + action_type = operation['action'] + if action_type == self.__CREATE_SUBVOLUME_OPERATION: + return self.__format_create_subvolume_result(operation) + elif action_type == self.__CREATE_SNAPSHOT_OPERATION: + return self.__format_create_snapshot_result(operation) + elif action_type == self.__DELETE_SUBVOLUME_OPERATION: + return self.__format_delete_subvolume_result(operation) + elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + return self.__format_set_default_subvolume_result(operation) + else: + raise ValueError("Unknown operation type '%s'" % operation['action']) + + def __format_create_subvolume_result(self, operation): + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created subvolume '%s' (%s)" % (target, target_id) + + def __format_create_snapshot_result(self, operation): + source = operation['source'] + source_id = operation['source_id'] + + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id) + + def __format_delete_subvolume_result(self, operation): + target = operation['target'] + target_id = operation['target_id'] + return "Deleted subvolume '%s' (%s)" % (target, target_id) + + def __format_set_default_subvolume_result(self, operation): + target = operation['target'] + if 'target_id' in operation: + target_id = operation['target_id'] + else: + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Updated default subvolume to '%s' (%s)" % (target, target_id) + + +def run_module(): + module_args = dict( + automount=dict(type='bool', default=False), + default=dict(type='bool', default=False), + 
filesystem_device=dict(type='path'), + filesystem_label=dict(type='str'), + filesystem_uuid=dict(type='str'), + name=dict(type='str', required=True), + recursive=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['present', 'absent']), + snapshot_source=dict(type='str'), + snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + subvolume = BtrfsSubvolumeModule(module) + error, result = subvolume.run() + if error is not None: + module.fail_json(str(error), **result) + else: + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py deleted file mode 120000 index 10ae6e963e..0000000000 --- a/plugins/modules/bundler.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/bundler.py \ No newline at end of file diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py new file mode 100644 index 0000000000..2395cda332 --- /dev/null +++ b/plugins/modules/bundler.py @@ -0,0 +1,200 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Tim Hoiberg +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: bundler +short_description: Manage Ruby Gem dependencies with Bundler +description: + - Manage installation and Gem version dependencies for Ruby using the Bundler gem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + executable: + type: str + description: + - The path to the bundler executable. + state: + type: str + description: + - The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version. + choices: [present, latest] + default: present + chdir: + type: path + description: + - The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory. + - If not specified, it defaults to the temporary working directory. + exclude_groups: + type: list + elements: str + description: + - A list of Gemfile groups to exclude during operations. This only applies when O(state=present). Bundler considers + this a 'remembered' property for the Gemfile and automatically excludes groups in future operations even if O(exclude_groups) + is not set. + clean: + description: + - Only applies if O(state=present). If set removes any gems on the target host that are not in the gemfile. + type: bool + default: false + gemfile: + type: path + description: + - Only applies if O(state=present). The path to the gemfile to use to install gems. + - If not specified it defaults to the Gemfile in current directory. + local: + description: + - If set only installs gems from the cache on the target host. + type: bool + default: false + deployment_mode: + description: + - Only applies if O(state=present). If set it installs gems in C(./vendor/bundle) instead of the default location. Requires + a C(Gemfile.lock) file to have been created prior. + type: bool + default: false + user_install: + description: + - Only applies if O(state=present). Installs gems in the local user's cache or for all users. 
+ type: bool + default: true + gem_path: + type: path + description: + - Only applies if O(state=present). Specifies the directory to install the gems into. If O(chdir) is set then this path + is relative to O(chdir). + - If not specified the default RubyGems gem paths are used. + binstub_directory: + type: path + description: + - Only applies if O(state=present). Specifies the directory to install any gem bins files to. When executed the bin + files run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir) + is set then this path is relative to O(chdir). + extra_args: + type: str + description: + - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation + for more information. +author: "Tim Hoiberg (@thoiberg)" +""" + +EXAMPLES = r""" +- name: Install gems from a Gemfile in the current directory + community.general.bundler: + state: present + executable: ~/.rvm/gems/2.1.5/bin/bundle + +- name: Exclude the production group from installing + community.general.bundler: + state: present + exclude_groups: production + +- name: Install gems into ./vendor/bundle + community.general.bundler: + state: present + deployment_mode: true + +- name: Install gems using a Gemfile in another directory + community.general.bundler: + state: present + gemfile: ../rails_project/Gemfile + +- name: Update Gemfile in another directory + community.general.bundler: + state: latest + chdir: ~/rails_project +""" + +from ansible.module_utils.basic import AnsibleModule + + +def get_bundler_executable(module): + if module.params.get('executable'): + result = module.params.get('executable').split(' ') + else: + result = [module.get_bin_path('bundle', True)] + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + executable=dict(), + state=dict(default='present', choices=['present', 'latest']), + chdir=dict(type='path'), + exclude_groups=dict(type='list', elements='str'), + clean=dict(default=False, type='bool'), + gemfile=dict(type='path'), + local=dict(default=False, type='bool'), + deployment_mode=dict(default=False, type='bool'), + user_install=dict(default=True, type='bool'), + gem_path=dict(type='path'), + binstub_directory=dict(type='path'), + extra_args=dict(), + ), + supports_check_mode=True + ) + + state = module.params.get('state') + chdir = module.params.get('chdir') + exclude_groups = module.params.get('exclude_groups') + clean = module.params.get('clean') + gemfile = module.params.get('gemfile') + local = module.params.get('local') + deployment_mode = module.params.get('deployment_mode') + user_install = module.params.get('user_install') + gem_path = module.params.get('gem_path') + binstub_directory = module.params.get('binstub_directory') + extra_args = module.params.get('extra_args') + + cmd = get_bundler_executable(module) + + if module.check_mode: + cmd.append('check') + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) + + module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) + + if state == 'present': + cmd.append('install') + if exclude_groups: + cmd.extend(['--without', ':'.join(exclude_groups)]) + if clean: + cmd.append('--clean') + if gemfile: + cmd.extend(['--gemfile', gemfile]) + if local: + cmd.append('--local') + if deployment_mode: + cmd.append('--deployment') + if not user_install: + cmd.append('--system') + if gem_path: + cmd.extend(['--path', gem_path]) + if binstub_directory: + cmd.extend(['--binstubs', 
binstub_directory])
+    else:
+        cmd.append('update')
+        if local:
+            cmd.append('--local')
+
+    if extra_args:
+        cmd.extend(extra_args.split(' '))
+
+    rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+    module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/bzr.py b/plugins/modules/bzr.py
deleted file mode 120000
index e746095038..0000000000
--- a/plugins/modules/bzr.py
+++ /dev/null
@@ -1 +0,0 @@
-./source_control/bzr.py
\ No newline at end of file
diff --git a/plugins/modules/bzr.py b/plugins/modules/bzr.py
new file mode 100644
index 0000000000..3493b9476d
--- /dev/null
+++ b/plugins/modules/bzr.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, André Paramés
+# Based on the Git module by Michael DeHaan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: bzr
+author:
+  - André Paramés (@andreparames)
+short_description: Deploy software (or files) from bzr branches
+description:
+  - Manage C(bzr) branches to deploy files or software.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - SSH or HTTP protocol address of the parent branch.
+    aliases: [parent]
+    required: true
+    type: str
+  dest:
+    description:
+      - Absolute path of where the branch should be cloned to.
+    required: true
+    type: path
+  version:
+    description:
+      - What version of the branch to clone. This can be the bzr revno or revid.
+    default: head
+    type: str
+  force:
+    description:
+      - If V(true), any modified files in the working tree are discarded.
+    type: bool
+    default: false
+  executable:
+    description:
+      - Path to C(bzr) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
+ type: str +""" + +EXAMPLES = r""" +- name: Checkout + community.general.bzr: + name: bzr+ssh://foosball.example.org/path/to/branch + dest: /srv/checkout + version: 22 +""" + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +class Bzr(object): + def __init__(self, module, parent, dest, version, bzr_path): + self.module = module + self.parent = parent + self.dest = dest + self.version = version + self.bzr_path = bzr_path + + def _command(self, args_list, cwd=None, **kwargs): + (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) + return (rc, out, err) + + def get_version(self): + '''samples the version of the bzr branch''' + + cmd = [self.bzr_path, "revno"] + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + revno = stdout.strip() + return revno + + def clone(self): + '''makes a new bzr branch if it does not already exist''' + dest_dirname = os.path.dirname(self.dest) + try: + os.makedirs(dest_dirname) + except Exception: + pass + if self.version.lower() != 'head': + args_list = ["branch", "-r", self.version, self.parent, self.dest] + else: + args_list = ["branch", self.parent, self.dest] + return self._command(args_list, check_rc=True, cwd=dest_dirname) + + def has_local_mods(self): + + cmd = [self.bzr_path, "status", "-S"] + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + lines = stdout.splitlines() + mods_re = re.compile('^\\?\\?.*$') + + lines = [c for c in lines if not mods_re.search(c)] + return len(lines) > 0 + + def reset(self, force): + ''' + Resets the index and working tree to head. + Discards any changes to tracked files in the working + tree since that commit. + ''' + if not force and self.has_local_mods(): + self.module.fail_json(msg="Local modifications exist in branch (force=false).") + return self._command(["revert"], check_rc=True, cwd=self.dest) + + def fetch(self): + '''updates branch from remote sources''' + if self.version.lower() != 'head': + (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) + else: + (rc, out, err) = self._command(["pull"], cwd=self.dest) + if rc != 0: + self.module.fail_json(msg="Failed to pull") + return (rc, out, err) + + def switch_version(self): + '''once pulled, switch to a particular revno or revid''' + if self.version.lower() != 'head': + args_list = ["revert", "-r", self.version] + else: + args_list = ["revert"] + return self._command(args_list, check_rc=True, cwd=self.dest) + + +# =========================================== + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(type='path', required=True), + name=dict(type='str', required=True, aliases=['parent']), + version=dict(type='str', default='head'), + force=dict(type='bool', default=False), + executable=dict(type='str'), + ) + ) + + dest = module.params['dest'] + parent = module.params['name'] + version = module.params['version'] + force = module.params['force'] + bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) + + bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') + + rc, out, err = (0, None, None) + + bzr = Bzr(module, parent, dest, version, bzr_path) + + # if there is no bzr configuration, do a branch operation + # else pull and switch the version + before = None + local_mods = False + if not os.path.exists(bzrconfig): + (rc, out, err) = bzr.clone() + + else: + # else do a pull + local_mods = bzr.has_local_mods() + before = bzr.get_version() + (rc, out, err) = bzr.reset(force) + if rc != 
0: + module.fail_json(msg=err) + (rc, out, err) = bzr.fetch() + if rc != 0: + module.fail_json(msg=err) + + # switch to version specified regardless of whether + # we cloned or pulled + (rc, out, err) = bzr.switch_version() + + # determine if we changed anything + after = bzr.get_version() + changed = False + + if before != after or local_mods: + changed = True + + module.exit_json(changed=changed, before=before, after=after) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py deleted file mode 120000 index ecc63bfa17..0000000000 --- a/plugins/modules/campfire.py +++ /dev/null @@ -1 +0,0 @@ -./notification/campfire.py \ No newline at end of file diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py new file mode 100644 index 0000000000..c1da278634 --- /dev/null +++ b/plugins/modules/campfire.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: campfire +short_description: Send a message to Campfire +description: + - Send a message to Campfire. + - Messages with newlines result in a "Paste" message being sent. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + subscription: + type: str + description: + - The subscription name to use. + required: true + token: + type: str + description: + - API token. + required: true + room: + type: str + description: + - Room number to which the message should be sent. + required: true + msg: + type: str + description: + - The message body. + required: true + notify: + type: str + description: + - Send a notification sound before the message. + required: false + choices: + - 56k + - bell + - bezos + - bueller + - clowntown + - cottoneyejoe + - crickets + - dadgummit + - dangerzone + - danielsan + - deeper + - drama + - greatjob + - greyjoy + - guarantee + - heygirl + - horn + - horror + - inconceivable + - live + - loggins + - makeitso + - noooo + - nyan + - ohmy + - ohyeah + - pushit + - rimshot + - rollout + - rumble + - sax + - secret + - sexyback + - story + - tada + - tmyk + - trololo + - trombone + - unix + - vuvuzela + - what + - whoomp + - yeah + - yodel + +# informational: requirements for nodes +requirements: [] +author: "Adam Garside (@fabulops)" +""" + +EXAMPLES = r""" +- name: Send a message to Campfire + community.general.campfire: + subscription: foo + token: 12345 + room: 123 + msg: Task completed. + +- name: Send a message to Campfire + community.general.campfire: + subscription: foo + token: 12345 + room: 123 + notify: loggins + msg: Task completed ... with feeling. 
+""" + +try: + from html import escape as html_escape +except ImportError: + # Python-3.2 or later + import cgi + + def html_escape(text, quote=True): + return cgi.escape(text, quote) + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + subscription=dict(required=True), + token=dict(required=True, no_log=True), + room=dict(required=True), + msg=dict(required=True), + notify=dict(choices=["56k", "bell", "bezos", "bueller", + "clowntown", "cottoneyejoe", + "crickets", "dadgummit", "dangerzone", + "danielsan", "deeper", "drama", + "greatjob", "greyjoy", "guarantee", + "heygirl", "horn", "horror", + "inconceivable", "live", "loggins", + "makeitso", "noooo", "nyan", "ohmy", + "ohyeah", "pushit", "rimshot", + "rollout", "rumble", "sax", "secret", + "sexyback", "story", "tada", "tmyk", + "trololo", "trombone", "unix", + "vuvuzela", "what", "whoomp", "yeah", + "yodel"]), + ), + supports_check_mode=False + ) + + subscription = module.params["subscription"] + token = module.params["token"] + room = module.params["room"] + msg = module.params["msg"] + notify = module.params["notify"] + + URI = "https://%s.campfirenow.com" % subscription + NSTR = "SoundMessage%s" + MSTR = "%s" + AGENT = "Ansible/1.2" + + # Hack to add basic auth username and password the way fetch_url expects + module.params['url_username'] = token + module.params['url_password'] = 'X' + + target_url = '%s/room/%s/speak.xml' % (URI, room) + headers = {'Content-Type': 'application/xml', + 'User-agent': AGENT} + + # Send some audible notification if requested + if notify: + response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers) + if info['status'] not in [200, 201]: + module.fail_json(msg="unable to send msg: '%s', campfire api" + " returned error code: '%s'" % + (notify, info['status'])) + + # Send the message + response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers) + if info['status'] not in [200, 201]: + module.fail_json(msg="unable to send msg: '%s', campfire api" + " returned error code: '%s'" % + (msg, info['status'])) + + module.exit_json(changed=True, room=room, msg=msg, notify=notify) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py deleted file mode 120000 index 4688c90e54..0000000000 --- a/plugins/modules/capabilities.py +++ /dev/null @@ -1 +0,0 @@ -./system/capabilities.py \ No newline at end of file diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py new file mode 100644 index 0000000000..64df086d67 --- /dev/null +++ b/plugins/modules/capabilities.py @@ -0,0 +1,187 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: capabilities +short_description: Manage Linux capabilities +description: + - This module manipulates files privileges using the Linux capabilities(7) system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - Specifies the path to the file to be managed. 
+ type: str + required: true + aliases: [key] + capability: + description: + - Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent)). + type: str + required: true + aliases: [cap] + state: + description: + - Whether the entry should be present or absent in the file's capabilities. + type: str + choices: [absent, present] + default: present +notes: + - The capabilities system automatically transforms operators and flags into the effective set, so for example, C(cap_foo=ep) + probably becomes C(cap_foo+ep). + - This module does not attempt to determine the final operator and flags to compare, so you want to ensure that your capabilities + argument matches the final capabilities. +author: + - Nate Coraor (@natefoo) +""" + +EXAMPLES = r""" +- name: Set cap_sys_chroot+ep on /foo + community.general.capabilities: + path: /foo + capability: cap_sys_chroot+ep + state: present + +- name: Remove cap_net_bind_service from /bar + community.general.capabilities: + path: /bar + capability: cap_net_bind_service + state: absent +""" + +from ansible.module_utils.basic import AnsibleModule + +OPS = ('=', '-', '+') + + +class CapabilitiesModule(object): + platform = 'Linux' + distribution = None + + def __init__(self, module): + self.module = module + self.path = module.params['path'].strip() + self.capability = module.params['capability'].strip().lower() + self.state = module.params['state'] + self.getcap_cmd = module.get_bin_path('getcap', required=True) + self.setcap_cmd = module.get_bin_path('setcap', required=True) + self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present') + + self.run() + + def run(self): + + current = self.getcap(self.path) + caps = [cap[0] for cap in current] + + if self.state == 'present' and self.capability_tup not in current: + # need to add capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list if it is already set (but op/flags differ) + current = [x for x in current if x[0] != self.capability_tup[0]] + # add new cap with correct op/flags + current.append(self.capability_tup) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + elif self.state == 'absent' and self.capability_tup[0] in caps: + # need to remove capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list and then set current list + current = [x for x in current if x[0] != self.capability_tup[0]] + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json(changed=False, state=self.state) + + def getcap(self, path): + rval = [] + cmd = [self.getcap_cmd, "-v", path] + rc, stdout, stderr = self.module.run_command(cmd) + # If file xattrs are set but no caps are set the output will be: + # '/foo =' + # If file xattrs are unset the output will be: + # '/foo' + # If the file does not exist, the stderr will be (with rc == 0...): + # '/foo (No such file or directory)' + if rc != 0 or stderr != "": + self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) + if stdout.strip() != path: + if ' =' in stdout: + # process output of an older version of libcap + caps = stdout.split(' =')[1].strip().split() + elif stdout.strip().endswith(")"): # '/foo (Error Message)' 
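+                # A parenthesised diagnostic (for example '/foo (No such file or
+                # directory)') is not a capability list, so treat it as a failure.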
+                self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+            else:
+                # otherwise, we have a newer version here
+                # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+                caps = stdout.split()[1].strip().split()
+            for cap in caps:
+                cap = cap.lower()
+                # getcap condenses capabilities with the same op/flags into a
+                # comma-separated list, so we have to parse that
+                if ',' in cap:
+                    cap_group = cap.split(',')
+                    cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+                    for subcap in cap_group:
+                        rval.append((subcap, op, flags))
+                else:
+                    rval.append(self._parse_cap(cap))
+        return rval
+
+    def setcap(self, path, caps):
+        caps = ' '.join([''.join(cap) for cap in caps])
+        cmd = [self.setcap_cmd, caps, path]
+        rc, stdout, stderr = self.module.run_command(cmd)
+        if rc != 0:
+            self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+        else:
+            return stdout
+
+    def _parse_cap(self, cap, op_required=True):
+        opind = -1
+        try:
+            i = 0
+            while opind == -1:
+                opind = cap.find(OPS[i])
+                i += 1
+        except Exception:
+            if op_required:
+                self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+            else:
+                return (cap, None, None)
+        op = cap[opind]
+        cap, flags = cap.split(op)
+        return (cap, op, flags)
+
+
+# ==============================================================
+# main
+
+def main():
+    # defining module
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='str', required=True, aliases=['key']),
+            capability=dict(type='str', required=True, aliases=['cap']),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True,
+    )
+
+    CapabilitiesModule(module)
+
+
+if __name__ == '__main__':
+    main()
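The (name, operator, flags) split that `_parse_cap` performs above is easy to exercise on its own. A minimal standalone sketch; the `parse_cap` helper and the sample specs below are illustrative and not part of the module:

```python
# Illustrative re-implementation of the capability parsing above: split a
# spec such as "cap_sys_chroot+ep" into (name, operator, flags).
OPS = ('=', '-', '+')


def parse_cap(spec, op_required=True):
    for op in OPS:
        if op in spec:
            name, flags = spec.split(op, 1)
            return (name, op, flags)
    if op_required:
        raise ValueError("no operator (one of %s) in %r" % (str(OPS), spec))
    return (spec, None, None)


print(parse_cap('cap_sys_chroot+ep'))                        # ('cap_sys_chroot', '+', 'ep')
print(parse_cap('cap_net_bind_service', op_required=False))  # ('cap_net_bind_service', None, None)
```

Joining the tuple back together (`''.join(cap)`) is exactly how `setcap` above rebuilds the argument string it hands to the `setcap` binary.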
diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py
new file mode 100644
index 0000000000..3ec0012ca0
--- /dev/null
+++ b/plugins/modules/cargo.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# Copyright (c) 2021 Radek Sprta
+# Copyright (c) 2024 Colin Nolan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: cargo
+short_description: Manage Rust packages with cargo
+version_added: 4.3.0
+description:
+  - Manage Rust packages with cargo.
+author: "Radek Sprta (@radek-sprta)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  executable:
+    description:
+      - Path to the C(cargo) installed in the system.
+      - If not specified, the module looks for C(cargo) in E(PATH).
+    type: path
+    version_added: 7.5.0
+  name:
+    description:
+      - The name of a Rust package to install.
+    type: list
+    elements: str
+    required: true
+  path:
+    description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, V(/usr/local)
+      becomes V(/usr/local/bin).
+    type: path
+  version:
+    description: The version to install. If O(name) contains multiple values, the module tries to install all of them in this
+      version.
+    type: str
+    required: false
+  locked:
+    description:
+      - Install with locked dependencies.
+      - This is only used when installing packages.
+    required: false
+    type: bool
+    default: false
+    version_added: 7.5.0
+  state:
+    description:
+      - The state of the Rust package.
+    required: false
+    type: str
+    default: present
+    choices: ["present", "absent", "latest"]
+  directory:
+    description:
+      - Path to the source directory to install the Rust package from.
+      - This is only used when installing packages.
+    type: path
+    required: false
+    version_added: 9.1.0
+  features:
+    description:
+      - List of features to activate.
+      - This is only used when installing packages.
+    type: list
+    elements: str
+    required: false
+    default: []
+    version_added: 11.0.0
+requirements:
+  - cargo installed
+"""

+EXAMPLES = r"""
+- name: Install "ludusavi" Rust package
+  community.general.cargo:
+    name: ludusavi
+
+- name: Install "ludusavi" Rust package with locked dependencies
+  community.general.cargo:
+    name: ludusavi
+    locked: true
+
+- name: Install "ludusavi" Rust package in version 0.10.0
+  community.general.cargo:
+    name: ludusavi
+    version: '0.10.0'
+
+- name: Install "ludusavi" Rust package to global location
+  community.general.cargo:
+    name: ludusavi
+    path: /usr/local
+
+- name: Remove "ludusavi" Rust package
+  community.general.cargo:
+    name: ludusavi
+    state: absent
+
+- name: Update "ludusavi" Rust package to its latest version
+  community.general.cargo:
+    name: ludusavi
+    state: latest
+
+- name: Install "ludusavi" Rust package from source directory
+  community.general.cargo:
+    name: ludusavi
+    directory: /path/to/ludusavi/source
+
+- name: Install "serpl" Rust package with ast_grep feature
+  community.general.cargo:
+    name: serpl
+    features:
+      - ast_grep
+"""
+
+import json
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Cargo(object):
+    def __init__(self, module, **kwargs):
+        self.module = module
+        self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
+        self.name = kwargs["name"]
+        self.path = kwargs["path"]
+        self.state = kwargs["state"]
+        self.version = kwargs["version"]
+        self.locked = kwargs["locked"]
+        self.directory = kwargs["directory"]
+        self.features = kwargs["features"]
+
+    @property
+    def path(self):
+        return self._path
+
+    @path.setter
+    def path(self, path):
+        if path is not None and not os.path.isdir(path):
+            self.module.fail_json(msg="Path %s is not a directory" % path)
+        self._path = path
+
+    def _exec(
+        self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
+    ):
+        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+            cmd = self.executable + args
+            rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
+            return out, err
+        return "", ""
+
+    def get_installed(self):
+        cmd = ["install", "--list"]
+        if self.path:
+            cmd.append("--root")
+            cmd.append(self.path)
+
+        data, dummy = self._exec(cmd, True, False, False)
+
+        package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$")
+        installed = {}
+        for line in data.splitlines():
+            package_info = package_regex.match(line)
+            if package_info:
+                installed[package_info.group(1)] = package_info.group(2)
+
+        return installed
+
+    def install(self, packages=None):
+        cmd = ["install"]
+        cmd.extend(packages or self.name)
+        if self.locked:
+            cmd.append("--locked")
+        if self.path:
+            cmd.append("--root")
+            cmd.append(self.path)
+        if self.version:
+            cmd.append("--version")
+            cmd.append(self.version)
+        if self.directory:
+            cmd.append("--path")
+            cmd.append(self.directory)
+        if self.features:
+            cmd += ["--features", ",".join(self.features)]
+        return self._exec(cmd)
+
+    def is_outdated(self, name):
+        installed_version = self.get_installed().get(name)
+        latest_version = (
+            self.get_latest_published_version(name)
+            if not self.directory
+            else self.get_source_directory_version(name)
+        )
+        return installed_version != latest_version
+
+    def get_latest_published_version(self, name):
+        cmd = ["search", name, "--limit", "1"]
+        data, dummy = self._exec(cmd, True, False, False)
+
+        match = re.search(r'"(.+)"', data)
+        if not match:
+            self.module.fail_json(
+                msg="No published version for package %s found" % name
+            )
+        return match.group(1)
+
+    def get_source_directory_version(self, name):
+        cmd = [
+            "metadata",
+            "--format-version",
+            "1",
+            "--no-deps",
+            "--manifest-path",
+            os.path.join(self.directory, "Cargo.toml"),
+        ]
+        data, dummy = self._exec(cmd, True, False, False)
+        manifest = json.loads(data)
+
+        package = next(
+            (package for package in manifest["packages"] if package["name"] == name),
+            None,
+        )
+        if not package:
+            self.module.fail_json(
+                msg="Package %s not defined in source, found: %s"
+                % (name, [x["name"] for x in manifest["packages"]])
+            )
+        return package["version"]
+
+    def uninstall(self, packages=None):
+        cmd = ["uninstall"]
+        cmd.extend(packages or self.name)
+        return self._exec(cmd)
+
+
+def main():
+    arg_spec = dict(
+        executable=dict(type="path"),
+        name=dict(required=True, type="list", elements="str"),
+        path=dict(type="path"),
+        state=dict(default="present", choices=["present", "absent", "latest"]),
+        version=dict(type="str"),
+        locked=dict(default=False, type="bool"),
+        directory=dict(type="path"),
+        features=dict(default=[], type="list", elements="str"),
+    )
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+    name = module.params["name"]
+    state = module.params["state"]
+    version = module.params["version"]
+    directory = module.params["directory"]
+
+    if not name:
+        module.fail_json(msg="Package name must be specified")
+
+    if directory is not None and not os.path.isdir(directory):
+        module.fail_json(msg="Source directory does not exist")
+
+    # Set LANG env since we parse stdout
+    module.run_command_environ_update = dict(
+        LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
+    )
+
+    cargo = Cargo(module, **module.params)
+    changed, out, err = False, None, None
+    installed_packages = cargo.get_installed()
+    if state == "present":
+        to_install = [
+            n
+            for n in name
+            if (n not in installed_packages)
+            or (version and version != installed_packages[n])
+        ]
+        if to_install:
+            changed = True
+            out, err = cargo.install(to_install)
+    elif state == "latest":
+        to_update = [
+            n for n in name if n not in installed_packages or cargo.is_outdated(n)
+        ]
+        if to_update:
+            changed = True
+            out, err = cargo.install(to_update)
+    else:  # absent
+        to_uninstall = [n for n in name if n in installed_packages]
+        if to_uninstall:
+            changed = True
+            out, err = cargo.uninstall(to_uninstall)
+
+    module.exit_json(changed=changed, stdout=out, stderr=err)
+
+
+if __name__ == "__main__":
+    main()
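Both version look-ups in the cargo module above boil down to small text-parsing steps. A sketch of the two regexes at work; the sample `cargo install --list` and `cargo search` outputs below are assumptions about cargo's output format, not captured from a real run:

```python
import re

# get_installed(): each installed crate is listed as "<name> v<version>:",
# followed by indented binary names that the regex deliberately skips.
install_list = "ludusavi v0.10.0:\n    ludusavi\n"
installed = {
    m.group(1): m.group(2)
    for m in (re.match(r"^([\w\-]+) v(\S+).*:$", line) for line in install_list.splitlines())
    if m
}
print(installed)  # {'ludusavi': '0.10.0'}

# get_latest_published_version(): the first quoted string in the search
# output is taken as the latest published version.
search_line = 'ludusavi = "0.29.0"    # Back up your video game save data'
match = re.search(r'"(.+)"', search_line)
print(match.group(1))  # 0.29.0
```

With `state: latest`, a package is reinstalled whenever the two results differ, which is all `is_outdated` checks.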
diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py
deleted file mode 120000
index 2190ebc7bc..0000000000
--- a/plugins/modules/catapult.py
+++ /dev/null
@@ -1 +0,0 @@
-./notification/catapult.py
\ No newline at end of file
diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py
new file mode 100644
index 0000000000..053eb4b51b
--- /dev/null
+++ b/plugins/modules/catapult.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016, Jonathan Mainguy
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# basis of code taken from the Ansible Twilio and Nexmo modules
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: catapult
+short_description: Send an SMS or MMS using the Catapult Bandwidth API
+description:
+  - Allows notifications to be sent as SMS or MMS messages using the Catapult Bandwidth API.
+deprecated:
+  removed_in: 13.0.0
+  why: >-
+    DNS fails to resolve the API endpoint used by the module since Oct 2024.
+    See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details.
+  alternative: There is none.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  src:
+    type: str
+    description:
+      - One of your Catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)).
+    required: true
+  dest:
+    type: list
+    elements: str
+    description:
+      - The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)).
+    required: true
+  msg:
+    type: str
+    description:
+      - The contents of the text message (must be 2048 characters or less).
+    required: true
+  media:
+    type: str
+    description:
+      - For MMS messages, a media URL to the location of the media to be sent with the message.
+  user_id:
+    type: str
+    description:
+      - User ID from API account page.
+    required: true
+  api_token:
+    type: str
+    description:
+      - API Token from API account page.
+    required: true
+  api_secret:
+    type: str
+    description:
+      - API Secret from API account page.
+    required: true
+
+author: "Jonathan Mainguy (@Jmainguy)"
+notes:
+  - Will return changed even if the media URL is wrong.
+  - Will return changed if the destination number is invalid.
+"""
+
+EXAMPLES = r"""
+- name: Send an MMS to multiple users
+  community.general.catapult:
+    src: "+15035555555"
+    dest:
+      - "+12525089000"
+      - "+12018994225"
+    media: "http://example.com/foobar.jpg"
+    msg: "Task is complete"
+    user_id: "{{ user_id }}"
+    api_token: "{{ api_token }}"
+    api_secret: "{{ api_secret }}"
+
+- name: Send an SMS to a single user
+  community.general.catapult:
+    src: "+15035555555"
+    dest: "+12018994225"
+    msg: "Consider yourself notified"
+    user_id: "{{ user_id }}"
+    api_token: "{{ api_token }}"
+    api_secret: "{{ api_secret }}"
+"""
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def send(module, src, dest, msg, media, user_id, api_token, api_secret):
+    """
+    Send the message
+    """
+    AGENT = "Ansible"
+    URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
+    data = {'from': src, 'to': dest, 'text': msg}
+    if media:
+        data['media'] = media
+
+    headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
+
+    # Hack module params to have the Basic auth params that fetch_url expects
+    module.params['url_username'] = api_token.replace('\n', '')
+    module.params['url_password'] = api_secret.replace('\n', '')
+
+    return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
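`send()` leans on `fetch_url` to derive HTTP Basic auth from the `url_username`/`url_password` params it injects. A stdlib-only sketch of the request it effectively produces; the token, secret and numbers are placeholders:

```python
import base64
import json

api_token, api_secret = 'my-token', 'my-secret'  # placeholders
data = {'from': '+15035555555', 'to': '+12018994225', 'text': 'hello'}

# fetch_url builds this header from url_username/url_password; it is shown
# explicitly here for illustration only.
auth = base64.b64encode(('%s:%s' % (api_token, api_secret)).encode()).decode()
headers = {
    'User-Agent': 'Ansible',
    'Content-type': 'application/json',
    'Authorization': 'Basic %s' % auth,
}
print(headers['Authorization'])
print(json.dumps(data))
```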
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            src=dict(required=True),
+            dest=dict(required=True, type='list', elements='str'),
+            msg=dict(required=True),
+            user_id=dict(required=True),
+            api_token=dict(required=True, no_log=True),
+            api_secret=dict(required=True, no_log=True),
+            media=dict(),
+        ),
+    )
+
+    src = module.params['src']
+    dest = module.params['dest']
+    msg = module.params['msg']
+    media = module.params['media']
+    user_id = module.params['user_id']
+    api_token = module.params['api_token']
+    api_secret = module.params['api_secret']
+
+    for number in dest:
+        rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
+        if info["status"] != 201:
+            body = json.loads(info["body"])
+            fail_msg = body["message"]
+            module.fail_json(msg=fail_msg)
+
+    changed = True
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/circonus_annotation.py b/plugins/modules/circonus_annotation.py
deleted file mode 120000
index 42ad352db3..0000000000
--- a/plugins/modules/circonus_annotation.py
+++ /dev/null
@@ -1 +0,0 @@
-./monitoring/circonus_annotation.py
\ No newline at end of file
diff --git a/plugins/modules/circonus_annotation.py b/plugins/modules/circonus_annotation.py
new file mode 100644
index 0000000000..4d00b6fb98
--- /dev/null
+++ b/plugins/modules/circonus_annotation.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: circonus_annotation
+short_description: Create an annotation in Circonus
+description:
+  - Create an annotation event with a given category, title and description. Optionally, start and stop timestamps or a duration
+    can be provided.
+author: "Nick Harring (@NickatEpic)"
+requirements:
+  - requests >= 2.0.0
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  api_key:
+    type: str
+    description:
+      - Circonus API key.
+    required: true
+  category:
+    type: str
+    description:
+      - Annotation Category.
+    required: true
+  description:
+    type: str
+    description:
+      - Description of annotation.
+    required: true
+  title:
+    type: str
+    description:
+      - Title of annotation.
+    required: true
+  start:
+    type: int
+    description:
+      - Unix timestamp of event start.
+      - If not specified, it defaults to "now".
+  stop:
+    type: int
+    description:
+      - Unix timestamp of event end.
+      - If not specified, it defaults to "now" + O(duration).
+  duration:
+    type: int
+    description:
+      - Duration in seconds of annotation.
+    default: 0
+"""
+EXAMPLES = r"""
+- name: Create a simple annotation event; start and stop default to now
+  community.general.circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+
+- name: Create an annotation with a duration of 5 minutes and a default start time of now
+  community.general.circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+    duration: 300
+
+- name: Create an annotation with an explicit start and stop time
+  community.general.circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+    start: 1395940006
+    stop: 1395954407
+"""
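The start/stop defaulting described in the options above reduces to a few lines. The `annotation_window` helper below is an illustrative stand-in for the logic of `create_annotation` further down, not part of the module:

```python
import time


def annotation_window(start=None, stop=None, duration=0):
    # start falls back to "now"; stop falls back to "now" + duration,
    # mirroring create_annotation() in the module.
    now = int(time.time())
    if start is None:
        start = now
    if stop is None:
        stop = now + duration
    return start, stop


print(annotation_window(duration=300))                       # (now, now + 300)
print(annotation_window(start=1395940006, stop=1395954407))  # passed through unchanged
```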
+
+RETURN = r"""
+annotation:
+  description: Details about the created annotation.
+  returned: success
+  type: complex
+  contains:
+    _cid:
+      description: Annotation identifier.
+      returned: success
+      type: str
+      sample: /annotation/100000
+    _created:
+      description: Creation timestamp.
+      returned: success
+      type: int
+      sample: 1502236928
+    _last_modified:
+      description: Last modification timestamp.
+      returned: success
+      type: int
+      sample: 1502236928
+    _last_modified_by:
+      description: Last modified by.
+      returned: success
+      type: str
+      sample: /user/1000
+    category:
+      description: Category of the created annotation.
+      returned: success
+      type: str
+      sample: alerts
+    title:
+      description: Title of the created annotation.
+      returned: success
+      type: str
+      sample: WARNING
+    description:
+      description: Description of the created annotation.
+      returned: success
+      type: str
+      sample: Host is down.
+    start:
+      description: Unix timestamp at which the annotation starts.
+      returned: success
+      type: int
+      sample: 1502236928
+    stop:
+      description: Unix timestamp at which the annotation ends.
+      returned: success
+      type: int
+      sample: 1502237928
+    rel_metrics:
+      description: Array of metrics related to this annotation, each metric is a string.
+      returned: success
+      type: list
+      sample:
+        - 54321_kbps
+"""
+
+import json
+import time
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+    import requests
+    HAS_REQUESTS = True
+except ImportError:
+    REQUESTS_IMP_ERR = traceback.format_exc()
+    HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def check_requests_dep(module):
+    """Check if an adequate requests version is available"""
+    if not HAS_REQUESTS:
+        module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+    else:
+        required_version = '2.0.0'
+        if LooseVersion(requests.__version__) < LooseVersion(required_version):
+            module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
+
+def post_annotation(annotation, api_key):
+    ''' Takes annotation dict and api_key string'''
+    base_url = 'https://api.circonus.com/v2'
+    annotate_post_endpoint = '/annotation'
+    resp = requests.post(base_url + annotate_post_endpoint,
+                         headers=build_headers(api_key), data=json.dumps(annotation))
+    resp.raise_for_status()
+    return resp
+
+
+def create_annotation(module):
+    ''' Takes ansible module object '''
+    annotation = {}
+    duration = module.params['duration']
+    if module.params['start'] is not None:
+        start = module.params['start']
+    else:
+        start = int(time.time())
+    if module.params['stop'] is not None:
+        stop = module.params['stop']
+    else:
+        stop = int(time.time()) + duration
+    annotation['start'] = start
+    annotation['stop'] = stop
+    annotation['category'] = module.params['category']
+    annotation['description'] = module.params['description']
+    annotation['title'] = module.params['title']
+    return annotation
+
+
+def build_headers(api_token):
+    '''Takes api token, returns headers with it included.'''
+    headers = {'X-Circonus-App-Name': 'ansible',
+               'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+               'Accept': 'application/json'}
+    return headers
+
+
+def main():
+    '''Main function, dispatches logic'''
+    module = AnsibleModule(
+        argument_spec=dict(
+            start=dict(type='int'),
+            stop=dict(type='int'),
+            category=dict(required=True),
+            title=dict(required=True),
+            description=dict(required=True),
+            duration=dict(default=0, type='int'),
+            api_key=dict(required=True, no_log=True)
+        )
+    )
+
+    check_requests_dep(module)
+
+    annotation = create_annotation(module)
+    try:
+        resp = post_annotation(annotation, module.params['api_key'])
+    except requests.exceptions.RequestException as e:
+        module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+    module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+    main()
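Outside of Ansible, the call the module makes can be reproduced with `requests` directly. A minimal usage sketch of the `post_annotation()`/`build_headers()` pair above, assuming a valid Circonus API token in `API_KEY` (a placeholder):

```python
import json
import requests  # third-party; the module guards this import as shown above

API_KEY = 'xxxx-xxxx'  # placeholder
annotation = {
    'start': 1395940006,
    'stop': 1395954407,
    'category': 'deploys',
    'title': 'App Config Change',
    'description': 'Deployed build 1234',
}
headers = {
    'X-Circonus-App-Name': 'ansible',
    'Host': 'api.circonus.com',
    'X-Circonus-Auth-Token': API_KEY,
    'Accept': 'application/json',
}

resp = requests.post('https://api.circonus.com/v2/annotation',
                     headers=headers, data=json.dumps(annotation))
resp.raise_for_status()  # the module maps RequestException onto fail_json
print(resp.json())
```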
diff --git a/plugins/modules/cisco_spark.py b/plugins/modules/cisco_spark.py
deleted file mode 120000
index 6310af28cf..0000000000
--- a/plugins/modules/cisco_spark.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/cisco_spark.py
\ No newline at end of file
diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py
deleted file mode 120000
index af172516ff..0000000000
--- a/plugins/modules/cisco_webex.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/cisco_webex.py
\ No newline at end of file
diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py
new file mode 100644
index 0000000000..bd9c148b53
--- /dev/null
+++ b/plugins/modules/cisco_webex.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+  - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Rusell (@drew-russell)
+notes:
+  - The O(recipient_type) must be valid for the supplied O(recipient_id).
+  - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+
+  recipient_type:
+    description:
+      - The request parameter you would like to send the message to.
+      - Messages can be sent to either a room or individual (by ID or E-Mail).
+    required: true
+    choices: ['roomId', 'toPersonEmail', 'toPersonId']
+    type: str
+
+  recipient_id:
+    description:
+      - The unique identifier associated with the supplied O(recipient_type).
+    required: true
+    type: str
+
+  msg_type:
+    description:
+      - Specifies how you would like the message formatted.
+    default: text
+    choices: ['text', 'markdown']
+    type: str
+    aliases: ['message_type']
+
+  personal_token:
+    description:
+      - Your personal access token required to validate the Webex Teams API.
+    required: true
+    aliases: ['token']
+    type: str
+
+  msg:
+    description:
+      - The message you would like to send.
+    required: true
+    type: str
+"""
+
+EXAMPLES = r"""
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+  community.general.cisco_webex:
+    recipient_type: roomId
+    recipient_id: "{{ room_id }}"
+    msg_type: markdown
+    personal_token: "{{ token }}"
+    msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+  community.general.cisco_webex:
+    recipient_type: roomId
+    recipient_id: "{{ room_id }}"
+    msg_type: text
+    personal_token: "{{ token }}"
+    msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+  community.general.cisco_webex:
+    recipient_type: toPersonId
+    recipient_id: "{{ person_id }}"
+    msg_type: text
+    personal_token: "{{ token }}"
+    msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+  community.general.cisco_webex:
+    recipient_type: toPersonEmail
+    recipient_id: "{{ person_email }}"
+    msg_type: text
+    personal_token: "{{ token }}"
+    msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+"""

+RETURN = r"""
+status_code:
+  description:
+    - The Response Code returned by the Webex Teams API.
+    - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+  returned: always
+  type: int
+  sample: 200
+
+message:
+  description:
+    - The Response Message returned by the Webex Teams API.
+    - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+  returned: always
+  type: str
+  sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+    """When check mode is specified, establish a read-only connection that does not return any user-specific
+    data, to validate connectivity.
In regular mode, send a message to a Cisco Webex Teams Room or Individual""" + + # Ansible Specific Variables + results = {} + ansible = module.params + + headers = { + 'Authorization': 'Bearer {0}'.format(ansible['personal_token']), + 'content-type': 'application/json' + } + + if module.check_mode: + url = "https://webexapis.com/v1/people/me" + payload = None + + else: + url = "https://webexapis.com/v1/messages" + + payload = { + ansible['recipient_type']: ansible['recipient_id'], + ansible['msg_type']: ansible['msg'] + } + + payload = module.jsonify(payload) + + response, info = fetch_url(module, url, data=payload, headers=headers) + + status_code = info['status'] + msg = info['msg'] + + # Module will fail if the response is not 200 + if status_code != 200: + results['failed'] = True + results['status_code'] = status_code + results['message'] = msg + else: + results['failed'] = False + results['status_code'] = status_code + + if module.check_mode: + results['message'] = 'Authentication Successful.' + else: + results['message'] = msg + + return results + + +def main(): + '''Ansible main. ''' + module = AnsibleModule( + argument_spec=dict( + recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), + recipient_id=dict(required=True, no_log=True), + msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']), + personal_token=dict(required=True, no_log=True, aliases=['token']), + msg=dict(required=True), + ), + + supports_check_mode=True + ) + + results = webex_msg(module) + + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/clc_aa_policy.py b/plugins/modules/clc_aa_policy.py deleted file mode 120000 index 4b72bbcaf0..0000000000 --- a/plugins/modules/clc_aa_policy.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_aa_policy.py \ No newline at end of file diff --git a/plugins/modules/clc_alert_policy.py b/plugins/modules/clc_alert_policy.py deleted file mode 120000 index 8d59909405..0000000000 --- a/plugins/modules/clc_alert_policy.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_alert_policy.py \ No newline at end of file diff --git a/plugins/modules/clc_blueprint_package.py b/plugins/modules/clc_blueprint_package.py deleted file mode 120000 index 25c8eb425d..0000000000 --- a/plugins/modules/clc_blueprint_package.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_blueprint_package.py \ No newline at end of file diff --git a/plugins/modules/clc_firewall_policy.py b/plugins/modules/clc_firewall_policy.py deleted file mode 120000 index 749887f60a..0000000000 --- a/plugins/modules/clc_firewall_policy.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_firewall_policy.py \ No newline at end of file diff --git a/plugins/modules/clc_group.py b/plugins/modules/clc_group.py deleted file mode 120000 index 4fc1f29097..0000000000 --- a/plugins/modules/clc_group.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_group.py \ No newline at end of file diff --git a/plugins/modules/clc_loadbalancer.py b/plugins/modules/clc_loadbalancer.py deleted file mode 120000 index 7dcc351dfb..0000000000 --- a/plugins/modules/clc_loadbalancer.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_loadbalancer.py \ No newline at end of file diff --git a/plugins/modules/clc_modify_server.py b/plugins/modules/clc_modify_server.py deleted file mode 120000 index 3451b6e022..0000000000 --- a/plugins/modules/clc_modify_server.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_modify_server.py \ 
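The payload `webex_msg` assembles is worth seeing in isolation: the values of `recipient_type` and `msg_type` become the JSON keys themselves. A standalone sketch with placeholder values:

```python
import json

params = {
    'recipient_type': 'roomId',   # or toPersonEmail / toPersonId
    'recipient_id': 'ROOM123',    # placeholder
    'msg_type': 'markdown',
    'msg': '**hello**',
}

# The option values are used as keys, exactly as in webex_msg() above.
payload = {
    params['recipient_type']: params['recipient_id'],
    params['msg_type']: params['msg'],
}
print(json.dumps(payload))  # {"roomId": "ROOM123", "markdown": "**hello**"}
```

In check mode the module instead issues a GET against `/v1/people/me`, which validates the token without sending anything.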
No newline at end of file diff --git a/plugins/modules/clc_publicip.py b/plugins/modules/clc_publicip.py deleted file mode 120000 index 0fac22539f..0000000000 --- a/plugins/modules/clc_publicip.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_publicip.py \ No newline at end of file diff --git a/plugins/modules/clc_server.py b/plugins/modules/clc_server.py deleted file mode 120000 index b7cee6f7bb..0000000000 --- a/plugins/modules/clc_server.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_server.py \ No newline at end of file diff --git a/plugins/modules/clc_server_snapshot.py b/plugins/modules/clc_server_snapshot.py deleted file mode 120000 index 07c1096dd1..0000000000 --- a/plugins/modules/clc_server_snapshot.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/centurylink/clc_server_snapshot.py \ No newline at end of file diff --git a/plugins/modules/cloud/alicloud/ali_instance.py b/plugins/modules/cloud/alicloud/ali_instance.py deleted file mode 100644 index 09754ccdba..0000000000 --- a/plugins/modules/cloud/alicloud/ali_instance.py +++ /dev/null @@ -1,1013 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see http://www.gnu.org/licenses/. - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ali_instance -short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group. -description: - - Create, start, stop, restart, modify or terminate ecs instances. - - Add or remove ecs instances to/from security group. -options: - state: - description: - - The state of the instance after operating. - default: 'present' - choices: ['present', 'running', 'stopped', 'restarted', 'absent'] - type: str - availability_zone: - description: - - Aliyun availability zone ID in which to launch the instance. - If it is not specified, it will be allocated by system automatically. - aliases: ['alicloud_zone', 'zone_id'] - type: str - image_id: - description: - - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances. - aliases: ['image'] - type: str - instance_type: - description: - - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances. - aliases: ['type'] - type: str - security_groups: - description: - - A list of security group IDs. - aliases: ['group_ids'] - type: list - elements: str - vswitch_id: - description: - - The subnet ID in which to launch the instances (VPC). - aliases: ['subnet_id'] - type: str - instance_name: - description: - - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. 
It must begin with an - uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-". - It cannot begin with http:// or https://. - aliases: ['name'] - type: str - description: - description: - - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://. - type: str - internet_charge_type: - description: - - Internet charge type of ECS instance. - default: 'PayByBandwidth' - choices: ['PayByBandwidth', 'PayByTraffic'] - type: str - max_bandwidth_in: - description: - - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). - default: 200 - type: int - max_bandwidth_out: - description: - - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). - Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False). - default: 0 - type: int - host_name: - description: - - Instance host name. Ordered hostname is not supported. - type: str - unique_suffix: - description: - - Specifies whether to add sequential suffixes to the host_name. - The sequential suffix ranges from 001 to 999. - default: False - type: bool - version_added: '0.2.0' - password: - description: - - The password to login instance. After rebooting instances, modified password will take effect. - type: str - system_disk_category: - description: - - Category of the system disk. - default: 'cloud_efficiency' - choices: ['cloud_efficiency', 'cloud_ssd'] - type: str - system_disk_size: - description: - - Size of the system disk, in GB. The valid values are 40~500. - default: 40 - type: int - system_disk_name: - description: - - Name of the system disk. - type: str - system_disk_description: - description: - - Description of the system disk. - type: str - count: - description: - - The number of the new instance. An integer value which indicates how many instances that match I(count_tag) - should be running. Instances are either created or terminated based on this value. - default: 1 - type: int - count_tag: - description: - - I(count) determines how many instances based on a specific tag criteria should be present. - This can be expressed in multiple ways and is shown in the EXAMPLES section. - The specified count_tag must already exist or be passed in as the I(tags) option. - If it is not specified, it will be replaced by I(instance_name). - type: str - allocate_public_ip: - description: - - Whether allocate a public ip for the new instance. - default: False - aliases: [ 'assign_public_ip' ] - type: bool - instance_charge_type: - description: - - The charge type of the instance. - choices: ['PrePaid', 'PostPaid'] - default: 'PostPaid' - type: str - period: - description: - - The charge duration of the instance, in month. Required when C(instance_charge_type=PrePaid). - - The valid value are [1-9, 12, 24, 36]. - default: 1 - type: int - auto_renew: - description: - - Whether automate renew the charge of the instance. - type: bool - default: False - auto_renew_period: - description: - - The duration of the automatic renew the charge of the instance. Required when C(auto_renew=True). - choices: [1, 2, 3, 6, 12] - type: int - instance_ids: - description: - - A list of instance ids. It is required when need to operate existing instances. - If it is specified, I(count) will lose efficacy. - type: list - elements: str - force: - description: - - Whether the current operation needs to be execute forcibly. 
- default: False - type: bool - tags: - description: - - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - version_added: '0.2.0' - purge_tags: - description: - - Delete any tags not specified in the task that are on the instance. - If True, it means you have to specify all the desired tags on each task affecting an instance. - default: False - type: bool - version_added: '0.2.0' - key_name: - description: - - The name of key pair which is used to access ECS instance in SSH. - required: false - type: str - aliases: ['keypair'] - user_data: - description: - - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. - It only will take effect when launching the new ECS instances. - required: false - type: str - ram_role_name: - description: - - The name of the instance RAM role. - type: str - version_added: '0.2.0' - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal - places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. - type: float - version_added: '0.2.0' - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid. - choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] - default: 'NoSpot' - type: str - version_added: '0.2.0' - period_unit: - description: - - The duration unit that you will buy the resource. It is valid when C(instance_charge_type=PrePaid) - choices: ['Month', 'Week'] - default: 'Month' - type: str - version_added: '0.2.0' - dry_run: - description: - - Specifies whether to send a dry-run request. - - If I(dry_run=True), Only a dry-run request is sent and no instance is created. The system checks whether the - required parameters are set, and validates the request format, service permissions, and available ECS instances. - If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. - - If I(dry_run=False), A request is sent. If the validation succeeds, the instance is created. - default: False - type: bool - version_added: '0.2.0' - include_data_disks: - description: - - Whether to change instance disks charge type when changing instance charge type. 
- default: True - type: bool - version_added: '0.2.0' -author: - - "He Guimin (@xiaozhu36)" -requirements: - - "python >= 3.6" - - "footmark >= 1.19.0" -extends_documentation_fragment: - - community.general.alicloud -''' - -EXAMPLES = ''' -# basic provisioning example vpc network -- name: Basic provisioning example - hosts: localhost - vars: - alicloud_access_key: - alicloud_secret_key: - alicloud_region: cn-beijing - image: ubuntu1404_64_40G_cloudinit_20160727.raw - instance_type: ecs.n4.small - vswitch_id: vsw-abcd1234 - assign_public_ip: True - max_bandwidth_out: 10 - host_name: myhost - password: mypassword - system_disk_category: cloud_efficiency - system_disk_size: 100 - internet_charge_type: PayByBandwidth - security_groups: ["sg-f2rwnfh23r"] - - instance_ids: ["i-abcd12346", "i-abcd12345"] - force: True - - tasks: - - name: Launch ECS instance in VPC network - community.general.ali_instance: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - image: '{{ image }}' - system_disk_category: '{{ system_disk_category }}' - system_disk_size: '{{ system_disk_size }}' - instance_type: '{{ instance_type }}' - vswitch_id: '{{ vswitch_id }}' - assign_public_ip: '{{ assign_public_ip }}' - internet_charge_type: '{{ internet_charge_type }}' - max_bandwidth_out: '{{ max_bandwidth_out }}' - tags: - Name: created_one - host_name: '{{ host_name }}' - password: '{{ password }}' - - - name: With count and count_tag to create a number of instances - community.general.ali_instance: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - image: '{{ image }}' - system_disk_category: '{{ system_disk_category }}' - system_disk_size: '{{ system_disk_size }}' - instance_type: '{{ instance_type }}' - assign_public_ip: '{{ assign_public_ip }}' - security_groups: '{{ security_groups }}' - internet_charge_type: '{{ internet_charge_type }}' - max_bandwidth_out: '{{ max_bandwidth_out }}' - tags: - Name: created_one - Version: 0.1 - count: 2 - count_tag: - Name: created_one - host_name: '{{ host_name }}' - password: '{{ password }}' - - - name: Start instance - community.general.ali_instance: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - instance_ids: '{{ instance_ids }}' - state: 'running' - - - name: Reboot instance forcibly - ecs: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - instance_ids: '{{ instance_ids }}' - state: 'restarted' - force: '{{ force }}' - - - name: Add instances to an security group - ecs: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - instance_ids: '{{ instance_ids }}' - security_groups: '{{ security_groups }}' -''' - -RETURN = ''' -instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). 
- returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - instance_type_family: - description: The instance type family of the instance belongs. - returned: always - type: str - sample: ecs.sn1ne - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. 
- returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - user_data: - description: User-defined data. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. - returned: always - type: str - sample: vpc-0011223344 - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. - returned: always - type: float - sample: 0.97 - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. 
- returned: always - type: str - sample: NoSpot -ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' - -import re -import time -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect - -HAS_FOOTMARK = False -FOOTMARK_IMP_ERR = None -try: - from footmark.exception import ECSResponseError - HAS_FOOTMARK = True -except ImportError: - FOOTMARK_IMP_ERR = traceback.format_exc() - HAS_FOOTMARK = False - - -def get_instances_info(connection, ids): - result = [] - instances = connection.describe_instances(instance_ids=ids) - if len(instances) > 0: - for inst in instances: - volumes = connection.describe_disks(instance_id=inst.id) - setattr(inst, 'block_device_mappings', volumes) - setattr(inst, 'user_data', inst.describe_user_data()) - result.append(inst.read()) - return result - - -def run_instance(module, ecs, exact_count): - if exact_count <= 0: - return None - zone_id = module.params['availability_zone'] - image_id = module.params['image_id'] - instance_type = module.params['instance_type'] - security_groups = module.params['security_groups'] - vswitch_id = module.params['vswitch_id'] - instance_name = module.params['instance_name'] - description = module.params['description'] - internet_charge_type = module.params['internet_charge_type'] - max_bandwidth_out = module.params['max_bandwidth_out'] - max_bandwidth_in = module.params['max_bandwidth_in'] - host_name = module.params['host_name'] - password = module.params['password'] - system_disk_category = module.params['system_disk_category'] - system_disk_size = module.params['system_disk_size'] - system_disk_name = module.params['system_disk_name'] - system_disk_description = module.params['system_disk_description'] - allocate_public_ip = module.params['allocate_public_ip'] - period = module.params['period'] - auto_renew = module.params['auto_renew'] - instance_charge_type = module.params['instance_charge_type'] - auto_renew_period = module.params['auto_renew_period'] - user_data = module.params['user_data'] - key_name = module.params['key_name'] - ram_role_name = module.params['ram_role_name'] - spot_price_limit = module.params['spot_price_limit'] - spot_strategy = module.params['spot_strategy'] - unique_suffix = module.params['unique_suffix'] - # check whether the required parameter passed or not - if not image_id: - module.fail_json(msg='image_id is required for new instance') - if not instance_type: - module.fail_json(msg='instance_type is required for new instance') - if not isinstance(security_groups, list): - module.fail_json(msg='The parameter security_groups should be a list, aborting') - if len(security_groups) <= 0: - module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting') - - client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time())) - - try: - # call to create_instance method from footmark - instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0], - zone_id=zone_id, instance_name=instance_name, description=description, - internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out, - internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password, - io_optimized='optimized', system_disk_category=system_disk_category, - 
system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name, - system_disk_description=system_disk_description, vswitch_id=vswitch_id, - amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month", - auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name, - user_data=user_data, client_token=client_token, ram_role_name=ram_role_name, - spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix) - - except Exception as e: - module.fail_json(msg='Unable to create instance, error: {0}'.format(e)) - - return instances - - -def modify_instance(module, instance): - # According to state to modify instance's some special attribute - state = module.params["state"] - name = module.params['instance_name'] - unique_suffix = module.params['unique_suffix'] - if not name: - name = instance.name - - description = module.params['description'] - if not description: - description = instance.description - - host_name = module.params['host_name'] - if unique_suffix and host_name: - suffix = instance.host_name[-3:] - host_name = host_name + suffix - - if not host_name: - host_name = instance.host_name - - # password can be modified only when restart instance - password = "" - if state == "restarted": - password = module.params['password'] - - # userdata can be modified only when instance is stopped - setattr(instance, "user_data", instance.describe_user_data()) - user_data = instance.user_data - if state == "stopped": - user_data = module.params['user_data'].encode() - - try: - return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data) - except Exception as e: - module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e)) - - -def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300): - """ - To verify instance charge type has become expected after modify instance charge type - """ - try: - while True: - instances = ecs.describe_instances(instance_ids=instance_ids) - flag = True - for inst in instances: - if inst and inst.instance_charge_type != charge_type: - flag = False - if flag: - return - timeout -= delay - time.sleep(delay) - if timeout <= 0: - raise Exception("Timeout Error: Waiting for instance to {0}. 
".format(charge_type)) - except Exception as e: - raise e - - -def main(): - argument_spec = ecs_argument_spec() - argument_spec.update(dict( - security_groups=dict(type='list', elements='str', aliases=['group_ids']), - availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']), - instance_type=dict(type='str', aliases=['type']), - image_id=dict(type='str', aliases=['image']), - count=dict(type='int', default=1), - count_tag=dict(type='str'), - vswitch_id=dict(type='str', aliases=['subnet_id']), - instance_name=dict(type='str', aliases=['name']), - host_name=dict(type='str'), - password=dict(type='str', no_log=True), - internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']), - max_bandwidth_in=dict(type='int', default=200), - max_bandwidth_out=dict(type='int', default=0), - system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']), - system_disk_size=dict(type='int', default=40), - system_disk_name=dict(type='str'), - system_disk_description=dict(type='str'), - force=dict(type='bool', default=False), - tags=dict(type='dict', aliases=['instance_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']), - description=dict(type='str'), - allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False), - instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']), - period=dict(type='int', default=1), - auto_renew=dict(type='bool', default=False), - instance_ids=dict(type='list', elements='str'), - auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]), - key_name=dict(type='str', aliases=['keypair']), - user_data=dict(type='str'), - ram_role_name=dict(type='str'), - spot_price_limit=dict(type='float'), - spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']), - unique_suffix=dict(type='bool', default=False), - period_unit=dict(type='str', default='Month', choices=['Month', 'Week']), - dry_run=dict(type='bool', default=False), - include_data_disks=dict(type='bool', default=True) - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - if HAS_FOOTMARK is False: - module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) - - ecs = ecs_connect(module) - host_name = module.params['host_name'] - state = module.params['state'] - instance_ids = module.params['instance_ids'] - count_tag = module.params['count_tag'] - count = module.params['count'] - instance_name = module.params['instance_name'] - force = module.params['force'] - zone_id = module.params['availability_zone'] - key_name = module.params['key_name'] - tags = module.params['tags'] - max_bandwidth_out = module.params['max_bandwidth_out'] - instance_charge_type = module.params['instance_charge_type'] - if instance_charge_type == "PrePaid": - module.params['spot_strategy'] = '' - changed = False - - instances = [] - if instance_ids: - if not isinstance(instance_ids, list): - module.fail_json(msg='The parameter instance_ids should be a list, aborting') - instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids) - if not instances: - module.fail_json(msg="There are no instances in our record based on instance_ids {0}. 
" - "Please check it and try again.".format(instance_ids)) - elif count_tag: - instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag)) - elif instance_name: - instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name) - - ids = [] - if state == 'absent': - if len(instances) < 1: - module.fail_json(msg='Please specify ECS instances that you want to operate by using ' - 'parameters instance_ids, tags or instance_name, aborting') - try: - targets = [] - for inst in instances: - if inst.status != 'stopped' and not force: - module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.") - targets.append(inst.id) - if ecs.delete_instances(instance_ids=targets, force=force): - changed = True - ids.extend(targets) - - module.exit_json(changed=changed, ids=ids, instances=[]) - except Exception as e: - module.fail_json(msg='Delete instance got an error: {0}'.format(e)) - - if module.params['allocate_public_ip'] and max_bandwidth_out < 0: - module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.") - if not module.params['allocate_public_ip']: - module.params['max_bandwidth_out'] = 0 - - if state == 'present': - if not instance_ids: - if len(instances) > count: - for i in range(0, len(instances) - count): - inst = instances[len(instances) - 1] - if inst.status != 'stopped' and not force: - module.fail_json(msg="That to delete instance {0} is failed results from it is running, " - "and please stop it or set 'force' as True.".format(inst.id)) - try: - if inst.terminate(force=force): - changed = True - except Exception as e: - module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e)) - instances.pop(len(instances) - 1) - else: - try: - if re.search(r"-\[\d+,\d+\]-", host_name): - module.fail_json(msg='Ordered hostname is not supported, If you want to add an ordered ' - 'suffix to the hostname, you can set unique_suffix to True') - new_instances = run_instance(module, ecs, count - len(instances)) - if new_instances: - changed = True - instances.extend(new_instances) - except Exception as e: - module.fail_json(msg="Create new instances got an error: {0}".format(e)) - - # Security Group join/leave begin - security_groups = module.params['security_groups'] - if security_groups: - if not isinstance(security_groups, list): - module.fail_json(msg='The parameter security_groups should be a list, aborting') - for inst in instances: - existing = inst.security_group_ids['security_group_id'] - remove = list(set(existing).difference(set(security_groups))) - add = list(set(security_groups).difference(set(existing))) - for sg in remove: - if inst.leave_security_group(sg): - changed = True - for sg in add: - if inst.join_security_group(sg): - changed = True - # Security Group join/leave ends here - - # Attach/Detach key pair - inst_ids = [] - for inst in instances: - if key_name is not None and key_name != inst.key_name: - if key_name == "": - if inst.detach_key_pair(): - changed = True - else: - inst_ids.append(inst.id) - if inst_ids: - changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name) - - # Modify instance attribute - for inst in instances: - if modify_instance(module, inst): - changed = True - if inst.id not in ids: - ids.append(inst.id) - - # Modify instance charge type - ids = [] - for inst in instances: - if inst.instance_charge_type != instance_charge_type: - ids.append(inst.id) - if ids: - params = {"instance_ids": ids, "instance_charge_type": 
instance_charge_type, - "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'], - "auto_pay": True} - if instance_charge_type == 'PrePaid': - params['period'] = module.params['period'] - params['period_unit'] = module.params['period_unit'] - - if ecs.modify_instance_charge_type(**params): - changed = True - wait_for_instance_modify_charge(ecs, ids, instance_charge_type) - - else: - if len(instances) < 1: - module.fail_json(msg='Please specify ECS instances that you want to operate by using ' - 'parameters instance_ids, tags or instance_name, aborting') - if state == 'running': - try: - targets = [] - for inst in instances: - if modify_instance(module, inst): - changed = True - if inst.status != "running": - targets.append(inst.id) - ids.append(inst.id) - if targets and ecs.start_instances(instance_ids=targets): - changed = True - ids.extend(targets) - except Exception as e: - module.fail_json(msg='Start instances got an error: {0}'.format(e)) - elif state == 'stopped': - try: - targets = [] - for inst in instances: - if inst.status != "stopped": - targets.append(inst.id) - if targets and ecs.stop_instances(instance_ids=targets, force_stop=force): - changed = True - ids.extend(targets) - for inst in instances: - if modify_instance(module, inst): - changed = True - except Exception as e: - module.fail_json(msg='Stop instances got an error: {0}'.format(e)) - elif state == 'restarted': - try: - targets = [] - for inst in instances: - if modify_instance(module, inst): - changed = True - targets.append(inst.id) - if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']): - changed = True - ids.extend(targets) - except Exception as e: - module.fail_json(msg='Reboot instances got an error: {0}'.format(e)) - - tags = module.params['tags'] - if module.params['purge_tags']: - for inst in instances: - if not tags: - tags = inst.tags - try: - if inst.remove_tags(tags): - changed = True - except Exception as e: - module.fail_json(msg="{0}".format(e)) - module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) - - if tags: - for inst in instances: - try: - if inst.add_tags(tags): - changed = True - except Exception as e: - module.fail_json(msg="{0}".format(e)) - module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py deleted file mode 100644 index 06df6cb4f1..0000000000 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ /dev/null @@ -1,444 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see http://www.gnu.org/licenses/. 
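# A sketch, not part of the removed module: the charge-type change above is
# confirmed by polling describe_instances() until every instance reports the
# new charge type (see wait_for_instance_modify_charge). A deadline-based
# helper makes the timeout arithmetic easier to follow; all names below are
# hypothetical.
import time

def wait_until(predicate, delay=10, timeout=300):
    # Poll predicate() every `delay` seconds; raise once `timeout` seconds
    # have elapsed without it returning True.
    deadline = time.time() + timeout
    while not predicate():
        if time.time() >= deadline:
            raise Exception("Timed out after {0} seconds".format(timeout))
        time.sleep(delay)

# Usage sketch (assuming an `ecs` connection and a list of instance ids):
# wait_until(lambda: all(inst.instance_charge_type == "PrePaid"
#                        for inst in ecs.describe_instances(instance_ids=ids)))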
- -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ali_instance_info -short_description: Gather information on instances of Alibaba Cloud ECS. -description: - - This module fetches data from the Open API in Alicloud. - The module must be called from within the ECS instance itself. - - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change. - -options: - availability_zone: - description: - - Aliyun availability zone ID in which to launch the instance. - - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(zone_id) instead. - aliases: ['alicloud_zone'] - type: str - instance_names: - description: - - A list of ECS instance names. - - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(instance_name) instead. - aliases: ["names"] - type: list - elements: str - instance_ids: - description: - - A list of ECS instance ids. - aliases: ["ids"] - type: list - elements: str - name_prefix: - description: - - Use a instance name prefix to filter ecs instances. - type: str - version_added: '0.2.0' - tags: - description: - - A hash/dictionaries of instance tags. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be - all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. - Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dash ("-") to - connect different words in one parameter. 'InstanceIds' should be a list and it will be appended to - I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be a dict and using I(tags) instead. - type: dict - version_added: '0.2.0' -author: - - "He Guimin (@xiaozhu36)" -requirements: - - "python >= 3.6" - - "footmark >= 1.13.0" -extends_documentation_fragment: - - community.general.alicloud -''' - -EXAMPLES = ''' -# Fetch instances details according to setting different filters - -- name: Find all instances in the specified region - community.general.ali_instance_info: - register: all_instances - -- name: Find all instances based on the specified ids - community.general.ali_instance_info: - instance_ids: - - "i-35b333d9" - - "i-ddav43kd" - register: instances_by_ids - -- name: Find all instances based on the specified name_prefix - community.general.ali_instance_info: - name_prefix: "ecs_instance_" - register: instances_by_name_prefix - -- name: Find instances based on tags - community.general.ali_instance_info: - tags: - Test: "add" -''' - -RETURN = ''' -instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). - returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. 
- returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type_family: - description: The instance type family of the instance belongs. - returned: always - type: str - sample: ecs.sn1ne - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. - returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. 
- returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. - returned: always - type: str - sample: vpc-0011223344 -ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect - -HAS_FOOTMARK = False -FOOTMARK_IMP_ERR = None -try: - from footmark.exception import ECSResponseError - HAS_FOOTMARK = True -except ImportError: - FOOTMARK_IMP_ERR = traceback.format_exc() - HAS_FOOTMARK = False - - -def main(): - argument_spec = ecs_argument_spec() - argument_spec.update(dict( - availability_zone=dict(aliases=['alicloud_zone'], - removed_in_version="5.0.0", removed_from_collection="community.general"), - instance_ids=dict(type='list', elements='str', aliases=['ids'], - removed_in_version="5.0.0", removed_from_collection="community.general"), - instance_names=dict(type='list', elements='str', aliases=['names']), - name_prefix=dict(type='str'), - tags=dict(type='dict', aliases=['instance_tags']), - filters=dict(type='dict') - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - if HAS_FOOTMARK is False: - module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) - - ecs = ecs_connect(module) - - instances = [] - instance_ids = [] - ids = module.params['instance_ids'] - name_prefix = module.params['name_prefix'] - names = module.params['instance_names'] - zone_id = module.params['availability_zone'] - if ids and (not isinstance(ids, list) or len(ids) < 1): - module.fail_json(msg='instance_ids should be a list of instances, aborting') - - if names and (not isinstance(names, list) or len(names) < 1): - module.fail_json(msg='instance_names should be a list of instances, 
aborting') - - filters = module.params['filters'] - if not filters: - filters = {} - if not ids: - ids = [] - for key, value in list(filters.items()): - if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list): - for id in value: - if id not in ids: - ids.append(id) - if ids: - filters['instance_ids'] = ids - if module.params['tags']: - filters['tags'] = module.params['tags'] - if zone_id: - filters['zone_id'] = zone_id - if names: - filters['instance_name'] = names[0] - - for inst in ecs.describe_instances(**filters): - if name_prefix: - if not str(inst.instance_name).startswith(name_prefix): - continue - volumes = ecs.describe_disks(instance_id=inst.id) - setattr(inst, 'block_device_mappings', volumes) - setattr(inst, 'user_data', inst.describe_user_data()) - instances.append(inst.read()) - instance_ids.append(inst.id) - - module.exit_json(changed=False, ids=instance_ids, instances=instances) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/atomic/atomic_container.py b/plugins/modules/cloud/atomic/atomic_container.py deleted file mode 100644 index ca63125661..0000000000 --- a/plugins/modules/cloud/atomic/atomic_container.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: atomic_container -short_description: Manage the containers on the atomic host platform -description: - - Manage the containers on the atomic host platform. - - Allows managing the lifecycle of a container on the atomic host platform. -author: "Giuseppe Scrivano (@giuseppe)" -notes: - - The host should support the C(atomic) command -requirements: - - atomic - - "python >= 2.6" -options: - backend: - description: - - Define the backend to use for the container. - required: True - choices: ["docker", "ostree"] - type: str - name: - description: - - Name of the container. - required: True - type: str - image: - description: - - The image to use to install the container. - required: True - type: str - rootfs: - description: - - Define the rootfs of the image. - type: str - state: - description: - - State of the container. - choices: ["absent", "latest", "present", "rollback"] - default: "latest" - type: str - mode: - description: - - Define whether it is a user or a system container. - choices: ["user", "system"] - type: str - values: - description: - - Values for the installation of the container. - - This option is permitted only with mode 'user' or 'system'. - - The values specified here will be used at installation time as --set arguments for atomic install. 
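# A minimal sketch of how the values list above maps onto the CLI: each entry
# becomes one --set argument of `atomic install`. The container and image
# names used here are illustrative, not taken from the module.
values = ["ETCD_NAME=etcd.server", "ETCD_DATA_DIR=/var/lib/etcd"]
set_args = ["--set=%s" % v for v in values]
cmd = ["atomic", "install", "--storage=ostree", "--system",
       "--name=etcd"] + set_args + ["rhel/etcd"]
# -> atomic install --storage=ostree --system --name=etcd \
#        --set=ETCD_NAME=etcd.server --set=ETCD_DATA_DIR=/var/lib/etcd rhel/etcd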
- type: list - elements: str -''' - -EXAMPLES = r''' - -- name: Install the etcd system container - community.general.atomic_container: - name: etcd - image: rhel/etcd - backend: ostree - state: latest - mode: system - values: - - ETCD_NAME=etcd.server - -- name: Uninstall the etcd system container - community.general.atomic_container: - name: etcd - image: rhel/etcd - backend: ostree - state: absent - mode: system -''' - -RETURN = r''' -msg: - description: The command standard output - returned: always - type: str - sample: [u'Using default tag: latest ...'] -''' - -# import module snippets -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def do_install(module, mode, rootfs, container, image, values_list, backend): - system_list = ["--system"] if mode == 'system' else [] - user_list = ["--user"] if mode == 'user' else [] - rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else [] - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=out, changed=changed) - - -def do_update(module, container, image, values_list): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=out, changed=changed) - - -def do_uninstall(module, name, backend): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - module.exit_json(msg=out, changed=True) - - -def do_rollback(module, name): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'containers', 'rollback', name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Rolling back" in out - module.exit_json(msg=out, changed=changed) - - -def core(module): - mode = module.params['mode'] - name = module.params['name'] - image = module.params['image'] - rootfs = module.params['rootfs'] - values = module.params['values'] - backend = module.params['backend'] - state = module.params['state'] - - atomic_bin = module.get_bin_path('atomic') - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - - values_list = ["--set=%s" % x for x in values] if values else [] - - args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - return - present = name in out - - if state == 'present' and present: - module.exit_json(msg=out, changed=False) - elif (state in ['latest', 'present']) and not present: - do_install(module, mode, rootfs, name, image, values_list, backend) - elif state == 'latest': - do_update(module, name, image, values_list) - elif state == 'absent': - if not present: - module.exit_json(msg="The container is not 
present", changed=False) - else: - do_uninstall(module, name, backend) - elif state == 'rollback': - do_rollback(module, name) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - mode=dict(default=None, choices=['user', 'system']), - name=dict(required=True), - image=dict(required=True), - rootfs=dict(default=None), - state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']), - backend=dict(required=True, choices=['docker', 'ostree']), - values=dict(type='list', default=[], elements='str'), - ), - ) - - if module.params['values'] is not None and module.params['mode'] == 'default': - module.fail_json(msg="values is supported only with user or system mode") - - # Verify that the platform supports atomic command - dummy = module.get_bin_path('atomic', required=True) - - try: - core(module) - except Exception as e: - module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/atomic/atomic_host.py b/plugins/modules/cloud/atomic/atomic_host.py deleted file mode 100644 index 85b00f917a..0000000000 --- a/plugins/modules/cloud/atomic/atomic_host.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: atomic_host -short_description: Manage the atomic host platform -description: - - Manage the atomic host platform. - - Rebooting of Atomic host platform should be done outside this module. -author: -- Saravanan KR (@krsacme) -notes: - - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). -requirements: - - atomic - - python >= 2.6 -options: - revision: - description: - - The version number of the atomic host to be deployed. - - Providing C(latest) will upgrade to the latest available version. 
- default: 'latest' - aliases: [ version ] - type: str -''' - -EXAMPLES = r''' -- name: Upgrade the atomic host platform to the latest version (atomic host upgrade) - community.general.atomic_host: - revision: latest - -- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) - community.general.atomic_host: - revision: 23.130 -''' - -RETURN = r''' -msg: - description: The command standard output - returned: always - type: str - sample: 'Already on latest' -''' -import os -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def core(module): - revision = module.params['revision'] - atomic_bin = module.get_bin_path('atomic', required=True) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - - if revision == 'latest': - args = [atomic_bin, 'host', 'upgrade'] - else: - args = [atomic_bin, 'host', 'deploy', revision] - - rc, out, err = module.run_command(args, check_rc=False) - - if rc == 77 and revision == 'latest': - module.exit_json(msg="Already on latest", changed=False) - elif rc != 0: - module.fail_json(rc=rc, msg=err) - else: - module.exit_json(msg=out, changed=True) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - revision=dict(type='str', default='latest', aliases=["version"]), - ), - ) - - # Verify that the platform is atomic host - if not os.path.exists("/run/ostree-booted"): - module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only") - - try: - core(module) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/atomic/atomic_image.py b/plugins/modules/cloud/atomic/atomic_image.py deleted file mode 100644 index 350ad4c2ae..0000000000 --- a/plugins/modules/cloud/atomic/atomic_image.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: atomic_image -short_description: Manage the container images on the atomic host platform -description: - - Manage the container images on the atomic host platform. - - Allows to execute the commands specified by the RUN label in the container image when present. -author: -- Saravanan KR (@krsacme) -notes: - - Host should support C(atomic) command. -requirements: - - atomic - - python >= 2.6 -options: - backend: - description: - - Define the backend where the image is pulled. - choices: [ 'docker', 'ostree' ] - type: str - name: - description: - - Name of the container image. - required: True - type: str - state: - description: - - The state of the container image. - - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running. - choices: [ 'absent', 'latest', 'present' ] - default: 'latest' - type: str - started: - description: - - Start or Stop the container. 
- type: bool - default: 'yes' -''' - -EXAMPLES = r''' -- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog) - community.general.atomic_image: - name: rhel7/rsyslog - state: latest - -- name: Pull busybox to the OSTree backend - community.general.atomic_image: - name: busybox - state: latest - backend: ostree -''' - -RETURN = r''' -msg: - description: The command standard output - returned: always - type: str - sample: [u'Using default tag: latest ...'] -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def do_upgrade(module, image): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'update', '--force', image] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: # something went wrong; emit the message - module.fail_json(rc=rc, msg=err) - elif 'Image is up to date' in out: - return False - - return True - - -def core(module): - image = module.params['name'] - state = module.params['state'] - started = module.params['started'] - backend = module.params['backend'] - is_upgraded = False - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - atomic_bin = module.get_bin_path('atomic') - out = {} - err = {} - rc = 0 - - if backend: - if state == 'present' or state == 'latest': - args = [atomic_bin, 'pull', "--storage=%s" % backend, image] - rc, out, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - else: - out_run = "" - if started: - args = [atomic_bin, 'run', "--storage=%s" % backend, image] - rc, out_run, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=(out + out_run), changed=changed) - elif state == 'absent': - args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image] - rc, out, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Unable to find" not in out - module.exit_json(msg=out, changed=changed) - return - - if state == 'present' or state == 'latest': - if state == 'latest': - is_upgraded = do_upgrade(module, image) - - if started: - args = [atomic_bin, 'run', image] - else: - args = [atomic_bin, 'install', image] - elif state == 'absent': - args = [atomic_bin, 'uninstall', image] - - rc, out, err = module.run_command(args, check_rc=False) - - if rc < 0: - module.fail_json(rc=rc, msg=err) - elif rc == 1 and 'already present' in err: - module.exit_json(result=err, changed=is_upgraded) - elif started and 'Container is running' in out: - module.exit_json(result=out, changed=is_upgraded) - else: - module.exit_json(msg=out, changed=True) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - backend=dict(type='str', choices=['docker', 'ostree']), - name=dict(type='str', required=True), - state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']), - started=dict(type='bool', default=True), - ), - ) - - # Verify that the platform supports the atomic command - dummy = module.get_bin_path('atomic', required=True) - - try: - core(module) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_aa_policy.py b/plugins/modules/cloud/centurylink/clc_aa_policy.py deleted file mode 100644 index 
1d52cca7c5..0000000000 --- a/plugins/modules/cloud/centurylink/clc_aa_policy.py +++ /dev/null @@ -1,345 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_aa_policy -short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. -options: - name: - description: - - The name of the Anti Affinity Policy. - type: str - required: True - location: - description: - - Datacenter in which the policy lives/should live. - type: str - required: True - state: - description: - - Whether to create or delete the policy. - type: str - required: False - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
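# The notes above describe generating an API token from the documented login
# endpoint. A sketch using `requests` (already a requirement of this module);
# the response field names ('bearerToken', 'accountAlias') are assumptions to
# verify against the CLC v2 API documentation before relying on them.
import os
import requests

resp = requests.post(
    "https://api.ctl.io/v2/authentication/login",
    json={"username": os.environ["CLC_V2_API_USERNAME"],
          "password": os.environ["CLC_V2_API_PASSWD"]},
)
resp.raise_for_status()
body = resp.json()
os.environ["CLC_V2_API_TOKEN"] = body["bearerToken"]
os.environ["CLC_ACCT_ALIAS"] = body["accountAlias"]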
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy - -- name: Delete AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy -''' - -RETURN = ''' -policy: - description: The anti affinity policy information - returned: success - type: dict - sample: - { - "id":"1a28dd0988984d87b9cd61fa8da15424", - "name":"test_aa_policy", - "location":"UC1", - "links":[ - { - "rel":"self", - "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", - "verbs":[ - "GET", - "DELETE", - "PUT" - ] - }, - { - "rel":"location", - "href":"/v2/datacenters/wfad/UC1", - "id":"uc1", - "name":"UC1 - US West (Santa Clara)" - } - ] - } -''' - -__version__ = '${version}' - -import os -import traceback - -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk: -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAntiAffinityPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), - exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - location=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_policies_for_datacenter(p) - - if p['state'] == "absent": - changed, policy = self._ensure_policy_is_absent(p) - else: - changed, policy = self._ensure_policy_is_present(p) - - if hasattr(policy, 'data'): - policy = policy.data - elif hasattr(policy, '__dict__'): - policy = policy.__dict__ - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment 
variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_policies_for_datacenter(self, p): - """ - Get the Policies for a datacenter by calling the CLC API. - :param p: datacenter to get policies from - :return: policies in the datacenter - """ - response = {} - - policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) - - for policy in policies: - response[policy.name] = policy - return response - - def _create_policy(self, p): - """ - Create an Anti Affinity Policy using the CLC API. - :param p: datacenter to create policy in - :return: response dictionary from the CLC API. - """ - try: - return self.clc.v2.AntiAffinity.Create( - name=p['name'], - location=p['location']) - except CLCException as ex: - self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _delete_policy(self, p): - """ - Delete an Anti Affinity Policy using the CLC API. - :param p: datacenter to delete a policy from - :return: none - """ - try: - policy = self.policy_dict[p['name']] - policy.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _policy_exists(self, policy_name): - """ - Check to see if an Anti Affinity Policy exists - :param policy_name: name of the policy - :return: boolean of if the policy exists - """ - if policy_name in self.policy_dict: - return self.policy_dict.get(policy_name) - - return False - - def _ensure_policy_is_absent(self, p): - """ - Makes sure that a policy is absent - :param p: dictionary of policy name - :return: tuple of if a deletion occurred and the name of the policy that was deleted - """ - changed = False - if self._policy_exists(policy_name=p['name']): - changed = True - if not self.module.check_mode: - self._delete_policy(p) - return changed, None - - def _ensure_policy_is_present(self, p): - """ - Ensures that a policy is present - :param p: dictionary of a policy name - :return: tuple of if an addition occurred and the name of the policy that was added - """ - changed = False - policy = self._policy_exists(policy_name=p['name']) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_policy(p) - return changed, policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), - supports_check_mode=True) - clc_aa_policy = ClcAntiAffinityPolicy(module) - clc_aa_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_alert_policy.py b/plugins/modules/cloud/centurylink/clc_alert_policy.py deleted file mode 100644 index de9d146dc4..0000000000 --- a/plugins/modules/cloud/centurylink/clc_alert_policy.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_alert_policy -short_description: Create or Delete Alert Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. -options: - alias: - description: - - The alias of your CLC Account - type: str - required: True - name: - description: - - The name of the alert policy. This is mutually exclusive with id - type: str - id: - description: - - The alert policy id. This is mutually exclusive with name - type: str - alert_recipients: - description: - - A list of recipient email ids to notify the alert. - This is required for state 'present' - type: list - elements: str - metric: - description: - - The metric on which to measure the condition that will trigger the alert. - This is required for state 'present' - type: str - choices: ['cpu','memory','disk'] - duration: - description: - - The length of time in minutes that the condition must exceed the threshold. - This is required for state 'present' - type: str - threshold: - description: - - The threshold that will trigger the alert when the metric equals or exceeds it. - This is required for state 'present' - This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 - type: int - state: - description: - - Whether to create or delete the policy. - type: str - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
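# The threshold option above must be between 5.0 and 95.0 and a multiple of
# 5.0, but the module does not validate this client-side. A sketch of such a
# check (hypothetical helper, not part of the module):
def valid_threshold(threshold):
    t = float(threshold)
    return 5.0 <= t <= 95.0 and t % 5.0 == 0.0

# valid_threshold(80)   -> True
# valid_threshold(82.5) -> False (not a multiple of 5)
# valid_threshold(100)  -> False (outside 5.0 - 95.0)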
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Alert Policy for disk above 80% for 5 minutes - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - alert_recipients: - - test1@centurylink.com - - test2@centurylink.com - metric: 'disk' - duration: '00:05:00' - threshold: 80 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: var=policy - -- name: Delete Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Alert Policy - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: var=policy -''' - -RETURN = ''' -policy: - description: The alert policy information - returned: success - type: dict - sample: - { - "actions": [ - { - "action": "email", - "settings": { - "recipients": [ - "user1@domain.com", - "user1@domain.com" - ] - } - } - ], - "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", - "links": [ - { - "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", - "rel": "self", - "verbs": [ - "GET", - "DELETE", - "PUT" - ] - } - ], - "name": "test_alert", - "triggers": [ - { - "duration": "00:05:00", - "metric": "disk", - "threshold": 80.0 - } - ] - } -''' - -__version__ = '${version}' - -import json -import os -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAlertPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - id=dict(), - alias=dict(required=True), - alert_recipients=dict(type='list', elements='str'), - metric=dict( - choices=[ - 'cpu', - 'memory', - 'disk'], - default=None), - duration=dict(type='str'), - threshold=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - mutually_exclusive = [ - ['name', 'id'] - ] - return {'argument_spec': argument_spec, - 'mutually_exclusive': mutually_exclusive} - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_alert_policies(p['alias']) - - if p['state'] == 'present': - changed, policy = self._ensure_alert_policy_is_present() - else: - changed, policy = self._ensure_alert_policy_is_absent() - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_alert_policy_is_present(self): - """ - Ensures that the alert policy is present - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the created/updated alert policy - """ - changed = False - p = self.module.params - policy_name = p.get('name') - - if not policy_name: - self.module.fail_json(msg='Policy name is a required') - policy = self._alert_policy_exists(policy_name) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_alert_policy() - else: - changed_u, policy = 
self._ensure_alert_policy_is_updated(policy) - if changed_u: - changed = True - return changed, policy - - def _ensure_alert_policy_is_absent(self): - """ - Ensures that the alert policy is absent - :return: (changed, None) - changed: A flag representing if anything is modified - """ - changed = False - p = self.module.params - alert_policy_id = p.get('id') - alert_policy_name = p.get('name') - alias = p.get('alias') - if not alert_policy_id and not alert_policy_name: - self.module.fail_json( - msg='Either alert policy id or policy name is required') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id( - self.module, - alert_policy_name) - if alert_policy_id and alert_policy_id in self.policy_dict: - changed = True - if not self.module.check_mode: - self._delete_alert_policy(alias, alert_policy_id) - return changed, None - - def _ensure_alert_policy_is_updated(self, alert_policy): - """ - Ensures the alert policy is updated if anything is changed in the alert policy configuration - :param alert_policy: the target alert policy - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the updated the alert policy - """ - changed = False - p = self.module.params - alert_policy_id = alert_policy.get('id') - email_list = p.get('alert_recipients') - metric = p.get('metric') - duration = p.get('duration') - threshold = p.get('threshold') - policy = alert_policy - if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ - (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ - (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): - changed = True - elif email_list: - t_email_list = list( - alert_policy.get('actions')[0].get('settings').get('recipients')) - if set(email_list) != set(t_email_list): - changed = True - if changed and not self.module.check_mode: - policy = self._update_alert_policy(alert_policy_id) - return changed, policy - - def _get_alert_policies(self, alias): - """ - Get the alert policies for account alias by calling the CLC API. - :param alias: the account alias - :return: the alert policies for the account alias - """ - response = {} - - policies = self.clc.v2.API.Call('GET', - '/v2/alertPolicies/%s' - % alias) - - for policy in policies.get('items'): - response[policy.get('id')] = policy - return response - - def _create_alert_policy(self): - """ - Create an alert Policy using the CLC API. - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'POST', - '/v2/alertPolicies/%s' % alias, - arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to create alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _update_alert_policy(self, alert_policy_id): - """ - Update alert policy using the CLC API. - :param alert_policy_id: The clc alert policy id - :return: response dictionary from the CLC API. 
- """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'PUT', '/v2/alertPolicies/%s/%s' % - (alias, alert_policy_id), arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to update alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _delete_alert_policy(self, alias, policy_id): - """ - Delete an alert policy using the CLC API. - :param alias : the account alias - :param policy_id: the alert policy id - :return: response dictionary from the CLC API. - """ - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/alertPolicies/%s/%s' % - (alias, policy_id), None) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to delete alert policy id "{0}". {1}'.format( - policy_id, str(e.response_text))) - return result - - def _alert_policy_exists(self, policy_name): - """ - Check to see if an alert policy exists - :param policy_name: name of the alert policy - :return: boolean of if the policy exists - """ - result = False - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == policy_name: - result = self.policy_dict.get(policy_id) - return result - - def _get_alert_policy_id(self, module, alert_policy_name): - """ - retrieves the alert policy id of the account based on the name of the policy - :param module: the AnsibleModule object - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy_id - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - argument_dict = ClcAlertPolicy._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_alert_policy = ClcAlertPolicy(module) - clc_alert_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/plugins/modules/cloud/centurylink/clc_blueprint_package.py deleted file mode 100644 index bd0e868fa3..0000000000 --- a/plugins/modules/cloud/centurylink/clc_blueprint_package.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_blueprint_package -short_description: deploys a blue print package on a set of servers in CenturyLink Cloud. -description: - - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to deploy the blue print package. - type: list - required: True - elements: str - package_id: - description: - - The package id of the blue print. - type: str - required: True - package_params: - description: - - The dictionary of arguments required to deploy the blue print. - type: dict - default: {} - required: False - state: - description: - - Whether to install or uninstall the package. Currently it supports only "present" for install action. - type: str - required: False - default: present - choices: ['present'] - wait: - description: - - Whether to wait for the tasks to finish before returning. - type: str - default: True - required: False -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Deploy package - community.general.clc_blueprint_package: - server_ids: - - UC1TEST-SERVER1 - - UC1TEST-SERVER2 - package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a - package_params: {} -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SERVER1", - "UC1TEST-SERVER2" - ] -''' - -__version__ = '${version}' - -import os -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcBlueprintPackage: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion( - requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - changed = False - changed_server_ids = [] - self._set_clc_credentials_from_env() - server_ids = p['server_ids'] - package_id = p['package_id'] - package_params = p['package_params'] - state = p['state'] - if state == 'present': - changed, changed_server_ids, request_list = self.ensure_package_installed( - server_ids, package_id, package_params) - self._wait_for_requests_to_complete(request_list) - self.module.exit_json(changed=changed, server_ids=changed_server_ids) - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', elements='str', required=True), - package_id=dict(required=True), - package_params=dict(type='dict', default={}), - wait=dict(default=True), # @FIXME should be bool? - state=dict(default='present', choices=['present']) - ) - return argument_spec - - def ensure_package_installed(self, server_ids, package_id, package_params): - """ - Ensure the package is installed in the given list of servers - :param server_ids: the server list where the package needs to be installed - :param package_id: the blueprint package id - :param package_params: the package arguments - :return: (changed, server_ids, request_list) - changed: A flag indicating if a change was made - server_ids: The list of servers modified - request_list: The list of request objects from clc-sdk - """ - changed = False - request_list = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to get servers from CLC') - for server in servers: - if not self.module.check_mode: - request = self.clc_install_package( - server, - package_id, - package_params) - request_list.append(request) - changed = True - return changed, server_ids, request_list - - def clc_install_package(self, server, package_id, package_params): - """ - Install the package to a given clc server - :param server: The server object where the package needs to be installed - :param package_id: The blue print package id - :param package_params: the required argument dict for the package installation - :return: The result object from the CLC API call - """ - result = None - try: - result = server.ExecutePackage( - package_id=package_id, - parameters=package_params) - except CLCException as ex: - self.module.fail_json(msg='Failed to install package : {0} to server {1}. 
{2}'.format( - package_id, server.id, ex.message - )) - return result - - def _wait_for_requests_to_complete(self, request_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param request_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in request_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process package install request') - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: the list of server ids - :param message: the error message to raise if there is any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcBlueprintPackage.define_argument_spec(), - supports_check_mode=True - ) - clc_blueprint_package = ClcBlueprintPackage(module) - clc_blueprint_package.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/plugins/modules/cloud/centurylink/clc_firewall_policy.py deleted file mode 100644 index a8f8a4e5f0..0000000000 --- a/plugins/modules/cloud/centurylink/clc_firewall_policy.py +++ /dev/null @@ -1,588 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_firewall_policy -short_description: Create/delete/update firewall policies -description: - - Create or delete or update firewall policies on Centurylink Cloud -options: - location: - description: - - Target datacenter for the firewall policy - type: str - required: True - state: - description: - - Whether to create or delete the firewall policy - type: str - default: present - choices: ['present', 'absent'] - source: - description: - - The list of source addresses for traffic 
on the originating firewall. - This is required when state is 'present' - type: list - elements: str - destination: - description: - - The list of destination addresses for traffic on the terminating firewall. - This is required when state is 'present' - type: list - elements: str - ports: - description: - - The list of ports associated with the policy. - TCP and UDP can take in single ports or port ranges. - - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])." - type: list - elements: str - firewall_policy_id: - description: - - Id of the firewall policy. This is required to update or delete an existing firewall policy - type: str - source_account_alias: - description: - - CLC alias for the source account - type: str - required: True - destination_account_alias: - description: - - CLC alias for the destination account - type: str - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: str - default: 'True' - enabled: - description: - - Whether the firewall policy is enabled or disabled - type: str - choices: [True, False] - default: True -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
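-# Illustrative only - the variable names below match what
-# _set_clc_credentials_from_env() reads:
-#   export CLC_V2_API_USERNAME='clc_user'
-#   export CLC_V2_API_PASSWD='clc_password'
-# or, token based:
-#   export CLC_V2_API_TOKEN='<token from /v2/authentication/login>'
-#   export CLC_ACCT_ALIAS='WFAD'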
-''' - -EXAMPLES = ''' ---- -- name: Create Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: present - source: 10.128.216.0/24 - destination: 10.128.216.0/24 - ports: Any - destination_account_alias: WFAD - -- name: Delete Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: absent - firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 -''' - -RETURN = ''' -firewall_policy_id: - description: The fire wall policy id - returned: success - type: str - sample: fc36f1bfd47242e488a9c44346438c05 -firewall_policy: - description: The fire wall policy information - returned: success - type: dict - sample: - { - "destination":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "destinationAccount":"wfad", - "enabled":true, - "id":"fc36f1bfd47242e488a9c44346438c05", - "links":[ - { - "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - } - ], - "ports":[ - "any" - ], - "source":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "status":"active" - } -''' - -__version__ = '${version}' - -import os -import traceback -from ansible.module_utils.six.moves.urllib.parse import urlparse -from time import sleep -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcFirewallPolicy: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.firewall_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion( - requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - location=dict(required=True), - source_account_alias=dict(required=True), - destination_account_alias=dict(), - firewall_policy_id=dict(), - ports=dict(type='list', elements='str'), - source=dict(type='list', elements='str'), - destination=dict(type='list', elements='str'), - wait=dict(default=True), # @FIXME type=bool - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(default=True, choices=[True, False]) - ) - return argument_spec - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - firewall_policy = None - location = self.module.params.get('location') - source_account_alias = 
self.module.params.get('source_account_alias') - destination_account_alias = self.module.params.get( - 'destination_account_alias') - firewall_policy_id = self.module.params.get('firewall_policy_id') - ports = self.module.params.get('ports') - source = self.module.params.get('source') - destination = self.module.params.get('destination') - wait = self.module.params.get('wait') - state = self.module.params.get('state') - enabled = self.module.params.get('enabled') - - self.firewall_dict = { - 'location': location, - 'source_account_alias': source_account_alias, - 'destination_account_alias': destination_account_alias, - 'firewall_policy_id': firewall_policy_id, - 'ports': ports, - 'source': source, - 'destination': destination, - 'wait': wait, - 'state': state, - 'enabled': enabled} - - self._set_clc_credentials_from_env() - - if state == 'absent': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( - source_account_alias, location, self.firewall_dict) - - elif state == 'present': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( - source_account_alias, location, self.firewall_dict) - - return self.module.exit_json( - changed=changed, - firewall_policy_id=firewall_policy_id, - firewall_policy=firewall_policy) - - @staticmethod - def _get_policy_id_from_response(response): - """ - Method to parse out the policy id from creation response - :param response: response from firewall creation API call - :return: policy_id: firewall policy id from creation call - """ - url = response.get('links')[0]['href'] - path = urlparse(url).path - path_list = os.path.split(path) - policy_id = path_list[-1] - return policy_id - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_firewall_policy_is_present( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: (changed, firewall_policy_id, firewall_policy) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was created/updated - firewall_policy: The firewall_policy object - """ - firewall_policy = None - firewall_policy_id = firewall_dict.get('firewall_policy_id') - - if firewall_policy_id is None: - if not self.module.check_mode: - response = self._create_firewall_policy( - source_account_alias, - location, - firewall_dict) - firewall_policy_id = self._get_policy_id_from_response( - response) - changed = True - else: - firewall_policy = 
self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if not firewall_policy: - return self.module.fail_json( - msg='Unable to find the firewall policy id : {0}'.format( - firewall_policy_id)) - changed = self._compare_get_request_with_dict( - firewall_policy, - firewall_dict) - if not self.module.check_mode and changed: - self._update_firewall_policy( - source_account_alias, - location, - firewall_policy_id, - firewall_dict) - if changed and firewall_policy_id: - firewall_policy = self._wait_for_requests_to_complete( - source_account_alias, - location, - firewall_policy_id) - return changed, firewall_policy_id, firewall_policy - - def _ensure_firewall_policy_is_absent( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is removed if present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: firewall policy to delete - :return: (changed, firewall_policy_id, response) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was deleted - response: response from CLC API call - """ - changed = False - response = [] - firewall_policy_id = firewall_dict.get('firewall_policy_id') - result = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if result: - if not self.module.check_mode: - response = self._delete_firewall_policy( - source_account_alias, - location, - firewall_policy_id) - changed = True - return changed, firewall_policy_id, response - - def _create_firewall_policy( - self, - source_account_alias, - location, - firewall_dict): - """ - Creates the firewall policy for the given account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response from CLC API call - """ - payload = { - 'destinationAccount': firewall_dict.get('destination_account_alias'), - 'source': firewall_dict.get('source'), - 'destination': firewall_dict.get('destination'), - 'ports': firewall_dict.get('ports')} - try: - response = self.clc.v2.API.Call( - 'POST', '/v2-experimental/firewallPolicies/%s/%s' % - (source_account_alias, location), payload) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to create firewall policy. %s" % - str(e.response_text)) - return response - - def _delete_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Deletes a given firewall policy for an account alias in a datacenter - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to delete - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to delete the firewall policy id : {0}. 
{1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _update_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id, - firewall_dict): - """ - Updates a firewall policy for a given datacenter and account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to update - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'PUT', - '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, - location, - firewall_policy_id), - firewall_dict) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to update the firewall policy id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - @staticmethod - def _compare_get_request_with_dict(response, firewall_dict): - """ - Helper method to compare the json response for getting the firewall policy with the request parameters - :param response: response from the get method - :param firewall_dict: dictionary of request parameters for firewall policy - :return: changed: Boolean that returns true if there are differences between - the response parameters and the playbook parameters - """ - - changed = False - - response_dest_account_alias = response.get('destinationAccount') - response_enabled = response.get('enabled') - response_source = response.get('source') - response_dest = response.get('destination') - response_ports = response.get('ports') - request_dest_account_alias = firewall_dict.get( - 'destination_account_alias') - request_enabled = firewall_dict.get('enabled') - if request_enabled is None: - request_enabled = True - request_source = firewall_dict.get('source') - request_dest = firewall_dict.get('destination') - request_ports = firewall_dict.get('ports') - - if ( - response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( - response_enabled != request_enabled) or ( - response_source and response_source != request_source) or ( - response_dest and response_dest != request_dest) or ( - response_ports and response_ports != request_ports): - changed = True - return changed - - def _get_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Get back details for a particular firewall policy - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: id of the firewall policy to get - :return: response - The response from CLC API call - """ - response = None - try: - response = self.clc.v2.API.Call( - 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - if e.response_status_code != 404: - self.module.fail_json( - msg="Unable to fetch the firewall policy with id : {0}. 
{1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _wait_for_requests_to_complete( - self, - source_account_alias, - location, - firewall_policy_id, - wait_limit=50): - """ - Waits until the CLC requests are complete if the wait argument is True - :param source_account_alias: The source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: The firewall policy id - :param wait_limit: The number of times to check the status for completion - :return: the firewall_policy object - """ - wait = self.module.params.get('wait') - count = 0 - firewall_policy = None - while wait: - count += 1 - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - status = firewall_policy.get('status') - if status == 'active' or count > wait_limit: - wait = False - else: - # wait for 2 seconds - sleep(2) - return firewall_policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcFirewallPolicy._define_module_argument_spec(), - supports_check_mode=True) - - clc_firewall = ClcFirewallPolicy(module) - clc_firewall.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_group.py b/plugins/modules/cloud/centurylink/clc_group.py deleted file mode 100644 index e1c05c6c0c..0000000000 --- a/plugins/modules/cloud/centurylink/clc_group.py +++ /dev/null @@ -1,513 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_group -short_description: Create/delete Server Groups at Centurylink Cloud -description: - - Create or delete Server Groups at Centurylink Centurylink Cloud -options: - name: - description: - - The name of the Server Group - type: str - required: True - description: - description: - - A description of the Server Group - type: str - required: False - parent: - description: - - The parent group of the server group. If parent is not provided, it creates the group at top level. - type: str - required: False - location: - description: - - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter - associated with the account - type: str - required: False - state: - description: - - Whether to create or delete the group - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: bool - default: True - required: False -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' - -# Create a Server Group - ---- -- name: Create Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -# Delete a Server Group -- name: Delete Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete / Verify Absent a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -group: - description: The group information - returned: success - type: dict - sample: - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":"2015-07-29T18:52:47Z", - "modifiedBy":"service.wfad", - "modifiedDate":"2015-07-29T18:52:47Z" - }, - "customFields":[ - - ], - "description":"test group", - "groups":[ - - ], - "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", - "links":[ - { - "href":"/v2/groups/wfad", - "rel":"createGroup", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad", - "rel":"createServer", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"parentGroup" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", - "rel":"defaults", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", - "rel":"billing" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", - "rel":"archiveGroupAction" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", - "rel":"statistics" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", - "rel":"horizontalAutoscalePolicyMapping", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - } - ], - "locationId":"UC1", - "name":"test group", - "status":"active", - "type":"default" - } -''' - -__version__ = '${version}' - 
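-# Group lookups in this module go through self.group_dict, built by
-# _get_group_tree_for_datacenter(): a mapping of group name to a
-# (group, parent_group) tuple covering every group of type 'default'
-# under the datacenter's root group.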
-import os -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcGroup(object): - - clc = None - root_group = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - group_name = self.module.params.get('name') - parent_name = self.module.params.get('parent') - group_description = self.module.params.get('description') - state = self.module.params.get('state') - - self._set_clc_credentials_from_env() - self.group_dict = self._get_group_tree_for_datacenter( - datacenter=location) - - if state == "absent": - changed, group, requests = self._ensure_group_is_absent( - group_name=group_name, parent_name=parent_name) - if requests: - self._wait_for_requests_to_complete(requests) - else: - changed, group = self._ensure_group_is_present( - group_name=group_name, parent_name=parent_name, group_description=group_description) - try: - group = group.data - except AttributeError: - group = group_name - self.module.exit_json(changed=changed, group=group) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(default=None), - parent=dict(default=None), - location=dict(default=None), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=True)) - - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_group_is_absent(self, group_name, parent_name): - """ - Ensure 
that group_name is absent by deleting it if necessary - :param group_name: string - the name of the clc server group to delete - :param parent_name: string - the name of the parent group for group_name - :return: changed, group - """ - changed = False - group = [] - results = [] - - if self._group_exists(group_name=group_name, parent_name=parent_name): - if not self.module.check_mode: - group.append(group_name) - result = self._delete_group(group_name) - results.append(result) - changed = True - return changed, group, results - - def _delete_group(self, group_name): - """ - Delete the provided server group - :param group_name: string - the server group to delete - :return: none - """ - response = None - group, parent = self.group_dict.get(group_name) - try: - response = group.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( - group_name, ex.response_text - )) - return response - - def _ensure_group_is_present( - self, - group_name, - parent_name, - group_description): - """ - Checks to see if a server group exists, creates it if it doesn't. - :param group_name: the name of the group to validate/create - :param parent_name: the name of the parent group for group_name - :param group_description: a short description of the server group (used when creating) - :return: (changed, group) - - changed: Boolean- whether a change was made, - group: A clc group object for the group - """ - if not self.root_group: - raise AssertionError("Implementation Error: Root Group not set") - parent = parent_name if parent_name is not None else self.root_group.name - description = group_description - changed = False - group = group_name - - parent_exists = self._group_exists(group_name=parent, parent_name=None) - child_exists = self._group_exists( - group_name=group_name, - parent_name=parent) - - if parent_exists and child_exists: - group, parent = self.group_dict[group_name] - changed = False - elif parent_exists and not child_exists: - if not self.module.check_mode: - group = self._create_group( - group=group, - parent=parent, - description=description) - changed = True - else: - self.module.fail_json( - msg="parent group: " + - parent + - " does not exist") - - return changed, group - - def _create_group(self, group, parent, description): - """ - Create the provided server group - :param group: clc_sdk.Group - the group to create - :param parent: clc_sdk.Parent - the parent group for {group} - :param description: string - a text description of the group - :return: clc_sdk.Group - the created group - """ - response = None - (parent, grandparent) = self.group_dict[parent] - try: - response = parent.Create(name=group, description=description) - except CLCException as ex: - self.module.fail_json(msg='Failed to create group :{0}. 
{1}'.format( - group, ex.response_text)) - return response - - def _group_exists(self, group_name, parent_name): - """ - Check to see if a group exists - :param group_name: string - the group to check - :param parent_name: string - the parent of group_name - :return: boolean - whether the group exists - """ - result = False - if group_name in self.group_dict: - (group, parent) = self.group_dict[group_name] - if parent_name is None or parent_name == parent.name: - result = True - return result - - def _get_group_tree_for_datacenter(self, datacenter=None): - """ - Walk the tree of groups for a datacenter - :param datacenter: string - the datacenter to walk (ex: 'UC1') - :return: a dictionary of groups and parents - """ - self.root_group = self.clc.v2.Datacenter( - location=datacenter).RootGroup() - return self._walk_groups_recursive( - parent_group=None, - child_group=self.root_group) - - def _walk_groups_recursive(self, parent_group, child_group): - """ - Walk a parent-child tree of groups, starting with the provided child group - :param parent_group: clc_sdk.Group - the parent group to start the walk - :param child_group: clc_sdk.Group - the child group to start the walk - :return: a dictionary of groups and parents - """ - result = {str(child_group): (child_group, parent_group)} - groups = child_group.Subgroups().groups - if len(groups) > 0: - for group in groups: - if group.type != 'default': - continue - - result.update(self._walk_groups_recursive(child_group, group)) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process group request') - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcGroup._define_module_argument_spec(), - supports_check_mode=True) - - clc_group = ClcGroup(module) - clc_group.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/plugins/modules/cloud/centurylink/clc_loadbalancer.py deleted file mode 100644 index 950e087976..0000000000 --- a/plugins/modules/cloud/centurylink/clc_loadbalancer.py +++ /dev/null @@ -1,937 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_loadbalancer -short_description: Create, Delete shared loadbalancers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
-options:
-  name:
-    description:
-      - The name of the loadbalancer
-    type: str
-    required: True
-  description:
-    description:
-      - A description for the loadbalancer
-    type: str
-  alias:
-    description:
-      - The alias of your CLC Account
-    type: str
-    required: True
-  location:
-    description:
-      - The location of the datacenter where the load balancer resides
-    type: str
-    required: True
-  method:
-    description:
-      - The balancing method for the load balancer pool
-    type: str
-    choices: ['leastConnection', 'roundRobin']
-  persistence:
-    description:
-      - The persistence method for the load balancer
-    type: str
-    choices: ['standard', 'sticky']
-  port:
-    description:
-      - Port to configure on the public-facing side of the load balancer pool
-    type: str
-    choices: [80, 443]
-  nodes:
-    description:
-      - A list of nodes that need to be added to the load balancer pool
-    type: list
-    default: []
-    elements: dict
-  status:
-    description:
-      - The status of the loadbalancer
-    type: str
-    default: enabled
-    choices: ['enabled', 'disabled']
-  state:
-    description:
-      - Whether to create or delete the load balancer, one of its pools, or pool nodes
-    type: str
-    default: present
-    choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
-requirements:
-  - python = 2.7
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
-    CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
-  - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
-  - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
-  - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
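-# The state choices map onto the ensure_* handlers in process_request():
-#   present       -> ensure_loadbalancer_present (plus pool/nodes when port/nodes are given)
-#   absent        -> ensure_loadbalancer_absent
-#   port_absent   -> ensure_loadbalancerpool_absent
-#   nodes_present -> ensure_lbpool_nodes_present
-#   nodes_absent  -> ensure_lbpool_nodes_absent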
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples -- name: Create Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: present - -- name: Add node to an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_present - -- name: Remove node from an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_absent - -- name: Delete LoadbalancerPool - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: port_absent - -- name: Delete Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: absent -''' - -RETURN = ''' -loadbalancer: - description: The load balancer result object from CLC - returned: success - type: dict - sample: - { - "description":"test-lb", - "id":"ab5b18cb81e94ab9925b61d1ca043fb5", - "ipAddress":"66.150.174.197", - "links":[ - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", - "rel":"pools", - "verbs":[ - "GET", - "POST" - ] - } - ], - "name":"test-lb", - "pools":[ - - ], - "status":"enabled" - } -''' - -__version__ = '${version}' - -import json -import os -import traceback -from time import sleep -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
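-# Every CLC request below goes through clc.v2.API.Call(verb, url, payload) and
-# surfaces failures as APIFailedResponse, whose response_text ends up in the
-# fail_json message, for example:
-#   try:
-#       result = clc.v2.API.Call('GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
-#   except APIFailedResponse as e:
-#       module.fail_json(msg=str(e.response_text))
-# Install with: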
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcLoadBalancer: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.lb_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion( - requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - result_lb = None - loadbalancer_name = self.module.params.get('name') - loadbalancer_alias = self.module.params.get('alias') - loadbalancer_location = self.module.params.get('location') - loadbalancer_description = self.module.params.get('description') - loadbalancer_port = self.module.params.get('port') - loadbalancer_method = self.module.params.get('method') - loadbalancer_persistence = self.module.params.get('persistence') - loadbalancer_nodes = self.module.params.get('nodes') - loadbalancer_status = self.module.params.get('status') - state = self.module.params.get('state') - - if loadbalancer_description is None: - loadbalancer_description = loadbalancer_name - - self._set_clc_credentials_from_env() - - self.lb_dict = self._get_loadbalancer_list( - alias=loadbalancer_alias, - location=loadbalancer_location) - - if state == 'present': - changed, result_lb, lb_id = self.ensure_loadbalancer_present( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location, - description=loadbalancer_description, - status=loadbalancer_status) - if loadbalancer_port: - changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( - lb_id=lb_id, - alias=loadbalancer_alias, - location=loadbalancer_location, - method=loadbalancer_method, - persistence=loadbalancer_persistence, - port=loadbalancer_port) - - if loadbalancer_nodes: - changed, result_nodes = self.ensure_lbpool_nodes_set( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - elif state == 'absent': - changed, result_lb = self.ensure_loadbalancer_absent( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location) - - elif state == 'port_absent': - changed, result_lb = self.ensure_loadbalancerpool_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port) - - elif state == 'nodes_present': - changed, result_lb = self.ensure_lbpool_nodes_present( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - elif state == 'nodes_absent': - changed, result_lb = self.ensure_lbpool_nodes_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - self.module.exit_json(changed=changed, loadbalancer=result_lb) - - def 
ensure_loadbalancer_present( - self, name, alias, location, description, status): - """ - Checks to see if a load balancer exists and creates one if it does not. - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description of loadbalancer - :param status: Enabled / Disabled - :return: (changed, result, lb_id) - changed: Boolean whether a change was made - result: The result object from the CLC load balancer request - lb_id: The load balancer id - """ - changed = False - result = name - lb_id = self._loadbalancer_exists(name=name) - if not lb_id: - if not self.module.check_mode: - result = self.create_loadbalancer(name=name, - alias=alias, - location=location, - description=description, - status=status) - lb_id = result.get('id') - changed = True - - return changed, result, lb_id - - def ensure_loadbalancerpool_present( - self, lb_id, alias, location, method, persistence, port): - """ - Checks to see if a load balancer pool exists and creates one if it does not. - :param lb_id: The loadbalancer id - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: (changed, group, pool_id) - - changed: Boolean whether a change was made - result: The result from the CLC API call - pool_id: The string id of the load balancer pool - """ - changed = False - result = port - if not lb_id: - return changed, None, None - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if not pool_id: - if not self.module.check_mode: - result = self.create_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - method=method, - persistence=persistence, - port=port) - pool_id = result.get('id') - changed = True - - return changed, result, pool_id - - def ensure_loadbalancer_absent(self, name, alias, location): - """ - Checks to see if a load balancer exists and deletes it if it does - :param name: Name of the load balancer - :param alias: Alias of account - :param location: Datacenter - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API Call - """ - changed = False - result = name - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - if not self.module.check_mode: - result = self.delete_loadbalancer(alias=alias, - location=location, - name=name) - changed = True - return changed, result - - def ensure_loadbalancerpool_absent(self, alias, location, name, port): - """ - Checks to see if a load balancer pool exists and deletes it if it does - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer listens on - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed = True - if not self.module.check_mode: - result = self.delete_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id) - else: - result = "Pool 
doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
-        """
-        Checks whether the pool already contains exactly the provided list of
-        nodes and replaces the pool's node list if it does not
-        :param alias: The account alias
-        :param location: the datacenter the load balancer resides in
-        :param name: the name of the load balancer
-        :param port: the port that the load balancer will listen on
-        :param nodes: The list of nodes the pool should be set to
-        :return: (changed, result) -
-            changed: Boolean whether a change was made
-            result: The result from the CLC API call
-        """
-        result = {}
-        changed = False
-        lb_exists = self._loadbalancer_exists(name=name)
-        if lb_exists:
-            lb_id = self._get_loadbalancer_id(name=name)
-            pool_id = self._loadbalancerpool_exists(
-                alias=alias,
-                location=location,
-                port=port,
-                lb_id=lb_id)
-            if pool_id:
-                nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
-                                                                  location=location,
-                                                                  lb_id=lb_id,
-                                                                  pool_id=pool_id,
-                                                                  nodes_to_check=nodes)
-                if not nodes_exist:
-                    changed = True
-                    result = self.set_loadbalancernodes(alias=alias,
-                                                        location=location,
-                                                        lb_id=lb_id,
-                                                        pool_id=pool_id,
-                                                        nodes=nodes)
-            else:
-                result = "Pool doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
-        """
-        Checks whether the provided nodes exist in the pool and adds any that are missing
-        :param alias: The account alias
-        :param location: the datacenter the load balancer resides in
-        :param name: the name of the load balancer
-        :param port: the port that the load balancer will listen on
-        :param nodes: the list of nodes to be added
-        :return: (changed, result) -
-            changed: Boolean whether a change was made
-            result: The result from the CLC API call
-        """
-        changed = False
-        lb_exists = self._loadbalancer_exists(name=name)
-        if lb_exists:
-            lb_id = self._get_loadbalancer_id(name=name)
-            pool_id = self._loadbalancerpool_exists(
-                alias=alias,
-                location=location,
-                port=port,
-                lb_id=lb_id)
-            if pool_id:
-                changed, result = self.add_lbpool_nodes(alias=alias,
-                                                        location=location,
-                                                        lb_id=lb_id,
-                                                        pool_id=pool_id,
-                                                        nodes_to_add=nodes)
-            else:
-                result = "Pool doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
-        """
-        Checks whether the provided nodes exist in the pool and removes any that are found
-        :param alias: The account alias
-        :param location: the datacenter the load balancer resides in
-        :param name: the name of the load balancer
-        :param port: the port that the load balancer will listen on
-        :param nodes: the list of nodes to be removed
-        :return: (changed, result) -
-            changed: Boolean whether a change was made
-            result: The result from the CLC API call
-        """
-        changed = False
-        lb_exists = self._loadbalancer_exists(name=name)
-        if lb_exists:
-            lb_id = self._get_loadbalancer_id(name=name)
-            pool_id = self._loadbalancerpool_exists(
-                alias=alias,
-                location=location,
-                port=port,
-                lb_id=lb_id)
-            if pool_id:
-                changed, result = self.remove_lbpool_nodes(alias=alias,
-                                                           location=location,
-                                                           lb_id=lb_id,
-                                                           pool_id=pool_id,
-                                                           nodes_to_remove=nodes)
-            else:
-                result = "Pool doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def create_loadbalancer(self, name, alias, location, description,
status): - """ - Create a loadbalancer w/ params - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description for loadbalancer to be created - :param status: Enabled / Disabled - :return: result: The result from the CLC API call - """ - result = None - try: - result = self.clc.v2.API.Call('POST', - '/v2/sharedLoadBalancers/%s/%s' % (alias, - location), - json.dumps({"name": name, - "description": description, - "status": status})) - sleep(1) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create load balancer "{0}". {1}'.format( - name, str(e.response_text))) - return result - - def create_loadbalancerpool( - self, alias, location, lb_id, method, persistence, port): - """ - Creates a pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: result: The result from the create API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id), json.dumps( - { - "port": port, "method": method, "persistence": persistence - })) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def delete_loadbalancer(self, alias, location, name): - """ - Delete CLC loadbalancer - :param alias: Alias for account - :param location: Datacenter - :param name: Name of the loadbalancer to delete - :return: result: The result from the CLC API call - """ - result = None - lb_id = self._get_loadbalancer_id(name=name) - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % - (alias, location, lb_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete load balancer "{0}". {1}'.format( - name, str(e.response_text))) - return result - - def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): - """ - Delete the pool on the provided load balancer - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the load balancer pool - :return: result: The result from the delete API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % - (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete pool for load balancer id "{0}". 
{1}'.format( - lb_id, str(e.response_text))) - return result - - def _get_loadbalancer_id(self, name): - """ - Retrieves unique ID of loadbalancer - :param name: Name of loadbalancer - :return: Unique ID of the loadbalancer - """ - id = None - for lb in self.lb_dict: - if lb.get('name') == name: - id = lb.get('id') - return id - - def _get_loadbalancer_list(self, alias, location): - """ - Retrieve a list of loadbalancers - :param alias: Alias for account - :param location: Datacenter - :return: JSON data for all loadbalancers at datacenter - """ - result = None - try: - result = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch load balancers for account: {0}. {1}'.format( - alias, str(e.response_text))) - return result - - def _loadbalancer_exists(self, name): - """ - Verify a loadbalancer exists - :param name: Name of loadbalancer - :return: False or the ID of the existing loadbalancer - """ - result = False - - for lb in self.lb_dict: - if lb.get('name') == name: - result = lb.get('id') - return result - - def _loadbalancerpool_exists(self, alias, location, port, lb_id): - """ - Checks to see if a pool exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param port: the port to check for an existing pool - :param lb_id: the id string of the provided load balancer - :return: result: The id string of the pool or False - """ - result = False - try: - pool_list = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to fetch the load balancer pools for load balancer id: {0}.
{1}'.format( - lb_id, str(e.response_text))) - for pool in pool_list: - if int(pool.get('port')) == int(port): - result = pool.get('id') - return result - - def _loadbalancerpool_nodes_exists( - self, alias, location, lb_id, pool_id, nodes_to_check): - """ - Checks whether all of the given nodes exist in the specified pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the provided load balancer - :param pool_id: the id string of the load balancer pool - :param nodes_to_check: the list of nodes to check for - :return: result: True / False indicating whether all of the given nodes exist in the pool - """ - result = bool(nodes_to_check) - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_check: - if not node.get('status'): - node['status'] = 'enabled' - # a single missing node means the pool does not yet match the requested set - if node not in nodes: - result = False - return result - - def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): - """ - Updates the nodes of the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to set - :return: result: The result from the CLC API call - """ - result = None - if not lb_id: - return result - if not self.module.check_mode: - try: - result = self.clc.v2.API.Call('PUT', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id), json.dumps(nodes)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format( - pool_id, str(e.response_text))) - return result - - def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): - """ - Add nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_add: a list of dictionaries containing the nodes to add - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_add: - if not node.get('status'): - node['status'] = 'enabled' - if node not in nodes: - changed = True - nodes.append(node) - if changed and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def remove_lbpool_nodes( - self, alias, location, lb_id, pool_id, nodes_to_remove): - """ - Removes nodes from the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_remove: a list of dictionaries containing the nodes to remove - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_remove: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - changed = True - nodes.remove(node) - if changed and not self.module.check_mode: -
result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): - """ - Return the list of nodes available to the provided load balancer pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The list of nodes - """ - result = None - try: - result = self.clc.v2.API.Call('GET', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( - pool_id, str(e.response_text))) - return result - - @staticmethod - def define_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(default=None), - location=dict(required=True), - alias=dict(required=True), - port=dict(choices=[80, 443]), - method=dict(choices=['leastConnection', 'roundRobin']), - persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[], elements='dict'), - status=dict(default='enabled', choices=['enabled', 'disabled']), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'port_absent', - 'nodes_present', - 'nodes_absent']) - ) - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(), - supports_check_mode=True) - clc_loadbalancer = ClcLoadBalancer(module) - clc_loadbalancer.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_modify_server.py b/plugins/modules/cloud/centurylink/clc_modify_server.py deleted file mode 100644 index 90a368867e..0000000000 --- a/plugins/modules/cloud/centurylink/clc_modify_server.py +++ /dev/null @@ -1,967 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_modify_server -short_description: modify servers in CenturyLink Cloud. -description: - - An Ansible module to modify servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to modify. - type: list - required: True - elements: str - cpu: - description: - - How many CPUs to update on the server. - type: str - memory: - description: - - Memory (in GB) to set to the server. - type: str - anti_affinity_policy_id: - description: - - The anti affinity policy id to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_name' - type: str - anti_affinity_policy_name: - description: - - The anti affinity policy name to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_id' - type: str - alert_policy_id: - description: - - The alert policy id to be associated to the server. - This is mutually exclusive with 'alert_policy_name' - type: str - alert_policy_name: - description: - - The alert policy name to be associated to the server. - This is mutually exclusive with 'alert_policy_id' - type: str - state: - description: - - The state to ensure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enable access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
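The credential lookup described in these notes is implemented by the `_set_clc_credentials_from_env` helper that each of these modules carries (it appears verbatim several times in this diff). As a reference for the precedence it applies, a token plus an account alias wins over a username/password pair, and `CLC_V2_API_URL` only overrides the endpoint, here is a minimal standalone sketch of the same logic; the function name and the `fail` callback are illustrative additions, not part of the removed modules.

```python
import os


def resolve_clc_credentials(clc, fail):
    """Sketch of the modules' _set_clc_credentials_from_env precedence.

    clc:  a clc-sdk instance (assumed to expose the attributes used below)
    fail: callback invoked with an error message when no credentials are set
    """
    env = os.environ
    v2_api_token = env.get('CLC_V2_API_TOKEN')
    v2_api_username = env.get('CLC_V2_API_USERNAME')
    v2_api_passwd = env.get('CLC_V2_API_PASSWD')
    clc_alias = env.get('CLC_ACCT_ALIAS')
    api_url = env.get('CLC_V2_API_URL')

    if api_url:
        # Optional endpoint override, applied regardless of the auth mode.
        clc.defaults.ENDPOINT_URL_V2 = api_url

    if v2_api_token and clc_alias:
        # A token plus an account alias short-circuits the password path.
        clc._LOGIN_TOKEN_V2 = v2_api_token
        clc._V2_ENABLED = True
        clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        clc.v2.SetCredentials(api_username=v2_api_username,
                              api_passwd=v2_api_passwd)
    else:
        fail("You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
             "environment variables")
```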
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Set the cpu count to 4 on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 4 - state: present - -- name: Set the memory to 8GB on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - memory: 8 - state: present - -- name: Set the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: present - -- name: Remove the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: absent - -- name: Add the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: present - -- name: Remove the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: absent - -- name: Set the memory to 16GB and cpu to 8 cores on a list of servers - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 8 - memory: 16 - state: present -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects that are changed - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST"
- ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcModifyServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion( - requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - p = self.module.params - cpu = p.get('cpu') - memory = p.get('memory') - state = p.get('state') - if state == 'absent' and (cpu or memory): - return self.module.fail_json( - msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') - - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to modify: %s' % - server_ids) - - (changed, server_dict_array, changed_server_ids) = self._modify_servers( - server_ids=server_ids) - - self.module.exit_json( - changed=changed, - server_ids=changed_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - state=dict(default='present', choices=['present', 'absent']), - cpu=dict(), - memory=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - wait=dict(type='bool', default=True) - ) - mutually_exclusive = [ - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'] - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the 
CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: the error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex.message) - - def _modify_servers(self, server_ids): - """ - modify the servers configuration on the provided list - :param server_ids: list of servers to modify - :return: a list of dictionaries with server information about the servers that were modified - """ - p = self.module.params - state = p.get('state') - server_params = { - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'alert_policy_id': p.get('alert_policy_id'), - 'alert_policy_name': p.get('alert_policy_name'), - } - changed = False - server_changed = False - aa_changed = False - ap_changed = False - server_dict_array = [] - result_server_ids = [] - request_list = [] - changed_servers = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return self.module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - for server in servers: - if state == 'present': - server_changed, server_result = self._ensure_server_config( - server, server_params) - if server_result: - request_list.append(server_result) - aa_changed = self._ensure_aa_policy_present( - server, - server_params) - ap_changed = self._ensure_alert_policy_present( - server, - server_params) - elif state == 'absent': - aa_changed = self._ensure_aa_policy_absent( - server, - server_params) - ap_changed = self._ensure_alert_policy_absent( - server, - server_params) - if server_changed or aa_changed or ap_changed: - changed_servers.append(server) - changed = True - - self._wait_for_requests(self.module, request_list) - self._refresh_servers(self.module, changed_servers) - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - def _ensure_server_config( - self, server, server_params): - """ - ensures the server is updated with the provided cpu and memory - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result 
from the CLC API call - """ - cpu = server_params.get('cpu') - memory = server_params.get('memory') - changed = False - result = None - - if not cpu: - cpu = server.cpu - if not memory: - memory = server.memory - if memory != server.memory or cpu != server.cpu: - if not self.module.check_mode: - result = self._modify_clc_server( - self.clc, - self.module, - server.id, - cpu, - memory) - changed = True - return changed, result - - @staticmethod - def _modify_clc_server(clc, module, server_id, cpu, memory): - """ - Modify the memory or CPU of a clc server. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param server_id: id of the server to modify - :param cpu: the new cpu value - :param memory: the new memory value - :return: the result of CLC API call - """ - result = None - acct_alias = clc.v2.Account.GetAlias() - try: - # Update the server configuration - job_obj = clc.v2.API.Call('PATCH', - 'servers/%s/%s' % (acct_alias, - server_id), - json.dumps([{"op": "set", - "member": "memory", - "value": memory}, - {"op": "set", - "member": "cpu", - "value": cpu}])) - result = clc.v2.Requests(job_obj) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to update the server configuration for server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process modify server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( - server.id, ex.message - )) - - def _ensure_aa_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided anti affinity policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id != current_aa_policy_id: - self._modify_aa_policy( - self.clc, - self.module, - acct_alias, - server.id, - aa_policy_id) - changed = True - return changed - - def _ensure_aa_policy_absent( - self, server, server_params): - """ - ensures the provided anti affinity policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id == current_aa_policy_id: - self._delete_aa_policy( - self.clc, - self.module, - acct_alias, - server.id) - changed = True - return changed - - @staticmethod - def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): - """ - modifies the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param aa_policy_id: the anti affinity policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('PUT', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({"id": aa_policy_id})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _delete_aa_policy(clc, module, acct_alias, server_id): - """ - Delete the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json( - msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( - alias, str(ex.response_text))) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _get_aa_policy_id_of_server(clc, module, alias, server_id): - """ - retrieves the anti affinity policy id of the server based on the CLC server id - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server_id: the CLC server id - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - result = clc.v2.API.Call( - method='GET', url='servers/%s/%s/antiAffinityPolicy' % - (alias, server_id)) - aa_policy_id = result.get('id') - except APIFailedResponse as ex: - if ex.response_status_code != 404: - module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return aa_policy_id - - def _ensure_alert_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided alert policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - if alert_policy_id and not self._alert_policy_exists( - server, alert_policy_id): - self._add_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - def _ensure_alert_policy_absent( - self, server, server_params): - """ - ensures the alert policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - - if alert_policy_id and self._alert_policy_exists( - server, alert_policy_id): - self._remove_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - @staticmethod - def _add_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - add the alert policy to CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('POST', - 'servers/%s/%s/alertPolicies' % ( - acct_alias, - server_id), - json.dumps({"id": alert_policy_id})) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _remove_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - remove the alert policy to the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/alertPolicies/%s' - % (acct_alias, server_id, alert_policy_id)) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - retrieves the alert policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - try: - alert_policies = clc.v2.API.Call(method='GET', - url='alertPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( - alias, str(ex.response_text))) - for alert_policy in alert_policies.get('items'): - if alert_policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = alert_policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _alert_policy_exists(server, alert_policy_id): - """ - Checks if the alert policy exists for the server - :param server: the clc server object - :param alert_policy_id: the alert policy - :return: True: if the given alert policy id associated to the server, False otherwise - """ - result = False - alert_policies = server.alertPolicies - if alert_policies: - for alert_policy in alert_policies: - if alert_policy.get('id') == alert_policy_id: - result = True - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - - argument_dict = ClcModifyServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_modify_server = ClcModifyServer(module) - clc_modify_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_publicip.py b/plugins/modules/cloud/centurylink/clc_publicip.py deleted file mode 100644 index 1cdb4aa8db..0000000000 --- a/plugins/modules/cloud/centurylink/clc_publicip.py +++ /dev/null @@ -1,360 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_publicip -short_description: Add and Delete public ips on servers in CenturyLink Cloud. -description: - - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. -options: - protocol: - description: - - The protocol that the public IP will listen for. - type: str - default: TCP - choices: ['TCP', 'UDP', 'ICMP'] - ports: - description: - - A list of ports to expose. This is required when state is 'present' - type: list - elements: int - server_ids: - description: - - A list of servers to create public ips on. - type: list - required: True - elements: str - state: - description: - - Determine whether to create or delete public IPs. 
If present module will not create a second public ip if one - already exists. - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Add Public IP to Server - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - protocol: TCP - ports: - - 80 - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -- name: Delete Public IP from Server - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcPublicIp(object): - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - params = self.module.params - server_ids = params['server_ids'] - ports = params['ports'] - protocol = params['protocol'] - state = params['state'] - - if state == 'present': - changed, changed_server_ids, requests = self.ensure_public_ip_present( - server_ids=server_ids, protocol=protocol, ports=ports) - elif state == 'absent': - changed, changed_server_ids, requests = self.ensure_public_ip_absent( - server_ids=server_ids) - else: - return self.module.fail_json(msg="Unknown State: " + state) - self._wait_for_requests_to_complete(requests) - return self.module.exit_json(changed=changed, - server_ids=changed_server_ids) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list', elements='int'), - wait=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - def ensure_public_ip_present(self, server_ids, protocol, ports): - """ - Ensures the given server ids having the public ip available - :param server_ids: the list of server ids - :param protocol: the ip protocol - :param ports: the list of ports to expose - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) == 0] - ports_to_expose = [{'protocol': protocol, 'port': port} - for port in ports] - for server in servers_to_change: - if not self.module.check_mode: - result = self._add_publicip_to_server(server, ports_to_expose) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _add_publicip_to_server(self, server, ports_to_expose): - result = None - try: - result = server.PublicIPs().Add(ports_to_expose) - except CLCException as ex: - self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_public_ip_absent(self, server_ids): - """ - Ensures the given server ids having the public ip removed if there is any - :param server_ids: the list of server ids - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) > 0] - for server in servers_to_change: - if not self.module.check_mode: - result = self._remove_publicip_from_server(server) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _remove_publicip_from_server(self, server): - result = None - try: - for ip_address in server.PublicIPs().public_ips: - result = ip_address.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process public ip request') - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_ids, message): - """ - Gets list of servers form CLC api - """ - try: - return self.clc.v2.Servers(server_ids).servers - except CLCException as exception: - self.module.fail_json(msg=message + ': %s' % exception) - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcPublicIp._define_module_argument_spec(), - supports_check_mode=True - ) - clc_public_ip = ClcPublicIp(module) - clc_public_ip.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_server.py b/plugins/modules/cloud/centurylink/clc_server.py deleted file mode 100644 index 95481f1a52..0000000000 --- a/plugins/modules/cloud/centurylink/clc_server.py +++ /dev/null @@ -1,1563 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server -short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. -options: - additional_disks: - description: - - The list of additional disks for the server - type: list - elements: dict - default: [] - add_public_ip: - description: - - Whether to add a public ip to the server - type: bool - default: 'no' - alias: - description: - - The account alias to provision the servers under. - type: str - anti_affinity_policy_id: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. - type: str - anti_affinity_policy_name: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. - type: str - alert_policy_id: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. - type: str - alert_policy_name: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'. - type: str - count: - description: - - The number of servers to build (mutually exclusive with exact_count) - default: 1 - type: int - count_group: - description: - - Required when exact_count is specified. The Server Group use to determine how many servers to deploy. - type: str - cpu: - description: - - How many CPUs to provision on the server - default: 1 - type: int - cpu_autoscale_policy_id: - description: - - The autoscale policy to assign to the server. - type: str - custom_fields: - description: - - The list of custom fields to set on the server. - type: list - default: [] - elements: dict - description: - description: - - The description to set for the server. - type: str - exact_count: - description: - - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, - creating and deleting them to reach that count. Requires count_group to be set. - type: int - group: - description: - - The Server Group to create servers under. - type: str - default: 'Default Group' - ip_address: - description: - - The IP Address for the server. One is assigned if not provided. - type: str - location: - description: - - The Datacenter to create servers in. - type: str - managed_os: - description: - - Whether to create the server as 'Managed' or not. - type: bool - default: 'no' - required: False - memory: - description: - - Memory in GB. - type: int - default: 1 - name: - description: - - A 1 to 6 character identifier to use for the server. 
This is required when state is 'present' - type: str - network_id: - description: - - The network UUID on which to create servers. - type: str - packages: - description: - - The list of blueprint packages to run on the server after it is created. - type: list - elements: dict - default: [] - password: - description: - - Password for the administrator / root user. - type: str - primary_dns: - description: - - Primary DNS used by the server. - type: str - public_ip_protocol: - description: - - The protocol to use for the public ip if add_public_ip is set to True. - type: str - default: 'TCP' - choices: ['TCP', 'UDP', 'ICMP'] - public_ip_ports: - description: - - A list of ports to allow on the firewall to the server's public ip, if add_public_ip is set to True. - type: list - elements: dict - default: [] - secondary_dns: - description: - - Secondary DNS used by the server. - type: str - server_ids: - description: - - Required for started, stopped, and absent states. - A list of server Ids to ensure are started, stopped, or absent. - type: list - default: [] - elements: str - source_server_password: - description: - - The password for the source server if a clone is specified. - type: str - state: - description: - - The state to ensure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent', 'started', 'stopped'] - storage_type: - description: - - The type of storage to attach to the server. - type: str - default: 'standard' - choices: ['standard', 'hyperscale'] - template: - description: - - The template to use for server creation. Will search for a template if a partial string is provided. - This is required when state is 'present' - type: str - ttl: - description: - - The time to live for the server in seconds. The server will be deleted when this time expires. - type: str - type: - description: - - The type of server to create. - type: str - default: 'standard' - choices: ['standard', 'hyperscale', 'bareMetal'] - configuration_id: - description: - - Only required for bare metal servers. - Specifies the identifier for the specific configuration type of bare metal server to deploy. - type: str - os_type: - description: - - Only required for bare metal servers. - Specifies the OS to provision with the bare metal server. - type: str - choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enable access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
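For the token-based alternative these notes mention, the token can be fetched with a single HTTP call to the documented login endpoint. The sketch below uses the `requests` library the modules already require; the `bearerToken` and `accountAlias` response field names are assumptions taken from the public CLC v2 API documentation, not something this diff itself verifies.

```python
import os

import requests


def fetch_clc_token(username, password):
    """Obtain a CLC v2 API token and account alias from the login endpoint.

    The 'bearerToken' and 'accountAlias' field names are assumed from the
    public API documentation.
    """
    resp = requests.post(
        'https://api.ctl.io/v2/authentication/login',
        json={'username': username, 'password': password},
        timeout=30,
    )
    resp.raise_for_status()
    body = resp.json()
    return body['bearerToken'], body['accountAlias']


if __name__ == '__main__':
    token, alias = fetch_clc_token(os.environ['CLC_V2_API_USERNAME'],
                                   os.environ['CLC_V2_API_PASSWD'])
    # Export these as CLC_V2_API_TOKEN / CLC_ACCT_ALIAS to use the token path.
    print(token, alias)
```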
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Provision a single Ubuntu Server - community.general.clc_server: - name: test - template: ubuntu-14-64 - count: 1 - group: Default Group - state: present - -- name: Ensure 'Default Group' has exactly 5 servers - community.general.clc_server: - name: test - template: ubuntu-14-64 - exact_count: 5 - count_group: Default Group - group: Default Group - -- name: Stop a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: stopped - -- name: Start a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: started - -- name: Delete a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -partially_created_server_ids: - description: The list of server ids that are partially created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects returned from CLC - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - 
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import time -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion( - requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - changed = False - new_server_ids = [] - server_dict_array = [] - - self._set_clc_credentials_from_env() - self.module.params = self._validate_module_params( - self.clc, - self.module) - p = self.module.params - state = p.get('state') - - # - # Handle each state - # - partial_servers_ids = [] - if state == 'absent': - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to delete: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._delete_servers(module=self.module, - clc=self.clc, - server_ids=server_ids) - - elif state in ('started', 'stopped'): - server_ids = p.get('server_ids') - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of servers to run: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._start_stop_servers(self.module, - self.clc, - server_ids) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not p.get('template') and p.get('type') != 'bareMetal': - return self.module.fail_json( - msg='template parameter is required for new instance') - - if p.get('exact_count') is None: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._create_servers(self.module, - self.clc) - else: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._enforce_count(self.module, - self.clc) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - partially_created_server_ids=partial_servers_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec 
dictionary - """ - argument_spec = dict( - name=dict(), - template=dict(), - group=dict(default='Default Group'), - network_id=dict(), - location=dict(default=None), - cpu=dict(default=1, type='int'), - memory=dict(default=1, type='int'), - alias=dict(default=None), - password=dict(default=None, no_log=True), - ip_address=dict(default=None), - storage_type=dict( - default='standard', - choices=[ - 'standard', - 'hyperscale']), - type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), - primary_dns=dict(default=None), - secondary_dns=dict(default=None), - additional_disks=dict(type='list', default=[], elements='dict'), - custom_fields=dict(type='list', default=[], elements='dict'), - ttl=dict(default=None), - managed_os=dict(type='bool', default=False), - description=dict(default=None), - source_server_password=dict(default=None, no_log=True), - cpu_autoscale_policy_id=dict(default=None), - anti_affinity_policy_id=dict(default=None), - anti_affinity_policy_name=dict(default=None), - alert_policy_id=dict(default=None), - alert_policy_name=dict(default=None), - packages=dict(type='list', default=[], elements='dict'), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'started', - 'stopped']), - count=dict(type='int', default=1), - exact_count=dict(type='int', default=None), - count_group=dict(), - server_ids=dict(type='list', default=[], elements='str'), - add_public_ip=dict(type='bool', default=False), - public_ip_protocol=dict( - default='TCP', - choices=[ - 'TCP', - 'UDP', - 'ICMP']), - public_ip_ports=dict(type='list', default=[], elements='dict'), - configuration_id=dict(default=None), - os_type=dict(default=None, - choices=[ - 'redHat6_64Bit', - 'centOS6_64Bit', - 'windows2012R2Standard_64Bit', - 'ubuntu14_64Bit' - ]), - wait=dict(type='bool', default=True)) - - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'], - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _validate_module_params(clc, module): - """ - Validate the module params, and lookup default values. 
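The argument spec above pairs with a `mutually_exclusive` list that AnsibleModule enforces before the module body ever runs. A minimal standalone sketch of that enforcement, assuming a simplified `check_mutually_exclusive` helper (the name and implementation below are ours, not Ansible's):

```python
def check_mutually_exclusive(params, mutually_exclusive):
    """Fail when more than one parameter in an exclusive group is set."""
    for group in mutually_exclusive:
        supplied = [name for name in group if params.get(name) is not None]
        if len(supplied) > 1:
            raise ValueError('parameters are mutually exclusive: %s'
                             % ', '.join(supplied))


# exact_count cannot be combined with count (see the spec above).
try:
    check_mutually_exclusive({'exact_count': 2, 'count': 1},
                             [['exact_count', 'count'],
                              ['exact_count', 'state']])
except ValueError as exc:
    print(exc)  # parameters are mutually exclusive: exact_count, count
```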
- :param clc: clc-sdk instance to use - :param module: module to validate - :return: dictionary of validated params - """ - params = module.params - datacenter = ClcServer._find_datacenter(clc, module) - - ClcServer._validate_types(module) - ClcServer._validate_name(module) - - params['alias'] = ClcServer._find_alias(clc, module) - params['cpu'] = ClcServer._find_cpu(clc, module) - params['memory'] = ClcServer._find_memory(clc, module) - params['description'] = ClcServer._find_description(module) - params['ttl'] = ClcServer._find_ttl(clc, module) - params['template'] = ClcServer._find_template_id(module, datacenter) - params['group'] = ClcServer._find_group(module, datacenter).id - params['network_id'] = ClcServer._find_network_id(module, datacenter) - params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( - clc, - module) - params['alert_policy_id'] = ClcServer._find_alert_policy_id( - clc, - module) - - return params - - @staticmethod - def _find_datacenter(clc, module): - """ - Find the datacenter by calling the CLC API. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Datacenter instance - """ - location = module.params.get('location') - try: - if not location: - account = clc.v2.Account() - location = account.data.get('primaryDataCenter') - data_center = clc.v2.Datacenter(location) - return data_center - except CLCException: - module.fail_json(msg="Unable to find location: {0}".format(location)) - - @staticmethod - def _find_alias(clc, module): - """ - Find or Validate the Account Alias by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Account instance - """ - alias = module.params.get('alias') - if not alias: - try: - alias = clc.v2.Account.GetAlias() - except CLCException as ex: - module.fail_json(msg='Unable to find account alias. {0}'.format( - ex.message - )) - return alias - - @staticmethod - def _find_cpu(clc, module): - """ - Find or validate the CPU value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for CPU - """ - cpu = module.params.get('cpu') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not cpu and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("cpu"): - cpu = group.Defaults("cpu") - else: - module.fail_json( - msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) - return cpu - - @staticmethod - def _find_memory(clc, module): - """ - Find or validate the Memory value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for Memory - """ - memory = module.params.get('memory') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not memory and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("memory"): - memory = group.Defaults("memory") - else: - module.fail_json(msg=str( - "Can\'t determine a default memory value. 
Please provide a value for memory.")) - return memory - - @staticmethod - def _find_description(module): - """ - Set the description module param to name if description is blank - :param module: the module to validate - :return: string description - """ - description = module.params.get('description') - if not description: - description = module.params.get('name') - return description - - @staticmethod - def _validate_types(module): - """ - Validate that type and storage_type are set appropriately, and fail if not - :param module: the module to validate - :return: none - """ - state = module.params.get('state') - server_type = module.params.get( - 'type').lower() if module.params.get('type') else None - storage_type = module.params.get( - 'storage_type').lower() if module.params.get('storage_type') else None - - if state == "present": - if server_type == "standard" and storage_type not in ( - "standard", "premium"): - module.fail_json( - msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) - - if server_type == "hyperscale" and storage_type != "hyperscale": - module.fail_json( - msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) - - @staticmethod - def _validate_name(module): - """ - Validate that name is the correct length if provided, fail if it's not - :param module: the module to validate - :return: none - """ - server_name = module.params.get('name') - state = module.params.get('state') - - if state == 'present' and ( - len(server_name) < 1 or len(server_name) > 6): - module.fail_json(msg=str( - "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) - - @staticmethod - def _find_ttl(clc, module): - """ - Validate that TTL is > 3600 if set, and fail if not - :param clc: clc-sdk instance to use - :param module: module to validate - :return: validated ttl - """ - ttl = module.params.get('ttl') - - if ttl: - if ttl <= 3600: - return module.fail_json(msg=str("Ttl cannot be <= 3600")) - else: - ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) - return ttl - - @staticmethod - def _find_template_id(module, datacenter): - """ - Find the template id by calling the CLC API. - :param module: the module to validate - :param datacenter: the datacenter to search for the template - :return: a valid clc template id - """ - lookup_template = module.params.get('template') - state = module.params.get('state') - type = module.params.get('type') - result = None - - if state == 'present' and type != 'bareMetal': - try: - result = datacenter.Templates().Search(lookup_template)[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a template: " + - lookup_template + - " in location: " + - datacenter.id)) - return result - - @staticmethod - def _find_network_id(module, datacenter): - """ - Validate the provided network id or return a default. 
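The `_find_network_id` helper that follows implements the same parameter-or-datacenter-default pattern as the other `_find_*` lookups: an explicit module parameter wins, otherwise the first resource the datacenter reports is used, otherwise the lookup fails. A standalone sketch of that pattern, with stand-in names (nothing below is part of the clc-sdk):

```python
class NoNetworkError(Exception):
    pass


def pick_network_id(param_value, available_ids):
    """Explicit parameter first, datacenter default second, else fail."""
    if param_value:
        return param_value
    if available_ids:
        return available_ids[0]
    raise NoNetworkError('Unable to find a network in this location')


print(pick_network_id(None, ['net-a', 'net-b']))  # net-a (datacenter default)
print(pick_network_id('net-z', ['net-a']))        # net-z (explicit parameter)
```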
- :param module: the module to validate - :param datacenter: the datacenter to search for a network id - :return: a valid network id - """ - network_id = module.params.get('network_id') - - if not network_id: - try: - network_id = datacenter.Networks().networks[0].id - # -- added for clc-sdk 2.23 compatibility - # datacenter_networks = clc_sdk.v2.Networks( - # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) - # network_id = datacenter_networks.networks[0].id - # -- end - except CLCException: - module.fail_json( - msg=str( - "Unable to find a network in location: " + - datacenter.id)) - - return network_id - - @staticmethod - def _find_aa_policy_id(clc, module): - """ - Validate if the anti affinity policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: aa_policy_id: the anti affinity policy id of the given name. - """ - aa_policy_id = module.params.get('anti_affinity_policy_id') - aa_policy_name = module.params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - alias = module.params.get('alias') - aa_policy_id = ClcServer._get_anti_affinity_policy_id( - clc, - module, - alias, - aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _find_alert_policy_id(clc, module): - """ - Validate if the alert policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: alert_policy_id: the alert policy id of the given name. - """ - alert_policy_id = module.params.get('alert_policy_id') - alert_policy_name = module.params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alias = module.params.get('alias') - alert_policy_id = ClcServer._get_alert_policy_id_by_name( - clc=clc, - module=module, - alias=alias, - alert_policy_name=alert_policy_name - ) - if not alert_policy_id: - module.fail_json( - msg='No alert policy exist with name : %s' % alert_policy_name) - return alert_policy_id - - def _create_servers(self, module, clc, override_count=None): - """ - Create New Servers in CLC cloud - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created - """ - p = module.params - request_list = [] - servers = [] - server_dict_array = [] - created_server_ids = [] - partial_created_servers_ids = [] - - add_public_ip = p.get('add_public_ip') - public_ip_protocol = p.get('public_ip_protocol') - public_ip_ports = p.get('public_ip_ports') - - params = { - 'name': p.get('name'), - 'template': p.get('template'), - 'group_id': p.get('group'), - 'network_id': p.get('network_id'), - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'alias': p.get('alias'), - 'password': p.get('password'), - 'ip_address': p.get('ip_address'), - 'storage_type': p.get('storage_type'), - 'type': p.get('type'), - 'primary_dns': p.get('primary_dns'), - 'secondary_dns': p.get('secondary_dns'), - 'additional_disks': p.get('additional_disks'), - 'custom_fields': p.get('custom_fields'), - 'ttl': p.get('ttl'), - 'managed_os': p.get('managed_os'), - 'description': p.get('description'), - 'source_server_password': p.get('source_server_password'), - 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), - 'anti_affinity_policy_id': 
p.get('anti_affinity_policy_id'), - 'packages': p.get('packages'), - 'configuration_id': p.get('configuration_id'), - 'os_type': p.get('os_type') - } - - count = override_count if override_count else p.get('count') - - changed = False if count == 0 else True - - if not changed: - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - for i in range(0, count): - if not module.check_mode: - req = self._create_clc_server(clc=clc, - module=module, - server_params=params) - server = req.requests[0].Server() - request_list.append(req) - servers.append(server) - - self._wait_for_requests(module, request_list) - self._refresh_servers(module, servers) - - ip_failed_servers = self._add_public_ip_to_servers( - module=module, - should_add_public_ip=add_public_ip, - servers=servers, - public_ip_protocol=public_ip_protocol, - public_ip_ports=public_ip_ports) - ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, - module=module, - servers=servers) - - for server in servers: - if server in ip_failed_servers or server in ap_failed_servers: - partial_created_servers_ids.append(server.id) - else: - # reload server details - server = clc.v2.Server(server.id) - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - - if add_public_ip and len(server.PublicIPs().public_ips) > 0: - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - created_server_ids.append(server.id) - server_dict_array.append(server.data) - - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - - def _enforce_count(self, module, clc): - """ - Enforce that there is the right number of servers in the provided group. - Starts or stops servers as necessary. - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created or deleted - """ - p = module.params - changed = False - count_group = p.get('count_group') - datacenter = ClcServer._find_datacenter(clc, module) - exact_count = p.get('exact_count') - server_dict_array = [] - partial_servers_ids = [] - changed_server_ids = [] - - # fail here if the exact count was specified without filtering - # on a group, as this may lead to a undesired removal of instances - if exact_count and count_group is None: - return module.fail_json( - msg="you must use the 'count_group' option with exact_count") - - servers, running_servers = ClcServer._find_running_servers_by_group( - module, datacenter, count_group) - - if len(running_servers) == exact_count: - changed = False - - elif len(running_servers) < exact_count: - to_create = exact_count - len(running_servers) - server_dict_array, changed_server_ids, partial_servers_ids, changed \ - = self._create_servers(module, clc, override_count=to_create) - - for server in server_dict_array: - running_servers.append(server) - - elif len(running_servers) > exact_count: - to_remove = len(running_servers) - exact_count - all_server_ids = sorted([x.id for x in running_servers]) - remove_ids = all_server_ids[0:to_remove] - - (changed, server_dict_array, changed_server_ids) \ - = ClcServer._delete_servers(module, clc, remove_ids) - - return server_dict_array, changed_server_ids, partial_servers_ids, changed - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. 
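`_wait_for_requests` below relies on clc-sdk's `Request.WaitUntilComplete()` returning the count of failed sub-requests, so summing over the whole queue yields a total failure count. A runnable sketch of that pattern with a stand-in request class (the real call blocks until provisioning finishes):

```python
class FakeRequest:
    """Stand-in for clc-sdk's Request object."""

    def __init__(self, failed=0):
        self._failed = failed

    def WaitUntilComplete(self):
        return self._failed  # number of failed sub-requests


def wait_for_requests(request_list):
    failed = sum(req.WaitUntilComplete() for req in request_list)
    if failed > 0:
        raise RuntimeError('Unable to process server request '
                           '(%d failed)' % failed)


wait_for_requests([FakeRequest(0), FakeRequest(0)])  # returns quietly
```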
- :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( - server.id, ex.message - )) - - @staticmethod - def _add_public_ip_to_servers( - module, - should_add_public_ip, - servers, - public_ip_protocol, - public_ip_ports): - """ - Create a public IP for servers - :param module: the AnsibleModule object - :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False - :param servers: List of servers to add public ips to - :param public_ip_protocol: a protocol to allow for the public ips - :param public_ip_ports: list of ports to allow for the public ips - :return: none - """ - failed_servers = [] - if not should_add_public_ip: - return failed_servers - - ports_lst = [] - request_list = [] - server = None - - for port in public_ip_ports: - ports_lst.append( - {'protocol': public_ip_protocol, 'port': port}) - try: - if not module.check_mode: - for server in servers: - request = server.PublicIPs().Add(ports_lst) - request_list.append(request) - except APIFailedResponse: - failed_servers.append(server) - ClcServer._wait_for_requests(module, request_list) - return failed_servers - - @staticmethod - def _add_alert_policy_to_servers(clc, module, servers): - """ - Associate the alert policy to servers - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param servers: List of servers to add alert policy to - :return: failed_servers: the list of servers which failed while associating alert policy - """ - failed_servers = [] - p = module.params - alert_policy_id = p.get('alert_policy_id') - alias = p.get('alias') - - if alert_policy_id and not module.check_mode: - for server in servers: - try: - ClcServer._add_alert_policy_to_server( - clc=clc, - alias=alias, - server_id=server.id, - alert_policy_id=alert_policy_id) - except CLCException: - failed_servers.append(server) - return failed_servers - - @staticmethod - def _add_alert_policy_to_server( - clc, alias, server_id, alert_policy_id): - """ - Associate an alert policy to a clc server - :param clc: the clc-sdk instance to use - :param alias: the clc account alias - :param server_id: The clc server id - :param alert_policy_id: the alert policy id to be associated to the server - :return: none - """ - try: - clc.v2.API.Call( - method='POST', - url='servers/%s/%s/alertPolicies' % (alias, server_id), - payload=json.dumps( - { - 'id': alert_policy_id - })) - except APIFailedResponse as e: - raise CLCException( - 'Failed to associate alert policy to the server : {0} with Error {1}'.format( - server_id, str(e.response_text))) - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - Returns the alert policy id for the given alert policy name - :param clc: the clc-sdk instance to use - 
:param module: the AnsibleModule object - :param alias: the clc account alias - :param alert_policy_name: the name of the alert policy - :return: alert_policy_id: the alert policy id - """ - alert_policy_id = None - policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) - if not policies: - return alert_policy_id - for policy in policies.get('items'): - if policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _delete_servers(module, clc, server_ids): - """ - Delete the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to delete - :return: a list of dictionaries with server information about the servers that were deleted - """ - terminated_server_ids = [] - server_dict_array = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if not module.check_mode: - request_list.append(server.Delete()) - ClcServer._wait_for_requests(module, request_list) - - for server in servers: - terminated_server_ids.append(server.id) - - return True, server_dict_array, terminated_server_ids - - @staticmethod - def _start_stop_servers(module, clc, server_ids): - """ - Start or Stop the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to start or stop - :return: a list of dictionaries with server information about the servers that were started or stopped - """ - p = module.params - state = p.get('state') - changed = False - changed_servers = [] - server_dict_array = [] - result_server_ids = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if server.powerState != state: - changed_servers.append(server) - if not module.check_mode: - request_list.append( - ClcServer._change_server_power_state( - module, - server, - state)) - changed = True - - ClcServer._wait_for_requests(module, request_list) - ClcServer._refresh_servers(module, changed_servers) - - for server in set(changed_servers + servers): - try: - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - except (KeyError, IndexError): - pass - - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _change_server_power_state(module, server, state): - """ - Change the server powerState - :param module: the module to check for intended state - :param server: the server to start or stop - :param state: the intended powerState for the server - :return: the request object from clc-sdk call - """ - result = None - try: - if state == 'started': - result = server.PowerOn() - else: - # Try to shut down the server and fall back to power off when unable to shut down. 
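The code that follows implements a two-stage stop: ask the guest OS for a clean shutdown first, and fall back to a hard power-off when the shutdown request is not accepted. A standalone sketch of that fallback; `FakeServer` and the return shapes are illustrative only:

```python
class FakeServer:
    """Illustrative stand-in; real clc-sdk results expose a requests list."""

    def __init__(self, accepts_shutdown):
        self._accepts = accepts_shutdown

    def ShutDown(self):
        return {'requests': ['req-1']} if self._accepts else None

    def PowerOff(self):
        return {'requests': ['req-2']}


def stop_server(server):
    result = server.ShutDown()
    if result and result.get('requests'):
        return result         # clean shutdown was queued
    return server.PowerOff()  # hard fallback


print(stop_server(FakeServer(accepts_shutdown=False)))  # uses PowerOff()
```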
- result = server.ShutDown() - if result and hasattr(result, 'requests') and result.requests[0]: - return result - else: - result = server.PowerOff() - except CLCException: - module.fail_json( - msg='Unable to change power state for server {0}'.format( - server.id)) - return result - - @staticmethod - def _find_running_servers_by_group(module, datacenter, count_group): - """ - Find a list of running servers in the provided group - :param module: the AnsibleModule object - :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group - :param count_group: the group to count the servers - :return: list of servers, and list of running servers - """ - group = ClcServer._find_group( - module=module, - datacenter=datacenter, - lookup_group=count_group) - - servers = group.Servers().Servers() - running_servers = [] - - for server in servers: - if server.status == 'active' and server.powerState == 'started': - running_servers.append(server) - - return servers, running_servers - - @staticmethod - def _find_group(module, datacenter, lookup_group=None): - """ - Find a server group in a datacenter by calling the CLC API - :param module: the AnsibleModule instance - :param datacenter: clc-sdk.Datacenter instance to search for the group - :param lookup_group: string name of the group to search for - :return: clc-sdk.Group instance - """ - if not lookup_group: - lookup_group = module.params.get('group') - try: - return datacenter.Groups().Get(lookup_group) - except CLCException: - pass - - # The search above only acts on the main - result = ClcServer._find_group_recursive( - module, - datacenter.Groups(), - lookup_group) - - if result is None: - module.fail_json( - msg=str( - "Unable to find group: " + - lookup_group + - " in location: " + - datacenter.id)) - - return result - - @staticmethod - def _find_group_recursive(module, group_list, lookup_group): - """ - Find a server group by recursively walking the tree - :param module: the AnsibleModule instance to use - :param group_list: a list of groups to search - :param lookup_group: the group to look for - :return: list of groups - """ - result = None - for group in group_list.groups: - subgroups = group.Subgroups() - try: - return subgroups.Get(lookup_group) - except CLCException: - result = ClcServer._find_group_recursive( - module, - subgroups, - lookup_group) - - if result is not None: - break - - return result - - @staticmethod - def _create_clc_server( - clc, - module, - server_params): - """ - Call the CLC Rest API to Create a Server - :param clc: the clc-python-sdk instance to use - :param module: the AnsibleModule instance to use - :param server_params: a dictionary of params to use to create the servers - :return: clc-sdk.Request object linked to the queued server request - """ - - try: - res = clc.v2.API.Call( - method='POST', - url='servers/%s' % - (server_params.get('alias')), - payload=json.dumps( - { - 'name': server_params.get('name'), - 'description': server_params.get('description'), - 'groupId': server_params.get('group_id'), - 'sourceServerId': server_params.get('template'), - 'isManagedOS': server_params.get('managed_os'), - 'primaryDNS': server_params.get('primary_dns'), - 'secondaryDNS': server_params.get('secondary_dns'), - 'networkId': server_params.get('network_id'), - 'ipAddress': server_params.get('ip_address'), - 'password': server_params.get('password'), - 'sourceServerPassword': server_params.get('source_server_password'), - 'cpu': server_params.get('cpu'), - 'cpuAutoscalePolicyId': 
server_params.get('cpu_autoscale_policy_id'), - 'memoryGB': server_params.get('memory'), - 'type': server_params.get('type'), - 'storageType': server_params.get('storage_type'), - 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), - 'customFields': server_params.get('custom_fields'), - 'additionalDisks': server_params.get('additional_disks'), - 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages'), - 'configurationId': server_params.get('configuration_id'), - 'osType': server_params.get('os_type')})) - - result = clc.v2.Requests(res) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( - server_params.get('name'), - ex.response_text - )) - - # - # Patch the Request object so that it returns a valid server - - # Find the server's UUID from the API response - server_uuid = [obj['id'] - for obj in res['links'] if obj['rel'] == 'self'][0] - - # Change the request server method to a _find_server_by_uuid closure so - # that it will work - result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( - clc, - module, - server_uuid, - server_params.get('alias')) - - return result - - @staticmethod - def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format( - alias, ex.response_text)) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - return aa_policy_id - - # - # This is the function that gets patched to the Request.server object using a lamda closure - # - - @staticmethod - def _find_server_by_uuid_w_retry( - clc, module, svr_uuid, alias=None, retries=5, back_out=2): - """ - Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param svr_uuid: UUID of the server - :param retries: the number of retry attempts to make prior to fail. 
default is 5 - :param alias: the Account Alias to search - :return: a clc-sdk.Server instance - """ - if not alias: - alias = clc.v2.Account.GetAlias() - - # Wait and retry if the api returns a 404 - while True: - retries -= 1 - try: - server_obj = clc.v2.API.Call( - method='GET', url='servers/%s/%s?uuid=true' % - (alias, svr_uuid)) - server_id = server_obj['id'] - server = clc.v2.Server( - id=server_id, - alias=alias, - server_obj=server_obj) - return server - - except APIFailedResponse as e: - if e.response_status_code != 404: - return module.fail_json( - msg='A failure response was received from CLC API when ' - 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % - (svr_uuid, e.response_status_code, e.message)) - if retries == 0: - return module.fail_json( - msg='Unable to reach the CLC API after 5 attempts') - time.sleep(back_out) - back_out *= 2 - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_server = ClcServer(module) - clc_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/plugins/modules/cloud/centurylink/clc_server_snapshot.py deleted file mode 100644 index 1f92def088..0000000000 --- a/plugins/modules/cloud/centurylink/clc_server_snapshot.py +++ /dev/null @@ -1,411 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server_snapshot -short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. -options: - server_ids: - description: - - The list of CLC server Ids. - type: list - required: True - elements: str - expiration_days: - description: - - The number of days to keep the server snapshot before it expires. - type: int - default: 7 - required: False - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - required: False - choices: ['present', 'absent', 'restore'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: True - required: False - type: str -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. 
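The precedence these notes describe matches the `_set_clc_credentials_from_env` implementation shown earlier: a pre-generated token plus account alias wins, then username and password, otherwise the module fails. Note that the code reads `CLC_V2_API_PASSWD`, not `CLC_V2_API_PASSWORD`. A hedged sketch of that resolution (the helper name and return shape are ours):

```python
import os


def resolve_clc_credentials(env=None):
    env = os.environ if env is None else env
    token = env.get('CLC_V2_API_TOKEN')
    alias = env.get('CLC_ACCT_ALIAS')
    username = env.get('CLC_V2_API_USERNAME')
    password = env.get('CLC_V2_API_PASSWD')

    if token and alias:
        return {'mode': 'token', 'alias': alias}
    if username and password:
        return {'mode': 'password', 'username': username}
    raise RuntimeError('You must set the CLC_V2_API_USERNAME and '
                       'CLC_V2_API_PASSWD environment variables')


print(resolve_clc_credentials({'CLC_V2_API_TOKEN': 't',
                               'CLC_ACCT_ALIAS': 'ACCT'}))  # token mode
```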
The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Create server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - expiration_days: 10 - wait: True - state: present - -- name: Restore server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: True - state: restore - -- name: Delete server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: True - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcSnapshot: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion( - requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - server_ids = p['server_ids'] - expiration_days = p['expiration_days'] - state = p['state'] - request_list = [] - changed = False - changed_servers = [] - - self._set_clc_credentials_from_env() - if state == 'present': - changed, request_list, changed_servers = self.ensure_server_snapshot_present( - server_ids=server_ids, - expiration_days=expiration_days) - elif state == 'absent': - changed, request_list, changed_servers = self.ensure_server_snapshot_absent( - server_ids=server_ids) - elif state == 'restore': - changed, request_list, changed_servers = self.ensure_server_snapshot_restore( - server_ids=server_ids) - - self._wait_for_requests_to_complete(request_list) - return self.module.exit_json( - changed=changed, - server_ids=changed_servers) - - def ensure_server_snapshot_present(self, server_ids, expiration_days): - """ - Ensures the given set of server_ids have the snapshots created - :param server_ids: The list of server_ids to create the 
snapshot - :param expiration_days: The number of days to keep the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) == 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._create_server_snapshot(server, expiration_days) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _create_server_snapshot(self, server, expiration_days): - """ - Create the snapshot for the CLC server - :param server: the CLC server object - :param expiration_days: The number of days to keep the snapshot - :return: the create request object from CLC API Call - """ - result = None - try: - result = server.CreateSnapshot( - delete_existing=True, - expiration_days=expiration_days) - except CLCException as ex: - self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_absent(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots removed - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._delete_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _delete_server_snapshot(self, server): - """ - Delete snapshot for the CLC server - :param server: the CLC server object - :return: the delete snapshot request object from CLC API - """ - result = None - try: - result = server.DeleteSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_restore(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots restored - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._restore_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _restore_server_snapshot(self, server): - """ - Restore snapshot for the CLC server - :param server: the CLC server object - :return: the restore snapshot request object from CLC API - """ - result = None - try: - result = server.RestoreSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process server snapshot request') - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - expiration_days=dict(default=7, type='int'), - wait=dict(default=True), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'restore']), - ) - return argument_spec - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: The error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - 
api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcSnapshot.define_argument_spec(), - supports_check_mode=True - ) - clc_snapshot = ClcSnapshot(module) - clc_snapshot.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py deleted file mode 100644 index 64cc8b118a..0000000000 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Dimension Data -# Authors: -# - Aimon Bustardo -# - Bert Diwa -# - Adam Friedman -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: dimensiondata_network -short_description: Create, update, and delete MCP 1.0 & 2.0 networks -extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait - -description: - - Create, update, and delete MCP 1.0 & 2.0 networks -author: 'Aimon Bustardo (@aimonb)' -options: - name: - description: - - The name of the network domain to create. - required: true - type: str - description: - description: - - Additional description of the network domain. - required: false - type: str - service_plan: - description: - - The service plan, either "ESSENTIALS" or "ADVANCED". - - MCP 2.0 Only. - choices: [ESSENTIALS, ADVANCED] - default: ESSENTIALS - type: str - state: - description: - - Should the resource be present or absent. - choices: [present, absent] - default: present - type: str -''' - -EXAMPLES = ''' -- name: Create an MCP 1.0 network - community.general.dimensiondata_network: - region: na - location: NA5 - name: mynet - -- name: Create an MCP 2.0 network - community.general.dimensiondata_network: - region: na - mcp_user: my_user - mcp_password: my_password - location: NA9 - name: mynet - service_plan: ADVANCED - -- name: Delete a network - community.general.dimensiondata_network: - region: na - location: NA1 - name: mynet - state: absent -''' - -RETURN = ''' -network: - description: Dictionary describing the network. - returned: On success when I(state=present). - type: complex - contains: - id: - description: Network ID. - type: str - sample: "8c787000-a000-4050-a215-280893411a7d" - name: - description: Network name. - type: str - sample: "My network" - description: - description: Network description. - type: str - sample: "My network description" - location: - description: Datacenter location. - type: str - sample: NA3 - status: - description: Network status. (MCP 2.0 only) - type: str - sample: NORMAL - private_net: - description: Private network subnet. (MCP 1.0 only) - type: str - sample: "10.2.3.0" - multicast: - description: Multicast enabled? 
(MCP 1.0 only) - type: bool - sample: false -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule -from ansible.module_utils.common.text.converters import to_native - -if HAS_LIBCLOUD: - from libcloud.compute.base import NodeLocation - from libcloud.common.dimensiondata import DimensionDataAPIException - - -class DimensionDataNetworkModule(DimensionDataModule): - """ - The dimensiondata_network module for Ansible. - """ - - def __init__(self): - """ - Create a new Dimension Data network module. - """ - - super(DimensionDataNetworkModule, self).__init__( - module=AnsibleModule( - argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(type='str', required=True), - description=dict(type='str', required=False), - service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), - state=dict(default='present', choices=['present', 'absent']) - ), - required_together=DimensionDataModule.required_together() - ) - ) - - self.name = self.module.params['name'] - self.description = self.module.params['description'] - self.service_plan = self.module.params['service_plan'] - self.state = self.module.params['state'] - - def state_present(self): - network = self._get_network() - - if network: - self.module.exit_json( - changed=False, - msg='Network already exists', - network=self._network_to_dict(network) - ) - - network = self._create_network() - - self.module.exit_json( - changed=True, - msg='Created network "%s" in datacenter "%s".' % (self.name, self.location), - network=self._network_to_dict(network) - ) - - def state_absent(self): - network = self._get_network() - - if not network: - self.module.exit_json( - changed=False, - msg='Network "%s" does not exist' % self.name, - network=self._network_to_dict(network) - ) - - self._delete_network(network) - - def _get_network(self): - if self.mcp_version == '1.0': - networks = self.driver.list_networks(location=self.location) - else: - networks = self.driver.ex_list_network_domains(location=self.location) - - matched_network = [network for network in networks if network.name == self.name] - if matched_network: - return matched_network[0] - - return None - - def _network_to_dict(self, network): - network_dict = dict( - id=network.id, - name=network.name, - description=network.description - ) - - if isinstance(network.location, NodeLocation): - network_dict['location'] = network.location.id - else: - network_dict['location'] = network.location - - if self.mcp_version == '1.0': - network_dict['private_net'] = network.private_net - network_dict['multicast'] = network.multicast - network_dict['status'] = None - else: - network_dict['private_net'] = None - network_dict['multicast'] = None - network_dict['status'] = network.status - - return network_dict - - def _create_network(self): - - # Make sure service_plan argument is defined - if self.mcp_version == '2.0' and 'service_plan' not in self.module.params: - self.module.fail_json( - msg='service_plan required when creating network and location is MCP 2.0' - ) - - # Create network - try: - if self.mcp_version == '1.0': - network = self.driver.ex_create_network( - self.location, - self.name, - description=self.description - ) - else: - network = self.driver.ex_create_network_domain( - self.location, - self.name, - self.module.params['service_plan'], - description=self.description - ) - except DimensionDataAPIException as e: - - 
self.module.fail_json( - msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc() - ) - - if self.module.params['wait'] is True: - network = self._wait_for_network_state(network.id, 'NORMAL') - - return network - - def _delete_network(self, network): - try: - if self.mcp_version == '1.0': - deleted = self.driver.ex_delete_network(network) - else: - deleted = self.driver.ex_delete_network_domain(network) - - if deleted: - self.module.exit_json( - changed=True, - msg="Deleted network with id %s" % network.id - ) - - self.module.fail_json( - "Unexpected failure deleting network with id %s" % network.id - ) - - except DimensionDataAPIException as e: - self.module.fail_json( - msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc() - ) - - def _wait_for_network_state(self, net_id, state_to_wait_for): - try: - return self.driver.connection.wait_for_state( - state_to_wait_for, - self.driver.ex_get_network_domain, - self.module.params['wait_poll_interval'], - self.module.params['wait_time'], - net_id - ) - except DimensionDataAPIException as e: - self.module.fail_json( - msg='Network did not reach % state in time: %s' % (state_to_wait_for, to_native(e)), - exception=traceback.format_exc() - ) - - -def main(): - module = DimensionDataNetworkModule() - if module.state == 'present': - module.state_present() - elif module.state == 'absent': - module.state_absent() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py deleted file mode 100644 index 26c621f44b..0000000000 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py +++ /dev/null @@ -1,568 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Dimension Data -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . -# -# Authors: -# - Adam Friedman -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: dimensiondata_vlan -short_description: Manage a VLAN in a Cloud Control network domain. -extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait - -description: - - Manage VLANs in Cloud Control network domains. -author: 'Adam Friedman (@tintoy)' -options: - name: - description: - - The name of the target VLAN. - type: str - required: true - description: - description: - - A description of the VLAN. - type: str - network_domain: - description: - - The Id or name of the target network domain. - required: true - type: str - private_ipv4_base_address: - description: - - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0). - type: str - private_ipv4_prefix_size: - description: - - The size of the IPv4 address space, e.g 24. - - Required, if C(private_ipv4_base_address) is specified. 
- type: int - state: - description: - - The desired state for the target VLAN. - - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist). - choices: [present, absent, readonly] - default: present - type: str - allow_expand: - description: - - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses. - - If C(False), the module will fail under these conditions. - - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). - type: bool - default: 'no' -''' - -EXAMPLES = ''' -- name: Add or update VLAN - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan1 - description: A test VLAN - private_ipv4_base_address: 192.168.23.0 - private_ipv4_prefix_size: 24 - state: present - wait: yes - -- name: Read / get VLAN details - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan1 - state: readonly - wait: yes - -- name: Delete a VLAN - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan_1 - state: absent - wait: yes -''' - -RETURN = ''' -vlan: - description: Dictionary describing the VLAN. - returned: On success when I(state) is 'present' - type: complex - contains: - id: - description: VLAN ID. - type: str - sample: "aaaaa000-a000-4050-a215-2808934ccccc" - name: - description: VLAN name. - type: str - sample: "My VLAN" - description: - description: VLAN description. - type: str - sample: "My VLAN description" - location: - description: Datacenter location. - type: str - sample: NA3 - private_ipv4_base_address: - description: The base address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.0 - private_ipv4_prefix_size: - description: The prefix size for the VLAN's private IPV4 network. - type: int - sample: 24 - private_ipv4_gateway_address: - description: The gateway address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.1 - private_ipv6_base_address: - description: The base address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:0 - private_ipv6_prefix_size: - description: The prefix size for the VLAN's IPV6 network. - type: int - sample: 64 - private_ipv6_gateway_address: - description: The gateway address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:1 - status: - description: VLAN status. - type: str - sample: NORMAL -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError - -try: - from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException - - HAS_LIBCLOUD = True - -except ImportError: - DimensionDataVlan = None - - HAS_LIBCLOUD = False - - -class DimensionDataVlanModule(DimensionDataModule): - """ - The dimensiondata_vlan module for Ansible. - """ - - def __init__(self): - """ - Create a new Dimension Data VLAN module. 
- """ - - super(DimensionDataVlanModule, self).__init__( - module=AnsibleModule( - argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(required=True, type='str'), - description=dict(default='', type='str'), - network_domain=dict(required=True, type='str'), - private_ipv4_base_address=dict(default='', type='str'), - private_ipv4_prefix_size=dict(default=0, type='int'), - allow_expand=dict(required=False, default=False, type='bool'), - state=dict(default='present', choices=['present', 'absent', 'readonly']) - ), - required_together=DimensionDataModule.required_together() - ) - ) - - self.name = self.module.params['name'] - self.description = self.module.params['description'] - self.network_domain_selector = self.module.params['network_domain'] - self.private_ipv4_base_address = self.module.params['private_ipv4_base_address'] - self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size'] - self.state = self.module.params['state'] - self.allow_expand = self.module.params['allow_expand'] - - if self.wait and self.state != 'present': - self.module.fail_json( - msg='The wait parameter is only supported when state is "present".' - ) - - def state_present(self): - """ - Ensure that the target VLAN is present. - """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if not vlan: - if self.module.check_mode: - self.module.exit_json( - msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format( - self.name, self.network_domain_selector - ), - changed=True - ) - - vlan = self._create_vlan(network_domain) - self.module.exit_json( - msg='Created VLAN "{0}" in network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - else: - diff = VlanDiff(vlan, self.module.params) - if not diff.has_changes(): - self.module.exit_json( - msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=False - ) - - return - - try: - diff.ensure_legal_change() - except InvalidVlanChangeError as invalid_vlan_change: - self.module.fail_json( - msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format( - self.name, self.network_domain_selector, invalid_vlan_change - ) - ) - - if diff.needs_expand() and not self.allow_expand: - self.module.fail_json( - msg='The configured private IPv4 network size ({0}-bit prefix) for '.format( - self.private_ipv4_prefix_size - ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format( - vlan.private_ipv4_range_size - ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.' - ) - - if self.module.check_mode: - self.module.exit_json( - msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - - if diff.needs_edit(): - vlan.name = self.name - vlan.description = self.description - - self.driver.ex_update_vlan(vlan) - - if diff.needs_expand(): - vlan.private_ipv4_range_size = self.private_ipv4_prefix_size - self.driver.ex_expand_vlan(vlan) - - self.module.exit_json( - msg='Updated VLAN "{0}" in network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - - def state_readonly(self): - """ - Read the target VLAN's state. 
- """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if vlan: - self.module.exit_json( - vlan=vlan_to_dict(vlan), - changed=False - ) - else: - self.module.fail_json( - msg='VLAN "{0}" does not exist in network domain "{1}".'.format( - self.name, self.network_domain_selector - ) - ) - - def state_absent(self): - """ - Ensure that the target VLAN is not present. - """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if not vlan: - self.module.exit_json( - msg='VLAN "{0}" is absent from network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - changed=False - ) - - return - - if self.module.check_mode: - self.module.exit_json( - msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - - self._delete_vlan(vlan) - - self.module.exit_json( - msg='Deleted VLAN "{0}" from network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - changed=True - ) - - def _get_vlan(self, network_domain): - """ - Retrieve the target VLAN details from CloudControl. - - :param network_domain: The target network domain. - :return: The VLAN, or None if the target VLAN was not found. - :rtype: DimensionDataVlan - """ - - vlans = self.driver.ex_list_vlans( - location=self.location, - network_domain=network_domain - ) - matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] - if matching_vlans: - return matching_vlans[0] - - return None - - def _create_vlan(self, network_domain): - vlan = self.driver.ex_create_vlan( - network_domain, - self.name, - self.private_ipv4_base_address, - self.description, - self.private_ipv4_prefix_size - ) - - if self.wait: - vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL') - - return vlan - - def _delete_vlan(self, vlan): - try: - self.driver.ex_delete_vlan(vlan) - - # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). - if self.wait: - self._wait_for_vlan_state(vlan, 'NOT_FOUND') - - except DimensionDataAPIException as api_exception: - self.module.fail_json( - msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format( - vlan.id, api_exception.msg - ) - ) - - def _wait_for_vlan_state(self, vlan, state_to_wait_for): - network_domain = self._get_network_domain() - - wait_poll_interval = self.module.params['wait_poll_interval'] - wait_time = self.module.params['wait_time'] - - # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. - - try: - return self.driver.connection.wait_for_state( - state_to_wait_for, - self.driver.ex_get_vlan, - wait_poll_interval, - wait_time, - vlan - ) - - except DimensionDataAPIException as api_exception: - if api_exception.code != 'RESOURCE_NOT_FOUND': - raise - - return DimensionDataVlan( - id=vlan.id, - status='NOT_FOUND', - name='', - description='', - private_ipv4_range_address='', - private_ipv4_range_size=0, - ipv4_gateway='', - ipv6_range_address='', - ipv6_range_size=0, - ipv6_gateway='', - location=self.location, - network_domain=network_domain - ) - - def _get_network_domain(self): - """ - Retrieve the target network domain from the Cloud Control API. - - :return: The network domain. 
- """ - - try: - return self.get_network_domain( - self.network_domain_selector, self.location - ) - except UnknownNetworkError: - self.module.fail_json( - msg='Cannot find network domain "{0}" in datacenter "{1}".'.format( - self.network_domain_selector, self.location - ) - ) - - return None - - -class InvalidVlanChangeError(Exception): - """ - Error raised when an illegal change to VLAN state is attempted. - """ - - pass - - -class VlanDiff(object): - """ - Represents differences between VLAN information (from CloudControl) and module parameters. - """ - - def __init__(self, vlan, module_params): - """ - - :param vlan: The VLAN information from CloudControl. - :type vlan: DimensionDataVlan - :param module_params: The module parameters. - :type module_params: dict - """ - - self.vlan = vlan - self.module_params = module_params - - self.name_changed = module_params['name'] != vlan.name - self.description_changed = module_params['description'] != vlan.description - self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address - self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size - - # Is configured prefix size greater than or less than the actual prefix size? - private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size - self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0 - self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0 - - def has_changes(self): - """ - Does the VlanDiff represent any changes between the VLAN and module configuration? - - :return: True, if there are change changes; otherwise, False. - """ - - return self.needs_edit() or self.needs_expand() - - def ensure_legal_change(self): - """ - Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state. - - - private_ipv4_base_address cannot be changed - - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size - - :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state. - """ - - # Cannot change base address for private IPv4 network. - if self.private_ipv4_base_address_changed: - raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.') - - # Cannot shrink private IPv4 network (by increasing prefix size). - if self.private_ipv4_prefix_size_increased: - raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).') - - def needs_edit(self): - """ - Is an Edit operation required to resolve the differences between the VLAN information and the module parameters? - - :return: True, if an Edit operation is required; otherwise, False. - """ - - return self.name_changed or self.description_changed - - def needs_expand(self): - """ - Is an Expand operation required to resolve the differences between the VLAN information and the module parameters? - - The VLAN's network is expanded by reducing the size of its network prefix. - - :return: True, if an Expand operation is required; otherwise, False. 
- """ - - return self.private_ipv4_prefix_size_decreased - - -def vlan_to_dict(vlan): - return { - 'id': vlan.id, - 'name': vlan.name, - 'description': vlan.description, - 'location': vlan.location.id, - 'private_ipv4_base_address': vlan.private_ipv4_range_address, - 'private_ipv4_prefix_size': vlan.private_ipv4_range_size, - 'private_ipv4_gateway_address': vlan.ipv4_gateway, - 'ipv6_base_address': vlan.ipv6_range_address, - 'ipv6_prefix_size': vlan.ipv6_range_size, - 'ipv6_gateway_address': vlan.ipv6_gateway, - 'status': vlan.status - } - - -def main(): - module = DimensionDataVlanModule() - - if module.state == 'present': - module.state_present() - elif module.state == 'readonly': - module.state_readonly() - elif module.state == 'absent': - module.state_absent() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/heroku/heroku_collaborator.py b/plugins/modules/cloud/heroku/heroku_collaborator.py deleted file mode 100644 index bbc34fdb30..0000000000 --- a/plugins/modules/cloud/heroku/heroku_collaborator.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: heroku_collaborator -short_description: "Add or delete app collaborators on Heroku" -description: - - Manages collaborators for Heroku apps. - - If set to C(present) and heroku user is already collaborator, then do nothing. - - If set to C(present) and heroku user is not collaborator, then add user to app. - - If set to C(absent) and heroku user is collaborator, then delete user from app. -author: - - Marcel Arns (@marns93) -requirements: - - heroku3 -options: - api_key: - type: str - description: - - Heroku API key - apps: - type: list - elements: str - description: - - List of Heroku App names - required: true - suppress_invitation: - description: - - Suppress email invitation when creating collaborator - type: bool - default: "no" - user: - type: str - description: - - User ID or e-mail - required: true - state: - type: str - description: - - Create or remove the heroku collaborator - choices: ["present", "absent"] - default: "present" -notes: - - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) env variable can be used instead setting C(api_key). - - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"]. 
-''' - -EXAMPLES = ''' -- name: Create a heroku collaborator - community.general.heroku_collaborator: - api_key: YOUR_API_KEY - user: max.mustermann@example.com - apps: heroku-example-app - state: present - -- name: An example of using the module in loop - community.general.heroku_collaborator: - api_key: YOUR_API_KEY - user: '{{ item.user }}' - apps: '{{ item.apps | default(apps) }}' - suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' - state: '{{ item.state | default("present") }}' - with_items: - - { user: 'a.b@example.com' } - - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } - - { user: 'x.y@example.com', apps: ["heroku-example-app"] } -''' - -RETURN = ''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper - - -def add_or_delete_heroku_collaborator(module, client): - user = module.params['user'] - state = module.params['state'] - affected_apps = [] - result_state = False - - for app in module.params['apps']: - if app not in client.apps(): - module.fail_json(msg='App {0} does not exist'.format(app)) - - heroku_app = client.apps()[app] - - heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()] - - if state == 'absent' and user in heroku_collaborator_list: - if not module.check_mode: - heroku_app.remove_collaborator(user) - affected_apps += [app] - result_state = True - elif state == 'present' and user not in heroku_collaborator_list: - if not module.check_mode: - heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation']) - affected_apps += [app] - result_state = True - - return result_state, affected_apps - - -def main(): - argument_spec = HerokuHelper.heroku_argument_spec() - argument_spec.update( - user=dict(required=True, type='str'), - apps=dict(required=True, type='list', elements='str'), - suppress_invitation=dict(default=False, type='bool'), - state=dict(default='present', type='str', choices=['present', 'absent']), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = HerokuHelper(module).get_heroku_client() - - has_changed, msg = add_or_delete_heroku_collaborator(module, client) - module.exit_json(changed=has_changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/plugins/modules/cloud/huawei/hwc_ecs_instance.py deleted file mode 100644 index 3d4ba84b64..0000000000 --- a/plugins/modules/cloud/huawei/hwc_ecs_instance.py +++ /dev/null @@ -1,2135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_ecs_instance -description: - - instance management. -short_description: Creates a resource of Ecs/Instance in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. 
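# C(present) creates the ECS when it is missing and reconciles its attributes when it
# exists; C(absent) deletes it (see the main() function later in this file).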
-        type: str
-        choices: ['present', 'absent']
-        default: 'present'
-    timeouts:
-        description:
-            - The timeouts for each operation.
-        type: dict
-        suboptions:
-            create:
-                description:
-                    - The timeouts for create operation.
-                type: str
-                default: '30m'
-            update:
-                description:
-                    - The timeouts for update operation.
-                type: str
-                default: '30m'
-            delete:
-                description:
-                    - The timeouts for delete operation.
-                type: str
-                default: '30m'
-    availability_zone:
-        description:
-            - Specifies the name of the AZ where the ECS is located.
-        type: str
-        required: true
-    flavor_name:
-        description:
-            - Specifies the name of the system flavor.
-        type: str
-        required: true
-    image_id:
-        description:
-            - Specifies the ID of the system image.
-        type: str
-        required: true
-    name:
-        description:
-            - Specifies the ECS name. The value consists of 1 to 64
-              characters, including letters, digits, underscores C(_), hyphens
-              (-), and periods (.).
-        type: str
-        required: true
-    nics:
-        description:
-            - Specifies the NIC information of the ECS. The network of each
-              NIC must belong to the VPC specified by vpc_id. A
-              maximum of 12 NICs can be attached to an ECS.
-        type: list
-        elements: dict
-        required: true
-        suboptions:
-            ip_address:
-                description:
-                    - Specifies the IP address of the NIC. The value is an IPv4
-                      address. Its value must be an unused IP
-                      address in the network segment of the subnet.
-                type: str
-                required: true
-            subnet_id:
-                description:
-                    - Specifies the ID of the subnet.
-                type: str
-                required: true
-    root_volume:
-        description:
-            - Specifies the configuration of the ECS's system disks.
-        type: dict
-        required: true
-        suboptions:
-            volume_type:
-                description:
-                    - Specifies the ECS system disk type.
-                    - SATA is the common I/O disk type.
-                    - SAS is the high I/O disk type.
-                    - SSD is the ultra-high I/O disk type.
-                    - co-p1 is the high I/O (performance-optimized I) disk type.
-                    - uh-l1 is the ultra-high I/O (latency-optimized) disk type.
-                    - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1
-                      and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1
-                      disks.
-                type: str
-                required: true
-            size:
-                description:
-                    - Specifies the system disk size, in GB. The value range is
-                      1 to 1024. The system disk size must be
-                      greater than or equal to the minimum system disk size
-                      supported by the image (min_disk attribute of the image).
-                      If this parameter is not specified or is set to 0, the
-                      default system disk size is the minimum value of the
-                      system disk in the image (min_disk attribute of the
-                      image).
-                type: int
-                required: false
-            snapshot_id:
-                description:
-                    - Specifies the snapshot ID or ID of the original data disk
-                      contained in the full-ECS image.
-                type: str
-                required: false
-    vpc_id:
-        description:
-            - Specifies the ID of the VPC to which the ECS belongs.
-        type: str
-        required: true
-    admin_pass:
-        description:
-            - Specifies the initial login password of the administrator account
-              for logging in to an ECS using password authentication. The Linux
-              administrator is root, and the Windows administrator is
-              Administrator. The password consists of 8 to 26 characters and
-              must contain at least three of the following character types -
-              uppercase letters, lowercase letters, digits, and special
-              characters (!@$%^-_=+[{}]:,./?). The password
-              cannot contain the username or the username in reverse. The
-              Windows ECS password cannot contain the username, the username in
-              reverse, or more than two consecutive characters in the username.
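# A hypothetical compliant value (illustrative only): "Xy9@motke" - nine characters
# drawn from at least three of the required character classes.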
- type: str - required: false - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - elements: dict - required: false - suboptions: - volume_id: - description: - - Specifies the disk ID. - type: str - required: true - device: - description: - - Specifies the disk device name. - type: str - required: false - description: - description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. - type: str - required: false - eip_id: - description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be - assigned. - type: str - required: false - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - required: false - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - required: false - security_groups: - description: - - Specifies the security groups of the ECS. If this - parameter is left blank, the default security group is bound to - the ECS by default. - type: list - elements: str - required: false - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - required: false - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - required: false - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - required: false - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with - base64. The maximum size of the content to be injected (before - encoding) is 32 KB. For Linux ECSs, this parameter does not take - effect when adminPass is used. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create an ecs instance -- name: Create a vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create a subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: true - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a eip - hwc_vpc_eip: - dedicated_bandwidth: - charge_mode: "traffic" - name: "ansible_test_dedicated_bandwidth" - size: 1 - type: "5_bgp" - register: eip -- name: Create a disk - hwc_evs_disk: - availability_zone: "cn-north-1a" - name: "ansible_evs_disk_test" - volume_type: "SATA" - size: 10 - register: disk -- name: Create an instance - community.general.hwc_ecs_instance: - data_volumes: - - volume_id: "{{ disk.id }}" - enable_auto_recovery: false - eip_id: "{{ eip.id }}" - name: "ansible_ecs_instance_test" - availability_zone: "cn-north-1a" - nics: - - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" - - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.34" - server_tags: - my_server: "my_server" - image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892" - flavor_name: "s3.small.1" - vpc_id: "{{ vpc.id }}" - root_volume: - volume_type: "SAS" -''' - -RETURN = ''' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. 
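# The RETURN fields below mirror the module options, plus server-assigned
# attributes (config_drive, created, power_state, server_alias, status, ...).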
-        type: str
-        returned: success
-    flavor_name:
-        description:
-            - Specifies the name of the system flavor.
-        type: str
-        returned: success
-    image_id:
-        description:
-            - Specifies the ID of the system image.
-        type: str
-        returned: success
-    name:
-        description:
-            - Specifies the ECS name. The value consists of 1 to 64
-              characters, including letters, digits, underscores C(_), hyphens
-              (-), and periods (.).
-        type: str
-        returned: success
-    nics:
-        description:
-            - Specifies the NIC information of the ECS. The
-              network of the NIC must belong to the VPC specified by vpc_id. A
-              maximum of 12 NICs can be attached to an ECS.
-        type: list
-        returned: success
-        contains:
-            ip_address:
-                description:
-                    - Specifies the IP address of the NIC. The value is an IPv4
-                      address. Its value must be an unused IP
-                      address in the network segment of the subnet.
-                type: str
-                returned: success
-            subnet_id:
-                description:
-                    - Specifies the ID of the subnet.
-                type: str
-                returned: success
-            port_id:
-                description:
-                    - Specifies the port ID corresponding to the IP address.
-                type: str
-                returned: success
-    root_volume:
-        description:
-            - Specifies the configuration of the ECS's system disks.
-        type: dict
-        returned: success
-        contains:
-            volume_type:
-                description:
-                    - Specifies the ECS system disk type.
-                    - SATA is the common I/O disk type.
-                    - SAS is the high I/O disk type.
-                    - SSD is the ultra-high I/O disk type.
-                    - co-p1 is the high I/O (performance-optimized I) disk type.
-                    - uh-l1 is the ultra-high I/O (latency-optimized) disk type.
-                    - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1
-                      and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1
-                      disks.
-                type: str
-                returned: success
-            size:
-                description:
-                    - Specifies the system disk size, in GB. The value range is
-                      1 to 1024. The system disk size must be
-                      greater than or equal to the minimum system disk size
-                      supported by the image (min_disk attribute of the image).
-                      If this parameter is not specified or is set to 0, the
-                      default system disk size is the minimum value of the
-                      system disk in the image (min_disk attribute of the
-                      image).
-                type: int
-                returned: success
-            snapshot_id:
-                description:
-                    - Specifies the snapshot ID or ID of the original data disk
-                      contained in the full-ECS image.
-                type: str
-                returned: success
-            device:
-                description:
-                    - Specifies the disk device name.
-                type: str
-                returned: success
-            volume_id:
-                description:
-                    - Specifies the disk ID.
-                type: str
-                returned: success
-    vpc_id:
-        description:
-            - Specifies the ID of the VPC to which the ECS belongs.
-        type: str
-        returned: success
-    admin_pass:
-        description:
-            - Specifies the initial login password of the administrator account
-              for logging in to an ECS using password authentication. The Linux
-              administrator is root, and the Windows administrator is
-              Administrator. The password consists of 8 to 26 characters and
-              must contain at least three of the following character types -
-              uppercase letters, lowercase letters, digits, and special
-              characters (!@$%^-_=+[{}]:,./?). The password
-              cannot contain the username or the username in reverse. The
-              Windows ECS password cannot contain the username, the username in
-              reverse, or more than two consecutive characters in the username.
-        type: str
-        returned: success
-    data_volumes:
-        description:
-            - Specifies the data disks of the ECS instance.
-        type: list
-        returned: success
-        contains:
-            volume_id:
-                description:
-                    - Specifies the disk ID.
-                type: str
-                returned: success
-            device:
-                description:
-                    - Specifies the disk device name.
-                type: str
-                returned: success
-    description:
-        description:
-            - Specifies the description of an ECS, which is a null string by
-              default. Can contain a maximum of 85 characters. Cannot contain
-              special characters, such as < and >.
-        type: str
-        returned: success
-    eip_id:
-        description:
-            - Specifies the ID of the elastic IP address assigned to the ECS.
-              Only elastic IP addresses in the DOWN state can be assigned.
-        type: str
-        returned: success
-    enable_auto_recovery:
-        description:
-            - Specifies whether automatic recovery is enabled on the ECS.
-        type: bool
-        returned: success
-    enterprise_project_id:
-        description:
-            - Specifies the ID of the enterprise project to which the ECS
-              belongs.
-        type: str
-        returned: success
-    security_groups:
-        description:
-            - Specifies the security groups of the ECS. If this parameter is left
-              blank, the default security group is bound to the ECS by default.
-        type: list
-        returned: success
-    server_metadata:
-        description:
-            - Specifies the metadata of the ECS to be created.
-        type: dict
-        returned: success
-    server_tags:
-        description:
-            - Specifies the tags of an ECS. When you create ECSs, one ECS
-              supports up to 10 tags.
-        type: dict
-        returned: success
-    ssh_key_name:
-        description:
-            - Specifies the name of the SSH key used for logging in to the ECS.
-        type: str
-        returned: success
-    user_data:
-        description:
-            - Specifies the user data to be injected during the ECS creation
-              process. Text, text files, and gzip files can be injected.
-              The content to be injected must be encoded with base64. The maximum
-              size of the content to be injected (before encoding) is 32 KB. For
-              Linux ECSs, this parameter does not take effect when adminPass is
-              used.
-        type: str
-        returned: success
-    config_drive:
-        description:
-            - Specifies the configuration drive.
-        type: str
-        returned: success
-    created:
-        description:
-            - Specifies the time when an ECS was created.
-        type: str
-        returned: success
-    disk_config_type:
-        description:
-            - Specifies the disk configuration type. MANUAL means the image
-              space is not expanded. AUTO means the image space of the system
-              disk is expanded to the same size as the flavor.
-        type: str
-        returned: success
-    host_name:
-        description:
-            - Specifies the host name of the ECS.
-        type: str
-        returned: success
-    image_name:
-        description:
-            - Specifies the image name of the ECS.
-        type: str
-        returned: success
-    power_state:
-        description:
-            - Specifies the power status of the ECS.
-        type: int
-        returned: success
-    server_alias:
-        description:
-            - Specifies the ECS alias.
-        type: str
-        returned: success
-    status:
-        description:
-            - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
-              REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
-              and DELETED.
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='30m', type='str'), - update=dict(default='30m', type='str'), - delete=dict(default='30m', type='str'), - ), default=dict()), - availability_zone=dict(type='str', required=True), - flavor_name=dict(type='str', required=True), - image_id=dict(type='str', required=True), - name=dict(type='str', required=True), - nics=dict( - type='list', required=True, elements='dict', - options=dict( - ip_address=dict(type='str', required=True), - subnet_id=dict(type='str', required=True) - ), - ), - root_volume=dict(type='dict', required=True, options=dict( - volume_type=dict(type='str', required=True), - size=dict(type='int'), - snapshot_id=dict(type='str') - )), - vpc_id=dict(type='str', required=True), - admin_pass=dict(type='str', no_log=True), - data_volumes=dict(type='list', elements='dict', options=dict( - volume_id=dict(type='str', required=True), - device=dict(type='str') - )), - description=dict(type='str'), - eip_id=dict(type='str'), - enable_auto_recovery=dict(type='bool'), - enterprise_project_id=dict(type='str'), - security_groups=dict(type='list', elements='str'), - server_metadata=dict(type='dict'), - server_tags=dict(type='dict'), - ssh_key_name=dict(type='str'), - user_data=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "ecs") - - try: - _init(config) - is_exist = module.params['id'] - - result = None - changed = False - if module.params['state'] == 'present': - if not is_exist: - if not module.check_mode: - create(config) - changed = True - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - if not module.check_mode: - update(config, inputv, result) - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - raise Exception("Update resource failed, " - "some attributes are not updated") - - changed = True - - result['id'] = module.params.get('id') - else: - result = dict() - if is_exist: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def _init(config): - module = config.module - if module.params['id']: - return - - v = search_resource(config) - n = len(v) - if n > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) - for i in v - ])) - - if n == 1: - module.params['id'] = navigate_value(v[0], ["id"]) - - -def user_input_parameters(module): - return { - "admin_pass": module.params.get("admin_pass"), - "availability_zone": module.params.get("availability_zone"), - "data_volumes": module.params.get("data_volumes"), - "description": module.params.get("description"), - "eip_id": module.params.get("eip_id"), - 
"enable_auto_recovery": module.params.get("enable_auto_recovery"), - "enterprise_project_id": module.params.get("enterprise_project_id"), - "flavor_name": module.params.get("flavor_name"), - "image_id": module.params.get("image_id"), - "name": module.params.get("name"), - "nics": module.params.get("nics"), - "root_volume": module.params.get("root_volume"), - "security_groups": module.params.get("security_groups"), - "server_metadata": module.params.get("server_metadata"), - "server_tags": module.params.get("server_tags"), - "ssh_key_name": module.params.get("ssh_key_name"), - "user_data": module.params.get("user_data"), - "vpc_id": module.params.get("vpc_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - opts["ansible_module"] = module - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait(config, r, client, timeout) - - sub_job_identity = { - "job_type": "createSingleServer", - } - for item in navigate_value(obj, ["entities", "sub_jobs"]): - for k, v in sub_job_identity.items(): - if item[k] != v: - break - else: - obj = item - break - else: - raise Exception("Can't find the sub job") - module.params['id'] = navigate_value(obj, ["entities", "server_id"]) - - -def update(config, expect_state, current_state): - module = config.module - expect_state["current_state"] = current_state - current_state["current_state"] = current_state - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - client = config.client(get_region(module), "ecs", "project") - - params = build_delete_nics_parameters(expect_state) - params1 = build_delete_nics_parameters(current_state) - if params and are_different_dicts(params, params1): - r = send_delete_nics_request(module, params, client) - async_wait(config, r, client, timeout) - - params = build_set_auto_recovery_parameters(expect_state) - params1 = build_set_auto_recovery_parameters(current_state) - if params and are_different_dicts(params, params1): - send_set_auto_recovery_request(module, params, client) - - params = build_attach_nics_parameters(expect_state) - params1 = build_attach_nics_parameters(current_state) - if params and are_different_dicts(params, params1): - r = send_attach_nics_request(module, params, client) - async_wait(config, r, client, timeout) - - multi_invoke_delete_volume(config, expect_state, client, timeout) - - multi_invoke_attach_data_disk(config, expect_state, client, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) - - opts = user_input_parameters(module) - opts["ansible_module"] = module - - params = build_delete_parameters(opts) - if params: - r = send_delete_request(module, params, client) - async_wait(config, r, client, timeout) - - -def read_resource(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - - res = {} - - r = send_read_request(module, client) - preprocess_read_response(r) - res["read"] = fill_read_resp_body(r) - - r = send_read_auto_recovery_request(module, client) - res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r) - - return res, None - - -def preprocess_read_response(resp): - v = resp.get("os-extended-volumes:volumes_attached") - if v and isinstance(v, list): - for i 
in range(len(v)): - if v[i].get("bootIndex") == "0": - root_volume = v[i] - - if (i + 1) != len(v): - v[i] = v[-1] - - v.pop() - - resp["root_volume"] = root_volume - break - - v = resp.get("addresses") - if v: - rv = {} - eips = [] - for val in v.values(): - for item in val: - if item["OS-EXT-IPS:type"] == "floating": - eips.append(item) - else: - rv[item["OS-EXT-IPS:port_id"]] = item - - for item in eips: - k = item["OS-EXT-IPS:port_id"] - if k in rv: - rv[k]["eip_address"] = item.get("addr", "") - else: - rv[k] = item - item["eip_address"] = item.get("addr", "") - item["addr"] = "" - - resp["address"] = rv.values() - - -def build_state(opts, response, array_index): - states = flatten_options(response, array_index) - set_unreadable_options(opts, states) - adjust_options(opts, states) - return states - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["enterprise_project_id"]) - if v or v in [False, 0]: - query_params.append( - "enterprise_project_id=" + (str(v) if v else str(v).lower())) - - v = navigate_value(opts, ["name"]) - if v or v in [False, 0]: - query_params.append( - "name=" + (str(v) if v else str(v).lower())) - - query_link = "?limit=10&offset={offset}" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "cloudservers/detail" + query_link - - result = [] - p = {'offset': 1} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - adjust_list_resp(identity_obj, item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['offset'] += 1 - - return result - - -def build_delete_nics_parameters(opts): - params = dict() - - v = expand_delete_nics_nics(opts, None) - if not is_empty_value(v): - params["nics"] = v - - return params - - -def expand_delete_nics_nics(d, array_index): - cv = d["current_state"].get("nics") - if not cv: - return None - - val = cv - - ev = d.get("nics") - if ev: - m = [item.get("ip_address") for item in ev] - val = [item for item in cv if item.get("ip_address") not in m] - - r = [] - for item in val: - transformed = dict() - - v = item.get("port_id") - if not is_empty_value(v): - transformed["id"] = v - - if transformed: - r.append(transformed) - - return r - - -def send_delete_nics_request(module, params, client): - url = build_path(module, "cloudservers/{id}/nics/delete") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(delete_nics), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_set_auto_recovery_parameters(opts): - params = dict() - - v = expand_set_auto_recovery_support_auto_recovery(opts, None) - if v is not None: - params["support_auto_recovery"] = v - - return params - - -def expand_set_auto_recovery_support_auto_recovery(d, array_index): - v = navigate_value(d, ["enable_auto_recovery"], None) - return None if v is None else str(v).lower() - - -def send_set_auto_recovery_request(module, params, client): - url = build_path(module, "cloudservers/{id}/autorecovery") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = 
("module(hwc_ecs_instance): error running " - "api(set_auto_recovery), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["admin_pass"], None) - if not is_empty_value(v): - params["adminPass"] = v - - v = navigate_value(opts, ["availability_zone"], None) - if not is_empty_value(v): - params["availability_zone"] = v - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = expand_create_extendparam(opts, None) - if not is_empty_value(v): - params["extendparam"] = v - - v = navigate_value(opts, ["flavor_name"], None) - if not is_empty_value(v): - params["flavorRef"] = v - - v = navigate_value(opts, ["image_id"], None) - if not is_empty_value(v): - params["imageRef"] = v - - v = navigate_value(opts, ["ssh_key_name"], None) - if not is_empty_value(v): - params["key_name"] = v - - v = navigate_value(opts, ["server_metadata"], None) - if not is_empty_value(v): - params["metadata"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_create_nics(opts, None) - if not is_empty_value(v): - params["nics"] = v - - v = expand_create_publicip(opts, None) - if not is_empty_value(v): - params["publicip"] = v - - v = expand_create_root_volume(opts, None) - if not is_empty_value(v): - params["root_volume"] = v - - v = expand_create_security_groups(opts, None) - if not is_empty_value(v): - params["security_groups"] = v - - v = expand_create_server_tags(opts, None) - if not is_empty_value(v): - params["server_tags"] = v - - v = navigate_value(opts, ["user_data"], None) - if not is_empty_value(v): - params["user_data"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpcid"] = v - - if not params: - return params - - params = {"server": params} - - return params - - -def expand_create_extendparam(d, array_index): - r = dict() - - r["chargingMode"] = 0 - - v = navigate_value(d, ["enterprise_project_id"], array_index) - if not is_empty_value(v): - r["enterprise_project_id"] = v - - v = navigate_value(d, ["enable_auto_recovery"], array_index) - if not is_empty_value(v): - r["support_auto_recovery"] = v - - return r - - -def expand_create_nics(d, array_index): - new_ai = dict() - if array_index: - new_ai.update(array_index) - - req = [] - - v = navigate_value( - d, ["nics"], new_ai) - - if not v: - return req - n = len(v) - for i in range(n): - new_ai["nics"] = i - transformed = dict() - - v = navigate_value(d, ["nics", "ip_address"], new_ai) - if not is_empty_value(v): - transformed["ip_address"] = v - - v = navigate_value(d, ["nics", "subnet_id"], new_ai) - if not is_empty_value(v): - transformed["subnet_id"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_create_publicip(d, array_index): - r = dict() - - v = navigate_value(d, ["eip_id"], array_index) - if not is_empty_value(v): - r["id"] = v - - return r - - -def expand_create_root_volume(d, array_index): - r = dict() - - v = expand_create_root_volume_extendparam(d, array_index) - if not is_empty_value(v): - r["extendparam"] = v - - v = navigate_value(d, ["root_volume", "size"], array_index) - if not is_empty_value(v): - r["size"] = v - - v = navigate_value(d, ["root_volume", "volume_type"], array_index) - if not is_empty_value(v): - r["volumetype"] = v - - return r - - -def expand_create_root_volume_extendparam(d, array_index): - r = dict() - - v = navigate_value(d, 
["root_volume", "snapshot_id"], array_index) - if not is_empty_value(v): - r["snapshotId"] = v - - return r - - -def expand_create_security_groups(d, array_index): - v = d.get("security_groups") - if not v: - return None - - return [{"id": i} for i in v] - - -def expand_create_server_tags(d, array_index): - v = d.get("server_tags") - if not v: - return None - - return [{"key": k, "value": v1} for k, v1 in v.items()] - - -def send_create_request(module, params, client): - url = "cloudservers" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_attach_nics_parameters(opts): - params = dict() - - v = expand_attach_nics_nics(opts, None) - if not is_empty_value(v): - params["nics"] = v - - return params - - -def expand_attach_nics_nics(d, array_index): - ev = d.get("nics") - if not ev: - return None - - val = ev - - cv = d["current_state"].get("nics") - if cv: - m = [item.get("ip_address") for item in cv] - val = [item for item in ev if item.get("ip_address") not in m] - - r = [] - for item in val: - transformed = dict() - - v = item.get("ip_address") - if not is_empty_value(v): - transformed["ip_address"] = v - - v = item.get("subnet_id") - if not is_empty_value(v): - transformed["subnet_id"] = v - - if transformed: - r.append(transformed) - - return r - - -def send_attach_nics_request(module, params, client): - url = build_path(module, "cloudservers/{id}/nics") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(attach_nics), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_volume_request(module, params, client, info): - path_parameters = { - "volume_id": ["volume_id"], - } - data = dict((key, navigate_value(info, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(delete_volume), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_attach_data_disk_parameters(opts, array_index): - params = dict() - - v = expand_attach_data_disk_volume_attachment(opts, array_index) - if not is_empty_value(v): - params["volumeAttachment"] = v - - return params - - -def expand_attach_data_disk_volume_attachment(d, array_index): - r = dict() - - v = navigate_value(d, ["data_volumes", "device"], array_index) - if not is_empty_value(v): - r["device"] = v - - v = navigate_value(d, ["data_volumes", "volume_id"], array_index) - if not is_empty_value(v): - r["volumeId"] = v - - return r - - -def send_attach_data_disk_request(module, params, client): - url = build_path(module, "cloudservers/{id}/attachvolume") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(attach_data_disk), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_delete_parameters(opts): - params = dict() - - params["delete_publicip"] = False - - params["delete_volume"] = False - - v = expand_delete_servers(opts, None) - if not is_empty_value(v): - params["servers"] = v - - return params - - -def expand_delete_servers(d, array_index): - new_ai = dict() - if array_index: - new_ai.update(array_index) - - req = [] - - n = 1 - for i 
in range(n): - transformed = dict() - - v = expand_delete_servers_id(d, new_ai) - if not is_empty_value(v): - transformed["id"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_delete_servers_id(d, array_index): - return d["ansible_module"].params.get("id") - - -def send_delete_request(module, params, client): - url = "cloudservers/delete" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait(config, result, client, timeout): - module = config.module - - url = build_path(module, "jobs/{job_id}", result) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["SUCCESS"], - ["RUNNING", "INIT"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_ecs_instance): error " - "waiting to be done, error= %s" % str(ex)) - - -def multi_invoke_delete_volume(config, opts, client, timeout): - module = config.module - - opts1 = None - expect = opts["data_volumes"] - current = opts["current_state"]["data_volumes"] - if expect and current: - v = [i["volume_id"] for i in expect] - opts1 = { - "data_volumes": [ - i for i in current if i["volume_id"] not in v - ] - } - - loop_val = navigate_value(opts1, ["data_volumes"]) - if not loop_val: - return - - for i in range(len(loop_val)): - r = send_delete_volume_request(module, None, client, loop_val[i]) - async_wait(config, r, client, timeout) - - -def multi_invoke_attach_data_disk(config, opts, client, timeout): - module = config.module - - opts1 = opts - expect = opts["data_volumes"] - current = opts["current_state"]["data_volumes"] - if expect and current: - v = [i["volume_id"] for i in current] - opts1 = { - "data_volumes": [ - i for i in expect if i["volume_id"] not in v - ] - } - - loop_val = navigate_value(opts1, ["data_volumes"]) - if not loop_val: - return - - for i in range(len(loop_val)): - params = build_attach_data_disk_parameters(opts1, {"data_volumes": i}) - r = send_attach_data_disk_request(module, params, client) - async_wait(config, r, client, timeout) - - -def send_read_request(module, client): - url = build_path(module, "cloudservers/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["server"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") - - result["OS-EXT-AZ:availability_zone"] = body.get( - "OS-EXT-AZ:availability_zone") - - result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") - - result["OS-EXT-SRV-ATTR:instance_name"] = body.get( - "OS-EXT-SRV-ATTR:instance_name") - - result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") - - result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") - - v = fill_read_resp_address(body.get("address")) - result["address"] = v - - result["config_drive"] = body.get("config_drive") - - result["created"] = body.get("created") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - v = 
fill_read_resp_flavor(body.get("flavor")) - result["flavor"] = v - - result["id"] = body.get("id") - - v = fill_read_resp_image(body.get("image")) - result["image"] = v - - result["key_name"] = body.get("key_name") - - v = fill_read_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["name"] = body.get("name") - - v = fill_read_resp_os_extended_volumes_volumes_attached( - body.get("os-extended-volumes:volumes_attached")) - result["os-extended-volumes:volumes_attached"] = v - - v = fill_read_resp_root_volume(body.get("root_volume")) - result["root_volume"] = v - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - return result - - -def fill_read_resp_address(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id") - - val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type") - - val["addr"] = item.get("addr") - - result.append(val) - - return result - - -def fill_read_resp_flavor(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_read_resp_image(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_read_resp_metadata(value): - if not value: - return None - - result = dict() - - result["image_name"] = value.get("image_name") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def fill_read_resp_os_extended_volumes_volumes_attached(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["bootIndex"] = item.get("bootIndex") - - val["device"] = item.get("device") - - val["id"] = item.get("id") - - result.append(val) - - return result - - -def fill_read_resp_root_volume(value): - if not value: - return None - - result = dict() - - result["device"] = value.get("device") - - result["id"] = value.get("id") - - return result - - -def send_read_auto_recovery_request(module, client): - url = build_path(module, "cloudservers/{id}/autorecovery") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(read_auto_recovery), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def fill_read_auto_recovery_resp_body(body): - result = dict() - - result["support_auto_recovery"] = body.get("support_auto_recovery") - - return result - - -def flatten_options(response, array_index): - r = dict() - - v = navigate_value( - response, ["read", "OS-EXT-AZ:availability_zone"], array_index) - r["availability_zone"] = v - - v = navigate_value(response, ["read", "config_drive"], array_index) - r["config_drive"] = v - - v = navigate_value(response, ["read", "created"], array_index) - r["created"] = v - - v = flatten_data_volumes(response, array_index) - r["data_volumes"] = v - - v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index) - r["disk_config_type"] = v - - v = flatten_enable_auto_recovery(response, array_index) - r["enable_auto_recovery"] = v - - v = navigate_value( - response, ["read", "enterprise_project_id"], array_index) - r["enterprise_project_id"] = v - - v = navigate_value(response, ["read", "flavor", "id"], array_index) - r["flavor_name"] = v - - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) - r["host_name"] = v - - v = 
navigate_value(response, ["read", "image", "id"], array_index) - r["image_id"] = v - - v = navigate_value( - response, ["read", "metadata", "image_name"], array_index) - r["image_name"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = flatten_nics(response, array_index) - r["nics"] = v - - v = navigate_value( - response, ["read", "OS-EXT-STS:power_state"], array_index) - r["power_state"] = v - - v = flatten_root_volume(response, array_index) - r["root_volume"] = v - - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index) - r["server_alias"] = v - - v = flatten_server_tags(response, array_index) - r["server_tags"] = v - - v = navigate_value(response, ["read", "key_name"], array_index) - r["ssh_key_name"] = v - - v = navigate_value(response, ["read", "status"], array_index) - r["status"] = v - - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index) - r["user_data"] = v - - v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index) - r["vpc_id"] = v - - return r - - -def flatten_data_volumes(d, array_index): - v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"], - array_index) - if not v: - return None - n = len(v) - result = [] - - new_ai = dict() - if array_index: - new_ai.update(array_index) - - for i in range(n): - new_ai["read.os-extended-volumes:volumes_attached"] = i - - val = dict() - - v = navigate_value( - d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai) - val["device"] = v - - v = navigate_value( - d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai) - val["volume_id"] = v - - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if result else None - - -def flatten_enable_auto_recovery(d, array_index): - v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"], - array_index) - return v == "true" - - -def flatten_nics(d, array_index): - v = navigate_value(d, ["read", "address"], - array_index) - if not v: - return None - n = len(v) - result = [] - - new_ai = dict() - if array_index: - new_ai.update(array_index) - - for i in range(n): - new_ai["read.address"] = i - - val = dict() - - v = navigate_value(d, ["read", "address", "addr"], new_ai) - val["ip_address"] = v - - v = navigate_value( - d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai) - val["port_id"] = v - - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if result else None - - -def flatten_root_volume(d, array_index): - result = dict() - - v = navigate_value(d, ["read", "root_volume", "device"], array_index) - result["device"] = v - - v = navigate_value(d, ["read", "root_volume", "id"], array_index) - result["volume_id"] = v - - for v in result.values(): - if v is not None: - return result - return None - - -def flatten_server_tags(d, array_index): - v = navigate_value(d, ["read", "tags"], array_index) - if not v: - return None - - r = dict() - for item in v: - v1 = item.split("=") - if v1: - r[v1[0]] = v1[1] - return r - - -def adjust_options(opts, states): - adjust_data_volumes(opts, states) - - adjust_nics(opts, states) - - -def adjust_data_volumes(parent_input, parent_cur): - iv = parent_input.get("data_volumes") - if not (iv and isinstance(iv, list)): - return - - cv = parent_cur.get("data_volumes") - if not (cv and isinstance(cv, list)): - return - - lcv = len(cv) - result = [] - q = [] - for iiv in iv: - if len(q) == lcv: - break - - icv = None - for 
j in range(lcv): - if j in q: - continue - - icv = cv[j] - - if iiv["volume_id"] != icv["volume_id"]: - continue - - result.append(icv) - q.append(j) - break - else: - break - - if len(q) != lcv: - for i in range(lcv): - if i not in q: - result.append(cv[i]) - - if len(result) != lcv: - raise Exception("adjust property(data_volumes) failed, " - "the array number is not equal") - - parent_cur["data_volumes"] = result - - -def adjust_nics(parent_input, parent_cur): - iv = parent_input.get("nics") - if not (iv and isinstance(iv, list)): - return - - cv = parent_cur.get("nics") - if not (cv and isinstance(cv, list)): - return - - lcv = len(cv) - result = [] - q = [] - for iiv in iv: - if len(q) == lcv: - break - - icv = None - for j in range(lcv): - if j in q: - continue - - icv = cv[j] - - if iiv["ip_address"] != icv["ip_address"]: - continue - - result.append(icv) - q.append(j) - break - else: - break - - if len(q) != lcv: - for i in range(lcv): - if i not in q: - result.append(cv[i]) - - if len(result) != lcv: - raise Exception("adjust property(nics) failed, " - "the array number is not equal") - - parent_cur["nics"] = result - - -def set_unreadable_options(opts, states): - states["admin_pass"] = opts.get("admin_pass") - - states["eip_id"] = opts.get("eip_id") - - set_unread_nics( - opts.get("nics"), states.get("nics")) - - set_unread_root_volume( - opts.get("root_volume"), states.get("root_volume")) - - states["security_groups"] = opts.get("security_groups") - - states["server_metadata"] = opts.get("server_metadata") - - -def set_unread_nics(inputv, curv): - if not (inputv and isinstance(inputv, list)): - return - - if not (curv and isinstance(curv, list)): - return - - lcv = len(curv) - q = [] - for iv in inputv: - if len(q) == lcv: - break - - cv = None - for j in range(lcv): - if j in q: - continue - - cv = curv[j] - - if iv["ip_address"] != cv["ip_address"]: - continue - - q.append(j) - break - else: - continue - - cv["subnet_id"] = iv.get("subnet_id") - - -def set_unread_root_volume(inputv, curv): - if not (inputv and isinstance(inputv, dict)): - return - - if not (curv and isinstance(curv, dict)): - return - - curv["size"] = inputv.get("size") - - curv["snapshot_id"] = inputv.get("snapshot_id") - - curv["volume_type"] = inputv.get("volume_type") - - -def set_readonly_options(opts, states): - opts["config_drive"] = states.get("config_drive") - - opts["created"] = states.get("created") - - opts["disk_config_type"] = states.get("disk_config_type") - - opts["host_name"] = states.get("host_name") - - opts["image_name"] = states.get("image_name") - - set_readonly_nics( - opts.get("nics"), states.get("nics")) - - opts["power_state"] = states.get("power_state") - - set_readonly_root_volume( - opts.get("root_volume"), states.get("root_volume")) - - opts["server_alias"] = states.get("server_alias") - - opts["status"] = states.get("status") - - -def set_readonly_nics(inputv, curv): - if not (curv and isinstance(curv, list)): - return - - if not (inputv and isinstance(inputv, list)): - return - - lcv = len(curv) - q = [] - for iv in inputv: - if len(q) == lcv: - break - - cv = None - for j in range(lcv): - if j in q: - continue - - cv = curv[j] - - if iv["ip_address"] != cv["ip_address"]: - continue - - q.append(j) - break - else: - continue - - iv["port_id"] = cv.get("port_id") - - -def set_readonly_root_volume(inputv, curv): - if not (inputv and isinstance(inputv, dict)): - return - - if not (curv and isinstance(curv, dict)): - return - - inputv["device"] = curv.get("device") - - 
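# device and volume_id are assigned by the server rather than supplied as module
# input, so set_readonly_root_volume() copies them from the current state before
# the input and current dicts are compared.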
inputv["volume_id"] = curv.get("volume_id") - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["servers"], None) - - -def _build_identity_object(all_opts): - result = dict() - - result["OS-DCF:diskConfig"] = None - - v = navigate_value(all_opts, ["availability_zone"], None) - result["OS-EXT-AZ:availability_zone"] = v - - result["OS-EXT-SRV-ATTR:hostname"] = None - - result["OS-EXT-SRV-ATTR:instance_name"] = None - - v = navigate_value(all_opts, ["user_data"], None) - result["OS-EXT-SRV-ATTR:user_data"] = v - - result["OS-EXT-STS:power_state"] = None - - result["config_drive"] = None - - result["created"] = None - - v = navigate_value(all_opts, ["description"], None) - result["description"] = v - - v = navigate_value(all_opts, ["enterprise_project_id"], None) - result["enterprise_project_id"] = v - - v = expand_list_flavor(all_opts, None) - result["flavor"] = v - - result["id"] = None - - v = expand_list_image(all_opts, None) - result["image"] = v - - v = navigate_value(all_opts, ["ssh_key_name"], None) - result["key_name"] = v - - v = expand_list_metadata(all_opts, None) - result["metadata"] = v - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - result["status"] = None - - v = expand_list_tags(all_opts, None) - result["tags"] = v - - return result - - -def expand_list_flavor(d, array_index): - r = dict() - - v = navigate_value(d, ["flavor_name"], array_index) - r["id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_image(d, array_index): - r = dict() - - v = navigate_value(d, ["image_id"], array_index) - r["id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["vpc_id"], array_index) - r["vpc_id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_tags(d, array_index): - v = d.get("server_tags") - if not v: - return None - - return [k + "=" + v1 for k, v1 in v.items()] - - -def fill_list_resp_body(body): - result = dict() - - result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") - - result["OS-EXT-AZ:availability_zone"] = body.get( - "OS-EXT-AZ:availability_zone") - - result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") - - result["OS-EXT-SRV-ATTR:instance_name"] = body.get( - "OS-EXT-SRV-ATTR:instance_name") - - result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") - - result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") - - result["config_drive"] = body.get("config_drive") - - result["created"] = body.get("created") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - v = fill_list_resp_flavor(body.get("flavor")) - result["flavor"] = v - - result["id"] = body.get("id") - - v = fill_list_resp_image(body.get("image")) - result["image"] = v - - result["key_name"] = body.get("key_name") - - v = fill_list_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["name"] = body.get("name") - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - return result - - -def fill_list_resp_flavor(value): - if not value: - return None - - result = dict() - - result["id"] = 
value.get("id") - - return result - - -def fill_list_resp_image(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_list_resp_metadata(value): - if not value: - return None - - result = dict() - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def adjust_list_resp(opts, resp): - adjust_list_api_tags(opts, resp) - - -def adjust_list_api_tags(parent_input, parent_cur): - iv = parent_input.get("tags") - if not (iv and isinstance(iv, list)): - return - - cv = parent_cur.get("tags") - if not (cv and isinstance(cv, list)): - return - - result = [] - for iiv in iv: - if iiv not in cv: - break - - result.append(iiv) - - j = cv.index(iiv) - cv[j] = cv[-1] - cv.pop() - - if cv: - result.extend(cv) - parent_cur["tags"] = result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_evs_disk.py b/plugins/modules/cloud/huawei/hwc_evs_disk.py deleted file mode 100644 index 4aec1b94db..0000000000 --- a/plugins/modules/cloud/huawei/hwc_evs_disk.py +++ /dev/null @@ -1,1210 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_evs_disk -description: - - block storage management. -short_description: Creates a resource of Evs/Disk in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - required: true - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - required: true - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - required: true - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - required: false - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. 
- type: str - required: false - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - required: false - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - required: false - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - required: false - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - required: false - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - required: false - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - required: false - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# test create disk -- name: Create a disk - community.general.hwc_evs_disk: - availability_zone: "cn-north-1a" - name: "ansible_evs_disk_test" - volume_type: "SATA" - size: 10 -''' - -RETURN = ''' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - returned: success - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - returned: success - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - returned: success - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. 
- type: str - returned: success - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. - type: str - returned: success - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - returned: success - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - returned: success - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - returned: success - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - returned: success - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - returned: success - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - returned: success - attachments: - description: - - Specifies the disk attachment information. - type: complex - returned: success - contains: - attached_at: - description: - - Specifies the time when the disk was attached. Time - format is 'UTC YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - attachment_id: - description: - - Specifies the ID of the attachment information. - type: str - returned: success - device: - description: - - Specifies the device name. - type: str - returned: success - server_id: - description: - - Specifies the ID of the server to which the disk is - attached. - type: str - returned: success - backup_policy_id: - description: - - Specifies the backup policy ID. - type: str - returned: success - created_at: - description: - - Specifies the time when the disk was created. Time format is 'UTC - YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - is_bootable: - description: - - Specifies whether the disk is bootable. - type: bool - returned: success - is_readonly: - description: - - Specifies whether the disk is read-only or read/write. 
True - indicates that the disk is read-only. False indicates that the - disk is read/write. - type: bool - returned: success - source_volume_id: - description: - - Specifies the source disk ID. This parameter has a value if the - disk is created from a source disk. - type: str - returned: success - status: - description: - - Specifies the disk status. - type: str - returned: success - tags: - description: - - Specifies the disk tags. - type: dict - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='30m', type='str'), - update=dict(default='30m', type='str'), - delete=dict(default='30m', type='str'), - ), default=dict()), - availability_zone=dict(type='str', required=True), - name=dict(type='str', required=True), - volume_type=dict(type='str', required=True), - backup_id=dict(type='str'), - description=dict(type='str'), - enable_full_clone=dict(type='bool'), - enable_scsi=dict(type='bool'), - enable_share=dict(type='bool'), - encryption_id=dict(type='str'), - enterprise_project_id=dict(type='str'), - image_id=dict(type='str'), - size=dict(type='int'), - snapshot_id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "evs") - - try: - _init(config) - is_exist = module.params.get('id') - - result = None - changed = False - if module.params['state'] == 'present': - if not is_exist: - if not module.check_mode: - create(config) - changed = True - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - if not module.check_mode: - update(config, inputv, result) - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - raise Exception("Update resource failed, " - "some attributes are not updated") - - changed = True - - result['id'] = module.params.get('id') - else: - result = dict() - if is_exist: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def _init(config): - module = config.module - if module.params.get('id'): - return - - v = search_resource(config) - n = len(v) - if n > 1: - raise Exception("find more than one resources(%s)" % ", ".join([ - navigate_value(i, ["id"]) - for i in v - ])) - - if n == 1: - module.params['id'] = navigate_value(v[0], ["id"]) - - -def user_input_parameters(module): - return { - "availability_zone": module.params.get("availability_zone"), - "backup_id": module.params.get("backup_id"), - "description": module.params.get("description"), - "enable_full_clone": module.params.get("enable_full_clone"), - "enable_scsi": module.params.get("enable_scsi"), - "enable_share": module.params.get("enable_share"), - "encryption_id": module.params.get("encryption_id"), - "enterprise_project_id": 
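# For state=present, main() above converges in two passes: read and diff,
# update when different, then re-read and diff again to confirm the update
# actually took. Reduced to a sketch (read, update and build stand in for
# the module's read_resource/update/build_state):
def ensure_present(desired, read, update, build):
    current = build(read())
    if desired != current:
        update(desired, current)
        # re-read: a persisting diff means an attribute silently failed
        current = build(read())
        if desired != current:
            raise Exception("Update resource failed, "
                            "some attributes are not updated")
    return current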
module.params.get("enterprise_project_id"), - "image_id": module.params.get("image_id"), - "name": module.params.get("name"), - "size": module.params.get("size"), - "snapshot_id": module.params.get("snapshot_id"), - "volume_type": module.params.get("volume_type"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "volumev3", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - opts["ansible_module"] = module - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - - client1 = config.client(get_region(module), "volume", "project") - client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") - obj = async_wait(config, r, client1, timeout) - module.params['id'] = navigate_value(obj, ["entities", "volume_id"]) - - -def update(config, expect_state, current_state): - module = config.module - expect_state["current_state"] = current_state - current_state["current_state"] = current_state - client = config.client(get_region(module), "evs", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - - params = build_update_parameters(expect_state) - params1 = build_update_parameters(current_state) - if params and are_different_dicts(params, params1): - send_update_request(module, params, client) - - params = build_extend_disk_parameters(expect_state) - params1 = build_extend_disk_parameters(current_state) - if params and are_different_dicts(params, params1): - client1 = config.client(get_region(module), "evsv2.1", "project") - r = send_extend_disk_request(module, params, client1) - - client1 = config.client(get_region(module), "volume", "project") - client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") - async_wait(config, r, client1, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "evs", "project") - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) - - r = send_delete_request(module, None, client) - - client = config.client(get_region(module), "volume", "project") - client.endpoint = client.endpoint.replace("/v2/", "/v1/") - async_wait(config, r, client, timeout) - - -def read_resource(config): - module = config.module - client = config.client(get_region(module), "volumev3", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return res, None - - -def build_state(opts, response, array_index): - states = flatten_options(response, array_index) - set_unreadable_options(opts, states) - return states - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["enable_share"]) - if v or v in [False, 0]: - query_params.append( - "multiattach=" + (str(v) if v else str(v).lower())) - - v = navigate_value(opts, ["name"]) - if v or v in [False, 0]: - query_params.append( - "name=" + (str(v) if v else str(v).lower())) - - v = navigate_value(opts, ["availability_zone"]) - if v or v in [False, 0]: - query_params.append( - "availability_zone=" + (str(v) if v else str(v).lower())) - - query_link = "?limit=10&offset={start}" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "volumev3", "project") - opts = user_input_parameters(module) - name = module.params.get("name") - query_link = _build_query_link(opts) - link = "os-vendor-volumes/detail" 
+ query_link - - result = [] - p = {'start': 0} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - if name == item.get("name"): - result.append(item) - - if len(result) > 1: - break - - p['start'] += len(r) - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["availability_zone"], None) - if not is_empty_value(v): - params["availability_zone"] = v - - v = navigate_value(opts, ["backup_id"], None) - if not is_empty_value(v): - params["backup_id"] = v - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["enterprise_project_id"], None) - if not is_empty_value(v): - params["enterprise_project_id"] = v - - v = navigate_value(opts, ["image_id"], None) - if not is_empty_value(v): - params["imageRef"] = v - - v = expand_create_metadata(opts, None) - if not is_empty_value(v): - params["metadata"] = v - - v = navigate_value(opts, ["enable_share"], None) - if not is_empty_value(v): - params["multiattach"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["size"], None) - if not is_empty_value(v): - params["size"] = v - - v = navigate_value(opts, ["snapshot_id"], None) - if not is_empty_value(v): - params["snapshot_id"] = v - - v = navigate_value(opts, ["volume_type"], None) - if not is_empty_value(v): - params["volume_type"] = v - - if not params: - return params - - params = {"volume": params} - - return params - - -def expand_create_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["encryption_id"], array_index) - if not is_empty_value(v): - r["__system__cmkid"] = v - - v = expand_create_metadata_system_encrypted(d, array_index) - if not is_empty_value(v): - r["__system__encrypted"] = v - - v = expand_create_metadata_full_clone(d, array_index) - if not is_empty_value(v): - r["full_clone"] = v - - v = expand_create_metadata_hw_passthrough(d, array_index) - if not is_empty_value(v): - r["hw:passthrough"] = v - - return r - - -def expand_create_metadata_system_encrypted(d, array_index): - v = navigate_value(d, ["encryption_id"], array_index) - return "1" if v else "" - - -def expand_create_metadata_full_clone(d, array_index): - v = navigate_value(d, ["enable_full_clone"], array_index) - return "0" if v else "" - - -def expand_create_metadata_hw_passthrough(d, array_index): - v = navigate_value(d, ["enable_scsi"], array_index) - if v is None: - return v - return "true" if v else "false" - - -def send_create_request(module, params, client): - url = "cloudvolumes" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["description"], None) - if v is not None: - params["description"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - if not params: - return params - - params = {"volume": params} - - return params - - -def send_update_request(module, params, client): - url = build_path(module, "cloudvolumes/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def 
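# search_resource() above pages through ?limit=10&offset={start} until an
# empty page comes back, advancing the offset by the number of items
# received. The same loop in isolation (client stands in for the module's
# REST client and is assumed here to return the item list directly):
def list_all(client, base_url):
    items, start = [], 0
    while True:
        page = client.get("%s?limit=10&offset=%d" % (base_url, start)) or []
        if not page:
            break
        items.extend(page)
        start += len(page)
    return items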
send_delete_request(module, params, client): - url = build_path(module, "cloudvolumes/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_extend_disk_parameters(opts): - params = dict() - - v = expand_extend_disk_os_extend(opts, None) - if not is_empty_value(v): - params["os-extend"] = v - - return params - - -def expand_extend_disk_os_extend(d, array_index): - r = dict() - - v = navigate_value(d, ["size"], array_index) - if not is_empty_value(v): - r["new_size"] = v - - return r - - -def send_extend_disk_request(module, params, client): - url = build_path(module, "cloudvolumes/{id}/action") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(extend_disk), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait(config, result, client, timeout): - module = config.module - - path_parameters = { - "job_id": ["job_id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "jobs/{job_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["SUCCESS"], - ["RUNNING", "INIT"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_evs_disk): error " - "waiting to be done, error= %s" % str(ex)) - - -def send_read_request(module, client): - url = build_path(module, "os-vendor-volumes/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["volume"], None) - - -def fill_read_resp_body(body): - result = dict() - - v = fill_read_resp_attachments(body.get("attachments")) - result["attachments"] = v - - result["availability_zone"] = body.get("availability_zone") - - result["bootable"] = body.get("bootable") - - result["created_at"] = body.get("created_at") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - v = fill_read_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["multiattach"] = body.get("multiattach") - - result["name"] = body.get("name") - - result["size"] = body.get("size") - - result["snapshot_id"] = body.get("snapshot_id") - - result["source_volid"] = body.get("source_volid") - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata")) - result["volume_image_metadata"] = v - - result["volume_type"] = body.get("volume_type") - - return result - - -def fill_read_resp_attachments(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["attached_at"] = item.get("attached_at") - - val["attachment_id"] = item.get("attachment_id") - - val["device"] = item.get("device") - - val["server_id"] = item.get("server_id") - - result.append(val) - - return result - - -def fill_read_resp_metadata(value): - if not value: - return None - - result = dict() - - result["__system__cmkid"] 
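# async_wait() above delegates the polling itself to wait_to_finish() from
# hwc_utils: call the status callback until a terminal state, an unexpected
# state, or the timeout. A rough standalone sketch of that contract (not
# the hwc_utils implementation):
import time


def wait_until(done_states, pending_states, poll, timeout, interval=2.0):
    deadline = time.time() + timeout
    while time.time() < deadline:
        obj, status = poll()
        if status in done_states:
            return obj
        if status and status not in pending_states:
            raise Exception("unexpected status: %s" % status)
        time.sleep(interval)  # an empty status (transient error) retries too
    raise Exception("timed out waiting for the job to finish")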
= value.get("__system__cmkid") - - result["attached_mode"] = value.get("attached_mode") - - result["full_clone"] = value.get("full_clone") - - result["hw:passthrough"] = value.get("hw:passthrough") - - result["policy"] = value.get("policy") - - result["readonly"] = value.get("readonly") - - return result - - -def fill_read_resp_volume_image_metadata(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def flatten_options(response, array_index): - r = dict() - - v = flatten_attachments(response, array_index) - r["attachments"] = v - - v = navigate_value(response, ["read", "availability_zone"], array_index) - r["availability_zone"] = v - - v = navigate_value(response, ["read", "metadata", "policy"], array_index) - r["backup_policy_id"] = v - - v = navigate_value(response, ["read", "created_at"], array_index) - r["created_at"] = v - - v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = flatten_enable_full_clone(response, array_index) - r["enable_full_clone"] = v - - v = flatten_enable_scsi(response, array_index) - r["enable_scsi"] = v - - v = navigate_value(response, ["read", "multiattach"], array_index) - r["enable_share"] = v - - v = navigate_value( - response, ["read", "metadata", "__system__cmkid"], array_index) - r["encryption_id"] = v - - v = navigate_value( - response, ["read", "enterprise_project_id"], array_index) - r["enterprise_project_id"] = v - - v = navigate_value( - response, ["read", "volume_image_metadata", "id"], array_index) - r["image_id"] = v - - v = flatten_is_bootable(response, array_index) - r["is_bootable"] = v - - v = flatten_is_readonly(response, array_index) - r["is_readonly"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = navigate_value(response, ["read", "size"], array_index) - r["size"] = v - - v = navigate_value(response, ["read", "snapshot_id"], array_index) - r["snapshot_id"] = v - - v = navigate_value(response, ["read", "source_volid"], array_index) - r["source_volume_id"] = v - - v = navigate_value(response, ["read", "status"], array_index) - r["status"] = v - - v = navigate_value(response, ["read", "tags"], array_index) - r["tags"] = v - - v = navigate_value(response, ["read", "volume_type"], array_index) - r["volume_type"] = v - - return r - - -def flatten_attachments(d, array_index): - v = navigate_value(d, ["read", "attachments"], - array_index) - if not v: - return None - n = len(v) - result = [] - - new_ai = dict() - if array_index: - new_ai.update(array_index) - - for i in range(n): - new_ai["read.attachments"] = i - - val = dict() - - v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai) - val["attached_at"] = v - - v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai) - val["attachment_id"] = v - - v = navigate_value(d, ["read", "attachments", "device"], new_ai) - val["device"] = v - - v = navigate_value(d, ["read", "attachments", "server_id"], new_ai) - val["server_id"] = v - - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if result else None - - -def flatten_enable_full_clone(d, array_index): - v = navigate_value(d, ["read", "metadata", "full_clone"], - array_index) - if v is None: - return v - return True if v == "0" else False - - -def flatten_enable_scsi(d, array_index): - v = navigate_value(d, ["read", "metadata", "hw:passthrough"], - array_index) - if v is None: - return v - return True if v in ["true", "True"] 
else False - - -def flatten_is_bootable(d, array_index): - v = navigate_value(d, ["read", "bootable"], array_index) - if v is None: - return v - return True if v in ["true", "True"] else False - - -def flatten_is_readonly(d, array_index): - v = navigate_value(d, ["read", "metadata", "readonly"], - array_index) - if v is None: - return v - return True if v in ["true", "True"] else False - - -def set_unreadable_options(opts, states): - states["backup_id"] = opts.get("backup_id") - - -def set_readonly_options(opts, states): - opts["attachments"] = states.get("attachments") - - opts["backup_policy_id"] = states.get("backup_policy_id") - - opts["created_at"] = states.get("created_at") - - opts["is_bootable"] = states.get("is_bootable") - - opts["is_readonly"] = states.get("is_readonly") - - opts["source_volume_id"] = states.get("source_volume_id") - - opts["status"] = states.get("status") - - opts["tags"] = states.get("tags") - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["volumes"], None) - - -def expand_list_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["encryption_id"], array_index) - r["__system__cmkid"] = v - - r["attached_mode"] = None - - v = navigate_value(d, ["enable_full_clone"], array_index) - r["full_clone"] = v - - v = navigate_value(d, ["enable_scsi"], array_index) - r["hw:passthrough"] = v - - r["policy"] = None - - r["readonly"] = None - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_volume_image_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["image_id"], array_index) - r["id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def fill_list_resp_body(body): - result = dict() - - v = fill_list_resp_attachments(body.get("attachments")) - result["attachments"] = v - - result["availability_zone"] = body.get("availability_zone") - - result["bootable"] = body.get("bootable") - - result["created_at"] = body.get("created_at") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - v = fill_list_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["multiattach"] = body.get("multiattach") - - result["name"] = body.get("name") - - result["size"] = body.get("size") - - result["snapshot_id"] = body.get("snapshot_id") - - result["source_volid"] = body.get("source_volid") - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata")) - result["volume_image_metadata"] = v - - result["volume_type"] = body.get("volume_type") - - return result - - -def fill_list_resp_attachments(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["attached_at"] = item.get("attached_at") - - val["attachment_id"] = item.get("attachment_id") - - val["device"] = item.get("device") - - val["server_id"] = item.get("server_id") - - result.append(val) - - return result - - -def fill_list_resp_metadata(value): - if not value: - return None - - result = dict() - - result["__system__cmkid"] = value.get("__system__cmkid") - - result["attached_mode"] = value.get("attached_mode") - - result["full_clone"] = value.get("full_clone") - - 
result["hw:passthrough"] = value.get("hw:passthrough") - - result["policy"] = value.get("policy") - - result["readonly"] = value.get("readonly") - - return result - - -def fill_list_resp_volume_image_metadata(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_network_vpc.py b/plugins/modules/cloud/huawei/hwc_network_vpc.py deleted file mode 100644 index f53369adcd..0000000000 --- a/plugins/modules/cloud/huawei/hwc_network_vpc.py +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2018 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_network_vpc -description: - - Represents an vpc resource. -short_description: Creates a Huawei Cloud VPC -author: Huawei Inc. (@huaweicloud) -requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in vpc. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeout for create operation. - type: str - default: '15m' - update: - description: - - The timeout for update operation. - type: str - default: '15m' - delete: - description: - - The timeout for delete operation. - type: str - default: '15m' - name: - description: - - The name of vpc. - type: str - required: true - cidr: - description: - - The range of available subnets in the vpc. - type: str - required: true -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -- name: Create a vpc - community.general.hwc_network_vpc: - identity_endpoint: "{{ identity_endpoint }}" - user: "{{ user }}" - password: "{{ password }}" - domain: "{{ domain }}" - project: "{{ project }}" - region: "{{ region }}" - name: "vpc_1" - cidr: "192.168.100.0/24" - state: present -''' - -RETURN = ''' - id: - description: - - the id of vpc. - type: str - returned: success - name: - description: - - the name of vpc. - type: str - returned: success - cidr: - description: - - the range of available subnets in the vpc. - type: str - returned: success - status: - description: - - the status of vpc. - type: str - returned: success - routes: - description: - - the route information. - type: complex - returned: success - contains: - destination: - description: - - the destination network segment of a route. - type: str - returned: success - next_hop: - description: - - the next hop of a route. If the route type is peering, - it will provide VPC peering connection ID. - type: str - returned: success - enable_shared_snat: - description: - - show whether the shared snat is enabled. 
- type: bool - returned: success -''' - -############################################################################### -# Imports -############################################################################### - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, - HwcClientException404, HwcModule, - are_different_dicts, is_empty_value, - wait_to_finish, get_region, - build_path, navigate_value) -import re - -############################################################################### -# Main -############################################################################### - - -def main(): - """Main function""" - - module = HwcModule( - argument_spec=dict( - state=dict( - default='present', choices=['present', 'absent'], type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - update=dict(default='15m', type='str'), - delete=dict(default='15m', type='str'), - ), default=dict()), - name=dict(required=True, type='str'), - cidr=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - config = Config(module, 'vpc') - - state = module.params['state'] - - if (not module.params.get("id")) and module.params.get("name"): - module.params['id'] = get_id_by_name(config) - - fetch = None - link = self_link(module) - # the link will include Nones if required format parameters are missed - if not re.search('/None/|/None$', link): - client = config.client(get_region(module), "vpc", "project") - fetch = fetch_resource(module, client, link) - if fetch: - fetch = fetch.get('vpc') - changed = False - - if fetch: - if state == 'present': - expect = _get_editable_properties(module) - current_state = response_to_hash(module, fetch) - current = {"cidr": current_state["cidr"]} - if are_different_dicts(expect, current): - if not module.check_mode: - fetch = update(config, self_link(module)) - fetch = response_to_hash(module, fetch.get('vpc')) - changed = True - else: - fetch = current_state - else: - if not module.check_mode: - delete(config, self_link(module)) - fetch = {} - changed = True - else: - if state == 'present': - if not module.check_mode: - fetch = create(config, "vpcs") - fetch = response_to_hash(module, fetch.get('vpc')) - changed = True - else: - fetch = {} - - fetch.update({'changed': changed}) - - module.exit_json(**fetch) - - -def create(config, link): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - r = None - try: - r = client.post(link, resource_to_create(module)) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error creating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - wait_done = wait_for_operation(config, 'create', r) - v = "" - try: - v = navigate_value(wait_done, ['vpc', 'id']) - except Exception as ex: - module.fail_json(msg=str(ex)) - - url = build_path(module, 'vpcs/{op_id}', {'op_id': v}) - return fetch_resource(module, client, url) - - -def update(config, link): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - r = None - try: - r = client.put(link, resource_to_update(module)) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error updating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - wait_for_operation(config, 'update', r) - - return fetch_resource(module, client, link) - - -def delete(config, link): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - 
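# The re.search('/None/|/None$', link) check in main() above guards against
# build_path() interpolating a missing parameter as the literal text 'None',
# which would otherwise turn into a GET for a nonexistent resource:
import re


def looks_incomplete(link):
    return bool(re.search('/None/|/None$', link))


assert looks_incomplete('vpcs/None')
assert not looks_incomplete('vpcs/64a1c4ee-19e8-4d84-9e3c-0ad5a3e4ecb4')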
try: - client.delete(link) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error deleting " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - wait_for_delete(module, client, link) - - -def fetch_resource(module, client, link): - try: - return client.get(link) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error fetching " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - -def get_id_by_name(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - name = module.params.get("name") - link = "vpcs" - query_link = "?marker={marker}&limit=10" - link += query_link - not_format_keys = re.findall("={marker}", link) - none_values = re.findall("=None", link) - - if not (not_format_keys or none_values): - r = None - try: - r = client.get(link) - except Exception: - pass - if r is None: - return None - r = r.get('vpcs', []) - ids = [ - i.get('id') for i in r if i.get('name', '') == name - ] - if not ids: - return None - elif len(ids) == 1: - return ids[0] - else: - module.fail_json( - msg="Multiple resources with same name are found.") - elif none_values: - module.fail_json( - msg="Can not find id by name because url includes None.") - else: - p = {'marker': ''} - ids = set() - while True: - r = None - try: - r = client.get(link.format(**p)) - except Exception: - pass - if r is None: - break - r = r.get('vpcs', []) - if r == []: - break - for i in r: - if i.get('name') == name: - ids.add(i.get('id')) - if len(ids) >= 2: - module.fail_json( - msg="Multiple resources with same name are found.") - - p['marker'] = r[-1].get('id') - - return ids.pop() if ids else None - - -def self_link(module): - return build_path(module, "vpcs/{id}") - - -def resource_to_create(module): - params = dict() - - v = module.params.get('cidr') - if not is_empty_value(v): - params["cidr"] = v - - v = module.params.get('name') - if not is_empty_value(v): - params["name"] = v - - if not params: - return params - - params = {"vpc": params} - - return params - - -def resource_to_update(module): - params = dict() - - v = module.params.get('cidr') - if not is_empty_value(v): - params["cidr"] = v - - if not params: - return params - - params = {"vpc": params} - - return params - - -def _get_editable_properties(module): - return { - "cidr": module.params.get("cidr"), - } - - -def response_to_hash(module, response): - """ Remove unnecessary properties from the response. - This is for doing comparisons with Ansible's current parameters. 
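# get_id_by_name() above pages the VPC list with a marker cursor: each
# request passes the id of the last item of the previous page and the scan
# stops at the first empty page (failing if two VPCs share the name). The
# cursor loop as a generator sketch (iter_vpcs() is illustrative only):
def iter_vpcs(client):
    marker = ''
    while True:
        r = client.get("vpcs?marker=%s&limit=10" % marker) or {}
        page = r.get('vpcs', [])
        if not page:
            return
        for vpc in page:
            yield vpc
        marker = page[-1].get('id')  # the next page starts after the last id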
- """ - return { - u'id': response.get(u'id'), - u'name': response.get(u'name'), - u'cidr': response.get(u'cidr'), - u'status': response.get(u'status'), - u'routes': VpcRoutesArray( - response.get(u'routes', []), module).from_response(), - u'enable_shared_snat': response.get(u'enable_shared_snat') - } - - -def wait_for_operation(config, op_type, op_result): - module = config.module - op_id = "" - try: - op_id = navigate_value(op_result, ['vpc', 'id']) - except Exception as ex: - module.fail_json(msg=str(ex)) - - url = build_path(module, "vpcs/{op_id}", {'op_id': op_id}) - timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m')) - states = { - 'create': { - 'allowed': ['CREATING', 'DONW', 'OK'], - 'complete': ['OK'], - }, - 'update': { - 'allowed': ['PENDING_UPDATE', 'DONW', 'OK'], - 'complete': ['OK'], - } - } - - return wait_for_completion(url, timeout, states[op_type]['allowed'], - states[op_type]['complete'], config) - - -def wait_for_completion(op_uri, timeout, allowed_states, - complete_states, config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - def _refresh_status(): - r = None - try: - r = fetch_resource(module, client, op_uri) - except Exception: - return None, "" - - status = "" - try: - status = navigate_value(r, ['vpc', 'status']) - except Exception: - return None, "" - - return r, status - - try: - return wait_to_finish(complete_states, allowed_states, - _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def wait_for_delete(module, client, link): - - def _refresh_status(): - try: - client.get(link) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) - try: - return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -class VpcRoutesArray(object): - def __init__(self, request, module): - self.module = module - if request: - self.request = request - else: - self.request = [] - - def to_request(self): - items = [] - for item in self.request: - items.append(self._request_for_item(item)) - return items - - def from_response(self): - items = [] - for item in self.request: - items.append(self._response_from_item(item)) - return items - - def _request_for_item(self, item): - return { - u'destination': item.get('destination'), - u'nexthop': item.get('next_hop') - } - - def _response_from_item(self, item): - return { - u'destination': item.get(u'destination'), - u'next_hop': item.get(u'nexthop') - } - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_smn_topic.py b/plugins/modules/cloud/huawei/hwc_smn_topic.py deleted file mode 100644 index f7fb4faea4..0000000000 --- a/plugins/modules/cloud/huawei/hwc_smn_topic.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_smn_topic -description: - - Represents a SMN notification topic resource. 
-short_description: Creates a resource of SMNTopic in Huawei Cloud
-author: Huawei Inc. (@huaweicloud)
-requirements:
-    - requests >= 2.18.4
-    - keystoneauth1 >= 3.6.0
-options:
-    state:
-        description:
-            - Whether the given object should exist in Huawei Cloud.
-        type: str
-        choices: ['present', 'absent']
-        default: 'present'
-    display_name:
-        description:
-            - Topic display name, which is presented as the name of the email
-              sender in an email message. The topic display name contains a
-              maximum of 192 bytes.
-        type: str
-        required: false
-    name:
-        description:
-            - Name of the topic to be created. The topic name is a string of 1
-              to 256 characters. It must contain upper- or lower-case letters,
-              digits, hyphens (-), and underscores C(_), and must start with a
-              letter or digit.
-        type: str
-        required: true
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-- name: Create an SMN topic
-  community.general.hwc_smn_topic:
-    identity_endpoint: "{{ identity_endpoint }}"
-    user_name: "{{ user_name }}"
-    password: "{{ password }}"
-    domain_name: "{{ domain_name }}"
-    project_name: "{{ project_name }}"
-    region: "{{ region }}"
-    name: "ansible_smn_topic_test"
-    state: present
-'''
-
-RETURN = '''
-create_time:
-    description:
-        - Time when the topic was created.
-    returned: success
-    type: str
-display_name:
-    description:
-        - Topic display name, which is presented as the name of the email
-          sender in an email message. The topic display name contains a
-          maximum of 192 bytes.
-    returned: success
-    type: str
-name:
-    description:
-        - Name of the topic to be created. The topic name is a string of 1
-          to 256 characters. It must contain upper- or lower-case letters,
-          digits, hyphens (-), and underscores C(_), and must start with a
-          letter or digit.
-    returned: success
-    type: str
-push_policy:
-    description:
-        - Message pushing policy. 0 indicates that the message sending
-          fails and the message is cached in the queue. 1 indicates that
-          the failed message is discarded.
-    returned: success
-    type: int
-topic_urn:
-    description:
-        - Resource identifier of a topic, which is unique.
-    returned: success
-    type: str
-update_time:
-    description:
-        - Time when the topic was updated.
- returned: success - type: str -''' - -############################################################################### -# Imports -############################################################################### - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, - HwcModule, navigate_value, - are_different_dicts, is_empty_value, - build_path, get_region) -import re - -############################################################################### -# Main -############################################################################### - - -def main(): - """Main function""" - - module = HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - display_name=dict(type='str'), - name=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - - config = Config(module, "smn") - - state = module.params['state'] - - if not module.params.get("id"): - module.params['id'] = get_resource_id(config) - - fetch = None - link = self_link(module) - # the link will include Nones if required format parameters are missed - if not re.search('/None/|/None$', link): - client = config.client(get_region(module), "smn", "project") - fetch = fetch_resource(module, client, link) - changed = False - - if fetch: - if state == 'present': - expect = _get_resource_editable_properties(module) - current_state = response_to_hash(module, fetch) - current = {'display_name': current_state['display_name']} - if are_different_dicts(expect, current): - if not module.check_mode: - fetch = update(config) - fetch = response_to_hash(module, fetch) - changed = True - else: - fetch = current_state - else: - if not module.check_mode: - delete(config) - fetch = {} - changed = True - else: - if state == 'present': - if not module.check_mode: - fetch = create(config) - fetch = response_to_hash(module, fetch) - changed = True - else: - fetch = {} - - fetch.update({'changed': changed}) - - module.exit_json(**fetch) - - -def create(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = "notifications/topics" - r = None - try: - r = client.post(link, create_resource_opts(module)) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error creating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - return get_resource(config, r) - - -def update(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = self_link(module) - try: - client.put(link, update_resource_opts(module)) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error updating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - return fetch_resource(module, client, link) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = self_link(module) - try: - client.delete(link) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error deleting " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - -def fetch_resource(module, client, link): - try: - return client.get(link) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error fetching " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - -def get_resource(config, result): - module = config.module - client = config.client(get_region(module), "smn", "project") - - v = "" - try: - v = navigate_value(result, ['topic_urn']) 
-    except Exception as ex:
-        module.fail_json(msg=str(ex))
-
-    d = {'topic_urn': v}
-    url = build_path(module, 'notifications/topics/{topic_urn}', d)
-
-    return fetch_resource(module, client, url)
-
-
-def get_resource_id(config):
-    module = config.module
-    client = config.client(get_region(module), "smn", "project")
-
-    link = "notifications/topics"
-    query_link = "?offset={offset}&limit=10"
-    link += query_link
-
-    p = {'offset': 0}
-    v = module.params.get('name')
-    ids = set()
-    while True:
-        r = None
-        try:
-            r = client.get(link.format(**p))
-        except Exception:
-            pass
-        if r is None:
-            break
-        r = r.get('topics', [])
-        if r == []:
-            break
-        for i in r:
-            if i.get('name') == v:
-                ids.add(i.get('topic_urn'))
-                if len(ids) >= 2:
-                    module.fail_json(msg="Multiple resources are found")
-
-        p['offset'] += 1
-
-    return ids.pop() if ids else None
-
-
-def self_link(module):
-    return build_path(module, "notifications/topics/{id}")
-
-
-def create_resource_opts(module):
-    params = dict()
-
-    v = module.params.get('display_name')
-    if not is_empty_value(v):
-        params["display_name"] = v
-
-    v = module.params.get('name')
-    if not is_empty_value(v):
-        params["name"] = v
-
-    return params
-
-
-def update_resource_opts(module):
-    params = dict()
-
-    v = module.params.get('display_name')
-    if not is_empty_value(v):
-        params["display_name"] = v
-
-    return params
-
-
-def _get_resource_editable_properties(module):
-    return {
-        "display_name": module.params.get("display_name"),
-    }
-
-
-def response_to_hash(module, response):
-    """Remove unnecessary properties from the response.
-    This is for doing comparisons with Ansible's current parameters.
-    """
-    return {
-        u'create_time': response.get(u'create_time'),
-        u'display_name': response.get(u'display_name'),
-        u'name': response.get(u'name'),
-        u'push_policy': _push_policy_convert_from_response(
-            response.get('push_policy')),
-        u'topic_urn': response.get(u'topic_urn'),
-        u'update_time': response.get(u'update_time')
-    }
-
-
-def _push_policy_convert_from_response(value):
-    return {
-        0: "the message sending fails and is cached in the queue",
-        1: "the failed message is discarded",
-    }.get(int(value))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/plugins/modules/cloud/huawei/hwc_vpc_eip.py
deleted file mode 100644
index b53395f87a..0000000000
--- a/plugins/modules/cloud/huawei/hwc_vpc_eip.py
+++ /dev/null
@@ -1,877 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_eip
-description:
-    - Elastic IP management.
-short_description: Creates a resource of Vpc/EIP in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
-    - keystoneauth1 >= 3.6.0
-options:
-    state:
-        description:
-            - Whether the given object should exist in Huawei Cloud.
-        type: str
-        choices: ['present', 'absent']
-        default: 'present'
-    timeouts:
-        description:
-            - The timeouts for each operation.
-        type: dict
-        suboptions:
-            create:
-                description:
-                    - The timeouts for create operation.
-                type: str
-                default: '5m'
-            update:
-                description:
-                    - The timeouts for update operation.
-                type: str
-                default: '5m'
-    type:
-        description:
-            - Specifies the EIP type.
-        type: str
-        required: true
-    dedicated_bandwidth:
-        description:
-            - Specifies the dedicated bandwidth object.
-        type: dict
-        required: false
-        suboptions:
-            charge_mode:
-                description:
-                    - Specifies whether the bandwidth is billed by traffic or
-                      by bandwidth size. The value can be bandwidth or traffic.
-                      If this parameter is left blank or is an empty string,
-                      the default value bandwidth is used. For IPv6 addresses,
-                      the default parameter value is bandwidth outside China
-                      and is traffic in China.
-                type: str
-                required: true
-            name:
-                description:
-                    - Specifies the bandwidth name. The value is a string of 1
-                      to 64 characters that can contain letters, digits,
-                      underscores C(_), hyphens (-), and periods (.).
-                type: str
-                required: true
-            size:
-                description:
-                    - Specifies the bandwidth size. The value ranges from 1
-                      Mbit/s to 2000 Mbit/s by default. (The specific range may
-                      vary depending on the configuration in each region. You
-                      can see the bandwidth range of each region on the
-                      management console.) The minimum unit for bandwidth
-                      adjustment varies depending on the bandwidth range. The
-                      details are as follows.
-                    - The minimum unit is 1 Mbit/s if the allowed bandwidth
-                      size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
-                      included).
-                    - The minimum unit is 50 Mbit/s if the allowed bandwidth
-                      size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000
-                      Mbit/s included).
-                    - The minimum unit is 500 Mbit/s if the allowed bandwidth
-                      size is greater than 1000 Mbit/s.
-                type: int
-                required: true
-    enterprise_project_id:
-        description:
-            - Specifies the enterprise project ID.
-        type: str
-        required: false
-    ip_version:
-        description:
-            - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
-              parameter is left blank, an IPv4 address will be assigned.
-        type: int
-        required: false
-    ipv4_address:
-        description:
-            - Specifies the obtained IPv4 EIP. The system automatically assigns
-              an EIP if you do not specify it.
-        type: str
-        required: false
-    port_id:
-        description:
-            - Specifies the port ID. This parameter is returned only when a
-              private IP address is bound with the EIP.
-        type: str
-        required: false
-    shared_bandwidth_id:
-        description:
-            - Specifies the ID of shared bandwidth.
-        type: str
-        required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create an eip and bind it to a port
-- name: Create vpc
-  hwc_network_vpc:
-    cidr: "192.168.100.0/24"
-    name: "ansible_network_vpc_test"
-  register: vpc
-- name: Create subnet
-  hwc_vpc_subnet:
-    gateway_ip: "192.168.100.32"
-    name: "ansible_network_subnet_test"
-    dhcp_enable: True
-    vpc_id: "{{ vpc.id }}"
-    cidr: "192.168.100.0/26"
-  register: subnet
-- name: Create a port
-  hwc_vpc_port:
-    subnet_id: "{{ subnet.id }}"
-    ip_address: "192.168.100.33"
-  register: port
-- name: Create an eip and bind it to a port
-  community.general.hwc_vpc_eip:
-    type: "5_bgp"
-    dedicated_bandwidth:
-      charge_mode: "traffic"
-      name: "ansible_test_dedicated_bandwidth"
-      size: 1
-    port_id: "{{ port.id }}"
-'''
-
-RETURN = '''
-    type:
-        description:
-            - Specifies the EIP type.
-        type: str
-        returned: success
-    dedicated_bandwidth:
-        description:
-            - Specifies the dedicated bandwidth object.
-        type: dict
-        returned: success
-        contains:
-            charge_mode:
-                description:
-                    - Specifies whether the bandwidth is billed by traffic or
-                      by bandwidth size. The value can be bandwidth or traffic.
-                      If this parameter is left blank or is an empty string,
-                      the default value bandwidth is used. For IPv6 addresses,
-                      the default parameter value is bandwidth outside China
-                      and is traffic in China.
-                type: str
-                returned: success
-            name:
-                description:
-                    - Specifies the bandwidth name. The value is a string of 1
-                      to 64 characters that can contain letters, digits,
-                      underscores C(_), hyphens (-), and periods (.).
-                type: str
-                returned: success
-            size:
-                description:
-                    - Specifies the bandwidth size. The value ranges from 1
-                      Mbit/s to 2000 Mbit/s by default. (The specific range may
-                      vary depending on the configuration in each region. You
-                      can see the bandwidth range of each region on the
-                      management console.) The minimum unit for bandwidth
-                      adjustment varies depending on the bandwidth range. The
-                      details are as follows.
-                    - The minimum unit is 1 Mbit/s if the allowed bandwidth
-                      size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
-                      included).
-                    - The minimum unit is 50 Mbit/s if the allowed bandwidth
-                      size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000
-                      Mbit/s included).
-                    - The minimum unit is 500 Mbit/s if the allowed bandwidth
-                      size is greater than 1000 Mbit/s.
-                type: int
-                returned: success
-            id:
-                description:
-                    - Specifies the ID of dedicated bandwidth.
-                type: str
-                returned: success
-    enterprise_project_id:
-        description:
-            - Specifies the enterprise project ID.
-        type: str
-        returned: success
-    ip_version:
-        description:
-            - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
-              parameter is left blank, an IPv4 address will be assigned.
-        type: int
-        returned: success
-    ipv4_address:
-        description:
-            - Specifies the obtained IPv4 EIP. The system automatically assigns
-              an EIP if you do not specify it.
-        type: str
-        returned: success
-    port_id:
-        description:
-            - Specifies the port ID. This parameter is returned only when a
-              private IP address is bound with the EIP.
-        type: str
-        returned: success
-    shared_bandwidth_id:
-        description:
-            - Specifies the ID of shared bandwidth.
-        type: str
-        returned: success
-    create_time:
-        description:
-            - Specifies the time (UTC time) when the EIP was assigned.
-        type: str
-        returned: success
-    ipv6_address:
-        description:
-            - Specifies the obtained IPv6 EIP.
-        type: str
-        returned: success
-    private_ip_address:
-        description:
-            - Specifies the private IP address bound with the EIP. This
-              parameter is returned only when a private IP address is bound
-              with the EIP.
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='5m', type='str'), - update=dict(default='5m', type='str'), - ), default=dict()), - type=dict(type='str', required=True), - dedicated_bandwidth=dict(type='dict', options=dict( - charge_mode=dict(type='str', required=True), - name=dict(type='str', required=True), - size=dict(type='int', required=True) - )), - enterprise_project_id=dict(type='str'), - ip_version=dict(type='int'), - ipv4_address=dict(type='str'), - port_id=dict(type='str'), - shared_bandwidth_id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "dedicated_bandwidth": module.params.get("dedicated_bandwidth"), - "enterprise_project_id": module.params.get("enterprise_project_id"), - "ip_version": module.params.get("ip_version"), - "ipv4_address": module.params.get("ipv4_address"), - "port_id": module.params.get("port_id"), - "shared_bandwidth_id": module.params.get("shared_bandwidth_id"), - "type": module.params.get("type"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["publicip", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - r = send_update_request(module, params, client) - async_wait_update(config, r, client, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - if module.params["port_id"]: - module.params["port_id"] = "" - update(config) - - 
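
Deleting an EIP in this module is a two-step affair: any bound port is first detached (the update(config) call above), and the DELETE is then confirmed by polling the resource URL until the API answers 404, via the _refresh_status/wait_to_finish pair below. A minimal standalone sketch of that confirmation loop, using only names that appear in this diff (client.get raising HwcClientException404 once the resource is gone); the interval parameter is illustrative and not part of hwc_utils:

    import time

    from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
        HwcClientException404)


    def wait_until_deleted(client, url, timeout, interval=5):
        # Poll until GET raises HwcClientException404, i.e. the delete finished.
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                client.get(url)          # still readable: delete is pending
            except HwcClientException404:
                return                   # 404: the resource is gone
            except Exception:
                pass                     # transient error: retry until timeout
            time.sleep(interval)
        raise Exception("timed out waiting for %s to be deleted" % url)
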
send_delete_request(module, None, client) - - url = build_path(module, "publicips/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_eip): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["ip_version"]) - if v: - query_params.append("ip_version=" + str(v)) - - v = navigate_value(opts, ["enterprise_project_id"]) - if v: - query_params.append("enterprise_project_id=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "publicips" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = expand_create_bandwidth(opts, None) - if not is_empty_value(v): - params["bandwidth"] = v - - v = navigate_value(opts, ["enterprise_project_id"], None) - if not is_empty_value(v): - params["enterprise_project_id"] = v - - v = expand_create_publicip(opts, None) - if not is_empty_value(v): - params["publicip"] = v - - return params - - -def expand_create_bandwidth(d, array_index): - v = navigate_value(d, ["dedicated_bandwidth"], array_index) - sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) - if v and sbwid: - raise Exception("don't input shared_bandwidth_id and " - "dedicated_bandwidth at same time") - - if not (v or sbwid): - raise Exception("must input shared_bandwidth_id or " - "dedicated_bandwidth") - - if sbwid: - return { - "id": sbwid, - "share_type": "WHOLE"} - - return { - "charge_mode": v["charge_mode"], - "name": v["name"], - "share_type": "PER", - "size": v["size"]} - - -def expand_create_publicip(d, array_index): - r = dict() - - v = navigate_value(d, ["ipv4_address"], array_index) - if not is_empty_value(v): - r["ip_address"] = v - - v = navigate_value(d, ["ip_version"], array_index) - if not is_empty_value(v): - r["ip_version"] = v - - v = navigate_value(d, ["type"], array_index) - if not is_empty_value(v): - r["type"] = v - - return r - - -def send_create_request(module, params, client): - url = "publicips" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, 
result, client, timeout): - module = config.module - - path_parameters = { - "publicip_id": ["publicip", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "publicips/{publicip_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["publicip", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - None, - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_eip): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["ip_version"], None) - if not is_empty_value(v): - params["ip_version"] = v - - v = navigate_value(opts, ["port_id"], None) - if v is not None: - params["port_id"] = v - - if not params: - return params - - params = {"publicip": params} - - return params - - -def send_update_request(module, params, client): - url = build_path(module, "publicips/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_update(config, result, client, timeout): - module = config.module - - url = build_path(module, "publicips/{id}") - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["publicip", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - None, - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_eip): error " - "waiting for api(update) to " - "be done, error= %s" % str(ex)) - - -def send_delete_request(module, params, client): - url = build_path(module, "publicips/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "publicips/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["publicip"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["bandwidth_id"] = body.get("bandwidth_id") - - result["bandwidth_name"] = body.get("bandwidth_name") - - result["bandwidth_share_type"] = body.get("bandwidth_share_type") - - result["bandwidth_size"] = body.get("bandwidth_size") - - result["create_time"] = body.get("create_time") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - result["ip_version"] = body.get("ip_version") - - result["port_id"] = body.get("port_id") - - result["private_ip_address"] = body.get("private_ip_address") - - result["public_ip_address"] = body.get("public_ip_address") - - result["public_ipv6_address"] = body.get("public_ipv6_address") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - result["type"] = body.get("type") - - return result - - -def update_properties(module, 
response, array_index, exclude_output=False): - r = user_input_parameters(module) - - if not exclude_output: - v = navigate_value(response, ["read", "create_time"], array_index) - r["create_time"] = v - - v = r.get("dedicated_bandwidth") - v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output) - r["dedicated_bandwidth"] = v - - v = navigate_value(response, ["read", "enterprise_project_id"], - array_index) - r["enterprise_project_id"] = v - - v = navigate_value(response, ["read", "ip_version"], array_index) - r["ip_version"] = v - - v = navigate_value(response, ["read", "public_ip_address"], array_index) - r["ipv4_address"] = v - - if not exclude_output: - v = navigate_value(response, ["read", "public_ipv6_address"], - array_index) - r["ipv6_address"] = v - - v = navigate_value(response, ["read", "port_id"], array_index) - r["port_id"] = v - - if not exclude_output: - v = navigate_value(response, ["read", "private_ip_address"], - array_index) - r["private_ip_address"] = v - - v = r.get("shared_bandwidth_id") - v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output) - r["shared_bandwidth_id"] = v - - v = navigate_value(response, ["read", "type"], array_index) - r["type"] = v - - return r - - -def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output): - v = navigate_value(d, ["read", "bandwidth_share_type"], array_index) - if not (v and v == "PER"): - return current_value - - result = current_value - if not result: - result = dict() - - if not exclude_output: - v = navigate_value(d, ["read", "bandwidth_id"], array_index) - if v is not None: - result["id"] = v - - v = navigate_value(d, ["read", "bandwidth_name"], array_index) - if v is not None: - result["name"] = v - - v = navigate_value(d, ["read", "bandwidth_size"], array_index) - if v is not None: - result["size"] = v - - return result if result else current_value - - -def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output): - v = navigate_value(d, ["read", "bandwidth_id"], array_index) - - v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index) - - return v if (v1 and v1 == "WHOLE") else current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["publicips"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = expand_list_bandwidth_id(all_opts, None) - result["bandwidth_id"] = v - - v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None) - result["bandwidth_name"] = v - - result["bandwidth_share_type"] = None - - v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None) - result["bandwidth_size"] = v - - result["create_time"] = None - - v = navigate_value(all_opts, ["enterprise_project_id"], None) - result["enterprise_project_id"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["ip_version"], None) - result["ip_version"] = v - - v = navigate_value(all_opts, ["port_id"], None) - result["port_id"] = v - - result["private_ip_address"] = None - - v = navigate_value(all_opts, ["ipv4_address"], None) - result["public_ip_address"] = v - - result["public_ipv6_address"] = None - - result["status"] = None - - result["tenant_id"] = None - - v = navigate_value(all_opts, ["type"], None) - result["type"] = v - - return result - - -def expand_list_bandwidth_id(d, array_index): - v = 
navigate_value(d, ["dedicated_bandwidth"], array_index) - sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) - if v and sbwid: - raise Exception("don't input shared_bandwidth_id and " - "dedicated_bandwidth at same time") - - return sbwid - - -def fill_list_resp_body(body): - result = dict() - - result["bandwidth_id"] = body.get("bandwidth_id") - - result["bandwidth_name"] = body.get("bandwidth_name") - - result["bandwidth_share_type"] = body.get("bandwidth_share_type") - - result["bandwidth_size"] = body.get("bandwidth_size") - - result["create_time"] = body.get("create_time") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - result["ip_version"] = body.get("ip_version") - - result["port_id"] = body.get("port_id") - - result["private_ip_address"] = body.get("private_ip_address") - - result["public_ip_address"] = body.get("public_ip_address") - - result["public_ipv6_address"] = body.get("public_ipv6_address") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - result["type"] = body.get("type") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py deleted file mode 100644 index a4d5921b77..0000000000 --- a/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py +++ /dev/null @@ -1,691 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_peering_connect -description: - - vpc peering management. -short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - required: true - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - required: true - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - required: true - suboptions: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - required: true - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - required: false - description: - description: - - The description of vpc peering connection. 
- type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a peering connect -- name: Create a local vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_local" - register: vpc1 -- name: Create a peering vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_peering" - register: vpc2 -- name: Create a peering connect - community.general.hwc_vpc_peering_connect: - local_vpc_id: "{{ vpc1.id }}" - name: "ansible_network_peering_test" - peering_vpc: - vpc_id: "{{ vpc2.id }}" -''' - -RETURN = ''' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - returned: success - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - returned: success - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - returned: success - contains: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - returned: success - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - returned: success - description: - description: - - The description of vpc peering connection. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - ), default=dict()), - local_vpc_id=dict(type='str', required=True), - name=dict(type='str', required=True), - peering_vpc=dict(type='dict', required=True, options=dict( - vpc_id=dict(type='str', required=True), - project_id=dict(type='str') - )), - description=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "description": module.params.get("description"), - "local_vpc_id": module.params.get("local_vpc_id"), - "name": module.params.get("name"), - "peering_vpc": module.params.get("peering_vpc"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), 
"network", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["peering", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - send_update_request(module, params, client) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "network", "project") - - send_delete_request(module, None, client) - - url = build_path(module, "v2.0/vpc/peerings/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_peering_connect): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "network", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["local_vpc_id"]) - if v: - query_params.append("vpc_id=" + str(v)) - - v = navigate_value(opts, ["name"]) - if v: - query_params.append("name=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "v2.0/vpc/peerings" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = expand_create_accept_vpc_info(opts, None) - if not is_empty_value(v): - params["accept_vpc_info"] = v - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_create_request_vpc_info(opts, None) - if not is_empty_value(v): - params["request_vpc_info"] = v - - if not params: - return params - - params = {"peering": params} - - return params - - -def expand_create_accept_vpc_info(d, array_index): - r = dict() - - v = navigate_value(d, ["peering_vpc", "project_id"], array_index) - if not is_empty_value(v): - r["tenant_id"] = v - - v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) - if not is_empty_value(v): - r["vpc_id"] = v - - return r - - -def expand_create_request_vpc_info(d, array_index): - r = 
dict() - - r["tenant_id"] = "" - - v = navigate_value(d, ["local_vpc_id"], array_index) - if not is_empty_value(v): - r["vpc_id"] = v - - return r - - -def send_create_request(module, params, client): - url = "v2.0/vpc/peerings" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, result, client, timeout): - module = config.module - - path_parameters = { - "peering_id": ["peering", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["peering", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE"], - ["PENDING_ACCEPTANCE"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_peering_connect): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - if not params: - return params - - params = {"peering": params} - - return params - - -def send_update_request(module, params, client): - url = build_path(module, "v2.0/vpc/peerings/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "v2.0/vpc/peerings/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "v2.0/vpc/peerings/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["peering"], None) - - -def fill_read_resp_body(body): - result = dict() - - v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info")) - result["accept_vpc_info"] = v - - result["description"] = body.get("description") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - v = fill_read_resp_request_vpc_info(body.get("request_vpc_info")) - result["request_vpc_info"] = v - - result["status"] = body.get("status") - - return result - - -def fill_read_resp_accept_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def fill_read_resp_request_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - 
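
Every field read in update_properties below goes through hwc_utils.navigate_value, which walks a nested response dict along a key path (optionally resolving array indexes such as the {"read.fixed_ips": 0} map used elsewhere in this diff). A rough sketch of the plain-dict traversal contract, with the array-index handling omitted; treat it as an approximation, not the hwc_utils implementation:

    def navigate_value(data, path, array_index=None):
        # Follow `path` one key at a time; a None value short-circuits to
        # None, while a genuinely missing key is surfaced as an error.
        value = data
        for key in path:
            if value is None:
                return None
            if not isinstance(value, dict) or key not in value:
                raise KeyError("no key %r while following %r" % (key, path))
            value = value[key]
        return value

    # e.g. navigate_value({"read": {"name": "peer"}}, ["read", "name"]) == "peer"
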
- v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], - array_index) - r["local_vpc_id"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = r.get("peering_vpc") - v = flatten_peering_vpc(response, array_index, v, exclude_output) - r["peering_vpc"] = v - - return r - - -def flatten_peering_vpc(d, array_index, current_value, exclude_output): - result = current_value - has_init_value = True - if not result: - result = dict() - has_init_value = False - - v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], - array_index) - result["project_id"] = v - - v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index) - result["vpc_id"] = v - - if has_init_value: - return result - - for v in result.values(): - if v is not None: - return result - return current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["peerings"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = expand_list_accept_vpc_info(all_opts, None) - result["accept_vpc_info"] = v - - v = navigate_value(all_opts, ["description"], None) - result["description"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - v = expand_list_request_vpc_info(all_opts, None) - result["request_vpc_info"] = v - - result["status"] = None - - return result - - -def expand_list_accept_vpc_info(d, array_index): - r = dict() - - v = navigate_value(d, ["peering_vpc", "project_id"], array_index) - r["tenant_id"] = v - - v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) - r["vpc_id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_request_vpc_info(d, array_index): - r = dict() - - r["tenant_id"] = None - - v = navigate_value(d, ["local_vpc_id"], array_index) - r["vpc_id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def fill_list_resp_body(body): - result = dict() - - v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info")) - result["accept_vpc_info"] = v - - result["description"] = body.get("description") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - v = fill_list_resp_request_vpc_info(body.get("request_vpc_info")) - result["request_vpc_info"] = v - - result["status"] = body.get("status") - - return result - - -def fill_list_resp_accept_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def fill_list_resp_request_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_port.py b/plugins/modules/cloud/huawei/hwc_vpc_port.py deleted file mode 100644 index cf0718f59b..0000000000 --- a/plugins/modules/cloud/huawei/hwc_vpc_port.py +++ /dev/null @@ -1,1160 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_port -description: - - vpc port management. -short_description: Creates a resource of Vpc/Port in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - required: true - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - required: false - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - required: false - type: list - elements: dict - suboptions: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - required: false - mac_address: - description: - - Specifies the MAC address. - type: str - required: false - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - elements: dict - required: false - suboptions: - name: - description: - - Specifies the option name. - type: str - required: false - value: - description: - - Specifies the option value. - type: str - required: false - ip_address: - description: - - Specifies the port IP address. - type: str - required: false - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - required: false - security_groups: - description: - - Specifies the ID of the security group. - type: list - elements: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a port -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a port - community.general.hwc_vpc_port: - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" -''' - -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - returned: success - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - returned: success - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. 
- type: str - returned: success - mac_address: - description: - - Specifies the MAC address. - type: str - returned: success - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - returned: success - contains: - name: - description: - - Specifies the option name. - type: str - returned: success - value: - description: - - Specifies the option value. - type: str - returned: success - ip_address: - description: - - Specifies the port IP address. - type: str - returned: success - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - returned: success - security_groups: - description: - - Specifies the ID of the security group. - type: list - returned: success - mac_address: - description: - - Specifies the port MAC address. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - ), default=dict()), - subnet_id=dict(type='str', required=True), - admin_state_up=dict(type='bool'), - allowed_address_pairs=dict( - type='list', elements='dict', - options=dict( - ip_address=dict(type='str'), - mac_address=dict(type='str') - ), - ), - extra_dhcp_opts=dict(type='list', elements='dict', options=dict( - name=dict(type='str'), - value=dict(type='str') - )), - ip_address=dict(type='str'), - name=dict(type='str'), - security_groups=dict(type='list', elements='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "admin_state_up": module.params.get("admin_state_up"), - "allowed_address_pairs": module.params.get("allowed_address_pairs"), - "extra_dhcp_opts": module.params.get("extra_dhcp_opts"), - "ip_address": module.params.get("ip_address"), - "name": module.params.get("name"), - "security_groups": module.params.get("security_groups"), - "subnet_id": module.params.get("subnet_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * 
int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["port", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - send_update_request(module, params, client) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - url = build_path(module, "ports/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_port): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - array_index = { - "read.fixed_ips": 0, - } - - return update_properties(module, res, array_index, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["subnet_id"]) - if v: - query_params.append("network_id=" + str(v)) - - v = navigate_value(opts, ["name"]) - if v: - query_params.append("name=" + str(v)) - - v = navigate_value(opts, ["admin_state_up"]) - if v: - query_params.append("admin_state_up=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "ports" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["admin_state_up"], None) - if not is_empty_value(v): - params["admin_state_up"] = v - - v = expand_create_allowed_address_pairs(opts, None) - if not is_empty_value(v): - params["allowed_address_pairs"] = v - - v = expand_create_extra_dhcp_opts(opts, None) - if not is_empty_value(v): - params["extra_dhcp_opts"] = v - - v = expand_create_fixed_ips(opts, None) - if not is_empty_value(v): - params["fixed_ips"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["subnet_id"], None) - if not is_empty_value(v): - params["network_id"] = v - - v = navigate_value(opts, ["security_groups"], None) - if not is_empty_value(v): - params["security_groups"] = v - - if not params: - return params - - params = {"port": 
params} - - return params - - -def expand_create_allowed_address_pairs(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["allowed_address_pairs"] = i - transformed = dict() - - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) - if not is_empty_value(v): - transformed["ip_address"] = v - - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) - if not is_empty_value(v): - transformed["mac_address"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_create_extra_dhcp_opts(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["extra_dhcp_opts"] = i - transformed = dict() - - v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) - if not is_empty_value(v): - transformed["opt_name"] = v - - v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) - if not is_empty_value(v): - transformed["opt_value"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_create_fixed_ips(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - n = 1 - for i in range(n): - transformed = dict() - - v = navigate_value(d, ["ip_address"], new_array_index) - if not is_empty_value(v): - transformed["ip_address"] = v - - if transformed: - req.append(transformed) - - return req - - -def send_create_request(module, params, client): - url = "ports" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, result, client, timeout): - module = config.module - - path_parameters = { - "port_id": ["port", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "ports/{port_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["port", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - ["BUILD"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_port): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = expand_update_allowed_address_pairs(opts, None) - if v is not None: - params["allowed_address_pairs"] = v - - v = expand_update_extra_dhcp_opts(opts, None) - if v is not None: - params["extra_dhcp_opts"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["security_groups"], None) - if not is_empty_value(v): - params["security_groups"] = v - - if not params: - return params - - params = {"port": params} - - return params - - -def expand_update_allowed_address_pairs(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = 
navigate_value(d, ["allowed_address_pairs"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["allowed_address_pairs"] = i - transformed = dict() - - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) - if not is_empty_value(v): - transformed["ip_address"] = v - - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) - if not is_empty_value(v): - transformed["mac_address"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_update_extra_dhcp_opts(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["extra_dhcp_opts"] = i - transformed = dict() - - v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) - if not is_empty_value(v): - transformed["opt_name"] = v - - v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) - if not is_empty_value(v): - transformed["opt_value"] = v - - if transformed: - req.append(transformed) - - return req - - -def send_update_request(module, params, client): - url = build_path(module, "ports/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "ports/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "ports/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["port"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["admin_state_up"] = body.get("admin_state_up") - - v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs")) - result["allowed_address_pairs"] = v - - result["binding_host_id"] = body.get("binding_host_id") - - result["binding_vnic_type"] = body.get("binding_vnic_type") - - result["device_id"] = body.get("device_id") - - result["device_owner"] = body.get("device_owner") - - result["dns_name"] = body.get("dns_name") - - v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) - result["extra_dhcp_opts"] = v - - v = fill_read_resp_fixed_ips(body.get("fixed_ips")) - result["fixed_ips"] = v - - result["id"] = body.get("id") - - result["mac_address"] = body.get("mac_address") - - result["name"] = body.get("name") - - result["network_id"] = body.get("network_id") - - result["security_groups"] = body.get("security_groups") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - return result - - -def fill_read_resp_allowed_address_pairs(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - val["mac_address"] = item.get("mac_address") - - result.append(val) - - return result - - -def fill_read_resp_extra_dhcp_opts(value): - if not value: - return None - - result = [] - for item in value: - val = dict() 
- - val["opt_name"] = item.get("opt_name") - - val["opt_value"] = item.get("opt_value") - - result.append(val) - - return result - - -def fill_read_resp_fixed_ips(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - result.append(val) - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "admin_state_up"], array_index) - r["admin_state_up"] = v - - v = r.get("allowed_address_pairs") - v = flatten_allowed_address_pairs(response, array_index, v, exclude_output) - r["allowed_address_pairs"] = v - - v = r.get("extra_dhcp_opts") - v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output) - r["extra_dhcp_opts"] = v - - v = navigate_value(response, ["read", "fixed_ips", "ip_address"], - array_index) - r["ip_address"] = v - - if not exclude_output: - v = navigate_value(response, ["read", "mac_address"], array_index) - r["mac_address"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = navigate_value(response, ["read", "security_groups"], array_index) - r["security_groups"] = v - - v = navigate_value(response, ["read", "network_id"], array_index) - r["subnet_id"] = v - - return r - - -def flatten_allowed_address_pairs(d, array_index, - current_value, exclude_output): - n = 0 - result = current_value - has_init_value = True - if result: - n = len(result) - else: - has_init_value = False - result = [] - v = navigate_value(d, ["read", "allowed_address_pairs"], - array_index) - if not v: - return current_value - n = len(v) - - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - for i in range(n): - new_array_index["read.allowed_address_pairs"] = i - - val = dict() - if len(result) >= (i + 1) and result[i]: - val = result[i] - - v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], - new_array_index) - val["ip_address"] = v - - v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], - new_array_index) - val["mac_address"] = v - - if len(result) >= (i + 1): - result[i] = val - else: - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if (has_init_value or result) else current_value - - -def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): - n = 0 - result = current_value - has_init_value = True - if result: - n = len(result) - else: - has_init_value = False - result = [] - v = navigate_value(d, ["read", "extra_dhcp_opts"], - array_index) - if not v: - return current_value - n = len(v) - - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - for i in range(n): - new_array_index["read.extra_dhcp_opts"] = i - - val = dict() - if len(result) >= (i + 1) and result[i]: - val = result[i] - - v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], - new_array_index) - val["name"] = v - - v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], - new_array_index) - val["value"] = v - - if len(result) >= (i + 1): - result[i] = val - else: - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if (has_init_value or result) else current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(list), error: %s" % str(ex)) 
- module.fail_json(msg=msg) - - return navigate_value(r, ["ports"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["admin_state_up"], None) - result["admin_state_up"] = v - - v = expand_list_allowed_address_pairs(all_opts, None) - result["allowed_address_pairs"] = v - - result["binding_host_id"] = None - - result["binding_vnic_type"] = None - - result["device_id"] = None - - result["device_owner"] = None - - result["dns_name"] = None - - v = expand_list_extra_dhcp_opts(all_opts, None) - result["extra_dhcp_opts"] = v - - v = expand_list_fixed_ips(all_opts, None) - result["fixed_ips"] = v - - result["id"] = None - - result["mac_address"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - v = navigate_value(all_opts, ["subnet_id"], None) - result["network_id"] = v - - v = navigate_value(all_opts, ["security_groups"], None) - result["security_groups"] = v - - result["status"] = None - - result["tenant_id"] = None - - return result - - -def expand_list_allowed_address_pairs(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) - - n = len(v) if v else 1 - for i in range(n): - new_array_index["allowed_address_pairs"] = i - transformed = dict() - - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) - transformed["ip_address"] = v - - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) - transformed["mac_address"] = v - - for v in transformed.values(): - if v is not None: - req.append(transformed) - break - - return req if req else None - - -def expand_list_extra_dhcp_opts(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) - - n = len(v) if v else 1 - for i in range(n): - new_array_index["extra_dhcp_opts"] = i - transformed = dict() - - v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) - transformed["opt_name"] = v - - v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) - transformed["opt_value"] = v - - for v in transformed.values(): - if v is not None: - req.append(transformed) - break - - return req if req else None - - -def expand_list_fixed_ips(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - n = 1 - for i in range(n): - transformed = dict() - - v = navigate_value(d, ["ip_address"], new_array_index) - transformed["ip_address"] = v - - for v in transformed.values(): - if v is not None: - req.append(transformed) - break - - return req if req else None - - -def fill_list_resp_body(body): - result = dict() - - result["admin_state_up"] = body.get("admin_state_up") - - v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs")) - result["allowed_address_pairs"] = v - - result["binding_host_id"] = body.get("binding_host_id") - - result["binding_vnic_type"] = body.get("binding_vnic_type") - - result["device_id"] = body.get("device_id") - - result["device_owner"] = body.get("device_owner") - - result["dns_name"] = body.get("dns_name") - - v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) - result["extra_dhcp_opts"] = v - - v = fill_list_resp_fixed_ips(body.get("fixed_ips")) - result["fixed_ips"] = v - - result["id"] = body.get("id") - - result["mac_address"] = 
body.get("mac_address") - - result["name"] = body.get("name") - - result["network_id"] = body.get("network_id") - - result["security_groups"] = body.get("security_groups") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - return result - - -def fill_list_resp_allowed_address_pairs(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - val["mac_address"] = item.get("mac_address") - - result.append(val) - - return result - - -def fill_list_resp_extra_dhcp_opts(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["opt_name"] = item.get("opt_name") - - val["opt_value"] = item.get("opt_value") - - result.append(val) - - return result - - -def fill_list_resp_fixed_ips(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - result.append(val) - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py deleted file mode 100644 index 901755f362..0000000000 --- a/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_private_ip -description: - - VPC private IP management. -short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud -notes: - - If the I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection. - - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP with these options exists, execution is aborted. - - No parameter supports updating. If any option is changed, the module will create a new resource. -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. Cannot be changed after creating the private IP. - type: str - required: true - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. Cannot be changed after - creating the private IP.
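A note on the option semantics above: at create time this module drops empty values and wraps the remaining fields in a one-element `privateips` list (see `build_create_parameters` further down in this file). A minimal standalone sketch of that payload shape, using hypothetical option values:

```python
# Sketch of the create payload built by hwc_vpc_private_ip (assumption: this
# mirrors build_create_parameters()/create() below, simplified for clarity).

def build_private_ip_payload(opts):
    params = {}
    for key in ("ip_address", "subnet_id"):
        value = opts.get(key)
        if value:  # emulate is_empty_value(): skip None and empty strings
            params[key] = value
    # The API expects the body wrapped in a one-element list.
    return {"privateips": [params]} if params else {}


if __name__ == "__main__":
    # Hypothetical values, for illustration only.
    print(build_private_ip_payload({"subnet_id": "subnet-123", "ip_address": None}))
    # -> {'privateips': [{'subnet_id': 'subnet-123'}]}
```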
- type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a private ip -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a private ip - community.general.hwc_vpc_private_ip: - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" -''' - -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. - type: str - returned: success - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - subnet_id=dict(type='str', required=True), - ip_address=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - raise Exception( - "Cannot change option from (%s) to (%s)of an" - " existing resource.(%s)" % (current, expect, module.params.get('id'))) - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "ip_address": module.params.get("ip_address"), - "subnet_id": module.params.get("subnet_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["privateips", "id"], - {"privateips": 0}) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def 
_build_query_link(opts): - query_link = "?marker={marker}&limit=10" - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = build_path(module, "subnets/{subnet_id}/privateips") + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["ip_address"], None) - if not is_empty_value(v): - params["ip_address"] = v - - v = navigate_value(opts, ["subnet_id"], None) - if not is_empty_value(v): - params["subnet_id"] = v - - if not params: - return params - - params = {"privateips": [params]} - - return params - - -def send_create_request(module, params, client): - url = "privateips" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "privateips/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "privateips/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["privateip"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["id"] = body.get("id") - - result["ip_address"] = body.get("ip_address") - - result["subnet_id"] = body.get("subnet_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "ip_address"], array_index) - r["ip_address"] = v - - v = navigate_value(response, ["read", "subnet_id"], array_index) - r["subnet_id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["privateips"], None) - - -def _build_identity_object(all_opts): - result = dict() - - result["id"] = None - - v = navigate_value(all_opts, ["ip_address"], None) - result["ip_address"] = v - - v = navigate_value(all_opts, ["subnet_id"], None) - result["subnet_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["id"] = body.get("id") - - result["ip_address"] = body.get("ip_address") - - result["subnet_id"] = body.get("subnet_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_route.py b/plugins/modules/cloud/huawei/hwc_vpc_route.py deleted file mode 100644 index 31829dc601..0000000000 --- 
a/plugins/modules/cloud/huawei/hwc_vpc_route.py +++ /dev/null @@ -1,437 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_route -description: - - VPC route management. -short_description: Creates a resource of Vpc/Route in Huawei Cloud -notes: - - If the I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection. - - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with these options exists, execution is aborted. - - No parameter supports updating. If any option is changed, the module will create a new resource. -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - required: true - next_hop: - description: - - Specifies the next hop. The value is a VPC peering connection ID. - type: str - required: true - vpc_id: - description: - - Specifies the VPC ID to which the route is added. - type: str - required: true - type: - description: - - Specifies the type of route. - type: str - required: false - default: 'peering' -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a peering connect -- name: Create a local vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_local" - register: vpc1 -- name: Create a peering vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_peering" - register: vpc2 -- name: Create a peering connect - hwc_vpc_peering_connect: - local_vpc_id: "{{ vpc1.id }}" - name: "ansible_network_peering_test" - filters: - - "name" - peering_vpc: - vpc_id: "{{ vpc2.id }}" - register: connect -- name: Create a route - community.general.hwc_vpc_route: - vpc_id: "{{ vpc1.id }}" - destination: "192.168.0.0/16" - next_hop: "{{ connect.id }}" -''' - -RETURN = ''' - id: - description: - - UUID of the route. - type: str - returned: success - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - returned: success - next_hop: - description: - - Specifies the next hop. The value is a VPC peering connection ID. - type: str - returned: success - vpc_id: - description: - - Specifies the VPC ID to which the route is added. - type: str - returned: success - type: - description: - - Specifies the type of route.
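The route selection described in the notes above is implemented further down with a marker-based pagination loop (`search_resource`). A self-contained sketch of that loop, with a stubbed list client standing in for `client.get(url)`; the None-as-wildcard comparison is a simplifying assumption about how `are_different_dicts` behaves after both sides are normalized:

```python
# Sketch of the marker-based pagination behind search_resource(): fetch pages
# of 10, compare each item to an identity dict, stop once more than one match
# is found, and advance the marker to the id of the last item on the page.

DATA = [  # hypothetical in-memory data, for illustration only
    {"id": "r1", "vpc_id": "v1", "destination": "192.168.0.0/16"},
    {"id": "r2", "vpc_id": "v2", "destination": "10.0.0.0/8"},
]


def list_page(marker, page_size=10):
    # Stands in for client.get("v2.0/vpc/routes?marker=...&limit=10").
    start = next((i + 1 for i, r in enumerate(DATA) if r["id"] == marker), 0)
    return DATA[start:start + page_size]


def search_routes(identity):
    result, marker = [], ""
    while True:
        page = list_page(marker)
        if not page:
            break
        for item in page:
            # Assumption: None entries in the identity act as wildcards.
            if all(v is None or item.get(k) == v for k, v in identity.items()):
                result.append(item)
        if len(result) > 1:  # the caller aborts on multiple matches anyway
            break
        marker = page[-1]["id"]
    return result


print(search_routes({"vpc_id": "v1", "destination": None}))  # -> the r1 entry
```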
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - destination=dict(type='str', required=True), - next_hop=dict(type='str', required=True), - vpc_id=dict(type='str', required=True), - type=dict(type='str', default='peering'), - id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params.get("id"): - resource = get_resource_by_id(config) - if module.params['state'] == 'present': - opts = user_input_parameters(module) - if are_different_dicts(resource, opts): - raise Exception( - "Cannot change option from (%s) to (%s) for an" - " existing route.(%s)" % (resource, opts, - config.module.params.get( - 'id'))) - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = update_properties(module, {"read": v[0]}, None) - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - resource = create(config) - changed = True - - result = resource - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "destination": module.params.get("destination"), - "next_hop": module.params.get("next_hop"), - "type": module.params.get("type"), - "vpc_id": module.params.get("vpc_id"), - "id": module.params.get("id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["route", "id"]) - - result = update_properties(module, {"read": fill_resp_body(r)}, None) - return result - - -def delete(config): - module = config.module - client = config.client(get_region(module), "network", "project") - - send_delete_request(module, None, client) - - -def get_resource_by_id(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "network", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_resp_body(r) - - result = update_properties(module, res, None, exclude_output) - return result - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["type"]) - if v: - query_params.append("type=" + str(v)) - - v = navigate_value(opts, ["destination"]) - if v: - query_params.append("destination=" + str(v)) - - v = navigate_value(opts, ["vpc_id"]) - if v: - query_params.append("vpc_id=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = 
user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "v2.0/vpc/routes" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["destination"], None) - if not is_empty_value(v): - params["destination"] = v - - v = navigate_value(opts, ["next_hop"], None) - if not is_empty_value(v): - params["nexthop"] = v - - v = navigate_value(opts, ["type"], None) - if not is_empty_value(v): - params["type"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpc_id"] = v - - if not params: - return params - - params = {"route": params} - - return params - - -def send_create_request(module, params, client): - url = "v2.0/vpc/routes" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "v2.0/vpc/routes/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "v2.0/vpc/routes/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["route"], None) - - -def fill_resp_body(body): - result = dict() - - result["destination"] = body.get("destination") - - result["id"] = body.get("id") - - result["nexthop"] = body.get("nexthop") - - result["type"] = body.get("type") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "destination"], array_index) - r["destination"] = v - - v = navigate_value(response, ["read", "nexthop"], array_index) - r["next_hop"] = v - - v = navigate_value(response, ["read", "type"], array_index) - r["type"] = v - - v = navigate_value(response, ["read", "vpc_id"], array_index) - r["vpc_id"] = v - - v = navigate_value(response, ["read", "id"], array_index) - r["id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["routes"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["destination"], None) - result["destination"] = v - - v = navigate_value(all_opts, ["id"], None) - result["id"] = v - - v = navigate_value(all_opts, ["next_hop"], None) - result["nexthop"] = v - - v = navigate_value(all_opts, ["type"], None) - result["type"] = v - - v = navigate_value(all_opts, ["vpc_id"], None) - result["vpc_id"] = v - - return result - - -def 
fill_list_resp_body(body): - result = dict() - - result["destination"] = body.get("destination") - - result["id"] = body.get("id") - - result["nexthop"] = body.get("nexthop") - - result["type"] = body.get("type") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/plugins/modules/cloud/huawei/hwc_vpc_security_group.py deleted file mode 100644 index 603518159b..0000000000 --- a/plugins/modules/cloud/huawei/hwc_vpc_security_group.py +++ /dev/null @@ -1,645 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_security_group -description: - - VPC security group management. -short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud -notes: - - If the I(id) option is provided, it takes precedence over I(name), - I(enterprise_project_id) and I(vpc_id) for security group selection. - - I(name), I(enterprise_project_id) and I(vpc_id) are used for security - group selection. If more than one security group with these options exists, - execution is aborted. - - No parameter supports updating. If any option is changed, the module - will create a new resource. -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group. - type: str - required: false - default: 0 - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a security group -- name: Create a security group - community.general.hwc_vpc_security_group: - name: "ansible_network_security_group_test" -''' - -RETURN = ''' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group. - type: str - returned: success - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - returned: success - rules: - description: - - Specifies the security group rule, which ensures that resources - in the security group can communicate with one another.
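The selection notes above translate into an "identity object": a dict with one slot per field the list API returns, where fields the user did not constrain stay None (see `_build_identity_object` later in this file). A loose sketch of the pattern; the wildcard treatment of None entries is an assumption about the effective behavior of `are_different_dicts`, not a verified implementation detail:

```python
# Loose sketch of identity-object matching as used by this module's
# search_resource(); hypothetical data, simplified comparison.

def build_identity_object(opts):
    return {
        "enterprise_project_id": opts.get("enterprise_project_id"),
        "id": None,                    # assigned by the server, never matched
        "name": opts.get("name"),
        "security_group_rules": None,  # output-only structure
        "vpc_id": opts.get("vpc_id"),
    }


def matches(identity, item):
    # Assumption: None entries act as wildcards against normalized items.
    return all(v is None or item.get(k) == v for k, v in identity.items())


item = {"enterprise_project_id": "0", "id": "sg-1", "name": "web",
        "security_group_rules": [], "vpc_id": None}
print(matches(build_identity_object({"name": "web"}), item))  # -> True
```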
- type: complex - returned: success - contains: - description: - description: - - Provides supplementary information about the security - group rule. - type: str - returned: success - direction: - description: - - Specifies the direction of access control. The value can - be egress or ingress. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 - or IPv6. - type: str - returned: success - id: - description: - - Specifies the security group rule ID. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to - 65535. If the protocol is not icmp, the value cannot be - smaller than the port_range_min value. An empty value - indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 - to 65535. The value cannot be greater than the - port_range_max value. An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, - udp, or others. If the parameter is left blank, the - security group supports all protocols. - type: str - returned: success - remote_address_group_id: - description: - - Specifies the ID of remote IP address group. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control - direction is set to egress, the parameter specifies the - source IP address. If the access control direction is set - to ingress, the parameter specifies the destination IP - address. 
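The per-rule fields above are flattened out of the read response with the `navigate_value` helper, whose `array_index` argument selects list elements by the dotted path walked so far (`flatten_rules` below passes `{"read.security_group_rules": i}`). A simplified approximation of that helper, assuming plain dict/list inputs; the real hwc_utils implementation differs at least in error handling:

```python
# Simplified approximation of navigate_value(data, path, array_index).

def navigate_value(data, path, array_index=None):
    value = data
    walked = []
    for key in path:
        if value is None:
            return None
        value = value.get(key)
        walked.append(key)
        # When the value is a list, pick the element registered under the
        # dotted path walked so far, e.g. "read.security_group_rules".
        if isinstance(value, list) and array_index:
            idx = array_index.get(".".join(walked))
            if idx is not None:
                value = value[idx]
    return value


response = {"read": {"security_group_rules": [
    {"direction": "ingress"}, {"direction": "egress"}]}}
print(navigate_value(response, ["read", "security_group_rules", "direction"],
                     {"read.security_group_rules": 1}))  # -> egress
```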
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - name=dict(type='str', required=True), - enterprise_project_id=dict(type='str'), - vpc_id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params.get("id"): - resource = read_resource(config) - if module.params['state'] == 'present': - check_resource_option(resource, module) - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = update_properties(module, {"read": v[0]}, None) - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - resource = create(config) - changed = True - - result = resource - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "enterprise_project_id": module.params.get("enterprise_project_id"), - "name": module.params.get("name"), - "vpc_id": module.params.get("vpc_id"), - "id": module.params.get("id"), - } - - -def check_resource_option(resource, module): - opts = user_input_parameters(module) - - resource = { - "enterprise_project_id": resource.get("enterprise_project_id"), - "name": resource.get("name"), - "vpc_id": resource.get("vpc_id"), - "id": resource.get("id"), - } - - if are_different_dicts(resource, opts): - raise Exception( - "Cannot change option from (%s) to (%s) for an" - " existing security group(%s)." 
% (resource, opts, - module.params.get('id'))) - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["security_group", "id"]) - - result = update_properties(module, {"read": fill_read_resp_body(r)}, None) - return result - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["enterprise_project_id"]) - if v: - query_params.append("enterprise_project_id=" + str(v)) - - v = navigate_value(opts, ["vpc_id"]) - if v: - query_params.append("vpc_id=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "security-groups" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["enterprise_project_id"], None) - if not is_empty_value(v): - params["enterprise_project_id"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpc_id"] = v - - if not params: - return params - - params = {"security_group": params} - - return params - - -def send_create_request(module, params, client): - url = "security-groups" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "security-groups/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "security-groups/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_group"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - 
result["name"] = body.get("name") - - v = fill_read_resp_security_group_rules(body.get("security_group_rules")) - result["security_group_rules"] = v - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def fill_read_resp_security_group_rules(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["description"] = item.get("description") - - val["direction"] = item.get("direction") - - val["ethertype"] = item.get("ethertype") - - val["id"] = item.get("id") - - val["port_range_max"] = item.get("port_range_max") - - val["port_range_min"] = item.get("port_range_min") - - val["protocol"] = item.get("protocol") - - val["remote_address_group_id"] = item.get("remote_address_group_id") - - val["remote_group_id"] = item.get("remote_group_id") - - val["remote_ip_prefix"] = item.get("remote_ip_prefix") - - val["security_group_id"] = item.get("security_group_id") - - result.append(val) - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "enterprise_project_id"], - array_index) - r["enterprise_project_id"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - if not exclude_output: - v = r.get("rules") - v = flatten_rules(response, array_index, v, exclude_output) - r["rules"] = v - - v = navigate_value(response, ["read", "vpc_id"], array_index) - r["vpc_id"] = v - - v = navigate_value(response, ["read", "id"], array_index) - r["id"] = v - - return r - - -def flatten_rules(d, array_index, current_value, exclude_output): - n = 0 - result = current_value - has_init_value = True - if result: - n = len(result) - else: - has_init_value = False - result = [] - v = navigate_value(d, ["read", "security_group_rules"], - array_index) - if not v: - return current_value - n = len(v) - - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - for i in range(n): - new_array_index["read.security_group_rules"] = i - - val = dict() - if len(result) >= (i + 1) and result[i]: - val = result[i] - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "description"], - new_array_index) - val["description"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "direction"], - new_array_index) - val["direction"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "ethertype"], - new_array_index) - val["ethertype"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "id"], - new_array_index) - val["id"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "port_range_max"], - new_array_index) - val["port_range_max"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "port_range_min"], - new_array_index) - val["port_range_min"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "protocol"], - new_array_index) - val["protocol"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"], - new_array_index) - val["remote_address_group_id"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"], - new_array_index) - val["remote_group_id"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], - 
new_array_index) - val["remote_ip_prefix"] = v - - if len(result) >= (i + 1): - result[i] = val - else: - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if (has_init_value or result) else current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_groups"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["enterprise_project_id"], None) - result["enterprise_project_id"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - result["security_group_rules"] = None - - v = navigate_value(all_opts, ["vpc_id"], None) - result["vpc_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - v = fill_list_resp_security_group_rules(body.get("security_group_rules")) - result["security_group_rules"] = v - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def fill_list_resp_security_group_rules(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["description"] = item.get("description") - - val["direction"] = item.get("direction") - - val["ethertype"] = item.get("ethertype") - - val["id"] = item.get("id") - - val["port_range_max"] = item.get("port_range_max") - - val["port_range_min"] = item.get("port_range_min") - - val["protocol"] = item.get("protocol") - - val["remote_address_group_id"] = item.get("remote_address_group_id") - - val["remote_group_id"] = item.get("remote_group_id") - - val["remote_ip_prefix"] = item.get("remote_ip_prefix") - - val["security_group_id"] = item.get("security_group_id") - - result.append(val) - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py deleted file mode 100644 index f92c82764e..0000000000 --- a/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py +++ /dev/null @@ -1,570 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_security_group_rule -description: - - VPC security group rule management. -short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud -notes: - - If the I(id) option is provided, it takes precedence over - I(enterprise_project_id) for security group rule selection. - - I(security_group_id) is used for security group rule selection. If more - than one security group rule with these options exists, execution is - aborted. - - No parameter supports updating. If any option is changed, the module - will create a new resource. -version_added: '0.2.0' -author: Huawei Inc.
(@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - required: true - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - required: true - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - required: false - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - required: false - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - required: false - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - required: false - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - required: false - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - required: false - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a security group rule -- name: Create a security group - hwc_vpc_security_group: - name: "ansible_network_security_group_test" - register: sg -- name: Create a security group rule - community.general.hwc_vpc_security_group_rule: - direction: "ingress" - protocol: "tcp" - ethertype: "IPv4" - port_range_max: 22 - security_group_id: "{{ sg.id }}" - port_range_min: 22 - remote_ip_prefix: "0.0.0.0/0" -''' - -RETURN = ''' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - returned: success - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - returned: success - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. 
The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - direction=dict(type='str', required=True), - security_group_id=dict(type='str', required=True), - description=dict(type='str'), - ethertype=dict(type='str'), - port_range_max=dict(type='int'), - port_range_min=dict(type='int'), - protocol=dict(type='str'), - remote_group_id=dict(type='str'), - remote_ip_prefix=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - raise Exception( - "Cannot change option from (%s) to (%s) for an" - " existing security group(%s)." 
% (current, expect, module.params.get('id'))) - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "description": module.params.get("description"), - "direction": module.params.get("direction"), - "ethertype": module.params.get("ethertype"), - "port_range_max": module.params.get("port_range_max"), - "port_range_min": module.params.get("port_range_min"), - "protocol": module.params.get("protocol"), - "remote_group_id": module.params.get("remote_group_id"), - "remote_ip_prefix": module.params.get("remote_ip_prefix"), - "security_group_id": module.params.get("security_group_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["security_group_rule", "id"]) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_link = "?marker={marker}&limit=10" - v = navigate_value(opts, ["security_group_id"]) - if v: - query_link += "&security_group_id=" + str(v) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "security-group-rules" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["direction"], None) - if not is_empty_value(v): - params["direction"] = v - - v = navigate_value(opts, ["ethertype"], None) - if not is_empty_value(v): - params["ethertype"] = v - - v = navigate_value(opts, ["port_range_max"], None) - if not is_empty_value(v): - params["port_range_max"] = v - - v = navigate_value(opts, ["port_range_min"], None) - if not is_empty_value(v): - params["port_range_min"] = v - - v = navigate_value(opts, ["protocol"], None) - if not is_empty_value(v): - params["protocol"] = v - - v = navigate_value(opts, ["remote_group_id"], None) - if not is_empty_value(v): - params["remote_group_id"] = v - - v = navigate_value(opts, ["remote_ip_prefix"], None) - if not is_empty_value(v): - params["remote_ip_prefix"] = v - - v = navigate_value(opts, ["security_group_id"], None) - if not is_empty_value(v): - params["security_group_id"] = 
v - - if not params: - return params - - params = {"security_group_rule": params} - - return params - - -def send_create_request(module, params, client): - url = "security-group-rules" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "security-group-rules/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "security-group-rules/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_group_rule"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["description"] = body.get("description") - - result["direction"] = body.get("direction") - - result["ethertype"] = body.get("ethertype") - - result["id"] = body.get("id") - - result["port_range_max"] = body.get("port_range_max") - - result["port_range_min"] = body.get("port_range_min") - - result["protocol"] = body.get("protocol") - - result["remote_address_group_id"] = body.get("remote_address_group_id") - - result["remote_group_id"] = body.get("remote_group_id") - - result["remote_ip_prefix"] = body.get("remote_ip_prefix") - - result["security_group_id"] = body.get("security_group_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = navigate_value(response, ["read", "direction"], array_index) - r["direction"] = v - - v = navigate_value(response, ["read", "ethertype"], array_index) - r["ethertype"] = v - - v = navigate_value(response, ["read", "port_range_max"], array_index) - r["port_range_max"] = v - - v = navigate_value(response, ["read", "port_range_min"], array_index) - r["port_range_min"] = v - - v = navigate_value(response, ["read", "protocol"], array_index) - r["protocol"] = v - - v = navigate_value(response, ["read", "remote_group_id"], array_index) - r["remote_group_id"] = v - - v = navigate_value(response, ["read", "remote_ip_prefix"], array_index) - r["remote_ip_prefix"] = v - - v = navigate_value(response, ["read", "security_group_id"], array_index) - r["security_group_id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_group_rules"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["description"], None) - result["description"] = v - - v = navigate_value(all_opts, ["direction"], None) - result["direction"] = v - - v = navigate_value(all_opts, ["ethertype"], None) - result["ethertype"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["port_range_max"], None) - result["port_range_max"] = v - - v = navigate_value(all_opts, 
["port_range_min"], None) - result["port_range_min"] = v - - v = navigate_value(all_opts, ["protocol"], None) - result["protocol"] = v - - result["remote_address_group_id"] = None - - v = navigate_value(all_opts, ["remote_group_id"], None) - result["remote_group_id"] = v - - v = navigate_value(all_opts, ["remote_ip_prefix"], None) - result["remote_ip_prefix"] = v - - v = navigate_value(all_opts, ["security_group_id"], None) - result["security_group_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["description"] = body.get("description") - - result["direction"] = body.get("direction") - - result["ethertype"] = body.get("ethertype") - - result["id"] = body.get("id") - - result["port_range_max"] = body.get("port_range_max") - - result["port_range_min"] = body.get("port_range_min") - - result["protocol"] = body.get("protocol") - - result["remote_address_group_id"] = body.get("remote_address_group_id") - - result["remote_group_id"] = body.get("remote_group_id") - - result["remote_ip_prefix"] = body.get("remote_ip_prefix") - - result["security_group_id"] = body.get("security_group_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/plugins/modules/cloud/huawei/hwc_vpc_subnet.py deleted file mode 100644 index ccf180502c..0000000000 --- a/plugins/modules/cloud/huawei/hwc_vpc_subnet.py +++ /dev/null @@ -1,734 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_subnet -description: - - subnet management. -short_description: Creates a resource of Vpc/Subnet in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - update: - description: - - The timeouts for update operation. - type: str - default: '15m' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. Cannot be changed after creating the subnet. - type: str - required: true - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. Cannot be changed after creating the subnet. - type: str - required: true - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. Cannot - be changed after creating the subnet. - type: str - required: true - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. Cannot be changed - after creating the subnet. 
- type: str - required: false - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - required: false - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. - type: list - elements: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create subnet -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - community.general.hwc_vpc_subnet: - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True -''' - -RETURN = ''' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. - type: str - returned: success - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. - type: str - returned: success - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. - type: str - returned: success - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. - type: str - returned: success - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - returned: success - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. 
- type: list - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - update=dict(default='15m', type='str'), - ), default=dict()), - cidr=dict(type='str', required=True), - gateway_ip=dict(type='str', required=True), - name=dict(type='str', required=True), - vpc_id=dict(type='str', required=True), - availability_zone=dict(type='str'), - dhcp_enable=dict(type='bool'), - dns_address=dict(type='list', elements='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params.get('id'): - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "availability_zone": module.params.get("availability_zone"), - "cidr": module.params.get("cidr"), - "dhcp_enable": module.params.get("dhcp_enable"), - "dns_address": module.params.get("dns_address"), - "gateway_ip": module.params.get("gateway_ip"), - "name": module.params.get("name"), - "vpc_id": module.params.get("vpc_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["subnet", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - r = send_update_request(module, params, client) - async_wait_update(config, r, client, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - url = build_path(module, "subnets/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, 
"Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_subnet): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_link = "?marker={marker}&limit=10" - v = navigate_value(opts, ["vpc_id"]) - if v: - query_link += "&vpc_id=" + str(v) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "subnets" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["availability_zone"], None) - if not is_empty_value(v): - params["availability_zone"] = v - - v = navigate_value(opts, ["cidr"], None) - if not is_empty_value(v): - params["cidr"] = v - - v = navigate_value(opts, ["dhcp_enable"], None) - if v is not None: - params["dhcp_enable"] = v - - v = expand_create_dns_list(opts, None) - if not is_empty_value(v): - params["dnsList"] = v - - v = navigate_value(opts, ["gateway_ip"], None) - if not is_empty_value(v): - params["gateway_ip"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_create_primary_dns(opts, None) - if not is_empty_value(v): - params["primary_dns"] = v - - v = expand_create_secondary_dns(opts, None) - if not is_empty_value(v): - params["secondary_dns"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpc_id"] = v - - if not params: - return params - - params = {"subnet": params} - - return params - - -def expand_create_dns_list(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v if (v and len(v) > 2) else [] - - -def expand_create_primary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[0] if v else "" - - -def expand_create_secondary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[1] if (v and len(v) > 1) else "" - - -def send_create_request(module, params, client): - url = "subnets" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, result, client, timeout): - module = config.module - - path_parameters = { - "subnet_id": ["subnet", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "subnets/{subnet_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, 
timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["subnet", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE"], - ["UNKNOWN"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_subnet): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["dhcp_enable"], None) - if v is not None: - params["dhcp_enable"] = v - - v = expand_update_dns_list(opts, None) - if v is not None: - params["dnsList"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_update_primary_dns(opts, None) - if v is not None: - params["primary_dns"] = v - - v = expand_update_secondary_dns(opts, None) - if v is not None: - params["secondary_dns"] = v - - if not params: - return params - - params = {"subnet": params} - - return params - - -def expand_update_dns_list(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - if v: - if len(v) > 2: - return v - return None - return [] - - -def expand_update_primary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[0] if v else "" - - -def expand_update_secondary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[1] if (v and len(v) > 1) else "" - - -def send_update_request(module, params, client): - url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_update(config, result, client, timeout): - module = config.module - - path_parameters = { - "subnet_id": ["subnet", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "subnets/{subnet_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["subnet", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE"], - ["UNKNOWN"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_subnet): error " - "waiting for api(update) to " - "be done, error= %s" % str(ex)) - - -def send_delete_request(module, params, client): - url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "subnets/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["subnet"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["availability_zone"] = body.get("availability_zone") - - result["cidr"] = body.get("cidr") - - result["dhcp_enable"] = body.get("dhcp_enable") - - result["dnsList"] = body.get("dnsList") - - result["gateway_ip"] = body.get("gateway_ip") - - result["id"] = body.get("id") - - 
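`async_wait_create` and `async_wait_update` above hand a `_query_status` callback to `hwc_utils.wait_to_finish`, which polls it until the reported status reaches a target state (`ACTIVE`) or the timeout expires, treating the listed pending states (`UNKNOWN`) as "keep polling". A minimal sketch of such a poller, assuming the same `(resource, status)` callback contract; this illustrates the pattern and is not the actual `hwc_utils` implementation:

```python
import time


def wait_to_finish(target_states, pending_states, refresh, timeout, interval=5.0):
    """Poll refresh() until it reports a status listed in target_states.

    refresh() returns a (resource, status) tuple; (None, "") means the
    status could not be read yet and simply keeps the loop going.
    Illustrative sketch only, not the real hwc_utils implementation.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource, status = refresh()
        if status in target_states:
            return resource
        if resource is not None and status not in pending_states:
            raise Exception("unexpected status %r while waiting" % status)
        time.sleep(interval)
    raise Exception("timed out waiting for states: %s" % ", ".join(target_states))
```

The `delete()` flow drives the same loop through `_refresh_status`, where the `HwcClientException404` raised for a vanished subnet is the success signal.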
result["name"] = body.get("name") - - result["neutron_network_id"] = body.get("neutron_network_id") - - result["neutron_subnet_id"] = body.get("neutron_subnet_id") - - result["primary_dns"] = body.get("primary_dns") - - result["secondary_dns"] = body.get("secondary_dns") - - result["status"] = body.get("status") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "availability_zone"], array_index) - r["availability_zone"] = v - - v = navigate_value(response, ["read", "cidr"], array_index) - r["cidr"] = v - - v = navigate_value(response, ["read", "dhcp_enable"], array_index) - r["dhcp_enable"] = v - - v = navigate_value(response, ["read", "dnsList"], array_index) - r["dns_address"] = v - - v = navigate_value(response, ["read", "gateway_ip"], array_index) - r["gateway_ip"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = navigate_value(response, ["read", "vpc_id"], array_index) - r["vpc_id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["subnets"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["availability_zone"], None) - result["availability_zone"] = v - - v = navigate_value(all_opts, ["cidr"], None) - result["cidr"] = v - - v = navigate_value(all_opts, ["dhcp_enable"], None) - result["dhcp_enable"] = v - - v = navigate_value(all_opts, ["dns_address"], None) - result["dnsList"] = v - - v = navigate_value(all_opts, ["gateway_ip"], None) - result["gateway_ip"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - result["neutron_network_id"] = None - - result["neutron_subnet_id"] = None - - result["primary_dns"] = None - - result["secondary_dns"] = None - - result["status"] = None - - v = navigate_value(all_opts, ["vpc_id"], None) - result["vpc_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["availability_zone"] = body.get("availability_zone") - - result["cidr"] = body.get("cidr") - - result["dhcp_enable"] = body.get("dhcp_enable") - - result["dnsList"] = body.get("dnsList") - - result["gateway_ip"] = body.get("gateway_ip") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - result["neutron_network_id"] = body.get("neutron_network_id") - - result["neutron_subnet_id"] = body.get("neutron_subnet_id") - - result["primary_dns"] = body.get("primary_dns") - - result["secondary_dns"] = body.get("secondary_dns") - - result["status"] = body.get("status") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/cloud/linode/linode.py deleted file mode 100644 index c627fb705a..0000000000 --- a/plugins/modules/cloud/linode/linode.py +++ /dev/null @@ -1,688 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: linode -short_description: 
Manage instances on the Linode Public Cloud
-description:
-    - Manage Linode Public Cloud instances and optionally wait for them to be
-      'running'.
-options:
-  state:
-    description:
-      - Indicate desired state of the resource.
-    choices: [ absent, active, deleted, present, restarted, started, stopped ]
-    default: present
-    type: str
-  api_key:
-    description:
-      - Linode API key.
-      - C(LINODE_API_KEY) env variable can be used instead.
-    type: str
-    required: yes
-  name:
-    description:
-      - Name to give the instance (alphanumeric, dashes, underscore).
-      - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
-    required: true
-    type: str
-  displaygroup:
-    description:
-      - Add the instance to a Display Group in Linode Manager.
-    type: str
-  linode_id:
-    description:
-      - Unique ID of a Linode server. This value is read-only in the sense that
-        if you specify it on creation of a Linode it will not be used. The
-        Linode API generates these IDs, and the generated values can be used
-        here to reference a Linode more specifically. This is useful for
-        idempotence.
-    aliases: [ lid ]
-    type: int
-  additional_disks:
-    description:
-      - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
-      - Dictionary takes Size, Label, Type. Size is in MB.
-    type: list
-    elements: dict
-  alert_bwin_enabled:
-    description:
-      - Set status of bandwidth in alerts.
-    type: bool
-  alert_bwin_threshold:
-    description:
-      - Set threshold in MB of bandwidth in alerts.
-    type: int
-  alert_bwout_enabled:
-    description:
-      - Set status of bandwidth out alerts.
-    type: bool
-  alert_bwout_threshold:
-    description:
-      - Set threshold in MB of bandwidth out alerts.
-    type: int
-  alert_bwquota_enabled:
-    description:
-      - Set status of bandwidth quota alerts as percentage of network transfer quota.
-    type: bool
-  alert_bwquota_threshold:
-    description:
-      - Set threshold in MB of bandwidth quota alerts.
-    type: int
-  alert_cpu_enabled:
-    description:
-      - Set status of receiving CPU usage alerts.
-    type: bool
-  alert_cpu_threshold:
-    description:
-      - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
-    type: int
-  alert_diskio_enabled:
-    description:
-      - Set status of receiving disk IO alerts.
-    type: bool
-  alert_diskio_threshold:
-    description:
-      - Set threshold for average IO ops/sec over 2 hour period.
-    type: int
-  backupsenabled:
-    description:
-      - Deprecated parameter; it will be removed in community.general C(5.0.0).
-      - To enable backups pass values to either I(backupweeklyday) or I(backupwindow).
-    type: int
-  backupweeklyday:
-    description:
-      - Day of the week to take backups.
-    type: int
-  backupwindow:
-    description:
-      - The time window in which backups will be taken.
-    type: int
-  plan:
-    description:
-      - Plan to use for the instance (Linode plan).
-    type: int
-  payment_term:
-    description:
-      - Payment term to use for the instance (payment term in months).
-    default: 1
-    choices: [ 1, 12, 24 ]
-    type: int
-  password:
-    description:
-      - Root password to apply to a new server (auto-generated if missing).
-    type: str
-  private_ip:
-    description:
-      - Add private IPv4 address when Linode is created.
-      - Default is C(false).
- type: bool - ssh_pub_key: - description: - - SSH public key applied to root user - type: str - swap: - description: - - swap size in MB - default: 512 - type: int - distribution: - description: - - distribution to use for the instance (Linode Distribution) - type: int - datacenter: - description: - - datacenter to create an instance in (Linode Datacenter) - type: int - kernel_id: - description: - - kernel to use for the instance (Linode Kernel) - type: int - wait: - description: - - wait for the instance to be in state C(running) before returning - type: bool - default: true - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - type: int - watchdog: - description: - - Set status of Lassie watchdog. - type: bool - default: "True" -requirements: - - python >= 2.6 - - linode-python -author: -- Vincent Viallet (@zbal) -notes: - - Please note, linode-python does not have python 3 support. - - This module uses the now deprecated v3 of the Linode API. - - Please review U(https://www.linode.com/api/linode) for determining the required parameters. -''' - -EXAMPLES = ''' - -- name: Create a new Linode - community.general.linode: - name: linode-test1 - plan: 1 - datacenter: 7 - distribution: 129 - state: present - register: linode_creation - -- name: Create a server with a private IP Address - community.general.linode: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - private_ip: yes - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - delegate_to: localhost - register: linode_creation - -- name: Fully configure new server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 4 - datacenter: 2 - distribution: 99 - kernel_id: 138 - password: 'superSecureRootPassword' - private_ip: yes - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - alert_bwquota_enabled: True - alert_bwquota_threshold: 80 - alert_bwin_enabled: True - alert_bwin_threshold: 10 - alert_cpu_enabled: True - alert_cpu_threshold: 210 - alert_bwout_enabled: True - alert_bwout_threshold: 10 - alert_diskio_enabled: True - alert_diskio_threshold: 10000 - backupweeklyday: 1 - backupwindow: 2 - displaygroup: 'test' - additional_disks: - - {Label: 'disk1', Size: 2500, Type: 'raw'} - - {Label: 'newdisk', Size: 2000} - watchdog: True - delegate_to: localhost - register: linode_creation - -- name: Ensure a running server (create if missing) - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - delegate_to: localhost - register: linode_creation - -- name: Delete a server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: absent - delegate_to: localhost - -- name: Stop a server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: stopped - delegate_to: localhost - -- name: Reboot a server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: restarted - delegate_to: localhost -''' - -import time -import 
traceback
-
-LINODE_IMP_ERR = None
-try:
-    from linode import api as linode_api
-    HAS_LINODE = True
-except ImportError:
-    LINODE_IMP_ERR = traceback.format_exc()
-    HAS_LINODE = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
-
-
-def randompass():
-    '''
-    Generate a long random password that complies with Linode requirements
-    '''
-    # Linode API currently requires the following:
-    # It must contain at least two of these four character classes:
-    # lower case letters - upper case letters - numbers - punctuation
-    # we play it safe :)
-    import random
-    import string
-    # as of python 2.4, this reseeds the PRNG from urandom
-    random.seed()
-    lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
-    upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
-    number = ''.join(random.choice(string.digits) for x in range(6))
-    punct = ''.join(random.choice(string.punctuation) for x in range(6))
-    p = lower + upper + number + punct
-    return ''.join(random.sample(p, len(p)))
-
-
-def getInstanceDetails(api, server):
-    '''
-    Return the details of an instance, populating IPs, etc.
-    '''
-    instance = {'id': server['LINODEID'],
-                'name': server['LABEL'],
-                'public': [],
-                'private': []}
-
-    # Populate with ips
-    for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
-        if ip['ISPUBLIC'] and 'ipv4' not in instance:
-            instance['ipv4'] = ip['IPADDRESS']
-            instance['fqdn'] = ip['RDNS_NAME']
-        if ip['ISPUBLIC']:
-            instance['public'].append({'ipv4': ip['IPADDRESS'],
-                                       'fqdn': ip['RDNS_NAME'],
-                                       'ip_id': ip['IPADDRESSID']})
-        else:
-            instance['private'].append({'ipv4': ip['IPADDRESS'],
-                                        'fqdn': ip['RDNS_NAME'],
-                                        'ip_id': ip['IPADDRESSID']})
-    return instance
-
-
-def linodeServers(module, api, state, name,
-                  displaygroup, plan, additional_disks, distribution,
-                  datacenter, kernel_id, linode_id, payment_term, password,
-                  private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
-    instances = []
-    changed = False
-    new_server = False
-    servers = []
-    disks = []
-    configs = []
-    jobs = []
-
-    # See if we can match existing server details with the provided linode_id
-    if linode_id:
-        # For the moment we only consider linode_id as criteria for match
-        # Later we can use more (size, name, etc.) and update existing
-        servers = api.linode_list(LinodeId=linode_id)
-        # Attempt to fetch details about disks and configs only if servers are
-        # found with linode_id
-        if servers:
-            disks = api.linode_disk_list(LinodeId=linode_id)
-            configs = api.linode_config_list(LinodeId=linode_id)
-
-    # Act on the state
-    if state in ('active', 'present', 'started'):
-        # TODO: validate all the plan / distribution / datacenter are valid
-
-        # Multi step process/validation:
-        #  - need linode_id (entity)
-        #  - need disk_id for linode_id - create disk from distrib
-        #  - need config_id for linode_id - create config (need kernel)
-
-        # Any create step triggers a job that needs to be waited for.
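`randompass()` seeds the global `random` PRNG by hand because the module still targets Python 2 (linode-python never gained Python 3 support). On Python 3, the same two-of-four character-class guarantee is more naturally expressed with the `secrets` module; a hedged sketch, illustrative only and not part of the module:

```python
import secrets
import string


def random_password(length=24):
    """Generate a password that satisfies the two-of-four class rule.

    Guarantees at least one character from each of the four classes the
    comment above lists, then fills the rest from the combined pool.
    Illustrative sketch; not part of the module.
    """
    classes = (string.ascii_lowercase, string.ascii_uppercase,
               string.digits, string.punctuation)
    picks = [secrets.choice(c) for c in classes]
    pool = ''.join(classes)
    picks += [secrets.choice(pool) for _ in range(length - len(classes))]
    # Shuffle so the guaranteed characters do not sit in fixed positions.
    secrets.SystemRandom().shuffle(picks)
    return ''.join(picks)
```

Either way, the generated password is only reported back for a newly created box when no SSH key was supplied, as the `new_server and not ssh_pub_key` branch further down shows.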
- if not servers: - for arg in (name, plan, distribution, datacenter): - if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) - # Create linode entity - new_server = True - - # Get size of all individually listed disks to subtract from Distribution disk - used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks) - - try: - res = api.linode_create(DatacenterID=datacenter, PlanID=plan, - PaymentTerm=payment_term) - linode_id = res['LinodeID'] - # Update linode Label to match name - api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name)) - # Update Linode with Ansible configuration options - api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs) - # Save server - servers = api.linode_list(LinodeId=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) - - # Add private IP to Linode - if private_ip: - try: - res = api.linode_ip_addprivate(LinodeID=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - if not disks: - for arg in (name, linode_id, distribution): - if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) - # Create disks (1 from distrib, 1 for SWAP) - new_server = True - try: - if not password: - # Password is required on creation, if not provided generate one - password = randompass() - if not swap: - swap = 512 - # Create data disk - size = servers[0]['TOTALHD'] - used_disk_space - swap - - if ssh_pub_key: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, rootSSHKey=ssh_pub_key, - Label='%s data disk (lid: %s)' % (name, linode_id), - Size=size) - else: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, - Label='%s data disk (lid: %s)' % (name, linode_id), - Size=size) - jobs.append(res['JobID']) - # Create SWAP disk - res = api.linode_disk_create(LinodeId=linode_id, Type='swap', - Label='%s swap disk (lid: %s)' % (name, linode_id), - Size=swap) - # Create individually listed disks at specified size - if additional_disks: - for disk in additional_disks: - # If a disk Type is not passed in, default to ext4 - if disk.get('Type') is None: - disk['Type'] = 'ext4' - res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type']) - - jobs.append(res['JobID']) - except Exception as e: - # TODO: destroy linode ? 
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - if not configs: - for arg in (name, linode_id, distribution): - if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) - - # Check architecture - for distrib in api.avail_distributions(): - if distrib['DISTRIBUTIONID'] != distribution: - continue - arch = '32' - if distrib['IS64BIT']: - arch = '64' - break - - # Get latest kernel matching arch if kernel_id is not specified - if not kernel_id: - for kernel in api.avail_kernels(): - if not kernel['LABEL'].startswith('Latest %s' % arch): - continue - kernel_id = kernel['KERNELID'] - break - - # Get disk list - disks_id = [] - for disk in api.linode_disk_list(LinodeId=linode_id): - if disk['TYPE'] == 'ext3': - disks_id.insert(0, str(disk['DISKID'])) - continue - disks_id.append(str(disk['DISKID'])) - # Trick to get the 9 items in the list - while len(disks_id) < 9: - disks_id.append('') - disks_list = ','.join(disks_id) - - # Create config - new_server = True - try: - api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, - Disklist=disks_list, Label='%s config' % name) - configs = api.linode_config_list(LinodeId=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - # Start / Ensure servers are running - for server in servers: - # Refresh server state - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # Ensure existing servers are up and running, boot if necessary - if server['STATUS'] != 1: - res = api.linode_boot(LinodeId=linode_id) - jobs.append(res['JobID']) - changed = True - - # wait here until the instances are up - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time(): - # refresh the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # status: - # -2: Boot failed - # 1: Running - if server['STATUS'] in (-2, 1): - break - time.sleep(5) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID'])) - # Get a fresh copy of the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - if server['STATUS'] == -2: - module.fail_json(msg='%s (lid: %s) failed to boot' % - (server['LABEL'], server['LINODEID'])) - # From now on we know the task is a success - # Build instance report - instance = getInstanceDetails(api, server) - # depending on wait flag select the status - if wait: - instance['status'] = 'Running' - else: - instance['status'] = 'Starting' - - # Return the root password if this is a new box and no SSH key - # has been provided - if new_server and not ssh_pub_key: - instance['password'] = password - instances.append(instance) - - elif state in ('stopped',): - if not servers: - module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - if server['STATUS'] != 2: - try: - res = api.linode_shutdown(LinodeId=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - instance['status'] = 'Stopping' - changed = True - else: - instance['status'] = 'Stopped' - instances.append(instance) - - elif state in ('restarted',): - if not servers: - module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - try: - res = 
api.linode_reboot(LinodeId=server['LINODEID']) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - instance['status'] = 'Restarting' - changed = True - instances.append(instance) - - elif state in ('absent', 'deleted'): - for server in servers: - instance = getInstanceDetails(api, server) - try: - api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - instance['status'] = 'Deleting' - changed = True - instances.append(instance) - - # Ease parsing if only 1 instance - if len(instances) == 1: - module.exit_json(changed=changed, instance=instances[0]) - - module.exit_json(changed=changed, instances=instances) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', - choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), - api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), - name=dict(type='str', required=True), - alert_bwin_enabled=dict(type='bool'), - alert_bwin_threshold=dict(type='int'), - alert_bwout_enabled=dict(type='bool'), - alert_bwout_threshold=dict(type='int'), - alert_bwquota_enabled=dict(type='bool'), - alert_bwquota_threshold=dict(type='int'), - alert_cpu_enabled=dict(type='bool'), - alert_cpu_threshold=dict(type='int'), - alert_diskio_enabled=dict(type='bool'), - alert_diskio_threshold=dict(type='int'), - backupsenabled=dict(type='int', removed_in_version='5.0.0', removed_from_collection='community.general'), - backupweeklyday=dict(type='int'), - backupwindow=dict(type='int'), - displaygroup=dict(type='str', default=''), - plan=dict(type='int'), - additional_disks=dict(type='list', elements='dict'), - distribution=dict(type='int'), - datacenter=dict(type='int'), - kernel_id=dict(type='int'), - linode_id=dict(type='int', aliases=['lid']), - payment_term=dict(type='int', default=1, choices=[1, 12, 24]), - password=dict(type='str', no_log=True), - private_ip=dict(type='bool'), - ssh_pub_key=dict(type='str'), - swap=dict(type='int', default=512), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - watchdog=dict(type='bool', default=True), - ), - required_if=[ - ('state', 'restarted', ['linode_id']), - ('state', 'stopped', ['linode_id']), - ] - ) - - if not HAS_LINODE: - module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR) - - state = module.params.get('state') - api_key = module.params.get('api_key') - name = module.params.get('name') - alert_bwin_enabled = module.params.get('alert_bwin_enabled') - alert_bwin_threshold = module.params.get('alert_bwin_threshold') - alert_bwout_enabled = module.params.get('alert_bwout_enabled') - alert_bwout_threshold = module.params.get('alert_bwout_threshold') - alert_bwquota_enabled = module.params.get('alert_bwquota_enabled') - alert_bwquota_threshold = module.params.get('alert_bwquota_threshold') - alert_cpu_enabled = module.params.get('alert_cpu_enabled') - alert_cpu_threshold = module.params.get('alert_cpu_threshold') - alert_diskio_enabled = module.params.get('alert_diskio_enabled') - alert_diskio_threshold = module.params.get('alert_diskio_threshold') - backupweeklyday = module.params.get('backupweeklyday') - backupwindow = module.params.get('backupwindow') - displaygroup = module.params.get('displaygroup') - plan = module.params.get('plan') - 
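The `required_if` entries in the argument spec above make `linode_id` mandatory for `restarted` and `stopped`, since those states can only act on an existing machine, while leaving it optional for creation. A minimal sketch of that mechanism in isolation, with a hypothetical `server_id` parameter standing in for `linode_id`:

```python
from ansible.module_utils.basic import AnsibleModule


def build_demo_module():
    # Hypothetical spec: 'server_id' stands in for 'linode_id'. With
    # required_if, Ansible rejects state=restarted/stopped calls that
    # omit server_id before the module body ever runs.
    return AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present',
                       choices=['present', 'restarted', 'stopped']),
            server_id=dict(type='int'),
        ),
        required_if=[
            ('state', 'restarted', ['server_id']),
            ('state', 'stopped', ['server_id']),
        ],
    )
```

Invoking such a module with `state: stopped` and no `server_id` fails during parameter validation, before any task code runs.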
additional_disks = module.params.get('additional_disks') - distribution = module.params.get('distribution') - datacenter = module.params.get('datacenter') - kernel_id = module.params.get('kernel_id') - linode_id = module.params.get('linode_id') - payment_term = module.params.get('payment_term') - password = module.params.get('password') - private_ip = module.params.get('private_ip') - ssh_pub_key = module.params.get('ssh_pub_key') - swap = module.params.get('swap') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - watchdog = int(module.params.get('watchdog')) - - check_items = dict( - alert_bwin_enabled=alert_bwin_enabled, - alert_bwin_threshold=alert_bwin_threshold, - alert_bwout_enabled=alert_bwout_enabled, - alert_bwout_threshold=alert_bwout_threshold, - alert_bwquota_enabled=alert_bwquota_enabled, - alert_bwquota_threshold=alert_bwquota_threshold, - alert_cpu_enabled=alert_cpu_enabled, - alert_cpu_threshold=alert_cpu_threshold, - alert_diskio_enabled=alert_diskio_enabled, - alert_diskio_threshold=alert_diskio_threshold, - backupweeklyday=backupweeklyday, - backupwindow=backupwindow, - ) - - kwargs = dict((k, v) for k, v in check_items.items() if v is not None) - - # setup the auth - try: - api = linode_api.Api(api_key) - api.test_echo() - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - linodeServers(module, api, state, name, - displaygroup, plan, - additional_disks, distribution, datacenter, kernel_id, linode_id, - payment_term, password, private_ip, ssh_pub_key, swap, wait, - wait_timeout, watchdog, **kwargs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py deleted file mode 100644 index fcf3725bfc..0000000000 --- a/plugins/modules/cloud/linode/linode_v4.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: linode_v4 -short_description: Manage instances on the Linode cloud. -description: Manage instances on the Linode cloud. -requirements: - - python >= 2.7 - - linode_api4 >= 2.0.0 -author: - - Luke Murphy (@decentral1se) -notes: - - No Linode resizing is currently implemented. This module will, in time, - replace the current Linode module which uses deprecated API bindings on the - Linode side. -options: - region: - description: - - The region of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/regions/). - type: str - image: - description: - - The image of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/images/). - type: str - type: - description: - - The type of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/linode-types/). - type: str - label: - description: - - The instance label. This label is used as the main determiner for - idempotence for the module and is therefore mandatory. - type: str - required: true - group: - description: - - The group that the instance should be marked under. Please note, that - group labelling is deprecated but still supported. 
The encouraged - method for marking instances is to use tags. - type: str - private_ip: - description: - - If C(true), the created Linode will have private networking enabled and - assigned a private IPv4 address. - type: bool - default: false - version_added: 3.0.0 - tags: - description: - - The tags that the instance should be marked under. See - U(https://www.linode.com/docs/api/tags/). - type: list - elements: str - root_pass: - description: - - The password for the root user. If not specified, one will be - generated. This generated password will be available in the task - success JSON. - type: str - authorized_keys: - description: - - A list of SSH public key parts to deploy for the root user. - type: list - elements: str - state: - description: - - The desired instance state. - type: str - choices: - - present - - absent - required: true - access_token: - description: - - The Linode API v4 access token. It may also be specified by exposing - the C(LINODE_ACCESS_TOKEN) environment variable. See - U(https://www.linode.com/docs/api#access-and-authentication). - required: true - type: str - stackscript_id: - description: - - The numeric ID of the StackScript to use when creating the instance. - See U(https://www.linode.com/docs/api/stackscripts/). - type: int - version_added: 1.3.0 - stackscript_data: - description: - - An object containing arguments to any User Defined Fields present in - the StackScript used when creating the instance. - Only valid when a stackscript_id is provided. - See U(https://www.linode.com/docs/api/stackscripts/). - type: dict - version_added: 1.3.0 -''' - -EXAMPLES = """ -- name: Create a new Linode. - community.general.linode_v4: - label: new-linode - type: g6-nanode-1 - region: eu-west - image: linode/debian9 - root_pass: passw0rd - authorized_keys: - - "ssh-rsa ..." - stackscript_id: 1337 - stackscript_data: - variable: value - state: present - -- name: Delete that new Linode. - community.general.linode_v4: - label: new-linode - state: absent -""" - -RETURN = """ -instance: - description: The instance description in JSON serialized form. - returned: Always. 
- type: dict - sample: { - "root_pass": "foobar", # if auto-generated - "alerts": { - "cpu": 90, - "io": 10000, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - } - }, - "created": "2018-09-26T08:12:33", - "group": "Foobar Group", - "hypervisor": "kvm", - "id": 10480444, - "image": "linode/centos7", - "ipv4": [ - "130.132.285.233" - ], - "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", - "label": "lin-foo", - "region": "eu-west", - "specs": { - "disk": 25600, - "memory": 1024, - "transfer": 1000, - "vcpus": 1 - }, - "status": "running", - "tags": [], - "type": "g6-nanode-1", - "updated": "2018-09-26T10:10:14", - "watchdog_enabled": true - } -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent - -LINODE_IMP_ERR = None -try: - from linode_api4 import Instance, LinodeClient - HAS_LINODE_DEPENDENCY = True -except ImportError: - LINODE_IMP_ERR = traceback.format_exc() - HAS_LINODE_DEPENDENCY = False - - -def create_linode(module, client, **kwargs): - """Creates a Linode instance and handles return format.""" - if kwargs['root_pass'] is None: - kwargs.pop('root_pass') - - try: - response = client.linode.instance_create(**kwargs) - except Exception as exception: - module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) - - try: - if isinstance(response, tuple): - instance, root_pass = response - instance_json = instance._raw_json - instance_json.update({'root_pass': root_pass}) - return instance_json - else: - return response._raw_json - except TypeError: - module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this' - ' module on https://github.com/ansible-collections/community.general/issues' - ) - - -def maybe_instance_from_label(module, client): - """Try to retrieve an instance based on a label.""" - try: - label = module.params['label'] - result = client.linode.instances(Instance.label == label) - return result[0] - except IndexError: - return None - except Exception as exception: - module.fail_json(msg='Unable to query the Linode API. 
Saw: %s' % exception) - - -def initialise_module(): - """Initialise the module parameter specification.""" - return AnsibleModule( - argument_spec=dict( - label=dict(type='str', required=True), - state=dict( - type='str', - required=True, - choices=['present', 'absent'] - ), - access_token=dict( - type='str', - required=True, - no_log=True, - fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), - ), - authorized_keys=dict(type='list', elements='str', no_log=False), - group=dict(type='str'), - image=dict(type='str'), - private_ip=dict(type='bool', default=False), - region=dict(type='str'), - root_pass=dict(type='str', no_log=True), - tags=dict(type='list', elements='str'), - type=dict(type='str'), - stackscript_id=dict(type='int'), - stackscript_data=dict(type='dict'), - ), - supports_check_mode=False, - required_one_of=( - ['state', 'label'], - ), - required_together=( - ['region', 'image', 'type'], - ) - ) - - -def build_client(module): - """Build a LinodeClient.""" - return LinodeClient( - module.params['access_token'], - user_agent=get_user_agent('linode_v4_module') - ) - - -def main(): - """Module entrypoint.""" - module = initialise_module() - - if not HAS_LINODE_DEPENDENCY: - module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) - - client = build_client(module) - instance = maybe_instance_from_label(module, client) - - if module.params['state'] == 'present' and instance is not None: - module.exit_json(changed=False, instance=instance._raw_json) - - elif module.params['state'] == 'present' and instance is None: - instance_json = create_linode( - module, client, - authorized_keys=module.params['authorized_keys'], - group=module.params['group'], - image=module.params['image'], - label=module.params['label'], - private_ip=module.params['private_ip'], - region=module.params['region'], - root_pass=module.params['root_pass'], - tags=module.params['tags'], - ltype=module.params['type'], - stackscript=module.params['stackscript_id'], - stackscript_data=module.params['stackscript_data'], - ) - module.exit_json(changed=True, instance=instance_json) - - elif module.params['state'] == 'absent' and instance is not None: - instance.delete() - module.exit_json(changed=True, instance=instance._raw_json) - - elif module.params['state'] == 'absent' and instance is None: - module.exit_json(changed=False, instance={}) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/cloud/lxc/lxc_container.py deleted file mode 100644 index 18f1d02efe..0000000000 --- a/plugins/modules/cloud/lxc/lxc_container.py +++ /dev/null @@ -1,1760 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Kevin Carter -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: lxc_container -short_description: Manage LXC Containers -description: - - Management of LXC containers. -author: "Kevin Carter (@cloudnull)" -options: - name: - description: - - Name of a container. - type: str - required: true - backing_store: - choices: - - dir - - lvm - - loop - - btrfs - - overlayfs - - zfs - description: - - Backend storage type for the container. - type: str - default: dir - template: - description: - - Name of the template to use within an LXC create. - type: str - default: ubuntu - template_options: - description: - - Template options when building the container. 
-    type: str
-  config:
-    description:
-      - Path to the LXC configuration file.
-    type: path
-  lv_name:
-    description:
-      - Name of the logical volume, defaults to the container name.
-      - If not specified, it defaults to C($CONTAINER_NAME).
-    type: str
-  vg_name:
-    description:
-      - If backend store is lvm, specify the name of the volume group.
-    type: str
-    default: lxc
-  thinpool:
-    description:
-      - Use LVM thin pool called TP.
-    type: str
-  fs_type:
-    description:
-      - Create fstype TYPE.
-    type: str
-    default: ext4
-  fs_size:
-    description:
-      - File system size.
-    type: str
-    default: 5G
-  directory:
-    description:
-      - Place rootfs directory under DIR.
-    type: path
-  zfs_root:
-    description:
-      - Create zfs under given zfsroot.
-    type: str
-  container_command:
-    description:
-      - Run a command within a container.
-    type: str
-  lxc_path:
-    description:
-      - Place container under PATH.
-    type: path
-  container_log:
-    description:
-      - Enable a container log for host actions to the container.
-    type: bool
-    default: 'no'
-  container_log_level:
-    choices:
-      - Info
-      - info
-      - INFO
-      - Error
-      - error
-      - ERROR
-      - Debug
-      - debug
-      - DEBUG
-    description:
-      - Set the log level for a container where I(container_log) was set.
-    type: str
-    required: false
-    default: INFO
-  clone_name:
-    description:
-      - Name of the new cloned server.
-      - This is only used when state is clone.
-    type: str
-  clone_snapshot:
-    description:
-      - Create a snapshot of a container when cloning.
-      - This is not supported by all container storage backends.
-      - Enabling this may fail if the backing store does not support snapshots.
-    type: bool
-    default: 'no'
-  archive:
-    description:
-      - Create an archive of a container.
-      - This will create a tarball of the running container.
-    type: bool
-    default: 'no'
-  archive_path:
-    description:
-      - Path to save the archived container.
-      - If the path does not exist the archive method will attempt to create it.
-    type: path
-  archive_compression:
-    choices:
-      - gzip
-      - bzip2
-      - none
-    description:
-      - Type of compression to use when creating an archive of a running
-        container.
-    type: str
-    default: gzip
-  state:
-    choices:
-      - started
-      - stopped
-      - restarted
-      - absent
-      - frozen
-      - clone
-    description:
-      - Define the state of a container.
-      - If you clone a container using I(clone_name), the newly cloned
-        container is created in a stopped state.
-      - The running container will be stopped while the clone operation is
-        happening and upon completion of the clone the original container
-        state will be restored.
-    type: str
-    default: started
-  container_config:
-    description:
-      - A list of C(key=value) options to use when configuring a container.
-    type: list
-    elements: str
-requirements:
-  - 'lxc >= 1.0 # OS package'
-  - 'python >= 2.6 # OS Package'
-  - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
-notes:
-  - Containers must have a unique name. If you attempt to create a container
-    with a name that already exists in the user's namespace, the module will
-    simply return as "unchanged".
-  - The "container_command" can be used with any state except "absent". If
-    used with state "stopped" the container will be "started", the command
-    executed, and then the container "stopped" again. Likewise if the state
-    is "stopped" and the container does not exist it will be first created,
-    "started", the command executed, and then "stopped". If you use a "|"
-    in the variable you can use common script formatting within the variable
-    itself. The "container_command" option will always execute as BASH.
-    When using "container_command" a log file is created in the /tmp/ directory
-    which contains both stdout and stderr of any command executed.
-  - If "archive" is **true** the system will attempt to create a compressed
-    tarball of the running container. The "archive" option supports LVM backed
-    containers and will create a snapshot of the running container when
-    creating the archive.
-  - If your distro does not have a package for "python2-lxc", which is a
-    requirement for this module, it can be installed from source at
-    "https://github.com/lxc/python2-lxc" or installed via pip using the package
-    name lxc-python2.
-'''
-
-EXAMPLES = r"""
-- name: Create a started container
-  community.general.lxc_container:
-    name: test-container-started
-    container_log: true
-    template: ubuntu
-    state: started
-    template_options: --release trusty
-
-- name: Create a stopped container
-  community.general.lxc_container:
-    name: test-container-stopped
-    container_log: true
-    template: ubuntu
-    state: stopped
-    template_options: --release trusty
-
-- name: Create a frozen container
-  community.general.lxc_container:
-    name: test-container-frozen
-    container_log: true
-    template: ubuntu
-    state: frozen
-    template_options: --release trusty
-    container_command: |
-      echo 'hello world.' | tee /opt/started-frozen
-
-# Create filesystem container, configure it, archive it, and start it.
-- name: Create filesystem container
-  community.general.lxc_container:
-    name: test-container-config
-    backing_store: dir
-    container_log: true
-    template: ubuntu
-    state: started
-    archive: true
-    archive_compression: none
-    container_config:
-      - "lxc.aa_profile=unconfined"
-      - "lxc.cgroup.devices.allow=a *:* rmw"
-    template_options: --release trusty
-
-# Create an lvm container, run a complex command in it, add additional
-# configuration to it, create an archive of it, and finally leave the container
-# in a frozen state. The container archive will be compressed using bzip2
-- name: Create a frozen lvm container
-  community.general.lxc_container:
-    name: test-container-lvm
-    container_log: true
-    template: ubuntu
-    state: frozen
-    backing_store: lvm
-    template_options: --release trusty
-    container_command: |
-      apt-get update
-      apt-get install -y vim lxc-dev
-      echo 'hello world.' | tee /opt/started
-      if [[ -f "/opt/started" ]]; then
-        echo 'hello world.' | tee /opt/found-started
-      fi
-    container_config:
-      - "lxc.aa_profile=unconfined"
-      - "lxc.cgroup.devices.allow=a *:* rmw"
-    archive: true
-    archive_compression: bzip2
-  register: lvm_container_info
-
-- name: Debug info on container "test-container-lvm"
-  ansible.builtin.debug:
-    var: lvm_container_info
-
-- name: Run a command in a container and ensure it is in a "stopped" state
-  community.general.lxc_container:
-    name: test-container-started
-    state: stopped
-    container_command: |
-      echo 'hello world.' | tee /opt/stopped
-
-- name: Run a command in a container and ensure it is in a "frozen" state
-  community.general.lxc_container:
-    name: test-container-stopped
-    state: frozen
-    container_command: |
-      echo 'hello world.' | tee /opt/frozen
-
-- name: Start a container
-  community.general.lxc_container:
-    name: test-container-stopped
-    state: started
-
-- name: Run a command in a container and then restart it
-  community.general.lxc_container:
-    name: test-container-started
-    state: restarted
-    container_command: |
-      echo 'hello world.' | tee /opt/restarted
-
-- name: Run a complex command within a "running" container
-  community.general.lxc_container:
-    name: test-container-started
-    container_command: |
-      apt-get update
-      apt-get install -y curl wget vim apache2
-      echo 'hello world.' | tee /opt/started
-      if [[ -f "/opt/started" ]]; then
-        echo 'hello world.' | tee /opt/found-started
-      fi
-
-# Create an archive of an existing container, save the archive to a defined
-# path and then destroy it.
-- name: Archive container
-  community.general.lxc_container:
-    name: test-container-started
-    state: absent
-    archive: true
-    archive_path: /opt/archives
-
-# Create a container using overlayfs, create an archive of it, create a
-# snapshot clone of the container and finally leave the container
-# in a frozen state. The container archive will be compressed using gzip.
-- name: Create an overlayfs container archive and clone it
-  community.general.lxc_container:
-    name: test-container-overlayfs
-    container_log: true
-    template: ubuntu
-    state: started
-    backing_store: overlayfs
-    template_options: --release trusty
-    clone_snapshot: true
-    clone_name: test-container-overlayfs-clone-snapshot
-    archive: true
-    archive_compression: gzip
-  register: clone_container_info
-
-- name: Debug info on container "test-container"
-  ansible.builtin.debug:
-    var: clone_container_info
-
-- name: Clone a container using snapshot
-  community.general.lxc_container:
-    name: test-container-overlayfs-clone-snapshot
-    backing_store: overlayfs
-    clone_name: test-container-overlayfs-clone-snapshot2
-    clone_snapshot: true
-
-- name: Create a new container and clone it
-  community.general.lxc_container:
-    name: test-container-new-archive
-    backing_store: dir
-    clone_name: test-container-new-archive-clone
-
-- name: Archive and clone a container then destroy it
-  community.general.lxc_container:
-    name: test-container-new-archive
-    state: absent
-    clone_name: test-container-new-archive-destroyed-clone
-    archive: true
-    archive_compression: gzip
-
-- name: Start a cloned container
-  community.general.lxc_container:
-    name: test-container-new-archive-destroyed-clone
-    state: started
-
-- name: Destroy a container
-  community.general.lxc_container:
-    name: '{{ item }}'
-    state: absent
-  with_items:
-    - test-container-stopped
-    - test-container-started
-    - test-container-frozen
-    - test-container-lvm
-    - test-container-config
-    - test-container-overlayfs
-    - test-container-overlayfs-clone
-    - test-container-overlayfs-clone-snapshot
-    - test-container-overlayfs-clone-snapshot2
-    - test-container-new-archive
-    - test-container-new-archive-clone
-    - test-container-new-archive-destroyed-clone
-"""
-
-RETURN = r"""
-lxc_container:
-  description: container information
-  returned: success
-  type: complex
-  contains:
-    name:
-      description: name of the lxc container
-      returned: success
-      type: str
-      sample: test_host
-    init_pid:
-      description: pid of the lxc init process
-      returned: success
-      type: int
-      sample: 19786
-    interfaces:
-      description: list of the container's network interfaces
-      returned: success
-      type: list
-      sample: [ "eth0", "lo" ]
-    ips:
-      description: list of ips
-      returned: success
-      type: list
-      sample: [ "10.0.3.3" ]
-    state:
-      description: resulting state of the container
-      returned: success
-      type: str
-      sample: "running"
-    archive:
-      description: path to the created archive of the container
-      returned: success, when archive is true
-      type: str
-      sample: "/tmp/test-container-config.tar"
-    clone:
-      description: if the container was cloned
-      returned: success, when clone_name is specified
-      type: bool
-      sample: True
-"""
-
-import os
-import os.path
-import re
-import shutil
-import subprocess
-import tempfile
-import time
-
-try:
-    import lxc
-except ImportError:
-    HAS_LXC = False
-else:
-    HAS_LXC = True
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_text, to_bytes
-
-
-# LXC_COMPRESSION_MAP is a map of available compression types when creating
-# an archive of a container.
-LXC_COMPRESSION_MAP = {
-    'gzip': {
-        'extension': 'tar.tgz',
-        'argument': '-czf'
-    },
-    'bzip2': {
-        'extension': 'tar.bz2',
-        'argument': '-cjf'
-    },
-    'none': {
-        'extension': 'tar',
-        'argument': '-cf'
-    }
-}
-
-
-# LXC_COMMAND_MAP is a map of variables that are available to a method based
-# on the state the container is in.
-LXC_COMMAND_MAP = {
-    'create': {
-        'variables': {
-            'config': '--config',
-            'template': '--template',
-            'backing_store': '--bdev',
-            'lxc_path': '--lxcpath',
-            'lv_name': '--lvname',
-            'vg_name': '--vgname',
-            'thinpool': '--thinpool',
-            'fs_type': '--fstype',
-            'fs_size': '--fssize',
-            'directory': '--dir',
-            'zfs_root': '--zfsroot'
-        }
-    },
-    'clone': {
-        'variables-lxc-copy': {
-            'backing_store': '--backingstorage',
-            'lxc_path': '--lxcpath',
-            'fs_size': '--fssize',
-            'name': '--name',
-            'clone_name': '--newname'
-        },
-        # lxc-clone is deprecated in favor of lxc-copy
-        'variables-lxc-clone': {
-            'backing_store': '--backingstore',
-            'lxc_path': '--lxcpath',
-            'fs_size': '--fssize',
-            'name': '--orig',
-            'clone_name': '--new'
-        }
-    }
-}
-
-
-# LXC_BACKING_STORE is a map of available storage backends and options that
-# are incompatible with the given storage backend.
-# LXC_BACKING_STORE maps each available storage backend to the module
-# options that are incompatible with it.
-LXC_BACKING_STORE = {
-    'dir': [
-        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
-    ],
-    'lvm': [
-        'zfs_root'
-    ],
-    'btrfs': [
-        'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
-    ],
-    'loop': [
-        'lv_name', 'vg_name', 'thinpool', 'zfs_root'
-    ],
-    'overlayfs': [
-        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
-    ],
-    'zfs': [
-        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
-    ]
-}
-
-
-# LXC_LOGGING_LEVELS is a map of available log levels
-LXC_LOGGING_LEVELS = {
-    'INFO': ['info', 'INFO', 'Info'],
-    'ERROR': ['error', 'ERROR', 'Error'],
-    'DEBUG': ['debug', 'DEBUG', 'Debug']
-}
-
-
-# LXC_ANSIBLE_STATES maps each requested state to the method that
-# enforces it.
-LXC_ANSIBLE_STATES = {
-    'started': '_started',
-    'stopped': '_stopped',
-    'restarted': '_restarted',
-    'absent': '_destroyed',
-    'frozen': '_frozen',
-    'clone': '_clone'
-}
-
-
-# This is used to attach to a running container and execute commands from
-# within the container on the host. This will provide local access to a
-# container without using SSH. The template will attempt to work within the
-# home directory of the user that was attached to the container and source
-# that user's environment variables by default.
-ATTACH_TEMPLATE = """#!/usr/bin/env bash
-pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
-    if [[ -f ".bashrc" ]];then
-        source .bashrc
-        unset HOSTNAME
-    fi
-popd
-
-# User defined command
-%(container_command)s
-"""
-
-
-def create_script(command):
-    """Write out a script onto a target.
-
-    This method should be backward compatible with Python 2.4+ when executing
-    from within the container.
-
-    :param command: command to run, this can be a script and can use spacing
-                    with newlines as separation.
-    :type command: ``str``
-    """
-
-    (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
-    f = os.fdopen(fd, 'wb')
-    try:
-        f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
-        f.flush()
-    finally:
-        f.close()
-
-    # Ensure the script is executable.
-    os.chmod(script_file, int('0700', 8))
-
-    # Output log file.
-    stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
-
-    # Error log file.
-    stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
-
-    # Execute the script command.
-    try:
-        subprocess.Popen(
-            [script_file],
-            stdout=stdout_file,
-            stderr=stderr_file
-        ).communicate()
-    finally:
-        # Close the log files.
-        stderr_file.close()
-        stdout_file.close()
-
-        # Remove the script file upon completion of execution.
-        os.remove(script_file)
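Editor's note: a hedged usage sketch (assuming the python3-lxc bindings and an existing, running container; the container name is illustrative) of how `create_script` is consumed further down. The module passes it to `attach_wait()`, which runs the callback inside the container's namespaces; see `_execute_command()` below:

    import lxc

    container = lxc.Container('test-container-started')
    if container.running:
        # Enters the container and calls create_script(payload) there,
        # mirroring self.container.attach_wait(create_script, container_command).
        container.attach_wait(create_script, "echo 'hello world.' | tee /opt/hello")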
-
-
-class LxcContainerManagement(object):
-    def __init__(self, module):
-        """Management of LXC containers via Ansible.
-
-        :param module: Processed Ansible Module.
-        :type module: ``object``
-        """
-        self.module = module
-        self.state = self.module.params.get('state', None)
-        self.state_change = False
-        self.lxc_vg = None
-        self.lxc_path = self.module.params.get('lxc_path', None)
-        self.container_name = self.module.params['name']
-        self.container = self.get_container_bind()
-        self.archive_info = None
-        self.clone_info = None
-
-    def get_container_bind(self):
-        return lxc.Container(name=self.container_name)
-
-    @staticmethod
-    def _roundup(num):
-        """Return a rounded floating point number.
-
-        :param num: Number to round up.
-        :type num: ``float``
-        :returns: Rounded up number.
-        :rtype: ``int``
-        """
-        num, part = str(num).split('.')
-        num = int(num)
-        if int(part) != 0:
-            num += 1
-        return num
-
-    @staticmethod
-    def _container_exists(container_name, lxc_path=None):
-        """Check if a container exists.
-
-        :param container_name: Name of the container.
-        :type container_name: ``str``
-        :returns: True or False if the container is found.
-        :rtype: ``bool``
-        """
-        if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
-            return True
-        else:
-            return False
-
-    @staticmethod
-    def _add_variables(variables_dict, build_command):
-        """Return a command list with all found options.
-
-        :param variables_dict: Pre-parsed optional variables used from a
-                               seed command.
-        :type variables_dict: ``dict``
-        :param build_command: Command to run.
-        :type build_command: ``list``
-        :returns: list of command options.
-        :rtype: ``list``
-        """
-
-        for key, value in variables_dict.items():
-            build_command.append(
-                '%s %s' % (key, value)
-            )
-        return build_command
-
-    def _get_vars(self, variables):
-        """Return a dict of all variables as found within the module.
-
-        :param variables: Hash of all variables to find.
-        :type variables: ``dict``
-        """
-
-        # Remove incompatible storage backend options.
-        variables = variables.copy()
-        for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
-            variables.pop(v, None)
-
-        return_dict = dict()
-        false_values = BOOLEANS_FALSE.union([None, ''])
-        for k, v in variables.items():
-            _var = self.module.params.get(k)
-            if _var not in false_values:
-                return_dict[v] = _var
-        return return_dict
-
-    def _run_command(self, build_command, unsafe_shell=False):
-        """Return information from running an Ansible Command.
-
-        This will squash the build command list into a string and then
-        execute the command via Ansible. The output is returned to the method.
-        This output is returned as `return_code`, `stdout`, `stderr`.
-
-        :param build_command: Used for the command and all options.
-        :type build_command: ``list``
-        :param unsafe_shell: Enable or disable unsafe shell commands.
-        :type unsafe_shell: ``bool``
-        """
-
-        return self.module.run_command(
-            ' '.join(build_command),
-            use_unsafe_shell=unsafe_shell
-        )
-
-    def _config(self):
-        """Configure an LXC container.
-
-        Write new configuration values to the lxc config file. This will
-        stop the container if it is running, write the new options, and then
-        restart the container upon completion.
- """ - - _container_config = self.module.params.get('container_config') - if not _container_config: - return False - - container_config_file = self.container.config_file_name - with open(container_config_file, 'rb') as f: - container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True) - - parsed_options = [i.split('=', 1) for i in _container_config] - config_change = False - for key, value in parsed_options: - key = key.strip() - value = value.strip() - new_entry = '%s = %s\n' % (key, value) - keyre = re.compile(r'%s(\s+)?=' % key) - for option_line in container_config: - # Look for key in config - if keyre.match(option_line): - dummy, _value = option_line.split('=', 1) - config_value = ' '.join(_value.split()) - line_index = container_config.index(option_line) - # If the sanitized values don't match replace them - if value != config_value: - line_index += 1 - if new_entry not in container_config: - config_change = True - container_config.insert(line_index, new_entry) - # Break the flow as values are written or not at this point - break - else: - config_change = True - container_config.append(new_entry) - - # If the config changed restart the container. - if config_change: - container_state = self._get_state() - if container_state != 'stopped': - self.container.stop() - - with open(container_config_file, 'wb') as f: - f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config]) - - self.state_change = True - if container_state == 'running': - self._container_startup() - elif container_state == 'frozen': - self._container_startup() - self.container.freeze() - - def _container_create_clone(self): - """Clone a new LXC container from an existing container. - - This method will clone an existing container to a new container using - the `clone_name` variable as the new container name. The method will - create a container if the container `name` does not exist. - - Note that cloning a container will ensure that the original container - is "stopped" before the clone can be done. Because this operation can - require a state change the method will return the original container - to its prior state upon completion of the clone. - - Once the clone is complete the new container will be left in a stopped - state. - """ - - # Ensure that the state of the original container is stopped - container_state = self._get_state() - if container_state != 'stopped': - self.state_change = True - self.container.stop() - - # lxc-clone is deprecated in favor of lxc-copy - clone_vars = 'variables-lxc-copy' - clone_cmd = self.module.get_bin_path('lxc-copy') - if not clone_cmd: - clone_vars = 'variables-lxc-clone' - clone_cmd = self.module.get_bin_path('lxc-clone', True) - - build_command = [ - clone_cmd, - ] - - build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['clone'][clone_vars] - ), - build_command=build_command - ) - - # Load logging for the instance when creating it. - if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: - build_command.append('--snapshot') - # Check for backing_store == overlayfs if so force the use of snapshot - # If overlay fs is used and snapshot is unset the clone command will - # fail with an unsupported type. - elif self.module.params.get('backing_store') == 'overlayfs': - build_command.append('--snapshot') - - rc, return_data, err = self._run_command(build_command) - if rc != 0: - message = "Failed executing %s." 
% os.path.basename(clone_cmd) - self.failure( - err=err, rc=rc, msg=message, command=' '.join( - build_command - ) - ) - else: - self.state_change = True - # Restore the original state of the origin container if it was - # not in a stopped state. - if container_state == 'running': - self.container.start() - elif container_state == 'frozen': - self.container.start() - self.container.freeze() - - return True - - def _create(self): - """Create a new LXC container. - - This method will build and execute a shell command to build the - container. It would have been nice to simply use the lxc python library - however at the time this was written the python library, in both py2 - and py3 didn't support some of the more advanced container create - processes. These missing processes mainly revolve around backing - LXC containers with block devices. - """ - - build_command = [ - self.module.get_bin_path('lxc-create', True), - '--name %s' % self.container_name, - '--quiet' - ] - - build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['create']['variables'] - ), - build_command=build_command - ) - - # Load logging for the instance when creating it. - if self.module.params.get('container_log') in BOOLEANS_TRUE: - # Set the logging path to the /var/log/lxc if uid is root. else - # set it to the home folder of the user executing. - try: - if os.getuid() != 0: - log_path = os.getenv('HOME') - else: - if not os.path.isdir('/var/log/lxc/'): - os.makedirs('/var/log/lxc/') - log_path = '/var/log/lxc/' - except OSError: - log_path = os.getenv('HOME') - - build_command.extend([ - '--logfile %s' % os.path.join( - log_path, 'lxc-%s.log' % self.container_name - ), - '--logpriority %s' % self.module.params.get( - 'container_log_level' - ).upper() - ]) - - # Add the template commands to the end of the command if there are any - template_options = self.module.params.get('template_options', None) - if template_options: - build_command.append('-- %s' % template_options) - - rc, return_data, err = self._run_command(build_command) - if rc != 0: - message = "Failed executing lxc-create." - self.failure( - err=err, rc=rc, msg=message, command=' '.join(build_command) - ) - else: - self.state_change = True - - def _container_data(self): - """Returns a dict of container information. - - :returns: container data - :rtype: ``dict`` - """ - - return { - 'interfaces': self.container.get_interfaces(), - 'ips': self.container.get_ips(), - 'state': self._get_state(), - 'init_pid': int(self.container.init_pid), - 'name': self.container_name, - } - - def _unfreeze(self): - """Unfreeze a container. - - :returns: True or False based on if the container was unfrozen. - :rtype: ``bol`` - """ - - unfreeze = self.container.unfreeze() - if unfreeze: - self.state_change = True - return unfreeze - - def _get_state(self): - """Return the state of a container. - - If the container is not found the state returned is "absent" - - :returns: state of a container as a lower case string. 
-        :rtype: ``str``
-        """
-
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            return str(self.container.state).lower()
-        return str('absent')
-
-    def _execute_command(self):
-        """Execute a shell command."""
-
-        container_command = self.module.params.get('container_command')
-        if container_command:
-            container_state = self._get_state()
-            if container_state == 'frozen':
-                self._unfreeze()
-            elif container_state == 'stopped':
-                self._container_startup()
-
-            self.container.attach_wait(create_script, container_command)
-            self.state_change = True
-
-    def _container_startup(self, timeout=60):
-        """Ensure a container is started.
-
-        :param timeout: Time before the startup operation is abandoned.
-        :type timeout: ``int``
-        """
-
-        self.container = self.get_container_bind()
-        for dummy in xrange(timeout):
-            if self._get_state() != 'running':
-                self.container.start()
-                self.state_change = True
-                # post startup sleep for 1 second.
-                time.sleep(1)
-            else:
-                return True
-        self.failure(
-            lxc_container=self._container_data(),
-            error='Failed to start container'
-                  ' [ %s ]' % self.container_name,
-            rc=1,
-            msg='The container [ %s ] failed to start. Check that lxc is'
-                ' available and that the container is in a functional'
-                ' state.' % self.container_name
-        )
-
-    def _check_archive(self):
-        """Create a compressed archive of a container.
-
-        This will store the archive information as self.archive_info.
-        """
-
-        if self.module.params.get('archive') in BOOLEANS_TRUE:
-            self.archive_info = {
-                'archive': self._container_create_tar()
-            }
-
-    def _check_clone(self):
-        """Create a clone of a container when a clone name is provided.
-
-        This will store the clone information as self.clone_info.
-        """
-
-        clone_name = self.module.params.get('clone_name')
-        if clone_name:
-            if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
-                self.clone_info = {
-                    'cloned': self._container_create_clone()
-                }
-            else:
-                self.clone_info = {
-                    'cloned': False
-                }
-
-    def _destroyed(self, timeout=60):
-        """Ensure a container is destroyed.
-
-        :param timeout: Time before the destroy operation is abandoned.
-        :type timeout: ``int``
-        """
-
-        for dummy in xrange(timeout):
-            if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-                break
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-
-            if self._get_state() != 'stopped':
-                self.state_change = True
-                self.container.stop()
-
-            if self.container.destroy():
-                self.state_change = True
-
-            # post destroy attempt sleep for 1 second.
-            time.sleep(1)
-        else:
-            self.failure(
-                lxc_container=self._container_data(),
-                error='Failed to destroy container'
-                      ' [ %s ]' % self.container_name,
-                rc=1,
-                msg='The container [ %s ] failed to be destroyed. Check'
-                    ' that lxc is available and that the container is in a'
-                    ' functional state.' % self.container_name
-            )
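Editor's note: the state handlers that follow (`_frozen`, `_restarted`, `_stopped`, `_started`) all share one shape. A condensed sketch of that pattern (`_ensure` is a hypothetical helper, not module code):

    def _ensure(self, transition, count=0):
        """Create the container if missing, otherwise run the user command,
        apply config changes, make the requested state transition, then run
        the archive/clone hooks."""
        self.check_count(count=count, method='ensure')
        if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
            self._create()
            return self._ensure(transition, count + 1)  # retry once after creating
        self._execute_command()  # run any user supplied container_command
        self._config()           # apply container_config changes
        transition()             # e.g. self.container.freeze / self.container.stop
        self._check_archive()    # optional tarball of the container
        self._check_clone()      # optional clone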
-    def _frozen(self, count=0):
-        """Ensure a container is frozen.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='frozen')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            container_state = self._get_state()
-            if container_state == 'frozen':
-                pass
-            elif container_state == 'running':
-                self.container.freeze()
-                self.state_change = True
-            else:
-                self._container_startup()
-                self.container.freeze()
-                self.state_change = True
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._frozen(count)
-
-    def _restarted(self, count=0):
-        """Ensure a container is restarted.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='restart')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            if self._get_state() != 'stopped':
-                self.container.stop()
-                self.state_change = True
-
-            # Run container startup
-            self._container_startup()
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._restarted(count)
-
-    def _stopped(self, count=0):
-        """Ensure a container is stopped.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='stop')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            if self._get_state() != 'stopped':
-                self.container.stop()
-                self.state_change = True
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._stopped(count)
-
-    def _started(self, count=0):
-        """Ensure a container is started.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='start')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            container_state = self._get_state()
-            if container_state == 'running':
-                pass
-            elif container_state == 'frozen':
-                self._unfreeze()
-            elif not self._container_startup():
-                self.failure(
-                    lxc_container=self._container_data(),
-                    error='Failed to start container'
-                          ' [ %s ]' % self.container_name,
-                    rc=1,
-                    msg='The container [ %s ] failed to start. Check that lxc is'
-                        ' available and that the container is in a functional'
-                        ' state.' % self.container_name
-                )
-
-            # Run the user defined command
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            # Check if the container needs to have an archive created.
- self._check_archive() - - # Check if the container is to be cloned - self._check_clone() - else: - self._create() - count += 1 - self._started(count) - - def _get_lxc_vg(self): - """Return the name of the Volume Group used in LXC.""" - - build_command = [ - self.module.get_bin_path('lxc-config', True), - "lxc.bdev.lvm.vg" - ] - rc, vg, err = self._run_command(build_command) - if rc != 0: - self.failure( - err=err, - rc=rc, - msg='Failed to read LVM VG from LXC config', - command=' '.join(build_command) - ) - else: - return str(vg.strip()) - - def _lvm_lv_list(self): - """Return a list of all lv in a current vg.""" - - vg = self._get_lxc_vg() - build_command = [ - self.module.get_bin_path('lvs', True) - ] - rc, stdout, err = self._run_command(build_command) - if rc != 0: - self.failure( - err=err, - rc=rc, - msg='Failed to get list of LVs', - command=' '.join(build_command) - ) - - all_lvms = [i.split() for i in stdout.splitlines()][1:] - return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] - - def _get_vg_free_pe(self, vg_name): - """Return the available size of a given VG. - - :param vg_name: Name of volume. - :type vg_name: ``str`` - :returns: size and measurement of an LV - :type: ``tuple`` - """ - - build_command = [ - 'vgdisplay', - vg_name, - '--units', - 'g' - ] - rc, stdout, err = self._run_command(build_command) - if rc != 0: - self.failure( - err=err, - rc=rc, - msg='failed to read vg %s' % vg_name, - command=' '.join(build_command) - ) - - vg_info = [i.strip() for i in stdout.splitlines()][1:] - free_pe = [i for i in vg_info if i.startswith('Free')] - _free_pe = free_pe[0].split() - return float(_free_pe[-2]), _free_pe[-1] - - def _get_lv_size(self, lv_name): - """Return the available size of a given LV. - - :param lv_name: Name of volume. - :type lv_name: ``str`` - :returns: size and measurement of an LV - :type: ``tuple`` - """ - - vg = self._get_lxc_vg() - lv = os.path.join(vg, lv_name) - build_command = [ - 'lvdisplay', - lv, - '--units', - 'g' - ] - rc, stdout, err = self._run_command(build_command) - if rc != 0: - self.failure( - err=err, - rc=rc, - msg='failed to read lv %s' % lv, - command=' '.join(build_command) - ) - - lv_info = [i.strip() for i in stdout.splitlines()][1:] - _free_pe = [i for i in lv_info if i.startswith('LV Size')] - free_pe = _free_pe[0].split() - return self._roundup(float(free_pe[-2])), free_pe[-1] - - def _lvm_snapshot_create(self, source_lv, snapshot_name, - snapshot_size_gb=5): - """Create an LVM snapshot. 
-
-        :param source_lv: Name of lv to snapshot
-        :type source_lv: ``str``
-        :param snapshot_name: Name of lv snapshot
-        :type snapshot_name: ``str``
-        :param snapshot_size_gb: Size of snapshot to create
-        :type snapshot_size_gb: ``int``
-        """
-
-        vg = self._get_lxc_vg()
-        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
-
-        if free_space < float(snapshot_size_gb):
-            message = (
-                'Snapshot size [ %s ] is greater than the free space [ %s ] on'
-                ' volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
-            )
-            self.failure(
-                error='Not enough space to create snapshot',
-                rc=2,
-                msg=message
-            )
-
-        # Create LVM Snapshot
-        build_command = [
-            self.module.get_bin_path('lvcreate', True),
-            "-n",
-            snapshot_name,
-            "-s",
-            os.path.join(vg, source_lv),
-            "-L%sg" % snapshot_size_gb
-        ]
-        rc, stdout, err = self._run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='Failed to create LVM snapshot %s/%s --> %s'
-                    % (vg, source_lv, snapshot_name)
-            )
-
-    def _lvm_lv_mount(self, lv_name, mount_point):
-        """Mount an LV.
-
-        :param lv_name: name of the logical volume to mount
-        :type lv_name: ``str``
-        :param mount_point: path on the file system that is mounted.
-        :type mount_point: ``str``
-        """
-
-        vg = self._get_lxc_vg()
-
-        build_command = [
-            self.module.get_bin_path('mount', True),
-            "/dev/%s/%s" % (vg, lv_name),
-            mount_point,
-        ]
-        rc, stdout, err = self._run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to mount LVM LV %s/%s to %s'
-                    % (vg, lv_name, mount_point)
-            )
-
-    def _create_tar(self, source_dir):
-        """Create an archive of a given ``source_dir`` under the configured
-        ``archive_path``.
-
-        :param source_dir: Path to the directory to be archived.
-        :type source_dir: ``str``
-        """
-
-        old_umask = os.umask(int('0077', 8))
-
-        archive_path = self.module.params.get('archive_path')
-        if not os.path.isdir(archive_path):
-            os.makedirs(archive_path)
-
-        archive_compression = self.module.params.get('archive_compression')
-        compression_type = LXC_COMPRESSION_MAP[archive_compression]
-
-        # Build the archive name from the archive path, container name and
-        # compression extension.
-        archive_name = '%s.%s' % (
-            os.path.join(
-                archive_path,
-                self.container_name
-            ),
-            compression_type['extension']
-        )
-
-        build_command = [
-            self.module.get_bin_path('tar', True),
-            '--directory=%s' % os.path.realpath(
-                os.path.expanduser(source_dir)
-            ),
-            compression_type['argument'],
-            archive_name,
-            '.'
-        ]
-
-        rc, stdout, err = self._run_command(
-            build_command=build_command,
-            unsafe_shell=True
-        )
-
-        os.umask(old_umask)
-
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to create tar archive',
-                command=' '.join(build_command)
-            )
-
-        return archive_name
-
-    def _lvm_lv_remove(self, lv_name):
-        """Remove an LV.
-
-        :param lv_name: The name of the logical volume
-        :type lv_name: ``str``
-        """
-
-        vg = self._get_lxc_vg()
-        build_command = [
-            self.module.get_bin_path('lvremove', True),
-            "-f",
-            "%s/%s" % (vg, lv_name),
-        ]
-        rc, stdout, err = self._run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
-                command=' '.join(build_command)
-            )
-
-    def _rsync_data(self, container_path, temp_dir):
-        """Sync the container directory to the temp directory.
-
-        :param container_path: path to the container data
-        :type container_path: ``str``
-        :param temp_dir: path to the temporary local working directory
-        :type temp_dir: ``str``
-        """
-        # This loop is created to support overlayfs archives. This should
-        # squash all of the layers into a single archive.
-        fs_paths = container_path.split(':')
-        if 'overlayfs' in fs_paths:
-            fs_paths.pop(fs_paths.index('overlayfs'))
-
-        for fs_path in fs_paths:
-            # Set the path to the container data
-            fs_path = os.path.dirname(fs_path)
-
-            # Run the sync command
-            build_command = [
-                self.module.get_bin_path('rsync', True),
-                '-aHAX',
-                fs_path,
-                temp_dir
-            ]
-            rc, stdout, err = self._run_command(
-                build_command,
-                unsafe_shell=True
-            )
-            if rc != 0:
-                self.failure(
-                    err=err,
-                    rc=rc,
-                    msg='failed to perform archive',
-                    command=' '.join(build_command)
-                )
-
-    def _unmount(self, mount_point):
-        """Unmount a file system.
-
-        :param mount_point: path on the file system that is mounted.
-        :type mount_point: ``str``
-        """
-
-        build_command = [
-            self.module.get_bin_path('umount', True),
-            mount_point,
-        ]
-        rc, stdout, err = self._run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to unmount [ %s ]' % mount_point,
-                command=' '.join(build_command)
-            )
-
-    def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
-        """Mount an overlayfs.
-
-        :param lowerdir: name/path of the lower directory
-        :type lowerdir: ``str``
-        :param upperdir: name/path of the upper directory
-        :type upperdir: ``str``
-        :param mount_point: path on the file system that is mounted.
-        :type mount_point: ``str``
-        """
-
-        build_command = [
-            self.module.get_bin_path('mount', True),
-            '-t overlayfs',
-            '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
-            'overlayfs',
-            mount_point,
-        ]
-        rc, stdout, err = self._run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
-                    % (lowerdir, upperdir, mount_point, build_command)
-            )
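Editor's note: a small sketch (hypothetical paths, not module code) of how `_container_create_tar()` below derives the overlayfs mount arguments from the container's rootfs string before calling `_overlayfs_mount()`:

    # An overlayfs-backed rootfs is stored as 'overlayfs:<lowerdir>:<upperdir>'.
    lxc_rootfs = 'overlayfs:/var/lib/lxc/base/rootfs:/var/lib/lxc/clone/delta0'
    lowerdir, upperdir = lxc_rootfs.split(':')[1:]
    # Equivalent CLI call issued by _overlayfs_mount():
    #   mount -t overlayfs -o lowerdir=<lowerdir>,upperdir=<upperdir> overlayfs <mount_point>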
-
-    def _container_create_tar(self):
-        """Create a tar archive from an LXC container.
-
-        The process is as follows:
-            * Stop or Freeze the container
-            * Create temporary dir
-            * Copy container and config to temporary directory
-            * If LVM backed:
-                * Create LVM snapshot of LV backing the container
-                * Mount the snapshot to tmpdir/rootfs
-            * Restore the state of the container
-            * Create tar of tmpdir
-            * Clean up
-        """
-
-        # Create a temp dir
-        temp_dir = tempfile.mkdtemp()
-
-        # Set the name of the working dir, temp + container_name
-        work_dir = os.path.join(temp_dir, self.container_name)
-
-        # LXC container rootfs
-        lxc_rootfs = self.container.get_config_item('lxc.rootfs')
-
-        # Test if the container's rootfs is a block device
-        block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
-
-        # Test if the container is using overlayfs
-        overlayfs_backed = lxc_rootfs.startswith('overlayfs')
-
-        mount_point = os.path.join(work_dir, 'rootfs')
-
-        # Set the snapshot name if needed
-        snapshot_name = '%s_lxc_snapshot' % self.container_name
-
-        container_state = self._get_state()
-        try:
-            # Ensure the original container is stopped or frozen
-            if container_state not in ['stopped', 'frozen']:
-                if container_state == 'running':
-                    self.container.freeze()
-                else:
-                    self.container.stop()
-
-            # Sync the container data from the container_path to work_dir
-            self._rsync_data(lxc_rootfs, temp_dir)
-
-            if block_backed:
-                if snapshot_name not in self._lvm_lv_list():
-                    if not os.path.exists(mount_point):
-                        os.makedirs(mount_point)
-
-                    # Take snapshot
-                    size, measurement = self._get_lv_size(
-                        lv_name=self.container_name
-                    )
-                    self._lvm_snapshot_create(
-                        source_lv=self.container_name,
-                        snapshot_name=snapshot_name,
-                        snapshot_size_gb=size
-                    )
-
-                    # Mount snapshot
-                    self._lvm_lv_mount(
-                        lv_name=snapshot_name,
-                        mount_point=mount_point
-                    )
-                else:
-                    self.failure(
-                        err='snapshot [ %s ] already exists' % snapshot_name,
-                        rc=1,
-                        msg='The snapshot [ %s ] already exists. Please clean'
-                            ' up old snapshot of containers before continuing.'
-                            % snapshot_name
-                    )
-            elif overlayfs_backed:
-                lowerdir, upperdir = lxc_rootfs.split(':')[1:]
-                self._overlayfs_mount(
-                    lowerdir=lowerdir,
-                    upperdir=upperdir,
-                    mount_point=mount_point
-                )
-
-            # Set the state as changed and set a new fact
-            self.state_change = True
-            return self._create_tar(source_dir=work_dir)
-        finally:
-            if block_backed or overlayfs_backed:
-                # unmount snapshot
-                self._unmount(mount_point)
-
-            if block_backed:
-                # Remove snapshot
-                self._lvm_lv_remove(snapshot_name)
-
-            # Restore original state of container
-            if container_state == 'running':
-                if self._get_state() == 'frozen':
-                    self.container.unfreeze()
-                else:
-                    self.container.start()
-
-            # Remove tmpdir
-            shutil.rmtree(temp_dir)
-
-    def check_count(self, count, method):
-        if count > 1:
-            self.failure(
-                error='Failed to %s container' % method,
-                rc=1,
-                msg='The container [ %s ] failed to %s. Check that lxc is'
-                    ' available and that the container is in a functional'
-                    ' state.' % (self.container_name, method)
-            )
-
-    def failure(self, **kwargs):
-        """Return a Failure when running an Ansible command.
-
-        :param error: ``str`` Error that occurred.
-        :param rc: ``int`` Return code while executing an Ansible command.
-        :param msg: ``str`` Message to report.
- """ - - self.module.fail_json(**kwargs) - - def run(self): - """Run the main method.""" - - action = getattr(self, LXC_ANSIBLE_STATES[self.state]) - action() - - outcome = self._container_data() - if self.archive_info: - outcome.update(self.archive_info) - - if self.clone_info: - outcome.update(self.clone_info) - - self.module.exit_json( - changed=self.state_change, - lxc_container=outcome - ) - - -def main(): - """Ansible Main module.""" - - module = AnsibleModule( - argument_spec=dict( - name=dict( - type='str', - required=True - ), - template=dict( - type='str', - default='ubuntu' - ), - backing_store=dict( - type='str', - choices=list(LXC_BACKING_STORE.keys()), - default='dir' - ), - template_options=dict( - type='str' - ), - config=dict( - type='path', - ), - vg_name=dict( - type='str', - default='lxc' - ), - thinpool=dict( - type='str' - ), - fs_type=dict( - type='str', - default='ext4' - ), - fs_size=dict( - type='str', - default='5G' - ), - directory=dict( - type='path' - ), - zfs_root=dict( - type='str' - ), - lv_name=dict( - type='str' - ), - lxc_path=dict( - type='path' - ), - state=dict( - choices=list(LXC_ANSIBLE_STATES.keys()), - default='started' - ), - container_command=dict( - type='str' - ), - container_config=dict( - type='list', - elements='str' - ), - container_log=dict( - type='bool', - default=False - ), - container_log_level=dict( - choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], - default='INFO' - ), - clone_name=dict( - type='str', - required=False - ), - clone_snapshot=dict( - type='bool', - default='false' - ), - archive=dict( - type='bool', - default=False - ), - archive_path=dict( - type='path', - ), - archive_compression=dict( - choices=list(LXC_COMPRESSION_MAP.keys()), - default='gzip' - ) - ), - supports_check_mode=False, - required_if=([ - ('archive', True, ['archive_path']) - ]), - ) - - if not HAS_LXC: - module.fail_json( - msg='The `lxc` module is not importable. Check the requirements.' - ) - - lv_name = module.params.get('lv_name') - if not lv_name: - module.params['lv_name'] = module.params.get('name') - - lxc_manage = LxcContainerManagement(module=module) - lxc_manage.run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/lxd/lxd_container.py b/plugins/modules/cloud/lxd/lxd_container.py deleted file mode 100644 index e06b72b244..0000000000 --- a/plugins/modules/cloud/lxd/lxd_container.py +++ /dev/null @@ -1,744 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Hiroaki Nakamura -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lxd_container -short_description: Manage LXD Containers -description: - - Management of LXD containers -author: "Hiroaki Nakamura (@hnakamur)" -options: - name: - description: - - Name of a container. - type: str - required: true - architecture: - description: - - 'The architecture for the container (for example C(x86_64) or C(i686)). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' - type: str - required: false - config: - description: - - 'The config for the container (for example C({"limits.cpu": "2"})). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' 
-      - If the container already exists and its "config" values in metadata
-        obtained from GET /1.0/containers/<name>
-        U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
-        are different, this module tries to apply the configurations.
-      - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
-    type: dict
-    required: false
-  ignore_volatile_options:
-    description:
-      - If set to C(true), options starting with C(volatile.) are ignored. As a result,
-        they are reapplied for each execution.
-      - This default behavior can be changed by setting this option to C(false).
-      - The current default value C(true) is deprecated since community.general 4.0.0,
-        and will change to C(false) in community.general 6.0.0.
-    type: bool
-    required: false
-    version_added: 3.7.0
-  profiles:
-    description:
-      - Profiles to be applied to the container.
-    type: list
-    elements: str
-  devices:
-    description:
-      - 'The devices for the container
-        (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
-    type: dict
-    required: false
-  ephemeral:
-    description:
-      - Whether or not the container is ephemeral (for example C(true) or C(false)).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
-    required: false
-    type: bool
-  source:
-    description:
-      - 'The source for the container
-        (e.g. { "type": "image",
-        "mode": "pull",
-        "server": "https://images.linuxcontainers.org",
-        "protocol": "lxd",
-        "alias": "ubuntu/xenial/amd64" }).'
-      - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
-      - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
-    required: false
-    type: dict
-  state:
-    choices:
-      - started
-      - stopped
-      - restarted
-      - absent
-      - frozen
-    description:
-      - Define the state of a container.
-    required: false
-    default: started
-    type: str
-  target:
-    description:
-      - For cluster deployments. Will attempt to create a container on a target node.
-        If the container exists elsewhere in the cluster, it will not be replaced or moved.
-        The name should correspond to the name of a node as shown in C(lxc cluster list).
-    type: str
-    required: false
-    version_added: 1.0.0
-  timeout:
-    description:
-      - A timeout for changing the state of the container.
-      - This is also used as a timeout for waiting until IPv4 addresses
-        are assigned to all network interfaces in the container after
-        starting or restarting.
-    required: false
-    default: 30
-    type: int
-  wait_for_ipv4_addresses:
-    description:
-      - If this is true, the C(lxd_container) waits until IPv4 addresses
-        are assigned to all network interfaces in the container after
-        starting or restarting.
-    required: false
-    default: false
-    type: bool
-  force_stop:
-    description:
-      - If this is true, the C(lxd_container) forcibly stops the container
-        when it stops or restarts the container.
-    required: false
-    default: false
-    type: bool
-  url:
-    description:
-      - The unix domain socket path or the https URL for the LXD server.
-    required: false
-    default: unix:/var/lib/lxd/unix.socket
-    type: str
-  snap_url:
-    description:
-      - The unix domain socket path when LXD is installed by snap package manager.
-    required: false
-    default: unix:/var/snap/lxd/common/lxd/unix.socket
-    type: str
-  client_key:
-    description:
-      - The client certificate key file path.
-      - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
-    required: false
-    aliases: [ key_file ]
-    type: path
-  client_cert:
-    description:
-      - The client certificate file path.
-      - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
-    required: false
-    aliases: [ cert_file ]
-    type: path
-  trust_password:
-    description:
-      - The client trusted password.
-      - 'You need to set this password on the LXD server before
-        running this module using the following command:
-        C(lxc config set core.trust_password <some random password>).
-        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
-      - If trust_password is set, this module sends a request for
-        authentication before sending any requests.
-    required: false
-    type: str
-notes:
-  - Containers must have a unique name. If you attempt to create a container
-    with a name that already exists in the user's namespace, the module will
-    simply return as "unchanged".
-  - 'There are two ways to run commands in containers: using the command
-    module, or using the ansible lxd connection plugin bundled in Ansible >=
-    2.1. The latter requires Python to be installed in the container, which
-    can be done with the command module.'
-  - You can copy a file from the host to the container
-    with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) modules and the `lxd` connection plugin.
-    See the example below.
-  - You can copy a file from the created container to the localhost
-    with `command=lxc file pull container_name/dir/filename filename`.
-    See the first example below.
-'''
-
-EXAMPLES = '''
-# An example for creating an Ubuntu container and installing Python
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create a started container
-      community.general.lxd_container:
-        name: mycontainer
-        ignore_volatile_options: true
-        state: started
-        source:
-          type: image
-          mode: pull
-          server: https://images.linuxcontainers.org
-          protocol: lxd # if you get a 404, try setting protocol: simplestreams
-          alias: ubuntu/xenial/amd64
-        profiles: ["default"]
-        wait_for_ipv4_addresses: true
-        timeout: 600
-
-    - name: Check python is installed in container
-      delegate_to: mycontainer
-      ansible.builtin.raw: dpkg -s python
-      register: python_install_check
-      failed_when: python_install_check.rc not in [0, 1]
-      changed_when: false
-
-    - name: Install python in container
-      delegate_to: mycontainer
-      ansible.builtin.raw: apt-get install -y python
-      when: python_install_check.rc == 1
-
-# An example for creating an Ubuntu 14.04 container using an image fingerprint.
-# This requires changing 'server' and 'protocol' key values, replacing the
-# 'alias' key with 'fingerprint' and supplying an appropriate value that
-# matches the container image you wish to use.
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create a started container
-      community.general.lxd_container:
-        name: mycontainer
-        ignore_volatile_options: true
-        state: started
-        source:
-          type: image
-          mode: pull
-          # Provides current (and older) Ubuntu images with listed fingerprints
-          server: https://cloud-images.ubuntu.com/releases
-          # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
-          protocol: simplestreams
-          # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
- fingerprint: e9a8bdfab6dc - profiles: ["default"] - wait_for_ipv4_addresses: true - timeout: 600 - -# An example for deleting a container -- hosts: localhost - connection: local - tasks: - - name: Delete a container - community.general.lxd_container: - name: mycontainer - state: absent - -# An example for restarting a container -- hosts: localhost - connection: local - tasks: - - name: Restart a container - community.general.lxd_container: - name: mycontainer - state: restarted - -# An example for restarting a container using https to connect to the LXD server -- hosts: localhost - connection: local - tasks: - - name: Restart a container - community.general.lxd_container: - url: https://127.0.0.1:8443 - # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" - trust_password: mypassword - name: mycontainer - state: restarted - -# Note your container must be in the inventory for the below example. -# -# [containers] -# mycontainer ansible_connection=lxd -# -- hosts: - - mycontainer - tasks: - - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts" - ansible.builtin.fetch: - src: /etc/hosts - dest: /tmp/mycontainer-hosts - flat: true - -# An example for LXD cluster deployments. This example will create two new container on specific -# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster -# members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'. -# LXD API calls can be made to any LXD member, in this example, we send API requests to -#'node01.example.com', which matches ansible inventory name. -- hosts: node01.example.com - tasks: - - name: Create LXD container - community.general.lxd_container: - name: new-container-1 - ignore_volatile_options: true - state: started - source: - type: image - mode: pull - alias: ubuntu/xenial/amd64 - target: node01 - - - name: Create container on another node - community.general.lxd_container: - name: new-container-2 - ignore_volatile_options: true - state: started - source: - type: image - mode: pull - alias: ubuntu/xenial/amd64 - target: node02 -''' - -RETURN = ''' -addresses: - description: Mapping from the network device name to a list of IPv4 addresses in the container - returned: when state is started or restarted - type: dict - sample: {"eth0": ["10.155.92.191"]} -old_state: - description: The old state of the container - returned: when state is started or restarted - type: str - sample: "stopped" -logs: - description: The logs of requests and responses. - returned: when ansible-playbook is invoked with -vvvv. - type: list - sample: "(too long to be placed here)" -actions: - description: List of actions performed for the container. - returned: success - type: list - sample: '["create", "start"]' -''' -import datetime -import os -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException -from ansible.module_utils.six.moves.urllib.parse import urlencode - -# LXD_ANSIBLE_STATES is a map of states that contain values of methods used -# when a particular state is evoked. 
-LXD_ANSIBLE_STATES = { - 'started': '_started', - 'stopped': '_stopped', - 'restarted': '_restarted', - 'absent': '_destroyed', - 'frozen': '_frozen' -} - -# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible -# lxc_container module state parameter value. -ANSIBLE_LXD_STATES = { - 'Running': 'started', - 'Stopped': 'stopped', - 'Frozen': 'frozen', -} - -# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint -ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' - -# CONFIG_PARAMS is a list of config attribute names. -CONFIG_PARAMS = [ - 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source' -] - - -class LXDContainerManagement(object): - def __init__(self, module): - """Management of LXC containers via Ansible. - - :param module: Processed Ansible Module. - :type module: ``object`` - """ - self.module = module - self.name = self.module.params['name'] - self._build_config() - - self.state = self.module.params['state'] - - self.timeout = self.module.params['timeout'] - self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses'] - self.force_stop = self.module.params['force_stop'] - self.addresses = None - self.target = self.module.params['target'] - - self.key_file = self.module.params.get('client_key') - if self.key_file is None: - self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME']) - self.cert_file = self.module.params.get('client_cert') - if self.cert_file is None: - self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME']) - self.debug = self.module._verbosity >= 4 - - try: - if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: - self.url = self.module.params['url'] - elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): - self.url = self.module.params['snap_url'] - else: - self.url = self.module.params['url'] - except Exception as e: - self.module.fail_json(msg=e.msg) - - try: - self.client = LXDClient( - self.url, key_file=self.key_file, cert_file=self.cert_file, - debug=self.debug - ) - except LXDClientException as e: - self.module.fail_json(msg=e.msg) - self.trust_password = self.module.params.get('trust_password', None) - self.actions = [] - - def _build_config(self): - self.config = {} - for attr in CONFIG_PARAMS: - param_val = self.module.params.get(attr, None) - if param_val is not None: - self.config[attr] = param_val - - def _get_container_json(self): - return self.client.do( - 'GET', '/1.0/containers/{0}'.format(self.name), - ok_error_codes=[404] - ) - - def _get_container_state_json(self): - return self.client.do( - 'GET', '/1.0/containers/{0}/state'.format(self.name), - ok_error_codes=[404] - ) - - @staticmethod - def _container_json_to_module_state(resp_json): - if resp_json['type'] == 'error': - return 'absent' - return ANSIBLE_LXD_STATES[resp_json['metadata']['status']] - - def _change_state(self, action, force_stop=False): - body_json = {'action': action, 'timeout': self.timeout} - if force_stop: - body_json['force'] = True - return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json) - - def _create_container(self): - config = self.config.copy() - config['name'] = self.name - if self.target: - self.client.do('POST', '/1.0/containers?' 
+ urlencode(dict(target=self.target)), config) - else: - self.client.do('POST', '/1.0/containers', config) - self.actions.append('create') - - def _start_container(self): - self._change_state('start') - self.actions.append('start') - - def _stop_container(self): - self._change_state('stop', self.force_stop) - self.actions.append('stop') - - def _restart_container(self): - self._change_state('restart', self.force_stop) - self.actions.append('restart') - - def _delete_container(self): - self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name)) - self.actions.append('delete') - - def _freeze_container(self): - self._change_state('freeze') - self.actions.append('freeze') - - def _unfreeze_container(self): - self._change_state('unfreeze') - self.actions.append('unfreez') - - def _container_ipv4_addresses(self, ignore_devices=None): - ignore_devices = ['lo'] if ignore_devices is None else ignore_devices - - resp_json = self._get_container_state_json() - network = resp_json['metadata']['network'] or {} - network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {} - addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {} - return addresses - - @staticmethod - def _has_all_ipv4_addresses(addresses): - return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values()) - - def _get_addresses(self): - try: - due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout) - while datetime.datetime.now() < due: - time.sleep(1) - addresses = self._container_ipv4_addresses() - if self._has_all_ipv4_addresses(addresses): - self.addresses = addresses - return - except LXDClientException as e: - e.msg = 'timeout for getting IPv4 addresses' - raise - - def _started(self): - if self.old_state == 'absent': - self._create_container() - self._start_container() - else: - if self.old_state == 'frozen': - self._unfreeze_container() - elif self.old_state == 'stopped': - self._start_container() - if self._needs_to_apply_container_configs(): - self._apply_container_configs() - if self.wait_for_ipv4_addresses: - self._get_addresses() - - def _stopped(self): - if self.old_state == 'absent': - self._create_container() - else: - if self.old_state == 'stopped': - if self._needs_to_apply_container_configs(): - self._start_container() - self._apply_container_configs() - self._stop_container() - else: - if self.old_state == 'frozen': - self._unfreeze_container() - if self._needs_to_apply_container_configs(): - self._apply_container_configs() - self._stop_container() - - def _restarted(self): - if self.old_state == 'absent': - self._create_container() - self._start_container() - else: - if self.old_state == 'frozen': - self._unfreeze_container() - if self._needs_to_apply_container_configs(): - self._apply_container_configs() - self._restart_container() - if self.wait_for_ipv4_addresses: - self._get_addresses() - - def _destroyed(self): - if self.old_state != 'absent': - if self.old_state == 'frozen': - self._unfreeze_container() - if self.old_state != 'stopped': - self._stop_container() - self._delete_container() - - def _frozen(self): - if self.old_state == 'absent': - self._create_container() - self._start_container() - self._freeze_container() - else: - if self.old_state == 'stopped': - self._start_container() - if self._needs_to_apply_container_configs(): - self._apply_container_configs() - self._freeze_container() - - def _needs_to_change_container_config(self, key): - if key not in self.config: - return False - 
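        # Editor's note: the two 'config' branches below perform the same
        # comparison; the first merely drops 'volatile.*' keys first. A hedged
        # sketch of the shared idea (hypothetical helper, not module code):
        #
        #     def config_differs(wanted, current, skip_volatile):
        #         if skip_volatile:
        #             current = dict((k, v) for k, v in current.items()
        #                            if not k.startswith('volatile.'))
        #         return any(k not in current or current[k] != v
        #                    for k, v in wanted.items())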
if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile" - old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.')) - for k, v in self.config['config'].items(): - if k not in old_configs: - return True - if old_configs[k] != v: - return True - return False - elif key == 'config': # next default behavior - old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items()) - for k, v in self.config['config'].items(): - if k not in old_configs: - return True - if old_configs[k] != v: - return True - return False - else: - old_configs = self.old_container_json['metadata'][key] - return self.config[key] != old_configs - - def _needs_to_apply_container_configs(self): - return ( - self._needs_to_change_container_config('architecture') or - self._needs_to_change_container_config('config') or - self._needs_to_change_container_config('ephemeral') or - self._needs_to_change_container_config('devices') or - self._needs_to_change_container_config('profiles') - ) - - def _apply_container_configs(self): - old_metadata = self.old_container_json['metadata'] - body_json = { - 'architecture': old_metadata['architecture'], - 'config': old_metadata['config'], - 'devices': old_metadata['devices'], - 'profiles': old_metadata['profiles'] - } - if self._needs_to_change_container_config('architecture'): - body_json['architecture'] = self.config['architecture'] - if self._needs_to_change_container_config('config'): - for k, v in self.config['config'].items(): - body_json['config'][k] = v - if self._needs_to_change_container_config('ephemeral'): - body_json['ephemeral'] = self.config['ephemeral'] - if self._needs_to_change_container_config('devices'): - body_json['devices'] = self.config['devices'] - if self._needs_to_change_container_config('profiles'): - body_json['profiles'] = self.config['profiles'] - self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json) - self.actions.append('apply_container_configs') - - def run(self): - """Run the main method.""" - - try: - if self.trust_password is not None: - self.client.authenticate(self.trust_password) - self.ignore_volatile_options = self.module.params.get('ignore_volatile_options') - - self.old_container_json = self._get_container_json() - self.old_state = self._container_json_to_module_state(self.old_container_json) - action = getattr(self, LXD_ANSIBLE_STATES[self.state]) - action() - - state_changed = len(self.actions) > 0 - result_json = { - 'log_verbosity': self.module._verbosity, - 'changed': state_changed, - 'old_state': self.old_state, - 'actions': self.actions - } - if self.client.debug: - result_json['logs'] = self.client.logs - if self.addresses is not None: - result_json['addresses'] = self.addresses - self.module.exit_json(**result_json) - except LXDClientException as e: - state_changed = len(self.actions) > 0 - fail_params = { - 'msg': e.msg, - 'changed': state_changed, - 'actions': self.actions - } - if self.client.debug: - fail_params['logs'] = e.kwargs['logs'] - self.module.fail_json(**fail_params) - - -def main(): - """Ansible Main module.""" - - module = AnsibleModule( - argument_spec=dict( - name=dict( - type='str', - required=True - ), - architecture=dict( - type='str', - ), - config=dict( - type='dict', - ), - ignore_volatile_options=dict( - type='bool', - ), - devices=dict( - type='dict', - ), - ephemeral=dict( - type='bool', - ), - profiles=dict( - type='list', - 
            elements='str',
-        ),
-        source=dict(
-            type='dict',
-        ),
-        state=dict(
-            choices=list(LXD_ANSIBLE_STATES.keys()),
-            default='started'
-        ),
-        target=dict(
-            type='str',
-        ),
-        timeout=dict(
-            type='int',
-            default=30
-        ),
-        wait_for_ipv4_addresses=dict(
-            type='bool',
-            default=False
-        ),
-        force_stop=dict(
-            type='bool',
-            default=False
-        ),
-        url=dict(
-            type='str',
-            default=ANSIBLE_LXD_DEFAULT_URL
-        ),
-        snap_url=dict(
-            type='str',
-            default='unix:/var/snap/lxd/common/lxd/unix.socket'
-        ),
-        client_key=dict(
-            type='path',
-            aliases=['key_file']
-        ),
-        client_cert=dict(
-            type='path',
-            aliases=['cert_file']
-        ),
-        trust_password=dict(type='str', no_log=True)
-    ),
-    supports_check_mode=False,
-    )
-
-    if module.params['ignore_volatile_options'] is None:
-        module.params['ignore_volatile_options'] = True
-        module.deprecate(
-            'If the keyword "volatile" is used in a playbook in the config '
-            'section, a "changed" message will appear with every run, even without a change '
-            'to the playbook. '
-            'This will change in the future. Please test your scripts '
-            'by "ignore_volatile_options: false". To keep the old behavior, set that option explicitly to "true".',
-            version='6.0.0', collection_name='community.general')
-    lxd_manage = LXDContainerManagement(module=module)
-    lxd_manage.run()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/lxd/lxd_profile.py b/plugins/modules/cloud/lxd/lxd_profile.py
deleted file mode 100644
index 3094898f2c..0000000000
--- a/plugins/modules/cloud/lxd/lxd_profile.py
+++ /dev/null
@@ -1,518 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Hiroaki Nakamura
-# Copyright: (c) 2020, Frank Dornheim
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: lxd_profile
-short_description: Manage LXD profiles
-description:
-  - Management of LXD profiles
-author: "Hiroaki Nakamura (@hnakamur)"
-options:
-  name:
-    description:
-      - Name of a profile.
-    required: true
-    type: str
-  description:
-    description:
-      - Description of the profile.
-    type: str
-  config:
-    description:
-      - 'The config for the profile (e.g. {"limits.memory": "4GB"}).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
-      - If the profile already exists and its "config" values in metadata
-        obtained from
-        GET /1.0/profiles/<name>
-        U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
-        are different, then this module tries to apply the configurations.
-      - Not all config values can be applied to an existing profile;
-        you may need to delete and recreate the profile instead.
-    required: false
-    type: dict
-  devices:
-    description:
-      - 'The devices for the profile
-        (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}}).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
-    required: false
-    type: dict
-  new_name:
-    description:
-      - A new name for the profile.
-      - If this parameter is specified, the profile will be renamed to this name.
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
-    required: false
-    type: str
-  merge_profile:
-    description:
-      - Merge the configuration of the present profile with the new desired configuration,
-        instead of replacing it.
-    required: false
-    default: false
-    type: bool
-    version_added: 2.1.0
-  state:
-    choices:
-      - present
-      - absent
-    description:
-      - Define the state of a profile.
-        required: false
-        default: present
-        type: str
-    url:
-        description:
-          - The unix domain socket path or the https URL for the LXD server.
-        required: false
-        default: unix:/var/lib/lxd/unix.socket
-        type: str
-    snap_url:
-        description:
-          - The unix domain socket path when LXD is installed by the snap package manager.
-        required: false
-        default: unix:/var/snap/lxd/common/lxd/unix.socket
-        type: str
-    client_key:
-        description:
-          - The client certificate key file path.
-          - If not specified, it defaults to C($HOME/.config/lxc/client.key).
-        required: false
-        aliases: [ key_file ]
-        type: path
-    client_cert:
-        description:
-          - The client certificate file path.
-          - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
-        required: false
-        aliases: [ cert_file ]
-        type: path
-    trust_password:
-        description:
-          - The client trusted password.
-          - You need to set this password on the LXD server before
-            running this module using the following command:
-            C(lxc config set core.trust_password).
-            See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
-          - If trust_password is set, this module sends a request for
-            authentication before sending any requests.
-        required: false
-        type: str
-notes:
-  - Profiles must have a unique name. If you attempt to create a profile
-    with a name that already exists in the user's namespace, the module will
-    simply return as "unchanged".
-'''
-
-EXAMPLES = '''
-# An example for creating a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create a profile
-      community.general.lxd_profile:
-        name: macvlan
-        state: present
-        config: {}
-        description: my macvlan profile
-        devices:
-          eth0:
-            nictype: macvlan
-            parent: br0
-            type: nic
-
-# An example for creating a profile via http connection
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create macvlan profile
-      community.general.lxd_profile:
-        url: https://127.0.0.1:8443
-        # These client_cert and client_key values are equal to the default values.
-        #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
-        #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
-        trust_password: mypassword
-        name: macvlan
-        state: present
-        config: {}
-        description: my macvlan profile
-        devices:
-          eth0:
-            nictype: macvlan
-            parent: br0
-            type: nic
-
-# An example for modifying/merging a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Merge a profile
-      community.general.lxd_profile:
-        merge_profile: true
-        name: macvlan
-        state: present
-        config: {}
-        description: my macvlan profile
-        devices:
-          eth0:
-            nictype: macvlan
-            parent: br0
-            type: nic
-
-# An example for deleting a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Delete a profile
-      community.general.lxd_profile:
-        name: macvlan
-        state: absent
-
-# An example for renaming a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Rename a profile
-      community.general.lxd_profile:
-        name: macvlan
-        new_name: macvlan2
-        state: present
-'''
-
-RETURN = '''
-old_state:
-  description: The old state of the profile.
-  returned: success
-  type: str
-  sample: "absent"
-logs:
-  description: The logs of requests and responses.
-  returned: when ansible-playbook is invoked with -vvvv.
-  type: list
-  sample: "(too long to be placed here)"
-actions:
-  description: List of actions performed for the profile.
-  returned: success
-  type: list
-  sample: '["create"]'
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
-
-# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
-ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
-
-# PROFILES_STATES is a list of the supported states.
-PROFILES_STATES = [
-    'present', 'absent'
-]
-
-# CONFIG_PARAMS is a list of config attribute names.
-CONFIG_PARAMS = [
-    'config', 'description', 'devices'
-]
-
-
-class LXDProfileManagement(object):
-    def __init__(self, module):
-        """Management of LXD profiles via Ansible.
-
-        :param module: Processed Ansible Module.
-        :type module: ``object``
-        """
-        self.module = module
-        self.name = self.module.params['name']
-        self._build_config()
-        self.state = self.module.params['state']
-        self.new_name = self.module.params.get('new_name', None)
-
-        self.key_file = self.module.params.get('client_key')
-        if self.key_file is None:
-            self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
-        self.cert_file = self.module.params.get('client_cert')
-        if self.cert_file is None:
-            self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
-        self.debug = self.module._verbosity >= 4
-
-        try:
-            if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
-                self.url = self.module.params['url']
-            elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
-                self.url = self.module.params['snap_url']
-            else:
-                self.url = self.module.params['url']
-        except Exception as e:
-            self.module.fail_json(msg=e.msg)
-
-        try:
-            self.client = LXDClient(
-                self.url, key_file=self.key_file, cert_file=self.cert_file,
-                debug=self.debug
-            )
-        except LXDClientException as e:
-            self.module.fail_json(msg=e.msg)
-        self.trust_password = self.module.params.get('trust_password', None)
-        self.actions = []
-
-    def _build_config(self):
-        self.config = {}
-        for attr in CONFIG_PARAMS:
-            param_val = self.module.params.get(attr, None)
-            if param_val is not None:
-                self.config[attr] = param_val
-
-    def _get_profile_json(self):
-        return self.client.do(
-            'GET', '/1.0/profiles/{0}'.format(self.name),
-            ok_error_codes=[404]
-        )
-
-    @staticmethod
-    def _profile_json_to_module_state(resp_json):
-        if resp_json['type'] == 'error':
-            return 'absent'
-        return 'present'
-
-    def _update_profile(self):
-        if self.state == 'present':
-            if self.old_state == 'absent':
-                if self.new_name is None:
-                    self._create_profile()
-                else:
-                    self.module.fail_json(
-                        msg='new_name must not be set when the profile does not exist and the state is present',
-                        changed=False)
-            else:
-                if self.new_name is not None and self.new_name != self.name:
-                    self._rename_profile()
-                if self._needs_to_apply_profile_configs():
-                    self._apply_profile_configs()
-        elif self.state == 'absent':
-            if self.old_state == 'present':
-                if self.new_name is None:
-                    self._delete_profile()
-                else:
-                    self.module.fail_json(
-                        msg='new_name must not be set when the profile exists and the specified state is absent',
-                        changed=False)
-
-    def _create_profile(self):
-        config = self.config.copy()
-        config['name'] = self.name
-        self.client.do('POST', '/1.0/profiles', config)
-        self.actions.append('create')
-
-    def _rename_profile(self):
-        config = {'name': self.new_name}
-        self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
-        self.actions.append('rename')
-        self.name = self.new_name
-
-    def _needs_to_change_profile_config(self, key):
-        if key not in self.config:
-            return False
-        old_configs = self.old_profile_json['metadata'].get(key, None)
-        return self.config[key] != old_configs
-
-    def _needs_to_apply_profile_configs(self):
-        return (
-            self._needs_to_change_profile_config('config') or
-            self._needs_to_change_profile_config('description') or
-            self._needs_to_change_profile_config('devices')
-        )
-
-    def _merge_dicts(self, source, destination):
-        """Merge two dictionaries recursively.
-
-        Nested dictionaries are merged key by key; all other values
-        from source overwrite those in destination.
-
-        Args:
-            dict(source): source dict
-            dict(destination): destination dict
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            dict(destination): merged dict"""
-        for key, value in source.items():
-            if isinstance(value, dict):
-                # get node or create one
-                node = destination.setdefault(key, {})
-                self._merge_dicts(value, node)
-            else:
-                destination[key] = value
-        return destination
-
-    def _merge_config(self, config):
-        """Merge profile.
-
-        Merge the configuration of the present profile with the new desired config items.
-
-        Args:
-            dict(config): Dict with the old config in 'metadata' and new config in 'config'
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            dict(config): new config"""
-        # merge or copy the sections from the existing profile to 'config'
-        for item in ['config', 'description', 'devices', 'name', 'used_by']:
-            if item in config:
-                config[item] = self._merge_dicts(config['metadata'][item], config[item])
-            else:
-                config[item] = config['metadata'][item]
-        # merge or copy the sections from the ansible-task to 'config'
-        return self._merge_dicts(self.config, config)
-
-    def _generate_new_config(self, config):
-        """Rebuild profile.
-
-        Rebuild the profile from the configuration provided in the play.
-        Existing configurations are discarded.
-
-        This is the default behavior.
-
-        Args:
-            dict(config): Dict with the old config in 'metadata' and new config in 'config'
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            dict(config): new config"""
-        for k, v in self.config.items():
-            config[k] = v
-        return config
-
-    def _apply_profile_configs(self):
-        """Select the procedure: rebuild or merge.
-
-        The standard behavior is that all information not contained
-        in the play is discarded.
-
-        If "merge_profile" is provided in the play and set to "True", then existing
-        configurations from the profile and the new ones defined are merged.
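Because `_merge_dicts` recurses only into nested dicts and lets scalars from `source` win, profile merges preserve any existing keys the play does not mention. A small self-contained illustration of those semantics (hypothetical data, not taken from a real profile):

```python
def merge_dicts(source, destination):
    # Recursively fold `source` into `destination`; scalars from
    # `source` overwrite, nested dicts are merged key by key.
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


existing = {'config': {'limits.memory': '2GB'},
            'devices': {'eth0': {'nictype': 'bridged', 'parent': 'br0'}}}
desired = {'config': {'limits.memory': '4GB'},
           'devices': {'eth0': {'parent': 'br1'}}}

merged = merge_dicts(desired, existing)
assert merged['config'] == {'limits.memory': '4GB'}
# keys the play did not mention survive the merge:
assert merged['devices']['eth0'] == {'nictype': 'bridged', 'parent': 'br1'}
```

Note that `destination` is mutated in place, which is why the module merges into a copy of the old profile JSON.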
- - Args: - None - Kwargs: - None - Raises: - None - Returns: - None""" - config = self.old_profile_json.copy() - if self.module.params['merge_profile']: - config = self._merge_config(config) - else: - config = self._generate_new_config(config) - - # upload config to lxd - self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config) - self.actions.append('apply_profile_configs') - - def _delete_profile(self): - self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name)) - self.actions.append('delete') - - def run(self): - """Run the main method.""" - - try: - if self.trust_password is not None: - self.client.authenticate(self.trust_password) - - self.old_profile_json = self._get_profile_json() - self.old_state = self._profile_json_to_module_state(self.old_profile_json) - self._update_profile() - - state_changed = len(self.actions) > 0 - result_json = { - 'changed': state_changed, - 'old_state': self.old_state, - 'actions': self.actions - } - if self.client.debug: - result_json['logs'] = self.client.logs - self.module.exit_json(**result_json) - except LXDClientException as e: - state_changed = len(self.actions) > 0 - fail_params = { - 'msg': e.msg, - 'changed': state_changed, - 'actions': self.actions - } - if self.client.debug: - fail_params['logs'] = e.kwargs['logs'] - self.module.fail_json(**fail_params) - - -def main(): - """Ansible Main module.""" - - module = AnsibleModule( - argument_spec=dict( - name=dict( - type='str', - required=True - ), - new_name=dict( - type='str', - ), - config=dict( - type='dict', - ), - description=dict( - type='str', - ), - devices=dict( - type='dict', - ), - merge_profile=dict( - type='bool', - default=False - ), - state=dict( - choices=PROFILES_STATES, - default='present' - ), - url=dict( - type='str', - default=ANSIBLE_LXD_DEFAULT_URL - ), - snap_url=dict( - type='str', - default='unix:/var/snap/lxd/common/lxd/unix.socket' - ), - client_key=dict( - type='path', - aliases=['key_file'] - ), - client_cert=dict( - type='path', - aliases=['cert_file'] - ), - trust_password=dict(type='str', no_log=True) - ), - supports_check_mode=False, - ) - - lxd_manage = LXDProfileManagement(module=module) - lxd_manage.run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/memset/memset_dns_reload.py b/plugins/modules/cloud/memset/memset_dns_reload.py deleted file mode 100644 index 6eefe133fd..0000000000 --- a/plugins/modules/cloud/memset/memset_dns_reload.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_dns_reload -author: "Simon Weald (@glitchcrab)" -short_description: Request reload of Memset's DNS infrastructure, -notes: - - DNS reload requests are a best-effort service provided by Memset; these generally - happen every 15 minutes by default, however you can request an immediate reload if - later tasks rely on the records being created. An API key generated via the - Memset customer control panel is required with the following minimum scope - - I(dns.reload). If you wish to poll the job status to wait until the reload has - completed, then I(job.status) is also required. -description: - - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. 
-options:
-    api_key:
-        required: true
-        type: str
-        description:
-          - The API key obtained from the Memset control panel.
-    poll:
-        default: false
-        type: bool
-        description:
-          - Boolean value; if set, the module polls the reload job's status and returns
-            when the job has completed (unless the 30 second timeout is reached first).
-            If the timeout is reached, the task is not marked as failed, but
-            stderr indicates that the polling failed.
-'''
-
-EXAMPLES = '''
-- name: Submit DNS reload and poll
-  community.general.memset_dns_reload:
-    api_key: 5eb86c9196ab03919abcf03857163741
-    poll: True
-  delegate_to: localhost
-'''
-
-RETURN = '''
----
-memset_api:
-  description: Raw response from the Memset API.
-  returned: always
-  type: complex
-  contains:
-    error:
-      description: Whether the job ended in error state.
-      returned: always
-      type: bool
-      sample: true
-    finished:
-      description: Whether the job completed before the result was returned.
-      returned: always
-      type: bool
-      sample: true
-    id:
-      description: Job ID.
-      returned: always
-      type: str
-      sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
-    status:
-      description: Job status.
-      returned: always
-      type: str
-      sample: "DONE"
-    type:
-      description: Job type.
-      returned: always
-      type: str
-      sample: "dns"
-'''
-
-from time import sleep
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def poll_reload_status(api_key=None, job_id=None, payload=None):
-    '''
-    We poll the `job.status` endpoint every 5 seconds up to a
-    maximum of 6 times. This is a relatively arbitrary choice of
-    timeout; however, requests rarely take longer than 15 seconds
-    to complete.
-    '''
-    memset_api, stderr, msg = None, None, None
-    payload['id'] = job_id
-
-    api_method = 'job.status'
-    _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
-
-    # poll the job at 5 second intervals, up to a maximum of 6 attempts.
-    counter = 0
-    while not response.json()['finished'] and counter < 6:
-        sleep(5)
-        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
-        counter += 1
-    if response.json()['error']:
-        # the reload job was submitted but polling failed. Don't return this as an overall task failure.
-        stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
-    else:
-        memset_api = response.json()
-        msg = None
-
-    return(memset_api, msg, stderr)
-
-
-def reload_dns(args=None):
-    '''
-    DNS reloads are a single API call and therefore there's not much
-    which can go wrong outside of auth errors.
-    '''
-    retvals, payload = dict(), dict()
-    has_changed, has_failed = False, False
-    memset_api, msg, stderr = None, None, None
-
-    api_method = 'dns.reload'
-    has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
-    if has_failed:
-        # this is the first time the API is called; incorrect credentials will
-        # manifest themselves at this point so we need to ensure the user is
-        # informed of the reason.
-        retvals['failed'] = has_failed
-        retvals['memset_api'] = response.json()
-        retvals['msg'] = msg
-        return(retvals)
-
-    # set changed to true if the reload request was accepted.
-    has_changed = True
-    memset_api = msg
-    # empty msg var as we don't want to return the API's json response twice.
-    msg = None
-
-    if args['poll']:
-        # hand off to the poll function.
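The handoff above is a bounded-retry poll: check `job.status` at a fixed interval until the job reports finished or the attempt budget is spent, and treat a timeout as a soft failure rather than a task failure. A generic sketch of the pattern, with `fetch_status` standing in for the Memset API call (the names are assumptions, not the module's helpers):

```python
from time import sleep

def wait_for_job(fetch_status, attempts=6, interval=5):
    """Poll until the job finishes or the attempt budget is spent.

    Returns (job, timed_out): `job` is the last status payload seen,
    `timed_out` is True when 'finished' never became true.
    """
    job = fetch_status()
    for _ in range(attempts):
        if job['finished']:
            return job, False
        sleep(interval)
        job = fetch_status()
    return job, not job['finished']


# Example: a fake job that finishes on the third poll.
states = iter([{'finished': False}, {'finished': False}, {'finished': True}])
job, timed_out = wait_for_job(lambda: next(states), attempts=6, interval=0)
assert job == {'finished': True} and timed_out is False
```

Capping both the attempt count and the interval keeps the worst case at roughly attempts × interval seconds, matching the 30 second budget described in the option docs.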
- job_id = response.json()['id'] - memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload) - - # assemble return variables. - retvals['failed'] = has_failed - retvals['changed'] = has_changed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - poll=dict(required=False, default=False, type='bool') - ), - supports_check_mode=False - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - - retvals = reload_dns(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/memset/memset_memstore_info.py b/plugins/modules/cloud/memset/memset_memstore_info.py deleted file mode 100644 index e880b46009..0000000000 --- a/plugins/modules/cloud/memset/memset_memstore_info.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_memstore_info -author: "Simon Weald (@glitchcrab)" -short_description: Retrieve Memstore product usage information. -notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - I(memstore.usage). -description: - - Retrieve Memstore product usage information. - - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change. -options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The Memstore product name (i.e. C(mstestyaa1)). 
-''' - -EXAMPLES = ''' -- name: Get usage for mstestyaa1 - community.general.memset_memstore_info: - name: mstestyaa1 - api_key: 5eb86c9896ab03919abcf03857163741 - delegate_to: localhost -''' - -RETURN = ''' ---- -memset_api: - description: Info from the Memset API - returned: always - type: complex - contains: - cdn_bandwidth: - description: Dictionary of CDN bandwidth facts - returned: always - type: complex - contains: - bytes_out: - description: Outbound CDN bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 - requests: - description: Number of requests in the last 24 hours - returned: always - type: int - sample: 10 - bytes_in: - description: Inbound CDN bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 - containers: - description: Number of containers - returned: always - type: int - sample: 10 - bytes: - description: Space used in bytes - returned: always - type: int - sample: 3860997965 - objs: - description: Number of objects - returned: always - type: int - sample: 1000 - bandwidth: - description: Dictionary of CDN bandwidth facts - returned: always - type: complex - contains: - bytes_out: - description: Outbound bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 - requests: - description: Number of requests in the last 24 hours - returned: always - type: int - sample: 10 - bytes_in: - description: Inbound bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def get_facts(args=None): - ''' - Performs a simple API call and returns a JSON blob. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - msg, stderr, memset_api = None, None, None - - payload['name'] = args['name'] - - api_method = 'memstore.usage' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) - - # we don't want to return the same thing twice - msg = None - memset_api = response.json() - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - - # populate the dict with the user-provided vars. 
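The `retvals[val] = eval(val)` idiom used by `get_facts` above copies same-named locals into the result dict; collecting the optional values in a plain dict achieves the same thing without `eval`. A hedged alternative sketch (not how these modules are actually written):

```python
def assemble_retvals(changed, failed, msg=None, stderr=None, memset_api=None):
    # Start with the flags that are always returned, then add any
    # optional value that was actually populated.
    retvals = {'changed': changed, 'failed': failed}
    optional = {'msg': msg, 'stderr': stderr, 'memset_api': memset_api}
    retvals.update({k: v for k, v in optional.items() if v is not None})
    return retvals


assert assemble_retvals(True, False, msg='reload submitted') == {
    'changed': True, 'failed': False, 'msg': 'reload submitted'}
```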
- args = dict() - for key, arg in module.params.items(): - args[key] = arg - - retvals = get_facts(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/memset/memset_server_info.py b/plugins/modules/cloud/memset/memset_server_info.py deleted file mode 100644 index 853e2c884d..0000000000 --- a/plugins/modules/cloud/memset/memset_server_info.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_server_info -author: "Simon Weald (@glitchcrab)" -short_description: Retrieve server information. -notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - I(server.info). -description: - - Retrieve server information. - - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change. -options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The server product name (i.e. C(testyaa1)). -''' - -EXAMPLES = ''' -- name: Get details for testyaa1 - community.general.memset_server_info: - name: testyaa1 - api_key: 5eb86c9896ab03919abcf03857163741 - delegate_to: localhost -''' - -RETURN = ''' ---- -memset_api: - description: Info from the Memset API - returned: always - type: complex - contains: - backups: - description: Whether this server has a backup service. - returned: always - type: bool - sample: true - control_panel: - description: Whether the server has a control panel (i.e. cPanel). - returned: always - type: str - sample: 'cpanel' - data_zone: - description: The data zone the server is in. - returned: always - type: str - sample: 'Memset Public Cloud' - expiry_date: - description: Current expiry date of the server. - returned: always - type: str - sample: '2018-08-10' - firewall_rule_group: - description: Details about the firewall group this server is in. - returned: always - type: dict - sample: { - "default_outbound_policy": "RETURN", - "name": "testyaa-fw1", - "nickname": "testyaa cPanel rules", - "notes": "", - "public": false, - "rules": { - "51d7db54d39c3544ef7c48baa0b9944f": { - "action": "ACCEPT", - "comment": "", - "dest_ip6s": "any", - "dest_ips": "any", - "dest_ports": "any", - "direction": "Inbound", - "ip_version": "any", - "ordering": 2, - "protocols": "icmp", - "rule_group_name": "testyaa-fw1", - "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", - "source_ip6s": "any", - "source_ips": "any", - "source_ports": "any" - } - } - } - firewall_type: - description: The type of firewall the server has (i.e. self-managed, managed). - returned: always - type: str - sample: 'managed' - host_name: - description: The server's hostname. - returned: always - type: str - sample: 'testyaa1.miniserver.com' - ignore_monitoring_off: - description: When true, Memset won't remind the customer that monitoring is disabled. - returned: always - type: bool - sample: true - ips: - description: List of dictionaries of all IP addresses assigned to the server. 
- returned: always - type: list - sample: [ - { - "address": "1.2.3.4", - "bytes_in_today": 1000.0, - "bytes_in_yesterday": 2000.0, - "bytes_out_today": 1000.0, - "bytes_out_yesterday": 2000.0 - } - ] - monitor: - description: Whether the server has monitoring enabled. - returned: always - type: bool - sample: true - monitoring_level: - description: The server's monitoring level (i.e. basic). - returned: always - type: str - sample: 'basic' - name: - description: Server name (same as the service name). - returned: always - type: str - sample: 'testyaa1' - network_zones: - description: The network zone(s) the server is in. - returned: always - type: list - sample: [ 'reading' ] - nickname: - description: Customer-set nickname for the server. - returned: always - type: str - sample: 'database server' - no_auto_reboot: - description: Whether or not to reboot the server if monitoring detects it down. - returned: always - type: bool - sample: true - no_nrpe: - description: Whether Memset should use NRPE to monitor this server. - returned: always - type: bool - sample: true - os: - description: The server's Operating System. - returned: always - type: str - sample: 'debian_stretch_64' - penetration_patrol: - description: Intrusion detection support level for this server. - returned: always - type: str - sample: 'managed' - penetration_patrol_alert_level: - description: The alert level at which notifications are sent. - returned: always - type: int - sample: 10 - primary_ip: - description: Server's primary IP. - returned: always - type: str - sample: '1.2.3.4' - renewal_price_amount: - description: Renewal cost for the server. - returned: always - type: str - sample: '30.00' - renewal_price_currency: - description: Currency for renewal payments. - returned: always - type: str - sample: 'GBP' - renewal_price_vat: - description: VAT rate for renewal payments - returned: always - type: str - sample: '20' - start_date: - description: Server's start date. - returned: always - type: str - sample: '2013-04-10' - status: - description: Current status of the server (i.e. live, onhold). - returned: always - type: str - sample: 'LIVE' - support_level: - description: Support level included with the server. - returned: always - type: str - sample: 'managed' - type: - description: What this server is (i.e. dedicated) - returned: always - type: str - sample: 'miniserver' - vlans: - description: Dictionary of tagged and untagged VLANs this server is in. - returned: always - type: dict - sample: { - tagged: [], - untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ] - } - vulnscan: - description: Vulnerability scanning level. - returned: always - type: str - sample: 'basic' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def get_facts(args=None): - ''' - Performs a simple API call and returns a JSON blob. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - msg, stderr, memset_api = None, None, None - - payload['name'] = args['name'] - - api_method = 'server.info' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . 
format(response.status_code) - return(retvals) - - # we don't want to return the same thing twice - msg = None - memset_api = response.json() - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - - retvals = get_facts(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/memset/memset_zone.py b/plugins/modules/cloud/memset/memset_zone.py deleted file mode 100644 index 9ef798bd74..0000000000 --- a/plugins/modules/cloud/memset/memset_zone.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_zone -author: "Simon Weald (@glitchcrab)" -short_description: Creates and deletes Memset DNS zones. -notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). -description: - - Manage DNS zones in a Memset account. -options: - state: - required: true - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - name: - required: true - description: - - The zone nickname; usually the same as the main domain. Ensure this - value has at most 250 characters. - type: str - aliases: [ nickname ] - ttl: - description: - - The default TTL for all records created in the zone. This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). - type: int - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - force: - required: false - default: false - type: bool - description: - - Forces deletion of a zone and all zone domains/zone records it contains. 
-''' - -EXAMPLES = ''' -# Create the zone 'test' -- name: Create zone - community.general.memset_zone: - name: test - state: present - api_key: 5eb86c9196ab03919abcf03857163741 - ttl: 300 - delegate_to: localhost - -# Force zone deletion -- name: Force delete zone - community.general.memset_zone: - name: test - state: absent - api_key: 5eb86c9196ab03919abcf03857163741 - force: true - delegate_to: localhost -''' - -RETURN = ''' -memset_api: - description: Zone info from the Memset API - returned: when state == present - type: complex - contains: - domains: - description: List of domains in this zone - returned: always - type: list - sample: [] - id: - description: Zone id - returned: always - type: str - sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" - nickname: - description: Zone name - returned: always - type: str - sample: "example.com" - records: - description: List of DNS records for domains in this zone - returned: always - type: list - sample: [] - ttl: - description: Default TTL for domains in this zone - returned: always - type: int - sample: 300 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import check_zone -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def api_validation(args=None): - ''' - Perform some validation which will be enforced by Memset's API (see: - https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) - ''' - # zone domain length must be less than 250 chars. - if len(args['name']) > 250: - stderr = 'Zone name must be less than 250 characters in length.' - module.fail_json(failed=True, msg=stderr, stderr=stderr) - - -def check(args=None): - ''' - Support for running with check mode. - ''' - retvals = dict() - - api_method = 'dns.zone_list' - has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - zone_exists, counter = check_zone(data=response, name=args['name']) - - # set changed to true if the operation would cause a change. - has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present')) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - - return(retvals) - - -def create_zone(args=None, zone_exists=None, payload=None): - ''' - At this point we already know whether the zone exists, so we - just need to make the API reflect the desired state. - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - if not zone_exists: - payload['ttl'] = args['ttl'] - payload['nickname'] = args['name'] - api_method = 'dns.zone_create' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - else: - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - for zone in response.json(): - if zone['nickname'] == args['name']: - break - if zone['ttl'] != args['ttl']: - # update the zone if the desired TTL is different. - payload['id'] = zone['id'] - payload['ttl'] = args['ttl'] - api_method = 'dns.zone_update' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - - # populate return var with zone info. 
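`create_zone` below follows the usual idempotent reconcile shape: create when the zone is absent, update only the attribute that drifted (the TTL) when it exists, and flag `changed` only if an API call was actually made. A condensed sketch of that decision flow, with `api` as a placeholder for `memset_api_call`:

```python
def ensure_zone(api, name, ttl, existing_zones):
    """Return (changed, action) after reconciling one zone.

    `existing_zones` is the parsed dns.zone_list payload; `api` is a
    stand-in for the module's memset_api_call helper.
    """
    match = next((z for z in existing_zones if z['nickname'] == name), None)
    if match is None:
        api('dns.zone_create', {'nickname': name, 'ttl': ttl})
        return True, 'create'
    if match['ttl'] != ttl:
        api('dns.zone_update', {'id': match['id'], 'ttl': ttl})
        return True, 'update'
    return False, None


calls = []
zones = [{'nickname': 'example.com', 'id': 'abc123', 'ttl': 300}]
changed, action = ensure_zone(lambda m, p: calls.append(m), 'example.com', 600, zones)
assert changed and action == 'update' and calls == ['dns.zone_update']
```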
- api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) - - if zone_exists: - payload = dict() - payload['id'] = zone_id - api_method = 'dns.zone_info' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - memset_api = response.json() - - return(has_failed, has_changed, memset_api, msg) - - -def delete_zone(args=None, zone_exists=None, payload=None): - ''' - Deletion requires extra sanity checking as the zone cannot be - deleted if it contains domains or records. Setting force=true - will override this behaviour. - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - if zone_exists: - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - counter = 0 - for zone in response.json(): - if zone['nickname'] == args['name']: - counter += 1 - if counter == 1: - for zone in response.json(): - if zone['nickname'] == args['name']: - zone_id = zone['id'] - domain_count = len(zone['domains']) - record_count = len(zone['records']) - if (domain_count > 0 or record_count > 0) and args['force'] is False: - # we need to fail out if force was not explicitly set. - stderr = 'Zone contains domains or records and force was not used.' - has_failed = True - has_changed = False - module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1) - api_method = 'dns.zone_delete' - payload['id'] = zone_id - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice. - memset_api = msg - msg = None - else: - # zone names are not unique, so we cannot safely delete the requested - # zone at this time. - has_failed = True - has_changed = False - msg = 'Unable to delete zone as multiple zones with the same name exist.' - else: - has_failed, has_changed = False, False - - return(has_failed, has_changed, memset_api, msg) - - -def create_or_delete(args=None): - ''' - We need to perform some initial sanity checking and also look - up required info before handing it off to create or delete. - ''' - retvals, payload = dict(), dict() - has_failed, has_changed = False, False - msg, memset_api, stderr = None, None, None - - # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - if _has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. 
- retvals['failed'] = _has_failed - retvals['msg'] = _msg - - return(retvals) - - zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) - - if args['state'] == 'present': - has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload) - - elif args['state'] == 'absent': - has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload) - - retvals['failed'] = has_failed - retvals['changed'] = has_changed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, aliases=['nickname'], type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - force=dict(required=False, default=False, type='bool') - ), - supports_check_mode=True - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - args['check_mode'] = module.check_mode - - # validate some API-specific limitations. - api_validation(args=args) - - if module.check_mode: - retvals = check(args) - else: - retvals = create_or_delete(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/memset/memset_zone_domain.py b/plugins/modules/cloud/memset/memset_zone_domain.py deleted file mode 100644 index 4aa0eada92..0000000000 --- a/plugins/modules/cloud/memset/memset_zone_domain.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_zone_domain -author: "Simon Weald (@glitchcrab)" -short_description: Create and delete domains in Memset DNS zones. -notes: - - Zone domains can be thought of as a collection of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list). - - Currently this module can only create one domain at a time. Multiple domains should - be created using C(with_items). -description: - - Manage DNS zone domains in a Memset account. -options: - state: - default: present - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - domain: - required: true - description: - - The zone domain name. Ensure this value has at most 250 characters. - type: str - aliases: ['name'] - zone: - required: true - description: - - The zone to add the domain to (this must already exist). 
- type: str -''' - -EXAMPLES = ''' -# Create the zone domain 'test.com' -- name: Create zone domain - community.general.memset_zone_domain: - domain: test.com - zone: testzone - state: present - api_key: 5eb86c9196ab03919abcf03857163741 - delegate_to: localhost -''' - -RETURN = ''' -memset_api: - description: Domain info from the Memset API - returned: when changed or state == present - type: complex - contains: - domain: - description: Domain name - returned: always - type: str - sample: "example.com" - id: - description: Domain ID - returned: always - type: str - sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id -from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def api_validation(args=None): - ''' - Perform some validation which will be enforced by Memset's API (see: - https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create) - ''' - # zone domain length must be less than 250 chars - if len(args['domain']) > 250: - stderr = 'Zone domain must be less than 250 characters in length.' - module.fail_json(failed=True, msg=stderr) - - -def check(args=None): - ''' - Support for running with check mode. - ''' - retvals = dict() - has_changed = False - - api_method = 'dns.zone_domain_list' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - domain_exists = check_zone_domain(data=response, domain=args['domain']) - - # set changed to true if the operation would cause a change. - has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present')) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - - return(retvals) - - -def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): - ''' - At this point we already know whether the containing zone exists, - so we just need to create the domain (or exit if it already exists). - ''' - has_changed, has_failed = False, False - msg = None - - api_method = 'dns.zone_domain_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - for zone_domain in response.json(): - if zone_domain['domain'] == args['domain']: - # zone domain already exists, nothing to change. - has_changed = False - break - else: - # we need to create the domain - api_method = 'dns.zone_domain_create' - payload['domain'] = args['domain'] - payload['zone_id'] = zone_id - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - - return(has_failed, has_changed, msg) - - -def delete_zone_domain(args=None, payload=None): - ''' - Deletion is pretty simple, domains are always unique so we - we don't need to do any sanity checking to avoid deleting the - wrong thing. 
- ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - api_method = 'dns.zone_domain_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - domain_exists = check_zone_domain(data=response, domain=args['domain']) - - if domain_exists: - api_method = 'dns.zone_domain_delete' - payload['domain'] = args['domain'] - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = response.json() - # unset msg as we don't want to return unnecessary info to the user. - msg = None - - return(has_failed, has_changed, memset_api, msg) - - -def create_or_delete_domain(args=None): - ''' - We need to perform some initial sanity checking and also look - up required info before handing it off to create or delete. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - msg, stderr, memset_api = None, None, None - - # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) - - zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) - - if not zone_exists: - # the zone needs to be unique - this isn't a requirement of Memset's API but it - # makes sense in the context of this module. - has_failed = True - if counter == 0: - stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone']) - elif counter > 1: - stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone']) - - retvals['failed'] = has_failed - retvals['msg'] = stderr - return(retvals) - - if args['state'] == 'present': - has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) - - if args['state'] == 'absent': - has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - domain=dict(required=True, aliases=['name'], type='str'), - zone=dict(required=True, type='str') - ), - supports_check_mode=True - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - args['check_mode'] = module.check_mode - - # validate some API-specific limitations. - api_validation(args=args) - - if module.check_mode: - retvals = check(args) - else: - retvals = create_or_delete_domain(args) - - # we would need to populate the return values with the API's response - # in several places so it's easier to do it at the end instead. 
-    if not retvals['failed']:
-        if args['state'] == 'present' and not module.check_mode:
-            payload = dict()
-            payload['domain'] = args['domain']
-            api_method = 'dns.zone_domain_info'
-            _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
-            retvals['memset_api'] = response.json()
-
-    if retvals['failed']:
-        module.fail_json(**retvals)
-    else:
-        module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/memset/memset_zone_record.py b/plugins/modules/cloud/memset/memset_zone_record.py
deleted file mode 100644
index 981d2ac47c..0000000000
--- a/plugins/modules/cloud/memset/memset_zone_record.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_zone_record
-author: "Simon Weald (@glitchcrab)"
-short_description: Create and delete records in Memset DNS zones.
-notes:
-  - Zones can be thought of as a logical group of domains, all of which share the
    same DNS records (i.e. they point to the same IP). An API key generated via the
    Memset customer control panel is needed with the following minimum scope -
    I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
-  - Currently this module can only create one DNS record at a time. Multiple records
    should be created using C(with_items).
-description:
-  - Manage DNS records in a Memset account.
-options:
-    state:
-        default: present
-        description:
-          - Indicates desired state of resource.
-        type: str
-        choices: [ absent, present ]
-    api_key:
-        required: true
-        description:
-          - The API key obtained from the Memset control panel.
-        type: str
-    address:
-        required: true
-        description:
-          - The address for this record (can be IP or text string depending on record type).
-        type: str
-        aliases: [ ip, data ]
-    priority:
-        description:
-          - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
-        type: int
-    record:
-        required: false
-        description:
-          - The subdomain to create.
-        type: str
-    type:
-        required: true
-        description:
-          - The type of DNS record to create.
-        choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
-        type: str
-    relative:
-        type: bool
-        default: false
-        description:
-          - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
            and C(SRV) record types.
-    ttl:
-        description:
-          - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
            valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
-        choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
-        type: int
-    zone:
-        required: true
-        description:
-          - The name of the zone to which to add the record.
-        type: str
-'''
-
-EXAMPLES = '''
-# Create DNS record for www.domain.com
-- name: Create DNS record
-  community.general.memset_zone_record:
-    api_key: dcf089a2896940da9ffefb307ef49ccd
-    state: present
-    zone: domain.com
-    type: A
-    record: www
-    address: 1.2.3.4
-    ttl: 300
-    relative: false
-  delegate_to: localhost
-
-# Create an SPF record for domain.com
-- name: Create SPF record for domain.com
-  community.general.memset_zone_record:
-    api_key: dcf089a2896940da9ffefb307ef49ccd
-    state: present
-    zone: domain.com
-    type: TXT
-    address: "v=spf1 +a +mx +ip4:1.2.3.4 ?all"
-  delegate_to: localhost
-
-# Create multiple DNS records
-- name: Create multiple DNS records
-  community.general.memset_zone_record:
-    api_key: dcf089a2896940da9ffefb307ef49ccd
-    zone: "{{ item.zone }}"
-    type: "{{ item.type }}"
-    record: "{{ item.record }}"
-    address: "{{ item.address }}"
-  delegate_to: localhost
-  with_items:
-    - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
-    - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
-'''
-
-RETURN = '''
-memset_api:
-  description: Record info from the Memset API.
-  returned: when state == present
-  type: complex
-  contains:
-    address:
-      description: Record content (may be an IP, string or blank depending on record type).
-      returned: always
-      type: str
-      sample: 1.1.1.1
-    id:
-      description: Record ID.
-      returned: always
-      type: str
-      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
-    priority:
-      description: Priority for C(MX) and C(SRV) records.
-      returned: always
-      type: int
-      sample: 10
-    record:
-      description: Name of record.
-      returned: always
-      type: str
-      sample: "www"
-    relative:
-      description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
-      returned: always
-      type: bool
-      sample: False
-    ttl:
-      description: Record TTL.
-      returned: always
-      type: int
-      sample: 10
-    type:
-      description: Record type.
-      returned: always
-      type: str
-      sample: AAAA
-    zone_id:
-      description: Zone ID.
-      returned: always
-      type: str
-      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def api_validation(args=None):
-    '''
-    Perform some validation which will be enforced by Memset's API (see:
-    https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
-    '''
-    failed_validation = False
-
-    # priority must be an integer between 0 and 999 (inclusive)
-    if not 0 <= args['priority'] <= 999:
-        failed_validation = True
-        error = 'Priority must be in the range 0 to 999 (inclusive).'
-    # address value must be at most 250 chars
-    if len(args['address']) > 250:
-        failed_validation = True
-        error = "Address must be 250 characters or fewer in length."
-    # record value must be at most 63 chars
-    if args['record']:
-        if len(args['record']) > 63:
-            failed_validation = True
-            error = "Record must be 63 characters or fewer in length."
-    # relative isn't used for all record types
-    if args['relative']:
-        if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
-            failed_validation = True
-            error = "Relative is only valid for CNAME, MX, NS and SRV record types."
- # if any of the above failed then fail early - if failed_validation: - module.fail_json(failed=True, msg=error) - - -def create_zone_record(args=None, zone_id=None, records=None, payload=None): - ''' - Sanity checking has already occurred prior to this function being - called, so we can go ahead and either create or update the record. - As defaults are defined for all values in the argument_spec, this - may cause some changes to occur as the defaults are enforced (if - the user has only configured required variables). - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - # assemble the new record. - new_record = dict() - new_record['zone_id'] = zone_id - for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']: - new_record[arg] = args[arg] - - # if we have any matches, update them. - if records: - for zone_record in records: - # record exists, add ID to payload. - new_record['id'] = zone_record['id'] - if zone_record == new_record: - # nothing to do; record is already correct so we populate - # the return var with the existing record's details. - memset_api = zone_record - return(has_changed, has_failed, memset_api, msg) - else: - # merge dicts ensuring we change any updated values - payload = zone_record.copy() - payload.update(new_record) - api_method = 'dns.zone_record_update' - if args['check_mode']: - has_changed = True - # return the new record to the user in the returned var. - memset_api = new_record - return(has_changed, has_failed, memset_api, msg) - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = new_record - # empty msg as we don't want to return a boatload of json to the user. - msg = None - else: - # no record found, so we need to create it - api_method = 'dns.zone_record_create' - payload = new_record - if args['check_mode']: - has_changed = True - # populate the return var with the new record's details. - memset_api = new_record - return(has_changed, has_failed, memset_api, msg) - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = new_record - # empty msg as we don't want to return a boatload of json to the user. - msg = None - - return(has_changed, has_failed, memset_api, msg) - - -def delete_zone_record(args=None, records=None, payload=None): - ''' - Matching records can be cleanly deleted without affecting other - resource types, so this is pretty simple to achieve. - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - # if we have any matches, delete them. - if records: - for zone_record in records: - if args['check_mode']: - has_changed = True - return(has_changed, has_failed, memset_api, msg) - payload['id'] = zone_record['id'] - api_method = 'dns.zone_record_delete' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = zone_record - # empty msg as we don't want to return a boatload of json to the user. - msg = None - - return(has_changed, has_failed, memset_api, msg) - - -def create_or_delete(args=None): - ''' - We need to perform some initial sanity checking and also look - up required info before handing it off to create or delete functions. - Check mode is integrated into the create or delete functions. 
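The dispatch that follows hinges on one client-side lookup: list every record, narrow it to those matching the target zone, subdomain and type, then hand the matches to the create or delete path. A small sketch of that filter and the "already correct?" short-circuit used during updates (illustrative data only):

```python
def matching_records(all_records, zone_id, record, rtype):
    # dns.zone_record_list cannot filter server-side, so narrow the
    # full listing down to records for this zone, name and type.
    return [r for r in all_records
            if r['zone_id'] == zone_id
            and r['record'] == record
            and r['type'] == rtype]


records = [
    {'zone_id': 'z1', 'record': 'www', 'type': 'A', 'address': '1.2.3.4'},
    {'zone_id': 'z1', 'record': 'mail', 'type': 'A', 'address': '4.3.2.1'},
    {'zone_id': 'z2', 'record': 'www', 'type': 'A', 'address': '5.6.7.8'},
]
matches = matching_records(records, 'z1', 'www', 'A')
assert matches == [records[0]]

# During an update, a match equal to the desired state means no API
# call is needed and `changed` stays False.
desired = dict(matches[0])
assert desired == matches[0]
```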
-    '''
-    has_failed, has_changed = False, False
-    msg, memset_api, stderr = None, None, None
-    retvals, payload = dict(), dict()
-
-    # get the zones and check if the relevant zone exists.
-    api_method = 'dns.zone_list'
-    _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
-    if _has_failed:
-        # this is the first time the API is called; incorrect credentials will
-        # manifest themselves at this point so we need to ensure the user is
-        # informed of the reason.
-        retvals['failed'] = _has_failed
-        retvals['msg'] = msg
-        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
-        return retvals
-
-    zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
-
-    if not zone_exists:
-        has_failed = True
-        if counter == 0:
-            stderr = "DNS zone {0} does not exist.".format(args['zone'])
-        elif counter > 1:
-            stderr = "{0} matches multiple zones.".format(args['zone'])
-        retvals['failed'] = has_failed
-        retvals['msg'] = stderr
-        retvals['stderr'] = stderr
-        return retvals
-
-    # get a list of all records (as we can't limit records by zone)
-    api_method = 'dns.zone_record_list'
-    _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
-    # find any matching records
-    records = [record for record in response.json() if record['zone_id'] == zone_id
-               and record['record'] == args['record'] and record['type'] == args['type']]
-
-    if args['state'] == 'present':
-        has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
-
-    if args['state'] == 'absent':
-        has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
-
-    retvals['changed'] = has_changed
-    retvals['failed'] = has_failed
-    # only include the optional return values that are actually set.
-    for name, value in [('msg', msg), ('stderr', stderr), ('memset_api', memset_api)]:
-        if value is not None:
-            retvals[name] = value
-
-    return retvals
-
-
-def main():
-    global module
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
-            api_key=dict(required=True, type='str', no_log=True),
-            zone=dict(required=True, type='str'),
-            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
-            address=dict(required=True, aliases=['ip', 'data'], type='str'),
-            record=dict(required=False, default='', type='str'),
-            ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
-            priority=dict(required=False, default=0, type='int'),
-            relative=dict(required=False, default=False, type='bool')
-        ),
-        supports_check_mode=True
-    )
-
-    # populate the dict with the user-provided vars.
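-    # (The helper functions above receive this plain dict rather than the
-    # AnsibleModule object itself, so the check-mode flag travels together
-    # with the user-supplied parameters.)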
- args = dict() - for key, arg in module.params.items(): - args[key] = arg - args['check_mode'] = module.check_mode - - # perform some Memset API-specific validation - api_validation(args=args) - - retvals = create_or_delete(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud/misc/cloud_init_data_facts.py deleted file mode 100644 index 1b44c50cbe..0000000000 --- a/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cloud_init_data_facts -short_description: Retrieve facts of cloud-init. -description: - - Gathers facts by reading the status.json and result.json of cloud-init. -author: René Moser (@resmo) -options: - filter: - description: - - Filter facts - type: str - choices: [ status, result ] -notes: - - See http://cloudinit.readthedocs.io/ for more information about cloud-init. -''' - -EXAMPLES = ''' -- name: Gather all facts of cloud init - community.general.cloud_init_data_facts: - register: result - -- ansible.builtin.debug: - var: result - -- name: Wait for cloud init to finish - community.general.cloud_init_data_facts: - filter: status - register: res - until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" - retries: 50 - delay: 5 -''' - -RETURN = ''' ---- -cloud_init_data_facts: - description: Facts of result and status. 
-  returned: success
-  type: dict
-  sample: '{
-    "status": {
-        "v1": {
-            "datasource": "DataSourceCloudStack",
-            "errors": []
-        }
-    },
-    "result": {
-        "v1": {
-            "datasource": "DataSourceCloudStack",
-            "init": {
-                "errors": [],
-                "finished": 1522066377.0185432,
-                "start": 1522066375.2648022
-            },
-            "init-local": {
-                "errors": [],
-                "finished": 1522066373.70919,
-                "start": 1522066373.4726632
-            },
-            "modules-config": {
-                "errors": [],
-                "finished": 1522066380.9097016,
-                "start": 1522066379.0011985
-            },
-            "modules-final": {
-                "errors": [],
-                "finished": 1522066383.56594,
-                "start": 1522066382.3449218
-            },
-            "stage": null
-        }
-    }
-  }'
-'''
-
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-
-
-CLOUD_INIT_PATH = "/var/lib/cloud/data"
-
-
-def gather_cloud_init_data_facts(module):
-    res = {
-        'cloud_init_data_facts': dict()
-    }
-
-    # avoid shadowing the built-in filter()
-    chosen_filter = module.params.get('filter')
-    for i in ['result', 'status']:
-        if chosen_filter is None or chosen_filter == i:
-            res['cloud_init_data_facts'][i] = dict()
-            json_file = os.path.join(CLOUD_INIT_PATH, i + '.json')
-
-            if os.path.exists(json_file):
-                with open(json_file, 'rb') as f:
-                    contents = to_text(f.read(), errors='surrogate_or_strict')
-
-                if contents:
-                    res['cloud_init_data_facts'][i] = module.from_json(contents)
-    return res
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            filter=dict(choices=['result', 'status']),
-        ),
-        supports_check_mode=True,
-    )
-
-    facts = gather_cloud_init_data_facts(module)
-    result = dict(changed=False, ansible_facts=facts, **facts)
-    module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py
deleted file mode 100644
index 710f028345..0000000000
--- a/plugins/modules/cloud/misc/proxmox.py
+++ /dev/null
@@ -1,707 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: proxmox
-short_description: management of instances in Proxmox VE cluster
-description:
-  - Allows you to create/delete/stop instances in a Proxmox VE cluster.
-  - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older).
-  - Since community.general 4.0.0, there are no more default values, see I(proxmox_default_behavior).
-options:
-  password:
-    description:
-      - The instance root password.
-    type: str
-  hostname:
-    description:
-      - The instance hostname.
-      - Required only for C(state=present).
-      - Must be unique if vmid is not passed.
-    type: str
-  ostemplate:
-    description:
-      - The template for VM creation.
-      - Required only for C(state=present).
-    type: str
-  disk:
-    description:
-      - This option was previously described as "hard disk size in GB for instance"; however, several formats describing
-        an LXC mount are permitted.
-      - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically
-        choose which storage to allocate from, however new versions enforce the C(<STORAGE>:<SIZE>) syntax.
-      - "Additional options are available by using some combination of the following key-value pairs as a
-        comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
-        [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
-      - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(3).
-    type: str
-  cores:
-    description:
-      - Specify number of cores per socket.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
-    type: int
-  cpus:
-    description:
-      - Number of allocated cpus for the instance.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
-    type: int
-  memory:
-    description:
-      - Memory size in MB for the instance.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(512).
-    type: int
-  swap:
-    description:
-      - Swap memory size in MB for the instance.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(0).
-    type: int
-  netif:
-    description:
-      - Specifies network interfaces for the container, as a hash/dictionary defining interfaces.
-    type: dict
-  features:
-    description:
-      - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options).
-      - Some features require the use of a privileged container.
-    type: list
-    elements: str
-    version_added: 2.0.0
-  mounts:
-    description:
-      - Specifies additional mounts (separate disks) for the container, as a hash/dictionary defining mount points.
-    type: dict
-  ip_address:
-    description:
-      - Specifies the address the container will be assigned.
-    type: str
-  onboot:
-    description:
-      - Specifies whether a VM will be started during system bootup.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
-    type: bool
-  storage:
-    description:
-      - Target storage.
-    type: str
-    default: 'local'
-  cpuunits:
-    description:
-      - CPU weight for a VM.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1000).
-    type: int
-  nameserver:
-    description:
-      - Sets DNS server IP address for a container.
-    type: str
-  searchdomain:
-    description:
-      - Sets DNS search domain for a container.
-    type: str
-  timeout:
-    description:
-      - Timeout for operations.
-    type: int
-    default: 30
-  force:
-    description:
-      - Forcing operations.
-      - Can be used only with states C(present), C(stopped), C(restarted).
-      - With C(state=present) the force option allows an existing container to be overwritten.
-      - With states C(stopped) and C(restarted) it allows forcing a stop of the instance.
-    type: bool
-    default: 'no'
-  purge:
-    description:
-      - Remove container from all related configurations.
-      - For example backup jobs, replication jobs, or HA.
-      - Related ACLs and Firewall entries will always be removed.
-      - Used with state C(absent).
-    type: bool
-    default: false
-    version_added: 2.3.0
-  state:
-    description:
-      - Indicate desired state of the instance.
-    type: str
-    choices: ['present', 'started', 'absent', 'stopped', 'restarted']
-    default: present
-  pubkey:
-    description:
-      - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2; it is ignored for earlier versions.
-    type: str
-  unprivileged:
-    description:
-      - Indicate if the container should be unprivileged.
-    type: bool
-    default: 'no'
-  description:
-    description:
-      - Specify the description for the container. Only used on the configuration web interface.
- - This is saved as a comment inside the configuration file. - type: str - version_added: '0.2.0' - hookscript: - description: - - Script that will be executed during various steps in the containers lifetime. - type: str - version_added: '0.2.0' - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. - - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is C(no_defaults), - which makes sure these options have no defaults. - - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" -author: Sergei Antipov (@UnderGreen) -extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.proxmox.selection -''' - -EXAMPLES = r''' -- name: Create new container with minimal options - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with hookscript and description - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - hookscript: 'local:snippets/vm_hook.sh' - description: created with ansible - -- name: Create new container automatically selecting the next available vmid. 
-  community.general.proxmox:
-    node: 'uk-mc02'
-    api_user: 'root@pam'
-    api_password: '1q2w3e'
-    api_host: 'node1'
-    password: '123456'
-    hostname: 'example.org'
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options with force (it will rewrite an existing container)
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-    force: yes
-
-- name: Create new container with minimal options using the environment variable PROXMOX_PASSWORD (you should export it beforehand)
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options defining network interface with dhcp
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-    netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
-
-- name: Create new container with minimal options defining network interface with static ip
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-    netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
-
-- name: Create new container with minimal options defining a mount with 8GB
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-    mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
-
-- name: Create new container with minimal options defining a cpu core limit
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-    cores: 2
-
-- name: Create a new container with nesting enabled, allowing the use of CIFS/NFS inside the container
-  community.general.proxmox:
-    vmid: 100
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    password: 123456
-    hostname: example.org
-    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-    features:
-     - nesting=1
-     - mount=cifs,nfs
-
-
-- name: Start container
-  community.general.proxmox:
-    vmid: 100
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    state: started
-
-- name: >
-    Start container with mount.
You should enter a 90-second timeout because servers - with additional disks take longer to boot - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - timeout: 90 - -- name: Stop container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: stopped - -- name: Stop container with force - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - force: yes - state: stopped - -- name: Restart container(stopped or mounted container you can't restart) - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: restarted - -- name: Remove container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: absent -''' - -import time -import traceback -from distutils.version import LooseVersion - -try: - from proxmoxer import ProxmoxAPI - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - - -VZ_TYPE = None - - -def get_nextvmid(module, proxmox): - try: - vmid = proxmox.cluster.nextid.get() - return vmid - except Exception as e: - module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e), - exception=traceback.format_exc()) - - -def get_vmid(proxmox, hostname): - return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname] - - -def get_instance(proxmox, vmid): - return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] - - -def content_check(proxmox, node, ostemplate, template_store): - return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] - - -def node_check(proxmox, node): - return [True for nd in proxmox.nodes.get() if nd['node'] == node] - - -def proxmox_version(proxmox): - apireturn = proxmox.version.get() - return LooseVersion(apireturn['version']) - - -def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs): - proxmox_node = proxmox.nodes(node) - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - - if VZ_TYPE == 'lxc': - kwargs['cpulimit'] = cpus - kwargs['rootfs'] = disk - if 'netif' in kwargs: - kwargs.update(kwargs['netif']) - del kwargs['netif'] - if 'mounts' in kwargs: - kwargs.update(kwargs['mounts']) - del kwargs['mounts'] - if 'pubkey' in kwargs: - if proxmox_version(proxmox) >= LooseVersion('4.2'): - kwargs['ssh-public-keys'] = kwargs['pubkey'] - del kwargs['pubkey'] - else: - kwargs['cpus'] = cpus - kwargs['disk'] = disk - - taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) - - while timeout: - if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and - proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for creating VM. 
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def start_instance(module, proxmox, vm, vmid, timeout): - taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post() - while timeout: - if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and - proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % - proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def stop_instance(module, proxmox, vm, vmid, timeout, force): - if force: - taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) - else: - taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post() - while timeout: - if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and - proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % - proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def umount_instance(module, proxmox, vm, vmid, timeout): - taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post() - while timeout: - if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and - proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for unmounting VM. 
Last line in task before timeout: %s' % - proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_host=dict(required=True), - api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), - api_token_id=dict(no_log=True), - api_token_secret=dict(no_log=True), - api_user=dict(required=True), - vmid=dict(type='int', required=False), - validate_certs=dict(type='bool', default=False), - node=dict(), - pool=dict(), - password=dict(no_log=True), - hostname=dict(), - ostemplate=dict(), - disk=dict(type='str'), - cores=dict(type='int'), - cpus=dict(type='int'), - memory=dict(type='int'), - swap=dict(type='int'), - netif=dict(type='dict'), - mounts=dict(type='dict'), - ip_address=dict(), - onboot=dict(type='bool'), - features=dict(type='list', elements='str'), - storage=dict(default='local'), - cpuunits=dict(type='int'), - nameserver=dict(), - searchdomain=dict(), - timeout=dict(type='int', default=30), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), - pubkey=dict(type='str', default=None), - unprivileged=dict(type='bool', default=False), - description=dict(type='str'), - hookscript=dict(type='str'), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - ), - required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])], - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('api_password', 'api_token_id')], - ) - - if not HAS_PROXMOXER: - module.fail_json(msg='proxmoxer required for this module') - - state = module.params['state'] - api_host = module.params['api_host'] - api_password = module.params['api_password'] - api_token_id = module.params['api_token_id'] - api_token_secret = module.params['api_token_secret'] - api_user = module.params['api_user'] - vmid = module.params['vmid'] - validate_certs = module.params['validate_certs'] - node = module.params['node'] - disk = module.params['disk'] - cpus = module.params['cpus'] - memory = module.params['memory'] - swap = module.params['swap'] - storage = module.params['storage'] - hostname = module.params['hostname'] - if module.params['ostemplate'] is not None: - template_store = module.params['ostemplate'].split(":")[0] - timeout = module.params['timeout'] - - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - disk="3", - cores=1, - cpus=1, - memory=512, - swap=0, - onboot=False, - cpuunits=1000, - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - - auth_args = {'user': api_user} - if not api_token_id: - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - global VZ_TYPE - VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc' - except Exception as e: - module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) - - # If vmid not set get the Next VM id from ProxmoxAPI - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and state == 'present': - vmid = get_nextvmid(module, proxmox) - elif not vmid and hostname: - hosts = get_vmid(proxmox, hostname) - if len(hosts) == 
0:
-            module.fail_json(msg="Vmid could not be fetched: hostname doesn't exist (action: %s)" % state)
-        vmid = hosts[0]
-    elif not vmid:
-        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
-
-    if state == 'present':
-        try:
-            if get_instance(proxmox, vmid) and not module.params['force']:
-                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
-            # If no vmid was passed, there cannot be another VM named 'hostname'
-            if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
-                module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
-            elif not node_check(proxmox, node):
-                module.fail_json(msg="node '%s' does not exist in cluster" % node)
-            elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
-                module.fail_json(msg="ostemplate '%s' does not exist on node %s and storage %s"
-                                 % (module.params['ostemplate'], node, template_store))
-
-            create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
-                            cores=module.params['cores'],
-                            pool=module.params['pool'],
-                            password=module.params['password'],
-                            hostname=module.params['hostname'],
-                            ostemplate=module.params['ostemplate'],
-                            netif=module.params['netif'],
-                            mounts=module.params['mounts'],
-                            ip_address=module.params['ip_address'],
-                            onboot=int(module.params['onboot']),
-                            cpuunits=module.params['cpuunits'],
-                            nameserver=module.params['nameserver'],
-                            searchdomain=module.params['searchdomain'],
-                            force=int(module.params['force']),
-                            pubkey=module.params['pubkey'],
-                            features=",".join(module.params['features']) if module.params['features'] is not None else None,
-                            unprivileged=int(module.params['unprivileged']),
-                            description=module.params['description'],
-                            hookscript=module.params['hookscript'])
-
-            module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
-        except Exception as e:
-            module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
-
-    elif state == 'started':
-        try:
-            vm = get_instance(proxmox, vmid)
-            if not vm:
-                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
-            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
-                module.exit_json(changed=False, msg="VM %s is already running" % vmid)
-
-            if start_instance(module, proxmox, vm, vmid, timeout):
-                module.exit_json(changed=True, msg="VM %s started" % vmid)
-        except Exception as e:
-            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
-
-    elif state == 'stopped':
-        try:
-            vm = get_instance(proxmox, vmid)
-            if not vm:
-                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
-
-            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
-                if module.params['force']:
-                    if umount_instance(module, proxmox, vm, vmid, timeout):
-                        module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
-                else:
-                    module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted.
" - "You can use force option to umount it.") % vmid) - - if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': - module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) - - if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']): - module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) - except Exception as e: - module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'restarted': - try: - vm = get_instance(proxmox, vmid) - if not vm: - module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) - if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or - getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'): - module.exit_json(changed=False, msg="VM %s is not running" % vmid) - - if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and - start_instance(module, proxmox, vm, vmid, timeout)): - module.exit_json(changed=True, msg="VM %s is restarted" % vmid) - except Exception as e: - module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'absent': - try: - vm = get_instance(proxmox, vmid) - if not vm: - module.exit_json(changed=False, msg="VM %s does not exist" % vmid) - - if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': - module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) - - if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': - module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) - - delete_params = {} - - if module.params['purge']: - delete_params['purge'] = 1 - - taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid, **delete_params) - - while timeout: - if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and - proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - module.exit_json(changed=True, msg="VM %s removed" % vmid) - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' - % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_domain_info.py b/plugins/modules/cloud/misc/proxmox_domain_info.py deleted file mode 100644 index 1034bc8d30..0000000000 --- a/plugins/modules/cloud/misc/proxmox_domain_info.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern (@tleguern) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_domain_info -short_description: Retrieve information about one or more Proxmox VE domains -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE domains. -options: - domain: - description: - - Restrict results to a specific authentication realm. 
- aliases: ['realm', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List existing domains - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_domains - -- name: Retrieve information about the pve domain - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_domain_pve -''' - - -RETURN = ''' -proxmox_domains: - description: List of authentication domains. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the realm. - returned: on success - type: str - realm: - description: Realm name. - returned: on success - type: str - type: - description: Realm type. - returned: on success - type: str - digest: - description: Realm hash. - returned: on success, can be absent - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR) - - -class ProxmoxDomainInfoAnsible(ProxmoxAnsible): - def get_domain(self, realm): - try: - domain = self.proxmox_api.access.domains.get(realm) - except Exception: - self.module.fail_json(msg="Domain '%s' does not exist" % realm) - domain['realm'] = realm - return domain - - def get_domains(self): - domains = self.proxmox_api.access.domains.get() - return domains - - -def proxmox_domain_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - domain_info_args = proxmox_domain_info_argument_spec() - module_args.update(domain_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - proxmox = ProxmoxDomainInfoAnsible(module) - domain = module.params['domain'] - - if domain: - domains = [proxmox.get_domain(realm=domain)] - else: - domains = proxmox.get_domains() - result['proxmox_domains'] = domains - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_group_info.py b/plugins/modules/cloud/misc/proxmox_group_info.py deleted file mode 100644 index 734ecb0f8a..0000000000 --- a/plugins/modules/cloud/misc/proxmox_group_info.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_group_info -short_description: Retrieve information about one or more Proxmox VE groups -version_added: 1.3.0 -description: - - Retrieve information 
about one or more Proxmox VE groups -options: - group: - description: - - Restrict results to a specific group. - aliases: ['groupid', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List existing groups - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_groups - -- name: Retrieve information about the admin group - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - group: admin - register: proxmox_group_admin -''' - - -RETURN = ''' -proxmox_groups: - description: List of groups. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the group. - returned: on success, can be absent - type: str - groupid: - description: Group name. - returned: on success - type: str - users: - description: List of users in the group. - returned: on success - type: list - elements: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR) - - -class ProxmoxGroupInfoAnsible(ProxmoxAnsible): - def get_group(self, groupid): - try: - group = self.proxmox_api.access.groups.get(groupid) - except Exception: - self.module.fail_json(msg="Group '%s' does not exist" % groupid) - group['groupid'] = groupid - return ProxmoxGroup(group) - - def get_groups(self): - groups = self.proxmox_api.access.groups.get() - return [ProxmoxGroup(group) for group in groups] - - -class ProxmoxGroup: - def __init__(self, group): - self.group = dict() - # Data representation is not the same depending on API calls - for k, v in group.items(): - if k == 'users' and isinstance(v, str): - self.group['users'] = v.split(',') - elif k == 'members': - self.group['users'] = group['members'] - else: - self.group[k] = v - - -def proxmox_group_info_argument_spec(): - return dict( - group=dict(type='str', aliases=['groupid', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - group_info_args = proxmox_group_info_argument_spec() - module_args.update(group_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - proxmox = ProxmoxGroupInfoAnsible(module) - group = module.params['group'] - - if group: - groups = [proxmox.get_group(groupid=group)] - else: - groups = proxmox.get_groups() - result['proxmox_groups'] = [group.group for group in groups] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py deleted file mode 100644 index 110a78434f..0000000000 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ /dev/null @@ -1,1368 +0,0 @@ 
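The `ProxmoxGroup` wrapper above exists because the Proxmox API reports group membership in two shapes: the group list endpoint returns each group's users as a comma-separated string, while the single-group endpoint returns the same data as a `members` list. A minimal standalone sketch of that normalization (illustrative only; the sample data is made up):

```python
def normalize_group(group):
    """Return a copy of a Proxmox group dict with 'users' always a list."""
    normalized = {}
    for key, value in group.items():
        if key == 'users' and isinstance(value, str):
            # Shape returned when listing all groups: comma-separated string.
            normalized['users'] = value.split(',')
        elif key == 'members':
            # Shape returned when fetching a single group: already a list.
            normalized['users'] = value
        else:
            normalized[key] = value
    return normalized


# Both shapes normalize to {'groupid': 'admin', 'users': ['root@pam', 'alice@pve']}.
print(normalize_group({'groupid': 'admin', 'users': 'root@pam,alice@pve'}))
print(normalize_group({'groupid': 'admin', 'members': ['root@pam', 'alice@pve']}))
```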
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Abdoul Bah (@helldorado) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_kvm -short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster. -description: - - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. - - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). -author: "Abdoul Bah (@helldorado) " -options: - acpi: - description: - - Specify if ACPI should be enabled/disabled. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - agent: - description: - - Specify if the QEMU Guest Agent should be enabled/disabled. - type: bool - args: - description: - - Pass arbitrary arguments to kvm. - - This option is for experts only! - - If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of - C(-serial unix:/var/run/qemu-server/.serial,server,nowait). - type: str - autostart: - description: - - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - balloon: - description: - - Specify the amount of RAM for the VM in MB. - - Using zero disables the balloon driver. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). - type: int - bios: - description: - - Specify the BIOS implementation. - type: str - choices: ['seabios', 'ovmf'] - boot: - description: - - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n). - - You can combine to set order. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd). - type: str - bootdisk: - description: - - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+) - type: str - cicustom: - description: - - 'cloud-init: Specify custom files to replace the automatically generated ones at start.' - type: str - version_added: 1.3.0 - cipassword: - description: - - 'cloud-init: password of default user to create.' - type: str - version_added: 1.3.0 - citype: - description: - - 'cloud-init: Specifies the cloud-init configuration format.' - - The default depends on the configured operating system type (C(ostype)). - - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows. - type: str - choices: ['nocloud', 'configdrive2'] - version_added: 1.3.0 - ciuser: - description: - - 'cloud-init: username of default user to create.' - type: str - version_added: 1.3.0 - clone: - description: - - Name of VM to be cloned. If C(vmid) is setted, C(clone) can take arbitrary value but required for initiating the clone. - type: str - cores: - description: - - Specify number of cores per socket. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - cpu: - description: - - Specify emulated CPU type. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64). 
- type: str - cpulimit: - description: - - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. - - If the computer has 2 CPUs, it has total of '2' CPU time - type: int - cpuunits: - description: - - Specify CPU weight for a VM. - - You can disable fair-scheduler configuration by setting this to 0 - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000). - type: int - delete: - description: - - Specify a list of settings you want to delete. - type: str - description: - description: - - Specify the description for the VM. Only used on the configuration web interface. - - This is saved as comment inside the configuration file. - type: str - digest: - description: - - Specify if to prevent changes if current configuration file has different SHA1 digest. - - This can be used to prevent concurrent modifications. - type: str - force: - description: - - Allow to force stop VM. - - Can be used with states C(stopped), C(restarted) and C(absent). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - format: - description: - - Target drive's backing file's data format. - - Used only with clone - - Use I(format=unspecified) and I(full=false) for a linked clone. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(qcow2). - If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified). - type: str - choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ] - freeze: - description: - - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution). - type: bool - full: - description: - - Create a full copy of all disk. This is always done when you clone a normal VM. - - For VM templates, we try to create a linked clone by default. - - Used only with clone - type: bool - default: 'yes' - hostpci: - description: - - Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}'). - - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). - - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). - - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model). - - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. - - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. - - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. - type: dict - hotplug: - description: - - Selectively enable hotplug features. - - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb'). - - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb'). - type: str - hugepages: - description: - - Enable/disable hugepages memory. - type: str - choices: ['any', '2', '1024'] - ide: - description: - - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. - - Values allowed are - C("storage:size,format=value"). 
- - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). - type: dict - ipconfig: - description: - - 'cloud-init: Set the IP configuration.' - - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces. - - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]"). - - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.' - - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address. - - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided. - - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration. - - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4. - type: dict - version_added: 1.3.0 - keyboard: - description: - - Sets the keyboard layout for VNC server. - type: str - kvm: - description: - - Enable/disable KVM hardware virtualization. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - localtime: - description: - - Sets the real time clock to local time. - - This is enabled by default if ostype indicates a Microsoft OS. - type: bool - lock: - description: - - Lock/unlock the VM. - type: str - choices: ['migrate', 'backup', 'snapshot', 'rollback'] - machine: - description: - - Specifies the Qemu machine type. - - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) - type: str - memory: - description: - - Memory size in MB for instance. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). - type: int - migrate_downtime: - description: - - Sets maximum tolerated downtime (in seconds) for migrations. - type: int - migrate_speed: - description: - - Sets maximum speed (in MB/s) for migrations. - - A value of 0 is no limit. - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for C(state=present). - type: str - nameservers: - description: - - 'cloud-init: DNS server IP address(es).' - - If unset, PVE host settings are used. - type: list - elements: str - version_added: 1.3.0 - net: - description: - - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}'). - - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). - - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). - - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. - - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. - - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. - - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. - type: dict - newid: - description: - - VMID for the clone. 
Used only with clone.
-      - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
-    type: int
-  numa:
-    description:
-      - A hash/dictionary of NUMA topology. C(numa='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
-      - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
-      - C(cpus) CPUs accessing this NUMA node.
-      - C(hostnodes) Host NUMA nodes to use.
-      - C(memory) Amount of memory this NUMA node provides.
-      - C(policy) NUMA allocation policy.
-    type: dict
-  numa_enabled:
-    description:
-      - Enables NUMA.
-    type: bool
-  onboot:
-    description:
-      - Specifies whether a VM will be started during system bootup.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(yes).
-    type: bool
-  ostype:
-    description:
-      - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
-      - The C(l26) value is the Linux 2.6/3.X kernel.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(l26).
-    type: str
-    choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
-  parallel:
-    description:
-      - A hash/dictionary to map host parallel devices. C(parallel='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(parallel[n]) where 0 ≤ n ≤ 2.
-      - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
-    type: dict
-  protection:
-    description:
-      - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
-    type: bool
-  reboot:
-    description:
-      - Allow reboot. If set to C(yes), the VM exits on reboot.
-    type: bool
-  revert:
-    description:
-      - Revert a pending change.
-    type: str
-  sata:
-    description:
-      - A hash/dictionary of volumes used as SATA hard disks or CD-ROMs. C(sata='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
-      - Values allowed are - C("storage:size,format=value").
-      - C(storage) is the storage identifier where to create the disk.
-      - C(size) is the size of the disk in GB.
-      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
-    type: dict
-  scsi:
-    description:
-      - A hash/dictionary of volumes used as SCSI hard disks or CD-ROMs. C(scsi='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
-      - Values allowed are - C("storage:size,format=value").
-      - C(storage) is the storage identifier where to create the disk.
-      - C(size) is the size of the disk in GB.
-      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
-    type: dict
-  scsihw:
-    description:
-      - Specifies the SCSI controller model.
-    type: str
-    choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
-  searchdomains:
-    description:
-      - 'cloud-init: Sets DNS search domain(s).'
-      - If unset, PVE host settings are used.
-    type: list
-    elements: str
-    version_added: 1.3.0
-  serial:
-    description:
-      - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
-      - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
-      - Values allowed are - C((/dev/.+|socket)).
-      - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
-    type: dict
-  shares:
-    description:
-      - Sets amount of memory shares for auto-ballooning. (0 - 50000).
- - The larger the number is, the more memory this VM gets. - - The number is relative to weights of all other running VMs. - - Using 0 disables auto-ballooning, this means no limit. - type: int - skiplock: - description: - - Ignore locks - - Only root is allowed to use this option. - type: bool - smbios: - description: - - Specifies SMBIOS type 1 fields. - type: str - snapname: - description: - - The name of the snapshot. Used only with clone. - type: str - sockets: - description: - - Sets the number of CPU sockets. (1 - N). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - sshkeys: - description: - - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.' - type: str - version_added: 1.3.0 - startdate: - description: - - Sets the initial date of the real time clock. - - Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25'). - type: str - startup: - description: - - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]). - - Order is a non-negative number defining the general startup order. - - Shutdown in done with reverse ordering. - type: str - state: - description: - - Indicates desired state of the instance. - - If C(current), the current state of the VM will be fetched. You can access it with C(results.status) - type: str - choices: ['present', 'started', 'absent', 'stopped', 'restarted','current'] - default: present - storage: - description: - - Target storage for full clone. - type: str - tablet: - description: - - Enables/disables the USB tablet device. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - tags: - description: - - List of tags to apply to the VM instance. - - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]). - - Tags are only available in Proxmox 6+. - type: list - elements: str - version_added: 2.3.0 - target: - description: - - Target node. Only allowed if the original VM is on shared storage. - - Used only with clone - type: str - tdf: - description: - - Enables/disables time drift fix. - type: bool - template: - description: - - Enables/disables the template. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - timeout: - description: - - Timeout for operations. - type: int - default: 30 - update: - description: - - If C(yes), the VM will be updated with new value. - - Cause of the operations of the API and security reasons, I have disabled the update of the following parameters - - C(net, virtio, ide, sata, scsi). Per example updating C(net) update the MAC address and C(virtio) create always new disk... - - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module. - type: bool - default: 'no' - vcpus: - description: - - Sets number of hotplugged vcpus. - type: int - vga: - description: - - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(std). 
- type: str - choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4'] - virtio: - description: - - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}'). - - Keys allowed are - C(virto[n]) where 0 ≤ n ≤ 15. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). - type: dict - watchdog: - description: - - Creates a virtual hardware watchdog device. - type: str - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. - - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is C(no_defaults), - which makes sure these options have no defaults. - - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu), - I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets), - I(tablet), I(template), I(vga), options. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" -extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.proxmox.selection -''' - -EXAMPLES = ''' -- name: Create new VM with minimal options - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - -- name: Create new VM with minimal options and given vmid - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - vmid: 100 - -- name: Create new VM with two network interface options - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - net: - net0: 'virtio,bridge=vmbr1,rate=200' - net1: 'e1000,bridge=vmbr2' - -- name: Create new VM with one network interface, three virto hard disk, 4 cores, and 2 vcpus - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - net: - net0: 'virtio,bridge=vmbr1,rate=200' - virtio: - virtio0: 'VMs_LVM:10' - virtio1: 'VMs:2,format=qcow2' - virtio2: 'VMs:5,format=raw' - cores: 4 - vcpus: 2 - -- name: > - Clone VM with only source VM name. - The VM source is spynal. - The target VM name is zavala - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: spynal - name: zavala - node: sabrewulf - storage: VMs - format: qcow2 - timeout: 500 - -- name: > - Create linked clone VM with only source VM name. - The VM source is spynal. 
- The target VM name is zavala - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: spynal - name: zavala - node: sabrewulf - storage: VMs - full: no - format: unspecified - timeout: 500 - -- name: Clone VM with source vmid and target newid and raw format - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: arbitrary_name - vmid: 108 - newid: 152 - name: zavala - node: sabrewulf - storage: LVM_STO - format: raw - timeout: 300 - -- name: Create new VM and lock it for snapshot - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - lock: snapshot - -- name: Create new VM and set protection to disable the remove VM and remove disk operations - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - protection: yes - -- name: Create new VM using cloud-init with a username and password - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - ciuser: mylinuxuser - cipassword: supersecret - searchdomains: 'mydomain.internal' - nameservers: 1.1.1.1 - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1' - -- name: Create new VM using Cloud-Init with an ssh key - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+' - searchdomains: 'mydomain.internal' - nameservers: - - '1.1.1.1' - - '8.8.8.8' - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24' - -- name: Start VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: started - -- name: Stop VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - -- name: Stop VM with force - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - force: yes - -- name: Restart VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: restarted - -- name: Remove VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: absent - -- name: Get VM current state - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: current - -- name: Update VM configuration - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - cores: 8 - memory: 16384 - update: yes - -- name: Delete QEMU parameters - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - delete: 'args,template,cpulimit' - -- name: Revert a pending change - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado 
-    name: spynal
-    node: sabrewulf
-    revert: 'template,cpulimit'
-'''
-
-RETURN = '''
-vmid:
-  description: The VM vmid.
-  returned: success
-  type: int
-  sample: 115
-status:
-  description: The current virtual machine status.
-  returned: success, not clone, not absent, not update
-  type: str
-  sample: running
-msg:
-  description: A short message.
-  returned: always
-  type: str
-  sample: "VM kropta with vmid = 110 is running"
-'''
-
-import re
-import time
-import traceback
-from distutils.version import LooseVersion
-from ansible.module_utils.six.moves.urllib.parse import quote
-
-try:
-    from proxmoxer import ProxmoxAPI
-    HAS_PROXMOXER = True
-except ImportError:
-    HAS_PROXMOXER = False
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-
-def get_nextvmid(module, proxmox):
-    try:
-        vmid = proxmox.cluster.nextid.get()
-        return vmid
-    except Exception as e:
-        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
-                         exception=traceback.format_exc())
-
-
-def get_vmid(proxmox, name):
-    return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name]
-
-
-def get_vm(proxmox, vmid):
-    return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
-
-
-def node_check(proxmox, node):
-    return [True for nd in proxmox.nodes.get() if nd['node'] == node]
-
-
-def get_vminfo(module, proxmox, node, vmid, **kwargs):
-    global results
-    results = {}
-    mac = {}
-    devices = {}
-    try:
-        vm = proxmox.nodes(node).qemu(vmid).config.get()
-    except Exception as e:
-        module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
-
-    # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
-    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
-    # Convert all dicts in kwargs to elements.
-    # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
-    for k in list(kwargs.keys()):
-        if isinstance(kwargs[k], dict):
-            kwargs.update(kwargs[k])
-            del kwargs[k]
-
-    # Split information by type
-    re_net = re.compile(r'net[0-9]')
-    re_dev = re.compile(r'(virtio|ide|scsi|sata)[0-9]')
-    for k in kwargs.keys():
-        if re_net.match(k):
-            mac[k] = parse_mac(vm[k])
-        elif re_dev.match(k):
-            devices[k] = parse_dev(vm[k])
-
-    results['mac'] = mac
-    results['devices'] = devices
-    results['vmid'] = int(vmid)
-
-
-def parse_mac(netstr):
-    return re.search('=(.*?),', netstr).group(1)
-
-
-def parse_dev(devstr):
-    return re.search('(.*?)(,|$)', devstr).group(1)
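# As a minimal sketch of what the two helpers above extract from raw Proxmox
# config strings (the sample values are made up for illustration):
#
#   parse_mac('virtio=1A:2B:3C:4D:5E:6F,bridge=vmbr1')    # -> '1A:2B:3C:4D:5E:6F'
#   parse_dev('VMs:vm-100-disk-0,size=10G,format=qcow2')  # -> 'VMs:vm-100-disk-0'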
-def settings(proxmox, vmid, node, **kwargs):
-    proxmox_node = proxmox.nodes(node)
-
-    # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
-    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
-    return proxmox_node.qemu(vmid).config.set(**kwargs) is None
-
-
-def wait_for_task(module, proxmox, node, taskid):
-    timeout = module.params['timeout']
-
-    while timeout:
-        task = proxmox.nodes(node).tasks(taskid).status.get()
-        if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
-            # Wait an extra second as the API can be ahead of the hypervisor
-            time.sleep(1)
-            return True
-        timeout = timeout - 1
-        if timeout == 0:
-            break
-        time.sleep(1)
-    return False
-
-
-def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
-    # Available only in PVE 4
-    only_v4 = ['force', 'protection', 'skiplock']
-    only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
-
-    # valid clone parameters
-    valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
-    clone_params = {}
-    # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
-    vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
-
-    proxmox_node = proxmox.nodes(node)
-
-    # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
-    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-    kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
-
-    # These features work only on PVE 4+
-    if PVE_MAJOR_VERSION < 4:
-        for p in only_v4:
-            if p in kwargs:
-                del kwargs[p]
-
-    # These features work only on PVE 6+
-    if PVE_MAJOR_VERSION < 6:
-        for p in only_v6:
-            if p in kwargs:
-                del kwargs[p]
-
-    # The 'sshkeys' param expects a URL-encoded string
-    if 'sshkeys' in kwargs:
-        urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
-        kwargs['sshkeys'] = str(urlencoded_ssh_keys)
-
-    # If update, don't update disks (virtio, ide, sata, scsi) and network interfaces
-    # pool parameter not supported by qemu//config endpoint on "update" (PVE 6.2) - only with "create"
-    if update:
-        if 'virtio' in kwargs:
-            del kwargs['virtio']
-        if 'sata' in kwargs:
-            del kwargs['sata']
-        if 'scsi' in kwargs:
-            del kwargs['scsi']
-        if 'ide' in kwargs:
-            del kwargs['ide']
-        if 'net' in kwargs:
-            del kwargs['net']
-        if 'force' in kwargs:
-            del kwargs['force']
-        if 'pool' in kwargs:
-            del kwargs['pool']
-
-    # Convert all dicts in kwargs to elements.
-    # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
-    for k in list(kwargs.keys()):
-        if isinstance(kwargs[k], dict):
-            kwargs.update(kwargs[k])
-            del kwargs[k]
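# As a minimal sketch of the flattening step above (values are made up), one
# level of nesting is unpacked so the API receives net0=..., virtio0=...
# rather than dicts:
#
#   kwargs = {'cores': 2, 'net': {'net0': 'virtio,bridge=vmbr1'}}
#   for k in list(kwargs.keys()):
#       if isinstance(kwargs[k], dict):
#           kwargs.update(kwargs[k])
#           del kwargs[k]
#   # kwargs is now {'cores': 2, 'net0': 'virtio,bridge=vmbr1'}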
-    # Rename numa_enabled to numa, according to the API documentation
-    if 'numa_enabled' in kwargs:
-        kwargs['numa'] = kwargs['numa_enabled']
-        del kwargs['numa_enabled']
-
-    # PVE api expects strings for the following params
-    if 'nameservers' in module.params:
-        nameservers = module.params.pop('nameservers')
-        if nameservers:
-            kwargs['nameserver'] = ' '.join(nameservers)
-    if 'searchdomains' in module.params:
-        searchdomains = module.params.pop('searchdomains')
-        if searchdomains:
-            kwargs['searchdomain'] = ' '.join(searchdomains)
-
-    # VM tags are expected to be valid and presented as a comma/semi-colon delimited string
-    if 'tags' in kwargs:
-        re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
-        for tag in kwargs['tags']:
-            if not re_tag.match(tag):
-                module.fail_json(msg='%s is not a valid tag' % tag)
-        kwargs['tags'] = ",".join(kwargs['tags'])
-
-    # -args and skiplock require the root@pam user and can not be used with API tokens
-    if module.params['api_user'] == "root@pam" and module.params['args'] is None:
-        if not update and module.params['proxmox_default_behavior'] == 'compatibility':
-            kwargs['args'] = vm_args
-    elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
-        kwargs['args'] = module.params['args']
-    elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
-        module.fail_json(msg='The args parameter requires the root@pam user.')
-
-    if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
-        module.fail_json(msg='The skiplock parameter requires the root@pam user.')
-
-    if update:
-        if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
-            return True
-        else:
-            return False
-    elif module.params['clone'] is not None:
-        for param in valid_clone_params:
-            if module.params[param] is not None:
-                clone_params[param] = module.params[param]
-        clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
-        taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
-    else:
-        taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
-
-    if not wait_for_task(module, proxmox, node, taskid):
-        module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
-                         proxmox_node.tasks(taskid).log.get()[:1])
-        return False
-    return True
-
-
-def start_vm(module, proxmox, vm):
-    vmid = vm[0]['vmid']
-    proxmox_node = proxmox.nodes(vm[0]['node'])
-    taskid = proxmox_node.qemu(vmid).status.start.post()
-    if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
-        module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
-                         proxmox_node.tasks(taskid).log.get()[:1])
-        return False
-    return True
-
-
-def stop_vm(module, proxmox, vm, force):
-    vmid = vm[0]['vmid']
-    proxmox_node = proxmox.nodes(vm[0]['node'])
-    taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
-    if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
-        module.fail_json(msg='Reached timeout while waiting for stopping VM. 
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - -def proxmox_version(proxmox): - apireturn = proxmox.version.get() - return LooseVersion(apireturn['version']) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - acpi=dict(type='bool'), - agent=dict(type='bool'), - args=dict(type='str'), - api_host=dict(required=True), - api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), - api_token_id=dict(no_log=True), - api_token_secret=dict(no_log=True), - api_user=dict(required=True), - autostart=dict(type='bool'), - balloon=dict(type='int'), - bios=dict(choices=['seabios', 'ovmf']), - boot=dict(type='str'), - bootdisk=dict(type='str'), - cicustom=dict(type='str'), - cipassword=dict(type='str', no_log=True), - citype=dict(type='str', choices=['nocloud', 'configdrive2']), - ciuser=dict(type='str'), - clone=dict(type='str'), - cores=dict(type='int'), - cpu=dict(type='str'), - cpulimit=dict(type='int'), - cpuunits=dict(type='int'), - delete=dict(type='str'), - description=dict(type='str'), - digest=dict(type='str'), - force=dict(type='bool'), - format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']), - freeze=dict(type='bool'), - full=dict(type='bool', default=True), - hostpci=dict(type='dict'), - hotplug=dict(type='str'), - hugepages=dict(choices=['any', '2', '1024']), - ide=dict(type='dict'), - ipconfig=dict(type='dict'), - keyboard=dict(type='str'), - kvm=dict(type='bool'), - localtime=dict(type='bool'), - lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), - machine=dict(type='str'), - memory=dict(type='int'), - migrate_downtime=dict(type='int'), - migrate_speed=dict(type='int'), - name=dict(type='str'), - nameservers=dict(type='list', elements='str'), - net=dict(type='dict'), - newid=dict(type='int'), - node=dict(), - numa=dict(type='dict'), - numa_enabled=dict(type='bool'), - onboot=dict(type='bool'), - ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']), - parallel=dict(type='dict'), - pool=dict(type='str'), - protection=dict(type='bool'), - reboot=dict(type='bool'), - revert=dict(type='str'), - sata=dict(type='dict'), - scsi=dict(type='dict'), - scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), - serial=dict(type='dict'), - searchdomains=dict(type='list', elements='str'), - shares=dict(type='int'), - skiplock=dict(type='bool'), - smbios=dict(type='str'), - snapname=dict(type='str'), - sockets=dict(type='int'), - sshkeys=dict(type='str', no_log=False), - startdate=dict(type='str'), - startup=dict(), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), - storage=dict(type='str'), - tablet=dict(type='bool'), - tags=dict(type='list', elements='str'), - target=dict(type='str'), - tdf=dict(type='bool'), - template=dict(type='bool'), - timeout=dict(type='int', default=30), - update=dict(type='bool', default=False), - validate_certs=dict(type='bool', default=False), - vcpus=dict(type='int'), - vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), - virtio=dict(type='dict'), - vmid=dict(type='int'), - watchdog=dict(), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - ), - mutually_exclusive=[('delete', 'revert'), ('delete', 
'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')], - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], - required_if=[('state', 'present', ['node'])], - ) - - if not HAS_PROXMOXER: - module.fail_json(msg='proxmoxer required for this module') - - api_host = module.params['api_host'] - api_password = module.params['api_password'] - api_token_id = module.params['api_token_id'] - api_token_secret = module.params['api_token_secret'] - api_user = module.params['api_user'] - clone = module.params['clone'] - cpu = module.params['cpu'] - cores = module.params['cores'] - delete = module.params['delete'] - memory = module.params['memory'] - name = module.params['name'] - newid = module.params['newid'] - node = module.params['node'] - revert = module.params['revert'] - sockets = module.params['sockets'] - state = module.params['state'] - update = bool(module.params['update']) - vmid = module.params['vmid'] - validate_certs = module.params['validate_certs'] - - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - acpi=True, - autostart=False, - balloon=0, - boot='cnd', - cores=1, - cpu='kvm64', - cpuunits=1000, - format='qcow2', - kvm=True, - memory=512, - ostype='l26', - sockets=1, - tablet=False, - template=False, - vga='std', - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - - if module.params['format'] == 'unspecified': - module.params['format'] = None - - auth_args = {'user': api_user} - if not (api_token_id and api_token_secret): - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - global PVE_MAJOR_VERSION - version = proxmox_version(proxmox) - PVE_MAJOR_VERSION = 3 if version < LooseVersion('4.0') else version.version[0] - except Exception as e: - module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) - - # If vmid is not defined then retrieve its value from the vm name, - # the cloned vm name or retrieve the next free VM id from ProxmoxAPI. - if not vmid: - if state == 'present' and not update and not clone and not delete and not revert: - try: - vmid = get_nextvmid(module, proxmox) - except Exception: - module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name)) - else: - clone_target = clone or name - try: - vmid = get_vmid(proxmox, clone_target)[0] - except Exception: - vmid = -1 - - if clone is not None: - # If newid is not defined then retrieve the next free id from ProxmoxAPI - if not newid: - try: - newid = get_nextvmid(module, proxmox) - except Exception: - module.fail_json(msg="Can't get the next vmid for VM {0} automatically. 
Ensure your cluster state is good".format(name))
-
-        # Ensure source VM name exists when cloning
-        if -1 == vmid:
-            module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
-
-        # Ensure source VM id exists when cloning
-        if not get_vm(proxmox, vmid):
-            module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
-
-        # Ensure the chosen VM name doesn't already exist when cloning
-        existing_vmid = get_vmid(proxmox, name)
-        if existing_vmid:
-            module.exit_json(changed=False, vmid=existing_vmid[0], msg="VM with name <%s> already exists" % name)
-
-        # Ensure the chosen VM id doesn't already exist when cloning
-        if get_vm(proxmox, newid):
-            module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
-
-    if delete is not None:
-        try:
-            settings(proxmox, vmid, node, delete=delete)
-            module.exit_json(changed=True, vmid=vmid, msg="Settings have been deleted on VM {0} with vmid {1}".format(name, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
-
-    if revert is not None:
-        try:
-            settings(proxmox, vmid, node, revert=revert)
-            module.exit_json(changed=True, vmid=vmid, msg="Settings have been reverted on VM {0} with vmid {1}".format(name, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))
-
-    if state == 'present':
-        try:
-            if get_vm(proxmox, vmid) and not (update or clone):
-                module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
-            elif get_vmid(proxmox, name) and not (update or clone):
-                module.exit_json(changed=False, vmid=get_vmid(proxmox, name)[0], msg="VM with name <%s> already exists" % name)
-            elif not node or not name:
-                module.fail_json(msg='node and name are mandatory for creating/updating a VM')
-            elif not node_check(proxmox, node):
-                module.fail_json(msg="node '%s' does not exist in cluster" % node)
-
-            create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update,
-                      acpi=module.params['acpi'],
-                      agent=module.params['agent'],
-                      autostart=module.params['autostart'],
-                      balloon=module.params['balloon'],
-                      bios=module.params['bios'],
-                      boot=module.params['boot'],
-                      bootdisk=module.params['bootdisk'],
-                      cicustom=module.params['cicustom'],
-                      cipassword=module.params['cipassword'],
-                      citype=module.params['citype'],
-                      ciuser=module.params['ciuser'],
-                      cpulimit=module.params['cpulimit'],
-                      cpuunits=module.params['cpuunits'],
-                      description=module.params['description'],
-                      digest=module.params['digest'],
-                      force=module.params['force'],
-                      freeze=module.params['freeze'],
-                      hostpci=module.params['hostpci'],
-                      hotplug=module.params['hotplug'],
-                      hugepages=module.params['hugepages'],
-                      ide=module.params['ide'],
-                      ipconfig=module.params['ipconfig'],
-                      keyboard=module.params['keyboard'],
-                      kvm=module.params['kvm'],
-                      localtime=module.params['localtime'],
-                      lock=module.params['lock'],
-                      machine=module.params['machine'],
-                      migrate_downtime=module.params['migrate_downtime'],
-                      migrate_speed=module.params['migrate_speed'],
-                      net=module.params['net'],
-                      numa=module.params['numa'],
-                      numa_enabled=module.params['numa_enabled'],
-                      onboot=module.params['onboot'],
-                      ostype=module.params['ostype'],
-                      parallel=module.params['parallel'],
-                      pool=module.params['pool'],
-                      protection=module.params['protection'],
-                      reboot=module.params['reboot'],
sata=module.params['sata'], - scsi=module.params['scsi'], - scsihw=module.params['scsihw'], - serial=module.params['serial'], - shares=module.params['shares'], - skiplock=module.params['skiplock'], - smbios1=module.params['smbios'], - snapname=module.params['snapname'], - sshkeys=module.params['sshkeys'], - startdate=module.params['startdate'], - startup=module.params['startup'], - tablet=module.params['tablet'], - tags=module.params['tags'], - target=module.params['target'], - tdf=module.params['tdf'], - template=module.params['template'], - vcpus=module.params['vcpus'], - vga=module.params['vga'], - virtio=module.params['virtio'], - watchdog=module.params['watchdog']) - - if not clone: - get_vminfo(module, proxmox, node, vmid, - ide=module.params['ide'], - net=module.params['net'], - sata=module.params['sata'], - scsi=module.params['scsi'], - virtio=module.params['virtio']) - if update: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) - elif clone is not None: - module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) - else: - module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) - except Exception as e: - if update: - module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) - elif clone is not None: - module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) - else: - module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) - - elif state == 'started': - status = {} - try: - if -1 == vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = get_vm(proxmox, vmid) - if not vm: - module.fail_json(vmid=vmid, msg='VM with vmid <%s> does not exist in cluster' % vmid) - status['status'] = vm[0]['status'] - if vm[0]['status'] == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) - - if start_vm(module, proxmox, vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'stopped': - status = {} - try: - if -1 == vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - vm = get_vm(proxmox, vmid) - if not vm: - module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) - - status['status'] = vm[0]['status'] - if vm[0]['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) - - if stop_vm(module, proxmox, vm, force=module.params['force']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'restarted': - status = {} - try: - if -1 == vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - vm = get_vm(proxmox, vmid) - if not vm: - module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) - status['status'] = vm[0]['status'] - if vm[0]['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) - - if stop_vm(module, proxmox, vm, 
force=module.params['force']) and start_vm(module, proxmox, vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'absent': - status = {} - try: - vm = get_vm(proxmox, vmid) - if not vm: - module.exit_json(changed=False, vmid=vmid) - - proxmox_node = proxmox.nodes(vm[0]['node']) - status['status'] = vm[0]['status'] - if vm[0]['status'] == 'running': - if module.params['force']: - stop_vm(module, proxmox, vm, True) - else: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=yes." % vmid) - taskid = proxmox_node.qemu.delete(vmid) - if not wait_for_task(module, proxmox, vm[0]['node'], taskid): - module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - else: - module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'current': - status = {} - if -1 == vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = get_vm(proxmox, vmid) - if not vm: - module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) - if not name: - name = vm[0]['name'] - current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if status: - module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_nic.py b/plugins/modules/cloud/misc/proxmox_nic.py deleted file mode 100644 index 23be9473eb..0000000000 --- a/plugins/modules/cloud/misc/proxmox_nic.py +++ /dev/null @@ -1,348 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Lammert Hellinga (@Kogelvis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_nic -short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. -version_added: 3.1.0 -description: - - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. -author: "Lammert Hellinga (@Kogelvis) " -options: - bridge: - description: - - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0). - type: str - firewall: - description: - - Whether this interface should be protected by the firewall. - type: bool - default: false - interface: - description: - - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31). - type: str - required: true - link_down: - description: - - Whether this interface should be disconnected (like pulling the plug). - type: bool - default: false - mac: - description: - - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. - - When not specified this module will keep the MAC address the same when changing an existing interface. - type: str - model: - description: - - The NIC emulator model. 
- type: str - choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'] - default: virtio - mtu: - description: - - Force MTU, for C(virtio) model only, setting will be ignored otherwise. - - Set to C(1) to use the bridge MTU. - - Value should be C(1 ≤ n ≤ 65520). - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for I(state=present). - type: str - queues: - description: - - Number of packet queues to be used on the device. - - Value should be C(0 ≤ n ≤ 16). - type: int - rate: - description: - - Rate limit in MBps (MegaBytes per second) as floating point number. - type: float - state: - description: - - Indicates desired state of the NIC. - type: str - choices: ['present', 'absent'] - default: present - tag: - description: - - VLAN tag to apply to packets on this interface. - - Value should be C(1 ≤ n ≤ 4094). - type: int - trunks: - description: - - List of VLAN trunks to pass through this interface. - type: list - elements: int - vmid: - description: - - Specifies the instance ID. - type: int -extends_documentation_fragment: - - community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: Create NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - bridge: vmbr0 - tag: 3 - -- name: Create NIC net0 targeting the vm by id - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - vmid: 103 - interface: net0 - bridge: vmbr0 - mac: "12:34:56:C0:FF:EE" - firewall: true - -- name: Delete NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - state: absent -''' - -RETURN = ''' -vmid: - description: The VM vmid. 
- returned: success - type: int - sample: 115 -msg: - description: A short message - returned: always - type: str - sample: "Nic net0 unchanged on VM with vmid 103" -''' - -try: - from proxmoxer import ProxmoxAPI - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.proxmox import proxmox_auth_argument_spec - - -def get_vmid(module, proxmox, name): - try: - vms = [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name] - except Exception as e: - module.fail_json(msg='Error: %s occurred while retrieving VM with name = %s' % (e, name)) - - if not vms: - module.fail_json(msg='No VM found with name: %s' % name) - elif len(vms) > 1: - module.fail_json(msg='Multiple VMs found with name: %s, provide vmid instead' % name) - - return vms[0] - - -def get_vm(proxmox, vmid): - return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] - - -def update_nic(module, proxmox, vmid, interface, model, **kwargs): - vm = get_vm(proxmox, vmid) - - try: - vminfo = proxmox.nodes(vm[0]['node']).qemu(vmid).config.get() - except Exception as e: - module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) - - if interface in vminfo: - # Convert the current config to a dictionary - config = vminfo[interface].split(',') - config.sort() - - config_current = {} - - for i in config: - kv = i.split('=') - try: - config_current[kv[0]] = kv[1] - except IndexError: - config_current[kv[0]] = '' - - # determine the current model nic and mac-address - models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', - 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] - current_model = set(models) & set(config_current.keys()) - current_model = current_model.pop() - current_mac = config_current[current_model] - - # build nic config string - config_provided = "{0}={1}".format(model, current_mac) - else: - config_provided = model - - if kwargs['mac']: - config_provided = "{0}={1}".format(model, kwargs['mac']) - - if kwargs['bridge']: - config_provided += ",bridge={0}".format(kwargs['bridge']) - - if kwargs['firewall']: - config_provided += ",firewall=1" - - if kwargs['link_down']: - config_provided += ',link_down=1' - - if kwargs['mtu']: - config_provided += ",mtu={0}".format(kwargs['mtu']) - if model != 'virtio': - module.warn( - 'Ignoring MTU for nic {0} on VM with vmid {1}, ' - 'model should be set to \'virtio\': '.format(interface, vmid)) - - if kwargs['queues']: - config_provided += ",queues={0}".format(kwargs['queues']) - - if kwargs['rate']: - config_provided += ",rate={0}".format(kwargs['rate']) - - if kwargs['tag']: - config_provided += ",tag={0}".format(kwargs['tag']) - - if kwargs['trunks']: - config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) - - net = {interface: config_provided} - vm = get_vm(proxmox, vmid) - - if ((interface not in vminfo) or (vminfo[interface] != config_provided)): - if not module.check_mode: - proxmox.nodes(vm[0]['node']).qemu(vmid).config.set(**net) - return True - - return False - - -def delete_nic(module, proxmox, vmid, interface): - vm = get_vm(proxmox, vmid) - vminfo = proxmox.nodes(vm[0]['node']).qemu(vmid).config.get() - - if interface in vminfo: - if not module.check_mode: - proxmox.nodes(vm[0]['node']).qemu(vmid).config.set(vmid=vmid, delete=interface) - 
            return True
-
-    return False
-
-
-def main():
-    module_args = proxmox_auth_argument_spec()
-    nic_args = dict(
-        bridge=dict(type='str'),
-        firewall=dict(type='bool', default=False),
-        interface=dict(type='str', required=True),
-        link_down=dict(type='bool', default=False),
-        mac=dict(type='str'),
-        model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em',
-                            'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet',
-                            'rtl8139', 'virtio', 'vmxnet3'], default='virtio'),
-        mtu=dict(type='int'),
-        name=dict(type='str'),
-        queues=dict(type='int'),
-        rate=dict(type='float'),
-        state=dict(default='present', choices=['present', 'absent']),
-        tag=dict(type='int'),
-        trunks=dict(type='list', elements='int'),
-        vmid=dict(type='int'),
-    )
-    module_args.update(nic_args)
-
-    module = AnsibleModule(
-        argument_spec=module_args,
-        required_together=[('api_token_id', 'api_token_secret')],
-        required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
-        supports_check_mode=True,
-    )
-
-    if not HAS_PROXMOXER:
-        module.fail_json(msg='proxmoxer required for this module')
-
-    api_host = module.params['api_host']
-    api_password = module.params['api_password']
-    api_token_id = module.params['api_token_id']
-    api_token_secret = module.params['api_token_secret']
-    api_user = module.params['api_user']
-    interface = module.params['interface']
-    model = module.params['model']
-    name = module.params['name']
-    state = module.params['state']
-    validate_certs = module.params['validate_certs']
-    vmid = module.params['vmid']
-
-    auth_args = {'user': api_user}
-    if not (api_token_id and api_token_secret):
-        auth_args['password'] = api_password
-    else:
-        auth_args['token_name'] = api_token_id
-        auth_args['token_value'] = api_token_secret
-
-    try:
-        proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
-    except Exception as e:
-        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
-
-    # If vmid is not defined then retrieve its value from the vm name
-    if not vmid:
-        vmid = get_vmid(module, proxmox, name)
-
-    # Ensure VM id exists
-    if not get_vm(proxmox, vmid):
-        module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
-
-    if state == 'present':
-        try:
-            if update_nic(module, proxmox, vmid, interface, model,
-                          bridge=module.params['bridge'],
-                          firewall=module.params['firewall'],
-                          link_down=module.params['link_down'],
-                          mac=module.params['mac'],
-                          mtu=module.params['mtu'],
-                          queues=module.params['queues'],
-                          rate=module.params['rate'],
-                          tag=module.params['tag'],
-                          trunks=module.params['trunks']):
-                module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid))
-            else:
-                module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
-
-    elif state == 'absent':
-        try:
-            if delete_nic(module, proxmox, vmid, interface):
-                module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid))
-            else:
-                module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
-
-
-if __name__ == '__main__':
-    main()
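As a rough standalone sketch of the config strings update_nic() assembles above (the helper name and sample values here are illustrative, not part of the module):

    def build_nic_config(model, mac, **options):
        """Join NIC options into Proxmox's 'model=MAC,key=value,...' form."""
        config = '{0}={1}'.format(model, mac)
        for key, value in options.items():
            if value:
                config += ',{0}={1}'.format(key, value)
        return config

    # build_nic_config('virtio', '12:34:56:C0:FF:EE', bridge='vmbr0', tag=3)
    # -> 'virtio=12:34:56:C0:FF:EE,bridge=vmbr0,tag=3'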
diff --git a/plugins/modules/cloud/misc/proxmox_snap.py b/plugins/modules/cloud/misc/proxmox_snap.py
deleted file mode 100644
index 4ee2d27893..0000000000
--- a/plugins/modules/cloud/misc/proxmox_snap.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Jeffrey van Pelt (@Thulium-Drake)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_snap
-short_description: Snapshot management of instances in Proxmox VE cluster
-version_added: 2.0.0
-description:
-  - Allows you to create/delete snapshots from instances in Proxmox VE cluster.
-  - Supports both KVM and LXC; OpenVZ has not been tested, as it is no longer supported on Proxmox VE.
-options:
-  api_host:
-    description:
-      - The host of the Proxmox VE cluster.
-    required: true
-    type: str
-  api_user:
-    description:
-      - The user to authenticate with.
-    required: true
-    type: str
-  api_password:
-    description:
-      - The password to authenticate with.
-      - You can use the PROXMOX_PASSWORD environment variable.
-    type: str
-    required: yes
-  hostname:
-    description:
-      - The instance name.
-    type: str
-  vmid:
-    description:
-      - The instance id.
-      - If not set, will be fetched from ProxmoxAPI based on the hostname.
-    type: str
-  validate_certs:
-    description:
-      - Enable / disable https certificate verification.
-    type: bool
-    default: no
-  state:
-    description:
-      - Indicates desired state of the instance snapshot.
-    choices: ['present', 'absent']
-    default: present
-    type: str
-  force:
-    description:
-      - For removal from the config file, even if removing the disk snapshot fails.
-    default: no
-    type: bool
-  vmstate:
-    description:
-      - Snapshot includes RAM.
-    default: no
-    type: bool
-  description:
-    description:
-      - Specify the description for the snapshot. Only used on the configuration web interface.
-      - This is saved as a comment inside the configuration file.
-    type: str
-  timeout:
-    description:
-      - Timeout for operations.
-    default: 30
-    type: int
-  snapname:
-    description:
-      - Name of the snapshot that has to be created.
-    default: 'ansible_snap'
-    type: str
-
-notes:
-  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-  - Supports C(check_mode).
-requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
-author: Jeffrey van Pelt (@Thulium-Drake)
-'''
-
-EXAMPLES = r'''
-- name: Create new container snapshot
-  community.general.proxmox_snap:
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    vmid: 100
-    state: present
-    snapname: pre-updates
-
-- name: Remove container snapshot
-  community.general.proxmox_snap:
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    vmid: 100
-    state: absent
-    snapname: pre-updates
-'''

-RETURN = r'''#'''
-
-import time
-import traceback
-
-PROXMOXER_IMP_ERR = None
-try:
-    from proxmoxer import ProxmoxAPI
-    HAS_PROXMOXER = True
-except ImportError:
-    PROXMOXER_IMP_ERR = traceback.format_exc()
-    HAS_PROXMOXER = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-
-VZ_TYPE = None
-
-
-def get_vmid(proxmox, hostname):
-    return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname]
-
-
-def get_instance(proxmox, vmid):
-    return [vm for vm in proxmox.cluster.resources.get(type='vm') if int(vm['vmid']) == int(vmid)]
-
-
-def snapshot_create(module, proxmox, vm, vmid, timeout, snapname, description, vmstate):
-    if module.check_mode:
-        return True
-
-    if VZ_TYPE == 'lxc':
-        taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).snapshot.post(snapname=snapname, description=description)
-    else:
-        taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).snapshot.post(snapname=snapname, description=description, vmstate=int(vmstate))
-    while timeout:
-        if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
-                proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
-            return True
-        timeout -= 1
-        if timeout == 0:
-            module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' %
-                             proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
-
-        time.sleep(1)
-    return False
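# The snapshot helpers above and below poll the returned UPID the same way; a
# condensed restatement of that pattern, assuming a proxmoxer connection, a
# node name and a task id already exist:
#
#   import time
#
#   def wait_for_upid(proxmox, node, taskid, timeout=30):
#       while timeout > 0:
#           status = proxmox.nodes(node).tasks(taskid).status.get()
#           if status['status'] == 'stopped' and status['exitstatus'] == 'OK':
#               return True
#           timeout -= 1
#           time.sleep(1)
#       return False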
-def snapshot_remove(module, proxmox, vm, vmid, timeout, snapname, force):
-    if module.check_mode:
-        return True
-
-    taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).snapshot.delete(snapname, force=int(force))
-    while timeout:
-        if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
-                proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
-            return True
-        timeout -= 1
-        if timeout == 0:
-            module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' %
-                             proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
-
-        time.sleep(1)
-    return False
-
-
-def setup_api(api_host, api_user, api_password, validate_certs):
-    api = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
-    return api
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            api_host=dict(required=True),
-            api_user=dict(required=True),
-            api_password=dict(no_log=True, required=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])),
-            vmid=dict(required=False),
-            validate_certs=dict(type='bool', default='no'),
-            hostname=dict(),
-            timeout=dict(type='int', default=30),
-            state=dict(default='present', choices=['present', 'absent']),
-            description=dict(type='str'),
-            snapname=dict(type='str', default='ansible_snap'),
-            force=dict(type='bool', default='no'),
-            vmstate=dict(type='bool', default='no'),
-        ),
-        supports_check_mode=True
-    )
-
-    if not HAS_PROXMOXER:
-        module.fail_json(msg=missing_required_lib('python-proxmoxer'),
-                         exception=PROXMOXER_IMP_ERR)
-
-    state = module.params['state']
-    api_user = module.params['api_user']
-    api_host = module.params['api_host']
-    api_password = module.params['api_password']
-    vmid = module.params['vmid']
-    validate_certs = module.params['validate_certs']
-    hostname = module.params['hostname']
-    description = module.params['description']
-    snapname = module.params['snapname']
-    timeout = module.params['timeout']
-    force = module.params['force']
-    vmstate = module.params['vmstate']
-
-    try:
-        proxmox = setup_api(api_host, api_user, api_password, validate_certs)
-
-    except Exception as e:
-        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % to_native(e))
-
-    # If hostname is set get the VM id from ProxmoxAPI
-    if not vmid and hostname:
-        hosts = get_vmid(proxmox, hostname)
-        if len(hosts) == 0:
-            module.fail_json(msg="Vmid could not be fetched => Hostname does not exist (action: %s)" % state)
-        vmid = hosts[0]
-    elif not vmid:
-        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
-
-    vm = get_instance(proxmox, vmid)
-
-    global VZ_TYPE
-    VZ_TYPE = vm[0]['type']
-
-    if state == 'present':
-        try:
-            vm = get_instance(proxmox, vmid)
-            if not vm:
-                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
-
-            for i in getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).snapshot.get():
-                if i['name'] == snapname:
-                    module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
-
-            if snapshot_create(module, proxmox, vm, vmid, timeout, snapname, description, vmstate):
-                if module.check_mode:
-                    module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
-                else:
-                    module.exit_json(changed=True, msg="Snapshot %s created" % snapname)
-
-        except Exception as e:
-            module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
-
-    elif state == 'absent':
-        try:
-            vm = get_instance(proxmox, vmid)
-            if not vm:
-                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
-
-            snap_exist = False
-
-            for i in getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).snapshot.get():
-                if i['name'] == snapname:
-                    snap_exist = True
-                    continue
-
-            if not snap_exist:
-                module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname)
-            else:
-                if snapshot_remove(module, proxmox, vm, vmid, timeout, snapname, force):
-                    if module.check_mode:
-                        module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname)
-                    else:
-                        module.exit_json(changed=True, msg="Snapshot %s removed" % snapname)
-
-        except Exception as e:
-            module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/misc/proxmox_storage_info.py b/plugins/modules/cloud/misc/proxmox_storage_info.py
deleted file mode 100644
index d06c9be8c1..0000000000
--- a/plugins/modules/cloud/misc/proxmox_storage_info.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_storage_info
-short_description: Retrieve information about one or more Proxmox VE storages
-version_added: 2.2.0
-description:
-  - Retrieve information about one or more Proxmox VE storages.
-options:
-  storage:
-    description:
-      - Only return information on a specific storage.
-    aliases: ['name']
-    type: str
-  type:
-    description:
-      - Filter on a specific storage type.
-    type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment: community.general.proxmox.documentation
-notes:
-  - Storage-specific options can be returned by this module; please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
-'''
-
-
-EXAMPLES = '''
-- name: List existing storages
-  community.general.proxmox_storage_info:
-    api_host: helldorado
-    api_user: root@pam
-    api_password: "{{ password | default(omit) }}"
-    api_token_id: "{{ token_id | default(omit) }}"
-    api_token_secret: "{{ token_secret | default(omit) }}"
-  register: proxmox_storages
-
-- name: List NFS storages only
-  community.general.proxmox_storage_info:
-    api_host: helldorado
-    api_user: root@pam
-    api_password: "{{ password | default(omit) }}"
-    api_token_id: "{{ token_id | default(omit) }}"
-    api_token_secret: "{{ token_secret | default(omit) }}"
-    type: nfs
-  register: proxmox_storages_nfs
-
-- name: Retrieve information about the lvm2 storage
-  community.general.proxmox_storage_info:
-    api_host: helldorado
-    api_user: root@pam
-    api_password: "{{ password | default(omit) }}"
-    api_token_id: "{{ token_id | default(omit) }}"
-    api_token_secret: "{{ token_secret | default(omit) }}"
-    storage: lvm2
-  register: proxmox_storage_lvm
-'''
-
-
-RETURN = '''
-proxmox_storages:
-  description: List of storage pools.
- returned: on success - type: list - elements: dict - contains: - content: - description: Proxmox content types available in this storage - returned: on success - type: list - elements: str - digest: - description: Storage's digest - returned: on success - type: str - nodes: - description: List of nodes associated to this storage - returned: on success, if storage is not local - type: list - elements: str - path: - description: Physical path to this storage - returned: on success - type: str - prune-backups: - description: Backup retention options - returned: on success - type: list - elements: dict - shared: - description: Is this storage shared - returned: on success - type: bool - storage: - description: Storage name - returned: on success - type: str - type: - description: Storage type - returned: on success - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR, proxmox_to_ansible_bool) - - -class ProxmoxStorageInfoAnsible(ProxmoxAnsible): - def get_storage(self, storage): - try: - storage = self.proxmox_api.storage.get(storage) - except Exception: - self.module.fail_json(msg="Storage '%s' does not exist" % storage) - return ProxmoxStorage(storage) - - def get_storages(self, type=None): - storages = self.proxmox_api.storage.get(type=type) - storages = [ProxmoxStorage(storage) for storage in storages] - return storages - - -class ProxmoxStorage: - def __init__(self, storage): - self.storage = storage - # Convert proxmox representation of lists, dicts and boolean for easier - # manipulation within ansible. - if 'shared' in self.storage: - self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared']) - if 'content' in self.storage: - self.storage['content'] = self.storage['content'].split(',') - if 'nodes' in self.storage: - self.storage['nodes'] = self.storage['nodes'].split(',') - if 'prune-backups' in storage: - options = storage['prune-backups'].split(',') - self.storage['prune-backups'] = dict() - for option in options: - k, v = option.split('=') - self.storage['prune-backups'][k] = v - - -def proxmox_storage_info_argument_spec(): - return dict( - storage=dict(type='str', aliases=['name']), - type=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - storage_info_args = proxmox_storage_info_argument_spec() - module_args.update(storage_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('storage', 'type')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - proxmox = ProxmoxStorageInfoAnsible(module) - storage = module.params['storage'] - storagetype = module.params['type'] - - if storage: - storages = [proxmox.get_storage(storage)] - else: - storages = proxmox.get_storages(type=storagetype) - result['proxmox_storages'] = [storage.storage for storage in storages] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_tasks_info.py b/plugins/modules/cloud/misc/proxmox_tasks_info.py deleted file mode 100644 index 63dd6215dc..0000000000 --- a/plugins/modules/cloud/misc/proxmox_tasks_info.py +++ 
/dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andreas Botzner (@paginabianca)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: proxmox_tasks_info
-short_description: Retrieve information about one or more Proxmox VE tasks
-version_added: 3.8.0
-description:
-  - Retrieve information about one or more Proxmox VE tasks.
-author: 'Andreas Botzner (@paginabianca)'
-options:
-  node:
-    description:
-      - Node where to get tasks.
-    required: true
-    type: str
-  task:
-    description:
-      - Return specific task.
-    aliases: ['upid', 'name']
-    type: str
-extends_documentation_fragment:
-  - community.general.proxmox.documentation
-'''
-
-
-EXAMPLES = '''
-- name: List tasks on node01
-  community.general.proxmox_tasks_info:
-    api_host: proxmoxhost
-    api_user: root@pam
-    api_password: '{{ password | default(omit) }}'
-    api_token_id: '{{ token_id | default(omit) }}'
-    api_token_secret: '{{ token_secret | default(omit) }}'
-    node: node01
-  register: result
-
-- name: Retrieve information about specific tasks on node01
-  community.general.proxmox_tasks_info:
-    api_host: proxmoxhost
-    api_user: root@pam
-    api_password: '{{ password | default(omit) }}'
-    api_token_id: '{{ token_id | default(omit) }}'
-    api_token_secret: '{{ token_secret | default(omit) }}'
-    task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
-    node: node01
-  register: proxmox_tasks
-'''
-
-
-RETURN = '''
-proxmox_tasks:
-  description: List of tasks.
-  returned: on success
-  type: list
-  elements: dict
-  contains:
-    id:
-      description: ID of the task.
-      returned: on success
-      type: str
-    node:
-      description: Node name.
-      returned: on success
-      type: str
-    pid:
-      description: PID of the task.
-      returned: on success
-      type: int
-    pstart:
-      description: pstart of the task.
-      returned: on success
-      type: int
-    starttime:
-      description: Starting time of the task.
-      returned: on success
-      type: int
-    type:
-      description: Type of the task.
-      returned: on success
-      type: str
-    upid:
-      description: UPID of the task.
-      returned: on success
-      type: str
-    user:
-      description: User that owns the task.
-      returned: on success
-      type: str
-    endtime:
-      description: Endtime of the task.
-      returned: on success, can be absent
-      type: int
-    status:
-      description: Status of the task.
-      returned: on success, can be absent
-      type: str
-    failed:
-      description: If the task failed.
-      returned: when status is defined
-      type: bool
-msg:
-  description: Short message.
-  returned: on failure
-  type: str
-  sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
-'''
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
-    proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
-
-
-class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
-    def get_task(self, upid, node):
-        tasks = self.get_tasks(node)
-        for task in tasks:
-            if task.info['upid'] == upid:
-                return [task]
-
-    def get_tasks(self, node):
-        tasks = self.proxmox_api.nodes(node).tasks.get()
-        return [ProxmoxTask(task) for task in tasks]
-
-
-class ProxmoxTask:
-    def __init__(self, task):
-        self.info = dict()
-        for k, v in task.items():
-            if k == 'status' and isinstance(v, str):
-                self.info[k] = v
-                if v != 'OK':
-                    self.info['failed'] = True
-            else:
-                self.info[k] = v
-
-
-def proxmox_task_info_argument_spec():
-    return dict(
-        task=dict(type='str', aliases=['upid', 'name'], required=False),
-        node=dict(type='str', required=True),
-    )
-
-
-def main():
-    module_args = proxmox_auth_argument_spec()
-    task_info_args = proxmox_task_info_argument_spec()
-    module_args.update(task_info_args)
-
-    module = AnsibleModule(
-        argument_spec=module_args,
-        required_together=[('api_token_id', 'api_token_secret'),
-                           ('api_user', 'api_password')],
-        required_one_of=[('api_password', 'api_token_id')],
-        supports_check_mode=True)
-    result = dict(changed=False)
-
-    if not HAS_PROXMOXER:
-        module.fail_json(msg=missing_required_lib(
-            'proxmoxer'), exception=PROXMOXER_IMP_ERR)
-    proxmox = ProxmoxTaskInfoAnsible(module)
-    upid = module.params['task']
-    node = module.params['node']
-    if upid:
-        tasks = proxmox.get_task(upid=upid, node=node)
-    else:
-        tasks = proxmox.get_tasks(node=node)
-    if tasks is not None:
-        result['proxmox_tasks'] = [task.info for task in tasks]
-        module.exit_json(**result)
-    else:
-        result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
-            upid, node)
-        module.fail_json(**result)
-
-
-if __name__ == '__main__':
-    main()
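For reference, the failed flag that ProxmoxTask derives above can be restated in a few standalone lines (the task dict is a made-up sample):

    task = {'upid': 'UPID:node01:0000A1B2:00FF00FF:61000000:vzstart:100:root@pam:', 'status': 'command failed'}
    info = dict(task)
    if isinstance(info.get('status'), str) and info['status'] != 'OK':
        info['failed'] = True  # reported per task under proxmox_tasks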
-    type: str
-  content_type:
-    description:
-      - Content type.
-      - Required only for C(state=present).
-    type: str
-    default: 'vztmpl'
-    choices: ['vztmpl', 'iso']
-  storage:
-    description:
-      - Target storage.
-    type: str
-    default: 'local'
-  timeout:
-    description:
-      - Timeout for operations.
-    type: int
-    default: 30
-  force:
-    description:
-      - Can be used only with C(state=present); an existing template will be overwritten.
-    type: bool
-    default: 'no'
-  state:
-    description:
-      - Indicate desired state of the template.
-    type: str
-    choices: ['present', 'absent']
-    default: present
-notes:
-  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-author: Sergei Antipov (@UnderGreen)
-extends_documentation_fragment: community.general.proxmox.documentation
-'''
-
-EXAMPLES = '''
-- name: Upload new openvz template with minimal options
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: >
-    Upload new openvz template with minimal options, using the environment
-    variable PROXMOX_PASSWORD (you should export it beforehand)
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_host: node1
-    src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: Upload new openvz template with all options and force overwrite
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    storage: local
-    content_type: vztmpl
-    src: ~/ubuntu-14.04-x86_64.tar.gz
-    force: yes
-
-- name: Delete template with minimal options
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    template: ubuntu-14.04-x86_64.tar.gz
-    state: absent
-
-- name: Download proxmox appliance container template
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    storage: local
-    content_type: vztmpl
-    template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
-'''
-
-import os
-import time
-
-try:
-    from proxmoxer import ProxmoxAPI
-    HAS_PROXMOXER = True
-except ImportError:
-    HAS_PROXMOXER = False
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-
-
-def get_template(proxmox, node, storage, content_type, template):
-    return [True for tmpl in proxmox.nodes(node).storage(storage).content.get()
-            if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
-
-
-def task_status(module, proxmox, node, taskid, timeout):
-    """
-    Check the task status and wait until the task is completed or the timeout is reached.
-    """
-    while timeout:
-        task_status = proxmox.nodes(node).tasks(taskid).status.get()
-        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
-            return True
-        timeout = timeout - 1
-        if timeout == 0:
-            module.fail_json(msg='Reached timeout while waiting for uploading/downloading template.
Last line in task before timeout: %s' - % proxmox.node(node).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def upload_template(module, proxmox, node, storage, content_type, realpath, timeout): - taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb')) - return task_status(module, proxmox, node, taskid, timeout) - - -def download_template(module, proxmox, node, storage, template, timeout): - taskid = proxmox.nodes(node).aplinfo.post(storage=storage, template=template) - return task_status(module, proxmox, node, taskid, timeout) - - -def delete_template(module, proxmox, node, storage, content_type, template, timeout): - volid = '%s:%s/%s' % (storage, content_type, template) - proxmox.nodes(node).storage(storage).content.delete(volid) - while timeout: - if not get_template(proxmox, node, storage, content_type, template): - return True - timeout = timeout - 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for deleting template.') - - time.sleep(1) - return False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_host=dict(required=True), - api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), - api_token_id=dict(no_log=True), - api_token_secret=dict(no_log=True), - api_user=dict(required=True), - validate_certs=dict(type='bool', default=False), - node=dict(), - src=dict(type='path'), - template=dict(), - content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']), - storage=dict(default='local'), - timeout=dict(type='int', default=30), - force=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - ), - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('api_password', 'api_token_id')], - required_if=[('state', 'absent', ['template'])] - ) - - if not HAS_PROXMOXER: - module.fail_json(msg='proxmoxer required for this module') - - state = module.params['state'] - api_host = module.params['api_host'] - api_password = module.params['api_password'] - api_token_id = module.params['api_token_id'] - api_token_secret = module.params['api_token_secret'] - api_user = module.params['api_user'] - validate_certs = module.params['validate_certs'] - node = module.params['node'] - storage = module.params['storage'] - timeout = module.params['timeout'] - - auth_args = {'user': api_user} - if not (api_token_id and api_token_secret): - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - # Used to test the validity of the token if given - proxmox.version.get() - except Exception as e: - module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) - - if state == 'present': - try: - content_type = module.params['content_type'] - src = module.params['src'] - - # download appliance template - if content_type == 'vztmpl' and not src: - template = module.params['template'] - - if not template: - module.fail_json(msg='template param for downloading appliance template is mandatory') - - if get_template(proxmox, node, storage, content_type, template) and not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) - - if download_template(module, proxmox, node, storage, template, timeout): - module.exit_json(changed=True, 
msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
-
-            template = os.path.basename(src)
-            if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
-                module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
-            elif not src:
-                module.fail_json(msg='src param for uploading a template file is mandatory')
-            elif not (os.path.exists(src) and os.path.isfile(src)):
-                module.fail_json(msg='template file at path %s does not exist' % src)
-
-            if upload_template(module, proxmox, node, storage, content_type, src, timeout):
-                module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
-        except Exception as e:
-            module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
-
-    elif state == 'absent':
-        try:
-            content_type = module.params['content_type']
-            template = module.params['template']
-
-            if not get_template(proxmox, node, storage, content_type, template):
-                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
-
-            if delete_template(module, proxmox, node, storage, content_type, template, timeout):
-                module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
-        except Exception as e:
-            module.fail_json(msg="deleting template %s failed with exception: %s" % (template, e))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/misc/proxmox_user_info.py b/plugins/modules/cloud/misc/proxmox_user_info.py
deleted file mode 100644
index 49a890b9f6..0000000000
--- a/plugins/modules/cloud/misc/proxmox_user_info.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_user_info
-short_description: Retrieve information about one or more Proxmox VE users
-version_added: 1.3.0
-description:
-  - Retrieve information about one or more Proxmox VE users.
-options:
-  domain:
-    description:
-      - Restrict results to a specific authentication realm.
-    aliases: ['realm']
-    type: str
-  user:
-    description:
-      - Restrict results to a specific user.
-    aliases: ['name']
-    type: str
-  userid:
-    description:
-      - Restrict results to a specific user ID, which is a concatenation of the user and domain parts.
- type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: List existing users - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_users - -- name: List existing users in the pve authentication realm - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_users_pve - -- name: Retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - userid: admin@pve - register: proxmox_user_admin - -- name: Alternative way to retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - user: admin - domain: pve - register: proxmox_user_admin -''' - - -RETURN = ''' -proxmox_users: - description: List of users. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the user. - returned: on success - type: str - domain: - description: User's authentication realm, also the right part of the user ID. - returned: on success - type: str - email: - description: User's email address. - returned: on success - type: str - enabled: - description: User's account state. - returned: on success - type: bool - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - firstname: - description: User's first name. - returned: on success - type: str - groups: - description: List of groups which the user is a member of. - returned: on success - type: list - elements: str - keys: - description: User's two factor authentication keys. - returned: on success - type: str - lastname: - description: User's last name. - returned: on success - type: str - tokens: - description: List of API tokens associated to the user. - returned: on success - type: list - elements: dict - contains: - comment: - description: Short description of the token. - returned: on success - type: str - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - privsep: - description: Describe if the API token is further restricted with ACLs or is fully privileged. - returned: on success - type: bool - tokenid: - description: Token name. - returned: on success - type: str - user: - description: User's login name, also the left part of the user ID. - returned: on success - type: str - userid: - description: Proxmox user ID, represented as user@realm. 
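The C(userid) above is simply C(user@domain); a condensed sketch of the split this module applies when normalizing API results (illustrative only, not the full normalization):

# Sketch only: derive the user and domain fields from a Proxmox userid.
def split_userid(userid):
    user, domain = userid.split('@', 1)  # 'admin@pve' -> ('admin', 'pve')
    return {'userid': userid, 'user': user, 'domain': domain}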
- returned: on success - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool, HAS_PROXMOXER, PROXMOXER_IMP_ERR) - - -class ProxmoxUserInfoAnsible(ProxmoxAnsible): - def get_user(self, userid): - try: - user = self.proxmox_api.access.users.get(userid) - except Exception: - self.module.fail_json(msg="User '%s' does not exist" % userid) - user['userid'] = userid - return ProxmoxUser(user) - - def get_users(self, domain=None): - users = self.proxmox_api.access.users.get(full=1) - users = [ProxmoxUser(user) for user in users] - if domain: - return [user for user in users if user.user['domain'] == domain] - return users - - -class ProxmoxUser: - def __init__(self, user): - self.user = dict() - # Data representation is not the same depending on API calls - for k, v in user.items(): - if k == 'enable': - self.user['enabled'] = proxmox_to_ansible_bool(user['enable']) - elif k == 'userid': - self.user['user'] = user['userid'].split('@')[0] - self.user['domain'] = user['userid'].split('@')[1] - self.user[k] = v - elif k in ['groups', 'tokens'] and (v == '' or v is None): - self.user[k] = [] - elif k == 'groups' and type(v) == str: - self.user['groups'] = v.split(',') - elif k == 'tokens' and type(v) == list: - for token in v: - if 'privsep' in token: - token['privsep'] = proxmox_to_ansible_bool(token['privsep']) - self.user['tokens'] = v - elif k == 'tokens' and type(v) == dict: - self.user['tokens'] = list() - for tokenid, tokenvalues in v.items(): - t = tokenvalues - t['tokenid'] = tokenid - if 'privsep' in tokenvalues: - t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep']) - self.user['tokens'].append(t) - else: - self.user[k] = v - - -def proxmox_user_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm']), - user=dict(type='str', aliases=['name']), - userid=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - user_info_args = proxmox_user_info_argument_spec() - module_args.update(user_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('user', 'userid'), ('domain', 'userid')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - proxmox = ProxmoxUserInfoAnsible(module) - domain = module.params['domain'] - user = module.params['user'] - if user and domain: - userid = user + '@' + domain - else: - userid = module.params['userid'] - - if userid: - users = [proxmox.get_user(userid=userid)] - else: - users = proxmox.get_users(domain=domain) - result['proxmox_users'] = [user.user for user in users] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/rhevm.py b/plugins/modules/cloud/misc/rhevm.py deleted file mode 100644 index 77b40248b3..0000000000 --- a/plugins/modules/cloud/misc/rhevm.py +++ /dev/null @@ -1,1498 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Timothy Vandenbrande -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - 
-DOCUMENTATION = r'''
----
-module: rhevm
-short_description: RHEV/oVirt automation
-description:
-  - This module only supports oVirt/RHEV version 3.
-  - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
-  - Allows you to create, remove, update, or power-manage virtual machines on a RHEV/oVirt platform.
-requirements:
-  - ovirtsdk
-author:
-- Timothy Vandenbrande (@TimothyVandenbrande)
-options:
-  user:
-    description:
-      - The user to authenticate with.
-    type: str
-    default: admin@internal
-  password:
-    description:
-      - The password for user authentication.
-    type: str
-    required: true
-  server:
-    description:
-      - The name/IP of your RHEV-m/oVirt instance.
-    type: str
-    default: 127.0.0.1
-  port:
-    description:
-      - The port on which the API is reachable.
-    type: int
-    default: 443
-  insecure_api:
-    description:
-      - A boolean switch to make a secure or insecure connection to the server.
-    type: bool
-    default: no
-  name:
-    description:
-      - The name of the VM.
-    type: str
-  cluster:
-    description:
-      - The RHEV/oVirt cluster in which you want your VM to start.
-    type: str
-  datacenter:
-    description:
-      - The RHEV/oVirt datacenter in which you want your VM to start.
-    type: str
-    default: Default
-  state:
-    description:
-      - This serves to create, remove, update, or power-manage your VM.
-    type: str
-    choices: [ absent, cd, down, info, ping, present, restarted, up ]
-    default: present
-  image:
-    description:
-      - The template to use for the VM.
-    type: str
-  type:
-    description:
-      - Defines whether the VM is a server or desktop.
-    type: str
-    choices: [ desktop, host, server ]
-    default: server
-  vmhost:
-    description:
-      - The host you wish your VM to run on.
-    type: str
-  vmcpu:
-    description:
-      - The number of CPUs you want in your VM.
-    type: int
-    default: 2
-  cpu_share:
-    description:
-      - This parameter is used to configure the CPU share.
-    type: int
-    default: 0
-  vmmem:
-    description:
-      - The amount of memory you want your VM to use (in GB).
-    type: int
-    default: 1
-  osver:
-    description:
-      - The operating system option in RHEV/oVirt.
-    type: str
-    default: rhel_6x64
-  mempol:
-    description:
-      - The minimum amount of memory you wish to reserve for this system.
-    type: int
-    default: 1
-  vm_ha:
-    description:
-      - Whether to make your VM highly available.
-    type: bool
-    default: yes
-  disks:
-    description:
-      - This option uses complex arguments and is a list of disks with the options name, size, and domain.
-    type: list
-    elements: str
-  ifaces:
-    description:
-      - This option uses complex arguments and is a list of interfaces with the options name and vlan.
-    type: list
-    elements: str
-    aliases: [ interfaces, nics ]
-  boot_order:
-    description:
-      - This option uses complex arguments and is a list of items that specify the boot order.
-    type: list
-    elements: str
-    default: [ hd, network ]
-  del_prot:
-    description:
-      - This option sets the delete protection checkbox.
-    type: bool
-    default: yes
-  cd_drive:
-    description:
-      - The CD you wish to have mounted on the VM when I(state=cd).
-    type: str
-  timeout:
-    description:
-      - The timeout you wish to define for power actions.
-      - When I(state=up).
-      - When I(state=down).
-      - When I(state=restarted).
-    type: int
-'''
-
-RETURN = r'''
-vm:
-  description: Returns all of the VM's variables and execution details.
- returned: always - type: dict - sample: '{ - "boot_order": [ - "hd", - "network" - ], - "changed": true, - "changes": [ - "Delete Protection" - ], - "cluster": "C1", - "cpu_share": "0", - "created": false, - "datacenter": "Default", - "del_prot": true, - "disks": [ - { - "domain": "ssd-san", - "name": "OS", - "size": 40 - } - ], - "eth0": "00:00:5E:00:53:00", - "eth1": "00:00:5E:00:53:01", - "eth2": "00:00:5E:00:53:02", - "exists": true, - "failed": false, - "ifaces": [ - { - "name": "eth0", - "vlan": "Management" - }, - { - "name": "eth1", - "vlan": "Internal" - }, - { - "name": "eth2", - "vlan": "External" - } - ], - "image": false, - "mempol": "0", - "msg": [ - "VM exists", - "cpu_share was already set to 0", - "VM high availability was already set to True", - "The boot order has already been set", - "VM delete protection has been set to True", - "Disk web2_Disk0_OS already exists", - "The VM starting host was already set to host416" - ], - "name": "web2", - "type": "server", - "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", - "vm_ha": true, - "vmcpu": "4", - "vmhost": "host416", - "vmmem": "16" - }' -''' - -EXAMPLES = r''' -- name: Basic get info from VM - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - name: demo - state: info - -- name: Basic create example from image - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - name: demo - cluster: centos - image: centos7_x64 - state: present - -- name: Power management - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - cluster: RH - name: uptime_server - image: centos7_x64 - state: down - -- name: Multi disk, multi nic create example - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - cluster: RH - name: server007 - type: server - vmcpu: 4 - vmmem: 2 - ifaces: - - name: eth0 - vlan: vlan2202 - - name: eth1 - vlan: vlan36 - - name: eth2 - vlan: vlan38 - - name: eth3 - vlan: vlan2202 - disks: - - name: root - size: 10 - domain: ssd-san - - name: swap - size: 10 - domain: 15kiscsi-san - - name: opt - size: 10 - domain: 15kiscsi-san - - name: var - size: 10 - domain: 10kiscsi-san - - name: home - size: 10 - domain: sata-san - boot_order: - - network - - hd - state: present - -- name: Add a CD to the disk cd_drive - community.general.rhevm: - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - name: server007 - cd_drive: rhev-tools-setup.iso - state: cd - -- name: New host deployment + host network configuration - community.general.rhevm: - password: '{{ rhevm.admin.pass }}' - name: ovirt_node007 - type: host - cluster: rhevm01 - ifaces: - - name: em1 - - name: em2 - - name: p3p1 - ip: 172.31.224.200 - netmask: 255.255.254.0 - - name: p3p2 - ip: 172.31.225.200 - netmask: 255.255.254.0 - - name: bond0 - bond: - - em1 - - em2 - network: rhevm - ip: 172.31.222.200 - netmask: 255.255.255.0 - management: yes - - name: bond0.36 - network: vlan36 - ip: 10.2.36.200 - netmask: 255.255.254.0 - gateway: 10.2.36.254 - - name: bond0.2202 - network: vlan2202 - - name: bond0.38 - network: vlan38 - state: present -''' - -import time - -try: - from ovirtsdk.api import API - from ovirtsdk.xml import params - HAS_SDK = True -except ImportError: - HAS_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -RHEV_FAILED = 1 -RHEV_SUCCESS = 0 -RHEV_UNAVAILABLE = 2 - -RHEV_TYPE_OPTS = 
['desktop', 'host', 'server'] -STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restart', 'up'] - -msg = [] -changed = False -failed = False - - -class RHEVConn(object): - 'Connection to RHEV-M' - - def __init__(self, module): - self.module = module - - user = module.params.get('user') - password = module.params.get('password') - server = module.params.get('server') - port = module.params.get('port') - insecure_api = module.params.get('insecure_api') - - url = "https://%s:%s" % (server, port) - - try: - api = API(url=url, username=user, password=password, insecure=str(insecure_api)) - api.test() - self.conn = api - except Exception: - raise Exception("Failed to connect to RHEV-M.") - - def __del__(self): - self.conn.disconnect() - - def createVMimage(self, name, cluster, template): - try: - vmparams = params.VM( - name=name, - cluster=self.conn.clusters.get(name=cluster), - template=self.conn.templates.get(name=template), - disks=params.Disks(clone=True) - ) - self.conn.vms.add(vmparams) - setMsg("VM is created") - setChanged() - return True - except Exception as e: - setMsg("Failed to create VM") - setMsg(str(e)) - setFailed() - return False - - def createVM(self, name, cluster, os, actiontype): - try: - vmparams = params.VM( - name=name, - cluster=self.conn.clusters.get(name=cluster), - os=params.OperatingSystem(type_=os), - template=self.conn.templates.get(name="Blank"), - type_=actiontype - ) - self.conn.vms.add(vmparams) - setMsg("VM is created") - setChanged() - return True - except Exception as e: - setMsg("Failed to create VM") - setMsg(str(e)) - setFailed() - return False - - def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot): - VM = self.get_VM(vmname) - - newdisk = params.Disk( - name=diskname, - size=1024 * 1024 * 1024 * int(disksize), - wipe_after_delete=True, - sparse=diskallocationtype, - interface=diskinterface, - format=diskformat, - bootable=diskboot, - storage_domains=params.StorageDomains( - storage_domain=[self.get_domain(diskdomain)] - ) - ) - - try: - VM.disks.add(newdisk) - VM.update() - setMsg("Successfully added disk " + diskname) - setChanged() - except Exception as e: - setFailed() - setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.") - setMsg(str(e)) - return False - - try: - currentdisk = VM.disks.get(name=diskname) - attempt = 1 - while currentdisk.status.state != 'ok': - currentdisk = VM.disks.get(name=diskname) - if attempt == 100: - setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state))) - raise Exception() - else: - attempt += 1 - time.sleep(2) - setMsg("The disk " + diskname + " is ready.") - except Exception as e: - setFailed() - setMsg("Error getting the state of " + diskname + ".") - setMsg(str(e)) - return False - return True - - def createNIC(self, vmname, nicname, vlan, interface): - VM = self.get_VM(vmname) - CLUSTER = self.get_cluster_byid(VM.cluster.id) - DC = self.get_DC_byid(CLUSTER.data_center.id) - newnic = params.NIC( - name=nicname, - network=DC.networks.get(name=vlan), - interface=interface - ) - - try: - VM.nics.add(newnic) - VM.update() - setMsg("Successfully added iface " + nicname) - setChanged() - except Exception as e: - setFailed() - setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.") - setMsg(str(e)) - return False - - try: - currentnic = VM.nics.get(name=nicname) - attempt = 1 - while currentnic.active is not True: - 
currentnic = VM.nics.get(name=nicname) - if attempt == 100: - setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active))) - raise Exception() - else: - attempt += 1 - time.sleep(2) - setMsg("The iface " + nicname + " is ready.") - except Exception as e: - setFailed() - setMsg("Error getting the state of " + nicname + ".") - setMsg(str(e)) - return False - return True - - def get_DC(self, dc_name): - return self.conn.datacenters.get(name=dc_name) - - def get_DC_byid(self, dc_id): - return self.conn.datacenters.get(id=dc_id) - - def get_VM(self, vm_name): - return self.conn.vms.get(name=vm_name) - - def get_cluster_byid(self, cluster_id): - return self.conn.clusters.get(id=cluster_id) - - def get_cluster(self, cluster_name): - return self.conn.clusters.get(name=cluster_name) - - def get_domain_byid(self, dom_id): - return self.conn.storagedomains.get(id=dom_id) - - def get_domain(self, domain_name): - return self.conn.storagedomains.get(name=domain_name) - - def get_disk(self, disk): - return self.conn.disks.get(disk) - - def get_network(self, dc_name, network_name): - return self.get_DC(dc_name).networks.get(network_name) - - def get_network_byid(self, network_id): - return self.conn.networks.get(id=network_id) - - def get_NIC(self, vm_name, nic_name): - return self.get_VM(vm_name).nics.get(nic_name) - - def get_Host(self, host_name): - return self.conn.hosts.get(name=host_name) - - def get_Host_byid(self, host_id): - return self.conn.hosts.get(id=host_id) - - def set_Memory(self, name, memory): - VM = self.get_VM(name) - VM.memory = int(int(memory) * 1024 * 1024 * 1024) - try: - VM.update() - setMsg("The Memory has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update memory.") - setMsg(str(e)) - setFailed() - return False - - def set_Memory_Policy(self, name, memory_policy): - VM = self.get_VM(name) - VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024 - try: - VM.update() - setMsg("The memory policy has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update memory policy.") - setMsg(str(e)) - setFailed() - return False - - def set_CPU(self, name, cpu): - VM = self.get_VM(name) - VM.cpu.topology.cores = int(cpu) - try: - VM.update() - setMsg("The number of CPUs has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update the number of CPUs.") - setMsg(str(e)) - setFailed() - return False - - def set_CPU_share(self, name, cpu_share): - VM = self.get_VM(name) - VM.cpu_shares = int(cpu_share) - try: - VM.update() - setMsg("The CPU share has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update the CPU share.") - setMsg(str(e)) - setFailed() - return False - - def set_Disk(self, diskname, disksize, diskinterface, diskboot): - DISK = self.get_disk(diskname) - setMsg("Checking disk " + diskname) - if DISK.get_bootable() != diskboot: - try: - DISK.set_bootable(diskboot) - setMsg("Updated the boot option on the disk.") - setChanged() - except Exception as e: - setMsg("Failed to set the boot option on the disk.") - setMsg(str(e)) - setFailed() - return False - else: - setMsg("The boot option of the disk is correct") - if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): - try: - DISK.size = (1024 * 1024 * 1024 * int(disksize)) - setMsg("Updated the size of the disk.") - setChanged() - except Exception as e: - setMsg("Failed to update the size of the disk.") - setMsg(str(e)) - setFailed() - return 
False - elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)): - setMsg("Shrinking disks is not supported") - setFailed() - return False - else: - setMsg("The size of the disk is correct") - if str(DISK.interface) != str(diskinterface): - try: - DISK.interface = diskinterface - setMsg("Updated the interface of the disk.") - setChanged() - except Exception as e: - setMsg("Failed to update the interface of the disk.") - setMsg(str(e)) - setFailed() - return False - else: - setMsg("The interface of the disk is correct") - return True - - def set_NIC(self, vmname, nicname, newname, vlan, interface): - NIC = self.get_NIC(vmname, nicname) - VM = self.get_VM(vmname) - CLUSTER = self.get_cluster_byid(VM.cluster.id) - DC = self.get_DC_byid(CLUSTER.data_center.id) - NETWORK = self.get_network(str(DC.name), vlan) - checkFail() - if NIC.name != newname: - NIC.name = newname - setMsg('Updating iface name to ' + newname) - setChanged() - if str(NIC.network.id) != str(NETWORK.id): - NIC.set_network(NETWORK) - setMsg('Updating iface network to ' + vlan) - setChanged() - if NIC.interface != interface: - NIC.interface = interface - setMsg('Updating iface interface to ' + interface) - setChanged() - try: - NIC.update() - setMsg('iface has successfully been updated.') - except Exception as e: - setMsg("Failed to update the iface.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_DeleteProtection(self, vmname, del_prot): - VM = self.get_VM(vmname) - VM.delete_protected = del_prot - try: - VM.update() - setChanged() - except Exception as e: - setMsg("Failed to update delete protection.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_BootOrder(self, vmname, boot_order): - VM = self.get_VM(vmname) - bootorder = [] - for device in boot_order: - bootorder.append(params.Boot(dev=device)) - VM.os.boot = bootorder - - try: - VM.update() - setChanged() - except Exception as e: - setMsg("Failed to update the boot order.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_Host(self, host_name, cluster, ifaces): - HOST = self.get_Host(host_name) - CLUSTER = self.get_cluster(cluster) - - if HOST is None: - setMsg("Host does not exist.") - ifacelist = dict() - networklist = [] - manageip = '' - - try: - for iface in ifaces: - try: - setMsg('creating host interface ' + iface['name']) - if 'management' in iface: - manageip = iface['ip'] - if 'boot_protocol' not in iface: - if 'ip' in iface: - iface['boot_protocol'] = 'static' - else: - iface['boot_protocol'] = 'none' - if 'ip' not in iface: - iface['ip'] = '' - if 'netmask' not in iface: - iface['netmask'] = '' - if 'gateway' not in iface: - iface['gateway'] = '' - - if 'network' in iface: - if 'bond' in iface: - bond = [] - for slave in iface['bond']: - bond.append(ifacelist[slave]) - try: - tmpiface = params.Bonding( - slaves=params.Slaves(host_nic=bond), - options=params.Options( - option=[ - params.Option(name='miimon', value='100'), - params.Option(name='mode', value='4') - ] - ) - ) - except Exception as e: - setMsg('Failed to create the bond for ' + iface['name']) - setFailed() - setMsg(str(e)) - return False - try: - tmpnetwork = params.HostNIC( - network=params.Network(name=iface['network']), - name=iface['name'], - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - ), - override_configuration=True, - bonding=tmpiface) - networklist.append(tmpnetwork) - setMsg('Applying network ' + iface['name']) - except 
Exception as e: - setMsg('Failed to set' + iface['name'] + ' as network interface') - setFailed() - setMsg(str(e)) - return False - else: - tmpnetwork = params.HostNIC( - network=params.Network(name=iface['network']), - name=iface['name'], - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - )) - networklist.append(tmpnetwork) - setMsg('Applying network ' + iface['name']) - else: - tmpiface = params.HostNIC( - name=iface['name'], - network=params.Network(), - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - )) - ifacelist[iface['name']] = tmpiface - except Exception as e: - setMsg('Failed to set ' + iface['name']) - setFailed() - setMsg(str(e)) - return False - except Exception as e: - setMsg('Failed to set networks') - setMsg(str(e)) - setFailed() - return False - - if manageip == '': - setMsg('No management network is defined') - setFailed() - return False - - try: - HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) - if self.conn.hosts.add(HOST): - setChanged() - HOST = self.get_Host(host_name) - state = HOST.status.state - while (state != 'non_operational' and state != 'up'): - HOST = self.get_Host(host_name) - state = HOST.status.state - time.sleep(1) - if state == 'non_responsive': - setMsg('Failed to add host to RHEVM') - setFailed() - return False - - setMsg('status host: up') - time.sleep(5) - - HOST = self.get_Host(host_name) - state = HOST.status.state - setMsg('State before setting to maintenance: ' + str(state)) - HOST.deactivate() - while state != 'maintenance': - HOST = self.get_Host(host_name) - state = HOST.status.state - time.sleep(1) - setMsg('status host: maintenance') - - try: - HOST.nics.setupnetworks(params.Action( - force=True, - check_connectivity=False, - host_nics=params.HostNics(host_nic=networklist) - )) - setMsg('nics are set') - except Exception as e: - setMsg('Failed to apply networkconfig') - setFailed() - setMsg(str(e)) - return False - - try: - HOST.commitnetconfig() - setMsg('Network config is saved') - except Exception as e: - setMsg('Failed to save networkconfig') - setFailed() - setMsg(str(e)) - return False - except Exception as e: - if 'The Host name is already in use' in str(e): - setMsg("Host already exists") - else: - setMsg("Failed to add host") - setFailed() - setMsg(str(e)) - return False - - HOST.activate() - while state != 'up': - HOST = self.get_Host(host_name) - state = HOST.status.state - time.sleep(1) - if state == 'non_responsive': - setMsg('Failed to apply networkconfig.') - setFailed() - return False - setMsg('status host: up') - else: - setMsg("Host exists.") - - return True - - def del_NIC(self, vmname, nicname): - return self.get_NIC(vmname, nicname).delete() - - def remove_VM(self, vmname): - VM = self.get_VM(vmname) - try: - VM.delete() - except Exception as e: - setMsg("Failed to remove VM.") - setMsg(str(e)) - setFailed() - return False - return True - - def start_VM(self, vmname, timeout): - VM = self.get_VM(vmname) - try: - VM.start() - except Exception as e: - setMsg("Failed to start VM.") - setMsg(str(e)) - setFailed() - return False - return self.wait_VM(vmname, "up", timeout) - - def wait_VM(self, vmname, state, timeout): - VM = self.get_VM(vmname) - while VM.status.state != state: - VM = self.get_VM(vmname) - time.sleep(10) - if timeout is not False: - timeout -= 10 - if timeout <= 0: - 
setMsg("Timeout expired") - setFailed() - return False - return True - - def stop_VM(self, vmname, timeout): - VM = self.get_VM(vmname) - try: - VM.stop() - except Exception as e: - setMsg("Failed to stop VM.") - setMsg(str(e)) - setFailed() - return False - return self.wait_VM(vmname, "down", timeout) - - def set_CD(self, vmname, cd_drive): - VM = self.get_VM(vmname) - try: - if str(VM.status.state) == 'down': - cdrom = params.CdRom(file=cd_drive) - VM.cdroms.add(cdrom) - setMsg("Attached the image.") - setChanged() - else: - cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000") - cdrom.set_file(cd_drive) - cdrom.update(current=True) - setMsg("Attached the image.") - setChanged() - except Exception as e: - setMsg("Failed to attach image.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_VM_Host(self, vmname, vmhost): - VM = self.get_VM(vmname) - HOST = self.get_Host(vmhost) - try: - VM.placement_policy.host = HOST - VM.update() - setMsg("Set startup host to " + vmhost) - setChanged() - except Exception as e: - setMsg("Failed to set startup host.") - setMsg(str(e)) - setFailed() - return False - return True - - def migrate_VM(self, vmname, vmhost): - VM = self.get_VM(vmname) - - HOST = self.get_Host_byid(VM.host.id) - if str(HOST.name) != vmhost: - try: - VM.migrate( - action=params.Action( - host=params.Host( - name=vmhost, - ) - ), - ) - setChanged() - setMsg("VM migrated to " + vmhost) - except Exception as e: - setMsg("Failed to set startup host.") - setMsg(str(e)) - setFailed() - return False - return True - - def remove_CD(self, vmname): - VM = self.get_VM(vmname) - try: - VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete() - setMsg("Removed the image.") - setChanged() - except Exception as e: - setMsg("Failed to remove the image.") - setMsg(str(e)) - setFailed() - return False - return True - - -class RHEV(object): - def __init__(self, module): - self.module = module - - def __get_conn(self): - self.conn = RHEVConn(self.module) - return self.conn - - def test(self): - self.__get_conn() - return "OK" - - def getVM(self, name): - self.__get_conn() - VM = self.conn.get_VM(name) - if VM: - vminfo = dict() - vminfo['uuid'] = VM.id - vminfo['name'] = VM.name - vminfo['status'] = VM.status.state - vminfo['cpu_cores'] = VM.cpu.topology.cores - vminfo['cpu_sockets'] = VM.cpu.topology.sockets - vminfo['cpu_shares'] = VM.cpu_shares - vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024) - vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024) - vminfo['os'] = VM.get_os().type_ - vminfo['del_prot'] = VM.delete_protected - try: - vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name) - except Exception: - vminfo['host'] = None - vminfo['boot_order'] = [] - for boot_dev in VM.os.get_boot(): - vminfo['boot_order'].append(str(boot_dev.dev)) - vminfo['disks'] = [] - for DISK in VM.disks.list(): - disk = dict() - disk['name'] = DISK.name - disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024) - disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name) - disk['interface'] = DISK.interface - vminfo['disks'].append(disk) - vminfo['ifaces'] = [] - for NIC in VM.nics.list(): - iface = dict() - iface['name'] = str(NIC.name) - iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name) - iface['interface'] = NIC.interface - iface['mac'] = NIC.mac.address - vminfo['ifaces'].append(iface) - vminfo[str(NIC.name)] = NIC.mac.address - CLUSTER = 
self.conn.get_cluster_byid(VM.cluster.id) - if CLUSTER: - vminfo['cluster'] = CLUSTER.name - else: - vminfo = False - return vminfo - - def createVMimage(self, name, cluster, template, disks): - self.__get_conn() - return self.conn.createVMimage(name, cluster, template, disks) - - def createVM(self, name, cluster, os, actiontype): - self.__get_conn() - return self.conn.createVM(name, cluster, os, actiontype) - - def setMemory(self, name, memory): - self.__get_conn() - return self.conn.set_Memory(name, memory) - - def setMemoryPolicy(self, name, memory_policy): - self.__get_conn() - return self.conn.set_Memory_Policy(name, memory_policy) - - def setCPU(self, name, cpu): - self.__get_conn() - return self.conn.set_CPU(name, cpu) - - def setCPUShare(self, name, cpu_share): - self.__get_conn() - return self.conn.set_CPU_share(name, cpu_share) - - def setDisks(self, name, disks): - self.__get_conn() - counter = 0 - bootselect = False - for disk in disks: - if 'bootable' in disk: - if disk['bootable'] is True: - bootselect = True - - for disk in disks: - diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_') - disksize = disk.get('size', 1) - diskdomain = disk.get('domain', None) - if diskdomain is None: - setMsg("`domain` is a required disk key.") - setFailed() - return False - diskinterface = disk.get('interface', 'virtio') - diskformat = disk.get('format', 'raw') - diskallocationtype = disk.get('thin', False) - diskboot = disk.get('bootable', False) - - if bootselect is False and counter == 0: - diskboot = True - - DISK = self.conn.get_disk(diskname) - - if DISK is None: - self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) - else: - self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) - checkFail() - counter += 1 - - return True - - def setNetworks(self, vmname, ifaces): - self.__get_conn() - VM = self.conn.get_VM(vmname) - - counter = 0 - length = len(ifaces) - - for NIC in VM.nics.list(): - if counter < length: - iface = ifaces[counter] - name = iface.get('name', None) - if name is None: - setMsg("`name` is a required iface key.") - setFailed() - elif str(name) != str(NIC.name): - setMsg("ifaces are in the wrong order, rebuilding everything.") - for NIC in VM.nics.list(): - self.conn.del_NIC(vmname, NIC.name) - self.setNetworks(vmname, ifaces) - checkFail() - return True - vlan = iface.get('vlan', None) - if vlan is None: - setMsg("`vlan` is a required iface key.") - setFailed() - checkFail() - interface = iface.get('interface', 'virtio') - self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) - else: - self.conn.del_NIC(vmname, NIC.name) - counter += 1 - checkFail() - - while counter < length: - iface = ifaces[counter] - name = iface.get('name', None) - if name is None: - setMsg("`name` is a required iface key.") - setFailed() - vlan = iface.get('vlan', None) - if vlan is None: - setMsg("`vlan` is a required iface key.") - setFailed() - if failed is True: - return False - interface = iface.get('interface', 'virtio') - self.conn.createNIC(vmname, name, vlan, interface) - - counter += 1 - checkFail() - return True - - def setDeleteProtection(self, vmname, del_prot): - self.__get_conn() - VM = self.conn.get_VM(vmname) - if bool(VM.delete_protected) != bool(del_prot): - self.conn.set_DeleteProtection(vmname, del_prot) - checkFail() - setMsg("`delete protection` has been updated.") - else: - setMsg("`delete protection` already has the right value.") - return True - - def 
setBootOrder(self, vmname, boot_order): - self.__get_conn() - VM = self.conn.get_VM(vmname) - bootorder = [] - for boot_dev in VM.os.get_boot(): - bootorder.append(str(boot_dev.dev)) - - if boot_order != bootorder: - self.conn.set_BootOrder(vmname, boot_order) - setMsg('The boot order has been set') - else: - setMsg('The boot order has already been set') - return True - - def removeVM(self, vmname): - self.__get_conn() - self.setPower(vmname, "down", 300) - return self.conn.remove_VM(vmname) - - def setPower(self, vmname, state, timeout): - self.__get_conn() - VM = self.conn.get_VM(vmname) - if VM is None: - setMsg("VM does not exist.") - setFailed() - return False - - if state == VM.status.state: - setMsg("VM state was already " + state) - else: - if state == "up": - setMsg("VM is going to start") - self.conn.start_VM(vmname, timeout) - setChanged() - elif state == "down": - setMsg("VM is going to stop") - self.conn.stop_VM(vmname, timeout) - setChanged() - elif state == "restarted": - self.setPower(vmname, "down", timeout) - checkFail() - self.setPower(vmname, "up", timeout) - checkFail() - setMsg("the vm state is set to " + state) - return True - - def setCD(self, vmname, cd_drive): - self.__get_conn() - if cd_drive: - return self.conn.set_CD(vmname, cd_drive) - else: - return self.conn.remove_CD(vmname) - - def setVMHost(self, vmname, vmhost): - self.__get_conn() - return self.conn.set_VM_Host(vmname, vmhost) - - def setHost(self, hostname, cluster, ifaces): - self.__get_conn() - return self.conn.set_Host(hostname, cluster, ifaces) - - -def checkFail(): - if failed: - module.fail_json(msg=msg) - else: - return True - - -def setFailed(): - global failed - failed = True - - -def setChanged(): - global changed - changed = True - - -def setMsg(message): - global failed - msg.append(message) - - -def core(module): - - r = RHEV(module) - - state = module.params.get('state') - - if state == 'ping': - r.test() - return RHEV_SUCCESS, {"ping": "pong"} - elif state == 'info': - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - vminfo = r.getVM(name) - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - elif state == 'present': - created = False - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - actiontype = module.params.get('type') - if actiontype == 'server' or actiontype == 'desktop': - vminfo = r.getVM(name) - if vminfo: - setMsg('VM exists') - else: - # Create VM - cluster = module.params.get('cluster') - if cluster is None: - setMsg("cluster is a required argument.") - setFailed() - template = module.params.get('image') - if template: - disks = module.params.get('disks') - if disks is None: - setMsg("disks is a required argument.") - setFailed() - checkFail() - if r.createVMimage(name, cluster, template, disks) is False: - return RHEV_FAILED, vminfo - else: - os = module.params.get('osver') - if os is None: - setMsg("osver is a required argument.") - setFailed() - checkFail() - if r.createVM(name, cluster, os, actiontype) is False: - return RHEV_FAILED, vminfo - created = True - - # Set MEMORY and MEMORY POLICY - vminfo = r.getVM(name) - memory = module.params.get('vmmem') - if memory is not None: - memory_policy = module.params.get('mempol') - if memory_policy == 0: - memory_policy = memory - mem_pol_nok = True - if int(vminfo['mem_pol']) == memory_policy: - setMsg("Memory is correct") - mem_pol_nok = False - - mem_nok = True - if 
int(vminfo['memory']) == memory: - setMsg("Memory is correct") - mem_nok = False - - if memory_policy > memory: - setMsg('memory_policy cannot have a higher value than memory.') - return RHEV_FAILED, msg - - if mem_nok and mem_pol_nok: - if memory_policy > int(vminfo['memory']): - r.setMemory(vminfo['name'], memory) - r.setMemoryPolicy(vminfo['name'], memory_policy) - else: - r.setMemoryPolicy(vminfo['name'], memory_policy) - r.setMemory(vminfo['name'], memory) - elif mem_nok: - r.setMemory(vminfo['name'], memory) - elif mem_pol_nok: - r.setMemoryPolicy(vminfo['name'], memory_policy) - checkFail() - - # Set CPU - cpu = module.params.get('vmcpu') - if int(vminfo['cpu_cores']) == cpu: - setMsg("Number of CPUs is correct") - else: - if r.setCPU(vminfo['name'], cpu) is False: - return RHEV_FAILED, msg - - # Set CPU SHARE - cpu_share = module.params.get('cpu_share') - if cpu_share is not None: - if int(vminfo['cpu_shares']) == cpu_share: - setMsg("CPU share is correct.") - else: - if r.setCPUShare(vminfo['name'], cpu_share) is False: - return RHEV_FAILED, msg - - # Set DISKS - disks = module.params.get('disks') - if disks is not None: - if r.setDisks(vminfo['name'], disks) is False: - return RHEV_FAILED, msg - - # Set NETWORKS - ifaces = module.params.get('ifaces', None) - if ifaces is not None: - if r.setNetworks(vminfo['name'], ifaces) is False: - return RHEV_FAILED, msg - - # Set Delete Protection - del_prot = module.params.get('del_prot') - if r.setDeleteProtection(vminfo['name'], del_prot) is False: - return RHEV_FAILED, msg - - # Set Boot Order - boot_order = module.params.get('boot_order') - if r.setBootOrder(vminfo['name'], boot_order) is False: - return RHEV_FAILED, msg - - # Set VM Host - vmhost = module.params.get('vmhost') - if vmhost: - if r.setVMHost(vminfo['name'], vmhost) is False: - return RHEV_FAILED, msg - - vminfo = r.getVM(name) - vminfo['created'] = created - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - - if actiontype == 'host': - cluster = module.params.get('cluster') - if cluster is None: - setMsg("cluster is a required argument.") - setFailed() - ifaces = module.params.get('ifaces') - if ifaces is None: - setMsg("ifaces is a required argument.") - setFailed() - if r.setHost(name, cluster, ifaces) is False: - return RHEV_FAILED, msg - return RHEV_SUCCESS, {'changed': changed, 'msg': msg} - - elif state == 'absent': - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - actiontype = module.params.get('type') - if actiontype == 'server' or actiontype == 'desktop': - vminfo = r.getVM(name) - if vminfo: - setMsg('VM exists') - - # Set Delete Protection - del_prot = module.params.get('del_prot') - if r.setDeleteProtection(vminfo['name'], del_prot) is False: - return RHEV_FAILED, msg - - # Remove VM - if r.removeVM(vminfo['name']) is False: - return RHEV_FAILED, msg - setMsg('VM has been removed.') - vminfo['state'] = 'DELETED' - else: - setMsg('VM was already removed.') - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - - elif state == 'up' or state == 'down' or state == 'restarted': - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - timeout = module.params.get('timeout') - if r.setPower(name, state, timeout) is False: - return RHEV_FAILED, msg - vminfo = r.getVM(name) - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - - elif state == 'cd': - name = module.params.get('name') 
- cd_drive = module.params.get('cd_drive') - if r.setCD(name, cd_drive) is False: - return RHEV_FAILED, msg - return RHEV_SUCCESS, {'changed': changed, 'msg': msg} - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']), - user=dict(type='str', default='admin@internal'), - password=dict(type='str', required=True, no_log=True), - server=dict(type='str', default='127.0.0.1'), - port=dict(type='int', default=443), - insecure_api=dict(type='bool', default=False), - name=dict(type='str'), - image=dict(type='str'), - datacenter=dict(type='str', default="Default"), - type=dict(type='str', default='server', choices=['desktop', 'host', 'server']), - cluster=dict(type='str', default=''), - vmhost=dict(type='str'), - vmcpu=dict(type='int', default=2), - vmmem=dict(type='int', default=1), - disks=dict(type='list', elements='str'), - osver=dict(type='str', default="rhel_6x64"), - ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']), - timeout=dict(type='int'), - mempol=dict(type='int', default=1), - vm_ha=dict(type='bool', default=True), - cpu_share=dict(type='int', default=0), - boot_order=dict(type='list', elements='str', default=['hd', 'network']), - del_prot=dict(type='bool', default=True), - cd_drive=dict(type='str'), - ), - ) - - if not HAS_SDK: - module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.") - - rc = RHEV_SUCCESS - try: - rc, result = core(module) - except Exception as e: - module.fail_json(msg=str(e)) - - if rc != 0: # something went wrong emit the msg - module.fail_json(rc=rc, msg=result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/cloud/misc/serverless.py deleted file mode 100644 index 878621c38c..0000000000 --- a/plugins/modules/cloud/misc/serverless.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Ryan Scott Brown -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: serverless -short_description: Manages a Serverless Framework project -description: - - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks. -options: - state: - description: - - Goal state of given stage/project. - type: str - choices: [ absent, present ] - default: present - serverless_bin_path: - description: - - The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless - type: path - service_path: - description: - - The path to the root of the Serverless Service to be operated on. - type: path - required: true - stage: - description: - - The name of the serverless framework project stage to deploy to. - - This uses the serverless framework default "dev". - type: str - functions: - description: - - A list of specific functions to deploy. - - If this is not provided, all functions in the service will be deployed. - - Deprecated parameter, it will be removed in community.general 5.0.0. - type: list - elements: str - default: [] - region: - description: - - AWS region to deploy the service to. - - This parameter defaults to C(us-east-1). 
- type: str - deploy: - description: - - Whether or not to deploy artifacts after building them. - - When this option is C(false) all the functions will be built, but no stack update will be run to send them out. - - This is mostly useful for generating artifacts to be stored/deployed elsewhere. - type: bool - default: yes - force: - description: - - Whether or not to force full deployment, equivalent to serverless C(--force) option. - type: bool - default: no - verbose: - description: - - Shows all stack events during deployment, and display any Stack Output. - type: bool - default: no -notes: - - Currently, the C(serverless) command must be in the path of the node executing the task. - In the future this may be a flag. -requirements: -- serverless -- yaml -author: -- Ryan Scott Brown (@ryansb) -''' - -EXAMPLES = r''' -- name: Basic deploy of a service - community.general.serverless: - service_path: '{{ project_dir }}' - state: present - -- name: Deploy a project, then pull its resource list back into Ansible - community.general.serverless: - stage: dev - region: us-east-1 - service_path: '{{ project_dir }}' - register: sls - -# The cloudformation stack is always named the same as the full service, so the -# cloudformation_info module can get a full list of the stack resources, as -# well as stack events and outputs -- cloudformation_info: - region: us-east-1 - stack_name: '{{ sls.service_name }}' - stack_resources: true - -- name: Deploy a project using a locally installed serverless binary - community.general.serverless: - stage: dev - region: us-east-1 - service_path: '{{ project_dir }}' - serverless_bin_path: node_modules/.bin/serverless -''' - -RETURN = r''' -service_name: - type: str - description: The service name specified in the serverless.yml that was just deployed. - returned: always - sample: my-fancy-service-dev -state: - type: str - description: Whether the stack for the serverless project is present/absent. - returned: always -command: - type: str - description: Full `serverless` command run by this module, in case you want to re-run the command outside the module. - returned: always - sample: serverless deploy --stage production -''' - -import os - -try: - import yaml - HAS_YAML = True -except ImportError: - HAS_YAML = False - -from ansible.module_utils.basic import AnsibleModule - - -def read_serverless_config(module): - path = module.params.get('service_path') - full_path = os.path.join(path, 'serverless.yml') - - try: - with open(full_path) as sls_config: - config = yaml.safe_load(sls_config.read()) - return config - except IOError as e: - module.fail_json(msg="Could not open serverless.yml in {0}. 
err: {1}".format(full_path, str(e))) - - -def get_service_name(module, stage): - config = read_serverless_config(module) - if config.get('service') is None: - module.fail_json(msg="Could not read `service` key from serverless.yml file") - - if stage: - return "{0}-{1}".format(config['service'], stage) - - return "{0}-{1}".format(config['service'], config.get('stage', 'dev')) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - service_path=dict(type='path', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - functions=dict(type='list', elements='str', - removed_in_version="5.0.0", removed_from_collection="community.general"), - region=dict(type='str', default=''), - stage=dict(type='str', default=''), - deploy=dict(type='bool', default=True), - serverless_bin_path=dict(type='path'), - force=dict(type='bool', default=False), - verbose=dict(type='bool', default=False), - ), - ) - - if not HAS_YAML: - module.fail_json(msg='yaml is required for this module') - - service_path = module.params.get('service_path') - state = module.params.get('state') - region = module.params.get('region') - stage = module.params.get('stage') - deploy = module.params.get('deploy', True) - force = module.params.get('force', False) - verbose = module.params.get('verbose', False) - serverless_bin_path = module.params.get('serverless_bin_path') - - if serverless_bin_path is not None: - command = serverless_bin_path + " " - else: - command = module.get_bin_path("serverless") + " " - - if state == 'present': - command += 'deploy ' - elif state == 'absent': - command += 'remove ' - else: - module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state)) - - if state == 'present': - if not deploy: - command += '--noDeploy ' - elif force: - command += '--force ' - - if region: - command += '--region {0} '.format(region) - if stage: - command += '--stage {0} '.format(stage) - if verbose: - command += '--verbose ' - - rc, out, err = module.run_command(command, cwd=service_path) - if rc != 0: - if state == 'absent' and "-{0}' does not exist".format(stage) in out: - module.exit_json(changed=False, state='absent', command=command, - out=out, service_name=get_service_name(module, stage)) - - module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err)) - - # gather some facts about the deployment - module.exit_json(changed=True, state='present', out=out, command=command, - service_name=get_service_name(module, stage)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py deleted file mode 100644 index 5e3b952c0c..0000000000 --- a/plugins/modules/cloud/misc/terraform.py +++ /dev/null @@ -1,506 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Ryan Scott Brown -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: terraform -short_description: Manages a Terraform deployment (and plans) -description: - - Provides support for deploying resources with Terraform and pulling - resource information back into Ansible. 
-options: - state: - choices: ['planned', 'present', 'absent'] - description: - - Goal state of given stage/project - type: str - default: present - binary_path: - description: - - The path of a terraform binary to use, relative to the 'service_path' - unless you supply an absolute path. - type: path - project_path: - description: - - The path to the root of the Terraform directory with the - vars.tf/main.tf/etc to use. - type: path - required: true - plugin_paths: - description: - - List of paths containing Terraform plugin executable files. - - Plugin executables can be downloaded from U(https://releases.hashicorp.com/). - - When set, the plugin discovery and auto-download behavior of Terraform is disabled. - - The directory structure in the plugin path can be tricky. The Terraform docs - U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins) - show a simple directory of files, but actually, the directory structure - has to follow the same structure you would see if Terraform auto-downloaded the plugins. - See the examples below for a tree output of an example plugin directory. - type: list - elements: path - version_added: 3.0.0 - workspace: - description: - - The terraform workspace to work with. - type: str - default: default - purge_workspace: - description: - - Only works with state = absent - - If true, the workspace will be deleted after the "terraform destroy" action. - - The 'default' workspace will not be deleted. - default: false - type: bool - plan_file: - description: - - The path to an existing Terraform plan file to apply. If this is not - specified, Ansible will build a new TF plan and execute it. - Note that this option is required if 'state' has the 'planned' value. - type: path - state_file: - description: - - The path to an existing Terraform state file to use when building plan. - If this is not specified, the default `terraform.tfstate` will be used. - - This option is ignored when plan is specified. - type: path - variables_files: - description: - - The path to a variables file for Terraform to fill into the TF - configurations. This can accept a list of paths to multiple variables files. - - Up until Ansible 2.9, this option was usable as I(variables_file). - type: list - elements: path - aliases: [ 'variables_file' ] - variables: - description: - - A group of key-values to override template variables or those in - variables files. - type: dict - targets: - description: - - A list of specific resources to target in this plan/application. The - resources selected here will also auto-include any dependencies. - type: list - elements: str - lock: - description: - - Enable statefile locking, if you use a service that accepts locks (such - as S3+DynamoDB) to store your statefile. - type: bool - default: true - lock_timeout: - description: - - How long to maintain the lock on the statefile, if you use a service - that accepts locks (such as S3+DynamoDB). - type: int - force_init: - description: - - To avoid duplicating infra, if a state file can't be found this will - force a `terraform init`. Generally, this should be turned off unless - you intend to provision an entirely new Terraform deployment. - default: false - type: bool - overwrite_init: - description: - - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path). - default: true - type: bool - version_added: '3.2.0' - backend_config: - description: - - A group of key-values to provide at init stage to the -backend-config parameter. 
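# Editor's note: the variables/variables_files options documented above are rendered
# as repeated -var / -var-file arguments on the terraform command line (the module
# does exactly this later in its main()). The same mapping as a standalone sketch:
def terraform_var_args(variables=None, variables_files=None):
    args = []
    for key, value in (variables or {}).items():
        args.extend(['-var', '{0}={1}'.format(key, value)])
    for path in (variables_files or []):
        args.extend(['-var-file', path])
    return args

# terraform_var_args({'env': 'prod'}, ['common.tfvars'])
# -> ['-var', 'env=prod', '-var-file', 'common.tfvars']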
- type: dict - backend_config_files: - description: - - The path to a configuration file to provide at init state to the -backend-config parameter. - This can accept a list of paths to multiple configuration files. - type: list - elements: path - version_added: '0.2.0' - init_reconfigure: - description: - - Forces backend reconfiguration during init. - default: false - type: bool - version_added: '1.3.0' - check_destroy: - description: - - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, - but not "destroy and re-create" actions. This option is ignored when I(state=absent). - type: bool - default: false - version_added: '3.3.0' - parallelism: - description: - - Restrict concurrent operations when Terraform applies the plan. - type: int - version_added: '3.8.0' -notes: - - To just run a `terraform plan`, use check mode. -requirements: [ "terraform" ] -author: "Ryan Scott Brown (@ryansb)" -''' - -EXAMPLES = """ -- name: Basic deploy of a service - community.general.terraform: - project_path: '{{ project_dir }}' - state: present - -- name: Define the backend configuration at init - community.general.terraform: - project_path: 'project/' - state: "{{ state }}" - force_init: true - backend_config: - region: "eu-west-1" - bucket: "some-bucket" - key: "random.tfstate" - -- name: Define the backend configuration with one or more files at init - community.general.terraform: - project_path: 'project/' - state: "{{ state }}" - force_init: true - backend_config_files: - - /path/to/backend_config_file_1 - - /path/to/backend_config_file_2 - -- name: Disable plugin discovery and auto-download by setting plugin_paths - community.general.terraform: - project_path: 'project/' - state: "{{ state }}" - force_init: true - plugin_paths: - - /path/to/plugins_dir_1 - - /path/to/plugins_dir_2 - -### Example directory structure for plugin_paths example -# $ tree /path/to/plugins_dir_1 -# /path/to/plugins_dir_1/ -# └── registry.terraform.io -# └── hashicorp -# └── vsphere -# ├── 1.24.0 -# │ └── linux_amd64 -# │ └── terraform-provider-vsphere_v1.24.0_x4 -# └── 1.26.0 -# └── linux_amd64 -# └── terraform-provider-vsphere_v1.26.0_x4 -""" - -RETURN = """ -outputs: - type: complex - description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value. - returned: on success - sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}' - contains: - sensitive: - type: bool - returned: always - description: Whether Terraform has marked this value as sensitive - type: - type: str - returned: always - description: The type of the value (string, int, etc) - value: - type: str - returned: always - description: The value of the output as interpolated by Terraform -stdout: - type: str - description: Full `terraform` command stdout, in case you want to display it or examine the event log - returned: always - sample: '' -command: - type: str - description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem. - returned: always - sample: terraform apply ... 
-""" - -import os -import json -import tempfile -from distutils.version import LooseVersion -from ansible.module_utils.six.moves import shlex_quote - -from ansible.module_utils.basic import AnsibleModule - -module = None - - -def get_version(bin_path): - extract_version = module.run_command([bin_path, 'version', '-json']) - terraform_version = (json.loads(extract_version[1]))['terraform_version'] - return terraform_version - - -def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): - if project_path is None or '/' not in project_path: - module.fail_json(msg="Path for Terraform project can not be None or ''.") - if not os.path.exists(bin_path): - module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) - if not os.path.isdir(project_path): - module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path)) - if LooseVersion(version) < LooseVersion('0.15.0'): - rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path) - else: - rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path) - - -def _state_args(state_file): - if state_file and os.path.exists(state_file): - return ['-state', state_file] - if state_file and not os.path.exists(state_file): - module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file)) - return [] - - -def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths): - command = [bin_path, 'init', '-input=false'] - if backend_config: - for key, val in backend_config.items(): - command.extend([ - '-backend-config', - shlex_quote('{0}={1}'.format(key, val)) - ]) - if backend_config_files: - for f in backend_config_files: - command.extend(['-backend-config', f]) - if init_reconfigure: - command.extend(['-reconfigure']) - if plugin_paths: - for plugin_path in plugin_paths: - command.extend(['-plugin-dir', plugin_path]) - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) - - -def get_workspace_context(bin_path, project_path): - workspace_ctx = {"current": "default", "all": []} - command = [bin_path, 'workspace', 'list', '-no-color'] - rc, out, err = module.run_command(command, cwd=project_path) - if rc != 0: - module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err)) - for item in out.split('\n'): - stripped_item = item.strip() - if not stripped_item: - continue - elif stripped_item.startswith('* '): - workspace_ctx["current"] = stripped_item.replace('* ', '') - else: - workspace_ctx["all"].append(stripped_item) - return workspace_ctx - - -def _workspace_cmd(bin_path, project_path, action, workspace): - command = [bin_path, 'workspace', action, workspace, '-no-color'] - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) - return rc, out, err - - -def create_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'new', workspace) - - -def select_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'select', workspace) - - -def remove_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'delete', workspace) - - -def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None): - if plan_path is None: - f, 
plan_path = tempfile.mkstemp(suffix='.tfplan') - - plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path] - - for t in targets: - plan_command.extend(['-target', t]) - - plan_command.extend(_state_args(state_file)) - - rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path) - - if rc == 0: - # no changes - return plan_path, False, out, err, plan_command if state == 'planned' else command - elif rc == 1: - # failure to plan - module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err)) - elif rc == 2: - # changes, but successful - return plan_path, True, out, err, plan_command if state == 'planned' else command - - module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err)) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - project_path=dict(required=True, type='path'), - binary_path=dict(type='path'), - plugin_paths=dict(type='list', elements='path'), - workspace=dict(type='str', default='default'), - purge_workspace=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'planned']), - variables=dict(type='dict'), - variables_files=dict(aliases=['variables_file'], type='list', elements='path'), - plan_file=dict(type='path'), - state_file=dict(type='path'), - targets=dict(type='list', elements='str', default=[]), - lock=dict(type='bool', default=True), - lock_timeout=dict(type='int',), - force_init=dict(type='bool', default=False), - backend_config=dict(type='dict'), - backend_config_files=dict(type='list', elements='path'), - init_reconfigure=dict(type='bool', default=False), - overwrite_init=dict(type='bool', default=True), - check_destroy=dict(type='bool', default=False), - parallelism=dict(type='int'), - ), - required_if=[('state', 'planned', ['plan_file'])], - supports_check_mode=True, - ) - - project_path = module.params.get('project_path') - bin_path = module.params.get('binary_path') - plugin_paths = module.params.get('plugin_paths') - workspace = module.params.get('workspace') - purge_workspace = module.params.get('purge_workspace') - state = module.params.get('state') - variables = module.params.get('variables') or {} - variables_files = module.params.get('variables_files') - plan_file = module.params.get('plan_file') - state_file = module.params.get('state_file') - force_init = module.params.get('force_init') - backend_config = module.params.get('backend_config') - backend_config_files = module.params.get('backend_config_files') - init_reconfigure = module.params.get('init_reconfigure') - overwrite_init = module.params.get('overwrite_init') - check_destroy = module.params.get('check_destroy') - - if bin_path is not None: - command = [bin_path] - else: - command = [module.get_bin_path('terraform', required=True)] - - checked_version = get_version(command[0]) - - if LooseVersion(checked_version) < LooseVersion('0.15.0'): - DESTROY_ARGS = ('destroy', '-no-color', '-force') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') - else: - DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') - - if force_init: - if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) 
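# Editor's note: build_plan() above relies on `terraform plan -detailed-exitcode`,
# whose contract is rc 0 = succeeded with no changes, rc 1 = error, rc 2 = succeeded
# with changes pending. A minimal standalone illustration of that contract using
# subprocess (a sketch, not the module itself; requires Python 3.7+):
import subprocess


def plan_has_changes(bin_path, project_path):
    proc = subprocess.run(
        [bin_path, 'plan', '-input=false', '-no-color', '-detailed-exitcode'],
        cwd=project_path, capture_output=True, text=True)
    if proc.returncode == 0:
        return False   # plan succeeded, nothing to change
    if proc.returncode == 2:
        return True    # plan succeeded, changes pending
    raise RuntimeError('terraform plan failed: %s' % proc.stderr)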
- - workspace_ctx = get_workspace_context(command[0], project_path) - if workspace_ctx["current"] != workspace: - if workspace not in workspace_ctx["all"]: - create_workspace(command[0], project_path, workspace) - else: - select_workspace(command[0], project_path, workspace) - - if state == 'present': - command.extend(APPLY_ARGS) - elif state == 'absent': - command.extend(DESTROY_ARGS) - - if state == 'present' and module.params.get('parallelism') is not None: - command.append('-parallelism=%d' % module.params.get('parallelism')) - - variables_args = [] - for k, v in variables.items(): - variables_args.extend([ - '-var', - '{0}={1}'.format(k, v) - ]) - if variables_files: - for f in variables_files: - variables_args.extend(['-var-file', f]) - - preflight_validation(command[0], project_path, checked_version, variables_args) - - if module.params.get('lock') is not None: - if module.params.get('lock'): - command.append('-lock=true') - else: - command.append('-lock=false') - if module.params.get('lock_timeout') is not None: - command.append('-lock-timeout=%ds' % module.params.get('lock_timeout')) - - for t in (module.params.get('targets') or []): - command.extend(['-target', t]) - - # we aren't sure if this plan will result in changes, so assume yes - needs_application, changed = True, False - - out, err = '', '' - - if state == 'absent': - command.extend(variables_args) - elif state == 'present' and plan_file: - if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]): - command.append(plan_file) - else: - module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file)) - else: - plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, - module.params.get('targets'), state, plan_file) - if state == 'present' and check_destroy and '- destroy' in out: - module.fail_json(msg="Aborting command because it would destroy some resources. " - "Consider switching the 'check_destroy' to false to suppress this error") - command.append(plan_file) - - if needs_application and not module.check_mode and state != 'planned': - rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) - if rc != 0: - if workspace_ctx["current"] != workspace: - select_workspace(command[0], project_path, workspace_ctx["current"]) - module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, - stdout_lines=out.splitlines(), stderr=err, - stderr_lines=err.splitlines(), - cmd=' '.join(command)) - # checks out to decide if changes were made during execution - if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: - changed = True - - outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) - rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path) - if rc == 1: - module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err)) - outputs = {} - elif rc != 0: - module.fail_json( - msg="Failure when getting Terraform outputs. 
" - "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err), - command=' '.join(outputs_command)) - else: - outputs = json.loads(outputs_text) - - # Restore the Terraform workspace found when running the module - if workspace_ctx["current"] != workspace: - select_workspace(command[0], project_path, workspace_ctx["current"]) - if state == 'absent' and workspace != 'default' and purge_workspace is True: - remove_workspace(command[0], project_path, workspace) - - module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/xenserver_facts.py b/plugins/modules/cloud/misc/xenserver_facts.py deleted file mode 100644 index f65e3c9a86..0000000000 --- a/plugins/modules/cloud/misc/xenserver_facts.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: xenserver_facts -short_description: get facts reported on xenserver -description: - - Reads data out of XenAPI, can be used instead of multiple xe commands. -author: - - Andy Hill (@andyhky) - - Tim Rupp (@caphrim007) - - Robin Lee (@cheese) -options: {} -''' - -EXAMPLES = ''' -- name: Gather facts from xenserver - community.general.xenserver_facts: - -- name: Print running VMs - ansible.builtin.debug: - msg: "{{ item }}" - with_items: "{{ xs_vms.keys() }}" - when: xs_vms[item]['power_state'] == "Running" - -# Which will print: -# -# TASK: [Print running VMs] *********************************************************** -# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit)) -# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => { -# "item": "Control domain on host: 10.0.13.22", -# "msg": "Control domain on host: 10.0.13.22" -# } -''' - - -HAVE_XENAPI = False -try: - import XenAPI - HAVE_XENAPI = True -except ImportError: - pass - -from ansible.module_utils import distro -from ansible.module_utils.basic import AnsibleModule - - -class XenServerFacts: - def __init__(self): - self.codes = { - '5.5.0': 'george', - '5.6.100': 'oxford', - '6.0.0': 'boston', - '6.1.0': 'tampa', - '6.2.0': 'clearwater' - } - - @property - def version(self): - result = distro.linux_distribution()[1] - return result - - @property - def codename(self): - if self.version in self.codes: - result = self.codes[self.version] - else: - result = None - - return result - - -def get_xenapi_session(): - session = XenAPI.xapi_local() - session.xenapi.login_with_password('', '') - return session - - -def get_networks(session): - recs = session.xenapi.network.get_all_records() - networks = change_keys(recs, key='name_label') - return networks - - -def get_pifs(session): - recs = session.xenapi.PIF.get_all_records() - pifs = change_keys(recs, key='uuid') - xs_pifs = {} - devicenums = range(0, 7) - for pif in pifs.values(): - for eth in devicenums: - interface_name = "eth%s" % (eth) - bond_name = interface_name.replace('eth', 'bond') - if pif['device'] == interface_name: - xs_pifs[interface_name] = pif - elif pif['device'] == bond_name: - xs_pifs[bond_name] = pif - return xs_pifs - - -def get_vlans(session): - recs = session.xenapi.VLAN.get_all_records() - return change_keys(recs, key='tag') - - -def change_keys(recs, key='uuid', 
filter_func=None): - """ - Take a xapi dict, and make the keys the value of recs[ref][key]. - - Preserves the ref in rec['ref'] - - """ - new_recs = {} - - for ref, rec in recs.items(): - if filter_func is not None and not filter_func(rec): - continue - - for param_name, param_value in rec.items(): - # param_value may be of type xmlrpc.client.DateTime, - # which is not simply convertible to str. - # Use 'value' attr to get the str value, - # following an example in the xmlrpc.client.DateTime documentation - if hasattr(param_value, "value"): - rec[param_name] = param_value.value - new_recs[rec[key]] = rec - new_recs[rec[key]]['ref'] = ref - - return new_recs - - -def get_host(session): - """Get the host""" - host_recs = session.xenapi.host.get_all() - # We only have one host, so just return its entry - return session.xenapi.host.get_record(host_recs[0]) - - -def get_vms(session): - recs = session.xenapi.VM.get_all_records() - if not recs: - return None - vms = change_keys(recs, key='name_label') - return vms - - -def get_srs(session): - recs = session.xenapi.SR.get_all_records() - if not recs: - return None - srs = change_keys(recs, key='name_label') - return srs - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - ) - - if not HAVE_XENAPI: - module.fail_json(changed=False, msg="python xen api required for this module") - - obj = XenServerFacts() - try: - session = get_xenapi_session() - except XenAPI.Failure as e: - module.fail_json(msg='%s' % e) - - data = { - 'xenserver_version': obj.version, - 'xenserver_codename': obj.codename - } - - xs_networks = get_networks(session) - xs_pifs = get_pifs(session) - xs_vlans = get_vlans(session) - xs_vms = get_vms(session) - xs_srs = get_srs(session) - - if xs_vlans: - data['xs_vlans'] = xs_vlans - if xs_pifs: - data['xs_pifs'] = xs_pifs - if xs_networks: - data['xs_networks'] = xs_networks - - if xs_vms: - data['xs_vms'] = xs_vms - - if xs_srs: - data['xs_srs'] = xs_srs - - module.exit_json(ansible_facts=data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py deleted file mode 100644 index d46ce38897..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ /dev/null @@ -1,579 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_firewall_policy -short_description: Configure 1&1 firewall policy. -description: - - Create, remove, reconfigure, and update firewall policies. - This module has a dependency on 1and1 >= 1.0. -options: - state: - description: - - Define a firewall policy state to create, remove, or update.
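# Editor's note: change_keys() above re-indexes a XenAPI record dict by one of the
# record's own fields (e.g. name_label), stashing the original opaque ref under a
# 'ref' key. The same idea in isolation, on plain dicts (helper name is
# hypothetical, not part of the original module):
def rekey_records(records, key):
    rekeyed = {}
    for ref, rec in records.items():
        rec = dict(rec)   # copy so the caller's data is not mutated
        rec['ref'] = ref
        rekeyed[rec[key]] = rec
    return rekeyed

# rekey_records({'OpaqueRef:1': {'name_label': 'vm1', 'power_state': 'Running'}},
#               'name_label')
# -> {'vm1': {'name_label': 'vm1', 'power_state': 'Running', 'ref': 'OpaqueRef:1'}}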
- required: false - type: str - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. - maxLength=128 - type: str - firewall_policy: - description: - - The identifier (id or name) of the firewall policy used with update state. - type: str - rules: - description: - - A list of rules that will be set for the firewall policy. - Each rule must contain the protocol parameter, in addition to three optional parameters (port_from, port_to, and source). - type: list - elements: dict - add_server_ips: - description: - - A list of server identifiers (id or name) to be assigned to a firewall policy. - Used in combination with update state. - type: list - elements: str - required: false - remove_server_ips: - description: - - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state. - type: list - elements: str - required: false - add_rules: - description: - - A list of rules that will be added to an existing firewall policy. - Its syntax is the same as the one used for the rules parameter. Used in combination with update state. - type: list - elements: dict - required: false - remove_rules: - description: - - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state. - type: list - elements: str - required: false - description: - description: - - Firewall policy description. maxLength=256 - type: str - required: false - wait: - description: - - Wait for the instance to be in state 'running' before returning. - required: false - default: "yes" - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods. - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -''' - -EXAMPLES = ''' -- name: Create a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - name: ansible-firewall-policy - description: Testing creation of firewall policies with ansible - rules: - - - protocol: TCP - port_from: 80 - port_to: 80 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - -- name: Destroy a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - state: absent - name: ansible-firewall-policy - -- name: Update a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - state: update - firewall_policy: ansible-firewall-policy - name: ansible-firewall-policy-updated - description: Testing creation of firewall policies with ansible - updated - -- name: Add server to a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - add_server_ips: - - server_identifier (id or name) - - server_identifier #2 (id or name) - wait: true - wait_timeout: 500 - state: update - -- name: Remove server from a firewall policy - community.general.oneandone_firewall_policy: - auth_token:
oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) - wait: true - wait_timeout: 500 - state: update - -- name: Add rules to a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - description: Adding rules to an existing firewall policy - add_rules: - - - protocol: TCP - port_from: 70 - port_to: 70 - source: 0.0.0.0 - - - protocol: TCP - port_from: 60 - port_to: 60 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - state: update - -- name: Remove rules from a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - remove_rules: - - rule_id #1 - - rule_id #2 - - ... - wait: true - wait_timeout: 500 - state: update -''' - -RETURN = ''' -firewall_policy: - description: Information about the firewall policy that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_firewall_policy, - get_server, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): - """ - Assigns servers to a firewall policy. - """ - try: - attach_servers = [] - - for _server_id in server_ids: - server = get_server(oneandone_conn, _server_id, True) - attach_server = oneandone.client.AttachServer( - server_id=server['id'], - server_ip_id=next(iter(server['ips'] or []), None)['id'] - ) - attach_servers.append(attach_server) - - if module.check_mode: - if attach_servers: - return True - return False - - firewall_policy = oneandone_conn.attach_server_firewall_policy( - firewall_id=firewall_id, - server_ips=attach_servers) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): - """ - Unassigns a server/IP from a firewall policy. - """ - try: - if module.check_mode: - firewall_server = oneandone_conn.get_firewall_server( - firewall_id=firewall_id, - server_ip_id=server_ip_id) - if firewall_server: - return True - return False - - firewall_policy = oneandone_conn.remove_firewall_server( - firewall_id=firewall_id, - server_ip_id=server_ip_id) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): - """ - Adds new rules to a firewall policy. 
- """ - try: - firewall_rules = [] - - for rule in rules: - firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule['protocol'], - port_from=rule['port_from'], - port_to=rule['port_to'], - source=rule['source']) - firewall_rules.append(firewall_rule) - - if module.check_mode: - firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) - if (firewall_rules and firewall_policy_id): - return True - return False - - firewall_policy = oneandone_conn.add_firewall_policy_rule( - firewall_id=firewall_id, - firewall_policy_rules=firewall_rules - ) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): - """ - Removes a rule from a firewall policy. - """ - try: - if module.check_mode: - rule = oneandone_conn.get_firewall_policy_rule( - firewall_id=firewall_id, - rule_id=rule_id) - if rule: - return True - return False - - firewall_policy = oneandone_conn.remove_firewall_rule( - firewall_id=firewall_id, - rule_id=rule_id - ) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_firewall_policy(module, oneandone_conn): - """ - Updates a firewall policy based on input arguments. - Firewall rules and server ips can be added/removed to/from - firewall policy. Firewall policy name and description can be - updated as well. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - firewall_policy_id = module.params.get('firewall_policy') - name = module.params.get('name') - description = module.params.get('description') - add_server_ips = module.params.get('add_server_ips') - remove_server_ips = module.params.get('remove_server_ips') - add_rules = module.params.get('add_rules') - remove_rules = module.params.get('remove_rules') - - changed = False - - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True) - if firewall_policy is None: - _check_mode(module, False) - - if name or description: - _check_mode(module, True) - firewall_policy = oneandone_conn.modify_firewall( - firewall_id=firewall_policy['id'], - name=name, - description=description) - changed = True - - if add_server_ips: - if module.check_mode: - _check_mode(module, _add_server_ips(module, - oneandone_conn, - firewall_policy['id'], - add_server_ips)) - - firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips) - changed = True - - if remove_server_ips: - chk_changed = False - for server_ip_id in remove_server_ips: - if module.check_mode: - chk_changed |= _remove_firewall_server(module, - oneandone_conn, - firewall_policy['id'], - server_ip_id) - - _remove_firewall_server(module, - oneandone_conn, - firewall_policy['id'], - server_ip_id) - _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) - changed = True - - if add_rules: - firewall_policy = _add_firewall_rules(module, - oneandone_conn, - firewall_policy['id'], - add_rules) - _check_mode(module, firewall_policy) - changed = True - - if remove_rules: - chk_changed = False - for rule_id in remove_rules: - if module.check_mode: - chk_changed |= _remove_firewall_rule(module, - oneandone_conn, - firewall_policy['id'], - rule_id) - - _remove_firewall_rule(module, - oneandone_conn, - firewall_policy['id'], - rule_id) - _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) - changed = True - - return 
(changed, firewall_policy) - except Exception as e: - module.fail_json(msg=str(e)) - - -def create_firewall_policy(module, oneandone_conn): - """ - Create a new firewall policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get('name') - description = module.params.get('description') - rules = module.params.get('rules') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - firewall_rules = [] - - for rule in rules: - firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule['protocol'], - port_from=rule['port_from'], - port_to=rule['port_to'], - source=rule['source']) - firewall_rules.append(firewall_rule) - - firewall_policy_obj = oneandone.client.FirewallPolicy( - name=name, - description=description - ) - - _check_mode(module, True) - firewall_policy = oneandone_conn.create_firewall_policy( - firewall_policy=firewall_policy_obj, - firewall_policy_rules=firewall_rules - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.firewall_policy, - firewall_policy['id'], - wait_timeout, - wait_interval) - - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh - changed = True if firewall_policy else False - - _check_mode(module, False) - - return (changed, firewall_policy) - except Exception as e: - module.fail_json(msg=str(e)) - - -def remove_firewall_policy(module, oneandone_conn): - """ - Removes a firewall policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - fp_id = module.params.get('name') - firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) - if module.check_mode: - if firewall_policy_id is None: - _check_mode(module, False) - _check_mode(module, True) - firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id) - - changed = True if firewall_policy else False - - return (changed, { - 'id': firewall_policy['id'], - 'name': firewall_policy['name'] - }) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - name=dict(type='str'), - firewall_policy=dict(type='str'), - description=dict(type='str'), - rules=dict(type='list', elements="dict", default=[]), - add_server_ips=dict(type='list', elements="str", default=[]), - remove_server_ips=dict(type='list', elements="str", default=[]), - add_rules=dict(type='list', elements="dict", default=[]), - remove_rules=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='The "auth_token" parameter or ' + - 'ONEANDONE_AUTH_TOKEN environment variable is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - 
api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required to delete a firewall policy.") - try: - (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == 'update': - if not module.params.get('firewall_policy'): - module.fail_json( - msg="'firewall_policy' parameter is required to update a firewall policy.") - try: - (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == 'present': - for param in ('name', 'rules'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for new firewall policies." % param) - try: - (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, firewall_policy=firewall_policy) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py deleted file mode 100644 index 5f541a878c..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_load_balancer -short_description: Configure 1&1 load balancer. -description: - - Create, remove, update load balancers. - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a load balancer state to create, remove, or update. - type: str - required: false - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - load_balancer: - description: - - The identifier (id or name) of the load balancer used with update state. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. - maxLength=128 - type: str - health_check_test: - description: - - Type of the health check. At the moment, HTTP is not allowed. - type: str - choices: [ "NONE", "TCP", "HTTP", "ICMP" ] - health_check_interval: - description: - - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 - type: str - health_check_path: - description: - - Url to call for checking. Required for HTTP health check. 
maxLength=1000 - type: str - required: false - health_check_parse: - description: - - Regular expression to check. Required for HTTP health check. maxLength=64 - type: str - required: false - persistence: - description: - - Persistence. - type: bool - persistence_time: - description: - - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 - type: str - method: - description: - - Balancing procedure. - type: str - choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] - datacenter: - description: - - ID or country code of the datacenter where the load balancer will be created. - - If not specified, it defaults to I(US). - type: str - choices: [ "US", "ES", "DE", "GB" ] - required: false - rules: - description: - - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, - port_balancer, and port_server parameters, in addition to the optional source parameter. - type: list - elements: dict - description: - description: - - Description of the load balancer. maxLength=256 - type: str - required: false - add_server_ips: - description: - - A list of server identifiers (id or name) to be assigned to a load balancer. - Used in combination with update state. - type: list - elements: str - required: false - remove_server_ips: - description: - - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. - type: list - elements: str - required: false - add_rules: - description: - - A list of rules that will be added to an existing load balancer. - Its syntax is the same as the one used for the rules parameter. Used in combination with update state. - type: list - elements: dict - required: false - remove_rules: - description: - - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
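# Editor's note: the rules/add_rules options above take dicts in which protocol,
# port_balancer, and port_server are required and source is optional. A small
# validation helper in the same spirit (hypothetical; the module itself relies on
# the 1&1 SDK to reject malformed rules):
REQUIRED_LB_RULE_KEYS = ('protocol', 'port_balancer', 'port_server')


def validate_lb_rules(rules):
    for idx, rule in enumerate(rules):
        missing = [k for k in REQUIRED_LB_RULE_KEYS if k not in rule]
        if missing:
            raise ValueError('rule %d is missing: %s' % (idx, ', '.join(missing)))
        # source is optional; fall back to the catch-all used in the examples
        rule.setdefault('source', '0.0.0.0')
    return rules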
- type: list - elements: str - required: false - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -''' - -EXAMPLES = ''' -- name: Create a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - name: ansible load balancer - description: Testing creation of load balancer with ansible - health_check_test: TCP - health_check_interval: 40 - persistence: true - persistence_time: 1200 - method: ROUND_ROBIN - datacenter: US - rules: - - - protocol: TCP - port_balancer: 80 - port_server: 80 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - -- name: Destroy a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - name: ansible load balancer - wait: true - wait_timeout: 500 - state: absent - -- name: Update a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer - name: ansible load balancer updated - description: Testing the update of a load balancer with ansible - wait: true - wait_timeout: 500 - state: update - -- name: Add server to a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding server to a load balancer with ansible - add_server_ips: - - server identifier (id or name) - wait: true - wait_timeout: 500 - state: update - -- name: Remove server from a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Removing server from a load balancer with ansible - remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) - wait: true - wait_timeout: 500 - state: update - -- name: Add rules to a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding rules to a load balancer with ansible - add_rules: - - - protocol: TCP - port_balancer: 70 - port_server: 70 - source: 0.0.0.0 - - - protocol: TCP - port_balancer: 60 - port_server: 60 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - state: update - -- name: Remove rules from a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding rules to a load balancer with ansible - remove_rules: - - rule_id #1 - - rule_id #2 - - ... 
- wait: true - wait_timeout: 500 - state: update -''' - -RETURN = ''' -load_balancer: - description: Information about the load balancer that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_load_balancer, - get_server, - get_datacenter, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 'GB'] -HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] -METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): - """ - Assigns servers to a load balancer. - """ - try: - attach_servers = [] - - for server_id in server_ids: - server = get_server(oneandone_conn, server_id, True) - attach_server = oneandone.client.AttachServer( - server_id=server['id'], - server_ip_id=next(iter(server['ips'] or []), None)['id'] - ) - attach_servers.append(attach_server) - - if module.check_mode: - if attach_servers: - return True - return False - - load_balancer = oneandone_conn.attach_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ips=attach_servers) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): - """ - Unassigns a server/IP from a load balancer. - """ - try: - if module.check_mode: - lb_server = oneandone_conn.get_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ip_id=server_ip_id) - if lb_server: - return True - return False - - load_balancer = oneandone_conn.remove_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ip_id=server_ip_id) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): - """ - Adds new rules to a load_balancer. - """ - try: - load_balancer_rules = [] - - for rule in rules: - load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule['protocol'], - port_balancer=rule['port_balancer'], - port_server=rule['port_server'], - source=rule['source']) - load_balancer_rules.append(load_balancer_rule) - - if module.check_mode: - lb_id = get_load_balancer(oneandone_conn, load_balancer_id) - if (load_balancer_rules and lb_id): - return True - return False - - load_balancer = oneandone_conn.add_load_balancer_rule( - load_balancer_id=load_balancer_id, - load_balancer_rules=load_balancer_rules - ) - - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): - """ - Removes a rule from a load_balancer. 
- """ - try: - if module.check_mode: - rule = oneandone_conn.get_load_balancer_rule( - load_balancer_id=load_balancer_id, - rule_id=rule_id) - if rule: - return True - return False - - load_balancer = oneandone_conn.remove_load_balancer_rule( - load_balancer_id=load_balancer_id, - rule_id=rule_id - ) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def update_load_balancer(module, oneandone_conn): - """ - Updates a load_balancer based on input arguments. - Load balancer rules and server ips can be added/removed to/from - load balancer. Load balancer name, description, health_check_test, - health_check_interval, persistence, persistence_time, and method - can be updated as well. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - load_balancer_id = module.params.get('load_balancer') - name = module.params.get('name') - description = module.params.get('description') - health_check_test = module.params.get('health_check_test') - health_check_interval = module.params.get('health_check_interval') - health_check_path = module.params.get('health_check_path') - health_check_parse = module.params.get('health_check_parse') - persistence = module.params.get('persistence') - persistence_time = module.params.get('persistence_time') - method = module.params.get('method') - add_server_ips = module.params.get('add_server_ips') - remove_server_ips = module.params.get('remove_server_ips') - add_rules = module.params.get('add_rules') - remove_rules = module.params.get('remove_rules') - - changed = False - - load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) - if load_balancer is None: - _check_mode(module, False) - - if (name or description or health_check_test or health_check_interval or health_check_path or - health_check_parse or persistence or persistence_time or method): - _check_mode(module, True) - load_balancer = oneandone_conn.modify_load_balancer( - load_balancer_id=load_balancer['id'], - name=name, - description=description, - health_check_test=health_check_test, - health_check_interval=health_check_interval, - health_check_path=health_check_path, - health_check_parse=health_check_parse, - persistence=persistence, - persistence_time=persistence_time, - method=method) - changed = True - - if add_server_ips: - if module.check_mode: - _check_mode(module, _add_server_ips(module, - oneandone_conn, - load_balancer['id'], - add_server_ips)) - - load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) - changed = True - - if remove_server_ips: - chk_changed = False - for server_ip_id in remove_server_ips: - if module.check_mode: - chk_changed |= _remove_load_balancer_server(module, - oneandone_conn, - load_balancer['id'], - server_ip_id) - - _remove_load_balancer_server(module, - oneandone_conn, - load_balancer['id'], - server_ip_id) - _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) - changed = True - - if add_rules: - load_balancer = _add_load_balancer_rules(module, - oneandone_conn, - load_balancer['id'], - add_rules) - _check_mode(module, load_balancer) - changed = True - - if remove_rules: - chk_changed = False - for rule_id in remove_rules: - if module.check_mode: - chk_changed |= _remove_load_balancer_rule(module, - oneandone_conn, - load_balancer['id'], - rule_id) - - _remove_load_balancer_rule(module, - oneandone_conn, - load_balancer['id'], - rule_id) - _check_mode(module, chk_changed) - load_balancer 
= get_load_balancer(oneandone_conn, load_balancer['id'], True) - changed = True - - try: - return (changed, load_balancer) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_load_balancer(module, oneandone_conn): - """ - Create a new load_balancer. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get('name') - description = module.params.get('description') - health_check_test = module.params.get('health_check_test') - health_check_interval = module.params.get('health_check_interval') - health_check_path = module.params.get('health_check_path') - health_check_parse = module.params.get('health_check_parse') - persistence = module.params.get('persistence') - persistence_time = module.params.get('persistence_time') - method = module.params.get('method') - datacenter = module.params.get('datacenter') - rules = module.params.get('rules') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - load_balancer_rules = [] - - datacenter_id = None - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - for rule in rules: - load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule['protocol'], - port_balancer=rule['port_balancer'], - port_server=rule['port_server'], - source=rule['source']) - load_balancer_rules.append(load_balancer_rule) - - _check_mode(module, True) - load_balancer_obj = oneandone.client.LoadBalancer( - health_check_path=health_check_path, - health_check_parse=health_check_parse, - name=name, - description=description, - health_check_test=health_check_test, - health_check_interval=health_check_interval, - persistence=persistence, - persistence_time=persistence_time, - method=method, - datacenter_id=datacenter_id - ) - - load_balancer = oneandone_conn.create_load_balancer( - load_balancer=load_balancer_obj, - load_balancer_rules=load_balancer_rules - ) - - if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.load_balancer, - load_balancer['id'], - wait_timeout, - wait_interval) - - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh - changed = True if load_balancer else False - - _check_mode(module, False) - - return (changed, load_balancer) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_load_balancer(module, oneandone_conn): - """ - Removes a load_balancer. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - lb_id = module.params.get('name') - load_balancer_id = get_load_balancer(oneandone_conn, lb_id) - if module.check_mode: - if load_balancer_id is None: - _check_mode(module, False) - _check_mode(module, True) - load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) - - changed = True if load_balancer else False - - return (changed, { - 'id': load_balancer['id'], - 'name': load_balancer['name'] - }) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - load_balancer=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - health_check_test=dict( - choices=HEALTH_CHECK_TESTS), - health_check_interval=dict(type='str'), - health_check_path=dict(type='str'), - health_check_parse=dict(type='str'), - persistence=dict(type='bool'), - persistence_time=dict(type='str'), - method=dict( - choices=METHODS), - datacenter=dict( - choices=DATACENTERS), - rules=dict(type='list', elements="dict", default=[]), - add_server_ips=dict(type='list', elements="str", default=[]), - remove_server_ips=dict(type='list', elements="str", default=[]), - add_rules=dict(type='list', elements="dict", default=[]), - remove_rules=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for deleting a load balancer.") - try: - (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - elif state == 'update': - if not module.params.get('load_balancer'): - module.fail_json( - msg="'load_balancer' parameter is required for updating a load balancer.") - try: - (changed, load_balancer) = update_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == 'present': - for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', - 'persistence_time', 'method', 'rules'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for new load balancers." % param) - try: - (changed, load_balancer) = create_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, load_balancer=load_balancer) - - -if __name__ == '__main__': - main()
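For reference before the next removed file: all four oneandone modules deleted in this changeset wire up their SDK connection the same way, with `auth_token` and `api_url` falling back to environment variables and the client built with or without a custom endpoint. A minimal standalone sketch of that shared pattern (the `OneAndOneService` calls mirror `main()` above; the `connect()` helper itself is illustrative, not part of the SDK):

```python
# Minimal sketch of the connection wiring shared by these modules.
# The OneAndOneService calls mirror main() above; connect() is illustrative.
import os

import oneandone.client


def connect(auth_token=None, api_url=None):
    # Same precedence as the module's argument_spec defaults.
    token = auth_token or os.environ.get('ONEANDONE_AUTH_TOKEN')
    if not token:
        raise ValueError('auth_token parameter is required.')
    url = api_url or os.environ.get('ONEANDONE_API_URL')
    if url:
        return oneandone.client.OneAndOneService(api_token=token, api_url=url)
    return oneandone.client.OneAndOneService(api_token=token)
```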
diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py deleted file mode 100644 index 28dd0d41c5..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ /dev/null @@ -1,1038 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_monitoring_policy -short_description: Configure 1&1 monitoring policy. -description: - - Create, remove, update monitoring policies - (and add/remove ports, processes, and servers). - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a monitoring policy's state to create, remove, update. - type: str - required: false - default: present - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 - type: str - monitoring_policy: - description: - - The identifier (id or name) of the monitoring policy used with update state. - type: str - agent: - description: - - Set to true to use the agent. - type: str - email: - description: - - User's email. maxLength=128 - type: str - description: - description: - - Monitoring policy description. maxLength=256 - type: str - required: false - thresholds: - description: - - Monitoring policy thresholds. Each of the suboptions has warning and critical, - which both have alert and value suboptions. Warning is used to set limits for - warning alerts; critical is used to set critical alerts. alert enables the alert, - and value sets the limit that, when exceeded, triggers it. - type: list - elements: dict - suboptions: - cpu: - description: - - Consumption limits of CPU. - required: true - ram: - description: - - Consumption limits of RAM. - required: true - disk: - description: - - Consumption limits of hard disk. - required: true - internal_ping: - description: - - Response limits of internal ping. - required: true - transfer: - description: - - Consumption limits for transfer. - required: true - ports: - description: - - Array of ports that will be monitored. - type: list - elements: dict - suboptions: - protocol: - description: - - Internet protocol. - choices: [ "TCP", "UDP" ] - required: true - port: - description: - - Port number.
minimum=1, maximum=65535 - required: true - alert_if: - description: - - Case of alert. - choices: [ "RESPONDING", "NOT_RESPONDING" ] - required: true - email_notification: - description: - - Set to true to send e-mail notifications. - required: true - processes: - description: - - Array of processes that will be monitored. - type: list - elements: dict - suboptions: - process: - description: - - Name of the process. maxLength=50 - required: true - alert_if: - description: - - Case of alert. - choices: [ "RUNNING", "NOT_RUNNING" ] - required: true - add_ports: - description: - - Ports to add to the monitoring policy. - type: list - elements: dict - required: false - add_processes: - description: - - Processes to add to the monitoring policy. - type: list - elements: dict - required: false - add_servers: - description: - - Servers to add to the monitoring policy. - type: list - elements: str - required: false - remove_ports: - description: - - Ports to remove from the monitoring policy. - type: list - elements: str - required: false - remove_processes: - description: - - Processes to remove from the monitoring policy. - type: list - elements: str - required: false - remove_servers: - description: - - Servers to remove from the monitoring policy. - type: list - elements: str - required: false - update_ports: - description: - - Ports to be updated on the monitoring policy. - type: list - elements: dict - required: false - update_processes: - description: - - Processes to be updated on the monitoring policy. - type: list - elements: dict - required: false - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -''' - -EXAMPLES = ''' -- name: Create a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - name: ansible monitoring policy - description: Testing creation of a monitoring policy with ansible - email: your@emailaddress.com - agent: true - thresholds: - - - cpu: - warning: - value: 80 - alert: false - critical: - value: 92 - alert: false - - - ram: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - - disk: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - - internal_ping: - warning: - value: 50 - alert: false - critical: - value: 100 - alert: false - - - transfer: - warning: - value: 1000 - alert: false - critical: - value: 2000 - alert: false - ports: - - - protocol: TCP - port: 22 - alert_if: RESPONDING - email_notification: false - processes: - - - process: test - alert_if: NOT_RUNNING - email_notification: false - wait: true - -- name: Destroy a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - state: absent - name: ansible monitoring policy - -- name: Update a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy - name: ansible monitoring policy updated - description: Testing creation of a monitoring policy with ansible updated - email: another@emailaddress.com -
thresholds: - - - cpu: - warning: - value: 70 - alert: false - critical: - value: 90 - alert: false - - - ram: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - - disk: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - - internal_ping: - warning: - value: 60 - alert: false - critical: - value: 90 - alert: false - - - transfer: - warning: - value: 900 - alert: false - critical: - value: 1900 - alert: false - wait: true - state: update - -- name: Add a port to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_ports: - - - protocol: TCP - port: 33 - alert_if: RESPONDING - email_notification: false - wait: true - state: update - -- name: Update existing ports of a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - update_ports: - - - id: existing_port_id - protocol: TCP - port: 34 - alert_if: RESPONDING - email_notification: false - - - id: existing_port_id - protocol: TCP - port: 23 - alert_if: RESPONDING - email_notification: false - wait: true - state: update - -- name: Remove a port from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_ports: - - port_id - state: update - -- name: Add a process to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_processes: - - - process: test_2 - alert_if: NOT_RUNNING - email_notification: false - wait: true - state: update - -- name: Update existing processes of a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - update_processes: - - - id: process_id - process: test_1 - alert_if: NOT_RUNNING - email_notification: false - - - id: process_id - process: test_3 - alert_if: NOT_RUNNING - email_notification: false - wait: true - state: update - -- name: Remove a process from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_processes: - - process_id - wait: true - state: update - -- name: Add server to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_servers: - - server id or name - wait: true - state: update - -- name: Remove server from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_servers: - - server01 - wait: true - state: update -''' - -RETURN = ''' -monitoring_policy: - description: Information about the monitoring policy that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_monitoring_policy, - get_server, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - 
import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): - """ - Adds new ports to a monitoring policy. - """ - try: - monitoring_policy_ports = [] - - for _port in ports: - monitoring_policy_port = oneandone.client.Port( - protocol=_port['protocol'], - port=_port['port'], - alert_if=_port['alert_if'], - email_notification=_port['email_notification'] - ) - monitoring_policy_ports.append(monitoring_policy_port) - - if module.check_mode: - if monitoring_policy_ports: - return True - return False - - monitoring_policy = oneandone_conn.add_port( - monitoring_policy_id=monitoring_policy_id, - ports=monitoring_policy_ports) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id): - """ - Removes a port from a monitoring policy. - """ - try: - if module.check_mode: - monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) - if monitoring_policy: - return True - return False - - monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port): - """ - Modifies a monitoring policy port. - """ - try: - if module.check_mode: - cm_port = oneandone_conn.get_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) - if cm_port: - return True - return False - - monitoring_policy_port = oneandone.client.Port( - protocol=port['protocol'], - port=port['port'], - alert_if=port['alert_if'], - email_notification=port['email_notification'] - ) - - monitoring_policy = oneandone_conn.modify_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id, - port=monitoring_policy_port) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): - """ - Adds new processes to a monitoring policy. - """ - try: - monitoring_policy_processes = [] - - for _process in processes: - monitoring_policy_process = oneandone.client.Process( - process=_process['process'], - alert_if=_process['alert_if'], - email_notification=_process['email_notification'] - ) - monitoring_policy_processes.append(monitoring_policy_process) - - if module.check_mode: - mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) - if (monitoring_policy_processes and mp_id): - return True - return False - - monitoring_policy = oneandone_conn.add_process( - monitoring_policy_id=monitoring_policy_id, - processes=monitoring_policy_processes) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id): - """ - Removes a process from a monitoring policy. 
- """ - try: - if module.check_mode: - process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id - ) - if process: - return True - return False - - monitoring_policy = oneandone_conn.delete_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process): - """ - Modifies a monitoring policy process. - """ - try: - if module.check_mode: - cm_process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id) - if cm_process: - return True - return False - - monitoring_policy_process = oneandone.client.Process( - process=process['process'], - alert_if=process['alert_if'], - email_notification=process['email_notification'] - ) - - monitoring_policy = oneandone_conn.modify_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id, - process=monitoring_policy_process) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): - """ - Attaches servers to a monitoring policy. - """ - try: - attach_servers = [] - - for _server_id in servers: - server_id = get_server(oneandone_conn, _server_id) - attach_server = oneandone.client.AttachServer( - server_id=server_id - ) - attach_servers.append(attach_server) - - if module.check_mode: - if attach_servers: - return True - return False - - monitoring_policy = oneandone_conn.attach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - servers=attach_servers) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id): - """ - Detaches a server from a monitoring policy. - """ - try: - if module.check_mode: - mp_server = oneandone_conn.get_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - server_id=server_id) - if mp_server: - return True - return False - - monitoring_policy = oneandone_conn.detach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - server_id=server_id) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def update_monitoring_policy(module, oneandone_conn): - """ - Updates a monitoring_policy based on input arguments. - Monitoring policy ports, processes and servers can be added/removed to/from - a monitoring policy. Monitoring policy name, description, email, - thresholds for cpu, ram, disk, transfer and internal_ping - can be updated as well. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - monitoring_policy_id = module.params.get('monitoring_policy') - name = module.params.get('name') - description = module.params.get('description') - email = module.params.get('email') - thresholds = module.params.get('thresholds') - add_ports = module.params.get('add_ports') - update_ports = module.params.get('update_ports') - remove_ports = module.params.get('remove_ports') - add_processes = module.params.get('add_processes') - update_processes = module.params.get('update_processes') - remove_processes = module.params.get('remove_processes') - add_servers = module.params.get('add_servers') - remove_servers = module.params.get('remove_servers') - - changed = False - - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True) - if monitoring_policy is None: - _check_mode(module, False) - - _monitoring_policy = oneandone.client.MonitoringPolicy( - name=name, - description=description, - email=email - ) - - _thresholds = None - - if thresholds: - threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] - - _thresholds = [] - for threshold in thresholds: - key = list(threshold.keys())[0] - if key in threshold_entities: - _threshold = oneandone.client.Threshold( - entity=key, - warning_value=threshold[key]['warning']['value'], - warning_alert=str(threshold[key]['warning']['alert']).lower(), - critical_value=threshold[key]['critical']['value'], - critical_alert=str(threshold[key]['critical']['alert']).lower()) - _thresholds.append(_threshold) - - if name or description or email or thresholds: - _check_mode(module, True) - monitoring_policy = oneandone_conn.modify_monitoring_policy( - monitoring_policy_id=monitoring_policy['id'], - monitoring_policy=_monitoring_policy, - thresholds=_thresholds) - changed = True - - if add_ports: - if module.check_mode: - _check_mode(module, _add_ports(module, - oneandone_conn, - monitoring_policy['id'], - add_ports)) - - monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports) - changed = True - - if update_ports: - chk_changed = False - for update_port in update_ports: - if module.check_mode: - chk_changed |= _modify_port(module, - oneandone_conn, - monitoring_policy['id'], - update_port['id'], - update_port) - - _modify_port(module, - oneandone_conn, - monitoring_policy['id'], - update_port['id'], - update_port) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if remove_ports: - chk_changed = False - for port_id in remove_ports: - if module.check_mode: - chk_changed |= _delete_monitoring_policy_port(module, - oneandone_conn, - monitoring_policy['id'], - port_id) - - _delete_monitoring_policy_port(module, - oneandone_conn, - monitoring_policy['id'], - port_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if add_processes: - monitoring_policy = _add_processes(module, - oneandone_conn, - monitoring_policy['id'], - add_processes) - _check_mode(module, monitoring_policy) - changed = True - - if update_processes: - chk_changed = False - for update_process in update_processes: - if module.check_mode: - chk_changed |= _modify_process(module, - oneandone_conn, - monitoring_policy['id'], - update_process['id'], - update_process) - - _modify_process(module, - oneandone_conn, - monitoring_policy['id'], - update_process['id'], - 
update_process) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if remove_processes: - chk_changed = False - for process_id in remove_processes: - if module.check_mode: - chk_changed |= _delete_monitoring_policy_process(module, - oneandone_conn, - monitoring_policy['id'], - process_id) - - _delete_monitoring_policy_process(module, - oneandone_conn, - monitoring_policy['id'], - process_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if add_servers: - monitoring_policy = _attach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - add_servers) - _check_mode(module, monitoring_policy) - changed = True - - if remove_servers: - chk_changed = False - for _server_id in remove_servers: - server_id = get_server(oneandone_conn, _server_id) - - if module.check_mode: - chk_changed |= _detach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - server_id) - - _detach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - server_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - return (changed, monitoring_policy) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_monitoring_policy(module, oneandone_conn): - """ - Creates a new monitoring policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get('name') - description = module.params.get('description') - email = module.params.get('email') - agent = module.params.get('agent') - thresholds = module.params.get('thresholds') - ports = module.params.get('ports') - processes = module.params.get('processes') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - _monitoring_policy = oneandone.client.MonitoringPolicy(name, - description, - email, - agent, ) - - _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower() - - threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] - - _thresholds = [] - for threshold in thresholds: - key = list(threshold.keys())[0] - if key in threshold_entities: - _threshold = oneandone.client.Threshold( - entity=key, - warning_value=threshold[key]['warning']['value'], - warning_alert=str(threshold[key]['warning']['alert']).lower(), - critical_value=threshold[key]['critical']['value'], - critical_alert=str(threshold[key]['critical']['alert']).lower()) - _thresholds.append(_threshold) - - _ports = [] - for port in ports: - _port = oneandone.client.Port( - protocol=port['protocol'], - port=port['port'], - alert_if=port['alert_if'], - email_notification=str(port['email_notification']).lower()) - _ports.append(_port) - - _processes = [] - for process in processes: - _process = oneandone.client.Process( - process=process['process'], - alert_if=process['alert_if'], - email_notification=str(process['email_notification']).lower()) - _processes.append(_process) - - _check_mode(module, True) - monitoring_policy = oneandone_conn.create_monitoring_policy( - monitoring_policy=_monitoring_policy, - thresholds=_thresholds, - ports=_ports, - processes=_processes - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - 
OneAndOneResources.monitoring_policy, - monitoring_policy['id'], - wait_timeout, - wait_interval) - - changed = True if monitoring_policy else False - - _check_mode(module, False) - - return (changed, monitoring_policy) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_monitoring_policy(module, oneandone_conn): - """ - Removes a monitoring policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - mp_id = module.params.get('name') - monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) - if module.check_mode: - if monitoring_policy_id is None: - _check_mode(module, False) - _check_mode(module, True) - monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id) - - changed = True if monitoring_policy else False - - return (changed, { - 'id': monitoring_policy['id'], - 'name': monitoring_policy['name'] - }) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - name=dict(type='str'), - monitoring_policy=dict(type='str'), - agent=dict(type='str'), - email=dict(type='str'), - description=dict(type='str'), - thresholds=dict(type='list', elements="dict", default=[]), - ports=dict(type='list', elements="dict", default=[]), - processes=dict(type='list', elements="dict", default=[]), - add_ports=dict(type='list', elements="dict", default=[]), - update_ports=dict(type='list', elements="dict", default=[]), - remove_ports=dict(type='list', elements="str", default=[]), - add_processes=dict(type='list', elements="dict", default=[]), - update_processes=dict(type='list', elements="dict", default=[]), - remove_processes=dict(type='list', elements="str", default=[]), - add_servers=dict(type='list', elements="str", default=[]), - remove_servers=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required to delete a monitoring policy.") - try: - (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - elif state == 'update': - if not module.params.get('monitoring_policy'): - module.fail_json( - msg="'monitoring_policy' parameter is required to update a monitoring policy.") - try: - (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == 'present': - for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for a new monitoring policy." % param) - try: - (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, monitoring_policy=monitoring_policy) - - -if __name__ == '__main__': - main()
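One pattern worth distilling from the module above before the next removed file: each playbook `thresholds` entry is a single-key dict naming the metric, and the module flattens its nested warning/critical settings into one SDK `Threshold` object with stringified booleans. A condensed sketch of that translation, mirroring `create_monitoring_policy` (the sample input in the trailing comment is illustrative only):

```python
# Condensed sketch of the thresholds translation in create_monitoring_policy
# above. The Threshold constructor mirrors the module code.
import oneandone.client

THRESHOLD_ENTITIES = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']


def build_thresholds(thresholds):
    result = []
    for threshold in thresholds:
        key = list(threshold.keys())[0]  # each entry is a single-key dict
        if key in THRESHOLD_ENTITIES:
            result.append(oneandone.client.Threshold(
                entity=key,
                warning_value=threshold[key]['warning']['value'],
                # the API expects 'true'/'false' strings, not Python booleans
                warning_alert=str(threshold[key]['warning']['alert']).lower(),
                critical_value=threshold[key]['critical']['value'],
                critical_alert=str(threshold[key]['critical']['alert']).lower()))
    return result


# Example input, as it arrives from a playbook:
# [{'cpu': {'warning': {'value': 80, 'alert': False},
#           'critical': {'value': 92, 'alert': False}}}]
```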
diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/cloud/oneandone/oneandone_private_network.py deleted file mode 100644 index 6a16cf683e..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ /dev/null @@ -1,457 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_private_network -short_description: Configure 1&1 private networking. -description: - - Create, remove, reconfigure, update a private network. - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a network's state to create, remove, or update. - type: str - required: false - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - private_network: - description: - - The identifier (id or name) of the network used with update state. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Private network name used with present state. Used as identifier (id or name) when used with absent state. - type: str - description: - description: - - Set a description for the network. - type: str - datacenter: - description: - - The identifier of the datacenter where the private network will be created. - type: str - choices: [US, ES, DE, GB] - network_address: - description: - - Set a private network space, e.g. 192.168.1.0 - type: str - subnet_mask: - description: - - Set the netmask for the private network, e.g. 255.255.255.0 - type: str - add_members: - description: - - List of server identifiers (name or id) to be added to the private network. - type: list - elements: str - remove_members: - description: - - List of server identifiers (name or id) to be removed from the private network.
- type: list - elements: str - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -''' - -EXAMPLES = ''' -- name: Create a private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - name: backup_network - description: Testing creation of a private network with ansible - network_address: 70.35.193.100 - subnet_mask: 255.0.0.0 - datacenter: US - -- name: Destroy a private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: absent - name: backup_network - -- name: Modify the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - network_address: 192.168.2.0 - subnet_mask: 255.255.255.0 - -- name: Add members to the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - add_members: - - server identifier (id or name) - -- name: Remove members from the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - remove_members: - - server identifier (id or name) -''' - -RETURN = ''' -private_network: - description: Information about the private network. 
- type: dict - sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_private_network, - get_server, - get_datacenter, - OneAndOneResources, - wait_for_resource_creation_completion, - wait_for_resource_deletion_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 'GB'] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _add_servers(module, oneandone_conn, name, members): - try: - private_network_id = get_private_network(oneandone_conn, name) - - if module.check_mode: - if private_network_id and members: - return True - return False - - network = oneandone_conn.attach_private_network_servers( - private_network_id=private_network_id, - server_ids=members) - - return network - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_member(module, oneandone_conn, name, member_id): - try: - private_network_id = get_private_network(oneandone_conn, name) - - if module.check_mode: - if private_network_id: - network_member = oneandone_conn.get_private_network_server( - private_network_id=private_network_id, - server_id=member_id) - if network_member: - return True - return False - - network = oneandone_conn.remove_private_network_server( - private_network_id=name, - server_id=member_id) - - return network - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_network(module, oneandone_conn): - """ - Create new private network - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any network was added. - """ - name = module.params.get('name') - description = module.params.get('description') - network_address = module.params.get('network_address') - subnet_mask = module.params.get('subnet_mask') - datacenter = module.params.get('datacenter') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - try: - _check_mode(module, True) - network = oneandone_conn.create_private_network( - private_network=oneandone.client.PrivateNetwork( - name=name, - description=description, - network_address=network_address, - subnet_mask=subnet_mask, - datacenter_id=datacenter_id - )) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.private_network, - network['id'], - wait_timeout, - wait_interval) - network = get_private_network(oneandone_conn, - network['id'], - True) - - changed = True if network else False - - _check_mode(module, False) - - return (changed, network) - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_network(module, oneandone_conn): - """ - Modifies a private network. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - _private_network_id = module.params.get('private_network') - _name = module.params.get('name') - _description = module.params.get('description') - _network_address = module.params.get('network_address') - _subnet_mask = module.params.get('subnet_mask') - _add_members = module.params.get('add_members') - _remove_members = module.params.get('remove_members') - - changed = False - - private_network = get_private_network(oneandone_conn, - _private_network_id, - True) - if private_network is None: - _check_mode(module, False) - - if _name or _description or _network_address or _subnet_mask: - _check_mode(module, True) - private_network = oneandone_conn.modify_private_network( - private_network_id=private_network['id'], - name=_name, - description=_description, - network_address=_network_address, - subnet_mask=_subnet_mask) - changed = True - - if _add_members: - instances = [] - - for member in _add_members: - instance_id = get_server(oneandone_conn, member) - instance_obj = oneandone.client.AttachServer(server_id=instance_id) - - instances.extend([instance_obj]) - private_network = _add_servers(module, oneandone_conn, private_network['id'], instances) - _check_mode(module, private_network) - changed = True - - if _remove_members: - chk_changed = False - for member in _remove_members: - instance = get_server(oneandone_conn, member, True) - - if module.check_mode: - chk_changed |= _remove_member(module, - oneandone_conn, - private_network['id'], - instance['id']) - _check_mode(module, instance and chk_changed) - - _remove_member(module, - oneandone_conn, - private_network['id'], - instance['id']) - private_network = get_private_network(oneandone_conn, - private_network['id'], - True) - changed = True - - return (changed, private_network) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_network(module, oneandone_conn): - """ - Removes a private network. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. 
- """ - try: - pn_id = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - private_network_id = get_private_network(oneandone_conn, pn_id) - if module.check_mode: - if private_network_id is None: - _check_mode(module, False) - _check_mode(module, True) - private_network = oneandone_conn.delete_private_network(private_network_id) - wait_for_resource_deletion_completion(oneandone_conn, - OneAndOneResources.private_network, - private_network['id'], - wait_timeout, - wait_interval) - - changed = True if private_network else False - - return (changed, { - 'id': private_network['id'], - 'name': private_network['name'] - }) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - private_network=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - network_address=dict(type='str'), - subnet_mask=dict(type='str'), - add_members=dict(type='list', elements="str", default=[]), - remove_members=dict(type='list', elements="str", default=[]), - datacenter=dict( - choices=DATACENTERS), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for deleting a network.") - try: - (changed, private_network) = remove_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('private_network'): - module.fail_json( - msg="'private_network' parameter is required for updating a network.") - try: - (changed, private_network) = update_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == 'present': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for new networks.") - try: - (changed, private_network) = create_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, private_network=private_network) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/cloud/oneandone/oneandone_public_ip.py deleted file mode 100644 index 96b1c9f3a5..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by 
diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/cloud/oneandone/oneandone_public_ip.py deleted file mode 100644 index 96b1c9f3a5..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_public_ip -short_description: Configure 1&1 public IPs. -description: - - Create, update, and remove public IPs. - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a public ip state to create, remove, or update. - type: str - required: false - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - reverse_dns: - description: - - Reverse DNS name. maxLength=256 - type: str - required: false - datacenter: - description: - - ID of the datacenter where the IP will be created (only for unassigned IPs). - type: str - choices: [US, ES, DE, GB] - default: US - required: false - type: - description: - - Type of IP. Currently, only IPV4 is available. - type: str - choices: ["IPV4", "IPV6"] - default: 'IPV4' - required: false - public_ip_id: - description: - - The ID of the public IP used with update and delete states. - type: str - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -''' - -EXAMPLES = ''' -- name: Create a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - reverse_dns: example.com - datacenter: US - type: IPV4 - -- name: Update a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - public_ip_id: public ip id - reverse_dns: secondexample.com - state: update - -- name: Delete a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - public_ip_id: public ip id - state: absent -''' - -RETURN = ''' -public_ip: - description: Information about the public ip that was processed - type: dict - sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_datacenter, - get_public_ip, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 'GB'] - -TYPES = ['IPV4', 'IPV6'] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def create_public_ip(module, oneandone_conn): - """ - Create new
public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was added. - """ - reverse_dns = module.params.get('reverse_dns') - datacenter = module.params.get('datacenter') - ip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - _check_mode(module, False) - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - try: - _check_mode(module, True) - public_ip = oneandone_conn.create_public_ip( - reverse_dns=reverse_dns, - ip_type=ip_type, - datacenter_id=datacenter_id) - - if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.public_ip, - public_ip['id'], - wait_timeout, - wait_interval) - public_ip = oneandone_conn.get_public_ip(public_ip['id']) - - changed = True if public_ip else False - - return (changed, public_ip) - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_public_ip(module, oneandone_conn): - """ - Update a public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was changed. - """ - reverse_dns = module.params.get('reverse_dns') - public_ip_id = module.params.get('public_ip_id') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - public_ip = get_public_ip(oneandone_conn, public_ip_id, True) - if public_ip is None: - _check_mode(module, False) - module.fail_json( - msg='public IP %s not found.' % public_ip_id) - - try: - _check_mode(module, True) - public_ip = oneandone_conn.modify_public_ip( - ip_id=public_ip['id'], - reverse_dns=reverse_dns) - - if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.public_ip, - public_ip['id'], - wait_timeout, - wait_interval) - public_ip = oneandone_conn.get_public_ip(public_ip['id']) - - changed = True if public_ip else False - - return (changed, public_ip) - except Exception as e: - module.fail_json(msg=str(e)) - - -def delete_public_ip(module, oneandone_conn): - """ - Delete a public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was deleted. - """ - public_ip_id = module.params.get('public_ip_id') - - public_ip = get_public_ip(oneandone_conn, public_ip_id, True) - if public_ip is None: - _check_mode(module, False) - module.fail_json( - msg='public IP %s not found.' 
% public_ip_id) - - try: - _check_mode(module, True) - deleted_public_ip = oneandone_conn.delete_public_ip( - ip_id=public_ip['id']) - - changed = True if deleted_public_ip else False - - return (changed, { - 'id': public_ip['id'] - }) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - public_ip_id=dict(type='str'), - reverse_dns=dict(type='str'), - datacenter=dict( - choices=DATACENTERS, - default='US'), - type=dict( - choices=TYPES, - default='IPV4'), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('public_ip_id'): - module.fail_json( - msg="'public_ip_id' parameter is required to delete a public ip.") - try: - (changed, public_ip) = delete_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('public_ip_id'): - module.fail_json( - msg="'public_ip_id' parameter is required to update a public ip.") - try: - (changed, public_ip) = update_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == 'present': - try: - (changed, public_ip) = create_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, public_ip=public_ip) - - -if __name__ == '__main__': - main()
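The create and update paths above all share the same wait contract: after the API call they block on `wait_for_resource_creation_completion(conn, resource_enum, resource_id, wait_timeout, wait_interval)` and then re-fetch the resource so the returned facts are fresh. The helper's internals live in `module_utils` and are not shown in this diff; the following is only a rough sketch of what such a poller typically does, with `get_status` standing in as a hypothetical accessor and `'ACTIVE'` as an assumed terminal state:

```python
# Rough sketch of the wait-helper contract used above. get_status() is a
# hypothetical accessor standing in for the per-resource lookup done by the
# real module_utils implementation; 'ACTIVE' is an assumed terminal state.
import time


def wait_for_creation(get_status, resource_id, wait_timeout, wait_interval):
    deadline = time.time() + wait_timeout
    while time.time() < deadline:
        if get_status(resource_id) == 'ACTIVE':
            return
        time.sleep(wait_interval)  # poll at the caller-supplied cadence
    raise Exception('Timed out waiting for resource %s' % resource_id)
```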
diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/cloud/oneandone/oneandone_server.py deleted file mode 100644 index aa651bd75f..0000000000 --- a/plugins/modules/cloud/oneandone/oneandone_server.py +++ /dev/null @@ -1,707 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_server -short_description: Create, destroy, start, stop, and reboot a 1&1 Host server. -description: - - Create, destroy, update, start, stop, and reboot a 1&1 Host server. - When the server is created it can optionally wait for it to be 'running' before returning. -options: - state: - description: - - Define a server's state to create, remove, start or stop it. - type: str - default: present - choices: [ "present", "absent", "running", "stopped" ] - auth_token: - description: - - Authenticating API token provided by 1&1. Overrides the - ONEANDONE_AUTH_TOKEN environment variable. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - datacenter: - description: - - The datacenter location. - type: str - default: US - choices: [ "US", "ES", "DE", "GB" ] - hostname: - description: - - The hostname or ID of the server. Only used when state is 'present'. - type: str - description: - description: - - The description of the server. - type: str - appliance: - description: - - The operating system name or ID for the server. - It is required only for 'present' state. - type: str - fixed_instance_size: - description: - - The instance size name or ID of the server. - It is required only for 'present' state, and it is mutually exclusive with - vcore, cores_per_processor, ram, and hdds parameters. - - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)' - type: str - vcore: - description: - - The total number of processors. - It must be provided with cores_per_processor, ram, and hdds parameters. - type: int - cores_per_processor: - description: - - The number of cores per processor. - It must be provided with vcore, ram, and hdds parameters. - type: int - ram: - description: - - The amount of RAM memory. - It must be provided with vcore, cores_per_processor, and hdds parameters. - type: float - hdds: - description: - - A list of hard disks with nested "size" and "is_main" properties. - It must be provided with vcore, cores_per_processor, and ram parameters. - type: list - elements: dict - private_network: - description: - - The private network name or ID. - type: str - firewall_policy: - description: - - The firewall policy name or ID. - type: str - load_balancer: - description: - - The load balancer name or ID. - type: str - monitoring_policy: - description: - - The monitoring policy name or ID. - type: str - server: - description: - - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'. - type: str - count: - description: - - The number of servers to create. - type: int - default: 1 - ssh_key: - description: - - User's public SSH key (contents, not path). - type: raw - server_type: - description: - - The type of server to be built. - type: str - default: "cloud" - choices: [ "cloud", "baremetal", "k8s_node" ] - wait: - description: - - Wait for the server to be in state 'running' before returning. - Also used for delete operation (set to 'false' if you don't want to wait - for each individual server to be deleted before moving on with - other tasks.) - type: bool - default: 'yes' - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the wait_for methods - type: int - default: 5 - auto_increment: - description: - - When creating multiple servers at once, whether to differentiate - hostnames by appending a count after them or substituting the count - where there is a %02d or %03d in the hostname string.
- type: bool - default: 'yes' - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" - -''' - -EXAMPLES = ''' -- name: Create three servers and enumerate their names - community.general.oneandone_server: - auth_token: oneandone_private_api_key - hostname: node%02d - fixed_instance_size: XL - datacenter: US - appliance: C5A349786169F140BCBC335675014C08 - auto_increment: true - count: 3 - -- name: Create three servers, passing in an ssh_key - community.general.oneandone_server: - auth_token: oneandone_private_api_key - hostname: node%02d - vcore: 2 - cores_per_processor: 4 - ram: 8.0 - hdds: - - size: 50 - is_main: false - datacenter: ES - appliance: C5A349786169F140BCBC335675014C08 - count: 3 - wait: yes - wait_timeout: 600 - wait_interval: 10 - ssh_key: SSH_PUBLIC_KEY - -- name: Removing server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: absent - server: 'node01' - -- name: Starting server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: running - server: 'node01' - -- name: Stopping server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: stopped - server: 'node01' -''' - -RETURN = ''' -servers: - description: Information about each server that was processed - type: list - sample: '[{"hostname": "my-server", "id": "server-id"}]' - returned: always -''' - -import os -import time -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_datacenter, - get_fixed_instance_size, - get_appliance, - get_private_network, - get_monitoring_policy, - get_firewall_policy, - get_load_balancer, - get_server, - OneAndOneResources, - wait_for_resource_creation_completion, - wait_for_resource_deletion_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 'GB'] - -ONEANDONE_SERVER_STATES = ( - 'DEPLOYING', - 'POWERED_OFF', - 'POWERED_ON', - 'POWERING_ON', - 'POWERING_OFF', -) - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _create_server(module, oneandone_conn, hostname, description, - fixed_instance_size_id, vcore, cores_per_processor, ram, - hdds, datacenter_id, appliance_id, ssh_key, - private_network_id, firewall_policy_id, load_balancer_id, - monitoring_policy_id, server_type, wait, wait_timeout, - wait_interval): - - try: - existing_server = get_server(oneandone_conn, hostname) - - if existing_server: - if module.check_mode: - return False - return None - - if module.check_mode: - return True - - server = oneandone_conn.create_server( - oneandone.client.Server( - name=hostname, - description=description, - fixed_instance_size_id=fixed_instance_size_id, - vcore=vcore, - cores_per_processor=cores_per_processor, - ram=ram, - appliance_id=appliance_id, - datacenter_id=datacenter_id, - rsa_key=ssh_key, - private_network_id=private_network_id, - firewall_policy_id=firewall_policy_id, - load_balancer_id=load_balancer_id, - monitoring_policy_id=monitoring_policy_id, - server_type=server_type,), hdds) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.server, - server['id'], - wait_timeout, - wait_interval) - server = oneandone_conn.get_server(server['id']) # refresh - - return 
server - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _insert_network_data(server): - for addr_data in server['ips']: - if addr_data['type'] == 'IPV6': - server['public_ipv6'] = addr_data['ip'] - elif addr_data['type'] == 'IPV4': - server['public_ipv4'] = addr_data['ip'] - return server - - -def create_server(module, oneandone_conn): - """ - Create new server - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any server was added, and a 'servers' attribute with the list of the - created servers' hostname, id and ip addresses. - """ - hostname = module.params.get('hostname') - description = module.params.get('description') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - fixed_instance_size = module.params.get('fixed_instance_size') - vcore = module.params.get('vcore') - cores_per_processor = module.params.get('cores_per_processor') - ram = module.params.get('ram') - hdds = module.params.get('hdds') - datacenter = module.params.get('datacenter') - appliance = module.params.get('appliance') - ssh_key = module.params.get('ssh_key') - private_network = module.params.get('private_network') - monitoring_policy = module.params.get('monitoring_policy') - firewall_policy = module.params.get('firewall_policy') - load_balancer = module.params.get('load_balancer') - server_type = module.params.get('server_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - _check_mode(module, False) - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - fixed_instance_size_id = None - if fixed_instance_size: - fixed_instance_size_id = get_fixed_instance_size( - oneandone_conn, - fixed_instance_size) - if fixed_instance_size_id is None: - _check_mode(module, False) - module.fail_json( - msg='fixed_instance_size %s not found.' % fixed_instance_size) - - appliance_id = get_appliance(oneandone_conn, appliance) - if appliance_id is None: - _check_mode(module, False) - module.fail_json( - msg='appliance %s not found.' % appliance) - - private_network_id = None - if private_network: - private_network_id = get_private_network( - oneandone_conn, - private_network) - if private_network_id is None: - _check_mode(module, False) - module.fail_json( - msg='private network %s not found.' % private_network) - - monitoring_policy_id = None - if monitoring_policy: - monitoring_policy_id = get_monitoring_policy( - oneandone_conn, - monitoring_policy) - if monitoring_policy_id is None: - _check_mode(module, False) - module.fail_json( - msg='monitoring policy %s not found.' % monitoring_policy) - - firewall_policy_id = None - if firewall_policy: - firewall_policy_id = get_firewall_policy( - oneandone_conn, - firewall_policy) - if firewall_policy_id is None: - _check_mode(module, False) - module.fail_json( - msg='firewall policy %s not found.' % firewall_policy) - - load_balancer_id = None - if load_balancer: - load_balancer_id = get_load_balancer( - oneandone_conn, - load_balancer) - if load_balancer_id is None: - _check_mode(module, False) - module.fail_json( - msg='load balancer %s not found.' 
% load_balancer) - - if auto_increment: - hostnames = _auto_increment_hostname(count, hostname) - descriptions = _auto_increment_description(count, description) - else: - hostnames = [hostname] * count - descriptions = [description] * count - - hdd_objs = [] - if hdds: - for hdd in hdds: - hdd_objs.append(oneandone.client.Hdd( - size=hdd['size'], - is_main=hdd['is_main'] - )) - - servers = [] - for index, name in enumerate(hostnames): - server = _create_server( - module=module, - oneandone_conn=oneandone_conn, - hostname=name, - description=descriptions[index], - fixed_instance_size_id=fixed_instance_size_id, - vcore=vcore, - cores_per_processor=cores_per_processor, - ram=ram, - hdds=hdd_objs, - datacenter_id=datacenter_id, - appliance_id=appliance_id, - ssh_key=ssh_key, - private_network_id=private_network_id, - monitoring_policy_id=monitoring_policy_id, - firewall_policy_id=firewall_policy_id, - load_balancer_id=load_balancer_id, - server_type=server_type, - wait=wait, - wait_timeout=wait_timeout, - wait_interval=wait_interval) - if server: - servers.append(server) - - changed = False - - if servers: - for server in servers: - if server: - _check_mode(module, True) - _check_mode(module, False) - servers = [_insert_network_data(_server) for _server in servers] - changed = True - - _check_mode(module, False) - - return (changed, servers) - - -def remove_server(module, oneandone_conn): - """ - Removes a server. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - - Returns a dictionary containing a 'changed' attribute indicating whether - the server was removed, and a 'removed_server' attribute with - the removed server's hostname and id. - """ - server_id = module.params.get('server') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - changed = False - removed_server = None - - server = get_server(oneandone_conn, server_id, True) - if server: - _check_mode(module, True) - try: - oneandone_conn.delete_server(server_id=server['id']) - if wait: - wait_for_resource_deletion_completion(oneandone_conn, - OneAndOneResources.server, - server['id'], - wait_timeout, - wait_interval) - changed = True - except Exception as ex: - module.fail_json( - msg="failed to terminate the server: %s" % str(ex)) - - removed_server = { - 'id': server['id'], - 'hostname': server['name'] - } - _check_mode(module, False) - - return (changed, removed_server) - - -def startstop_server(module, oneandone_conn): - """ - Starts or Stops a server. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - - Returns a dictionary with a 'changed' attribute indicating whether - anything has changed for the server as a result of this function - being run, and a 'server' attribute with basic information for - the server. - """ - state = module.params.get('state') - server_id = module.params.get('server') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - changed = False - - # Resolve server - server = get_server(oneandone_conn, server_id, True) - if server: - # Attempt to change the server state, only if it's not already there - # or on its way. 
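-        # Editor's sketch (illustrative, not part of the 1&1 API): the
-        # transition table implemented by the try block below is:
-        #   desired 'stopped' + current POWERED_ON  -> action POWER_OFF (SOFTWARE)
-        #   desired 'running' + current POWERED_OFF -> action POWER_ON  (SOFTWARE)
-        #   any other combination                   -> no API call needed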
-        try:
-            if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
-                _check_mode(module, True)
-                oneandone_conn.modify_server_status(
-                    server_id=server['id'],
-                    action='POWER_OFF',
-                    method='SOFTWARE')
-            elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
-                _check_mode(module, True)
-                oneandone_conn.modify_server_status(
-                    server_id=server['id'],
-                    action='POWER_ON',
-                    method='SOFTWARE')
-        except Exception as ex:
-            module.fail_json(
-                msg="failed to set server %s to state %s: %s" % (
-                    server_id, state, str(ex)))
-
-        _check_mode(module, False)
-
-        # Make sure the server has reached the desired state
-        if wait:
-            operation_completed = False
-            wait_timeout = time.time() + wait_timeout  # reuse wait_timeout as an absolute deadline
-            while wait_timeout > time.time():
-                time.sleep(wait_interval)
-                server = oneandone_conn.get_server(server['id'])  # refresh
-                server_state = server['status']['state']
-                if state == 'stopped' and server_state == 'POWERED_OFF':
-                    operation_completed = True
-                    break
-                if state == 'running' and server_state == 'POWERED_ON':
-                    operation_completed = True
-                    break
-            if not operation_completed:
-                module.fail_json(
-                    msg="Timeout waiting for server %s to reach state %s" % (
-                        server_id, state))
-
-        changed = True
-        server = _insert_network_data(server)
-
-    _check_mode(module, False)
-
-    return (changed, server)
-
-
-def _auto_increment_hostname(count, hostname):
-    """
-    Allow a custom incremental count in the hostname when defined with the
-    string formatting (%) operator. Otherwise, increment using name-1,
-    name-2, name-3, and so forth (the %01d default does not zero-pad).
-    """
-    if '%' not in hostname:
-        hostname = "%s-%%01d" % hostname
-
-    return [
-        hostname % i
-        for i in xrange(1, count + 1)
-    ]
-
-
-def _auto_increment_description(count, description):
-    """
-    Allow the incremental count in the description when defined with the
-    string formatting (%) operator. Otherwise, repeat the same description.
- """ - if '%' in description: - return [ - description % i - for i in xrange(1, count + 1) - ] - else: - return [description] * count - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', - default=os.environ.get('ONEANDONE_AUTH_TOKEN'), - no_log=True), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - hostname=dict(type='str'), - description=dict(type='str'), - appliance=dict(type='str'), - fixed_instance_size=dict(type='str'), - vcore=dict(type='int'), - cores_per_processor=dict(type='int'), - ram=dict(type='float'), - hdds=dict(type='list', elements='dict'), - count=dict(type='int', default=1), - ssh_key=dict(type='raw', no_log=False), - auto_increment=dict(type='bool', default=True), - server=dict(type='str'), - datacenter=dict( - choices=DATACENTERS, - default='US'), - private_network=dict(type='str'), - firewall_policy=dict(type='str'), - load_balancer=dict(type='str'), - monitoring_policy=dict(type='str'), - server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']), - ), - supports_check_mode=True, - mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'], - ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],), - required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],) - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='The "auth_token" parameter or ' + - 'ONEANDONE_AUTH_TOKEN environment variable is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('server'): - module.fail_json( - msg="'server' parameter is required for deleting a server.") - try: - (changed, servers) = remove_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state in ('running', 'stopped'): - if not module.params.get('server'): - module.fail_json( - msg="'server' parameter is required for starting/stopping a server.") - try: - (changed, servers) = startstop_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == 'present': - for param in ('hostname', - 'appliance', - 'datacenter'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for new server." 
% param) - try: - (changed, servers) = create_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, servers=servers) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/online/online_server_info.py b/plugins/modules/cloud/online/online_server_info.py deleted file mode 100644 index f33a44d30f..0000000000 --- a/plugins/modules/cloud/online/online_server_info.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: online_server_info -short_description: Gather information about Online servers. -description: - - Gather information about the servers. - - U(https://www.online.net/en/dedicated-server) -author: - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.online - -''' - -EXAMPLES = r''' -- name: Gather Online server information - community.general.online_server_info: - api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' - register: result - -- ansible.builtin.debug: - msg: "{{ result.online_server_info }}" -''' - -RETURN = r''' -online_server_info: - description: - - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." - returned: success - type: list - elements: dict - sample: - "online_server_info": [ - { - "abuse": "abuse@example.com", - "anti_ddos": false, - "bmc": { - "session_key": null - }, - "boot_mode": "normal", - "contacts": { - "owner": "foobar", - "tech": "foobar" - }, - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "drive_arrays": [ - { - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "raid_controller": { - "$ref": "/api/v1/server/hardware/raidController/9910" - }, - "raid_level": "RAID1" - } - ], - "hardware_watch": true, - "hostname": "sd-42", - "id": 42, - "ip": [ - { - "address": "195.154.172.149", - "mac": "28:92:4a:33:5e:c6", - "reverse": "195-154-172-149.rev.poneytelecom.eu.", - "switch_port_state": "up", - "type": "public" - }, - { - "address": "10.90.53.212", - "mac": "28:92:4a:33:5e:c7", - "reverse": null, - "switch_port_state": "up", - "type": "private" - } - ], - "last_reboot": "2018-08-23T08:32:03.000Z", - "location": { - "block": "A", - "datacenter": "DC3", - "position": 19, - "rack": "A23", - "room": "4 4-4" - }, - "network": { - "ip": [ - "195.154.172.149" - ], - "ipfo": [], - "private": [ - "10.90.53.212" - ] - }, - "offer": "Pro-1-S-SATA", - "os": { - "name": "FreeBSD", - "version": "11.1-RELEASE" - }, - "power": "ON", - "proactive_monitoring": false, - "raid_controllers": [ - { - "$ref": "/api/v1/server/hardware/raidController/9910" - } - ], - "support": "Basic service level" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineServerInfo(Online): - - def __init__(self, module): - super(OnlineServerInfo, self).__init__(module) - self.name = 'api/v1/server' - - def _get_server_detail(self, server_path): - try: - return self.get(path=server_path).json - except OnlineException as exc: - self.module.fail_json(msg="A 
problem occurred while fetching: %s (%s)" % (server_path, exc)) - - def all_detailed_servers(self): - servers_api_path = self.get_resources() - - server_data = ( - self._get_server_detail(server_api_path) - for server_api_path in servers_api_path - ) - - return [s for s in server_data if s is not None] - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - servers_info = OnlineServerInfo(module).all_detailed_servers() - module.exit_json( - online_server_info=servers_info - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/online/online_user_info.py b/plugins/modules/cloud/online/online_user_info.py deleted file mode 100644 index 4125ccb63d..0000000000 --- a/plugins/modules/cloud/online/online_user_info.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' -module: online_user_info -short_description: Gather information about Online user. -description: - - Gather information about the user. -author: - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.online -''' - -EXAMPLES = r''' -- name: Gather Online user info - community.general.online_user_info: - register: result - -- ansible.builtin.debug: - msg: "{{ result.online_user_info }}" -''' - -RETURN = r''' -online_user_info: - description: - - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." - returned: success - type: dict - sample: - "online_user_info": { - "company": "foobar LLC", - "email": "foobar@example.com", - "first_name": "foo", - "id": 42, - "last_name": "bar", - "login": "foobar" - } -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineUserInfo(Online): - - def __init__(self, module): - super(OnlineUserInfo, self).__init__(module) - self.name = 'api/v1/user' - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - module.exit_json( - online_user_info=OnlineUserInfo(module).get_resources() - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_host.py b/plugins/modules/cloud/opennebula/one_host.py deleted file mode 100644 index f205a40a2c..0000000000 --- a/plugins/modules/cloud/opennebula/one_host.py +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright 2018 www.privaz.io Valletech AB -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: one_host - -short_description: Manages OpenNebula Hosts - - -requirements: - - pyone - -description: - - "Manages OpenNebula Hosts" - -options: - name: - description: - - Hostname of the machine to manage. - required: true - type: str - state: - description: - - Takes the host to the desired lifecycle state. 
-      - If C(absent) the host will be deleted from the cluster.
-      - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
-      - If C(enabled) the host is fully operational.
-      - If C(disabled) the host is disabled, for example to perform maintenance operations.
-      - If C(offline) the host is totally offline.
-    choices:
-      - absent
-      - present
-      - enabled
-      - disabled
-      - offline
-    default: present
-    type: str
-  im_mad_name:
-    description:
-      - The name of the information manager; these values are taken from oned.conf with the tag name IM_MAD (name).
-    default: kvm
-    type: str
-  vmm_mad_name:
-    description:
-      - The name of the virtual machine manager; these values are taken from oned.conf with the tag name VM_MAD (name).
-    default: kvm
-    type: str
-  cluster_id:
-    description:
-      - The cluster ID.
-    default: 0
-    type: int
-  cluster_name:
-    description:
-      - The cluster specified by name.
-    type: str
-  labels:
-    description:
-      - The labels for this host.
-    type: list
-    elements: str
-  template:
-    description:
-      - The template or attribute changes to merge into the host template.
-    aliases:
-      - attributes
-    type: dict
-
-extends_documentation_fragment:
-- community.general.opennebula
-
-
-author:
-    - Rafael del Valle (@rvalle)
-'''
-
-EXAMPLES = '''
-- name: Create a new host in OpenNebula
-  community.general.one_host:
-    name: host1
-    cluster_id: 1
-    api_url: http://127.0.0.1:2633/RPC2
-
-- name: Create a host and adjust its template
-  community.general.one_host:
-    name: host2
-    cluster_name: default
-    template:
-      LABELS:
-        - gold
-        - ssd
-      RESERVED_CPU: -100
-'''
-
-# TODO: pending setting guidelines on returned values
-RETURN = '''
-'''
-
-# TODO: Documentation on valid state transitions is required to properly implement all valid cases
-# TODO: To be coherent with CLI this module should also provide "flush" functionality
-
-from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
-
-try:
-    from pyone import HOST_STATES, HOST_STATUS
-except ImportError:
-    pass  # handled at module utils
-
-
-# Pseudo definitions...
-
-HOST_ABSENT = -99  # the host is absent (special case defined by this module)
-
-
-class HostModule(OpenNebulaModule):
-
-    def __init__(self):
-
-        argument_spec = dict(
-            name=dict(type='str', required=True),
-            state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
-            im_mad_name=dict(type='str', default="kvm"),
-            vmm_mad_name=dict(type='str', default="kvm"),
-            cluster_id=dict(type='int', default=0),
-            cluster_name=dict(type='str'),
-            labels=dict(type='list', elements='str'),
-            template=dict(type='dict', aliases=['attributes']),
-        )
-
-        mutually_exclusive = [
-            ['cluster_id', 'cluster_name']
-        ]
-
-        OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
-
-    def allocate_host(self):
-        """
-        Creates a host entry in OpenNebula.
-        Returns: True on success, fails the module otherwise.
-        """
-        if not self.one.host.allocate(self.get_parameter('name'),
-                                      self.get_parameter('vmm_mad_name'),
-                                      self.get_parameter('im_mad_name'),
-                                      self.get_parameter('cluster_id')):
-            self.fail(msg="could not allocate host")
-        else:
-            self.result['changed'] = True
-        return True
-
-    def wait_for_host_state(self, host, target_states):
-        """
-        Utility method that waits for a host state.
- Args: - host: - target_states: - - """ - return self.wait_for_state('host', - lambda: self.one.host.info(host.ID).STATE, - lambda s: HOST_STATES(s).name, target_states, - invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]) - - def run(self, one, module, result): - - # Get the list of hosts - host_name = self.get_parameter("name") - host = self.get_host_by_name(host_name) - - # manage host state - desired_state = self.get_parameter('state') - if bool(host): - current_state = host.STATE - current_state_name = HOST_STATES(host.STATE).name - else: - current_state = HOST_ABSENT - current_state_name = "ABSENT" - - # apply properties - if desired_state == 'present': - if current_state == HOST_ABSENT: - self.allocate_host() - host = self.get_host_by_name(host_name) - self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]: - self.fail(msg="invalid host state %s" % current_state_name) - - elif desired_state == 'enabled': - if current_state == HOST_ABSENT: - self.allocate_host() - host = self.get_host_by_name(host_name) - self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.ENABLED): - self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - result['changed'] = True - else: - self.fail(msg="could not enable host") - elif current_state in [HOST_STATES.MONITORED]: - pass - else: - self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name) - - elif desired_state == 'disabled': - if current_state == HOST_ABSENT: - self.fail(msg='absent host cannot be put in disabled state') - elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.DISABLED): - self.wait_for_host_state(host, [HOST_STATES.DISABLED]) - result['changed'] = True - else: - self.fail(msg="could not disable host") - elif current_state in [HOST_STATES.DISABLED]: - pass - else: - self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name) - - elif desired_state == 'offline': - if current_state == HOST_ABSENT: - self.fail(msg='absent host cannot be placed in offline state') - elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: - if one.host.status(host.ID, HOST_STATUS.OFFLINE): - self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) - result['changed'] = True - else: - self.fail(msg="could not set host offline") - elif current_state in [HOST_STATES.OFFLINE]: - pass - else: - self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name) - - elif desired_state == 'absent': - if current_state != HOST_ABSENT: - if one.host.delete(host.ID): - result['changed'] = True - else: - self.fail(msg="could not delete host from cluster") - - # if we reach this point we can assume that the host was taken to the desired state - - if desired_state != "absent": - # manipulate or modify the template - desired_template_changes = self.get_parameter('template') - - if desired_template_changes is None: - desired_template_changes = dict() - - # complete the template with specific ansible parameters - if self.is_parameter('labels'): - desired_template_changes['LABELS'] = self.get_parameter('labels') - - if self.requires_template_update(host.TEMPLATE, desired_template_changes): - # setup the root element so that pyone will generate XML instead of attribute vector - 
            desired_template_changes = {"TEMPLATE": desired_template_changes}
-            if one.host.update(host.ID, desired_template_changes, 1):  # merge the template
-                result['changed'] = True
-            else:
-                self.fail(msg="failed to update the host template")
-
-            # the cluster
-            if host.CLUSTER_ID != self.get_parameter('cluster_id'):
-                if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
-                    result['changed'] = True
-                else:
-                    self.fail(msg="failed to update the host cluster")
-
-        # return
-        self.exit()
-
-
-def main():
-    HostModule().run_module()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/opennebula/one_image.py b/plugins/modules/cloud/opennebula/one_image.py
deleted file mode 100644
index 5a80306fd1..0000000000
--- a/plugins/modules/cloud/opennebula/one_image.py
+++ /dev/null
@@ -1,423 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-"""
-(c) 2018, Milan Ilic
-
-This file is part of Ansible
-
-Ansible is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_image
-short_description: Manages OpenNebula images
-description:
-  - Manages OpenNebula images
-requirements:
-  - pyone
-options:
-  api_url:
-    description:
-      - URL of the OpenNebula RPC server.
-      - It is recommended to use HTTPS so that the username/password are not
-        transferred over the network unencrypted.
-      - If not set then the value of the C(ONE_URL) environment variable is used.
-    type: str
-  api_username:
-    description:
-      - Name of the user to login into the OpenNebula RPC server. If not set
-        then the value of the C(ONE_USERNAME) environment variable is used.
-    type: str
-  api_password:
-    description:
-      - Password of the user to login into OpenNebula RPC server. If not set
-        then the value of the C(ONE_PASSWORD) environment variable is used.
-    type: str
-  id:
-    description:
-      - The C(id) of the image you would like to manage.
-    type: int
-  name:
-    description:
-      - The C(name) of the image you would like to manage.
-    type: str
-  state:
-    description:
-      - C(present) - state that is used to manage the image.
-      - C(absent) - delete the image.
-      - C(cloned) - clone the image.
-      - C(renamed) - rename the image to the C(new_name).
-    choices: ["present", "absent", "cloned", "renamed"]
-    default: present
-    type: str
-  enabled:
-    description:
-      - Whether the image should be enabled or disabled.
-    type: bool
-  new_name:
-    description:
-      - A name that will be assigned to the existing or new image.
-      - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'.
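-      # Editor's note (illustrative): cloning image 'app1' with no new_name
-      # set produces 'Copy of app1' (see clone_image below).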
- type: str -author: - - "Milan Ilic (@ilicmilan)" -''' - -EXAMPLES = ''' -- name: Fetch the IMAGE by id - community.general.one_image: - id: 45 - register: result - -- name: Print the IMAGE properties - ansible.builtin.debug: - var: result - -- name: Rename existing IMAGE - community.general.one_image: - id: 34 - state: renamed - new_name: bar-image - -- name: Disable the IMAGE by id - community.general.one_image: - id: 37 - enabled: no - -- name: Enable the IMAGE by name - community.general.one_image: - name: bar-image - enabled: yes - -- name: Clone the IMAGE by name - community.general.one_image: - name: bar-image - state: cloned - new_name: bar-image-clone - register: result - -- name: Delete the IMAGE by id - community.general.one_image: - id: '{{ result.id }}' - state: absent -''' - -RETURN = ''' -id: - description: image id - type: int - returned: success - sample: 153 -name: - description: image name - type: str - returned: success - sample: app1 -group_id: - description: image's group id - type: int - returned: success - sample: 1 -group_name: - description: image's group name - type: str - returned: success - sample: one-users -owner_id: - description: image's owner id - type: int - returned: success - sample: 143 -owner_name: - description: image's owner name - type: str - returned: success - sample: ansible-test -state: - description: state of image instance - type: str - returned: success - sample: READY -used: - description: is image in use - type: bool - returned: success - sample: true -running_vms: - description: count of running vms that use this image - type: int - returned: success - sample: 7 -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_image(module, client, predicate): - # Filter -2 means fetch all images user can Use - pool = client.imagepool.info(-2, -1, -1, -1) - - for image in pool.IMAGE: - if predicate(image): - return image - - return None - - -def get_image_by_name(module, client, image_name): - return get_image(module, client, lambda image: (image.NAME == image_name)) - - -def get_image_by_id(module, client, image_id): - return get_image(module, client, lambda image: (image.ID == image_id)) - - -def get_image_instance(module, client, requested_id, requested_name): - if requested_id: - return get_image_by_id(module, client, requested_id) - else: - return get_image_by_name(module, client, requested_name) - - -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] - - -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - - return info - - -def wait_for_state(module, client, image_id, wait_timeout, state_predicate): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - image = client.image.info(image_id) - state = image.STATE - - if state_predicate(state): - return image - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired!") - - -def wait_for_ready(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) - - -def wait_for_delete(module, client, image_id, 
wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) - - -def enable_image(module, client, image, enable): - image = client.image.info(image.ID) - changed = False - - state = image.STATE - - if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: - if enable: - module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") - else: - module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - - if ((enable and state != IMAGE_STATES.index('READY')) or - (not enable and state != IMAGE_STATES.index('DISABLED'))): - changed = True - - if changed and not module.check_mode: - client.image.enable(image.ID, enable) - - result = get_image_info(image) - result['changed'] = changed - - return result - - -def clone_image(module, client, image, new_name): - if new_name is None: - new_name = "Copy of " + image.NAME - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - result = get_image_info(tmp_image) - result['changed'] = False - return result - - if image.STATE == IMAGE_STATES.index('DISABLED'): - module.fail_json(msg="Cannot clone DISABLED image") - - if not module.check_mode: - new_id = client.image.clone(image.ID, new_name) - wait_for_ready(module, client, new_id) - image = client.image.info(new_id) - - result = get_image_info(image) - result['changed'] = True - - return result - - -def rename_image(module, client, image, new_name): - if new_name is None: - module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") - - if new_name == image.NAME: - result = get_image_info(image) - result['changed'] = False - return result - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) - - if not module.check_mode: - client.image.rename(image.ID, new_name) - - result = get_image_info(image) - result['changed'] = True - return result - - -def delete_image(module, client, image): - - if not image: - return {'changed': False} - - if image.RUNNING_VMS > 0: - module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") - - if not module.check_mode: - client.image.delete(image.ID) - wait_for_delete(module, client, image.ID) - - return {'changed': True} - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "id": {"required": False, "type": "int"}, - "name": {"required": False, "type": "str"}, - "state": { - "default": "present", - "choices": ['present', 'absent', 'cloned', 'renamed'], - "type": "str" - }, - "enabled": {"required": False, "type": "bool"}, - "new_name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['id', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - id = params.get('id') - name = params.get('name') - state = params.get('state') - enabled = params.get('enabled') - new_name = params.get('new_name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - result = {} - - if not id and state == 'renamed': - module.fail_json(msg="Option 'id' is required when the state is 'renamed'") - - image = get_image_instance(module, client, id, name) - if not image and state != 'absent': - if id: - module.fail_json(msg="There is no image with id=" + str(id)) - else: - module.fail_json(msg="There is no image with name=" + name) - - if state == 'absent': - result = delete_image(module, client, image) - else: - result = get_image_info(image) - changed = False - result['changed'] = False - - if enabled is not None: - result = enable_image(module, client, image, enabled) - if state == "cloned": - result = clone_image(module, client, image, new_name) - elif state == "renamed": - result = rename_image(module, client, image, new_name) - - changed = changed or result['changed'] - result['changed'] = changed - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_image_info.py b/plugins/modules/cloud/opennebula/one_image_info.py deleted file mode 100644 index e03b8ad724..0000000000 --- a/plugins/modules/cloud/opennebula/one_image_info.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2018, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_image_info
-short_description: Gather information on OpenNebula images
-description:
-  - Gather information on OpenNebula images.
-  - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
-requirements:
-  - pyone
-options:
-  api_url:
-    description:
-      - URL of the OpenNebula RPC server.
-      - It is recommended to use HTTPS so that the username/password are not
-        transferred over the network unencrypted.
-      - If not set then the value of the C(ONE_URL) environment variable is used.
-    type: str
-  api_username:
-    description:
-      - Name of the user to login into the OpenNebula RPC server. If not set
-        then the value of the C(ONE_USERNAME) environment variable is used.
-    type: str
-  api_password:
-    description:
-      - Password of the user to login into OpenNebula RPC server. If not set
-        then the value of the C(ONE_PASSWORD) environment variable is used.
-    type: str
-  ids:
-    description:
-      - A list of image IDs whose facts you want to gather.
-    aliases: ['id']
-    type: list
-    elements: str
-  name:
-    description:
-      - The C(name) of the image whose facts will be gathered.
-      - If the C(name) begins with '~' the C(name) will be used as a regex pattern,
-        which restricts the list of images (whose facts will be returned) to those whose names match the specified regex.
-      - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
-      - See examples for more details.
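-      # Editor's note (illustrative): '~app-image-.*' matches app-image-01,
-      # and '~*foo-.*' also matches FOO-bar, since such patterns are compiled
-      # with re.IGNORECASE (see get_images_by_name below).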
- type: str -author: - - "Milan Ilic (@ilicmilan)" - - "Jan Meerkamp (@meerkampdvv)" -''' - -EXAMPLES = ''' -- name: Gather facts about all images - community.general.one_image_info: - register: result - -- name: Print all images facts - ansible.builtin.debug: - msg: result - -- name: Gather facts about an image using ID - community.general.one_image_info: - ids: - - 123 - -- name: Gather facts about an image using the name - community.general.one_image_info: - name: 'foo-image' - register: foo_image - -- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*' - community.general.one_image_info: - name: '~app-image-.*' - register: app_images - -- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases - community.general.one_image_info: - name: '~*foo-image-.*' - register: foo_images -''' - -RETURN = ''' -images: - description: A list of images info - type: complex - returned: success - contains: - id: - description: image id - type: int - sample: 153 - name: - description: image name - type: str - sample: app1 - group_id: - description: image's group id - type: int - sample: 1 - group_name: - description: image's group name - type: str - sample: one-users - owner_id: - description: image's owner id - type: int - sample: 143 - owner_name: - description: image's owner name - type: str - sample: ansible-test - state: - description: state of image instance - type: str - sample: READY - used: - description: is image in use - type: bool - sample: true - running_vms: - description: count of running vms that use this image - type: int - sample: 7 -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_all_images(client): - pool = client.imagepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all images user can Use - - return pool - - -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] - - -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - return info - - -def get_images_by_ids(module, client, ids): - images = [] - pool = get_all_images(client) - - for image in pool.IMAGE: - if str(image.ID) in ids: - images.append(image) - ids.remove(str(image.ID)) - if len(ids) == 0: - break - - if len(ids) > 0: - module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) - - return images - - -def get_images_by_name(module, client, name_pattern): - - images = [] - pattern = None - - pool = get_all_images(client) - - if name_pattern.startswith('~'): - import re - if name_pattern[1] == '*': - pattern = re.compile(name_pattern[2:], re.IGNORECASE) - else: - pattern = re.compile(name_pattern[1:]) - - for image in pool.IMAGE: - if pattern is not None: - if pattern.match(image.NAME): - images.append(image) - elif name_pattern == image.NAME: - images.append(image) - break - - # if the specific name is indicated - if pattern is None and len(images) == 0: - module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - - return images - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') 
- password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, - "name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['ids', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - ids = params.get('ids') - name = params.get('name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - if ids: - images = get_images_by_ids(module, client, ids) - elif name: - images = get_images_by_name(module, client, name) - else: - images = get_all_images(client).IMAGE - - result = { - 'images': [get_image_info(image) for image in images], - } - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_service.py b/plugins/modules/cloud/opennebula/one_service.py deleted file mode 100644 index 68f8398f36..0000000000 --- a/plugins/modules/cloud/opennebula/one_service.py +++ /dev/null @@ -1,768 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2017, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: one_service -short_description: Deploy and manage OpenNebula services -description: - - Manage OpenNebula services -options: - api_url: - description: - - URL of the OpenNebula OneFlow API server. - - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. - - If not set then the value of the ONEFLOW_URL environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used. 
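-    # Editor's note (illustrative): every call this module makes uses HTTP
-    # basic auth against the OneFlow endpoint, for example
-    # GET {api_url}/service (see get_all_services below).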
- type: str - template_name: - description: - - Name of service template to use to create a new instance of a service - type: str - template_id: - description: - - ID of a service template to use to create a new instance of a service - type: int - service_id: - description: - - ID of a service instance that you would like to manage - type: int - service_name: - description: - - Name of a service instance that you would like to manage - type: str - unique: - description: - - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when - - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below. - type: bool - default: no - state: - description: - - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name). - - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name). - choices: ["present", "absent"] - default: present - type: str - mode: - description: - - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. - type: str - owner_id: - description: - - ID of the user which will be set as the owner of the service - type: int - group_id: - description: - - ID of the group which will be set as the group of the service - type: int - wait: - description: - - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING - type: bool - default: no - wait_timeout: - description: - - How long before wait gives up, in seconds - default: 300 - type: int - custom_attrs: - description: - - Dictionary of key/value custom attributes which will be used when instantiating a new service. 
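-    # Editor's note (illustrative): custom_attrs values are coerced to
-    # strings before the instantiate call, so C(public_network_id: 21) is
-    # sent as "21" (see create_service below).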
- default: {} - type: dict - role: - description: - - Name of the role whose cardinality should be changed - type: str - cardinality: - description: - - Number of VMs for the specified role - type: int - force: - description: - - Force the new cardinality even if it is outside the limits - type: bool - default: no -author: - - "Milan Ilic (@ilicmilan)" -''' - -EXAMPLES = ''' -- name: Instantiate a new service - community.general.one_service: - template_id: 90 - register: result - -- name: Print service properties - ansible.builtin.debug: - msg: result - -- name: Instantiate a new service with specified service_name, service group and mode - community.general.one_service: - template_name: 'app1_template' - service_name: 'app1' - group_id: 1 - mode: '660' - -- name: Instantiate a new service with template_id and pass custom_attrs dict - community.general.one_service: - template_id: 90 - custom_attrs: - public_network_id: 21 - private_network_id: 26 - -- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing - community.general.one_service: - template_id: 53 - service_name: 'foo' - unique: yes - -- name: Delete a service by ID - community.general.one_service: - service_id: 153 - state: absent - -- name: Get service info - community.general.one_service: - service_id: 153 - register: service_info - -- name: Change service owner, group and mode - community.general.one_service: - service_name: 'app2' - owner_id: 34 - group_id: 113 - mode: '600' - -- name: Instantiate service and wait for it to become RUNNING - community.general.one_service: - template_id: 43 - service_name: 'foo1' - -- name: Wait service to become RUNNING - community.general.one_service: - service_id: 112 - wait: yes - -- name: Change role cardinality - community.general.one_service: - service_id: 153 - role: bar - cardinality: 5 - -- name: Change role cardinality and wait for it to be applied - community.general.one_service: - service_id: 112 - role: foo - cardinality: 7 - wait: yes -''' - -RETURN = ''' -service_id: - description: service id - type: int - returned: success - sample: 153 -service_name: - description: service name - type: str - returned: success - sample: app1 -group_id: - description: service's group id - type: int - returned: success - sample: 1 -group_name: - description: service's group name - type: str - returned: success - sample: one-users -owner_id: - description: service's owner id - type: int - returned: success - sample: 143 -owner_name: - description: service's owner name - type: str - returned: success - sample: ansible-test -state: - description: state of service instance - type: str - returned: success - sample: RUNNING -mode: - description: service's mode - type: int - returned: success - sample: 660 -roles: - description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids - type: list - returned: success - sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]}, - {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]' -''' - -import os -import sys -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import open_url - -STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE", - "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN") - - -def get_all_templates(module, auth): - try: - all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, 
url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg=str(e)) - - return module.from_json(all_templates.read()) - - -def get_template(module, auth, pred): - all_templates_dict = get_all_templates(module, auth) - - found = 0 - found_template = None - template_name = '' - - if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]: - for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]: - if pred(template): - found = found + 1 - found_template = template - template_name = template["NAME"] - - if found <= 0: - return None - elif found > 1: - module.fail_json(msg="There is no template with unique name: " + template_name) - else: - return found_template - - -def get_all_services(module, auth): - try: - response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg=str(e)) - - return module.from_json(response.read()) - - -def get_service(module, auth, pred): - all_services_dict = get_all_services(module, auth) - - found = 0 - found_service = None - service_name = '' - - if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]: - for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]: - if pred(service): - found = found + 1 - found_service = service - service_name = service["NAME"] - - # fail if there are more services with same name - if found > 1: - module.fail_json(msg="There are multiple services with a name: '" + - service_name + "'. You have to use a unique service name or use 'service_id' instead.") - elif found <= 0: - return None - else: - return found_service - - -def get_service_by_id(module, auth, service_id): - return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None - - -def get_service_by_name(module, auth, service_name): - return get_service(module, auth, lambda service: (service["NAME"] == service_name)) - - -def get_service_info(module, auth, service): - - result = { - "service_id": int(service["ID"]), - "service_name": service["NAME"], - "group_id": int(service["GID"]), - "group_name": service["GNAME"], - "owner_id": int(service["UID"]), - "owner_name": service["UNAME"], - "state": STATES[service["TEMPLATE"]["BODY"]["state"]] - } - - roles_status = service["TEMPLATE"]["BODY"]["roles"] - roles = [] - for role in roles_status: - nodes_ids = [] - if "nodes" in role: - for node in role["nodes"]: - nodes_ids.append(node["deploy_id"]) - roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids}) - - result["roles"] = roles - result["mode"] = int(parse_service_permissions(service)) - - return result - - -def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): - # make sure that the values in custom_attrs dict are strings - custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items()) - - data = { - "action": { - "perform": "instantiate", - "params": { - "merge_template": { - "custom_attrs_values": custom_attrs_with_str, - "name": service_name - } - } - } - } - - try: - response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST", - data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg=str(e)) - - service_result = 
module.from_json(response.read())["DOCUMENT"] - - return service_result - - -def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - try: - status_result = open_url(auth.url + "/service/" + str(service_id), method="GET", - force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg="Request for service status has failed. Error message: " + str(e)) - - status_result = module.from_json(status_result.read()) - service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"] - - if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]: - return status_result["DOCUMENT"] - elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]: - log_message = '' - for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]: - if log_info["severity"] == "E": - log_message = log_message + log_info["message"] - break - - module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message) - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired") - - -def change_service_permissions(module, auth, service_id, permissions): - - data = { - "action": { - "perform": "chmod", - "params": {"octet": permissions} - } - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def change_service_owner(module, auth, service_id, owner_id): - data = { - "action": { - "perform": "chown", - "params": {"owner_id": owner_id} - } - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def change_service_group(module, auth, service_id, group_id): - - data = { - "action": { - "perform": "chgrp", - "params": {"group_id": group_id} - } - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def change_role_cardinality(module, auth, service_id, role, cardinality, force): - - data = { - "cardinality": cardinality, - "force": force - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT", - force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - if status_result.getcode() != 204: - module.fail_json(msg="Failed to change cardinality for role: " + role + ". 
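The waiting logic above is a plain poll-sleep loop: check once per second, succeed on RUNNING or COOLDOWN, fail fast on any state that is not transient, and give up after `wait_timeout` seconds. A condensed sketch of that strategy, where `fetch_state` is a hypothetical callable returning one of the STATES names:

```python
import time

TRANSIENT = {"PENDING", "DEPLOYING", "SCALING"}
READY = {"RUNNING", "COOLDOWN"}

def wait_until_ready(fetch_state, wait_timeout=300):
    deadline = time.time() + wait_timeout
    while time.time() < deadline:
        state = fetch_state()
        if state in READY:
            return state
        if state not in TRANSIENT:
            # mirrors the module: anything else is treated as a terminal failure
            raise RuntimeError("deployment failed in state %s" % state)
        time.sleep(1)
    raise TimeoutError("wait timeout has expired")
```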
Return code: " + str(status_result.getcode())) - - -def check_change_service_owner(module, service, owner_id): - old_owner_id = int(service["UID"]) - - return old_owner_id != owner_id - - -def check_change_service_group(module, service, group_id): - old_group_id = int(service["GID"]) - - return old_group_id != group_id - - -def parse_service_permissions(service): - perm_dict = service["PERMISSIONS"] - ''' - This is the structure of the 'PERMISSIONS' dictionary: - - "PERMISSIONS": { - "OWNER_U": "1", - "OWNER_M": "1", - "OWNER_A": "0", - "GROUP_U": "0", - "GROUP_M": "0", - "GROUP_A": "0", - "OTHER_U": "0", - "OTHER_M": "0", - "OTHER_A": "0" - } - ''' - - owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) - group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) - other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"]) - - permissions = str(owner_octal) + str(group_octal) + str(other_octal) - - return permissions - - -def check_change_service_permissions(module, service, permissions): - old_permissions = parse_service_permissions(service) - - return old_permissions != permissions - - -def check_change_role_cardinality(module, service, role_name, cardinality): - roles_list = service["TEMPLATE"]["BODY"]["roles"] - - for role in roles_list: - if role["name"] == role_name: - return int(role["cardinality"]) != cardinality - - module.fail_json(msg="There is no role with name: " + role_name) - - -def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout): - if not service_name: - service_name = '' - changed = False - service = None - - if unique: - service = get_service_by_name(module, auth, service_name) - - if not service: - if not module.check_mode: - service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) - changed = True - - # if check_mode=true and there would be changes, service doesn't exist and we can not get it - if module.check_mode and changed: - return {"changed": True} - - result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait, - wait_timeout=wait_timeout, permissions=permissions, service=service) - - if result["changed"]: - changed = True - - result["changed"] = changed - - return result - - -def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None, - role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None): - - changed = False - - if not service: - service = get_service_by_id(module, auth, service_id) - else: - service_id = service["ID"] - - if not service: - module.fail_json(msg="There is no service with id: " + str(service_id)) - - if owner_id: - if check_change_service_owner(module, service, owner_id): - if not module.check_mode: - change_service_owner(module, auth, service_id, owner_id) - changed = True - if group_id: - if check_change_service_group(module, service, group_id): - if not module.check_mode: - change_service_group(module, auth, service_id, group_id) - changed = True - if permissions: - if check_change_service_permissions(module, service, permissions): - if not module.check_mode: - change_service_permissions(module, auth, service_id, permissions) - changed = True - - if role: - if check_change_role_cardinality(module, service, role, cardinality): - if not module.check_mode: - 
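The `PERMISSIONS` structure documented in `parse_service_permissions()` above maps directly onto a three-digit octal mode: within each owner/group/other triplet, USE counts as 4, MANAGE as 2 and ADMIN as 1. A standalone sketch of the same conversion:

```python
def permissions_to_octal(perm):
    # perm is the PERMISSIONS dict shown above, e.g. {"OWNER_U": "1", ...}
    digits = []
    for who in ("OWNER", "GROUP", "OTHER"):
        digit = (int(perm[who + "_U"]) * 4
                 + int(perm[who + "_M"]) * 2
                 + int(perm[who + "_A"]))
        digits.append(str(digit))
    return "".join(digits)

# Example: USE+MANAGE for owner and group, nothing for others -> "660"
```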
change_role_cardinality(module, auth, service_id, role, cardinality, force) - changed = True - - if wait and not module.check_mode: - service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout) - - # if something has changed, fetch service info again - if changed: - service = get_service_by_id(module, auth, service_id) - - service_info = get_service_info(module, auth, service) - service_info["changed"] = changed - - return service_info - - -def delete_service(module, auth, service_id): - service = get_service_by_id(module, auth, service_id) - if not service: - return {"changed": False} - - service_info = get_service_info(module, auth, service) - - service_info["changed"] = True - - if module.check_mode: - return service_info - - try: - result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg="Service deletion has failed. Error message: " + str(e)) - - return service_info - - -def get_template_by_name(module, auth, template_name): - return get_template(module, auth, lambda template: (template["NAME"] == template_name)) - - -def get_template_by_id(module, auth, template_id): - return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None - - -def get_template_id(module, auth, requested_id, requested_name): - template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name) - - if template: - return template["ID"] - - return None - - -def get_service_id_by_name(module, auth, service_name): - service = get_service_by_name(module, auth, service_name) - - if service: - return service["ID"] - - return None - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONEFLOW_URL') - - if not username: - username = os.environ.get('ONEFLOW_USERNAME') - - if not password: - password = os.environ.get('ONEFLOW_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'user', 'password')) - - return auth_params(url=url, user=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "service_name": {"required": False, "type": "str"}, - "service_id": {"required": False, "type": "int"}, - "template_name": {"required": False, "type": "str"}, - "template_id": {"required": False, "type": "int"}, - "state": { - "default": "present", - "choices": ['present', 'absent'], - "type": "str" - }, - "mode": {"required": False, "type": "str"}, - "owner_id": {"required": False, "type": "int"}, - "group_id": {"required": False, "type": "int"}, - "unique": {"default": False, "type": "bool"}, - "wait": {"default": False, "type": "bool"}, - "wait_timeout": {"default": 300, "type": "int"}, - "custom_attrs": {"default": {}, "type": "dict"}, - "role": {"required": False, "type": "str"}, - "cardinality": {"required": False, "type": "int"}, - "force": {"default": False, "type": "bool"} - } - - module = AnsibleModule(argument_spec=fields, 
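`get_connection_info()` above resolves connection settings in a fixed order: explicit module parameters first, then the `ONEFLOW_*` environment variables, and a hard failure if anything is still missing. The same rule as a standalone sketch:

```python
import os
from collections import namedtuple

Auth = namedtuple("Auth", ("url", "user", "password"))

def resolve_auth(api_url=None, api_username=None, api_password=None):
    # module parameters win; environment variables are the fallback
    url = api_url or os.environ.get("ONEFLOW_URL")
    user = api_username or os.environ.get("ONEFLOW_USERNAME")
    password = api_password or os.environ.get("ONEFLOW_PASSWORD")
    if not (url and user and password):
        raise ValueError("api_url, api_username and api_password must all be set")
    return Auth(url=url, user=user, password=password)
```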
- mutually_exclusive=[ - ['template_id', 'template_name', 'service_id'], - ['service_id', 'service_name'], - ['template_id', 'template_name', 'role'], - ['template_id', 'template_name', 'cardinality'], - ['service_id', 'custom_attrs'] - ], - required_together=[['role', 'cardinality']], - supports_check_mode=True) - - auth = get_connection_info(module) - params = module.params - service_name = params.get('service_name') - service_id = params.get('service_id') - - requested_template_id = params.get('template_id') - requested_template_name = params.get('template_name') - state = params.get('state') - permissions = params.get('mode') - owner_id = params.get('owner_id') - group_id = params.get('group_id') - unique = params.get('unique') - wait = params.get('wait') - wait_timeout = params.get('wait_timeout') - custom_attrs = params.get('custom_attrs') - role = params.get('role') - cardinality = params.get('cardinality') - force = params.get('force') - - template_id = None - - if requested_template_id or requested_template_name: - template_id = get_template_id(module, auth, requested_template_id, requested_template_name) - if not template_id: - if requested_template_id: - module.fail_json(msg="There is no template with template_id: " + str(requested_template_id)) - elif requested_template_name: - module.fail_json(msg="There is no template with name: " + requested_template_name) - - if unique and not service_name: - module.fail_json(msg="You cannot use unique without passing service_name!") - - if template_id and state == 'absent': - module.fail_json(msg="State absent is not valid for template") - - if template_id and state == 'present': # Instantiate a service - result = create_service_and_operation(module, auth, template_id, service_name, owner_id, - group_id, permissions, custom_attrs, unique, wait, wait_timeout) - else: - if not (service_id or service_name): - module.fail_json(msg="To manage the service at least the service id or service name should be specified!") - if custom_attrs: - module.fail_json(msg="You can only set custom_attrs when instantiate service!") - - if not service_id: - service_id = get_service_id_by_name(module, auth, service_name) - # The task should be failed when we want to manage a non-existent service identified by its name - if not service_id and state == 'present': - module.fail_json(msg="There is no service with name: " + service_name) - - if state == 'absent': - result = delete_service(module, auth, service_id) - else: - result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/cloud/opennebula/one_template.py deleted file mode 100644 index b1d2c69ccf..0000000000 --- a/plugins/modules/cloud/opennebula/one_template.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Georg Gadinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: one_template - -short_description: Manages OpenNebula templates - -version_added: 2.4.0 - -requirements: - - pyone - -description: - - "Manages OpenNebula templates." - -options: - id: - description: - - A I(id) of the template you would like to manage. 
If not set then a - - new template will be created with the given I(name). - type: int - name: - description: - - A I(name) of the template you would like to manage. If a template with - - the given name does not exist it will be created, otherwise it will be - - managed by this module. - type: str - template: - description: - - A string containing the template contents. - type: str - state: - description: - - C(present) - state that is used to manage the template. - - C(absent) - delete the template. - choices: ["present", "absent"] - default: present - type: str - -notes: - - Supports C(check_mode). Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change. - -extends_documentation_fragment: - - community.general.opennebula - -author: - - "Georg Gadinger (@nilsding)" -''' - -EXAMPLES = ''' -- name: Fetch the TEMPLATE by id - community.general.one_template: - id: 6459 - register: result - -- name: Print the TEMPLATE properties - ansible.builtin.debug: - var: result - -- name: Fetch the TEMPLATE by name - community.general.one_template: - name: tf-prd-users-workerredis-p6379a - register: result - -- name: Create a new or update an existing TEMPLATE - community.general.one_template: - name: generic-opensuse - template: | - CONTEXT = [ - HOSTNAME = "generic-opensuse" - ] - CPU = "1" - CUSTOM_ATTRIBUTE = "" - DISK = [ - CACHE = "writeback", - DEV_PREFIX = "sd", - DISCARD = "unmap", - IMAGE = "opensuse-leap-15.2", - IMAGE_UNAME = "oneadmin", - IO = "threads", - SIZE = "" ] - MEMORY = "2048" - NIC = [ - MODEL = "virtio", - NETWORK = "testnet", - NETWORK_UNAME = "oneadmin" ] - OS = [ - ARCH = "x86_64", - BOOT = "disk0" ] - SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\"" - VCPU = "2" - -- name: Delete the TEMPLATE by id - community.general.one_template: - id: 6459 - state: absent -''' - -RETURN = ''' -id: - description: template id - type: int - returned: when I(state=present) - sample: 153 -name: - description: template name - type: str - returned: when I(state=present) - sample: app1 -template: - description: the parsed template - type: dict - returned: when I(state=present) -group_id: - description: template's group id - type: int - returned: when I(state=present) - sample: 1 -group_name: - description: template's group name - type: str - returned: when I(state=present) - sample: one-users -owner_id: - description: template's owner id - type: int - returned: when I(state=present) - sample: 143 -owner_name: - description: template's owner name - type: str - returned: when I(state=present) - sample: ansible-test -''' - - -from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule - - -class TemplateModule(OpenNebulaModule): - def __init__(self): - argument_spec = dict( - id=dict(type='int', required=False), - name=dict(type='str', required=False), - state=dict(type='str', choices=['present', 'absent'], default='present'), - template=dict(type='str', required=False), - ) - - mutually_exclusive = [ - ['id', 'name'] - ] - - required_one_of = [('id', 'name')] - - required_if = [ - ['state', 'present', ['template']] - ] - - OpenNebulaModule.__init__(self, - argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - required_if=required_if) - - def run(self, one, module, result): - params = module.params - id = params.get('id') - name = params.get('name') - desired_state = params.get('state') - template_data = params.get('template') - - 
self.result = {} - - template = self.get_template_instance(id, name) - needs_creation = False - if not template and desired_state != 'absent': - if id: - module.fail_json(msg="There is no template with id=" + str(id)) - else: - needs_creation = True - - if desired_state == 'absent': - self.result = self.delete_template(template) - else: - if needs_creation: - self.result = self.create_template(name, template_data) - else: - self.result = self.update_template(template, template_data) - - self.exit() - - def get_template(self, predicate): - # -3 means "Resources belonging to the user" - # the other two parameters are used for pagination, -1 for both essentially means "return all" - pool = self.one.templatepool.info(-3, -1, -1) - - for template in pool.VMTEMPLATE: - if predicate(template): - return template - - return None - - def get_template_by_id(self, template_id): - return self.get_template(lambda template: (template.ID == template_id)) - - def get_template_by_name(self, name): - return self.get_template(lambda template: (template.NAME == name)) - - def get_template_instance(self, requested_id, requested_name): - if requested_id: - return self.get_template_by_id(requested_id) - else: - return self.get_template_by_name(requested_name) - - def get_template_info(self, template): - info = { - 'id': template.ID, - 'name': template.NAME, - 'template': template.TEMPLATE, - 'user_name': template.UNAME, - 'user_id': template.UID, - 'group_name': template.GNAME, - 'group_id': template.GID, - } - - return info - - def create_template(self, name, template_data): - if not self.module.check_mode: - self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) - - result = self.get_template_info(self.get_template_by_name(name)) - result['changed'] = True - - return result - - def update_template(self, template, template_data): - if not self.module.check_mode: - # 0 = replace the whole template - self.one.template.update(template.ID, template_data, 0) - - result = self.get_template_info(self.get_template_by_id(template.ID)) - if self.module.check_mode: - # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. - result['changed'] = True - else: - # if the previous parsed template data is not equal to the updated one, this has changed - result['changed'] = template.TEMPLATE != result['template'] - - return result - - def delete_template(self, template): - if not template: - return {'changed': False} - - if not self.module.check_mode: - self.one.template.delete(template.ID) - - return {'changed': True} - - -def main(): - TemplateModule().run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_vm.py b/plugins/modules/cloud/opennebula/one_vm.py deleted file mode 100644 index fa3d4abaab..0000000000 --- a/plugins/modules/cloud/opennebula/one_vm.py +++ /dev/null @@ -1,1613 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2017, Milan Ilic -(c) 2019, Jan Meerkamp - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
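`update_template()` above relies on two details of the XML-RPC bindings: merge flag `0` replaces the whole template body, and idempotence is judged by comparing the parsed template before and after the call. A sketch of that rule, assuming `one` is an authenticated `pyone.OneServer` client:

```python
def replace_template(one, template_id, new_body):
    # 0 = replace the whole template rather than merging attributes
    before = one.template.info(template_id).TEMPLATE
    one.template.update(template_id, new_body, 0)
    after = one.template.info(template_id).TEMPLATE
    return {"changed": before != after, "template": after}
```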
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_vm
-short_description: Creates or terminates OpenNebula instances
-description:
- - Manages OpenNebula instances
-requirements:
- - pyone
-options:
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- - transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
- type: str
- api_username:
- description:
- - Name of the user to log in to the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
- type: str
- api_password:
- description:
- - Password of the user to log in to the OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
- - If neither I(api_username) nor I(api_password) is set, the module will try to
- - authenticate with the ONE auth file. The default path is "~/.one/one_auth".
- - Set the environment variable C(ONE_AUTH) to override this path.
- type: str
- template_name:
- description:
- - Name of VM template to use to create a new instance
- type: str
- template_id:
- description:
- - ID of a VM template to use to create a new instance
- type: int
- vm_start_on_hold:
- description:
- - Set to true to put the VM on hold while creating
- default: False
- type: bool
- instance_ids:
- description:
- - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
- aliases: ['ids']
- type: list
- elements: int
- state:
- description:
- - C(present) - create instances from a template specified with C(template_id)/C(template_name).
- - C(running) - run instances
- - C(poweredoff) - power-off instances
- - C(rebooted) - reboot instances
- - C(absent) - terminate instances
- choices: ["present", "absent", "running", "rebooted", "poweredoff"]
- default: present
- type: str
- hard:
- description:
- - Reboot, power-off or terminate instances C(hard)
- default: no
- type: bool
- wait:
- description:
- - Wait for the instance to reach its desired state before returning. Keep
- - in mind that waiting for the instance to reach the running state does not
- - mean that you will be able to SSH into that machine, only that the boot
- - process has started on that instance. See the 'wait_for' example for
- - details.
- default: yes
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds
- default: 300
- type: int
- attributes:
- description:
- - A dictionary of key/value attributes to add to new instances, or for
- - setting C(state) of instances with these attributes.
- - Keys are case insensitive and OpenNebula automatically converts them to upper case.
- - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
- - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
- - indexes to the names of VMs.
- - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
- - When used with C(count_attributes) and C(exact_count) the module will
- - match the base name without the index part.
- default: {}
- type: dict
- labels:
- description:
- - A list of labels to associate with new instances, or for setting
- - C(state) of instances with these labels.
- default: []
- type: list
- elements: str
- count_attributes:
- description:
- - A dictionary of key/value attributes that can only be used with
- - C(exact_count) to determine how many nodes based on specific
- - attribute criteria should be deployed. This can be expressed in
- - multiple ways and is shown in the EXAMPLES section.
- type: dict
- count_labels:
- description:
- - A list of labels that can only be used with C(exact_count) to determine
- - how many nodes based on specific label criteria should be deployed.
- - This can be expressed in multiple ways and is shown in the EXAMPLES
- - section.
- type: list
- elements: str
- count:
- description:
- - Number of instances to launch
- default: 1
- type: int
- exact_count:
- description:
- - Indicates how many instances that match C(count_attributes) and
- - C(count_labels) parameters should be deployed. Instances are either
- - created or terminated based on this value.
- - NOTE':' Instances with the lowest IDs will be terminated first.
- type: int
- mode:
- description:
- - Set permission mode of the instance in octet format, e.g. C(600) to give the owner C(use) and C(manage) rights and nothing to group and others.
- type: str
- owner_id:
- description:
- - ID of the user which will be set as the owner of the instance
- type: int
- group_id:
- description:
- - ID of the group which will be set as the group of the instance
- type: int
- memory:
- description:
- - The size of the memory for new instances (in MB, GB, ...)
- type: str
- disk_size:
- description:
- - The size of the disk created for new instances (in MB, GB, TB,...).
- - NOTE':' If the template has multiple disks, the order of the sizes is
- - matched against the order specified in C(template_id)/C(template_name).
- type: list
- elements: str
- cpu:
- description:
- - Percentage of CPU divided by 100 required for the new instance. Half a
- - processor is written 0.5.
- type: float
- vcpu:
- description:
- - Number of CPUs (cores) the new VM will have.
- type: int
- networks:
- description:
- - A list of dictionaries with network parameters. See examples for more details.
- default: []
- type: list
- elements: dict
- disk_saveas:
- description:
- - Creates an image from a VM disk.
- - It is a dictionary where you have to specify the C(name) of the new image.
- - Optionally you can specify the C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
- - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
- - and the VM has to be in the C(poweredoff) state.
- - Also this operation will fail if an image with the specified C(name) already exists.
- type: dict
- persistent:
- description:
- - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
- default: NO
- type: bool
- version_added: '0.2.0'
- datastore_id:
- description:
- - ID of the datastore to use to create a new instance
- version_added: '0.2.0'
- type: int
- datastore_name:
- description:
- - Name of the datastore to use to create a new instance
- version_added: '0.2.0'
- type: str
-author:
- - "Milan Ilic (@ilicmilan)"
- - "Jan Meerkamp (@meerkampdvv)"
-'''
-
-
-EXAMPLES = '''
-- name: Create a new instance
- community.general.one_vm:
- template_id: 90
- register: result
-
-- name: Print VM properties
- ansible.builtin.debug:
- var: result
-
-- name: Deploy a new VM on hold
- community.general.one_vm:
- template_name: 'app1_template'
- vm_start_on_hold: 'True'
-
-- name: Deploy a new VM and set its name to 'foo'
- community.general.one_vm:
- template_name: 'app1_template'
- attributes:
- name: foo
-
-- name: Deploy a new VM and set its group_id and mode
- community.general.one_vm:
- template_id: 90
- group_id: 16
- mode: 660
-
-- name: Deploy a new VM as persistent
- community.general.one_vm:
- template_id: 90
- persistent: yes
-
-- name: Change VM's permissions to 640
- community.general.one_vm:
- instance_ids: 5
- mode: 640
-
-- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
- community.general.one_vm:
- template_id: 15
- disk_size: 35.2 GB
- memory: 4 GB
- vcpu: 4
- count: 2
- networks:
- - NETWORK_ID: 27
- - NETWORK: "default-network"
- NETWORK_UNAME: "app-user"
- SECURITY_GROUPS: "120,124"
- - NETWORK_ID: 27
- SECURITY_GROUPS: "10"
-
-- name: Deploy a new instance which uses a Template with two Disks
- community.general.one_vm:
- template_id: 42
- disk_size:
- - 35.2 GB
- - 50 GB
- memory: 4 GB
- vcpu: 4
- count: 1
- networks:
- - NETWORK_ID: 27
-
-- name: "Deploy a new instance with attribute 'bar: bar1' and set its name to 'foo'"
- community.general.one_vm:
- template_id: 53
- attributes:
- name: foo
- bar: bar1
-
-- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
- community.general.one_vm:
- template_id: 53
- attributes:
- foo1: app1
- foo2: app2
- exact_count: 2
- count_attributes:
- foo1: app1
- foo2: app2
-
-- name: Enforce that 4 instances with an attribute 'bar' are deployed
- community.general.one_vm:
- template_id: 53
- attributes:
- name: app
- bar: bar2
- exact_count: 4
- count_attributes:
- bar:
-
-# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
-# Names will be: fooapp-00 and fooapp-01
-- name: Deploy 2 new instances
- community.general.one_vm:
- template_id: 53
- attributes:
- name: fooapp-##
- foo: bar
- labels:
- - app1
- - app2
- count: 2
-
-# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
-# Names will be: fooapp-002 and fooapp-003
-- name: Deploy 2 new instances
- community.general.one_vm:
- template_id: 53
- attributes:
- name: fooapp-###
- app: app1
- count: 2
-
-# Reboot all instances with name in format 'fooapp-#'
-# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
-- name: Reboot all instances with names in a certain format
- community.general.one_vm:
- attributes:
- name: fooapp-#
- state: rebooted
-
-# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
-# The task will delete the oldest instances, so only 'fooapp-003' will remain
-- name: Enforce that only 1 instance with name in a certain format is deployed
- community.general.one_vm:
- template_id: 53
- exact_count: 1
- count_attributes:
- name: fooapp-#
-
-- name: Deploy a new instance with a network
- community.general.one_vm:
- template_id: 53
- networks:
- - NETWORK_ID: 27
- register: vm
-
-- name: Wait for SSH to come up
- ansible.builtin.wait_for_connection:
- delegate_to: '{{ vm.instances[0].networks[0].ip }}'
-
-- name: Terminate VMs by ids
- community.general.one_vm:
- instance_ids:
- - 153
- - 160
- state: absent
-
-- name: Reboot all VMs that have labels 'foo' and 'app1'
- community.general.one_vm:
- labels:
- - foo
- - app1
- state: rebooted
-
-- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
- community.general.one_vm:
- attributes:
- name: foo
- app: bar
- register: results
-
-- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
- community.general.one_vm:
- template_name: app_template
- labels:
- - foo1
- - foo2
- count: 2
-
-- name: Enforce that only 1 instance with label 'foo1' will be running
- community.general.one_vm:
- template_name: app_template
- labels:
- - foo1
- exact_count: 1
- count_labels:
- - foo1
-
-- name: Terminate all instances that have attribute foo
- community.general.one_vm:
- template_id: 53
- exact_count: 0
- count_attributes:
- foo:
-
-- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
- community.general.one_vm:
- instance_ids: 351
- state: poweredoff
- disk_saveas:
- name: foo-image
-
-- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
- community.general.one_vm:
- instance_ids: 351
- disk_saveas:
- name: bar-image
- disk_id: 1
-'''
-
-RETURN = '''
-instances_ids:
- description: a list of instance ids whose state is changed or which are fetched with the C(instance_ids) option.
- type: list
- returned: success
- sample: [ 1234, 1235 ]
-instances:
- description: a list of instances info whose state is changed or which are fetched with the C(instance_ids) option.
- type: complex - returned: success - contains: - vm_id: - description: vm id - type: int - sample: 153 - vm_name: - description: vm name - type: str - sample: foo - template_id: - description: vm's template id - type: int - sample: 153 - group_id: - description: vm's group id - type: int - sample: 1 - group_name: - description: vm's group name - type: str - sample: one-users - owner_id: - description: vm's owner id - type: int - sample: 143 - owner_name: - description: vm's owner name - type: str - sample: app-user - mode: - description: vm's mode - type: str - returned: success - sample: 660 - state: - description: state of an instance - type: str - sample: ACTIVE - lcm_state: - description: lcm state of an instance that is only relevant when the state is ACTIVE - type: str - sample: RUNNING - cpu: - description: Percentage of CPU divided by 100 - type: float - sample: 0.2 - vcpu: - description: Number of CPUs (cores) - type: int - sample: 2 - memory: - description: The size of the memory in MB - type: str - sample: 4096 MB - disk_size: - description: The size of the disk in MB - type: str - sample: 20480 MB - networks: - description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC - type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] - uptime_h: - description: Uptime of the instance in hours - type: int - sample: 35 - labels: - description: A list of string labels that are associated with the instance - type: list - sample: [ - "foo", - "spec-label" - ] - attributes: - description: A dictionary of key/values attributes that are associated with the instance - type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } -tagged_instances: - description: - - A list of instances info based on a specific attributes and/or - - labels that are specified with C(count_attributes) and C(count_labels) - - options. 
- type: complex - returned: success - contains: - vm_id: - description: vm id - type: int - sample: 153 - vm_name: - description: vm name - type: str - sample: foo - template_id: - description: vm's template id - type: int - sample: 153 - group_id: - description: vm's group id - type: int - sample: 1 - group_name: - description: vm's group name - type: str - sample: one-users - owner_id: - description: vm's user id - type: int - sample: 143 - owner_name: - description: vm's user name - type: str - sample: app-user - mode: - description: vm's mode - type: str - returned: success - sample: 660 - state: - description: state of an instance - type: str - sample: ACTIVE - lcm_state: - description: lcm state of an instance that is only relevant when the state is ACTIVE - type: str - sample: RUNNING - cpu: - description: Percentage of CPU divided by 100 - type: float - sample: 0.2 - vcpu: - description: Number of CPUs (cores) - type: int - sample: 2 - memory: - description: The size of the memory in MB - type: str - sample: 4096 MB - disk_size: - description: The size of the disk in MB - type: list - sample: [ - "20480 MB", - "10240 MB" - ] - networks: - description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC - type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] - uptime_h: - description: Uptime of the instance in hours - type: int - sample: 35 - labels: - description: A list of string labels that are associated with the instance - type: list - sample: [ - "foo", - "spec-label" - ] - attributes: - description: A dictionary of key/values attributes that are associated with the instance - type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_template(module, client, predicate): - - pool = client.templatepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all templates user can Use - found = 0 - found_template = None - template_name = '' - - for template in pool.VMTEMPLATE: - if predicate(template): - found = found + 1 - found_template = template - template_name = template.NAME - - if found == 0: - return None - elif found > 1: - module.fail_json(msg='There are more templates with name: ' + template_name) - return found_template - - -def get_template_by_name(module, client, template_name): - return get_template(module, client, lambda template: (template.NAME == template_name)) - - -def get_template_by_id(module, client, template_id): - return get_template(module, client, lambda template: (template.ID == template_id)) - - -def get_template_id(module, client, requested_id, requested_name): - template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name) - if template: - return template.ID - else: - return None - - -def get_datastore(module, client, predicate): - pool = client.datastorepool.info() - found = 0 - found_datastore = None - datastore_name = '' - - for datastore in pool.DATASTORE: - if predicate(datastore): - found = found + 1 - found_datastore = datastore - datastore_name = datastore.NAME - - if found == 0: - return 
None - elif found > 1: - module.fail_json(msg='There are more datastores with name: ' + datastore_name) - return found_datastore - - -def get_datastore_by_name(module, client, datastore_name): - return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name)) - - -def get_datastore_by_id(module, client, datastore_id): - return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id)) - - -def get_datastore_id(module, client, requested_id, requested_name): - datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name) - if datastore: - return datastore.ID - else: - return None - - -def get_vm_by_id(client, vm_id): - try: - vm = client.vm.info(int(vm_id)) - except BaseException: - return None - return vm - - -def get_vms_by_ids(module, client, state, ids): - vms = [] - - for vm_id in ids: - vm = get_vm_by_id(client, vm_id) - if vm is None and state != 'absent': - module.fail_json(msg='There is no VM with id=' + str(vm_id)) - vms.append(vm) - - return vms - - -def get_vm_info(client, vm): - - vm = client.vm.info(vm.ID) - - networks_info = [] - - disk_size = [] - if 'DISK' in vm.TEMPLATE: - if isinstance(vm.TEMPLATE['DISK'], list): - for disk in vm.TEMPLATE['DISK']: - disk_size.append(disk['SIZE'] + ' MB') - else: - disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB') - - if 'NIC' in vm.TEMPLATE: - if isinstance(vm.TEMPLATE['NIC'], list): - for nic in vm.TEMPLATE['NIC']: - networks_info.append({ - 'ip': nic.get('IP', ''), - 'mac': nic.get('MAC', ''), - 'name': nic.get('NETWORK', ''), - 'security_groups': nic.get('SECURITY_GROUPS', '') - }) - else: - networks_info.append({ - 'ip': vm.TEMPLATE['NIC'].get('IP', ''), - 'mac': vm.TEMPLATE['NIC'].get('MAC', ''), - 'name': vm.TEMPLATE['NIC'].get('NETWORK', ''), - 'security_groups': - vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '') - }) - import time - - current_time = time.localtime() - vm_start_time = time.localtime(vm.STIME) - - vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) - vm_uptime /= (60 * 60) - - permissions_str = parse_vm_permissions(client, vm) - - # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE - vm_lcm_state = None - if vm.STATE == VM_STATES.index('ACTIVE'): - vm_lcm_state = LCM_STATES[vm.LCM_STATE] - - vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID) - - info = { - 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']), - 'vm_id': vm.ID, - 'vm_name': vm.NAME, - 'state': VM_STATES[vm.STATE], - 'lcm_state': vm_lcm_state, - 'owner_name': vm.UNAME, - 'owner_id': vm.UID, - 'networks': networks_info, - 'disk_size': disk_size, - 'memory': vm.TEMPLATE['MEMORY'] + ' MB', - 'vcpu': vm.TEMPLATE['VCPU'], - 'cpu': vm.TEMPLATE['CPU'], - 'group_name': vm.GNAME, - 'group_id': vm.GID, - 'uptime_h': int(vm_uptime), - 'attributes': vm_attributes, - 'mode': permissions_str, - 'labels': vm_labels - } - - return info - - -def parse_vm_permissions(client, vm): - vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS - - owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A) - group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A) - other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A) - - permissions = str(owner_octal) + str(group_octal) + str(other_octal) - - return permissions - - -def set_vm_permissions(module, 
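A recurring wrinkle visible in `get_vm_info()` above: the bindings return a single DISK or NIC entry as a dict but several of them as a list, so every consumer has to branch on `isinstance`. A small normaliser (not part of the original module) removes that branching:

```python
def as_list(value):
    # None -> [], single dict -> [dict], list -> unchanged
    if value is None:
        return []
    return value if isinstance(value, list) else [value]

# for nic in as_list(vm.TEMPLATE.get('NIC')):
#     print(nic.get('IP', ''), nic.get('MAC', ''))
```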
client, vms, permissions): - changed = False - - for vm in vms: - vm = client.vm.info(vm.ID) - old_permissions = parse_vm_permissions(client, vm) - changed = changed or old_permissions != permissions - - if not module.check_mode and old_permissions != permissions: - permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000 - mode_bits = [int(d) for d in permissions_str] - try: - client.vm.chmod( - vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8]) - except pyone.OneAuthorizationException: - module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.") - - return changed - - -def set_vm_ownership(module, client, vms, owner_id, group_id): - changed = False - - for vm in vms: - vm = client.vm.info(vm.ID) - if owner_id is None: - owner_id = vm.UID - if group_id is None: - group_id = vm.GID - - changed = changed or owner_id != vm.UID or group_id != vm.GID - - if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID): - try: - client.vm.chown(vm.ID, owner_id, group_id) - except pyone.OneAuthorizationException: - module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.") - - return changed - - -def get_size_in_MB(module, size_str): - - SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] - - s = size_str - init = size_str - num = "" - while s and s[0:1].isdigit() or s[0:1] == '.': - num += s[0] - s = s[1:] - num = float(num) - symbol = s.strip() - - if symbol not in SYMBOLS: - module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num)) - - prefix = {'B': 1} - - for i, s in enumerate(SYMBOLS[1:]): - prefix[s] = 1 << (i + 1) * 10 - - size_in_bytes = int(num * prefix[symbol]) - size_in_MB = size_in_bytes / (1024 * 1024) - - return size_in_MB - - -def create_disk_str(module, client, template_id, disk_size_list): - - if not disk_size_list: - return '' - - template = client.template.info(template_id) - if isinstance(template.TEMPLATE['DISK'], list): - # check if the number of disks is correct - if len(template.TEMPLATE['DISK']) != len(disk_size_list): - module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list))) - result = '' - index = 0 - for DISKS in template.TEMPLATE['DISK']: - disk = {} - diskresult = '' - # Get all info about existed disk e.g. IMAGE_ID,... - for key, value in DISKS.items(): - disk[key] = value - # copy disk attributes if it is not the size attribute - diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') - # Set the Disk Size - diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n' - result += diskresult - index += 1 - else: - if len(disk_size_list) > 1: - module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list))) - disk = {} - # Get all info about existed disk e.g. IMAGE_ID,... 
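`get_size_in_MB()` above parses strings such as `35.2 GB` using binary prefixes (KB = 2**10 bytes, MB = 2**20, and so on) and converts down to megabytes. A compact standalone sketch of the same arithmetic:

```python
def size_to_mb(size_str):
    symbols = ['B', 'KB', 'MB', 'GB', 'TB']
    num, rest = '', size_str.strip()
    # split the leading number from the unit symbol
    while rest and (rest[0].isdigit() or rest[0] == '.'):
        num += rest[0]
        rest = rest[1:]
    unit = rest.strip()
    if not num or unit not in symbols:
        raise ValueError('cannot interpret %r' % size_str)
    factor = 1 << (symbols.index(unit) * 10)   # bytes per unit
    return float(num) * factor / (1 << 20)

# size_to_mb('35.2 GB') == 36044.8
```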
- for key, value in template.TEMPLATE['DISK'].items(): - disk[key] = value - # copy disk attributes if it is not the size attribute - result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') - # Set the Disk Size - result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n' - - return result - - -def create_attributes_str(attributes_dict, labels_list): - - attributes_str = '' - - if labels_list: - attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n' - if attributes_dict: - attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n' - - return attributes_str - - -def create_nics_str(network_attrs_list): - nics_str = '' - - for network in network_attrs_list: - # Packing key-value dict in string with format key="value", key="value" - network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items()) - nics_str = nics_str + 'NIC = [' + network_str + ']\n' - - return nics_str - - -def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent): - - if attributes_dict: - vm_name = attributes_dict.get('NAME', '') - - disk_str = create_disk_str(module, client, template_id, disk_size) - vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str - try: - vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent) - except pyone.OneException as e: - module.fail_json(msg=str(e)) - vm = get_vm_by_id(client, vm_id) - - return get_vm_info(client, vm) - - -def generate_next_index(vm_filled_indexes_list, num_sign_cnt): - counter = 0 - cnt_str = str(counter).zfill(num_sign_cnt) - - while cnt_str in vm_filled_indexes_list: - counter = counter + 1 - cnt_str = str(counter).zfill(num_sign_cnt) - - return cnt_str - - -def get_vm_labels_and_attributes_dict(client, vm_id): - vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE - - attrs_dict = {} - labels_list = [] - - for key, value in vm_USER_TEMPLATE.items(): - if key != 'LABELS': - attrs_dict[key] = value - else: - if key is not None: - labels_list = value.split(',') - - return labels_list, attrs_dict - - -def get_all_vms_by_attributes(client, attributes_dict, labels_list): - pool = client.vmpool.info(-2, -1, -1, -1).VM - vm_list = [] - name = '' - if attributes_dict: - name = attributes_dict.pop('NAME', '') - - if name != '': - base_name = name[:len(name) - name.count('#')] - # Check does the name have indexed format - with_hash = name.endswith('#') - - for vm in pool: - if vm.NAME.startswith(base_name): - if with_hash and vm.NAME[len(base_name):].isdigit(): - # If the name has indexed format and after base_name it has only digits it'll be matched - vm_list.append(vm) - elif not with_hash and vm.NAME == name: - # If the name is not indexed it has to be same - vm_list.append(vm) - pool = vm_list - - import copy - - vm_list = copy.copy(pool) - - for vm in pool: - remove_list = [] - vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID) - - if attributes_dict and len(attributes_dict) > 0: - for key, val in attributes_dict.items(): - if key in vm_attributes_dict: - if val and vm_attributes_dict[key] != val: - remove_list.append(vm) - break - else: - remove_list.append(vm) - break - vm_list = 
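The indexed-name scheme handled by `generate_next_index()` above works like zero-padded counters: `fooapp-##` means a two-digit suffix, and the next name uses the lowest index not already taken. A standalone sketch:

```python
def next_indexed_name(name_pattern, taken):
    # taken is the set of index strings already in use, e.g. {'00', '01'}
    width = len(name_pattern) - len(name_pattern.rstrip('#'))
    base = name_pattern[:len(name_pattern) - width]
    counter = 0
    while str(counter).zfill(width) in taken:
        counter += 1
    return base + str(counter).zfill(width)

# next_indexed_name('fooapp-##', {'00', '01'}) -> 'fooapp-02'
```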
list(set(vm_list).difference(set(remove_list))) - - remove_list = [] - if labels_list and len(labels_list) > 0: - for label in labels_list: - if label not in vm_labels_list: - remove_list.append(vm) - break - vm_list = list(set(vm_list).difference(set(remove_list))) - - return vm_list - - -def create_count_of_vms( - module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent): - new_vms_list = [] - - vm_name = '' - if attributes_dict: - vm_name = attributes_dict.get('NAME', '') - - if module.check_mode: - return True, [], [] - - # Create list of used indexes - vm_filled_indexes_list = None - num_sign_cnt = vm_name.count('#') - if vm_name != '' and num_sign_cnt > 0: - vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) - base_name = vm_name[:len(vm_name) - num_sign_cnt] - vm_name = base_name - # Make list which contains used indexes in format ['000', '001',...] - vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) - - while count > 0: - new_vm_name = vm_name - # Create indexed name - if vm_filled_indexes_list is not None: - next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt) - vm_filled_indexes_list.append(next_index) - new_vm_name += next_index - # Update NAME value in the attributes in case there is index - attributes_dict['NAME'] = new_vm_name - new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent) - new_vm_id = new_vm_dict.get('vm_id') - new_vm = get_vm_by_id(client, new_vm_id) - new_vms_list.append(new_vm) - count -= 1 - - if vm_start_on_hold: - if wait: - for vm in new_vms_list: - wait_for_hold(module, client, vm, wait_timeout) - else: - if wait: - for vm in new_vms_list: - wait_for_running(module, client, vm, wait_timeout) - - return True, new_vms_list, [] - - -def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, - labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent): - - vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) - - vm_count_diff = exact_count - len(vm_list) - changed = vm_count_diff != 0 - - new_vms_list = [] - instances_list = [] - tagged_instances_list = vm_list - - if module.check_mode: - return changed, instances_list, tagged_instances_list - - if vm_count_diff > 0: - # Add more VMs - changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, - labels_list, disk_size, network_attrs_list, wait, wait_timeout, - vm_start_on_hold, vm_persistent) - - tagged_instances_list += instances_list - elif vm_count_diff < 0: - # Delete surplus VMs - old_vms_list = [] - - while vm_count_diff < 0: - old_vm = vm_list.pop(0) - old_vms_list.append(old_vm) - terminate_vm(module, client, old_vm, hard) - vm_count_diff += 1 - - if wait: - for vm in old_vms_list: - wait_for_done(module, client, vm, wait_timeout) - - instances_list = old_vms_list - # store only the remaining instances - old_vms_set = set(old_vms_list) - tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set] - - return changed, instances_list, tagged_instances_list - - -VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] -LCM_STATES = 
['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', - 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', - 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', - 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', - 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] - - -def wait_for_state(module, client, vm, wait_timeout, state_predicate): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - vm = client.vm.info(vm.ID) - state = vm.STATE - lcm_state = vm.LCM_STATE - - if state_predicate(state, lcm_state): - return vm - elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), - VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]: - module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state]) - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired!") - - -def wait_for_running(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, - lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) - - -def wait_for_done(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) - - -def wait_for_hold(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) - - -def wait_for_poweroff(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) - - -def terminate_vm(module, client, vm, hard=False): - changed = False - - if not vm: - return changed - - changed = True - - if not module.check_mode: - if hard: - client.vm.action('terminate-hard', vm.ID) - else: - client.vm.action('terminate', vm.ID) - - return changed - - -def terminate_vms(module, client, vms, hard): - changed = False - - for vm in vms: - changed = terminate_vm(module, client, vm, hard) or changed - - return changed - - -def poweroff_vm(module, client, vm, hard): - vm = client.vm.info(vm.ID) - changed = False - - lcm_state = vm.LCM_STATE - state = vm.STATE - - if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: - changed = True - - if changed and not module.check_mode: - if not hard: - client.vm.action('poweroff', vm.ID) - else: - client.vm.action('poweroff-hard', vm.ID) - - return changed - - -def poweroff_vms(module, client, vms, hard): - changed = False - - for vm in vms: - changed = poweroff_vm(module, client, vm, hard) or changed - - return changed - - -def reboot_vms(module, client, vms, wait_timeout, hard): - - if not module.check_mode: - # Firstly, power-off all instances - for vm in vms: - vm = client.vm.info(vm.ID) - lcm_state = vm.LCM_STATE - state = vm.STATE - if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: - poweroff_vm(module, client, vm, hard) - - # Wait for all to be power-off - for vm in vms: - wait_for_poweroff(module, client, vm, wait_timeout) - - for vm in vms: - resume_vm(module, client, vm) - - return True - - -def resume_vm(module, client, vm): 
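All of the `wait_for_*` helpers above are wrappers around one predicate-driven poll loop, differing only in the `(state, lcm_state)` condition they pass in. The pattern, reduced to its core (`fetch_states` is a hypothetical callable returning the two state indexes):

```python
import time

def wait_for(fetch_states, predicate, timeout=300):
    deadline = time.time() + timeout
    while time.time() < deadline:
        state, lcm_state = fetch_states()
        if predicate(state, lcm_state):
            return state, lcm_state
        time.sleep(1)
    raise TimeoutError('wait timeout has expired')

# e.g. "running" in the module's terms:
# running = lambda s, l: s == VM_STATES.index('ACTIVE') and l == LCM_STATES.index('RUNNING')
```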
- vm = client.vm.info(vm.ID)
- changed = False
-
- lcm_state = vm.LCM_STATE
- if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
- module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
- "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shut down properly")
- if lcm_state not in [LCM_STATES.index('RUNNING')]:
- changed = True
-
- if changed and not module.check_mode:
- client.vm.action('resume', vm.ID)
-
- return changed
-
-
-def resume_vms(module, client, vms):
- changed = False
-
- for vm in vms:
- changed = resume_vm(module, client, vm) or changed
-
- return changed
-
-
-def check_name_attribute(module, attributes):
- if attributes.get("NAME"):
- import re
- if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
- module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
- "'. Signs '#' are allowed only at the end of the name, and the name cannot consist only of '#'.")
-
-
-TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
- "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
- "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
-
-
-def check_attributes(module, attributes):
- for key in attributes.keys():
- if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
- module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
- # Check the format of the name attribute
- check_name_attribute(module, attributes)
-
-
-def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
- if not disk_saveas.get('name'):
- module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
-
- image_name = disk_saveas.get('name')
- disk_id = disk_saveas.get('disk_id', 0)
-
- if not module.check_mode:
- if vm.STATE != VM_STATES.index('POWEROFF'):
- module.fail_json(msg="'disk_saveas' option can be used only when the VM is in 'POWEROFF' state")
- try:
- client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
- except pyone.OneException as e:
- module.fail_json(msg=str(e))
- wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
-
-
-def get_connection_info(module):
-
- url = module.params.get('api_url')
- username = module.params.get('api_username')
- password = module.params.get('api_password')
-
- if not url:
- url = os.environ.get('ONE_URL')
-
- if not username:
- username = os.environ.get('ONE_USERNAME')
-
- if not password:
- password = os.environ.get('ONE_PASSWORD')
-
- if not username:
- if not password:
- authfile = os.environ.get('ONE_AUTH')
- if authfile is None:
- authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
- try:
- with open(authfile, "r") as fp:
- authstring = fp.read().rstrip()
- username = authstring.split(":")[0]
- password = authstring.split(":")[1]
- except (OSError, IOError):
- module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
- except Exception:
- module.fail_json(msg=("An error occurred while reading the ONE_AUTH file at '%s'" % authfile))
- if not url:
- module.fail_json(msg="OpenNebula API URL (api_url) is not specified")
- from collections import namedtuple
-
- auth_params = namedtuple('auth', ('url', 'username', 'password'))
-
- return auth_params(url=url, username=username, password=password)
-
-
-def main():
- fields = {
- "api_url": {"required": False, "type": "str"},
- "api_username": {"required": False, "type": "str"},
- "api_password": {"required": False, "type": "str", "no_log":
True}, - "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"}, - "template_name": {"required": False, "type": "str"}, - "template_id": {"required": False, "type": "int"}, - "vm_start_on_hold": {"default": False, "type": "bool"}, - "state": { - "default": "present", - "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'], - "type": "str" - }, - "mode": {"required": False, "type": "str"}, - "owner_id": {"required": False, "type": "int"}, - "group_id": {"required": False, "type": "int"}, - "wait": {"default": True, "type": "bool"}, - "wait_timeout": {"default": 300, "type": "int"}, - "hard": {"default": False, "type": "bool"}, - "memory": {"required": False, "type": "str"}, - "cpu": {"required": False, "type": "float"}, - "vcpu": {"required": False, "type": "int"}, - "disk_size": {"required": False, "type": "list", "elements": "str"}, - "datastore_name": {"required": False, "type": "str"}, - "datastore_id": {"required": False, "type": "int"}, - "networks": {"default": [], "type": "list", "elements": "dict"}, - "count": {"default": 1, "type": "int"}, - "exact_count": {"required": False, "type": "int"}, - "attributes": {"default": {}, "type": "dict"}, - "count_attributes": {"required": False, "type": "dict"}, - "labels": {"default": [], "type": "list", "elements": "str"}, - "count_labels": {"required": False, "type": "list", "elements": "str"}, - "disk_saveas": {"type": "dict"}, - "persistent": {"default": False, "type": "bool"} - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[ - ['template_id', 'template_name', 'instance_ids'], - ['template_id', 'template_name', 'disk_saveas'], - ['instance_ids', 'count_attributes', 'count'], - ['instance_ids', 'count_labels', 'count'], - ['instance_ids', 'exact_count'], - ['instance_ids', 'attributes'], - ['instance_ids', 'labels'], - ['disk_saveas', 'attributes'], - ['disk_saveas', 'labels'], - ['exact_count', 'count'], - ['count', 'hard'], - ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'], - ['instance_ids', 'memory'], ['instance_ids', 'disk_size'], - ['instance_ids', 'networks'], - ['persistent', 'disk_size'] - ], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - instance_ids = params.get('instance_ids') - requested_template_name = params.get('template_name') - requested_template_id = params.get('template_id') - put_vm_on_hold = params.get('vm_start_on_hold') - state = params.get('state') - permissions = params.get('mode') - owner_id = params.get('owner_id') - group_id = params.get('group_id') - wait = params.get('wait') - wait_timeout = params.get('wait_timeout') - hard = params.get('hard') - memory = params.get('memory') - cpu = params.get('cpu') - vcpu = params.get('vcpu') - disk_size = params.get('disk_size') - requested_datastore_id = params.get('datastore_id') - requested_datastore_name = params.get('datastore_name') - networks = params.get('networks') - count = params.get('count') - exact_count = params.get('exact_count') - attributes = params.get('attributes') - count_attributes = params.get('count_attributes') - labels = params.get('labels') - count_labels = params.get('count_labels') - disk_saveas = params.get('disk_saveas') - persistent = params.get('persistent') - - if not (auth.username and auth.password): - module.warn("Credentials missing") - else: - one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - 
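The credential lookup in `get_connection_info()` above resolves in a fixed order: explicit module parameters first, then the `ONE_URL`/`ONE_USERNAME`/`ONE_PASSWORD` environment variables, and finally a `username:password` pair read from the `ONE_AUTH` file (defaulting to `~/.one/one_auth`). A minimal standalone sketch of the same precedence; the function name is illustrative, not part of the module:

```python
import os


def resolve_one_credentials(url=None, username=None, password=None):
    # Explicit arguments win, then the environment variables.
    url = url or os.environ.get('ONE_URL')
    username = username or os.environ.get('ONE_USERNAME')
    password = password or os.environ.get('ONE_PASSWORD')

    # Only when neither username nor password was supplied, fall back to
    # the ONE_AUTH file, which holds a single "username:password" line.
    if not (username or password):
        authfile = os.environ.get('ONE_AUTH') or os.path.join(
            os.path.expanduser('~'), '.one', 'one_auth')
        with open(authfile) as fp:
            username, _, password = fp.read().rstrip().partition(':')

    return url, username, password
```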
- if attributes:
- attributes = dict((key.upper(), value) for key, value in attributes.items())
- check_attributes(module, attributes)
-
- if count_attributes:
- count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
- if not attributes:
- import copy
- module.warn('When you pass `count_attributes` without `attributes` when deploying, `attributes` implicitly takes the same values.')
- attributes = copy.copy(count_attributes)
- check_attributes(module, count_attributes)
-
- if count_labels and not labels:
- module.warn('When you pass `count_labels` without `labels` when deploying, `labels` implicitly takes the same values.')
- labels = count_labels
-
- # Fetch template
- template_id = None
- if requested_template_id is not None or requested_template_name:
- template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
- if template_id is None:
- if requested_template_id is not None:
- module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
- elif requested_template_name:
- module.fail_json(msg="There is no template with name: " + requested_template_name)
-
- # Fetch datastore
- datastore_id = None
- if requested_datastore_id or requested_datastore_name:
- datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
- if datastore_id is None:
- if requested_datastore_id:
- module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
- elif requested_datastore_name:
- module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
- else:
- attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
-
- if exact_count and template_id is None:
- module.fail_json(msg='Option `exact_count` needs template_id or template_name')
-
- if exact_count is not None and not (count_attributes or count_labels):
- module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
- if (count_attributes or count_labels) and exact_count is None:
- module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
- if template_id is not None and state != 'present':
- module.fail_json(msg="Only state 'present' is valid for the template")
-
- if memory:
- attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
- if cpu:
- attributes['CPU'] = str(cpu)
- if vcpu:
- attributes['VCPU'] = str(vcpu)
-
- if exact_count is not None and state != 'present':
- module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
- if exact_count is not None and exact_count < 0:
- module.fail_json(msg='`exact_count` cannot be less than 0')
- if count <= 0:
- module.fail_json(msg='`count` has to be greater than 0')
-
- if permissions is not None:
- import re
- if re.match("^[0-7]{3}$", permissions) is None:
- module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in octal format, e.g. 600")
-
- if exact_count is not None:
- # Deploy an exact count of VMs
- changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
- count_attributes, labels, count_labels, disk_size,
- networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
- vms = tagged_instances_list
- elif template_id is not None and state == 'present':
- # Deploy count VMs
- changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
- attributes, labels, disk_size, networks, wait, wait_timeout,
- put_vm_on_hold, persistent)
- # instances_list - new instances
- # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
- vms = instances_list
- else:
- # Fetch data of instances, or change their state
- if not (instance_ids or attributes or labels):
- module.fail_json(msg="At least one of `instance_ids`, `attributes`, `labels` must be passed!")
-
- if memory or cpu or vcpu or disk_size or networks:
- module.fail_json(msg="Parameters `memory`, `cpu`, `vcpu`, `disk_size` and `networks` can only be set when deploying a VM!")
-
- if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
- module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
-
- vms = []
- tagged = False
- changed = False
-
- if instance_ids:
- vms = get_vms_by_ids(module, one_client, state, instance_ids)
- else:
- tagged = True
- vms = get_all_vms_by_attributes(one_client, attributes, labels)
-
- if len(vms) == 0 and state != 'absent' and state != 'present':
- module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
-
- if len(vms) == 0 and state == 'present' and not tagged:
- module.fail_json(msg='There are no instances with specified `instance_ids`.')
-
- if tagged and state == 'absent':
- module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
-
- if state == 'absent':
- changed = terminate_vms(module, one_client, vms, hard)
- elif state == 'rebooted':
- changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
- elif state == 'poweredoff':
- changed = poweroff_vms(module, one_client, vms, hard)
- elif state == 'running':
- changed = resume_vms(module, one_client, vms)
-
- instances_list = vms
- tagged_instances_list = []
-
- if permissions is not None:
- changed = set_vm_permissions(module, one_client, vms, permissions) or changed
-
- if owner_id is not None or group_id is not None:
- changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
-
- if wait and not module.check_mode and state != 'present':
- wait_for = {
- 'absent': wait_for_done,
- 'rebooted': wait_for_running,
- 'poweredoff': wait_for_poweroff,
- 'running': wait_for_running
- }
- for vm in vms:
- if vm is not None:
- wait_for[state](module, one_client, vm, wait_timeout)
-
- if disk_saveas is not None:
- if len(vms) == 0:
- module.fail_json(msg="There is no VM whose disk could be saved.")
- disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
- changed = True
-
- # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
- instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
- instances_ids = list(vm.ID for vm in instances_list if vm is not None)
- # tagged_instances - A list of instances info based on a specific 
attributes and/or labels that are specified with C(count_attributes) and C(count_labels) - tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None) - - result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances} - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/oracle/oci_vcn.py b/plugins/modules/cloud/oracle/oci_vcn.py deleted file mode 100644 index a82914bdea..0000000000 --- a/plugins/modules/cloud/oracle/oci_vcn.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oci_vcn -short_description: Manage Virtual Cloud Networks(VCN) in OCI -description: - - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. - The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from - U(https://github.com/oracle/oci-ansible-modules/releases). -options: - cidr_block: - description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present). - type: str - required: false - compartment_id: - description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present). - This option is mutually exclusive with I(vcn_id). - type: str - display_name: - description: A user-friendly name. Does not have to be unique, and it's changeable. - type: str - aliases: [ 'name' ] - dns_label: - description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to - form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example, - bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice - to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins - with a letter. The value cannot be changed. - type: str - state: - description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN. - type: str - default: present - choices: ['present', 'absent'] - vcn_id: - description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN - with I(state=present). This option is mutually exclusive with I(compartment_id). 
- type: str - aliases: [ 'id' ] -author: "Rohit Chaware (@rohitChaware)" -extends_documentation_fragment: -- community.general.oracle -- community.general.oracle_creatable_resource -- community.general.oracle_wait_options -- community.general.oracle_tags - -''' - -EXAMPLES = """ -- name: Create a VCN - community.general.oci_vcn: - cidr_block: '10.0.0.0/16' - compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx' - display_name: my_vcn - dns_label: ansiblevcn - -- name: Updates the specified VCN's display name - community.general.oci_vcn: - vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx - display_name: ansible_vcn - -- name: Delete the specified VCN - community.general.oci_vcn: - vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx - state: absent -""" - -RETURN = """ -vcn: - description: Information about the VCN - returned: On successful create and update operation - type: dict - sample: { - "cidr_block": "10.0.0.0/16", - compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", - "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", - "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", - "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", - "display_name": "ansible_vcn", - "dns_label": "ansiblevcn", - "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", - "lifecycle_state": "AVAILABLE", - "time_created": "2017-11-13T20:22:40.626000+00:00", - "vcn_domain_name": "ansiblevcn.oraclevcn.com" - } -""" - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils - -try: - from oci.core.virtual_network_client import VirtualNetworkClient - from oci.core.models import CreateVcnDetails - from oci.core.models import UpdateVcnDetails - - HAS_OCI_PY_SDK = True -except ImportError: - HAS_OCI_PY_SDK = False - - -def delete_vcn(virtual_network_client, module): - result = oci_utils.delete_and_wait( - resource_type="vcn", - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - kwargs_get={"vcn_id": module.params["vcn_id"]}, - delete_fn=virtual_network_client.delete_vcn, - kwargs_delete={"vcn_id": module.params["vcn_id"]}, - module=module, - ) - return result - - -def update_vcn(virtual_network_client, module): - result = oci_utils.check_and_update_resource( - resource_type="vcn", - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - kwargs_get={"vcn_id": module.params["vcn_id"]}, - update_fn=virtual_network_client.update_vcn, - primitive_params_update=["vcn_id"], - kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"}, - module=module, - update_attributes=list(UpdateVcnDetails().attribute_map.keys()), - ) - return result - - -def create_vcn(virtual_network_client, module): - create_vcn_details = CreateVcnDetails() - for attribute in create_vcn_details.attribute_map.keys(): - if attribute in module.params: - setattr(create_vcn_details, attribute, module.params[attribute]) - - result = oci_utils.create_and_wait( - resource_type="vcn", - create_fn=virtual_network_client.create_vcn, - kwargs_create={"create_vcn_details": create_vcn_details}, - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - get_param="vcn_id", - module=module, - ) - return result - - -def main(): - module_args = oci_utils.get_taggable_arg_spec( - supports_create=True, supports_wait=True - ) - module_args.update( - dict( - cidr_block=dict(type="str", required=False), - compartment_id=dict(type="str", 
required=False),
- display_name=dict(type="str", required=False, aliases=["name"]),
- dns_label=dict(type="str", required=False),
- state=dict(
- type="str",
- required=False,
- default="present",
- choices=["absent", "present"],
- ),
- vcn_id=dict(type="str", required=False, aliases=["id"]),
- )
- )
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=False,
- mutually_exclusive=[["compartment_id", "vcn_id"]],
- )
-
- if not HAS_OCI_PY_SDK:
- module.fail_json(msg=missing_required_lib("oci"))
-
- virtual_network_client = oci_utils.create_service_client(
- module, VirtualNetworkClient
- )
-
- exclude_attributes = {"display_name": True, "dns_label": True}
- state = module.params["state"]
- vcn_id = module.params["vcn_id"]
-
- if state == "absent":
- if vcn_id is not None:
- result = delete_vcn(virtual_network_client, module)
- else:
- module.fail_json(
- msg="Specify vcn_id with state as 'absent' to delete a VCN."
- )
-
- else:
- if vcn_id is not None:
- result = update_vcn(virtual_network_client, module)
- else:
- result = oci_utils.check_and_create_resource(
- resource_type="vcn",
- create_fn=create_vcn,
- kwargs_create={
- "virtual_network_client": virtual_network_client,
- "module": module,
- },
- list_fn=virtual_network_client.list_vcns,
- kwargs_list={"compartment_id": module.params["compartment_id"]},
- module=module,
- model=CreateVcnDetails(),
- exclude_attributes=exclude_attributes,
- )
-
- module.exit_json(**result)
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/cloud/ovh/ovh_ip_failover.py b/plugins/modules/cloud/ovh/ovh_ip_failover.py
deleted file mode 100644
index 26179eb8f7..0000000000
--- a/plugins/modules/cloud/ovh/ovh_ip_failover.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: ovh_ip_failover
-short_description: Manage OVH IP failover address
-description:
- - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move
- an IP failover (or failover block) between services.
-author: "Pascal HERAUD (@pascalheraud)"
-notes:
- - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
- You have to create an application (a key and secret) with a consumer
- key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
-requirements:
- - ovh >= 0.4.8
-options:
- name:
- required: true
- description:
- - The IP address to manage (can be a single IP like 1.1.1.1
- or a block like 1.1.1.1/28)
- type: str
- service:
- required: true
- description:
- - The name of the OVH service this IP address should be routed to
- type: str
- endpoint:
- required: true
- description:
- - The endpoint to use (for instance ovh-eu)
- type: str
- wait_completion:
- required: false
- default: true
- type: bool
- description:
- - If true, the module will wait for the IP address to be moved.
- If false, exit without waiting. The taskId will be returned
- in module output
- wait_task_completion:
- required: false
- default: 0
- description:
- - If not 0, the module will wait for this task id to be
- completed. Use wait_task_completion if you want to wait for
- completion of a previously executed task with
- wait_completion=false. 
You can execute this module repeatedly on - a list of failover IPs using wait_completion=false (see examples) - type: int - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - required: false - default: 120 - description: - - The timeout in seconds used to wait for a task to be - completed. Default is 120 seconds. - type: int - -''' - -EXAMPLES = ''' -# Route an IP address 1.1.1.1 to the service ns666.ovh.net -- community.general.ovh_ip_failover: - name: 1.1.1.1 - service: ns666.ovh.net - endpoint: ovh-eu - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -- community.general.ovh_ip_failover: - name: 1.1.1.1 - service: ns666.ovh.net - endpoint: ovh-eu - wait_completion: false - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey - register: moved -- community.general.ovh_ip_failover: - name: 1.1.1.1 - service: ns666.ovh.net - endpoint: ovh-eu - wait_task_completion: "{{moved.taskId}}" - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -''' - -RETURN = ''' -''' - -import time - -try: - import ovh - import ovh.exceptions - from ovh.exceptions import APIError - HAS_OVH = True -except ImportError: - HAS_OVH = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import quote_plus - - -def getOvhClient(ansibleModule): - endpoint = ansibleModule.params.get('endpoint') - application_key = ansibleModule.params.get('application_key') - application_secret = ansibleModule.params.get('application_secret') - consumer_key = ansibleModule.params.get('consumer_key') - - return ovh.Client( - endpoint=endpoint, - application_key=application_key, - application_secret=application_secret, - consumer_key=consumer_key - ) - - -def waitForNoTask(client, name, timeout): - currentTimeout = timeout - while client.get('/ip/{0}/task'.format(quote_plus(name)), - function='genericMoveFloatingIp', - status='todo'): - time.sleep(1) # Delay for 1 sec - currentTimeout -= 1 - if currentTimeout < 0: - return False - return True - - -def waitForTaskDone(client, name, taskId, timeout): - currentTimeout = timeout - while True: - task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId)) - if task['status'] == 'done': - return True - time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API - currentTimeout -= 5 - if currentTimeout < 0: - return False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - service=dict(required=True), - endpoint=dict(required=True), - wait_completion=dict(default=True, type='bool'), - wait_task_completion=dict(default=0, type='int'), - application_key=dict(required=True, no_log=True), - application_secret=dict(required=True, no_log=True), - consumer_key=dict(required=True, no_log=True), - timeout=dict(default=120, type='int') - ), - supports_check_mode=True - ) - - result = dict( - changed=False - ) - - if not HAS_OVH: - module.fail_json(msg='ovh-api python module is required to run this module ') - - # Get parameters - name = module.params.get('name') - service = module.params.get('service') - timeout = module.params.get('timeout') - wait_completion = module.params.get('wait_completion') - 
wait_task_completion = module.params.get('wait_task_completion') - - # Connect to OVH API - client = getOvhClient(module) - - # Check that the load balancing exists - try: - ips = client.get('/ip', ip=name, type='failover') - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of ips, ' - 'check application key, secret, consumerkey and parameters. ' - 'Error returned by OVH api was : {0}'.format(apiError)) - - if name not in ips and '{0}/32'.format(name) not in ips: - module.fail_json(msg='IP {0} does not exist'.format(name)) - - # Check that no task is pending before going on - try: - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for no pending ' - 'tasks before executing the module '.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of pending tasks ' - 'of the ip, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - try: - ipproperties = client.get('/ip/{0}'.format(quote_plus(name))) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the properties ' - 'of the ip, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - if ipproperties['routedTo']['serviceName'] != service: - if not module.check_mode: - if wait_task_completion == 0: - # Move the IP and get the created taskId - task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service) - taskId = task['taskId'] - result['moved'] = True - else: - # Just wait for the given taskId to be completed - taskId = wait_task_completion - result['moved'] = False - result['taskId'] = taskId - if wait_completion or wait_task_completion != 0: - if not waitForTaskDone(client, name, taskId, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion ' - 'of move ip to service'.format(timeout)) - result['waited'] = True - else: - result['waited'] = False - result['changed'] = True - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py deleted file mode 100644 index 28d6f3a129..0000000000 --- a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ovh_ip_loadbalancing_backend -short_description: Manage OVH IP LoadBalancing backends -description: - - Manage OVH (French European hosting provider) LoadBalancing IP backends -author: Pascal Heraud (@pascalheraud) -notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). 
- You have to create an application (a key and secret) with a consumer
- key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
-requirements:
- - ovh > 0.3.5
-options:
- name:
- required: true
- description:
- - The internal name of the LoadBalancing IP (ip-X.X.X.X)
- type: str
- backend:
- required: true
- description:
- - The IP address of the backend to update / modify / delete
- type: str
- state:
- default: present
- choices: ['present', 'absent']
- description:
- - Determines whether the backend is to be created/modified
- or deleted
- type: str
- probe:
- default: 'none'
- choices: ['none', 'http', 'icmp', 'oco']
- description:
- - Determines the type of probe to use for this backend
- type: str
- weight:
- default: 8
- description:
- - Determines the weight for this backend
- type: int
- endpoint:
- required: true
- description:
- - The endpoint to use (for instance ovh-eu)
- type: str
- application_key:
- required: true
- description:
- - The applicationKey to use
- type: str
- application_secret:
- required: true
- description:
- - The application secret to use
- type: str
- consumer_key:
- required: true
- description:
- - The consumer key to use
- type: str
- timeout:
- default: 120
- description:
- - The timeout in seconds used to wait for a task to be
- completed.
- type: int
-
-'''
-
-EXAMPLES = '''
-- name: Add or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
- name: ip-1.1.1.1
- backend: 212.1.1.1
- state: present
- probe: none
- weight: 8
- endpoint: ovh-eu
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-
-- name: Remove a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
- name: ip-1.1.1.1
- backend: 212.1.1.1
- state: absent
- endpoint: ovh-eu
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-'''
-
-RETURN = '''
-'''
-
-import time
-
-try:
- import ovh
- import ovh.exceptions
- from ovh.exceptions import APIError
- HAS_OVH = True
-except ImportError:
- HAS_OVH = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def getOvhClient(ansibleModule):
- endpoint = ansibleModule.params.get('endpoint')
- application_key = ansibleModule.params.get('application_key')
- application_secret = ansibleModule.params.get('application_secret')
- consumer_key = ansibleModule.params.get('consumer_key')
-
- return ovh.Client(
- endpoint=endpoint,
- application_key=application_key,
- application_secret=application_secret,
- consumer_key=consumer_key
- )
-
-
-def waitForNoTask(client, name, timeout):
- currentTimeout = timeout
- while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
- time.sleep(1) # Delay for 1 sec
- currentTimeout -= 1
- if currentTimeout < 0:
- return False
- return True
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- backend=dict(required=True),
- weight=dict(default=8, type='int'),
- probe=dict(default='none',
- choices=['none', 'http', 'icmp', 'oco']),
- state=dict(default='present', choices=['present', 'absent']),
- endpoint=dict(required=True),
- application_key=dict(required=True, no_log=True),
- application_secret=dict(required=True, no_log=True),
- consumer_key=dict(required=True, no_log=True),
- timeout=dict(default=120, type='int')
- )
- )
-
- if not HAS_OVH:
- module.fail_json(msg='ovh-api python module '
- 'is required to run this module')
-
- # Get parameters
- name = module.params.get('name')
- 
state = module.params.get('state') - backend = module.params.get('backend') - weight = module.params.get('weight') - probe = module.params.get('probe') - timeout = module.params.get('timeout') - - # Connect to OVH API - client = getOvhClient(module) - - # Check that the load balancing exists - try: - loadBalancings = client.get('/ip/loadBalancing') - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of loadBalancing, ' - 'check application key, secret, consumerkey and parameters. ' - 'Error returned by OVH api was : {0}'.format(apiError)) - - if name not in loadBalancings: - module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name)) - - # Check that no task is pending before going on - try: - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for no pending ' - 'tasks before executing the module '.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of pending tasks ' - 'of the loadBalancing, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - try: - backends = client.get('/ip/loadBalancing/{0}/backend'.format(name)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of backends ' - 'of the loadBalancing, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - backendExists = backend in backends - moduleChanged = False - if state == "absent": - if backendExists: - # Remove backend - try: - client.delete( - '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion ' - 'of removing backend task'.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for deleting the backend, ' - 'check application key, secret, consumerkey and ' - 'parameters. Error returned by OVH api was : {0}' - .format(apiError)) - moduleChanged = True - else: - if backendExists: - # Get properties - try: - backendProperties = client.get( - '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the backend properties, ' - 'check application key, secret, consumerkey and ' - 'parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - if (backendProperties['weight'] != weight): - # Change weight - try: - client.post( - '/ip/loadBalancing/{0}/backend/{1}/setWeight' - .format(name, backend), weight=weight) - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion ' - 'of setWeight to backend task' - .format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for updating the weight of the ' - 'backend, check application key, secret, consumerkey ' - 'and parameters. 
Error returned by OVH api was : {0}'
- .format(apiError))
- moduleChanged = True
-
- if (backendProperties['probe'] != probe):
- # Change probe
- backendProperties['probe'] = probe
- try:
- client.put(
- '/ip/loadBalancing/{0}/backend/{1}'
- .format(name, backend), probe=probe)
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion of '
- 'setProbe to backend task'
- .format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for updating the probe of '
- 'the backend, check application key, secret, '
- 'consumerkey and parameters. Error returned by OVH api '
- 'was : {0}'
- .format(apiError))
- moduleChanged = True
-
- else:
- # Create backend
- try:
- try:
- client.post('/ip/loadBalancing/{0}/backend'.format(name),
- ipBackend=backend, probe=probe, weight=weight)
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for creating the backend, check '
- 'application key, secret, consumerkey and parameters. '
- 'Error returned by OVH api was : {0}'
- .format(apiError))
-
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion of '
- 'backend creation task'.format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for creating the backend, check '
- 'application key, secret, consumerkey and parameters. '
- 'Error returned by OVH api was : {0}'.format(apiError))
- moduleChanged = True
-
- module.exit_json(changed=moduleChanged)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/plugins/modules/cloud/ovh/ovh_monthly_billing.py
deleted file mode 100644
index 75c70a79ec..0000000000
--- a/plugins/modules/cloud/ovh/ovh_monthly_billing.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Francois Lallart (@fraff)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: ovh_monthly_billing
-author: Francois Lallart (@fraff)
-version_added: '0.2.0'
-short_description: Manage OVH monthly billing
-description:
- - Enable monthly billing on OVH cloud instances (be aware that OVH does not allow disabling it). 
-requirements: [ "ovh" ] -options: - project_id: - required: true - type: str - description: - - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET) - instance_id: - required: true - type: str - description: - - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET) - endpoint: - type: str - description: - - The endpoint to use (for instance ovh-eu) - application_key: - type: str - description: - - The applicationKey to use - application_secret: - type: str - description: - - The application secret to use - consumer_key: - type: str - description: - - The consumer key to use -''' - -EXAMPLES = ''' -- name: Basic usage, using auth from /etc/ovh.conf - community.general.ovh_monthly_billing: - project_id: 0c727a20aa144485b70c44dee9123b46 - instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948 - -# Get openstack cloud ID and instance ID, OVH use them in its API -- name: Get openstack cloud ID and instance ID - os_server_info: - cloud: myProjectName - region_name: myRegionName - server: myServerName - register: openstack_servers - -- name: Use IDs - community.general.ovh_monthly_billing: - project_id: "{{ openstack_servers.0.tenant_id }}" - instance_id: "{{ openstack_servers.0.id }}" - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -''' - -RETURN = ''' -''' - -import os -import sys -import traceback - -try: - import ovh - import ovh.exceptions - from ovh.exceptions import APIError - HAS_OVH = True -except ImportError: - HAS_OVH = False - OVH_IMPORT_ERROR = traceback.format_exc() - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - project_id=dict(required=True), - instance_id=dict(required=True), - endpoint=dict(required=False), - application_key=dict(required=False, no_log=True), - application_secret=dict(required=False, no_log=True), - consumer_key=dict(required=False, no_log=True), - ), - supports_check_mode=True - ) - - # Get parameters - project_id = module.params.get('project_id') - instance_id = module.params.get('instance_id') - endpoint = module.params.get('endpoint') - application_key = module.params.get('application_key') - application_secret = module.params.get('application_secret') - consumer_key = module.params.get('consumer_key') - project = "" - instance = "" - ovh_billing_status = "" - - if not HAS_OVH: - module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh') - - # Connect to OVH API - client = ovh.Client( - endpoint=endpoint, - application_key=application_key, - application_secret=application_secret, - consumer_key=consumer_key - ) - - # Check that the instance exists - try: - project = client.get('/cloud/project/{0}'.format(project_id)) - except ovh.exceptions.ResourceNotFoundError: - module.fail_json(msg='project {0} does not exist'.format(project_id)) - - # Check that the instance exists - try: - instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id)) - except ovh.exceptions.ResourceNotFoundError: - module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id)) - - # Is monthlyBilling already enabled or pending ? 
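The branch below is the module's idempotency and check-mode core: exit with `changed=False` when billing is already active or pending activation, report a would-be change under check mode, and only then issue the API call. A condensed sketch of that flow, with a hypothetical helper name not found in the module:

```python
def ensure_monthly_billing(module, client, project_id, instance_id, instance):
    # Already enabled (or being enabled): nothing to change.
    billing = instance.get('monthlyBilling')
    if billing and billing['status'] in ('ok', 'activationPending'):
        module.exit_json(changed=False, ovh_billing_status=billing)

    # Check mode: report that a change would be made, without calling the API.
    if module.check_mode:
        module.exit_json(changed=True, msg="Dry Run!")

    # Real run: a single POST enables monthly billing (irreversible on OVH).
    result = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'
                         .format(project_id, instance_id))
    module.exit_json(changed=True, ovh_billing_status=result['monthlyBilling'])
```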
- if instance['monthlyBilling'] is not None: - if instance['monthlyBilling']['status'] in ['ok', 'activationPending']: - module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling']) - - if module.check_mode: - module.exit_json(changed=True, msg="Dry Run!") - - try: - ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id)) - module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling']) - except APIError as apiError: - module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError)) - - # We should never reach here - module.fail_json(msg='Internal ovh_monthly_billing module error') - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py deleted file mode 100644 index 5912a6f46a..0000000000 --- a/plugins/modules/cloud/packet/packet_device.py +++ /dev/null @@ -1,670 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Tomas Karasek -# (c) 2016, Matt Baldwin -# (c) 2016, Thibaud Morel l'Horset -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: packet_device - -short_description: Manage a bare metal server in the Packet Host. - -description: - - Manage a bare metal server in the Packet Host (a "device" in the API terms). - - When the machine is created it can optionally wait for public IP address, or for active state. - - This module has a dependency on packet >= 1.0. - - API is documented at U(https://www.packet.net/developers/api/devices). - - -author: - - Tomas Karasek (@t0mk) - - Matt Baldwin (@baldwinSPC) - - Thibaud Morel l'Horset (@teebes) - -options: - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - count: - description: - - The number of devices to create. Count number can be included in hostname via the %d string formatter. - default: 1 - type: int - - count_offset: - description: - - From which number to start the count. - default: 1 - type: int - - device_ids: - description: - - List of device IDs on which to operate. - type: list - elements: str - - tags: - description: - - List of device tags. - - Currently implemented only for device creation. - type: list - elements: str - version_added: '0.2.0' - - facility: - description: - - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). - type: str - - features: - description: - - Dict with "features" for device creation. See Packet API docs for details. - type: dict - - hostnames: - description: - - A hostname of a device, or a list of hostnames. - - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). - - If only one hostname, it might be expanded to list if I(count)>1. - aliases: [name] - type: list - elements: str - - locked: - description: - - Whether to lock a created device. - default: false - aliases: [lock] - type: bool - - operating_system: - description: - - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). - type: str - - plan: - description: - - Plan slug for device creation. 
See Packet API for current list - U(https://www.packet.net/developers/api/plans/). - type: str - - project_id: - description: - - ID of project of the device. - required: true - type: str - - state: - description: - - Desired state of the device. - - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns. - - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). - choices: [present, absent, active, inactive, rebooted] - default: present - type: str - - user_data: - description: - - Userdata blob made available to the machine - type: str - - wait_for_public_IPv: - description: - - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. - - If set to 4, it will wait until IPv4 is assigned to the instance. - - If set to 6, wait until public IPv6 is assigned to the instance. - choices: [4,6] - type: int - - wait_timeout: - description: - - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). - - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. - default: 900 - type: int - - ipxe_script_url: - description: - - URL of custom iPXE script for provisioning. - - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). - type: str - - always_pxe: - description: - - Persist PXE as the first boot option. - - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE. - default: false - type: bool - - -requirements: - - "packet-python >= 1.35" - -notes: - - Doesn't support check mode. - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass it to the auth_token parameter of the module instead. - -# Creating devices - -- name: Create 1 device - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - tags: ci-xyz - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - -# Create the same device and wait until it is in state "active", (when it's -# ready for other API operations). Fail if the device is not "active" in -# 10 minutes. 
- -- name: Create device and wait up to 10 minutes for active state - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - wait_timeout: 600 - -- name: Create 3 ubuntu devices called server-01, server-02 and server-03 - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: server-%02d - count: 3 - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - -- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH - hosts: localhost - tasks: - - name: Create 3 devices and register their facts - community.general.packet_device: - hostnames: [coreos-one, coreos-two, coreos-three] - operating_system: coreos_stable - plan: baremetal_0 - facility: ewr1 - locked: true - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - wait_for_public_IPv: 4 - user_data: | - #cloud-config - ssh_authorized_keys: - - {{ lookup('file', 'my_packet_sshkey') }} - coreos: - etcd: - discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 - addr: $private_ipv4:4001 - peer-addr: $private_ipv4:7001 - fleet: - public-ip: $private_ipv4 - units: - - name: etcd.service - command: start - - name: fleet.service - command: start - register: newhosts - - - name: Wait for ssh - ansible.builtin.wait_for: - delay: 1 - host: "{{ item.public_ipv4 }}" - port: 22 - state: started - timeout: 500 - with_items: "{{ newhosts.devices }}" - - -# Other states of devices - -- name: Remove 3 devices by uuid - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - state: absent - device_ids: - - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 - - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 - - 6bb4faf8-a638-4ac7-8f47-86fe514c301f -''' - -RETURN = ''' -changed: - description: True if a device was altered in any way (created, modified or removed) - type: bool - sample: True - returned: success - -devices: - description: Information about each device that was processed - type: list - sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", - "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", - "tags": [], "locked": false, "state": "provisioning", - "public_ipv6": ""2604:1380:2:5200::3"}]' - returned: success -''' # NOQA - - -import os -import re -import time -import uuid -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') -HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) -MAX_DEVICES = 100 - -PACKET_DEVICE_STATES = ( - 'queued', - 'provisioning', - 'failed', - 'powering_on', - 'active', - 'powering_off', - 'inactive', - 'rebooting', -) - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present'] - - -def serialize_device(device): - """ - Standard representation for a device as returned by various tasks:: - - { - 'id': 'device_id' - 'hostname': 'device_hostname', - 'tags': [], - 'locked': false, - 'state': 'provisioning', - 'ip_addresses': [ - { - "address": "147.75.194.227", - "address_family": 4, - 
"public": true - }, - { - "address": "2604:1380:2:5200::3", - "address_family": 6, - "public": true - }, - { - "address": "10.100.11.129", - "address_family": 4, - "public": false - } - ], - "private_ipv4": "10.100.11.129", - "public_ipv4": "147.75.194.227", - "public_ipv6": "2604:1380:2:5200::3", - } - - """ - device_data = {} - device_data['id'] = device.id - device_data['hostname'] = device.hostname - device_data['tags'] = device.tags - device_data['locked'] = device.locked - device_data['state'] = device.state - device_data['ip_addresses'] = [ - { - 'address': addr_data['address'], - 'address_family': addr_data['address_family'], - 'public': addr_data['public'], - } - for addr_data in device.ip_addresses - ] - # Also include each IPs as a key for easier lookup in roles. - # Key names: - # - public_ipv4 - # - public_ipv6 - # - private_ipv4 - # - private_ipv6 (if there is one) - for ipdata in device_data['ip_addresses']: - if ipdata['public']: - if ipdata['address_family'] == 6: - device_data['public_ipv6'] = ipdata['address'] - elif ipdata['address_family'] == 4: - device_data['public_ipv4'] = ipdata['address'] - elif not ipdata['public']: - if ipdata['address_family'] == 6: - # Packet doesn't give public ipv6 yet, but maybe one - # day they will - device_data['private_ipv6'] = ipdata['address'] - elif ipdata['address_family'] == 4: - device_data['private_ipv4'] = ipdata['address'] - return device_data - - -def is_valid_hostname(hostname): - return re.match(HOSTNAME_RE, hostname) is not None - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def listify_string_name_or_id(s): - if ',' in s: - return s.split(',') - else: - return [s] - - -def get_hostname_list(module): - # hostname is a list-typed param, so I guess it should return list - # (and it does, in Ansible 2.2.1) but in order to be defensive, - # I keep here the code to convert an eventual string to list - hostnames = module.params.get('hostnames') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - if isinstance(hostnames, str): - hostnames = listify_string_name_or_id(hostnames) - if not isinstance(hostnames, list): - raise Exception("name %s is not convertible to list" % hostnames) - - # at this point, hostnames is a list - hostnames = [h.strip() for h in hostnames] - - if (len(hostnames) > 1) and (count > 1): - _msg = ("If you set count>1, you should only specify one hostname " - "with the %d formatter, not a list of hostnames.") - raise Exception(_msg) - - if (len(hostnames) == 1) and (count > 0): - hostname_spec = hostnames[0] - count_range = range(count_offset, count_offset + count) - if re.search(r"%\d{0,2}d", hostname_spec): - hostnames = [hostname_spec % i for i in count_range] - elif count > 1: - hostname_spec = '%s%%02d' % hostname_spec - hostnames = [hostname_spec % i for i in count_range] - - for hn in hostnames: - if not is_valid_hostname(hn): - raise Exception("Hostname '%s' does not seem to be valid" % hn) - - if len(hostnames) > MAX_DEVICES: - raise Exception("You specified too many hostnames, max is %d" % - MAX_DEVICES) - return hostnames - - -def get_device_id_list(module): - device_ids = module.params.get('device_ids') - - if isinstance(device_ids, str): - device_ids = listify_string_name_or_id(device_ids) - - device_ids = [di.strip() for di in device_ids] - - for di in device_ids: - if not is_valid_uuid(di): - raise Exception("Device ID '%s' does not seem to be valid" % di) - - 
if len(device_ids) > MAX_DEVICES: - raise Exception("You specified too many devices, max is %d" % - MAX_DEVICES) - return device_ids - - -def create_single_device(module, packet_conn, hostname): - - for param in ('hostnames', 'operating_system', 'plan'): - if not module.params.get(param): - raise Exception("%s parameter is required for new device." - % param) - project_id = module.params.get('project_id') - plan = module.params.get('plan') - tags = module.params.get('tags') - user_data = module.params.get('user_data') - facility = module.params.get('facility') - operating_system = module.params.get('operating_system') - locked = module.params.get('locked') - ipxe_script_url = module.params.get('ipxe_script_url') - always_pxe = module.params.get('always_pxe') - if operating_system != 'custom_ipxe': - for param in ('ipxe_script_url', 'always_pxe'): - if module.params.get(param): - raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param) - - device = packet_conn.create_device( - project_id=project_id, - hostname=hostname, - tags=tags, - plan=plan, - facility=facility, - operating_system=operating_system, - userdata=user_data, - locked=locked, - ipxe_script_url=ipxe_script_url, - always_pxe=always_pxe) - return device - - -def refresh_device_list(module, packet_conn, devices): - device_ids = [d.id for d in devices] - new_device_list = get_existing_devices(module, packet_conn) - return [d for d in new_device_list if d.id in device_ids] - - -def wait_for_devices_active(module, packet_conn, watched_devices): - wait_timeout = module.params.get('wait_timeout') - wait_timeout = time.time() + wait_timeout - refreshed = watched_devices - while wait_timeout > time.time(): - refreshed = refresh_device_list(module, packet_conn, watched_devices) - if all(d.state == 'active' for d in refreshed): - return refreshed - time.sleep(5) - raise Exception("Waiting for state \"active\" timed out for devices: %s" - % [d.hostname for d in refreshed if d.state != "active"]) - - -def wait_for_public_IPv(module, packet_conn, created_devices): - - def has_public_ip(addr_list, ip_v): - return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) - - def all_have_public_ip(ds, ip_v): - return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) - - address_family = module.params.get('wait_for_public_IPv') - - wait_timeout = module.params.get('wait_timeout') - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - refreshed = refresh_device_list(module, packet_conn, created_devices) - if all_have_public_ip(refreshed, address_family): - return refreshed - time.sleep(5) - - raise Exception("Waiting for IPv%d address timed out. 
Hostnames: %s" - % (address_family, [d.hostname for d in created_devices])) - - -def get_existing_devices(module, packet_conn): - project_id = module.params.get('project_id') - return packet_conn.list_devices( - project_id, params={ - 'per_page': MAX_DEVICES}) - - -def get_specified_device_identifiers(module): - if module.params.get('device_ids'): - device_id_list = get_device_id_list(module) - return {'ids': device_id_list, 'hostnames': []} - elif module.params.get('hostnames'): - hostname_list = get_hostname_list(module) - return {'hostnames': hostname_list, 'ids': []} - - -def act_on_devices(module, packet_conn, target_state): - specified_identifiers = get_specified_device_identifiers(module) - existing_devices = get_existing_devices(module, packet_conn) - changed = False - create_hostnames = [] - if target_state in ['present', 'active', 'rebooted']: - # states where we might create non-existing specified devices - existing_devices_names = [ed.hostname for ed in existing_devices] - create_hostnames = [hn for hn in specified_identifiers['hostnames'] - if hn not in existing_devices_names] - - process_devices = [d for d in existing_devices - if (d.id in specified_identifiers['ids']) or - (d.hostname in specified_identifiers['hostnames'])] - - if target_state != 'present': - _absent_state_map = {} - for s in PACKET_DEVICE_STATES: - _absent_state_map[s] = packet.Device.delete - - state_map = { - 'absent': _absent_state_map, - 'active': {'inactive': packet.Device.power_on, - 'provisioning': None, 'rebooting': None - }, - 'inactive': {'active': packet.Device.power_off}, - 'rebooted': {'active': packet.Device.reboot, - 'inactive': packet.Device.power_on, - 'provisioning': None, 'rebooting': None - }, - } - - # First do non-creation actions, it might be faster - for d in process_devices: - if d.state == target_state: - continue - if d.state in state_map[target_state]: - api_operation = state_map[target_state].get(d.state) - if api_operation is not None: - api_operation(d) - changed = True - else: - _msg = ( - "I don't know how to process existing device %s from state %s " - "to state %s" % - (d.hostname, d.state, target_state)) - raise Exception(_msg) - - # At last create missing devices - created_devices = [] - if create_hostnames: - created_devices = [create_single_device(module, packet_conn, n) - for n in create_hostnames] - if module.params.get('wait_for_public_IPv'): - created_devices = wait_for_public_IPv( - module, packet_conn, created_devices) - changed = True - - processed_devices = created_devices + process_devices - if target_state == 'active': - processed_devices = wait_for_devices_active( - module, packet_conn, processed_devices) - - return { - 'changed': changed, - 'devices': [serialize_device(d) for d in processed_devices] - } - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), - no_log=True), - count=dict(type='int', default=1), - count_offset=dict(type='int', default=1), - device_ids=dict(type='list', elements='str'), - facility=dict(), - features=dict(type='dict'), - hostnames=dict(type='list', elements='str', aliases=['name']), - tags=dict(type='list', elements='str'), - locked=dict(type='bool', default=False, aliases=['lock']), - operating_system=dict(), - plan=dict(), - project_id=dict(required=True), - state=dict(choices=ALLOWED_STATES, default='present'), - user_data=dict(default=None), - wait_for_public_IPv=dict(type='int', choices=[4, 6]), - wait_timeout=dict(type='int', default=900), - 
ipxe_script_url=dict(default=''), - always_pxe=dict(type='bool', default=False), - ), - required_one_of=[('device_ids', 'hostnames',)], - mutually_exclusive=[ - ('hostnames', 'device_ids'), - ('count', 'device_ids'), - ('count_offset', 'device_ids'), - ] - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable %s, " - "the auth_token parameter is required" % - PACKET_API_TOKEN_ENV_VAR) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - try: - module.exit_json(**act_on_devices(module, packet_conn, state)) - except Exception as e: - module.fail_json(msg='failed to set device state %s, error: %s' % - (state, to_native(e)), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/packet/packet_ip_subnet.py b/plugins/modules/cloud/packet/packet_ip_subnet.py deleted file mode 100644 index 718de36f22..0000000000 --- a/plugins/modules/cloud/packet/packet_ip_subnet.py +++ /dev/null @@ -1,326 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: packet_ip_subnet - -short_description: Assign IP subnet to a bare metal server. - -description: - - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. - - IPv4 subnets must come from already reserved block. - - IPv6 subnets must come from publicly routable /56 block from your project. - - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - hostname: - description: - - A hostname of a device to/from which to assign/remove a subnet. - required: False - type: str - - device_id: - description: - - UUID of a device to/from which to assign/remove a subnet. - required: False - type: str - - project_id: - description: - - UUID of a project of the device to/from which to assign/remove a subnet. - type: str - - device_count: - description: - - The number of devices to retrieve from the project. The max allowed value is 1000. - - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info. - default: 100 - type: int - - cidr: - description: - - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host. - aliases: [name] - type: str - required: true - - state: - description: - - Desired state of the IP subnet on the specified device. - - With state == C(present), you must specify either hostname or device_id. Subnet with given CIDR will then be assigned to the specified device. - - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from specified devices. - - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to. 
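# Note: the release-from-any-device case is handled by act_on_assignment below, which walks all project devices looking for the given CIDR.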
- choices: ['present', 'absent'] - default: 'present' - type: str - -requirements: - - "packet-python >= 1.35" - - "python >= 2.6" -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass it to the auth_token parameter of the module instead. - -- name: Create 1 device and assign an arbitrary public IPv4 subnet to it - hosts: localhost - tasks: - - - packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - -# Pick an IPv4 address from a block allocated to your project. - - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostname: myserver - cidr: "147.75.201.78/32" - -# Release IP address 147.75.201.78 - -- name: Unassign IP address from any device in your project - hosts: localhost - tasks: - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - cidr: "147.75.201.78/32" - state: absent -''' - -RETURN = ''' -changed: - description: True if an IP address assignment was altered in any way (created or removed). - type: bool - sample: True - returned: success - -device_id: - type: str - description: UUID of the device associated with the specified IP address. - returned: success - -subnet: - description: Dict with data about the handled IP subnet. - type: dict - sample: - address: 147.75.90.241 - address_family: 4 - assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 } - cidr: 31 - created_at: '2017-08-07T15:15:30Z' - enabled: True - gateway: 147.75.90.240 - href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f - id: 31eda960-0a16-4c0f-b196-f3dc4928529f - manageable: True - management: True - netmask: 255.255.255.254 - network: 147.75.90.240 - public: True - returned: success -''' - - -import uuid -import re - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') -HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) -PROJECT_MAX_DEVICES = 100 - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -ALLOWED_STATES = ['absent', 'present'] - - -def is_valid_hostname(hostname): - return re.match(HOSTNAME_RE, hostname) is not None - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def get_existing_devices(module, packet_conn): - project_id = module.params.get('project_id') - if not is_valid_uuid(project_id): - raise Exception("Project ID {0} does not seem to be valid".format(project_id)) - - per_page = module.params.get('device_count') - return packet_conn.list_devices( - project_id, params={'per_page': per_page}) - - -def get_specified_device_identifiers(module): - if module.params.get('device_id'): - _d_id = module.params.get('device_id') - if not is_valid_uuid(_d_id): - raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id)) - return {'device_id': _d_id, 'hostname': None} - elif module.params.get('hostname'): - _hn = module.params.get('hostname') - if not is_valid_hostname(_hn): - raise Exception("Hostname '{0}' does not seem to be valid".format(_hn)) - return {'hostname': _hn, 'device_id': None} - else: - return {'hostname': None, 'device_id': None} - - -def 
parse_subnet_cidr(cidr): - if "/" not in cidr: - raise Exception("CIDR expression in wrong format, must be address/prefix_len") - addr, prefixlen = cidr.split("/") - try: - prefixlen = int(prefixlen) - except ValueError: - raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr)) - return addr, prefixlen - - -def act_on_assignment(target_state, module, packet_conn): - return_dict = {'changed': False} - specified_cidr = module.params.get("cidr") - address, prefixlen = parse_subnet_cidr(specified_cidr) - - specified_identifier = get_specified_device_identifiers(module) - - if module.check_mode: - return return_dict - - if (specified_identifier['hostname'] is None) and ( - specified_identifier['device_id'] is None): - if target_state == 'absent': - # The special case to release the IP from any assignment - for d in get_existing_devices(module, packet_conn): - for ia in d.ip_addresses: - if address == ia['address'] and prefixlen == ia['cidr']: - packet_conn.call_api(ia['href'], "DELETE") - return_dict['changed'] = True - return_dict['subnet'] = ia - return_dict['device_id'] = d.id - return return_dict - raise Exception("If you assign an address, you must specify either " - "target device ID or target unique hostname.") - - if specified_identifier['device_id'] is not None: - device = packet_conn.get_device(specified_identifier['device_id']) - else: - all_devices = get_existing_devices(module, packet_conn) - hn = specified_identifier['hostname'] - matching_devices = [d for d in all_devices if d.hostname == hn] - if len(matching_devices) > 1: - raise Exception("There is more than one device matching given hostname {0}".format(hn)) - if len(matching_devices) == 0: - raise Exception("There is no device matching given hostname {0}".format(hn)) - device = matching_devices[0] - - return_dict['device_id'] = device.id - assignment_dicts = [i for i in device.ip_addresses - if i['address'] == address and i['cidr'] == prefixlen] - if len(assignment_dicts) > 1: - raise Exception("IP address {0} is assigned more than once for device {1}".format( - specified_cidr, device.hostname)) - - if target_state == "absent": - if len(assignment_dicts) == 1: - packet_conn.call_api(assignment_dicts[0]['href'], "DELETE") - return_dict['subnet'] = assignment_dicts[0] - return_dict['changed'] = True - elif target_state == "present": - if len(assignment_dicts) == 0: - new_assignment = packet_conn.call_api( - "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)}) - return_dict['changed'] = True - return_dict['subnet'] = new_assignment - return return_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - device_id=dict(type='str'), - hostname=dict(type='str'), - project_id=dict(type='str'), - device_count=dict(type='int', default=PROJECT_MAX_DEVICES), - cidr=dict(type='str', required=True, aliases=['name']), - state=dict(choices=ALLOWED_STATES, default='present'), - ), - supports_check_mode=True, - mutually_exclusive=[('hostname', 'device_id')], - required_one_of=[['hostname', 'device_id', 'project_id']], - required_by=dict( - hostname=('project_id',), - ), - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - 
module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - try: - module.exit_json(**act_on_assignment(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/packet/packet_project.py b/plugins/modules/cloud/packet/packet_project.py deleted file mode 100644 index c6502c6ea6..0000000000 --- a/plugins/modules/cloud/packet/packet_project.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2019, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: packet_project - -short_description: Create/delete a project in Packet host. - -description: - - Create/delete a project in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#projects). - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - type: str - - payment_method: - description: - - Payment method is name of one of the payment methods available to your user. - - When blank, the API assumes the default payment method. - type: str - - auth_token: - description: - - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - name: - description: - - Name for/of the project. - type: str - - org_id: - description: - - UUID of the organization to create a project for. - - When blank, the API assumes the default organization. - type: str - - id: - description: - - UUID of the project which you want to remove. - type: str - - custom_data: - description: - - Custom data about the project to create. - type: str - -requirements: - - "python >= 2.6" - - "packet-python >= 1.40" - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. - -- name: Create new project - hosts: localhost - tasks: - community.general.packet_project: - name: "new project" - -- name: Create new project within non-default organization - hosts: localhost - tasks: - community.general.packet_project: - name: "my org project" - org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0 - -- name: Remove project by id - hosts: localhost - tasks: - community.general.packet_project: - state: absent - id: eef49903-7a09-4ca1-af67-4087c29ab5b6 - -- name: Create new project with non-default billing method - hosts: localhost - tasks: - community.general.packet_project: - name: "newer project" - payment_method: "the other visa" -''' - -RETURN = ''' -changed: - description: True if a project was created or removed. - type: bool - sample: True - returned: success - -name: - description: Name of addressed project. - type: str - returned: success - -id: - description: UUID of addressed project. 
- type: str - returned: success -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -def act_on_project(target_state, module, packet_conn): - result_dict = {'changed': False} - given_id = module.params.get('id') - given_name = module.params.get('name') - if given_id: - matching_projects = [ - p for p in packet_conn.list_projects() if given_id == p.id] - else: - matching_projects = [ - p for p in packet_conn.list_projects() if given_name == p.name] - - if target_state == 'present': - if len(matching_projects) == 0: - org_id = module.params.get('org_id') - custom_data = module.params.get('custom_data') - payment_method = module.params.get('payment_method') - - if not org_id: - params = { - "name": given_name, - "payment_method_id": payment_method, - "customdata": custom_data - } - new_project_data = packet_conn.call_api("projects", "POST", params) - new_project = packet.Project(new_project_data, packet_conn) - else: - new_project = packet_conn.create_organization_project( - org_id=org_id, - name=given_name, - payment_method_id=payment_method, - customdata=custom_data - ) - - result_dict['changed'] = True - matching_projects.append(new_project) - - result_dict['name'] = matching_projects[0].name - result_dict['id'] = matching_projects[0].id - else: - if len(matching_projects) > 1: - _msg = ("More than one project matched for module call with state = absent: " - "{0}".format(to_native(matching_projects))) - module.fail_json(msg=_msg) - - if len(matching_projects) == 1: - p = matching_projects[0] - result_dict['name'] = p.name - result_dict['id'] = p.id - result_dict['changed'] = True - try: - p.delete() - except Exception as e: - _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format( - p.name, p.id, to_native(e))) - module.fail_json(msg=_msg) - return result_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - name=dict(type='str'), - id=dict(type='str'), - org_id=dict(type='str'), - payment_method=dict(type='str'), - custom_data=dict(type='str'), - ), - supports_check_mode=True, - required_one_of=[("name", "id",)], - mutually_exclusive=[ - ('name', 'id'), - ] - ) - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in ['present', 'absent']: - if module.check_mode: - module.exit_json(changed=False) - - try: - module.exit_json(**act_on_project(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set project state {0}: {1}".format(state, to_native(e))) - else: - module.fail_json(msg="{0} is not a valid state for this module".format(state)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py deleted file mode 100644 
index 4800718fd0..0000000000 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright 2016 Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: packet_sshkey -short_description: Create/delete an SSH key in Packet host. -description: - - Create/delete an SSH key in Packet host. - - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). -author: "Tomas Karasek (@t0mk) " -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - type: str - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - label: - description: - - Label for the key. If you keep it empty, it will be read from key string. - type: str - aliases: [name] - id: - description: - - UUID of the key which you want to remove. - type: str - fingerprint: - description: - - Fingerprint of the key which you want to remove. - type: str - key: - description: - - Public Key string ({type} {base64 encoded key} {description}). - type: str - key_file: - description: - - File with the public key. - type: path - -requirements: - - "python >= 2.6" - - packet-python - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. - -- name: Create sshkey from string - hosts: localhost - tasks: - community.general.packet_sshkey: - key: "{{ lookup('file', 'my_packet_sshkey.pub') }}" - -- name: Create sshkey from file - hosts: localhost - tasks: - community.general.packet_sshkey: - label: key from file - key_file: ~/ff.pub - -- name: Remove sshkey by id - hosts: localhost - tasks: - community.general.packet_sshkey: - state: absent - id: eef49903-7a09-4ca1-af67-4087c29ab5b6 -''' - -RETURN = ''' -changed: - description: True if a sshkey was created or removed. - type: bool - sample: True - returned: always -sshkeys: - description: Information about sshkeys that were created/removed. 
- type: list - sample: [ - { - "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", - "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2", - "label": "mynewkey33" - } - ] - returned: always -''' # NOQA - -import os -import uuid - -from ansible.module_utils.basic import AnsibleModule - -HAS_PACKET_SDK = True -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -def serialize_sshkey(sshkey): - sshkey_data = {} - copy_keys = ['id', 'key', 'label', 'fingerprint'] - for name in copy_keys: - sshkey_data[name] = getattr(sshkey, name) - return sshkey_data - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def load_key_string(key_str): - ret_dict = {} - key_str = key_str.strip() - ret_dict['key'] = key_str - cut_key = key_str.split() - if len(cut_key) in [2, 3]: - if len(cut_key) == 3: - ret_dict['label'] = cut_key[2] - else: - raise Exception("Public key %s is in wrong format" % key_str) - return ret_dict - - -def get_sshkey_selector(module): - key_id = module.params.get('id') - if key_id: - if not is_valid_uuid(key_id): - raise Exception("sshkey ID %s is not valid UUID" % key_id) - selecting_fields = ['label', 'fingerprint', 'id', 'key'] - select_dict = {} - for f in selecting_fields: - if module.params.get(f) is not None: - select_dict[f] = module.params.get(f) - - if module.params.get('key_file'): - with open(module.params.get('key_file')) as _file: - loaded_key = load_key_string(_file.read()) - select_dict['key'] = loaded_key['key'] - if module.params.get('label') is None: - if loaded_key.get('label'): - select_dict['label'] = loaded_key['label'] - - def selector(k): - if 'key' in select_dict: - # if key string is specified, compare only the key strings - return k.key == select_dict['key'] - else: - # if key string not specified, all the fields must match - return all(select_dict[f] == getattr(k, f) for f in select_dict) - return selector - - -def act_on_sshkeys(target_state, module, packet_conn): - selector = get_sshkey_selector(module) - existing_sshkeys = packet_conn.list_ssh_keys() - matching_sshkeys = list(filter(selector, existing_sshkeys)) - changed = False - if target_state == 'present': - if matching_sshkeys == []: - # there is no key matching the fields from module call - # => create the key with the given label and key string - newkey = {} - if module.params.get('key_file'): - with open(module.params.get('key_file')) as f: - newkey = load_key_string(f.read()) - if module.params.get('key'): - newkey = load_key_string(module.params.get('key')) - if module.params.get('label'): - newkey['label'] = module.params.get('label') - for param in ('label', 'key'): - if param not in newkey: - _msg = ("If you want to ensure a key is present, you must " - "supply both a label and a key string, either in " - "module params, or in a key file. 
%s is missing" - % param) - raise Exception(_msg) - matching_sshkeys = [] - new_key_response = packet_conn.create_ssh_key( - newkey['label'], newkey['key']) - changed = True - - matching_sshkeys.append(new_key_response) - else: - # state is 'absent' => delete matching keys - for k in matching_sshkeys: - try: - k.delete() - changed = True - except Exception as e: - _msg = ("while trying to remove sshkey %s, id %s %s, " - "got error: %s" % - (k.label, k.id, target_state, e)) - raise Exception(_msg) - - return { - 'changed': changed, - 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys] - } - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), - no_log=True), - label=dict(type='str', aliases=['name'], default=None), - id=dict(type='str', default=None), - fingerprint=dict(type='str', default=None), - key=dict(type='str', default=None, no_log=True), - key_file=dict(type='path', default=None), - ), - mutually_exclusive=[ - ('label', 'id'), - ('label', 'fingerprint'), - ('id', 'fingerprint'), - ('key', 'fingerprint'), - ('key', 'id'), - ('key_file', 'key'), - ] - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable %s, " - "the auth_token parameter is required" % - PACKET_API_TOKEN_ENV_VAR) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in ['present', 'absent']: - try: - module.exit_json(**act_on_sshkeys(state, module, packet_conn)) - except Exception as e: - module.fail_json(msg='failed to set sshkey state: %s' % str(e)) - else: - module.fail_json(msg='%s is not a valid state for this module' % state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/packet/packet_volume.py b/plugins/modules/cloud/packet/packet_volume.py deleted file mode 100644 index 97c1e7498d..0000000000 --- a/plugins/modules/cloud/packet/packet_volume.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: packet_volume - -short_description: Create/delete a volume in Packet host. - -description: - - Create/delete a volume in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#volumes). - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - state: - description: - - Desired state of the volume. - default: present - choices: ['present', 'absent'] - type: str - - project_id: - description: - - ID of project of the device. - required: true - type: str - - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - name: - description: - - Selector for API-generated name of the volume - type: str - - description: - description: - - User-defined description attribute for Packet volume. - - "It is used used as idempotent identifier - if volume with given - description exists, new one is not created." 
- type: str - - id: - description: - - UUID of a volume. - type: str - - plan: - description: - - storage_1 for standard tier, storage_2 for premium (performance) tier. - - Tiers are described at U(https://www.packet.com/cloud/storage/). - choices: ['storage_1', 'storage_2'] - default: 'storage_1' - type: str - - facility: - description: - - Location of the volume. - - Volumes can only be attached to device in the same location. - type: str - - size: - description: - - Size of the volume in gigabytes. - type: int - - locked: - description: - - Create new volume locked. - type: bool - default: False - - billing_cycle: - description: - - Billing cycle for new volume. - choices: ['hourly', 'monthly'] - default: 'hourly' - type: str - - snapshot_policy: - description: - - Snapshot policy for new volume. - type: dict - - suboptions: - snapshot_count: - description: - - How many snapshots to keep, a positive integer. - required: True - type: int - - snapshot_frequency: - description: - - Frequency of snapshots. - required: True - choices: ["15min", "1hour", "1day", "1week", "1month", "1year"] - type: str - -requirements: - - "python >= 2.6" - - "packet-python >= 1.35" - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. - -- hosts: localhost - vars: - volname: testvol123 - project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b - - tasks: - - name: Create volume - community.general.packet_volume: - description: "{{ volname }}" - project_id: "{{ project_id }}" - facility: 'ewr1' - plan: 'storage_1' - state: present - size: 10 - snapshot_policy: - snapshot_count: 10 - snapshot_frequency: 1day - register: result_create - - - name: Delete volume - community.general.packet_volume: - id: "{{ result_create.id }}" - project_id: "{{ project_id }}" - state: absent -''' - -RETURN = ''' -id: - description: UUID of specified volume - type: str - returned: success - sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c -name: - description: The API-generated name of the volume resource. - type: str - returned: if volume is attached/detached to/from some device - sample: "volume-a91dc506" -description: - description: The user-defined description of the volume resource. 
- type: str - returned: success - sample: "Just another volume" -''' - -import uuid - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - -VOLUME_PLANS = ["storage_1", "storage_2"] -VOLUME_STATES = ["present", "absent"] -BILLING = ["hourly", "monthly"] - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def get_volume_selector(module): - if module.params.get('id'): - i = module.params.get('id') - if not is_valid_uuid(i): - raise Exception("Volume ID '{0}' is not a valid UUID".format(i)) - return lambda v: v['id'] == i - elif module.params.get('name'): - n = module.params.get('name') - return lambda v: v['name'] == n - elif module.params.get('description'): - d = module.params.get('description') - return lambda v: v['description'] == d - - -def get_or_fail(params, key): - item = params.get(key) - if item is None: - raise Exception("{0} must be specified for new volume".format(key)) - return item - - -def act_on_volume(target_state, module, packet_conn): - return_dict = {'changed': False} - s = get_volume_selector(module) - project_id = module.params.get("project_id") - api_method = "projects/{0}/storage".format(project_id) - all_volumes = packet_conn.call_api(api_method, "GET")['volumes'] - matching_volumes = [v for v in all_volumes if s(v)] - - if target_state == "present": - if len(matching_volumes) == 0: - params = { - "description": get_or_fail(module.params, "description"), - "size": get_or_fail(module.params, "size"), - "plan": get_or_fail(module.params, "plan"), - "facility": get_or_fail(module.params, "facility"), - "locked": get_or_fail(module.params, "locked"), - "billing_cycle": get_or_fail(module.params, "billing_cycle"), - "snapshot_policies": module.params.get("snapshot_policy"), - } - - new_volume_data = packet_conn.call_api(api_method, "POST", params) - return_dict['changed'] = True - for k in ['id', 'name', 'description']: - return_dict[k] = new_volume_data[k] - - else: - for k in ['id', 'name', 'description']: - return_dict[k] = matching_volumes[0][k] - - else: - if len(matching_volumes) > 1: - _msg = ("More than one volume matches in module call for absent state: {0}".format( - to_native(matching_volumes))) - module.fail_json(msg=_msg) - - if len(matching_volumes) == 1: - volume = matching_volumes[0] - packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE") - return_dict['changed'] = True - for k in ['id', 'name', 'description']: - return_dict[k] = volume[k] - - return return_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(type='str', default=None), - description=dict(type="str", default=None), - name=dict(type='str', default=None), - state=dict(choices=VOLUME_STATES, default="present"), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - project_id=dict(required=True), - plan=dict(choices=VOLUME_PLANS, default="storage_1"), - facility=dict(type="str"), - size=dict(type="int"), - locked=dict(type="bool", default=False), - snapshot_policy=dict(type='dict', default=None), - billing_cycle=dict(type='str', choices=BILLING, default="hourly"), - ), - supports_check_mode=True, - required_one_of=[("name", "id", "description")], - mutually_exclusive=[ - ('name', 
'id'), - ('id', 'description'), - ('name', 'description'), - ] - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in VOLUME_STATES: - if module.check_mode: - module.exit_json(changed=False) - - try: - module.exit_json(**act_on_volume(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set volume state {0}: {1}".format( - state, to_native(e))) - else: - module.fail_json(msg="{0} is not a valid state for this module".format(state)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/packet/packet_volume_attachment.py b/plugins/modules/cloud/packet/packet_volume_attachment.py deleted file mode 100644 index 9044fbcffa..0000000000 --- a/plugins/modules/cloud/packet/packet_volume_attachment.py +++ /dev/null @@ -1,298 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: packet_volume_attachment - -short_description: Attach/detach a volume to a device in the Packet host. - -description: - - Attach/detach a volume to a device in the Packet host. - - API is documented at U(https://www.packet.com/developers/api/volumes/). - - "This module creates the attachment route in the Packet API. In order to discover - the block devices on the server, you have to run the Attach Scripts, - as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - state: - description: - - Indicate desired state of the attachment. - default: present - choices: ['present', 'absent'] - type: str - - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - project_id: - description: - - UUID of the project to which the device and volume belong. - type: str - required: true - - volume: - description: - - Selector for the volume. - - It can be a UUID, an API-generated volume name, or user-defined description string. - - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' - type: str - required: true - - device: - description: - - Selector for the device. - - It can be a UUID of the device, or a hostname. - - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' - type: str - -requirements: - - "python >= 2.6" - - "packet-python >= 1.35" - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. 
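# The volume and device parameters accept either a UUID or a name: a value that parses as a canonical UUID is matched by id, anything else by API name, description, or hostname (see get_volume_selector and get_device_selector below).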
- -- hosts: localhost - - vars: - volname: testvol - devname: testdev - project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b - - tasks: - - name: Create volume - packet_volume: - description: "{{ volname }}" - project_id: "{{ project_id }}" - facility: ewr1 - plan: storage_1 - state: present - size: 10 - snapshot_policy: - snapshot_count: 10 - snapshot_frequency: 1day - - - name: Create a device - packet_device: - project_id: "{{ project_id }}" - hostnames: "{{ devname }}" - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: ewr1 - state: present - - - name: Attach testvol to testdev - community.general.packet_volume_attachment: - project_id: "{{ project_id }}" - volume: "{{ volname }}" - device: "{{ devname }}" - - - name: Detach testvol from testdev - community.general.packet_volume_attachment: - project_id: "{{ project_id }}" - volume: "{{ volname }}" - device: "{{ devname }}" - state: absent -''' - -RETURN = ''' -volume_id: - description: UUID of volume addressed by the module call. - type: str - returned: success - -device_id: - description: UUID of device addressed by the module call. - type: str - returned: success -''' - -import uuid - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - -STATES = ["present", "absent"] - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def get_volume_selector(spec): - if is_valid_uuid(spec): - return lambda v: v['id'] == spec - else: - return lambda v: v['name'] == spec or v['description'] == spec - - -def get_device_selector(spec): - if is_valid_uuid(spec): - return lambda v: v['id'] == spec - else: - return lambda v: v['hostname'] == spec - - -def do_attach(packet_conn, vol_id, dev_id): - api_method = "storage/{0}/attachments".format(vol_id) - packet_conn.call_api( - api_method, - params={"device_id": dev_id}, - type="POST") - - -def do_detach(packet_conn, vol, dev_id=None): - def dev_match(a): - return (dev_id is None) or (a['device']['id'] == dev_id) - for a in vol['attachments']: - if dev_match(a): - packet_conn.call_api(a['href'], type="DELETE") - - -def validate_selected(l, resource_type, spec): - if len(l) > 1: - _msg = ("more than one {0} matches specification {1}: {2}".format( - resource_type, spec, l)) - raise Exception(_msg) - if len(l) == 0: - _msg = "no {0} matches specification: {1}".format(resource_type, spec) - raise Exception(_msg) - - -def get_attached_dev_ids(volume_dict): - if len(volume_dict['attachments']) == 0: - return [] - else: - return [a['device']['id'] for a in volume_dict['attachments']] - - -def act_on_volume_attachment(target_state, module, packet_conn): - return_dict = {'changed': False} - volspec = module.params.get("volume") - devspec = module.params.get("device") - if devspec is None and target_state == 'present': - raise Exception("If you want to attach a volume, you must specify a device.") - project_id = module.params.get("project_id") - volumes_api_method = "projects/{0}/storage".format(project_id) - volumes = packet_conn.call_api(volumes_api_method, - params={'include': 'facility,attachments.device'})['volumes'] - v_match = get_volume_selector(volspec) - matching_volumes = [v for v in volumes if v_match(v)] - validate_selected(matching_volumes, "volume", volspec) - volume = 
matching_volumes[0] - return_dict['volume_id'] = volume['id'] - - device = None - if devspec is not None: - devices_api_method = "projects/{0}/devices".format(project_id) - devices = packet_conn.call_api(devices_api_method)['devices'] - d_match = get_device_selector(devspec) - matching_devices = [d for d in devices if d_match(d)] - validate_selected(matching_devices, "device", devspec) - device = matching_devices[0] - return_dict['device_id'] = device['id'] - - attached_device_ids = get_attached_dev_ids(volume) - - if target_state == "present": - if len(attached_device_ids) == 0: - do_attach(packet_conn, volume['id'], device['id']) - return_dict['changed'] = True - elif device['id'] not in attached_device_ids: - # Don't reattach volume which is attached to a different device. - # Rather fail than force remove a device on state == 'present'. - raise Exception("volume {0} is already attached to device {1}".format( - volume, attached_device_ids)) - else: - if device is None: - if len(attached_device_ids) > 0: - do_detach(packet_conn, volume) - return_dict['changed'] = True - elif device['id'] in attached_device_ids: - do_detach(packet_conn, volume, device['id']) - return_dict['changed'] = True - - return return_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(choices=STATES, default="present"), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - volume=dict(type="str", required=True), - project_id=dict(type="str", required=True), - device=dict(type="str"), - ), - supports_check_mode=True, - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in STATES: - if module.check_mode: - module.exit_json(changed=False) - - try: - module.exit_json( - **act_on_volume_attachment(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e))) - else: - module.fail_json(msg="{0} is not a valid state for this module".format(state)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py deleted file mode 100644 index 3a75778a08..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ /dev/null @@ -1,657 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks -short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine. -description: - - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait - for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0 -options: - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. 
- type: bool - default: 'yes' - name: - description: - - The name of the virtual machine. - type: str - image: - description: - - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. - type: str - image_password: - description: - - Password set for the administrative user. - type: str - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - datacenter: - description: - - The datacenter to provision this virtual machine. - type: str - cores: - description: - - The number of CPU cores to allocate to the virtual machine. - default: 2 - type: int - ram: - description: - - The amount of memory to allocate to the virtual machine. - default: 2048 - type: int - cpu_family: - description: - - The CPU family type to allocate to the virtual machine. - type: str - default: AMD_OPTERON - choices: [ "AMD_OPTERON", "INTEL_XEON" ] - volume_size: - description: - - The size in GB of the boot volume. - type: int - default: 10 - bus: - description: - - The bus type for the volume. - type: str - default: VIRTIO - choices: [ "IDE", "VIRTIO"] - instance_ids: - description: - - List of instance IDs, currently only used when state='absent' to remove instances. - type: list - elements: str - count: - description: - - The number of virtual machines to create. - type: int - default: 1 - location: - description: - - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. - type: str - default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] - assign_public_ip: - description: - - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created. - type: bool - default: 'no' - lan: - description: - - The ID of the LAN you wish to add the servers to. - type: int - default: 1 - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - subscription_password: - description: - - The ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - wait: - description: - - Wait for the instance to be in state 'running' before returning. - type: bool - default: 'yes' - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - remove_boot_volume: - description: - - Remove the bootVolume of the virtual machine you're destroying. - type: bool - default: 'yes' - state: - description: - - Create or terminate instances. - - 'The choices available are: C(running), C(stopped), C(absent), C(present).' - type: str - default: 'present' - disk_type: - description: - - The type of disk to be allocated. - type: str - choices: [SSD, HDD] - default: HDD - -requirements: - - "profitbricks" - - "python >= 2.6" -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' - -# Note: These examples do not set authentication details; see the subscription_user and subscription_password options above for details. 
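# Note: with auto_increment enabled (the default), a printf-style name such as web%02d.stackpointcloud.com is expanded per machine, so count: 3 yields web01, web02 and web03; a name without a format pattern gets %d appended automatically (see create_virtual_machine below).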
- -# Provisioning example -- name: Create three servers and enumerate their names - community.general.profitbricks: - datacenter: Tardis One - name: web%02d.stackpointcloud.com - cores: 4 - ram: 2048 - volume_size: 50 - cpu_family: INTEL_XEON - image: a3eae284-a2fe-11e4-b187-5f1f641608c8 - location: us/las - count: 3 - assign_public_ip: true - -- name: Remove virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: absent - -- name: Start virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: running - -- name: Stop virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: stopped -''' - -import re -import uuid -import time -import traceback - -HAS_PB_SDK = True - -try: - from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request ' + msg + ' "' + str( - promise['requestId']) + '" failed to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_machine(module, profitbricks, datacenter, name): - cores = module.params.get('cores') - ram = module.params.get('ram') - cpu_family = module.params.get('cpu_family') - volume_size = module.params.get('volume_size') - disk_type = module.params.get('disk_type') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - bus = module.params.get('bus') - lan = module.params.get('lan') - assign_public_ip = module.params.get('assign_public_ip') - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - location = module.params.get('location') - image = module.params.get('image') - assign_public_ip = module.boolean(module.params.get('assign_public_ip')) - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - if assign_public_ip: - public_found = False - - lans = profitbricks.list_lans(datacenter) - for lan in lans['items']: - if lan['properties']['public']: - public_found = True - lan = lan['id'] - - if not public_found: - i = LAN( - name='public', - public=True) - - lan_response = profitbricks.create_lan(datacenter, i) - _wait_for_completion(profitbricks, lan_response, - wait_timeout, "_create_machine") - 
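# Use the ID of the public LAN found or created above for the server's NIC built below.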
lan = lan_response['id'] - - v = Volume( - name=str(uuid.uuid4()).replace('-', '')[:10], - size=volume_size, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - bus=bus) - - n = NIC( - lan=int(lan) - ) - - s = Server( - name=name, - ram=ram, - cores=cores, - cpu_family=cpu_family, - create_volumes=[v], - nics=[n], - ) - - try: - create_server_response = profitbricks.create_server( - datacenter_id=datacenter, server=s) - - _wait_for_completion(profitbricks, create_server_response, - wait_timeout, "create_virtual_machine") - - server_response = profitbricks.get_server( - datacenter_id=datacenter, - server_id=create_server_response['id'], - depth=3 - ) - except Exception as e: - module.fail_json(msg="failed to create the new server: %s" % str(e)) - else: - return server_response - - -def _startstop_machine(module, profitbricks, datacenter_id, server_id): - state = module.params.get('state') - - try: - if state == 'running': - profitbricks.start_server(datacenter_id, server_id) - else: - profitbricks.stop_server(datacenter_id, server_id) - - return True - except Exception as e: - module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e))) - - -def _create_datacenter(module, profitbricks): - datacenter = module.params.get('datacenter') - location = module.params.get('location') - wait_timeout = module.params.get('wait_timeout') - - i = Datacenter( - name=datacenter, - location=location - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - return datacenter_response - except Exception as e: - module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) - - -def create_virtual_machine(module, profitbricks): - """ - Create new virtual machine - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object - - Returns: - True if a new virtual machine was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - lan = module.params.get('lan') - wait_timeout = module.params.get('wait_timeout') - failed = True - datacenter_found = False - - virtual_machines = [] - virtual_machine_ids = [] - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if datacenter_id: - datacenter_found = True - - if not datacenter_found: - datacenter_response = _create_datacenter(module, profitbricks) - datacenter_id = datacenter_response['id'] - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "create_virtual_machine") - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if to_native(e).startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] - - # Prefetch a list of servers for later comparison. 
- server_list = profitbricks.list_servers(datacenter_id) - for name in names: - # Skip server creation if the server already exists. - if _get_server_id(server_list, name): - continue - - create_response = _create_machine(module, profitbricks, str(datacenter_id), name) - nics = profitbricks.list_nics(datacenter_id, create_response['id']) - for n in nics['items']: - if lan == n['properties']['lan']: - create_response.update({'public_ip': n['properties']['ips'][0]}) - - virtual_machines.append(create_response) - - failed = False - - results = { - 'failed': failed, - 'machines': virtual_machines, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in virtual_machines], - } - } - - return results - - -def remove_virtual_machine(module, profitbricks): - """ - Removes a virtual machine. - - This will remove the virtual machine along with the bootVolume. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Not yet supported: handle deletion of attached data disks. - - Returns: - True if a new virtual server was deleted, false otherwise - """ - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - remove_boot_volume = module.params.get('remove_boot_volume') - changed = False - - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID for server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - # Remove the server's boot volume - if remove_boot_volume: - _remove_boot_volume(module, profitbricks, datacenter_id, server_id) - - # Remove the server - try: - server_response = profitbricks.delete_server(datacenter_id, server_id) - except Exception as e: - module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc()) - else: - changed = True - - return changed - - -def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): - """ - Remove the boot volume from the server - """ - try: - server = profitbricks.get_server(datacenter_id, server_id) - volume_id = server['properties']['bootVolume']['id'] - volume_response = profitbricks.delete_volume(datacenter_id, volume_id) - except Exception as e: - module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc()) - - -def startstop_machine(module, profitbricks, state): - """ - Starts or Stops a virtual machine. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Returns: - True when the servers process the action successfully, false otherwise. 
- """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - changed = False - - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID of server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - _startstop_machine(module, profitbricks, datacenter_id, server_id) - changed = True - - if wait: - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - matched_instances = [] - for res in profitbricks.list_servers(datacenter_id)['items']: - if state == 'running': - if res['properties']['vmState'].lower() == state: - matched_instances.append(res) - elif state == 'stopped': - if res['properties']['vmState'].lower() == 'shutoff': - matched_instances.append(res) - - if len(matched_instances) < len(instance_ids): - time.sleep(5) - else: - break - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) - - return (changed) - - -def _get_datacenter_id(datacenters, identity): - """ - Fetch and return datacenter UUID by datacenter name if found. - """ - for datacenter in datacenters['items']: - if identity in (datacenter['properties']['name'], datacenter['id']): - return datacenter['id'] - return None - - -def _get_server_id(servers, identity): - """ - Fetch and return server UUID by server name if found. 
- """ - for server in servers['items']: - if identity in (server['properties']['name'], server['id']): - return server['id'] - return None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - name=dict(), - image=dict(), - cores=dict(type='int', default=2), - ram=dict(type='int', default=2048), - cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], - default='AMD_OPTERON'), - volume_size=dict(type='int', default=10), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - image_password=dict(default=None, no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - lan=dict(type='int', default=1), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - location=dict(choices=LOCATIONS, default='us/las'), - assign_public_ip=dict(type='bool', default=False), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - remove_boot_volume=dict(type='bool', default=True), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required ' + - 'for running or stopping machines.') - - try: - (changed) = remove_virtual_machine(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state in ('running', 'stopped'): - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for ' + - 'running or stopping machines.') - try: - (changed) = startstop_machine(module, profitbricks, state) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is ' + - 'required for new instance') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is ' + - 'required for new instance') - - try: - (machine_dict_array) = create_virtual_machine(module, profitbricks) - module.exit_json(**machine_dict_array) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py deleted file mode 100644 index 7897ffdeb9..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py +++ /dev/null @@ -1,258 +0,0 @@ 
-#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_datacenter -short_description: Create or destroy a ProfitBricks Virtual Datacenter. -description: - - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency - on profitbricks >= 1.0.0 -options: - name: - description: - - The name of the virtual datacenter. - type: str - description: - description: - - The description of the virtual datacenter. - type: str - required: false - location: - description: - - The datacenter location. - type: str - required: false - default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the datacenter to be created before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Create or terminate datacenters. - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create a datacenter - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - -- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter) - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - state: absent -''' - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Datacenter -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _remove_datacenter(module, profitbricks, datacenter): - try: - profitbricks.delete_datacenter(datacenter) - except Exception as e: - module.fail_json(msg="failed to remove the datacenter: %s" % str(e)) - - -def create_datacenter(module, profitbricks): - """ - Creates a Datacenter - - This will create a new Datacenter in the specified location. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
-
-    Returns:
-        True if a new datacenter was created, false otherwise
-    """
-    name = module.params.get('name')
-    location = module.params.get('location')
-    description = module.params.get('description')
-    wait = module.params.get('wait')
-    wait_timeout = int(module.params.get('wait_timeout'))
-
-    i = Datacenter(
-        name=name,
-        location=location,
-        description=description
-    )
-
-    try:
-        datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
-        if wait:
-            _wait_for_completion(profitbricks, datacenter_response,
-                                 wait_timeout, "_create_datacenter")
-
-        results = {
-            'datacenter_id': datacenter_response['id']
-        }
-
-        return results
-
-    except Exception as e:
-        module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
-
-
-def remove_datacenter(module, profitbricks):
-    """
-    Removes a Datacenter.
-
-    This will remove a datacenter.
-
-    module : AnsibleModule object
-    profitbricks: authenticated profitbricks object.
-
-    Returns:
-        True if the datacenter was deleted, false otherwise
-    """
-    name = module.params.get('name')
-    changed = False
-
-    if uuid_match.match(name):
-        _remove_datacenter(module, profitbricks, name)
-        changed = True
-    else:
-        datacenters = profitbricks.list_datacenters()
-
-        for d in datacenters['items']:
-            vdc = profitbricks.get_datacenter(d['id'])
-
-            if name == vdc['properties']['name']:
-                name = d['id']
-                _remove_datacenter(module, profitbricks, name)
-                changed = True
-
-    return changed
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(),
-            description=dict(),
-            location=dict(choices=LOCATIONS, default='us/las'),
-            subscription_user=dict(),
-            subscription_password=dict(no_log=True),
-            wait=dict(type='bool', default=True),
-            wait_timeout=dict(default=600, type='int'),
-            state=dict(default='present'),  # @TODO add choices
-        )
-    )
-    if not HAS_PB_SDK:
-        module.fail_json(msg='profitbricks required for this module')
-
-    if not module.params.get('subscription_user'):
-        module.fail_json(msg='subscription_user parameter is required')
-    if not module.params.get('subscription_password'):
-        module.fail_json(msg='subscription_password parameter is required')
-
-    subscription_user = module.params.get('subscription_user')
-    subscription_password = module.params.get('subscription_password')
-
-    profitbricks = ProfitBricksService(
-        username=subscription_user,
-        password=subscription_password)
-
-    state = module.params.get('state')
-
-    if state == 'absent':
-        if not module.params.get('name'):
-            module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
-
-        try:
-            (changed) = remove_datacenter(module, profitbricks)
-            module.exit_json(
-                changed=changed)
-        except Exception as e:
-            module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
-    elif state == 'present':
-        if not module.params.get('name'):
-            module.fail_json(msg='name parameter is required for a new datacenter')
-        if not module.params.get('location'):
-            module.fail_json(msg='location parameter is required for a new datacenter')
-
-        try:
-            (datacenter_dict_array) = create_datacenter(module, profitbricks)
-            module.exit_json(**datacenter_dict_array)
-        except Exception as e:
-            module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
-
-if __name__ == '__main__':
-    main()
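Each of the deleted ProfitBricks modules carries its own verbatim copy of the `_wait_for_completion` helper. For readers skimming the removals, here is the polling pattern in isolation, a minimal sketch rather than the modules' exact code: `client` stands in for the authenticated `ProfitBricksService` object, and `get_request` mirrors the call used in the deleted helpers.

```python
import time


def wait_for_completion(client, promise, timeout, label):
    """Poll a queued API request until it is DONE, FAILED, or times out."""
    if not promise:
        return
    deadline = time.time() + timeout
    while time.time() < deadline:
        time.sleep(5)  # the deleted modules poll every five seconds
        result = client.get_request(request_id=promise['requestId'],
                                    status=True)
        status = result['metadata']['status']
        if status == 'DONE':
            return
        if status == 'FAILED':
            raise Exception('Request %s "%s" failed.'
                            % (label, promise['requestId']))
    raise Exception('Timed out waiting for %s "%s" to complete.'
                    % (label, promise['requestId']))
```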
diff --git a/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/plugins/modules/cloud/profitbricks/profitbricks_nic.py
deleted file mode 100644
index 5d98e05e4b..0000000000
--- a/plugins/modules/cloud/profitbricks/profitbricks_nic.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_nic
-short_description: Create or remove a NIC.
-description:
-     - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
-options:
-  datacenter:
-    description:
-      - The datacenter in which to operate.
-    type: str
-    required: true
-  server:
-    description:
-      - The server name or ID.
-    type: str
-    required: true
-  name:
-    description:
-      - The name or ID of the NIC. This is only required on deletes, but not on create.
-      - If not specified, it defaults to a value based on UUID4.
-    type: str
-  lan:
-    description:
-      - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
-    type: str
-  subscription_user:
-    description:
-      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
-    type: str
-    required: true
-  subscription_password:
-    description:
-      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
-    type: str
-    required: true
-  wait:
-    description:
-      - wait for the operation to complete before returning
-    required: false
-    default: "yes"
-    type: bool
-  wait_timeout:
-    description:
-      - how long before wait gives up, in seconds
-    type: int
-    default: 600
-  state:
-    description:
-      - Indicate desired state of the resource
-      - "The available choices are: C(present), C(absent)."
-    type: str
-    required: false
-    default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create a NIC
-  community.general.profitbricks_nic:
-    datacenter: Tardis One
-    server: node002
-    lan: 2
-    wait_timeout: 500
-    state: present
-
-- name: Remove a NIC
-  community.general.profitbricks_nic:
-    datacenter: Tardis One
-    server: node002
-    name: 7341c2454f
-    wait_timeout: 500
-    state: absent
-'''
-
-import re
-import uuid
-import time
-
-HAS_PB_SDK = True
-try:
-    from profitbricks.client import ProfitBricksService, NIC
-except ImportError:
-    HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
-    r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _make_default_name():
-    return str(uuid.uuid4()).replace('-', '')[:10]
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
-    if not promise:
-        return
-    wait_timeout = time.time() + wait_timeout
-    while wait_timeout > time.time():
-        time.sleep(5)
-        operation_result = profitbricks.get_request(
-            request_id=promise['requestId'],
-            status=True)
-
-        if operation_result['metadata']['status'] == "DONE":
-            return
-        elif operation_result['metadata']['status'] == "FAILED":
-            raise Exception(
-                'Request failed to complete ' + msg + ' "' + str(
-                    promise['requestId']) + '" to complete.')
-
-    raise Exception(
-        'Timed out waiting for async operation ' + msg + ' "' + str(
-            promise['requestId']
-        ) + '" to complete.')
-
-
-def create_nic(module, profitbricks):
-    """
-    Creates a NIC.
-
-    module : AnsibleModule object
-    profitbricks: authenticated profitbricks object.
- - Returns: - True if the nic creates, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - lan = module.params.get('lan') - name = module.params.get('name') - if name is None: - name = _make_default_name() - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - try: - n = NIC( - name=name, - lan=lan - ) - - nic_response = profitbricks.create_nic(datacenter, server, n) - - if wait: - _wait_for_completion(profitbricks, nic_response, - wait_timeout, "create_nic") - - return nic_response - - except Exception as e: - module.fail_json(msg="failed to create the NIC: %s" % str(e)) - - -def delete_nic(module, profitbricks): - """ - Removes a NIC - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the NIC was removed, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - name = module.params.get('name') - if name is None: - name = _make_default_name() - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - server_found = False - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server_found = True - server = s['id'] - break - - if not server_found: - return False - - # Locate UUID for NIC - nic_found = False - if not (uuid_match.match(name)): - nic_list = profitbricks.list_nics(datacenter, server) - for n in nic_list['items']: - if name == n['properties']['name']: - nic_found = True - name = n['id'] - break - - if not nic_found: - return False - - try: - nic_response = profitbricks.delete_nic(datacenter, server, name) - return nic_response - except Exception as e: - module.fail_json(msg="failed to remove the NIC: %s" % str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(required=True), - server=dict(required=True), - name=dict(), - lan=dict(), - subscription_user=dict(required=True), - subscription_password=dict(required=True, no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ), - required_if=( - ('state', 'absent', ['name']), - ('state', 'present', ['lan']), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - try: - (changed) = delete_nic(module, profitbricks) - 
module.exit_json(changed=changed)
-        except Exception as e:
-            module.fail_json(msg='failed to set nic state: %s' % str(e))
-
-    elif state == 'present':
-        try:
-            (nic_dict) = create_nic(module, profitbricks)
-            module.exit_json(nics=nic_dict)  # @FIXME changed not calculated?
-        except Exception as e:
-            module.fail_json(msg='failed to set nic state: %s' % str(e))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py
deleted file mode 100644
index be1c18b55a..0000000000
--- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py
+++ /dev/null
@@ -1,432 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_volume
-short_description: Create or destroy a volume.
-description:
-     - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
-options:
-  datacenter:
-    description:
-      - The datacenter in which to create the volumes.
-    type: str
-  name:
-    description:
-      - The name of the volumes. You can enumerate the names using auto_increment.
-    type: str
-  size:
-    description:
-      - The size of the volume.
-    type: int
-    required: false
-    default: 10
-  bus:
-    description:
-      - The bus type.
-    type: str
-    required: false
-    default: VIRTIO
-    choices: [ "IDE", "VIRTIO" ]
-  image:
-    description:
-      - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
-    type: str
-  image_password:
-    description:
-      - Password set for the administrative user.
-    type: str
-    required: false
-  ssh_keys:
-    description:
-      - Public SSH keys allowing access to the virtual machine.
-    type: list
-    elements: str
-    required: false
-  disk_type:
-    description:
-      - The disk type of the volume.
-    type: str
-    required: false
-    default: HDD
-    choices: [ "HDD", "SSD" ]
-  licence_type:
-    description:
-      - The licence type for the volume. This is used when the image is non-standard.
-      - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
-    type: str
-    required: false
-    default: UNKNOWN
-  count:
-    description:
-      - The number of volumes you wish to create.
-    type: int
-    required: false
-    default: 1
-  auto_increment:
-    description:
-      - Whether or not to increment a single number in the name for created volumes.
-    default: yes
-    type: bool
-  instance_ids:
-    description:
-      - list of instance ids, currently only used when state='absent' to remove instances.
-    type: list
-    elements: str
-    required: false
-  subscription_user:
-    description:
-      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
-    type: str
-    required: false
-  subscription_password:
-    description:
-      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
-    type: str
-    required: false
-  wait:
-    description:
-      - wait for the volume to be created before returning
-    required: false
-    default: "yes"
-    type: bool
-  wait_timeout:
-    description:
-      - how long before wait gives up, in seconds
-    type: int
-    default: 600
-  state:
-    description:
-      - Create or remove the volume.
-      - "The available choices are: C(present), C(absent)."
- type: str - required: false - default: 'present' - server: - description: - - Server name to attach the volume to. - type: str - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create multiple volumes - community.general.profitbricks_volume: - datacenter: Tardis One - name: vol%02d - count: 5 - auto_increment: yes - wait_timeout: 500 - state: present - -- name: Remove Volumes - community.general.profitbricks_volume: - datacenter: Tardis One - instance_ids: - - 'vol01' - - 'vol02' - wait_timeout: 500 - state: absent -''' - -import re -import time -import traceback - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Volume -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_volume(module, profitbricks, datacenter, name): - size = module.params.get('size') - bus = module.params.get('bus') - image = module.params.get('image') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - disk_type = module.params.get('disk_type') - licence_type = module.params.get('licence_type') - wait_timeout = module.params.get('wait_timeout') - wait = module.params.get('wait') - - try: - v = Volume( - name=name, - size=size, - bus=bus, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - licence_type=licence_type - ) - - volume_response = profitbricks.create_volume(datacenter, v) - - if wait: - _wait_for_completion(profitbricks, volume_response, - wait_timeout, "_create_volume") - - except Exception as e: - module.fail_json(msg="failed to create the volume: %s" % str(e)) - - return volume_response - - -def _delete_volume(module, profitbricks, datacenter, volume): - try: - profitbricks.delete_volume(datacenter, volume) - except Exception as e: - module.fail_json(msg="failed to remove the volume: %s" % str(e)) - - -def create_volume(module, profitbricks): - """ - Creates a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
-
-    Returns:
-        True if the volume was created, false otherwise
-    """
-    datacenter = module.params.get('datacenter')
-    name = module.params.get('name')
-    auto_increment = module.params.get('auto_increment')
-    count = module.params.get('count')
-
-    datacenter_found = False
-    failed = True
-    volumes = []
-
-    # Locate UUID for Datacenter
-    if uuid_match.match(datacenter):
-        datacenter_found = True
-    else:
-        datacenter_list = profitbricks.list_datacenters()
-        for d in datacenter_list['items']:
-            dc = profitbricks.get_datacenter(d['id'])
-            if datacenter == dc['properties']['name']:
-                datacenter = d['id']
-                datacenter_found = True
-                break
-
-    if not datacenter_found:
-        module.fail_json(msg='datacenter could not be found.')
-
-    if auto_increment:
-        numbers = set()
-        count_offset = 1
-
-        try:
-            name % 0
-        except TypeError as e:
-            if to_native(e).startswith('not all'):
-                name = '%s%%d' % name
-            else:
-                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-        number_range = xrange(count_offset, count_offset + count + len(numbers))
-        available_numbers = list(set(number_range).difference(numbers))
-        names = []
-        numbers_to_use = available_numbers[:count]
-        for number in numbers_to_use:
-            names.append(name % number)
-    else:
-        names = [name] * count
-
-    for name in names:
-        create_response = _create_volume(module, profitbricks, str(datacenter), name)
-        volumes.append(create_response)
-        _attach_volume(module, profitbricks, datacenter, create_response['id'])
-        failed = False
-
-    results = {
-        'failed': failed,
-        'volumes': volumes,
-        'action': 'create',
-        'instance_ids': {
-            'instances': [i['id'] for i in volumes],
-        }
-    }
-
-    return results
-
-
-def delete_volume(module, profitbricks):
-    """
-    Removes a volume.
-
-    This will remove a volume from a datacenter.
-
-    module : AnsibleModule object
-    profitbricks: authenticated profitbricks object.
-
-    Returns:
-        True if the volume was removed, false otherwise
-    """
-    if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
-        module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
-    datacenter = module.params.get('datacenter')
-    changed = False
-    instance_ids = module.params.get('instance_ids')
-
-    # Locate UUID for Datacenter
-    if not (uuid_match.match(datacenter)):
-        datacenter_list = profitbricks.list_datacenters()
-        for d in datacenter_list['items']:
-            dc = profitbricks.get_datacenter(d['id'])
-            if datacenter == dc['properties']['name']:
-                datacenter = d['id']
-                break
-
-    for n in instance_ids:
-        if uuid_match.match(n):
-            _delete_volume(module, profitbricks, datacenter, n)
-            changed = True
-        else:
-            volumes = profitbricks.list_volumes(datacenter)
-            for v in volumes['items']:
-                if n == v['properties']['name']:
-                    volume_id = v['id']
-                    _delete_volume(module, profitbricks, datacenter, volume_id)
-                    changed = True
-
-    return changed
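The `auto_increment` branch of `create_volume()` above is easier to follow in isolation. A hedged sketch of the same enumeration, using Python 3's `range` instead of the `six.moves.xrange` shim; the function name is illustrative:

```python
def enumerate_names(name, count):
    """Expand a printf-style pattern such as 'vol%02d' into numbered names.

    A bare name without a format specifier gets '%d' appended, mirroring
    the TypeError fallback in create_volume() above.
    """
    try:
        name % 0
    except TypeError:
        name = '%s%%d' % name
    return [name % n for n in range(1, count + 1)]


print(enumerate_names('vol%02d', 3))  # ['vol01', 'vol02', 'vol03']
print(enumerate_names('vol', 3))      # ['vol1', 'vol2', 'vol3']
```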
-
-def _attach_volume(module, profitbricks, datacenter, volume):
-    """
-    Attaches a volume.
-
-    This will attach a volume to the server.
-
-    module : AnsibleModule object
-    profitbricks: authenticated profitbricks object.
-
-    Returns:
-        True if the volume was attached, false otherwise
-    """
-    server = module.params.get('server')
-
-    # Locate UUID for Server
-    if server:
-        if not (uuid_match.match(server)):
-            server_list = profitbricks.list_servers(datacenter)
-            for s in server_list['items']:
-                if server == s['properties']['name']:
-                    server = s['id']
-                    break
-
-        try:
-            return profitbricks.attach_volume(datacenter, server, volume)
-        except Exception as e:
-            module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            datacenter=dict(),
-            server=dict(),
-            name=dict(),
-            size=dict(type='int', default=10),
-            bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
-            image=dict(),
-            image_password=dict(no_log=True),
-            ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
-            disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
-            licence_type=dict(default='UNKNOWN'),
-            count=dict(type='int', default=1),
-            auto_increment=dict(type='bool', default=True),
-            instance_ids=dict(type='list', elements='str', default=[]),
-            subscription_user=dict(),
-            subscription_password=dict(no_log=True),
-            wait=dict(type='bool', default=True),
-            wait_timeout=dict(type='int', default=600),
-            state=dict(default='present'),
-        )
-    )
-
-    if not HAS_PB_SDK:
-        module.fail_json(msg='profitbricks required for this module')
-
-    if not module.params.get('subscription_user'):
-        module.fail_json(msg='subscription_user parameter is required')
-    if not module.params.get('subscription_password'):
-        module.fail_json(msg='subscription_password parameter is required')
-
-    subscription_user = module.params.get('subscription_user')
-    subscription_password = module.params.get('subscription_password')
-
-    profitbricks = ProfitBricksService(
-        username=subscription_user,
-        password=subscription_password)
-
-    state = module.params.get('state')
-
-    if state == 'absent':
-        if not module.params.get('datacenter'):
-            module.fail_json(msg='datacenter parameter is required for deleting a volume.')
-
-        try:
-            (changed) = delete_volume(module, profitbricks)
-            module.exit_json(changed=changed)
-        except Exception as e:
-            module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
-    elif state == 'present':
-        if not module.params.get('datacenter'):
-            module.fail_json(msg='datacenter parameter is required for new instance')
-        if not module.params.get('name'):
-            module.fail_json(msg='name parameter is required for new instance')
-
-        try:
-            (volume_dict_array) = create_volume(module, profitbricks)
-            module.exit_json(**volume_dict_array)
-        except Exception as e:
-            module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
deleted file mode 100644
index 1fb3f3c0e2..0000000000
--- a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_volume_attachments
-short_description: Attach or detach a volume.
-description:
-     - Allows you to attach or detach a volume from a ProfitBricks server.
This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - server: - description: - - The name of the server you wish to detach or attach the volume. - type: str - volume: - description: - - The volume name or ID. - type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the operation to complete before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Indicate desired state of the resource - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Attach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: present - -- name: Detach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: absent -''' - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def attach_volume(module, profitbricks): - """ - Attaches a volume. - - This will attach a volume to the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
- - Returns: - True if the volume was attached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.attach_volume(datacenter, server, volume) - - -def detach_volume(module, profitbricks): - """ - Detaches a volume. - - This will remove a volume from the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was detached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.detach_volume(datacenter, server, volume) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - volume=dict(), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required') - if not module.params.get('server'): - module.fail_json(msg='server parameter is required') - if not module.params.get('volume'): - module.fail_json(msg='volume parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - try: - (changed) = detach_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed 
to set volume_attach state: %s' % str(e))
-    elif state == 'present':
-        try:
-            attach_volume(module, profitbricks)
-            module.exit_json()
-        except Exception as e:
-            module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py
deleted file mode 100644
index d3b76337a3..0000000000
--- a/plugins/modules/cloud/pubnub/pubnub_blocks.py
+++ /dev/null
@@ -1,628 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
-# Frameworks
-# Copyright (C) 2016 PubNub Inc.
-# http://www.pubnub.com/
-# http://www.pubnub.com/terms
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: pubnub_blocks
-short_description: PubNub blocks management module.
-description:
-  - "This module allows Ansible to interface with the PubNub BLOCKS
-    infrastructure by providing the following operations: create / remove,
-    start / stop and rename for blocks and create / modify / remove for event
-    handlers"
-author:
-  - PubNub (@pubnub)
-  - Sergey Mamontov (@parfeon)
-requirements:
-  - "python >= 2.7"
-  - "pubnub_blocks_client >= 1.0"
-options:
-  email:
-    description:
-      - Email of the account for which a new session should be started.
-      - "Not required if C(cache) contains the result of a previous module
-        call (in the same play)."
-    required: false
-    type: str
-  password:
-    description:
-      - Password for the account to which the specified C(email) belongs.
-      - "Not required if C(cache) contains the result of a previous module
-        call (in the same play)."
-    required: false
-    type: str
-  cache:
-    description: >
-       If a single play uses the blocks management module several times, it
-       is preferable to enable 'caching' by letting a previous module call
-       share the gathered artifacts and passing them in via this parameter.
-    required: false
-    type: dict
-    default: {}
-  account:
-    description:
-      - "Name of the PubNub account from which C(application) will be used
-        to manage blocks."
-      - "The user's account is used if the value is not set or empty."
-    type: str
-    required: false
-  application:
-    description:
-      - "Name of the target PubNub application whose blocks will be
-        configured on the specified C(keyset)."
-    type: str
-    required: true
-  keyset:
-    description:
-      - Name of the application's key set which is bound to the managed
-        blocks.
-    type: str
-    required: true
-  state:
-    description:
-      - "Intended block state after the event handler creation / update
-        process completes."
-    required: false
-    default: 'present'
-    choices: ['started', 'stopped', 'present', 'absent']
-    type: str
-  name:
-    description:
-      - Name of the managed block, which will later be visible on
-        admin.pubnub.com.
-    required: true
-    type: str
-  description:
-    description:
-      - Short block description, which will later be visible on
-        admin.pubnub.com. Used only if the block doesn't exist; it won't
-        change the description of an existing block.
-    required: false
-    type: str
-  event_handlers:
-    description:
-      - "List of event handlers which should be updated for the specified
-        block C(name)."
-      - "Each entry for a new event handler should contain: C(name), C(src),
-        C(channels), C(event). C(name) is used as the event handler name and
-        can be used later to make changes to it."
-      - C(src) is the full path to the file with the event handler code.
- - "C(channels) is name of channel from which event handler is waiting - for events." - - "C(event) is type of event which is able to trigger event handler: - I(js-before-publish), I(js-after-publish), I(js-after-presence)." - - "Each entry for existing handlers should contain C(name) (so target - handler can be identified). Rest parameters (C(src), C(channels) and - C(event)) can be added if changes required for them." - - "It is possible to rename event handler by adding C(changes) key to - event handler payload and pass dictionary, which will contain single key - C(name), where new name should be passed." - - "To remove particular event handler it is possible to set C(state) for - it to C(absent) and it will be removed." - required: false - default: [] - type: list - elements: dict - changes: - description: - - "List of fields which should be changed by block itself (doesn't - affect any event handlers)." - - "Possible options for change is: C(name)." - required: false - default: {} - type: dict - validate_certs: - description: - - "This key allow to try skip certificates check when performing REST API - calls. Sometimes host may have issues with certificates on it and this - will cause problems to call PubNub REST API." - - If check should be ignored C(False) should be passed to this parameter. - required: false - default: true - type: bool -''' - -EXAMPLES = ''' -# Event handler create example. -- name: Create single event handler - community.general.pubnub_blocks: - email: '{{ email }}' - password: '{{ password }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - event_handlers: - - - src: '{{ path_to_handler_source }}' - name: '{{ handler_name }}' - event: 'js-before-publish' - channels: '{{ handler_channel }}' - -# Change event handler trigger event type. -- name: Change event handler 'event' - community.general.pubnub_blocks: - email: '{{ email }}' - password: '{{ password }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - event_handlers: - - - name: '{{ handler_name }}' - event: 'js-after-publish' - -# Stop block and event handlers. 
-- name: Stopping block
-  community.general.pubnub_blocks:
-    email: '{{ email }}'
-    password: '{{ password }}'
-    application: '{{ app_name }}'
-    keyset: '{{ keyset_name }}'
-    name: '{{ block_name }}'
-    state: stopped
-
-# Multiple module calls with cached result passing
-- name: Create '{{ block_name }}' block
-  register: module_cache
-  community.general.pubnub_blocks:
-    email: '{{ email }}'
-    password: '{{ password }}'
-    application: '{{ app_name }}'
-    keyset: '{{ keyset_name }}'
-    name: '{{ block_name }}'
-    state: present
-- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
-  register: module_cache
-  community.general.pubnub_blocks:
-    cache: '{{ module_cache }}'
-    application: '{{ app_name }}'
-    keyset: '{{ keyset_name }}'
-    name: '{{ block_name }}'
-    state: present
-    event_handlers:
-      -
-        src: '{{ path_to_handler_1_source }}'
-        name: '{{ event_handler_1_name }}'
-        channels: '{{ event_handler_1_channel }}'
-        event: 'js-before-publish'
-- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
-  register: module_cache
-  community.general.pubnub_blocks:
-    cache: '{{ module_cache }}'
-    application: '{{ app_name }}'
-    keyset: '{{ keyset_name }}'
-    name: '{{ block_name }}'
-    state: present
-    event_handlers:
-      -
-        src: '{{ path_to_handler_2_source }}'
-        name: '{{ event_handler_2_name }}'
-        channels: '{{ event_handler_2_channel }}'
-        event: 'js-before-publish'
-- name: Start '{{ block_name }}' block
-  register: module_cache
-  community.general.pubnub_blocks:
-    cache: '{{ module_cache }}'
-    application: '{{ app_name }}'
-    keyset: '{{ keyset_name }}'
-    name: '{{ block_name }}'
-    state: started
-'''
-
-RETURN = '''
-module_cache:
-  description: "Cached account information. When the module is used several
-    times within a single play, pass the cached data to the next module
-    calls to speed up the process."
-  type: dict
-  returned: always
-'''
-import copy
-import os
-
-try:
-    # Import PubNub BLOCKS client.
-    from pubnub_blocks_client import User, Account, Owner, Application, Keyset
-    from pubnub_blocks_client import Block, EventHandler
-    from pubnub_blocks_client import exceptions
-    HAS_PUBNUB_BLOCKS_CLIENT = True
-except ImportError:
-    HAS_PUBNUB_BLOCKS_CLIENT = False
-    User = None
-    Account = None
-    Owner = None
-    Application = None
-    Keyset = None
-    Block = None
-    EventHandler = None
-    exceptions = None
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
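The caching behaviour documented above reduces to a small round-trip: a play registers `module_cache` from one call and feeds it back through the `cache` parameter of the next. The `pubnub_user()` helper that follows implements it with full error reporting; this condensed sketch (assuming `pubnub_blocks_client` is installed, with `params` standing in for `module.params`) shows just the happy path:

```python
import copy

from pubnub_blocks_client import User


def session_user(params):
    """Restore a session cached earlier in the play, or log in fresh."""
    cache = (params.get('cache') or {}).get('module_cache')
    if cache:
        user = User()
        user.restore(cache=copy.deepcopy(cache['pnm_user']))
        return user
    return User(email=params['email'], password=params['password'])
```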
-
-
-def pubnub_user(module):
-    """Create and configure the user model if possible.
-
-    :type module:  AnsibleModule
-    :param module: Reference on module which contain module launch
-                   information and status report methods.
-
-    :rtype:  User
-    :return: Reference on initialized and ready to use user or 'None' if
-             not all of the required information has been passed to the
-             module.
-    """
-    user = None
-    params = module.params
-
-    if params.get('cache') and params['cache'].get('module_cache'):
-        cache = params['cache']['module_cache']
-        user = User()
-        user.restore(cache=copy.deepcopy(cache['pnm_user']))
-    elif params.get('email') and params.get('password'):
-        user = User(email=params.get('email'), password=params.get('password'))
-    else:
-        err_msg = 'It looks like no account credentials have been passed ' \
-                  'or the \'cache\' field doesn\'t contain the result of ' \
-                  'a previous module call.'
-        module.fail_json(msg='Missing account credentials.',
-                         description=err_msg, changed=False)
-
-    return user
-
-
-def pubnub_account(module, user):
-    """Create and configure account if it is possible.
-
-    :type module:  AnsibleModule
-    :param module: Reference on module which contain module launch
-                   information and status report methods.
-    :type user:    User
-    :param user:   Reference on authorized user for which one of accounts
-                   should be used during manipulations with block.
-
-    :rtype:  Account
-    :return: Reference on initialized and ready to use account or 'None' if
-             not all of the required information has been passed to the
-             module.
-    """
-    params = module.params
-    if params.get('account'):
-        account_name = params.get('account')
-        account = user.account(name=params.get('account'))
-        if account is None:
-            err_frmt = 'It looks like there is no \'{0}\' account for the ' \
-                       'authorized user. Please make sure the correct ' \
-                       'name was passed during module configuration.'
-            module.fail_json(msg='Missing account.',
-                             description=err_frmt.format(account_name),
-                             changed=False)
-    else:
-        account = user.accounts()[0]
-
-    return account
-
-
-def pubnub_application(module, account):
-    """Retrieve reference on target application from account model.
-
-    NOTE: If account authorization fails or there is no application with
-    the specified name, the module will exit with an error.
-    :type module:   AnsibleModule
-    :param module:  Reference on module which contain module launch
-                    information and status report methods.
-    :type account:  Account
-    :param account: Reference on PubNub account model from which reference
-                    on application should be fetched.
-
-    :rtype:  Application
-    :return: Reference on initialized and ready to use application model.
-    """
-    application = None
-    params = module.params
-    try:
-        application = account.application(params['application'])
-    except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
-        exc_msg = _failure_title_from_exception(exc)
-        exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
-        module.fail_json(msg=exc_msg, description=exc_descr,
-                         changed=account.changed,
-                         module_cache=dict(account))
-
-    if application is None:
-        err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
-                  'correct application name was passed. If the application ' \
-                  'doesn\'t exist you can create it on admin.pubnub.com.'
-        email = account.owner.email
-        module.fail_json(msg=err_fmt.format(params['application'], email),
-                         changed=account.changed, module_cache=dict(account))
-
-    return application
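The helpers above and below all fail through the same two-step pattern: pull a human-readable description out of the client exception (Python 2 exposes `.message`, Python 3 only `.args`) and export `dict(account)` so the cached state survives the failure. A hedged sketch of that shared pattern; the function names are illustrative:

```python
def exception_description(exc):
    """Python 2/3-safe message extraction, as used in the helpers here."""
    return exc.message if hasattr(exc, 'message') else exc.args[0]


def fail_with_cache(module, account, title, description):
    """Fail the module while keeping the account cache usable later."""
    module.fail_json(msg=title, description=description,
                     changed=account.changed, module_cache=dict(account))
```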
-
-
-def pubnub_keyset(module, account, application):
-    """Retrieve reference on target keyset from application model.
-
-    NOTE: If there is no keyset with the specified name, the module will
-    exit with an error.
-    :type module:       AnsibleModule
-    :param module:      Reference on module which contain module launch
-                        information and status report methods.
-    :type account:      Account
-    :param account:     Reference on PubNub account model which will be
-                        used in case of error to export cached data.
-    :type application:  Application
-    :param application: Reference on PubNub application model from which
-                        reference on keyset should be fetched.
-
-    :rtype:  Keyset
-    :return: Reference on initialized and ready to use keyset model.
-    """
-    params = module.params
-    keyset = application.keyset(params['keyset'])
-    if keyset is None:
-        err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. ' \
-                  'Make sure the correct keyset name was passed. If the ' \
-                  'keyset doesn\'t exist you can create it on admin.pubnub.com.'
-        module.fail_json(msg=err_fmt.format(params['keyset'],
-                                            application.name),
-                         changed=account.changed, module_cache=dict(account))
-
-    return keyset
-
-
-def pubnub_block(module, account, keyset):
-    """Retrieve reference on target block from keyset model.
-
-    NOTE: If there is no block with the specified name and the module is
-    configured to start/stop it, the module will exit with an error.
-    :type module:   AnsibleModule
-    :param module:  Reference on module which contain module launch
-                    information and status report methods.
-    :type account:  Account
-    :param account: Reference on PubNub account model which will be used in
-                    case of error to export cached data.
-    :type keyset:   Keyset
-    :param keyset:  Reference on keyset model from which reference on block
-                    should be fetched.
-
-    :rtype:  Block
-    :return: Reference on initialized and ready to use block model.
-    """
-    block = None
-    params = module.params
-    try:
-        block = keyset.block(params['name'])
-    except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
-        exc_msg = _failure_title_from_exception(exc)
-        exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
-        module.fail_json(msg=exc_msg, description=exc_descr,
-                         changed=account.changed, module_cache=dict(account))
-
-    # Report an error because the block doesn't exist but was requested to
-    # start/stop.
-    if block is None and params['state'] in ['started', 'stopped']:
-        block_name = params.get('name')
-        module.fail_json(msg="'{0}' block doesn't exist.".format(block_name),
-                         changed=account.changed, module_cache=dict(account))
-
-    if block is None and params['state'] == 'present':
-        block = Block(name=params.get('name'),
-                      description=params.get('description'))
-        keyset.add_block(block)
-
-    if block:
-        # Update block information if required.
-        if params.get('changes') and params['changes'].get('name'):
-            block.name = params['changes']['name']
-        if params.get('description'):
-            block.description = params.get('description')
-
-    return block
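The `_failure_title_from_exception()` helper further down maps exception codes to titles with a long `if`/`elif` chain. The same mapping could be expressed as a dict lookup; a hedged sketch (constants from `pubnub_blocks_client.exceptions`, only a few entries shown):

```python
from pubnub_blocks_client import exceptions

ERROR_TITLES = {
    exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
        'Authorization error: missing credentials.',
    exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
        'Authorization error: wrong credentials.',
    exceptions.PN_USER_INSUFFICIENT_RIGHTS:
        'API access error: insufficient access rights.',
}


def failure_title(exception):
    """Dict-based equivalent of the if/elif chain below."""
    return ERROR_TITLES.get(exception.code, 'General REST API access error.')
```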
-
-
-def pubnub_event_handler(block, data):
-    """Retrieve reference on target event handler from block model.
-
-    :type block:  Block
-    :param block: Reference on block model from which reference on event
-                  handlers should be fetched.
-    :type data:   dict
-    :param data:  Reference on dictionary which contains information about
-                  the event handler and whether it should be created or not.
-
-    :rtype:  EventHandler
-    :return: Reference on initialized and ready to use event handler model.
-             'None' will be returned if there is no handler with the
-             specified name and no request to create it.
-    """
-    event_handler = block.event_handler(data['name'])
-
-    # Prepare payload for event handler update.
-    changed_name = (data.pop('changes').get('name')
-                    if 'changes' in data else None)
-    name = data.get('name') or changed_name
-    channels = data.get('channels')
-    event = data.get('event')
-    code = _content_of_file_at_path(data.get('src'))
-    state = data.get('state') or 'present'
-
-    # Create event handler if required.
-    if event_handler is None and state == 'present':
-        event_handler = EventHandler(name=name, channels=channels, event=event,
-                                     code=code)
-        block.add_event_handler(event_handler)
-
-    # Update event handler if required.
-    if event_handler is not None and state == 'present':
-        if name is not None:
-            event_handler.name = name
-        if channels is not None:
-            event_handler.channels = channels
-        if event is not None:
-            event_handler.event = event
-        if code is not None:
-            event_handler.code = code
-
-    return event_handler
-
-
-def _failure_title_from_exception(exception):
-    """Compose human-readable title for module error.
-
-    Title will be based on status codes if they have been provided.
-    :type exception:  exceptions.GeneralPubNubError
-    :param exception: Reference on exception for which title should be
-                      composed.
-
-    :rtype:  str
-    :return: Reference on error title which should be shown on module
-             failure.
-    """
-    title = 'General REST API access error.'
-    if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
-        title = 'Authorization error: missing credentials.'
-    elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
-        title = 'Authorization error: wrong credentials.'
-    elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
-        title = 'API access error: insufficient access rights.'
-    elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
-        title = 'API access error: time token expired.'
-    elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
-        title = 'Block create did fail: block with same name already exists.'
-    elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
-        title = 'Unable to fetch list of blocks for keyset.'
-    elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
-        title = 'Block creation did fail.'
-    elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
-        title = 'Block update did fail.'
-    elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
-        title = 'Block removal did fail.'
-    elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
-        title = 'Block start/stop did fail.'
-    elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
-        title = 'Event handler creation did fail: missing fields.'
-    elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
-        title = 'Event handler creation did fail: handler with the same name already exists.'
-    elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
-        title = 'Event handler creation did fail.'
-    elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
-        title = 'Event handler update did fail.'
-    elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
-        title = 'Event handler removal did fail.'
-
-    return title
-
-
-def _content_of_file_at_path(path):
-    """Read file content.
-
-    Try to read the content of the file at the specified path.
-    :type path:  str
-    :param path: Full path to the location of the file which should be read.
- :rtype: content - :return: File content or 'None' - """ - content = None - if path and os.path.exists(path): - with open(path, mode="rt") as opened_file: - b_content = opened_file.read() - try: - content = to_text(b_content, errors='surrogate_or_strict') - except UnicodeError: - pass - - return content - - -def main(): - fields = dict( - email=dict(default='', required=False, type='str'), - password=dict(default='', required=False, type='str', no_log=True), - account=dict(default='', required=False, type='str'), - application=dict(required=True, type='str'), - keyset=dict(required=True, type='str', no_log=False), - state=dict(default='present', type='str', - choices=['started', 'stopped', 'present', 'absent']), - name=dict(required=True, type='str'), description=dict(type='str'), - event_handlers=dict(default=list(), type='list', elements='dict'), - changes=dict(default=dict(), type='dict'), - cache=dict(default=dict(), type='dict'), - validate_certs=dict(default=True, type='bool')) - module = AnsibleModule(argument_spec=fields, supports_check_mode=True) - - if not HAS_PUBNUB_BLOCKS_CLIENT: - module.fail_json(msg='pubnub_blocks_client required for this module.') - - params = module.params - - # Authorize user. - user = pubnub_user(module) - # Initialize PubNub account instance. - account = pubnub_account(module, user=user) - # Try fetch application with which module should work. - application = pubnub_application(module, account=account) - # Try fetch keyset with which module should work. - keyset = pubnub_keyset(module, account=account, application=application) - # Try fetch block with which module should work. - block = pubnub_block(module, account=account, keyset=keyset) - is_new_block = block is not None and block.uid == -1 - - # Check whether block should be removed or not. - if block is not None and params['state'] == 'absent': - keyset.remove_block(block) - block = None - - if block is not None: - # Update block information if required. - if params.get('changes') and params['changes'].get('name'): - block.name = params['changes']['name'] - - # Process event changes to event handlers. - for event_handler_data in params.get('event_handlers') or list(): - state = event_handler_data.get('state') or 'present' - event_handler = pubnub_event_handler(data=event_handler_data, - block=block) - if state == 'absent' and event_handler: - block.delete_event_handler(event_handler) - - # Update block operation state if required. - if block and not is_new_block: - if params['state'] == 'started': - block.start() - elif params['state'] == 'stopped': - block.stop() - - # Save current account state. - if not module.check_mode: - try: - account.save() - except (exceptions.APIAccessError, exceptions.KeysetError, - exceptions.BlockError, exceptions.EventHandlerError, - exceptions.GeneralPubNubError) as exc: - module_cache = dict(account) - module_cache.update(dict(pnm_user=dict(user))) - exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, - module_cache=module_cache) - - # Report module execution results. 
- module_cache = dict(account) - module_cache.update(dict(pnm_user=dict(user))) - changed_will_change = account.changed or account.will_change - module.exit_json(changed=changed_will_change, module_cache=module_cache) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py deleted file mode 100644 index 8c452d9d72..0000000000 --- a/plugins/modules/cloud/rackspace/rax.py +++ /dev/null @@ -1,892 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax -short_description: create / delete an instance in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud instance and optionally - waits for it to be 'running'. -options: - auto_increment: - description: - - Whether or not to increment a single number with the name of the - created servers. Only applicable when used with the I(group) attribute - or meta key. - type: bool - default: 'yes' - boot_from_volume: - description: - - Whether or not to boot the instance from a Cloud Block Storage volume. - If C(yes) and I(image) is specified a new volume will be created at - boot time. I(boot_volume_size) is required with I(image) to create a - new volume at boot time. - type: bool - default: 'no' - boot_volume: - type: str - description: - - Cloud Block Storage ID or Name to use as the boot volume of the - instance - boot_volume_size: - type: int - description: - - Size of the volume to create in Gigabytes. This is only required with - I(image) and I(boot_from_volume). - default: 100 - boot_volume_terminate: - description: - - Whether the I(boot_volume) or newly created volume from I(image) will - be terminated when the server is terminated - type: bool - default: 'no' - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: 'no' - count: - type: int - description: - - number of instances to launch - default: 1 - count_offset: - type: int - description: - - number count to start at - default: 1 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified it will assume the value C(auto). - choices: - - auto - - manual - exact_count: - description: - - Explicitly ensure an exact count of instances, used with - state=active/present. If specified as C(yes) and I(count) is less than - the servers matched, servers will be deleted to match the count. If - the number of matched servers is fewer than specified in I(count) - additional servers will be added. - type: bool - default: 'no' - extra_client_args: - type: dict - description: - - A hash of key/value pairs to be used when creating the cloudservers - client. This is considered an advanced option, use it wisely and - with caution. - extra_create_args: - type: dict - description: - - A hash of key/value pairs to be used when creating a new server. - This is considered an advanced option, use it wisely and with caution. - files: - type: dict - description: - - Files to insert into the instance. 
remotefilename:localcontent
- flavor:
- type: str
- description:
- - flavor to use for the instance
- group:
- type: str
- description:
- - host group to assign to the server; also used for idempotent operations
- to ensure a specific number of instances
- image:
- type: str
- description:
- - image to use for the instance. Can be an C(id), C(human_id) or C(name).
- With I(boot_from_volume), a Cloud Block Storage volume will be created
- with this image
- instance_ids:
- type: list
- elements: str
- description:
- - list of instance ids, currently only used when state='absent' to
- remove instances
- key_name:
- type: str
- description:
- - key pair to use on the instance
- aliases:
- - keypair
- meta:
- type: dict
- description:
- - A hash of metadata to associate with the instance
- name:
- type: str
- description:
- - Name to give the instance
- networks:
- type: list
- elements: str
- description:
- - The networks to attach to the instances. If specified, you must include
- ALL networks including the public and private interfaces. Can be C(id)
- or C(label).
- default:
- - public
- - private
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- user_data:
- type: str
- description:
- - Data to be uploaded to the server's config drive. This option implies
- I(config_drive). Can be a file path or a string
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Jesse Keating (@omgjlk)"
- - "Matt Martz (@sivel)"
-notes:
- - I(exact_count) can be "destructive" if the number of running servers in
- the I(group) is larger than that specified in I(count). In such a case, the
- I(state) is effectively set to C(absent) and the extra servers are deleted.
- In the case of deletion, the returned data structure will have C(action)
- set to C(delete), and the oldest servers in the group will be deleted.
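The note above compresses the whole reconciliation; the sketch below condenses what the module does when I(exact_count) is set. It is not the module's code: `existing` stands for the group's servers sorted oldest-first, `used_numbers` for the numbers already parsed out of printf-style names such as `test%03d.example.org`, and the returned action maps onto the module's create()/delete() paths.

```python
# Condensed sketch of the exact_count reconciliation described in the note
# above (hypothetical helper, not the module's actual implementation).
def reconcile(existing, count, name_pattern, used_numbers, count_offset):
    if len(existing) > count:
        # Over target: behave as if state=absent for the surplus, dropping
        # the oldest servers first and reporting action='delete'.
        surplus = existing[:len(existing) - count]
        return 'delete', [server.id for server in surplus]
    if len(existing) < count:
        # Under target: fill the gap with the lowest numbers not already
        # taken by live servers in the group.
        wanted = set(range(count_offset, count_offset + count))
        free = sorted(wanted - set(used_numbers))
        names = [name_pattern % n for n in free[:count - len(existing)]]
        return 'create', names
    return None, []  # exact match: nothing to do, changed=False
```

For example, with count=10 and count_offset=10 (the values used in the EXAMPLES block further down), a group already holding test010 and test012 would gain eight new servers named test011 and test013 through test019.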
-extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Cloud Server - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax - credentials: ~/.raxpub - name: rax-test1 - flavor: 5 - image: b11d9567-e412-4255-96b9-bd63ab23bcfe - key_name: my_rackspace_key - files: - /root/test.txt: /home/localuser/test.txt - wait: yes - state: present - networks: - - private - - public - register: rax - -- name: Build an exact count of cloud servers with incremented names - hosts: local - gather_facts: False - tasks: - - name: Server build requests - local_action: - module: rax - credentials: ~/.raxpub - name: test%03d.example.org - flavor: performance1-1 - image: ubuntu-1204-lts-precise-pangolin - state: present - count: 10 - count_offset: 10 - exact_count: yes - group: test - wait: yes - register: rax -''' - -import json -import os -import re -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume, - rax_find_image, rax_find_network, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.six import string_types - - -def rax_find_server_image(module, server, image, boot_volume): - if not image and boot_volume: - vol = rax_find_bootable_volume(module, pyrax, server, - exit=False) - if not vol: - return None - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if vol_image_id: - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if server_image: - server.image = dict(id=server_image) - - # Match image IDs taking care of boot from volume - if image and not server.image: - vol = rax_find_bootable_volume(module, pyrax, server) - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if not vol_image_id: - return None - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if image != server_image: - return None - - server.image = dict(id=server_image) - elif image and server.image['id'] != image: - return None - - return server.image - - -def create(module, names=None, flavor=None, image=None, meta=None, key_name=None, - files=None, wait=True, wait_timeout=300, disk_config=None, - group=None, nics=None, extra_create_args=None, user_data=None, - config_drive=False, existing=None, block_device_mapping_v2=None): - names = [] if names is None else names - meta = {} if meta is None else meta - files = {} if files is None else files - nics = [] if nics is None else nics - extra_create_args = {} if extra_create_args is None else extra_create_args - existing = [] if existing is None else existing - block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2 - - cs = pyrax.cloudservers - changed = False - - if user_data: - config_drive = True - - if user_data and os.path.isfile(os.path.expanduser(user_data)): - try: - user_data = os.path.expanduser(user_data) - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - # Handle the file contents - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - fileobj = 
open(lpath, 'r') - files[rpath] = fileobj.read() - fileobj.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - try: - servers = [] - bdmv2 = block_device_mapping_v2 - for name in names: - servers.append(cs.servers.create(name=name, image=image, - flavor=flavor, meta=meta, - key_name=key_name, - files=files, nics=nics, - disk_config=disk_config, - config_drive=config_drive, - userdata=user_data, - block_device_mapping_v2=bdmv2, - **extra_create_args)) - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - else: - changed = True - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - - if not filter(lambda s: s.status not in FINAL_STATUSES, - servers): - break - time.sleep(5) - - success = [] - error = [] - timeout = [] - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - instance = rax_to_dict(server, 'server') - if server.status == 'ACTIVE' or not wait: - success.append(instance) - elif server.status == 'ERROR': - error.append(instance) - elif wait: - timeout.append(instance) - - untouched = [rax_to_dict(s, 'server') for s in existing] - instances = success + untouched - - results = { - 'changed': changed, - 'action': 'create', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to build' - elif error: - results['msg'] = 'Failed to build all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None): - instance_ids = [] if instance_ids is None else instance_ids - kept = [] if kept is None else kept - - cs = pyrax.cloudservers - - changed = False - instances = {} - servers = [] - - for instance_id in instance_ids: - servers.append(cs.servers.get(instance_id)) - - for server in servers: - try: - server.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - changed = True - - instance = rax_to_dict(server, 'server') - instances[instance['id']] = instance - - # If requested, wait for server deletion - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - instance_id = server.id - try: - server.get() - except Exception: - instances[instance_id]['status'] = 'DELETED' - instances[instance_id]['rax_status'] = 'DELETED' - - if not filter(lambda s: s['status'] not in ('', 'DELETED', - 'ERROR'), - instances.values()): - break - - time.sleep(5) - - timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), - instances.values()) - error = filter(lambda s: s['status'] in ('ERROR'), - instances.values()) - success = filter(lambda s: s['status'] in ('', 'DELETED'), - instances.values()) - - instances = [rax_to_dict(s, 'server') for s in kept] - - results = { - 'changed': changed, - 'action': 'delete', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 
'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to delete' - elif error: - results['msg'] = 'Failed to delete all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def cloudservers(module, state=None, name=None, flavor=None, image=None, - meta=None, key_name=None, files=None, wait=True, wait_timeout=300, - disk_config=None, count=1, group=None, instance_ids=None, - exact_count=False, networks=None, count_offset=0, - auto_increment=False, extra_create_args=None, user_data=None, - config_drive=False, boot_from_volume=False, - boot_volume=None, boot_volume_size=None, - boot_volume_terminate=False): - meta = {} if meta is None else meta - files = {} if files is None else files - instance_ids = [] if instance_ids is None else instance_ids - networks = [] if networks is None else networks - extra_create_args = {} if extra_create_args is None else extra_create_args - - cs = pyrax.cloudservers - cnw = pyrax.cloud_networks - if not cnw: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present' or (state == 'absent' and instance_ids is None): - if not boot_from_volume and not boot_volume and not image: - module.fail_json(msg='image is required for the "rax" module') - - for arg, value in dict(name=name, flavor=flavor).items(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - - if boot_from_volume and not image and not boot_volume: - module.fail_json(msg='image or boot_volume are required for the ' - '"rax" with boot_from_volume') - - if boot_from_volume and image and not boot_volume_size: - module.fail_json(msg='boot_volume_size is required for the "rax" ' - 'module with boot_from_volume and image') - - if boot_from_volume and image and boot_volume: - image = None - - servers = [] - - # Add the group meta key - if group and 'group' not in meta: - meta['group'] = group - elif 'group' in meta and group is None: - group = meta['group'] - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - # When using state=absent with group, the absent block won't match the - # names properly. 
Use the exact_count functionality to decrease the count
- # to the desired level
- was_absent = False
- if group is not None and state == 'absent':
- exact_count = True
- state = 'present'
- was_absent = True
-
- if image:
- image = rax_find_image(module, pyrax, image)
-
- nics = []
- if networks:
- for network in networks:
- nics.extend(rax_find_network(module, pyrax, network))
-
- # act on the state
- if state == 'present':
- # Idempotently ensure a specific count of servers
- if exact_count is not False:
- # See if we can find servers that match our options
- if group is None:
- module.fail_json(msg='"group" must be provided when using '
- '"exact_count"')
-
- if auto_increment:
- numbers = set()
-
- # See if the name is a printf-like string; if not, append
- # %d to the end
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message)
-
- # regex pattern to match printf formatting
- pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- match = re.search(pattern, server.name)
- if match:
- number = int(match.group(1))
- numbers.add(number)
-
- number_range = xrange(count_offset, count_offset + count)
- available_numbers = list(set(number_range)
- .difference(numbers))
- else: # Not auto incrementing
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- # available_numbers is not needed here; we inspect auto_increment
- # again later
-
- # If state was absent but the count was changed,
- # assume we only wanted to remove that number of instances
- if was_absent:
- diff = len(servers) - count
- if diff < 0:
- count = 0
- else:
- count = diff
-
- if len(servers) > count:
- # We have more servers than we need, set state='absent'
- # and delete the extras; this should delete the oldest
- state = 'absent'
- kept = servers[:count]
- del servers[:count]
- instance_ids = []
- for server in servers:
- instance_ids.append(server.id)
- delete(module, instance_ids=instance_ids, wait=wait,
- wait_timeout=wait_timeout, kept=kept)
- elif len(servers) < count:
- # we have fewer servers than we need
- if auto_increment:
- # auto incrementing server numbers
- names = []
- name_slice = count - len(servers)
- numbers_to_use = available_numbers[:name_slice]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- # We are not auto incrementing server numbers,
- # create a list of 'name' that matches how many we need
- names = [name] * (count - len(servers))
- else:
- # we have the right number of servers, just return info
- # about all of the matched servers
- instances = []
- instance_ids = []
- for server in servers:
- instances.append(rax_to_dict(server, 'server'))
- instance_ids.append(server.id)
- module.exit_json(changed=False, action=None,
- instances=instances,
- success=[], error=[], timeout=[],
- instance_ids={'instances': instance_ids,
- 'success': [], 'error': [],
- 'timeout': []})
- else: # not called with exact_count=True
- if group is not None:
- if auto_increment:
- # we are auto incrementing server numbers, but not with
- # exact_count
- numbers = set()
-
- # See if the name is a printf-like string; if not, append
- # %d to the end
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message)
-
- # regex pattern to match printf formatting
- pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- match = re.search(pattern, server.name)
- if match:
- number = int(match.group(1))
- numbers.add(number)
-
- number_range = xrange(count_offset,
- count_offset + count + len(numbers))
- available_numbers = list(set(number_range)
- .difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- # Not auto incrementing
- names = [name] * count
- else:
- # No group was specified, and not using exact_count
- # Perform more simplistic matching
- search_opts = {
- 'name': '^%s$' % name,
- 'flavor': flavor
- }
- servers = []
- for server in cs.servers.list(search_opts=search_opts):
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
-
- if not rax_find_server_image(module, server, image,
- boot_volume):
- continue
-
- # Ignore servers with non-matching metadata
- if server.metadata != meta:
- continue
- servers.append(server)
-
- if len(servers) >= count:
- # We have more servers than were requested, don't do
- # anything. Not running with exact_count=True, so we assume
- # more is OK
- instances = []
- for server in servers:
- instances.append(rax_to_dict(server, 'server'))
-
- instance_ids = [i['id'] for i in instances]
- module.exit_json(changed=False, action=None,
- instances=instances, success=[], error=[],
- timeout=[],
- instance_ids={'instances': instance_ids,
- 'success': [], 'error': [],
- 'timeout': []})
-
- # We need more servers to reach our target; create names for
- # them; we aren't performing auto_increment here
- names = [name] * (count - len(servers))
-
- block_device_mapping_v2 = []
- if boot_from_volume:
- mapping = {
- 'boot_index': '0',
- 'delete_on_termination': boot_volume_terminate,
- 'destination_type': 'volume',
- }
- if image:
- mapping.update({
- 'uuid': image,
- 'source_type': 'image',
- 'volume_size': boot_volume_size,
- })
- image = None
- elif boot_volume:
- volume = rax_find_volume(module, pyrax, boot_volume)
- mapping.update({
- 'uuid': pyrax.utils.get_id(volume),
- 'source_type': 'volume',
- })
- block_device_mapping_v2.append(mapping)
-
- create(module, names=names, flavor=flavor, image=image,
- meta=meta, key_name=key_name, files=files, wait=wait,
- wait_timeout=wait_timeout, disk_config=disk_config, group=group,
- nics=nics, extra_create_args=extra_create_args,
- user_data=user_data, config_drive=config_drive,
- existing=servers,
- block_device_mapping_v2=block_device_mapping_v2)
-
- elif state == 'absent':
- if instance_ids is None:
- # We weren't given an explicit list of server IDs to delete
- # Let's match instead
- search_opts = {
- 'name': '^%s$' % name,
- 'flavor': flavor
- }
- for server in cs.servers.list(search_opts=search_opts):
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
-
- if not rax_find_server_image(module, server, image,
- boot_volume):
- continue
-
- # Ignore servers with non-matching metadata
- if meta != server.metadata:
- continue
-
- servers.append(server)
-
- # Build a list of server IDs to delete
- instance_ids = []
- for server in servers:
- if len(instance_ids) < count:
- instance_ids.append(server.id)
- else:
- break
-
- if not instance_ids:
- # No server IDs 
were matched for deletion, or no IDs were - # explicitly provided, just exit and don't do anything - module.exit_json(changed=False, action=None, instances=[], - success=[], error=[], timeout=[], - instance_ids={'instances': [], - 'success': [], 'error': [], - 'timeout': []}) - - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - auto_increment=dict(default=True, type='bool'), - boot_from_volume=dict(default=False, type='bool'), - boot_volume=dict(type='str'), - boot_volume_size=dict(type='int', default=100), - boot_volume_terminate=dict(type='bool', default=False), - config_drive=dict(default=False, type='bool'), - count=dict(default=1, type='int'), - count_offset=dict(default=1, type='int'), - disk_config=dict(choices=['auto', 'manual']), - exact_count=dict(default=False, type='bool'), - extra_client_args=dict(type='dict', default={}), - extra_create_args=dict(type='dict', default={}), - files=dict(type='dict', default={}), - flavor=dict(), - group=dict(), - image=dict(), - instance_ids=dict(type='list', elements='str'), - key_name=dict(aliases=['keypair']), - meta=dict(type='dict', default={}), - name=dict(), - networks=dict(type='list', elements='str', default=['public', 'private']), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - auto_increment = module.params.get('auto_increment') - boot_from_volume = module.params.get('boot_from_volume') - boot_volume = module.params.get('boot_volume') - boot_volume_size = module.params.get('boot_volume_size') - boot_volume_terminate = module.params.get('boot_volume_terminate') - config_drive = module.params.get('config_drive') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - exact_count = module.params.get('exact_count', False) - extra_client_args = module.params.get('extra_client_args') - extra_create_args = module.params.get('extra_create_args') - files = module.params.get('files') - flavor = module.params.get('flavor') - group = module.params.get('group') - image = module.params.get('image') - instance_ids = module.params.get('instance_ids') - key_name = module.params.get('key_name') - meta = module.params.get('meta') - name = module.params.get('name') - networks = module.params.get('networks') - state = module.params.get('state') - user_data = module.params.get('user_data') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - if extra_client_args: - pyrax.cloudservers = pyrax.connect_to_cloudservers( - region=pyrax.cloudservers.client.region_name, - **extra_client_args) - client = pyrax.cloudservers.client - if 'bypass_url' in extra_client_args: - client.management_url = extra_client_args['bypass_url'] - - if pyrax.cloudservers is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloudservers(module, state=state, name=name, flavor=flavor, - image=image, meta=meta, key_name=key_name, files=files, - wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, - count=count, group=group, instance_ids=instance_ids, - exact_count=exact_count, networks=networks, - count_offset=count_offset, auto_increment=auto_increment, - extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive, boot_from_volume=boot_from_volume, - boot_volume=boot_volume, boot_volume_size=boot_volume_size, - boot_volume_terminate=boot_volume_terminate) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cbs.py b/plugins/modules/cloud/rackspace/rax_cbs.py deleted file mode 100644 index b543f5979a..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cbs.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs -short_description: Manipulate Rackspace Cloud Block Storage Volumes -description: - - Manipulate Rackspace Cloud Block Storage Volumes -options: - description: - type: str - description: - - Description to give the volume being created - image: - type: str - description: - - image to use for bootable volumes. Can be an C(id), C(human_id) or - C(name). This option requires C(pyrax>=1.9.3) - meta: - type: dict - description: - - A hash of metadata to associate with the volume - name: - type: str - description: - - Name to give the volume being created - required: true - size: - type: int - description: - - Size of the volume to create in Gigabytes - default: 100 - snapshot_id: - type: str - description: - - The id of the snapshot to create the volume from - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - volume_type: - type: str - description: - - Type of the volume being created - choices: - - SATA - - SSD - default: SATA - wait: - description: - - wait for the volume to be in state 'available' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume create request - local_action: - module: rax_cbs - credentials: ~/.raxpub - name: my-volume - description: My Volume - volume_type: SSD - size: 150 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_volume -''' - -from distutils.version import LooseVersion - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) - - -def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image): - changed = False - volume = None - instance = {} - - cbs = pyrax.cloud_blockstorage - - if cbs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if image: - # pyrax<1.9.3 did not have support for specifying an image when - # creating a volume which is required for bootable volumes - if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): - module.fail_json(msg='Creating a bootable volume requires ' - 'pyrax>=1.9.3') - image = rax_find_image(module, pyrax, image) - - volume = rax_find_volume(module, pyrax, name) - - if state == 'present': - if not volume: - kwargs = dict() - if image: - kwargs['image'] = image - try: - volume = cbs.create(name, size=size, volume_type=volume_type, - description=description, - metadata=meta, - snapshot_id=snapshot_id, **kwargs) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(volume, interval=5, - attempts=attempts) - - volume.get() - instance = rax_to_dict(volume) - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait and volume.status not in VOLUME_STATUS: - result['msg'] = 'Timeout waiting on %s' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if volume: - instance = rax_to_dict(volume) - try: - volume.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - description=dict(type='str'), - image=dict(type='str'), - meta=dict(type='dict', default={}), - name=dict(required=True), - size=dict(type='int', default=100), - snapshot_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - description = module.params.get('description') - image = module.params.get('image') - meta = module.params.get('meta') - 
name = module.params.get('name') - size = module.params.get('size') - snapshot_id = module.params.get('snapshot_id') - state = module.params.get('state') - volume_type = module.params.get('volume_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py deleted file mode 100644 index fd21081475..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs_attachments -short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments -description: - - Manipulate Rackspace Cloud Block Storage Volume Attachments -options: - device: - type: str - description: - - The device path to attach the volume to, e.g. /dev/xvde. - - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name. - volume: - type: str - description: - - Name or id of the volume to attach/detach - required: true - server: - type: str - description: - - Name or id of the server to attach/detach - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - wait: - description: - - wait for the volume to be in 'in-use'/'available' state before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Attach a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume attach request - local_action: - module: rax_cbs_attachments - credentials: ~/.raxpub - volume: my-volume - server: my-server - device: /dev/xvdd - region: DFW - wait: yes - state: present - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES, - rax_argument_spec, - rax_find_server, - rax_find_volume, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout): - cbs = pyrax.cloud_blockstorage - cs = pyrax.cloudservers - - if cbs is None or cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - changed = False - instance = {} - - volume = rax_find_volume(module, pyrax, volume) - - if not volume: - module.fail_json(msg='No matching storage volumes were found') - - if state == 'present': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=device) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - - for key, value in vars(volume).items(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=5, attempts=attempts) - - volume.get() - result['volume'] = rax_to_dict(volume) - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - - result = dict(changed=changed, volume=rax_to_dict(volume)) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - device=dict(required=False), - volume=dict(required=True), - server=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - device = module.params.get('device') - volume = module.params.get('volume') - server = module.params.get('server') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb.py b/plugins/modules/cloud/rackspace/rax_cdb.py deleted file mode 100644 index 04bbe71cda..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cdb.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb -short_description: create/delete 
or resize a Rackspace Cloud Databases instance
-description:
- - creates / deletes or resizes a Rackspace Cloud Databases instance
- and optionally waits for it to be 'running'. The name option needs to be
- unique since it's used to identify the instance.
-options:
- name:
- type: str
- description:
- - Name of the databases server instance
- required: yes
- flavor:
- type: int
- description:
- - flavor to use for the instance, 1 to 6 (i.e. 512MB to 16GB)
- default: 1
- volume:
- type: int
- description:
- - Volume size of the database, 1-150 GB
- default: 2
- cdb_type:
- type: str
- description:
- - type of instance (i.e. MySQL, MariaDB, Percona)
- default: MySQL
- aliases: ['type']
- cdb_version:
- type: str
- description:
- - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
- - "The available choices are: C(5.1), C(5.6) and C(10)."
- default: 5.6
- aliases: ['version']
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a Cloud Databases
- gather_facts: False
- tasks:
- - name: Server build request
- local_action:
- module: rax_cdb
- credentials: ~/.raxpub
- region: IAD
- name: db-server1
- flavor: 1
- volume: 2
- cdb_type: MySQL
- cdb_version: 5.6
- wait: yes
- state: present
- register: rax_db_server
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_instance(name):
-
- cdb = pyrax.cloud_databases
- instances = cdb.list()
- if instances:
- for instance in instances:
- if instance.name == name:
- return instance
- return False
-
-
-def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout):
-
- for arg, value in dict(name=name, flavor=flavor,
- volume=volume, type=cdb_type, version=cdb_version
- ).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb"'
- ' module' % arg)
-
- if not (volume >= 1 and volume <= 150):
- module.fail_json(msg='volume is required to be between 1 and 150')
-
- cdb = pyrax.cloud_databases
-
- flavors = []
- for item in cdb.list_flavors():
- flavors.append(item.id)
-
- if flavor not in flavors:
- module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
-
- changed = False
-
- instance = find_instance(name)
-
- if not instance:
- action = 'create'
- try:
- instance = cdb.create(name=name, flavor=flavor, volume=volume,
- type=cdb_type, version=cdb_version)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- else:
- action = None
-
- if instance.volume.size != volume:
- action = 'resize'
- if instance.volume.size > volume:
- module.fail_json(changed=False, action=action,
- msg='The new volume size must be larger than '
- 'the current volume size',
- cdb=rax_to_dict(instance))
- instance.resize_volume(volume)
- changed = True
-
- if int(instance.flavor.id) != flavor:
- action = 'resize'
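- # Note on ordering (an assumption read from this call sequence, not
- # documented here): a flavor resize appears to require an ACTIVE
- # instance, hence the wait below before resize(); the volume resize
- # above is issued without such a wait.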
- pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - instance.resize(flavor) - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - - if wait and instance.status != 'ACTIVE': - module.fail_json(changed=changed, action=action, - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be created' % name) - - module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) - - -def delete_instance(module, name, wait, wait_timeout): - - if not name: - module.fail_json(msg='name is required for the "rax_cdb" module') - - changed = False - - instance = find_instance(name) - if not instance: - module.exit_json(changed=False, action='delete') - - try: - instance.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', - attempts=wait_timeout) - - if wait and instance.status != 'SHUTDOWN': - module.fail_json(changed=changed, action='delete', - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be deleted' % name) - - module.exit_json(changed=changed, action='delete', - cdb=rax_to_dict(instance)) - - -def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - # act on the state - if state == 'present': - save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout) - elif state == 'absent': - delete_instance(module, name, wait, wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - flavor=dict(type='int', default=1), - volume=dict(type='int', default=2), - cdb_type=dict(type='str', default='MySQL', aliases=['type']), - cdb_version=dict(type='str', default='5.6', aliases=['version']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - flavor = module.params.get('flavor') - volume = module.params.get('volume') - cdb_type = module.params.get('cdb_type') - cdb_version = module.params.get('cdb_version') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb_database.py b/plugins/modules/cloud/rackspace/rax_cdb_database.py deleted file mode 100644 index 86cd1aac40..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cdb_database.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_cdb_database -short_description: 'create / delete a database in the Cloud Databases' -description: - - create / delete a database in the Cloud Databases. 
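Like the rest of this module family, it keeps the operation idempotent with a find-then-act flow: look the object up by name and act only when the current state differs from the requested one. A minimal standalone sketch of that pattern, with hypothetical stand-ins rather than this module's code:

```python
# Find-then-act idempotency sketch (hypothetical stand-ins, not the
# module's code): act only when current state differs from the goal.
def ensure_present(existing, name, create):
    """Return (object, changed); create only if the name is absent."""
    for obj in existing:
        if obj.name == name:
            return obj, False       # already there: changed=False
    return create(name), True       # missing: create it, changed=True

def ensure_absent(existing, name, delete):
    """Return changed; delete only if the name is present."""
    for obj in existing:
        if obj.name == name:
            delete(obj)
            return True
    return False
```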
-options: - cdb_id: - type: str - description: - - The databases server UUID - required: yes - name: - type: str - description: - - Name to give to the database - required: yes - character_set: - type: str - description: - - Set of symbols and encodings - default: 'utf8' - collate: - type: str - description: - - Set of rules for comparing characters in a character set - default: 'utf8_general_ci' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a database in Cloud Databases - tasks: - - name: Database build request - local_action: - module: rax_cdb_database - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - name: db1 - state: present - register: rax_db_database -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_database(instance, name): - try: - database = instance.get_database(name) - except Exception: - return False - - return database - - -def save_database(module, cdb_id, name, character_set, collate): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if not database: - try: - database = instance.create_database(name=name, - character_set=character_set, - collate=collate) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='create', - database=rax_to_dict(database)) - - -def delete_database(module, cdb_id, name): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if database: - try: - database.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete', - database=rax_to_dict(database)) - - -def rax_cdb_database(module, state, cdb_id, name, character_set, collate): - - # act on the state - if state == 'present': - save_database(module, cdb_id, name, character_set, collate) - elif state == 'absent': - delete_database(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - name=dict(type='str', required=True), - character_set=dict(type='str', default='utf8'), - collate=dict(type='str', default='utf8_general_ci'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = module.params.get('name') - character_set = module.params.get('character_set') - collate = module.params.get('collate') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_database(module, state, cdb_id, name, character_set, 
collate) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py deleted file mode 100644 index 674f17c070..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb_user -short_description: create / delete a Rackspace Cloud Database -description: - - create / delete a database in the Cloud Databases. -options: - cdb_id: - type: str - description: - - The databases server UUID - required: yes - db_username: - type: str - description: - - Name of the database user - required: yes - db_password: - type: str - description: - - Database user password - required: yes - databases: - type: list - elements: str - description: - - Name of the databases that the user can access - default: [] - host: - type: str - description: - - Specifies the host from which a user is allowed to connect to - the database. Possible values are a string containing an IPv4 address - or "%" to allow connecting from any host - default: '%' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a user in Cloud Databases - tasks: - - name: User build request - local_action: - module: rax_cdb_user - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - db_username: user1 - db_password: user1 - databases: ['db1'] - state: present - register: rax_db_user -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_user(instance, name): - try: - user = instance.get_user(name) - except Exception: - return False - - return user - - -def save_user(module, cdb_id, name, password, databases, host): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if not user: - action = 'create' - try: - user = instance.create_user(name=name, - password=password, - database_names=databases, - host=host) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - else: - action = 'update' - - if user.host != host: - changed = True - - user.update(password=password, host=host) - - former_dbs = set([item.name for item in user.list_user_access()]) - databases = set(databases) - - if databases != former_dbs: - try: - revoke_dbs = [db for db in former_dbs if db not in databases] - user.revoke_user_access(db_names=revoke_dbs) - - new_dbs = [db for db in databases if db not in former_dbs] - 
user.grant_user_access(db_names=new_dbs) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) - - -def delete_user(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user"' - ' module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if user: - try: - user.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_user(module, state, cdb_id, name, password, databases, host): - - # act on the state - if state == 'present': - save_user(module, cdb_id, name, password, databases, host) - elif state == 'absent': - delete_user(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - db_username=dict(type='str', required=True), - db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', elements='str', default=[]), - host=dict(type='str', default='%'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = module.params.get('db_username') - password = module.params.get('db_password') - databases = module.params.get('databases') - host = to_text(module.params.get('host'), errors='surrogate_or_strict') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_user(module, state, cdb_id, name, password, databases, host) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_clb.py b/plugins/modules/cloud/rackspace/rax_clb.py deleted file mode 100644 index 9160133e21..0000000000 --- a/plugins/modules/cloud/rackspace/rax_clb.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb -short_description: create / delete a load balancer in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud load balancer. 
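One implementation detail worth flagging before the options: the lookup in this module's code further down does not trust a single list() call; it pages through results with a marker. A standalone sketch of that pagination idiom, assuming a hypothetical `list_page(marker=...)` callable whose marker means "resume after this ID" (pyrax's exact paging semantics may differ):

```python
# Marker-based pagination sketch; list_page is a hypothetical callable and
# the "resume after" marker semantics are an assumption.
def list_all(list_page):
    """Collect every item from a marker-paginated listing."""
    items = list(list_page(marker=None))
    while items:
        page = list(list_page(marker=items[-1].id))
        if not page:
            break                   # server returned nothing new: done
        items.extend(page)
    return items
```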
-options: - algorithm: - type: str - description: - - algorithm for the balancer being created - choices: - - RANDOM - - LEAST_CONNECTIONS - - ROUND_ROBIN - - WEIGHTED_LEAST_CONNECTIONS - - WEIGHTED_ROUND_ROBIN - default: LEAST_CONNECTIONS - meta: - type: dict - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the load balancer - required: yes - port: - type: int - description: - - Port for the balancer being created - default: 80 - protocol: - type: str - description: - - Protocol for the balancer being created - choices: - - DNS_TCP - - DNS_UDP - - FTP - - HTTP - - HTTPS - - IMAPS - - IMAPv4 - - LDAP - - LDAPS - - MYSQL - - POP3 - - POP3S - - SMTP - - TCP - - TCP_CLIENT_FIRST - - UDP - - UDP_STREAM - - SFTP - default: HTTP - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - timeout: - type: int - description: - - timeout for communication between the balancer and the node - default: 30 - type: - type: str - description: - - type of interface for the balancer being created - choices: - - PUBLIC - - SERVICENET - default: PUBLIC - vip_id: - type: str - description: - - Virtual IP ID to use when creating the load balancer for purposes of - sharing an IP with another load balancer of another protocol - wait: - description: - - wait for the balancer to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Load Balancer - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Balancer create request - local_action: - module: rax_clb - credentials: ~/.raxpub - name: my-lb - port: 8080 - protocol: HTTP - type: SERVICENET - timeout: 30 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_lb -''' - - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS, - CLB_PROTOCOLS, - rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id): - if int(timeout) < 30: - module.fail_json(msg='"timeout" must be greater than or equal to 30') - - changed = False - balancers = [] - - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - balancer_list = clb.list() - while balancer_list: - retrieved = clb.list(marker=balancer_list.pop().id) - balancer_list.extend(retrieved) - if len(retrieved) < 2: - break - - for balancer in balancer_list: - if name != balancer.name and name != balancer.id: - continue - - balancers.append(balancer) - - if len(balancers) > 1: - module.fail_json(msg='Multiple Load Balancers were matched by name, ' - 'try using the Load Balancer ID instead') - - if state == 'present': - if isinstance(meta, dict): - metadata = [dict(key=k, value=v) for k, v in meta.items()] - - if not balancers: - try: - virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] - balancer = clb.create(name, metadata=metadata, port=port, - algorithm=algorithm, protocol=protocol, - timeout=timeout, virtual_ips=virtual_ips) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - balancer = balancers[0] - setattr(balancer, 'metadata', - [dict(key=k, value=v) for k, v in - balancer.get_metadata().items()]) - atts = { - 'name': name, - 'algorithm': algorithm, - 'port': port, - 'protocol': protocol, - 'timeout': timeout - } - for att, value in atts.items(): - current = getattr(balancer, att) - if current != value: - changed = True - - if changed: - balancer.update(**atts) - - if balancer.metadata != metadata: - balancer.set_metadata(meta) - changed = True - - virtual_ips = [clb.VirtualIP(type=vip_type)] - current_vip_types = set([v.type for v in balancer.virtual_ips]) - vip_types = set([v.type for v in virtual_ips]) - if current_vip_types != vip_types: - module.fail_json(msg='Load balancer Virtual IP type cannot ' - 'be changed') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - instance = rax_to_dict(balancer, 'clb') - - result = dict(changed=changed, balancer=instance) - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if balancers: - balancer = balancers[0] - try: - balancer.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - instance = rax_to_dict(balancer, 'clb') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(balancer, 'status', ('DELETED'), - interval=5, attempts=attempts) - else: - instance = {} - - module.exit_json(changed=changed, balancer=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - algorithm=dict(choices=CLB_ALGORITHMS, - default='LEAST_CONNECTIONS'), - meta=dict(type='dict', default={}), - name=dict(required=True), - port=dict(type='int', default=80), - protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), - state=dict(default='present', choices=['present', 'absent']), - timeout=dict(type='int', default=30), - type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), - vip_id=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - algorithm = module.params.get('algorithm') - 
meta = module.params.get('meta') - name = module.params.get('name') - port = module.params.get('port') - protocol = module.params.get('protocol') - state = module.params.get('state') - timeout = int(module.params.get('timeout')) - vip_id = module.params.get('vip_id') - vip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/plugins/modules/cloud/rackspace/rax_clb_nodes.py deleted file mode 100644 index 4adcc66fb7..0000000000 --- a/plugins/modules/cloud/rackspace/rax_clb_nodes.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb_nodes -short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer -description: - - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer -options: - address: - type: str - required: false - description: - - IP address or domain name of the node - condition: - type: str - required: false - choices: - - enabled - - disabled - - draining - description: - - Condition for the node, which determines its role within the load - balancer - load_balancer_id: - type: int - required: true - description: - - Load balancer id - node_id: - type: int - required: false - description: - - Node id - port: - type: int - required: false - description: - - Port number of the load balanced service on the node - state: - type: str - required: false - default: "present" - choices: - - present - - absent - description: - - Indicate desired state of the node - type: - type: str - required: false - choices: - - primary - - secondary - description: - - Type of node - wait: - required: false - default: "no" - type: bool - description: - - Wait for the load balancer to become active before returning - wait_timeout: - type: int - required: false - default: 30 - description: - - How long to wait before giving up and returning an error - weight: - type: int - required: false - description: - - Weight of node - virtualenv: - type: path - description: - - Virtualenv to execute this module in -author: "Lukasz Kawczynski (@neuroid)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Add a new node to the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - address: 10.2.2.3 - port: 80 - condition: enabled - type: primary - wait: yes - credentials: /path/to/credentials - -- name: Drain connections from a node - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - condition: draining - wait: yes - credentials: /path/to/credentials - -- name: Remove a node from the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - state: absent - wait: yes - credentials: /path/to/credentials -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from 
ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module - - -def _activate_virtualenv(path): - activate_this = os.path.join(path, 'bin', 'activate_this.py') - with open(activate_this) as f: - code = compile(f.read(), activate_this, 'exec') - exec(code) - - -def _get_node(lb, node_id=None, address=None, port=None): - """Return a matching node""" - for node in getattr(lb, 'nodes', []): - match_list = [] - if node_id is not None: - match_list.append(getattr(node, 'id', None) == node_id) - if address is not None: - match_list.append(getattr(node, 'address', None) == address) - if port is not None: - match_list.append(getattr(node, 'port', None) == port) - - if match_list and all(match_list): - return node - - return None - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - condition=dict(choices=['enabled', 'disabled', 'draining']), - load_balancer_id=dict(required=True, type='int'), - node_id=dict(type='int'), - port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - type=dict(choices=['primary', 'secondary']), - virtualenv=dict(type='path'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=30, type='int'), - weight=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params['address'] - condition = (module.params['condition'] and - module.params['condition'].upper()) - load_balancer_id = module.params['load_balancer_id'] - node_id = module.params['node_id'] - port = module.params['port'] - state = module.params['state'] - typ = module.params['type'] and module.params['type'].upper() - virtualenv = module.params['virtualenv'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] or 1 - weight = module.params['weight'] - - if virtualenv: - try: - _activate_virtualenv(virtualenv) - except IOError as e: - module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( - virtualenv, e)) - - setup_rax_module(module, pyrax) - - if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - node = _get_node(lb, node_id, address, port) - - result = rax_clb_node_to_dict(node) - - if state == 'absent': - if not node: # Removing a non-existent node - module.exit_json(changed=False, state=state) - try: - lb.delete_node(node) - result = {} - except pyrax.exc.NotFound: - module.exit_json(changed=False, state=state) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # present - if not node: - if node_id: # Updating a non-existent node - msg = 'Node %d not found' % node_id - if lb.nodes: - msg += (' (available nodes: %s)' % - ', '.join([str(x.id) for x in lb.nodes])) - module.fail_json(msg=msg) - else: # Creating a new node - try: - node = pyrax.cloudloadbalancers.Node( - address=address, port=port, condition=condition, - weight=weight, type=typ) - resp, body = lb.add_nodes([node]) - result.update(body['nodes'][0]) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # Updating an existing node - mutable = { - 'condition': condition, - 'type': typ, - 'weight': weight, - } - - for name, value in mutable.items(): - if value is None or value == getattr(node, name): - mutable.pop(name) - - if not mutable: - module.exit_json(changed=False, state=state, node=result) - - try: - # The diff has to be set explicitly to update node's weight and - # type; this should probably be fixed in pyrax - lb.update_node(node, diff=mutable) - result.update(mutable) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - if wait: - pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, - attempts=wait_timeout) - if lb.status != 'ACTIVE': - module.fail_json( - msg='Load balancer not active after %ds (current status: %s)' % - (wait_timeout, lb.status.lower())) - - kwargs = {'node': result} if result else {} - module.exit_json(changed=True, state=state, **kwargs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/plugins/modules/cloud/rackspace/rax_clb_ssl.py deleted file mode 100644 index adf375124d..0000000000 --- a/plugins/modules/cloud/rackspace/rax_clb_ssl.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_clb_ssl -short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. -description: -- Set up, reconfigure, or remove SSL termination for an existing load balancer. -options: - loadbalancer: - type: str - description: - - Name or ID of the load balancer on which to manage SSL termination. - required: true - state: - type: str - description: - - If set to "present", SSL termination will be added to this load balancer. - - If "absent", SSL termination will be removed instead. - choices: - - present - - absent - default: present - enabled: - description: - - If set to "false", temporarily disable SSL termination without discarding - - existing credentials. - default: true - type: bool - private_key: - type: str - description: - - The private SSL key as a string in PEM format. 
-  certificate:
-    type: str
-    description:
-    - The public SSL certificates as a string in PEM format.
-  intermediate_certificate:
-    type: str
-    description:
-    - One or more intermediate certificate authorities as a string in PEM
-      format, concatenated into a single string.
-  secure_port:
-    type: int
-    description:
-    - The port on which to listen for secure traffic.
-    default: 443
-  secure_traffic_only:
-    description:
-    - If "true", the load balancer will *only* accept secure traffic.
-    default: false
-    type: bool
-  https_redirect:
-    description:
-    - If "true", the load balancer will redirect HTTP traffic to HTTPS.
-    - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
-      termination is also applied or removed.
-    type: bool
-  wait:
-    description:
-    - Wait for the balancer to be in state "running" before returning.
-    default: false
-    type: bool
-  wait_timeout:
-    type: int
-    description:
-    - How long before "wait" gives up, in seconds.
-    default: 300
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Enable SSL termination on a load balancer
-  community.general.rax_clb_ssl:
-    loadbalancer: the_loadbalancer
-    state: present
-    private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
-    certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
-    intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
-    secure_traffic_only: true
-    wait: true
-
-- name: Disable SSL termination
-  community.general.rax_clb_ssl:
-    loadbalancer: "{{ registered_lb.balancer.id }}"
-    state: absent
-    wait: true
-'''
-
-try:
-    import pyrax
-    HAS_PYRAX = True
-except ImportError:
-    HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
-                                                                            rax_find_loadbalancer,
-                                                                            rax_required_together,
-                                                                            rax_to_dict,
-                                                                            setup_rax_module,
-                                                                            )
-
-
-def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
-                            certificate, intermediate_certificate, secure_port,
-                            secure_traffic_only, https_redirect,
-                            wait, wait_timeout):
-    # Validate arguments.
-
-    if state == 'present':
-        if not private_key:
-            module.fail_json(msg="private_key must be provided.")
-        else:
-            private_key = private_key.strip()
-
-        if not certificate:
-            module.fail_json(msg="certificate must be provided.")
-        else:
-            certificate = certificate.strip()
-
-    attempts = wait_timeout // 5
-
-    # Locate the load balancer.
-
-    balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
-    existing_ssl = balancer.get_ssl_termination()
-
-    changed = False
-
-    if state == 'present':
-        # Apply or reconfigure SSL termination on the load balancer.
-        ssl_attrs = dict(
-            securePort=secure_port,
-            privatekey=private_key,
-            certificate=certificate,
-            intermediateCertificate=intermediate_certificate,
-            enabled=enabled,
-            secureTrafficOnly=secure_traffic_only
-        )
-
-        needs_change = False
-
-        if existing_ssl:
-            for ssl_attr, value in ssl_attrs.items():
-                if ssl_attr == 'privatekey':
-                    # The private key is not included in get_ssl_termination's
-                    # output (as it shouldn't be). Also, if you're changing the
-                    # private key, you'll also be changing the certificate,
-                    # so we don't lose anything by not checking it.
- continue - - if value is not None and existing_ssl.get(ssl_attr) != value: - # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr)) - needs_change = True - else: - needs_change = True - - if needs_change: - try: - balancer.add_ssl_termination(**ssl_attrs) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - elif state == 'absent': - # Remove SSL termination if it's already configured. - if existing_ssl: - try: - balancer.delete_ssl_termination() - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if https_redirect is not None and balancer.httpsRedirect != https_redirect: - if changed: - # This wait is unavoidable because load balancers are immutable - # while the SSL termination changes above are being applied. - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - try: - balancer.update(httpsRedirect=https_redirect) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if changed and wait: - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - new_ssl_termination = balancer.get_ssl_termination() - - # Intentionally omit the private key from the module output, so you don't - # accidentally echo it with `ansible-playbook -v` or `debug`, and the - # certificate, which is just long. Convert other attributes to snake_case - # and include https_redirect at the top-level. - if new_ssl_termination: - new_ssl = dict( - enabled=new_ssl_termination['enabled'], - secure_port=new_ssl_termination['securePort'], - secure_traffic_only=new_ssl_termination['secureTrafficOnly'] - ) - else: - new_ssl = None - - result = dict( - changed=changed, - https_redirect=balancer.httpsRedirect, - ssl_termination=new_ssl, - balancer=rax_to_dict(balancer, 'clb') - ) - success = True - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - success = False - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - success = False - - if success: - module.exit_json(**result) - else: - module.fail_json(**result) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update(dict( - loadbalancer=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(type='bool', default=True), - private_key=dict(no_log=True), - certificate=dict(), - intermediate_certificate=dict(), - secure_port=dict(type='int', default=443), - secure_traffic_only=dict(type='bool', default=False), - https_redirect=dict(type='bool'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module.') - - loadbalancer = module.params.get('loadbalancer') - state = module.params.get('state') - enabled = module.boolean(module.params.get('enabled')) - private_key = module.params.get('private_key') - certificate = module.params.get('certificate') - intermediate_certificate = module.params.get('intermediate_certificate') - secure_port = module.params.get('secure_port') - secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) - https_redirect = module.boolean(module.params.get('https_redirect')) - wait = 
module.boolean(module.params.get('wait')) - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_load_balancer_ssl( - module, loadbalancer, state, enabled, private_key, certificate, - intermediate_certificate, secure_port, secure_traffic_only, - https_redirect, wait, wait_timeout - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_dns.py b/plugins/modules/cloud/rackspace/rax_dns.py deleted file mode 100644 index 915e13a9a6..0000000000 --- a/plugins/modules/cloud/rackspace/rax_dns.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns -short_description: Manage domains on Rackspace Cloud DNS -description: - - Manage domains on Rackspace Cloud DNS -options: - comment: - type: str - description: - - Brief description of the domain. Maximum length of 160 characters - email: - type: str - description: - - Email address of the domain administrator - name: - type: str - description: - - Domain name to create - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of domain in seconds - default: 3600 -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create domain - hosts: all - gather_facts: False - tasks: - - name: Domain create request - local_action: - module: rax_dns - credentials: ~/.raxpub - name: example.org - email: admin@example.org - register: rax_dns -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns(module, comment, email, name, state, ttl): - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not email: - module.fail_json(msg='An "email" attribute is required for ' - 'creating a domain') - - try: - domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - domain = dns.create(name=name, emailAddress=email, ttl=ttl, - comment=comment) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(domain, 'comment', None): - update['comment'] = comment - if ttl != getattr(domain, 'ttl', None): - update['ttl'] = ttl - if email != getattr(domain, 'emailAddress', None): - update['emailAddress'] = email - - if update: - try: - domain.update(**update) - changed = True - domain.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=name) - except pyrax.exceptions.NotFound: - domain = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if domain: - try: - domain.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, domain=rax_to_dict(domain)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - email=dict(), - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - email = module.params.get('email') - name = module.params.get('name') - state = module.params.get('state') - ttl = module.params.get('ttl') - - setup_rax_module(module, pyrax, False) - - rax_dns(module, comment, email, name, state, ttl) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_dns_record.py b/plugins/modules/cloud/rackspace/rax_dns_record.py deleted file mode 100644 index 1a6986dea7..0000000000 --- a/plugins/modules/cloud/rackspace/rax_dns_record.py +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns_record -short_description: Manage DNS records on Rackspace Cloud DNS -description: - - Manage DNS records on Rackspace Cloud DNS -options: - comment: - type: str - description: - - Brief description of the domain. Maximum length of 160 characters - data: - type: str - description: - - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for - SRV/TXT - required: True - domain: - type: str - description: - - Domain name to create the record in. This is an invalid option when - type=PTR - loadbalancer: - type: str - description: - - Load Balancer ID to create a PTR record for. Only used with type=PTR - name: - type: str - description: - - FQDN record name to create - required: True - overwrite: - description: - - Add new records if data doesn't match, instead of updating existing - record with matching name. 
If there are already multiple records with - matching name and overwrite=true, this module will fail. - default: true - type: bool - priority: - type: int - description: - - Required for MX and SRV records, but forbidden for other record types. - If specified, must be an integer from 0 to 65535. - server: - type: str - description: - - Server ID to create a PTR record for. Only used with type=PTR - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of record in seconds - default: 3600 - type: - type: str - description: - - DNS record type - choices: - - A - - AAAA - - CNAME - - MX - - NS - - SRV - - TXT - - PTR - required: true -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" - - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be - supplied - - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - - C(PTR) record support was added in version 1.7 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create DNS Records - hosts: all - gather_facts: False - tasks: - - name: Create A record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - domain: example.org - name: www.example.org - data: "{{ rax_accessipv4 }}" - type: A - register: a_record - - - name: Create PTR record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - server: "{{ rax_id }}" - name: "{{ inventory_hostname }}" - region: DFW - register: ptr_record -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_find_server, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, - name=None, server=None, state='present', ttl=7200): - changed = False - results = [] - - dns = pyrax.cloud_dns - - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if loadbalancer: - item = rax_find_loadbalancer(module, pyrax, loadbalancer) - elif server: - item = rax_find_server(module, pyrax, server) - - if state == 'present': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - if record.ttl != ttl or record.name != name: - try: - dns.update_ptr_record(item, record, name, data, ttl) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - record.ttl = ttl - record.name = name - results.append(rax_to_dict(record)) - break - else: - results.append(rax_to_dict(record)) - break - - if not results: - record = dict(name=name, type='PTR', data=data, ttl=ttl, - comment=comment) - try: - results = dns.add_ptr_records(item, [record]) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - elif state == 'absent': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - results.append(rax_to_dict(record)) - break - - if results: - try: - dns.delete_ptr_records(item, data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - -def rax_dns_record(module, comment=None, data=None, domain=None, name=None, - overwrite=True, priority=None, record_type='A', - state='present', ttl=7200): - """Function for manipulating record types other than PTR""" - - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not priority and record_type in ['MX', 'SRV']: - module.fail_json(msg='A "priority" attribute is required for ' - 'creating a MX or SRV record') - - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - if overwrite: - record = domain.find_record(record_type, name=name) - else: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='overwrite=true and there are multiple matching records') - except pyrax.exceptions.DomainRecordNotFound as e: - try: - record_data = { - 'type': record_type, - 'name': name, - 'data': data, - 'ttl': ttl - } - if comment: - record_data.update(dict(comment=comment)) - if priority and record_type.upper() in ['MX', 'SRV']: - record_data.update(dict(priority=priority)) - - record = domain.add_records([record_data])[0] - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(record, 'comment', None): - update['comment'] = comment - if ttl != getattr(record, 'ttl', None): - update['ttl'] = ttl - if priority != getattr(record, 'priority', None): - update['priority'] = priority - if data != getattr(record, 'data', None): - update['data'] = data - - if update: - try: - record.update(**update) - changed = True - record.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound as e: - record = {} - except 
pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='%s' % e.message) - - if record: - try: - record.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, record=rax_to_dict(record)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - data=dict(required=True), - domain=dict(), - loadbalancer=dict(), - name=dict(required=True), - overwrite=dict(type='bool', default=True), - priority=dict(type='int'), - server=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', - 'SRV', 'TXT', 'PTR']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['server', 'loadbalancer', 'domain'], - ], - required_one_of=[ - ['server', 'loadbalancer', 'domain'], - ], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - data = module.params.get('data') - domain = module.params.get('domain') - loadbalancer = module.params.get('loadbalancer') - name = module.params.get('name') - overwrite = module.params.get('overwrite') - priority = module.params.get('priority') - server = module.params.get('server') - state = module.params.get('state') - ttl = module.params.get('ttl') - record_type = module.params.get('type') - - setup_rax_module(module, pyrax, False) - - if record_type.upper() == 'PTR': - if not server and not loadbalancer: - module.fail_json(msg='one of the following is required: ' - 'server,loadbalancer') - rax_dns_record_ptr(module, data=data, comment=comment, - loadbalancer=loadbalancer, name=name, server=server, - state=state, ttl=ttl) - else: - rax_dns_record(module, comment=comment, data=data, domain=domain, - name=name, overwrite=overwrite, priority=priority, - record_type=record_type, state=state, ttl=ttl) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py deleted file mode 100644 index 0288a5e35b..0000000000 --- a/plugins/modules/cloud/rackspace/rax_facts.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_facts -short_description: Gather facts for Rackspace Cloud Servers -description: - - Gather facts for Rackspace Cloud Servers. 
-options: - address: - type: str - description: - - Server IP address to retrieve facts for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to retrieve facts for - name: - type: str - description: - - Server name to retrieve facts for -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Gather info about servers - hosts: all - gather_facts: False - tasks: - - name: Get facts about servers - local_action: - module: rax_facts - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - - name: Map some facts - ansible.builtin.set_fact: - ansible_ssh_host: "{{ rax_accessipv4 }}" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_facts(module, address, name, server_id): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - ansible_facts = {} - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - servers[:] = [server for server in servers if server.status != "DELETED"] - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif len(servers) == 1: - ansible_facts = rax_to_dict(servers[0], 'server') - - module.exit_json(changed=changed, ansible_facts=ansible_facts) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - supports_check_mode=True, - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - - setup_rax_module(module, pyrax) - - rax_facts(module, address, name, server_id) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_files.py b/plugins/modules/cloud/rackspace/rax_files.py deleted file mode 100644 index 1e1f82c85d..0000000000 --- a/plugins/modules/cloud/rackspace/rax_files.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_files -short_description: Manipulate Rackspace Cloud Files Containers -description: - - 
Manipulate Rackspace Cloud Files Containers -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing containers. - Selecting this option is only appropriate when setting type=meta - type: bool - default: "no" - container: - type: str - description: - - The container to use for container or metadata operations. - meta: - type: dict - description: - - A hash of items to set as metadata values on a container - private: - description: - - Used to set a container as private, removing it from the CDN. B(Warning!) - Private containers, if previously made public, can have live objects - available until the TTL on cached objects expires - type: bool - default: false - public: - description: - - Used to set a container as public, available via the Cloud Files CDN - type: bool - default: false - region: - type: str - description: - - Region to create an instance in - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent', 'list'] - default: present - ttl: - type: int - description: - - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. - Setting a TTL is only appropriate for containers that are public - type: - type: str - description: - - Type of object to do work on, i.e. metadata object or a container object - choices: - - container - - meta - default: container - web_error: - type: str - description: - - Sets an object to be presented as the HTTP error page when accessed by the CDN URL - web_index: - type: str - description: - - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Containers" - hosts: local - gather_facts: no - tasks: - - name: "List all containers" - community.general.rax_files: - state: list - - - name: "Create container called 'mycontainer'" - community.general.rax_files: - container: mycontainer - - - name: "Create container 'mycontainer2' with metadata" - community.general.rax_files: - container: mycontainer2 - meta: - key: value - file_for: someuser@example.com - - - name: "Set a container's web index page" - community.general.rax_files: - container: mycontainer - web_index: index.html - - - name: "Set a container's web error page" - community.general.rax_files: - container: mycontainer - web_error: error.html - - - name: "Make container public" - community.general.rax_files: - container: mycontainer - public: yes - - - name: "Make container public with a 24 hour TTL" - community.general.rax_files: - container: mycontainer - public: yes - ttl: 86400 - - - name: "Make container private" - community.general.rax_files: - container: mycontainer - private: yes - -- name: "Test Cloud Files Containers Metadata Storage" - hosts: local - gather_facts: no - tasks: - - name: "Get mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - - - name: "Set mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - meta: - uploaded_by: someuser@example.com - - - name: "Remove mycontainer2 metadata" - community.general.rax_files: - container: "mycontainer2" - type: meta - state: absent - meta: - key: "" - file_for: "" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError as e: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule 
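-# NOTE: these helpers come from the collection's shared rax module_utils.
-# As used throughout the rax_* modules, rax_argument_spec() builds the common
-# credential/region argument spec, rax_required_together() declares arguments
-# that must be supplied together, and setup_rax_module() authenticates pyrax
-# from the module parameters before any API call is made.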
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=True) -META_PREFIX = 'x-container-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _fetch_meta(module, container): - EXIT_DICT['meta'] = dict() - try: - for k, v in container.get_metadata().items(): - split_key = k.split(META_PREFIX)[-1] - EXIT_DICT['meta'][split_key] = v - except Exception as e: - module.fail_json(msg=e.message) - - -def meta(cf, module, container_, state, meta_, clear_meta): - c = _get_container(module, cf, container_) - - if meta_ and state == 'present': - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - elif meta_ and state == 'absent': - remove_results = [] - for k, v in meta_.items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - elif state == 'absent': - remove_results = [] - for k, v in c.get_metadata().items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - - _fetch_meta(module, c) - _locals = locals().keys() - - EXIT_DICT['container'] = c.name - if 'meta_set' in _locals or 'remove_results' in _locals: - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, - private, web_index, web_error): - if public and private: - module.fail_json(msg='container cannot be simultaneously ' - 'set to public and private') - - if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): - module.fail_json(msg='state cannot be omitted when setting/removing ' - 'attributes on a container') - - if state == 'list': - # We don't care if attributes are specified, let's list containers - EXIT_DICT['containers'] = cf.list_containers() - module.exit_json(**EXIT_DICT) - - try: - c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer as e: - # Make the container if state=present, otherwise bomb out - if state == 'present': - try: - c = cf.create_container(container_) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['changed'] = True - EXIT_DICT['created'] = True - else: - module.fail_json(msg=e.message) - else: - # Successfully grabbed a container object - # Delete if state is absent - if state == 'absent': - try: - cont_deleted = c.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['deleted'] = True - - if meta_: - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - finally: - _fetch_meta(module, c) - - if ttl: - try: - c.cdn_ttl = ttl - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['ttl'] = c.cdn_ttl - - if public: - try: - cont_public = c.make_public() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, - ssl_url=c.cdn_ssl_uri, - streaming_url=c.cdn_streaming_uri, - ios_uri=c.cdn_ios_uri) - - if private: - try: - cont_private = c.make_private() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_private'] = True - - if web_index: - try: - cont_web_index = c.set_web_index_page(web_index) - except Exception as e: - 
module.fail_json(msg=e.message) - else: - EXIT_DICT['set_index'] = True - finally: - _fetch_meta(module, c) - - if web_error: - try: - cont_err_index = c.set_web_error_page(web_error) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_error'] = True - finally: - _fetch_meta(module, c) - - EXIT_DICT['container'] = c.name - EXIT_DICT['objs_in_container'] = c.object_count - EXIT_DICT['total_bytes'] = c.total_bytes - - _locals = locals().keys() - if ('cont_deleted' in _locals - or 'meta_set' in _locals - or 'cont_public' in _locals - or 'cont_private' in _locals - or 'cont_web_index' in _locals - or 'cont_err_index' in _locals): - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, - public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(), - state=dict(choices=['present', 'absent', 'list'], - default='present'), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - type=dict(choices=['container', 'meta'], default='container'), - ttl=dict(type='int'), - public=dict(default=False, type='bool'), - private=dict(default=False, type='bool'), - web_index=dict(), - web_error=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container_ = module.params.get('container') - state = module.params.get('state') - meta_ = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - typ = module.params.get('type') - ttl = module.params.get('ttl') - public = module.params.get('public') - private = module.params.get('private') - web_index = module.params.get('web_index') - web_error = module.params.get('web_error') - - if state in ['present', 'absent'] and not container_: - module.fail_json(msg='please specify a container name') - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting ' - 'metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_files_objects.py b/plugins/modules/cloud/rackspace/rax_files_objects.py deleted file mode 100644 index 3269fe0512..0000000000 --- a/plugins/modules/cloud/rackspace/rax_files_objects.py +++ /dev/null @@ -1,609 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_files_objects -short_description: Upload, download, and delete objects in Rackspace Cloud Files -description: - - Upload, download, and delete objects in Rackspace 
Cloud Files -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing objects. - Selecting this option is only appropriate when setting type=meta - type: bool - default: 'no' - container: - type: str - description: - - The container to use for file object operations. - required: true - dest: - type: str - description: - - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder". - Used to specify the destination of an operation on a remote object; i.e. a file name, - "file1", or a comma-separated list of remote objects, "file1,file2,file17" - expires: - type: int - description: - - Used to set an expiration on a file or folder uploaded to Cloud Files. - Requires an integer, specifying expiration in seconds - meta: - type: dict - description: - - A hash of items to set as metadata values on an uploaded file or folder - method: - type: str - description: - - The method of operation to be performed. For example, put to upload files - to Cloud Files, get to download files from Cloud Files or delete to delete - remote objects in Cloud Files - choices: - - get - - put - - delete - default: get - src: - type: str - description: - - Source from which to upload files. Used to specify a remote object as a source for - an operation, i.e. a file name, "file1", or a comma-separated list of remote objects, - "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations - structure: - description: - - Used to specify whether to maintain nested directory structure when downloading objects - from Cloud Files. Setting to false downloads the contents of a container to a single, - flat directory - type: bool - default: 'yes' - type: - type: str - description: - - Type of object to do work on - - Metadata object or a file object - choices: - - file - - meta - default: file -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Objects" - hosts: local - gather_facts: False - tasks: - - name: "Get objects from test container" - community.general.rax_files_objects: - container: testcont - dest: ~/Downloads/testcont - - - name: "Get single object from test container" - community.general.rax_files_objects: - container: testcont - src: file1 - dest: ~/Downloads/testcont - - - name: "Get several objects from test container" - community.general.rax_files_objects: - container: testcont - src: file1,file2,file3 - dest: ~/Downloads/testcont - - - name: "Delete one object in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file1 - - - name: "Delete several objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file2,file3,file4 - - - name: "Delete all objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - - - name: "Upload all files to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/onehundred - - - name: "Upload one file to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file1 - - - name: "Upload one file to test container with metadata" - community.general.rax_files_objects: - container: testcont - src: ~/Downloads/testcont/file2 - method: put - meta: - testkey: testdata - 
who_uploaded_this: someuser@example.com - - - name: "Upload one file to test container with TTL of 60 seconds" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file3 - expires: 60 - - - name: "Attempt to get remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: get - src: FileThatDoesNotExist.jpg - dest: ~/Downloads/testcont - ignore_errors: yes - - - name: "Attempt to delete remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: delete - dest: FileThatDoesNotExist.jpg - ignore_errors: yes - -- name: "Test Cloud Files Objects Metadata" - hosts: local - gather_facts: false - tasks: - - name: "Get metadata on one object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file2 - - - name: "Get metadata on several objects" - community.general.rax_files_objects: - container: testcont - type: meta - src: file2,file1 - - - name: "Set metadata on an object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: put - meta: - key1: value1 - key2: value2 - clear_meta: true - - - name: "Verify metadata is set" - community.general.rax_files_objects: - container: testcont - type: meta - src: file17 - - - name: "Delete metadata" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: delete - meta: - key1: '' - key2: '' - - - name: "Get metadata on all objects" - community.general.rax_files_objects: - container: testcont - type: meta -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=False) -META_PREFIX = 'x-object-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _upload_folder(cf, folder, container, ttl=None, headers=None): - """ Uploads a folder to Cloud Files. - """ - total_bytes = 0 - for root, dirs, files in os.walk(folder): - for fname in files: - full_path = os.path.join(root, fname) - obj_name = os.path.relpath(full_path, folder) - obj_size = os.path.getsize(full_path) - cf.upload_file(container, full_path, - obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) - total_bytes += obj_size - return total_bytes - - -def upload(module, cf, container, src, dest, meta, expires): - """ Uploads a single object or a folder to Cloud Files Optionally sets an - metadata, TTL value (expires), or Content-Disposition and Content-Encoding - headers. 
-    """
-    if not src:
-        module.fail_json(msg='src must be specified when uploading')
-
-    c = _get_container(module, cf, container)
-    src = os.path.abspath(os.path.expanduser(src))
-    is_dir = os.path.isdir(src)
-
-    if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
-        module.fail_json(msg='src must be a file or a directory')
-    if dest and is_dir:
-        module.fail_json(msg='dest cannot be set when whole '
-                             'directories are uploaded')
-
-    cont_obj = None
-    total_bytes = 0
-    if dest and not is_dir:
-        try:
-            cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
-        except Exception as e:
-            module.fail_json(msg=e.message)
-    elif is_dir:
-        try:
-            total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
-        except Exception as e:
-            module.fail_json(msg=e.message)
-    else:
-        try:
-            cont_obj = c.upload_file(src, ttl=expires, headers=meta)
-        except Exception as e:
-            module.fail_json(msg=e.message)
-
-    EXIT_DICT['success'] = True
-    EXIT_DICT['container'] = c.name
-    EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
-    if cont_obj or total_bytes > 0:
-        EXIT_DICT['changed'] = True
-        if meta:
-            EXIT_DICT['meta'] = dict(updated=True)
-
-        if cont_obj:
-            EXIT_DICT['bytes'] = cont_obj.total_bytes
-            EXIT_DICT['etag'] = cont_obj.etag
-        else:
-            EXIT_DICT['bytes'] = total_bytes
-
-    module.exit_json(**EXIT_DICT)
-
-
-def download(module, cf, container, src, dest, structure):
-    """ Download objects from Cloud Files to a local path specified by "dest".
-    Optionally disable maintaining a directory structure by passing a
-    false value to "structure".
-    """
-    # Looking for an explicit destination
-    if not dest:
-        module.fail_json(msg='dest is a required argument when '
-                             'downloading from Cloud Files')
-
-    # Attempt to fetch the container by name
-    c = _get_container(module, cf, container)
-
-    # Accept a single object name or a comma-separated list of objs
-    # If not specified, get the entire container
-    if src:
-        objs = src.split(',')
-        objs = list(map(str.strip, objs))
-    else:
-        objs = c.get_object_names()
-
-    dest = os.path.abspath(os.path.expanduser(dest))
-    is_dir = os.path.isdir(dest)
-
-    if not is_dir:
-        module.fail_json(msg='dest must be a directory')
-
-    results = []
-    for obj in objs:
-        try:
-            c.download_object(obj, dest, structure=structure)
-        except Exception as e:
-            module.fail_json(msg=e.message)
-        else:
-            results.append(obj)
-
-    len_results = len(results)
-    len_objs = len(objs)
-
-    EXIT_DICT['container'] = c.name
-    EXIT_DICT['requested_downloaded'] = results
-    if results:
-        EXIT_DICT['changed'] = True
-    if len_results == len_objs:
-        EXIT_DICT['success'] = True
-        EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
-    else:
-        EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
-            "downloaded" % (len_results, len_objs)
-    module.exit_json(**EXIT_DICT)
-
-
-def delete(module, cf, container, src, dest):
-    """ Delete specific objects by providing a single file name or a
-    comma-separated list to src OR dest (but not both). Omitting file name(s)
-    assumes the entire container is to be deleted.
- """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - c = _get_container(module, cf, container) - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - num_objs = len(objs) - - results = [] - for obj in objs: - try: - result = c.delete_object(obj) - except Exception as e: - module.fail_json(msg=e.message) - else: - results.append(result) - - num_deleted = results.count(True) - - EXIT_DICT['container'] = c.name - EXIT_DICT['deleted'] = num_deleted - EXIT_DICT['requested_deleted'] = objs - - if num_deleted: - EXIT_DICT['changed'] = True - - if num_objs == num_deleted: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects deleted" % num_deleted - else: - EXIT_DICT['msg'] = ("Error: only %s of %s objects " - "deleted" % (num_deleted, num_objs)) - module.exit_json(**EXIT_DICT) - - -def get_meta(module, cf, container, src, dest): - """ Get metadata for a single file, comma-separated list, or entire - container - """ - c = _get_container(module, cf, container) - - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - results = dict() - for obj in objs: - try: - meta = c.get_object(obj).get_metadata() - except Exception as e: - module.fail_json(msg=e.message) - else: - results[obj] = dict() - for k, v in meta.items(): - meta_key = k.split(META_PREFIX)[-1] - results[obj][meta_key] = v - - EXIT_DICT['container'] = c.name - if results: - EXIT_DICT['meta_results'] = results - EXIT_DICT['success'] = True - module.exit_json(**EXIT_DICT) - - -def put_meta(module, cf, container, src, dest, meta, clear_meta): - """ Set metadata on a container, single file, or comma-separated list. - Passing a true value to clear_meta clears the metadata stored in Cloud - Files before setting the new metadata to the value of "meta". - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to set meta" - " have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = map(str.strip, objs) - - c = _get_container(module, cf, container) - - results = [] - for obj in objs: - try: - result = c.get_object(obj).set_metadata(meta, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - else: - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_changed'] = True - module.exit_json(**EXIT_DICT) - - -def delete_meta(module, cf, container, src, dest, meta): - """ Removes metadata keys and values specified in meta, if any. 
-def delete_meta(module, cf, container, src, dest, meta):
-    """ Removes the metadata keys and values specified in meta, if any.
-    Deletes the keys on all objects specified by src or dest (but not both);
-    if neither is given, it deletes the keys on every object in the container.
-    """
-    objs = None
-    if src and dest:
-        module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
-                             "deleted have been specified on both src and dest"
-                             " args")
-    elif dest:
-        objs = dest
-    else:
-        objs = src
-
-    objs = objs.split(',')
-    objs = [obj.strip() for obj in objs]
-
-    c = _get_container(module, cf, container)
-
-    results = []  # Num of metadata keys removed, not objects affected
-    for obj in objs:
-        if meta:
-            for k, v in meta.items():
-                try:
-                    result = c.get_object(obj).remove_metadata_key(k)
-                except Exception as e:
-                    module.fail_json(msg=e.message)
-                else:
-                    results.append(result)
-        else:
-            try:
-                o = c.get_object(obj)
-            except pyrax.exc.NoSuchObject as e:
-                module.fail_json(msg=e.message)
-
-            for k, v in o.get_metadata().items():
-                try:
-                    result = o.remove_metadata_key(k)
-                except Exception as e:
-                    module.fail_json(msg=e.message)
-                results.append(result)
-
-    EXIT_DICT['container'] = c.name
-    EXIT_DICT['success'] = True
-    if results:
-        EXIT_DICT['changed'] = True
-        EXIT_DICT['num_deleted'] = len(results)
-    module.exit_json(**EXIT_DICT)
-
-
-def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
-               structure, expires):
-    """ Dispatch from here to work with metadata or file objects """
-    cf = pyrax.cloudfiles
-
-    if cf is None:
-        module.fail_json(msg='Failed to instantiate client. This '
-                             'typically indicates an invalid region or an '
-                             'incorrectly capitalized region name.')
-
-    if typ == "file":
-        if method == 'put':
-            upload(module, cf, container, src, dest, meta, expires)
-
-        elif method == 'get':
-            download(module, cf, container, src, dest, structure)
-
-        elif method == 'delete':
-            delete(module, cf, container, src, dest)
-
-    else:
-        if method == 'get':
-            get_meta(module, cf, container, src, dest)
-
-        if method == 'put':
-            put_meta(module, cf, container, src, dest, meta, clear_meta)
-
-        if method == 'delete':
-            delete_meta(module, cf, container, src, dest, meta)
-
-
-def main():
-    argument_spec = rax_argument_spec()
-    argument_spec.update(
-        dict(
-            container=dict(required=True),
-            src=dict(),
-            dest=dict(),
-            method=dict(default='get', choices=['put', 'get', 'delete']),
-            type=dict(default='file', choices=['file', 'meta']),
-            meta=dict(type='dict', default=dict()),
-            clear_meta=dict(default=False, type='bool'),
-            structure=dict(default=True, type='bool'),
-            expires=dict(type='int'),
-        )
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        required_together=rax_required_together()
-    )
-
-    if not HAS_PYRAX:
-        module.fail_json(msg='pyrax is required for this module')
-
-    container = module.params.get('container')
-    src = module.params.get('src')
-    dest = module.params.get('dest')
-    method = module.params.get('method')
-    typ = module.params.get('type')
-    meta = module.params.get('meta')
-    clear_meta = module.params.get('clear_meta')
-    structure = module.params.get('structure')
-    expires = module.params.get('expires')
-
-    if clear_meta and not typ == 'meta':
-        module.fail_json(msg='clear_meta can only be used when setting metadata')
-
-    setup_rax_module(module, pyrax)
-    cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/rackspace/rax_identity.py b/plugins/modules/cloud/rackspace/rax_identity.py
deleted file mode 100644
index 2021052faa..0000000000
---
a/plugins/modules/cloud/rackspace/rax_identity.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_identity -short_description: Load Rackspace Cloud Identity -description: - - Verifies Rackspace Cloud credentials and returns identity information -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present'] - default: present - required: false -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Load Rackspace Cloud Identity - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Identity - local_action: - module: rax_identity - credentials: ~/.raxpub - region: DFW - register: rackspace_identity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def cloud_identity(module, state, identity): - instance = dict( - authenticated=identity.authenticated, - credentials=identity._creds_file - ) - changed = False - - instance.update(rax_to_dict(identity)) - instance['services'] = instance.get('services', {}).keys() - - if state == 'present': - if not identity.authenticated: - module.fail_json(msg='Credentials could not be verified!') - - module.exit_json(changed=changed, identity=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - if not pyrax.identity: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloud_identity(module, state, pyrax.identity) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_keypair.py b/plugins/modules/cloud/rackspace/rax_keypair.py deleted file mode 100644 index 90b0183e50..0000000000 --- a/plugins/modules/cloud/rackspace/rax_keypair.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_keypair -short_description: Create a keypair for use with Rackspace Cloud Servers -description: - - Create a keypair for use with Rackspace Cloud Servers -options: - name: - type: str - description: - - Name of keypair - required: true - public_key: - type: str - description: - - Public Key string to upload. 
Can be a file path or string - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -notes: - - Keypairs cannot be manipulated, only created and deleted. To "update" a - keypair you must first delete and then recreate. - - The ability to specify a file path for the public key was added in 1.7 -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - region: DFW - register: keypair - - name: Create local public key - local_action: - module: copy - content: "{{ keypair.keypair.public_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" - - name: Create local private key - local_action: - module: copy - content: "{{ keypair.keypair.private_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" - -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" - region: DFW - register: keypair -''' -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_keypair(module, name, public_key, state): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - keypair = {} - - if state == 'present': - if public_key and os.path.isfile(public_key): - try: - f = open(public_key) - public_key = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % public_key) - - try: - keypair = cs.keypairs.find(name=name) - except cs.exceptions.NotFound: - try: - keypair = cs.keypairs.create(name, public_key) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - keypair = cs.keypairs.find(name=name) - except Exception: - pass - - if keypair: - try: - keypair.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - public_key=dict(), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - public_key = module.params.get('public_key') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - rax_keypair(module, name, public_key, state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_meta.py b/plugins/modules/cloud/rackspace/rax_meta.py deleted file mode 100644 index 3504181f19..0000000000 --- a/plugins/modules/cloud/rackspace/rax_meta.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_meta -short_description: Manipulate metadata for Rackspace Cloud Servers -description: - - Manipulate metadata for Rackspace Cloud Servers -options: - address: - type: str - description: - - Server IP address to modify metadata for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to modify metadata for - name: - type: str - description: - - Server name to modify metadata for - meta: - type: dict - description: - - A hash of metadata to associate with the instance -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Set metadata for a server - hosts: all - gather_facts: False - tasks: - - name: Set metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - meta: - group: primary_group - groups: - - group_two - - group_three - app: my_app - - - name: Clear metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW -''' - -import json - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module -from ansible.module_utils.six import string_types - - -def rax_meta(module, 
address, name, server_id, meta): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif not servers: - module.fail_json(msg='Failed to find a server matching provided ' - 'search parameters') - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - server = servers[0] - if server.metadata == meta: - changed = False - else: - changed = True - removed = set(server.metadata.keys()).difference(meta.keys()) - cs.servers.delete_meta(server, list(removed)) - cs.servers.set_meta(server, meta) - server.get() - - module.exit_json(changed=changed, meta=server.metadata) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - meta=dict(type='dict', default=dict()), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - meta = module.params.get('meta') - - setup_rax_module(module, pyrax) - - rax_meta(module, address, name, server_id, meta) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/plugins/modules/cloud/rackspace/rax_mon_alarm.py deleted file mode 100644 index 7e99db3fa8..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_alarm.py +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_alarm -short_description: Create or delete a Rackspace Cloud Monitoring alarm. -description: -- Create or delete a Rackspace Cloud Monitoring alarm that associates an - existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with - criteria that specify what conditions will trigger which levels of - notifications. Rackspace monitoring module flow | rax_mon_entity -> - rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> - *rax_mon_alarm* -options: - state: - type: str - description: - - Ensure that the alarm with this C(label) exists or does not exist. 
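
The label and criteria length limits described in the options below are enforced at the top of alarm() further down; because criteria is optional, the bounds check has to be guarded so an omitted value never reaches len(None). A sketch of that validation in isolation (the function name is illustrative):

def validate_alarm_args(label, criteria=None):
    if len(label) < 1 or len(label) > 255:
        raise ValueError('label must be between 1 and 255 characters long')
    # criteria may be omitted entirely; only bounds-check it when supplied.
    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
        raise ValueError('criteria must be between 1 and 16384 characters long')

validate_alarm_args('uhoh')  # fine: criteria omitted
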
- choices: [ "present", "absent" ] - required: false - default: present - label: - type: str - description: - - Friendly name for this alarm, used to achieve idempotence. Must be a String - between 1 and 255 characters long. - required: true - entity_id: - type: str - description: - - ID of the entity this alarm is attached to. May be acquired by registering - the value of a rax_mon_entity task. - required: true - check_id: - type: str - description: - - ID of the check that should be alerted on. May be acquired by registering - the value of a rax_mon_check task. - required: true - notification_plan_id: - type: str - description: - - ID of the notification plan to trigger if this alarm fires. May be acquired - by registering the value of a rax_mon_notification_plan task. - required: true - criteria: - type: str - description: - - Alarm DSL that describes alerting conditions and their output states. Must - be between 1 and 16384 characters long. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html - for a reference on the alerting language. - disabled: - description: - - If yes, create this alarm, but leave it in an inactive state. Defaults to - no. - type: bool - default: false - metadata: - type: dict - description: - - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String - keys and values between 1 and 255 characters long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Alarm example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Ensure that a specific alarm exists. - community.general.rax_mon_alarm: - credentials: ~/.rax_pub - state: present - label: uhoh - entity_id: "{{ the_entity['entity']['id'] }}" - check_id: "{{ the_check['check']['id'] }}" - notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" - criteria: > - if (rate(metric['average']) > 10) { - return new AlarmStatus(WARNING); - } - return new AlarmStatus(OK); - register: the_alarm -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, - disabled, metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - if criteria and len(criteria) < 1 or len(criteria) > 16384: - module.fail_json(msg='criteria must be between 1 and 16384 characters long') - - # Coerce attributes. - - changed = False - alarm = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [a for a in cm.list_alarms(entity_id) if a.label == label] - - if existing: - alarm = existing[0] - - if state == 'present': - should_create = False - should_update = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s existing alarms have the label %s.' 
% - (len(existing), label)) - - if alarm: - if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: - should_delete = should_create = True - - should_update = (disabled and disabled != alarm.disabled) or \ - (metadata and metadata != alarm.metadata) or \ - (criteria and criteria != alarm.criteria) - - if should_update and not should_delete: - cm.update_alarm(entity=entity_id, alarm=alarm, - criteria=criteria, disabled=disabled, - label=label, metadata=metadata) - changed = True - - if should_delete: - alarm.delete() - changed = True - else: - should_create = True - - if should_create: - alarm = cm.create_alarm(entity=entity_id, check=check_id, - notification_plan=notification_plan_id, - criteria=criteria, disabled=disabled, label=label, - metadata=metadata) - changed = True - else: - for a in existing: - a.delete() - changed = True - - if alarm: - alarm_dict = { - "id": alarm.id, - "label": alarm.label, - "check_id": alarm.check_id, - "notification_plan_id": alarm.notification_plan_id, - "criteria": alarm.criteria, - "disabled": alarm.disabled, - "metadata": alarm.metadata - } - module.exit_json(changed=changed, alarm=alarm_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - entity_id=dict(required=True), - check_id=dict(required=True), - notification_plan_id=dict(required=True), - criteria=dict(), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - entity_id = module.params.get('entity_id') - check_id = module.params.get('check_id') - notification_plan_id = module.params.get('notification_plan_id') - criteria = module.params.get('criteria') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - alarm(module, state, label, entity_id, check_id, notification_plan_id, - criteria, disabled, metadata) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_check.py b/plugins/modules/cloud/rackspace/rax_mon_check.py deleted file mode 100644 index 17a3932f6e..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_check.py +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_check -short_description: Create or delete a Rackspace Cloud Monitoring check for an - existing entity. -description: -- Create or delete a Rackspace Cloud Monitoring check associated with an - existing rax_mon_entity. A check is a specific test or measurement that is - performed, possibly from different monitoring zones, on the systems you - monitor. Rackspace monitoring module flow | rax_mon_entity -> - *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> - rax_mon_alarm -options: - state: - type: str - description: - - Ensure that a check with this C(label) exists or does not exist. 
- choices: ["present", "absent"] - default: present - entity_id: - type: str - description: - - ID of the rax_mon_entity to target with this check. - required: true - label: - type: str - description: - - Defines a label for this check, between 1 and 64 characters long. - required: true - check_type: - type: str - description: - - The type of check to create. C(remote.) checks may be created on any - rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities - that have a non-null C(agent_id). - - | - Choices for this option are: - - C(remote.dns) - - C(remote.ftp-banner) - - C(remote.http) - - C(remote.imap-banner) - - C(remote.mssql-banner) - - C(remote.mysql-banner) - - C(remote.ping) - - C(remote.pop3-banner) - - C(remote.postgresql-banner) - - C(remote.smtp-banner) - - C(remote.smtp) - - C(remote.ssh) - - C(remote.tcp) - - C(remote.telnet-banner) - - C(agent.filesystem) - - C(agent.memory) - - C(agent.load_average) - - C(agent.cpu) - - C(agent.disk) - - C(agent.network) - - C(agent.plugin) - required: true - monitoring_zones_poll: - type: str - description: - - Comma-separated list of the names of the monitoring zones the check should - run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, - mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. - target_hostname: - type: str - description: - - One of `target_hostname` and `target_alias` is required for remote.* checks, - but prohibited for agent.* checks. The hostname this check should target. - Must be a valid IPv4, IPv6, or FQDN. - target_alias: - type: str - description: - - One of `target_alias` and `target_hostname` is required for remote.* checks, - but prohibited for agent.* checks. Use the corresponding key in the entity's - `ip_addresses` hash to resolve an IP address to target. - details: - type: dict - description: - - Additional details specific to the check type. Must be a hash of strings - between 1 and 255 characters long, or an array or object containing 0 to - 256 items. - disabled: - description: - - If "yes", ensure the check is created, but don't actually use it yet. - type: bool - default: false - metadata: - type: dict - description: - - Hash of arbitrary key-value pairs to accompany this check if it fires. - Keys and values must be strings between 1 and 255 characters long. - period: - type: int - description: - - The number of seconds between each time the check is performed. Must be - greater than the minimum period set on your account. - timeout: - type: int - description: - - The number of seconds this check will wait when attempting to collect - results. Must be less than the period. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create a monitoring check - gather_facts: False - hosts: local - connection: local - tasks: - - name: Associate a check with an existing entity. 
- community.general.rax_mon_check: - credentials: ~/.rax_pub - state: present - entity_id: "{{ the_entity['entity']['id'] }}" - label: the_check - check_type: remote.ping - monitoring_zones_poll: mziad,mzord,mzdfw - details: - count: 10 - meta: - hurf: durf - register: the_check -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout): - - # Coerce attributes. - - if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): - monitoring_zones_poll = [monitoring_zones_poll] - - if period: - period = int(period) - - if timeout: - timeout = int(timeout) - - changed = False - check = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - entity = cm.get_entity(entity_id) - if not entity: - module.fail_json(msg='Failed to instantiate entity. "%s" may not be' - ' a valid entity id.' % entity_id) - - existing = [e for e in entity.list_checks() if e.label == label] - - if existing: - check = existing[0] - - if state == 'present': - if len(existing) > 1: - module.fail_json(msg='%s existing checks have a label of %s.' % - (len(existing), label)) - - should_delete = False - should_create = False - should_update = False - - if check: - # Details may include keys set to default values that are not - # included in the initial creation. - # - # Only force a recreation of the check if one of the *specified* - # keys is missing or has a different value. - if details: - for (key, value) in details.items(): - if key not in check.details: - should_delete = should_create = True - elif value != check.details[key]: - should_delete = should_create = True - - should_update = label != check.label or \ - (target_hostname and target_hostname != check.target_hostname) or \ - (target_alias and target_alias != check.target_alias) or \ - (disabled != check.disabled) or \ - (metadata and metadata != check.metadata) or \ - (period and period != check.period) or \ - (timeout and timeout != check.timeout) or \ - (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) - - if should_update and not should_delete: - check.update(label=label, - disabled=disabled, - metadata=metadata, - monitoring_zones_poll=monitoring_zones_poll, - timeout=timeout, - period=period, - target_alias=target_alias, - target_hostname=target_hostname) - changed = True - else: - # The check doesn't exist yet. 
- should_create = True - - if should_delete: - check.delete() - - if should_create: - check = cm.create_check(entity, - label=label, - check_type=check_type, - target_hostname=target_hostname, - target_alias=target_alias, - monitoring_zones_poll=monitoring_zones_poll, - details=details, - disabled=disabled, - metadata=metadata, - period=period, - timeout=timeout) - changed = True - elif state == 'absent': - if check: - check.delete() - changed = True - else: - module.fail_json(msg='state must be either present or absent.') - - if check: - check_dict = { - "id": check.id, - "label": check.label, - "type": check.type, - "target_hostname": check.target_hostname, - "target_alias": check.target_alias, - "monitoring_zones_poll": check.monitoring_zones_poll, - "details": check.details, - "disabled": check.disabled, - "metadata": check.metadata, - "period": check.period, - "timeout": check.timeout - } - module.exit_json(changed=changed, check=check_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - entity_id=dict(required=True), - label=dict(required=True), - check_type=dict(required=True), - monitoring_zones_poll=dict(), - target_hostname=dict(), - target_alias=dict(), - details=dict(type='dict', default={}), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict', default={}), - period=dict(type='int'), - timeout=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - entity_id = module.params.get('entity_id') - label = module.params.get('label') - check_type = module.params.get('check_type') - monitoring_zones_poll = module.params.get('monitoring_zones_poll') - target_hostname = module.params.get('target_hostname') - target_alias = module.params.get('target_alias') - details = module.params.get('details') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - period = module.params.get('period') - timeout = module.params.get('timeout') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_entity.py b/plugins/modules/cloud/rackspace/rax_mon_entity.py deleted file mode 100644 index 2f8cdeefd8..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_entity.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_entity -short_description: Create or delete a Rackspace Cloud Monitoring entity -description: -- Create or delete a Rackspace Cloud Monitoring entity, which represents a device - to monitor. Entities associate checks and alarms with a target system and - provide a convenient, centralized place to store IP addresses. 
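
The five rax_mon_* modules are designed to be chained: each step consumes the ID produced by the one before it, as the flow spelled out below describes. A compressed pyrax sketch of the whole chain, using the same client calls these modules wrap; the labels, addresses, and criteria program are illustrative only, and pyrax is assumed to be already authenticated (for example via setup_rax_module):

import pyrax  # assumed already authenticated

cm = pyrax.cloud_monitoring
entity = cm.create_entity(label='my_entity', ip_addresses={'web_box': '192.0.2.4'})
check = cm.create_check(entity, label='the_check', check_type='remote.ping',
                        monitoring_zones_poll=['mzdfw'], target_alias='web_box')
email = cm.create_notification('email', label='omg',
                               details={'address': 'me@mailhost.com'})
plan = cm.create_notification_plan(label='defcon1', critical_state=[email.id])
alarm = cm.create_alarm(entity=entity.id, check=check.id,
                        notification_plan=plan.id, label='uhoh',
                        criteria="if (metric['available'] < 80) { return new AlarmStatus(CRITICAL); } return new AlarmStatus(OK);")
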
Rackspace - monitoring module flow | *rax_mon_entity* -> rax_mon_check -> - rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -options: - label: - type: str - description: - - Defines a name for this entity. Must be a non-empty string between 1 and - 255 characters long. - required: true - state: - type: str - description: - - Ensure that an entity with this C(name) exists or does not exist. - choices: ["present", "absent"] - default: present - agent_id: - type: str - description: - - Rackspace monitoring agent on the target device to which this entity is - bound. Necessary to collect C(agent.) rax_mon_checks against this entity. - named_ip_addresses: - type: dict - description: - - Hash of IP addresses that may be referenced by name by rax_mon_checks - added to this entity. Must be a dictionary of with keys that are names - between 1 and 64 characters long, and values that are valid IPv4 or IPv6 - addresses. - metadata: - type: dict - description: - - Hash of arbitrary C(name), C(value) pairs that are passed to associated - rax_mon_alarms. Names and values must all be between 1 and 255 characters - long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Entity example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Ensure an entity exists - community.general.rax_mon_entity: - credentials: ~/.rax_pub - state: present - label: my_entity - named_ip_addresses: - web_box: 192.0.2.4 - db_box: 192.0.2.5 - meta: - hurf: durf - register: the_entity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, - metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for entity in cm.list_entities(): - if label == entity.label: - existing.append(entity) - - entity = None - - if existing: - entity = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing entities have the label %s.' % - (len(existing), label)) - - if entity: - if named_ip_addresses and named_ip_addresses != entity.ip_addresses: - should_delete = should_create = True - - # Change an existing Entity, unless there's nothing to do. - should_update = agent_id and agent_id != entity.agent_id or \ - (metadata and metadata != entity.metadata) - - if should_update and not should_delete: - entity.update(agent_id, metadata) - changed = True - - if should_delete: - entity.delete() - else: - should_create = True - - if should_create: - # Create a new Entity. - entity = cm.create_entity(label=label, agent=agent_id, - ip_addresses=named_ip_addresses, - metadata=metadata) - changed = True - else: - # Delete the existing Entities. 
- for e in existing: - e.delete() - changed = True - - if entity: - entity_dict = { - "id": entity.id, - "name": entity.name, - "agent_id": entity.agent_id, - } - module.exit_json(changed=changed, entity=entity_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - agent_id=dict(), - named_ip_addresses=dict(type='dict', default={}), - metadata=dict(type='dict', default={}) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - agent_id = module.params.get('agent_id') - named_ip_addresses = module.params.get('named_ip_addresses') - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification.py b/plugins/modules/cloud/rackspace/rax_mon_notification.py deleted file mode 100644 index fb645c3036..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_notification.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification -short_description: Create or delete a Rackspace Cloud Monitoring notification. -description: -- Create or delete a Rackspace Cloud Monitoring notification that specifies a - channel that can be used to communicate alarms, such as email, webhooks, or - PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> - *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -options: - state: - type: str - description: - - Ensure that the notification with this C(label) exists or does not exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification. String between 1 and 255 - characters long. - required: true - notification_type: - type: str - description: - - A supported notification type. - choices: ["webhook", "email", "pagerduty"] - required: true - details: - type: dict - description: - - Dictionary of key-value pairs used to initialize the notification. - Required keys and meanings vary with notification type. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ - service-notification-types-crud.html for details. - required: true -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Monitoring notification example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Email me when something goes wrong. 
-    community.general.rax_mon_notification:
-      credentials: ~/.rax_pub
-      label: omg
-      notification_type: email
-      details:
-        address: me@mailhost.com
-    register: the_notification
-'''
-
-try:
-    import pyrax
-    HAS_PYRAX = True
-except ImportError:
-    HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def notification(module, state, label, notification_type, details):
-
-    if len(label) < 1 or len(label) > 255:
-        module.fail_json(msg='label must be between 1 and 255 characters long')
-
-    changed = False
-    notification = None
-
-    cm = pyrax.cloud_monitoring
-    if not cm:
-        module.fail_json(msg='Failed to instantiate client. This typically '
-                             'indicates an invalid region or an incorrectly '
-                             'capitalized region name.')
-
-    existing = []
-    for n in cm.list_notifications():
-        if n.label == label:
-            existing.append(n)
-
-    if existing:
-        notification = existing[0]
-
-    if state == 'present':
-        should_update = False
-        should_delete = False
-        should_create = False
-
-        if len(existing) > 1:
-            module.fail_json(msg='%s existing notifications are labelled %s.' %
-                             (len(existing), label))
-
-        if notification:
-            should_delete = (notification_type != notification.type)
-
-            should_update = (details != notification.details)
-
-            if should_update and not should_delete:
-                notification.update(details=details)
-                changed = True
-
-            if should_delete:
-                notification.delete()
-            else:
-                should_create = True
-
-        if should_create:
-            notification = cm.create_notification(notification_type,
-                                                  label=label, details=details)
-            changed = True
-    else:
-        for n in existing:
-            n.delete()
-            changed = True
-
-    if notification:
-        notification_dict = {
-            "id": notification.id,
-            "type": notification.type,
-            "label": notification.label,
-            "details": notification.details
-        }
-        module.exit_json(changed=changed, notification=notification_dict)
-    else:
-        module.exit_json(changed=changed)
-
-
-def main():
-    argument_spec = rax_argument_spec()
-    argument_spec.update(
-        dict(
-            state=dict(default='present', choices=['present', 'absent']),
-            label=dict(required=True),
-            notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
-            details=dict(required=True, type='dict')
-        )
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        required_together=rax_required_together()
-    )
-
-    if not HAS_PYRAX:
-        module.fail_json(msg='pyrax is required for this module')
-
-    state = module.params.get('state')
-
-    label = module.params.get('label')
-    notification_type = module.params.get('notification_type')
-    details = module.params.get('details')
-
-    setup_rax_module(module, pyrax)
-
-    notification(module, state, label, notification_type, details)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
deleted file mode 100644
index 25e506829f..0000000000
--- a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_notification_plan
-short_description: Create or delete a Rackspace Cloud Monitoring notification
-  plan.
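
A notification plan is essentially a mapping from alarm severity to lists of notification IDs; as the description below explains, this module just forwards those three lists. A sketch of the pyrax call it wraps (cm is pyrax.cloud_monitoring on an authenticated session; the nt... IDs are hypothetical placeholders for registered rax_mon_notification results):

plan = cm.create_notification_plan(
    label='defcon1',
    critical_state=['ntAAAA0001'],  # page the on-call rotation
    warning_state=['ntBBBB0002'],   # email the ops floor
    ok_state=['ntBBBB0002'],        # email again when the alarm clears
)
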
-description: -- Create or delete a Rackspace Cloud Monitoring notification plan by - associating existing rax_mon_notifications with severity levels. Rackspace - monitoring module flow | rax_mon_entity -> rax_mon_check -> - rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -options: - state: - type: str - description: - - Ensure that the notification plan with this C(label) exists or does not - exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification plan. String between 1 and - 255 characters long. - required: true - critical_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is CRITICAL. Must be an - array of valid rax_mon_notification ids. - warning_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is WARNING. Must be an array - of valid rax_mon_notification ids. - ok_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is OK. Must be an array of - valid rax_mon_notification ids. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Example notification plan - gather_facts: False - hosts: local - connection: local - tasks: - - name: Establish who gets called when. - community.general.rax_mon_notification_plan: - credentials: ~/.rax_pub - state: present - label: defcon1 - critical_state: - - "{{ everyone['notification']['id'] }}" - warning_state: - - "{{ opsfloor['notification']['id'] }}" - register: defcon1 -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification_plan(module, state, label, critical_state, warning_state, ok_state): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification_plan = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notification_plans(): - if n.label == label: - existing.append(n) - - if existing: - notification_plan = existing[0] - - if state == 'present': - should_create = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s notification plans are labelled %s.' 
% - (len(existing), label)) - - if notification_plan: - should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ - (warning_state and warning_state != notification_plan.warning_state) or \ - (ok_state and ok_state != notification_plan.ok_state) - - if should_delete: - notification_plan.delete() - should_create = True - else: - should_create = True - - if should_create: - notification_plan = cm.create_notification_plan(label=label, - critical_state=critical_state, - warning_state=warning_state, - ok_state=ok_state) - changed = True - else: - for np in existing: - np.delete() - changed = True - - if notification_plan: - notification_plan_dict = { - "id": notification_plan.id, - "critical_state": notification_plan.critical_state, - "warning_state": notification_plan.warning_state, - "ok_state": notification_plan.ok_state, - "metadata": notification_plan.metadata - } - module.exit_json(changed=changed, notification_plan=notification_plan_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - critical_state=dict(type='list', elements='str'), - warning_state=dict(type='list', elements='str'), - ok_state=dict(type='list', elements='str'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - critical_state = module.params.get('critical_state') - warning_state = module.params.get('warning_state') - ok_state = module.params.get('ok_state') - - setup_rax_module(module, pyrax) - - notification_plan(module, state, label, critical_state, warning_state, ok_state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_network.py b/plugins/modules/cloud/rackspace/rax_network.py deleted file mode 100644 index 146c08c8e1..0000000000 --- a/plugins/modules/cloud/rackspace/rax_network.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_network -short_description: create / delete an isolated network in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud isolated network. -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - label: - type: str - description: - - Label (name) to give the network - required: yes - cidr: - type: str - description: - - cidr of the network being created -author: - - "Christopher H. 
Laco (@claco)" - - "Jesse Keating (@omgjlk)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build an Isolated Network - gather_facts: False - - tasks: - - name: Network create request - local_action: - module: rax_network - credentials: ~/.raxpub - label: my-net - cidr: 192.168.3.0/24 - state: present -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_network(module, state, label, cidr): - changed = False - network = None - networks = [] - - if not pyrax.cloud_networks: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not cidr: - module.fail_json(msg='missing required arguments: cidr') - - try: - network = pyrax.cloud_networks.find_network_by_label(label) - except pyrax.exceptions.NetworkNotFound: - try: - network = pyrax.cloud_networks.create(label, cidr=cidr) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - network.delete() - changed = True - except pyrax.exceptions.NetworkNotFound: - pass - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if network: - instance = dict(id=network.id, - label=network.label, - cidr=network.cidr) - networks.append(instance) - - module.exit_json(changed=changed, networks=networks) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', - choices=['present', 'absent']), - label=dict(required=True), - cidr=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - cidr = module.params.get('cidr') - - setup_rax_module(module, pyrax) - - cloud_network(module, state, label, cidr) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_queue.py b/plugins/modules/cloud/rackspace/rax_queue.py deleted file mode 100644 index 46c942c70d..0000000000 --- a/plugins/modules/cloud/rackspace/rax_queue.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_queue -short_description: create / delete a queue in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud queue. -options: - name: - type: str - description: - - Name to give the queue - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Queue - gather_facts: False - hosts: local - connection: local - tasks: - - name: Queue create request - local_action: - module: rax_queue - credentials: ~/.raxpub - name: my-queue - region: DFW - state: present - register: my_queue -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_queue(module, state, name): - for arg in (state, name): - if not arg: - module.fail_json(msg='%s is required for rax_queue' % arg) - - changed = False - queues = [] - instance = {} - - cq = pyrax.queues - if not cq: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for queue in cq.list(): - if name != queue.name: - continue - - queues.append(queue) - - if len(queues) > 1: - module.fail_json(msg='Multiple Queues were matched by name') - - if state == 'present': - if not queues: - try: - queue = cq.create(name) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - queue = queues[0] - - instance = dict(name=queue.name) - result = dict(changed=changed, queue=instance) - module.exit_json(**result) - - elif state == 'absent': - if queues: - queue = queues[0] - try: - queue.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, queue=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_queue(module, state, name) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_scaling_group.py b/plugins/modules/cloud/rackspace/rax_scaling_group.py deleted file mode 100644 index 4080e4c6a4..0000000000 --- a/plugins/modules/cloud/rackspace/rax_scaling_group.py +++ /dev/null @@ -1,441 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_group -short_description: Manipulate Rackspace Cloud Autoscale Groups -description: - - Manipulate Rackspace Cloud Autoscale Groups -options: - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: 'no' - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified, it will fallback to C(auto). 
- choices: - - auto - - manual - files: - type: dict - description: - - 'Files to insert into the instance. Hash of C(remotepath: localpath)' - flavor: - type: str - description: - - flavor to use for the instance - required: true - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) - required: true - key_name: - type: str - description: - - key pair to use on the instance - loadbalancers: - type: list - elements: dict - description: - - List of load balancer C(id) and C(port) hashes - max_entities: - type: int - description: - - The maximum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - meta: - type: dict - description: - - A hash of metadata to associate with the instance - min_entities: - type: int - description: - - The minimum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - name: - type: str - description: - - Name to give the scaling group - required: true - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - server_name: - type: str - description: - - The base name for servers created by Autoscale - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). Can be a file path or a string - wait: - description: - - wait for the scaling group to finish provisioning the minimum amount of - servers - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_group: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - flavor: performance1-1 - image: bb02b1a3-bc77-4d17-ab5b-421d89850fca - min_entities: 5 - max_entities: 10 - name: ASG Test - server_name: asgtest - loadbalancers: - - id: 228385 - port: 80 - register: asg -''' - -import base64 -import json -import os -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six import string_types - - -def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None, - image=None, key_name=None, loadbalancers=None, meta=None, - min_entities=0, max_entities=0, name=None, networks=None, - server_name=None, state='present', user_data=None, - config_drive=False, wait=True, wait_timeout=300): - files = {} if files is None else files - loadbalancers = [] if loadbalancers is None else loadbalancers - meta = {} if meta is None else meta - networks = [] if networks is None else networks - - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate 
clients. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if user_data: - config_drive = True - - if user_data and os.path.isfile(user_data): - try: - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - if state == 'present': - # Normalize and ensure all metadata values are strings - if meta: - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - for nic in nics: - # pyrax is currently returning net-id, but we need uuid - # this check makes this forward compatible for a time when - # pyrax uses uuid instead - if nic.get('net-id'): - nic.update(uuid=nic['net-id']) - del nic['net-id'] - - # Handle the file contents - personality = [] - if files: - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - f = open(lpath, 'r') - personality.append({ - 'path': rpath, - 'contents': f.read() - }) - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - - lbs = [] - if loadbalancers: - for lb in loadbalancers: - try: - lb_id = int(lb.get('id')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer ID is not an integer: ' - '%s' % lb.get('id')) - try: - port = int(lb.get('port')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer port is not an ' - 'integer: %s' % lb.get('port')) - if not lb_id or not port: - continue - lbs.append((lb_id, port)) - - try: - sg = au.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - sg = au.create(name, cooldown=cooldown, - min_entities=min_entities, - max_entities=max_entities, - launch_config_type='launch_server', - server_name=server_name, image=image, - flavor=flavor, disk_config=disk_config, - metadata=meta, personality=personality, - networks=nics, load_balancers=lbs, - key_name=key_name, config_drive=config_drive, - user_data=user_data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if not changed: - # Scaling Group Updates - group_args = {} - if cooldown != sg.cooldown: - group_args['cooldown'] = cooldown - - if min_entities != sg.min_entities: - group_args['min_entities'] = min_entities - - if max_entities != sg.max_entities: - group_args['max_entities'] = max_entities - - if group_args: - changed = True - sg.update(**group_args) - - # Launch Configuration Updates - lc = sg.get_launch_config() - lc_args = {} - if server_name != lc.get('name'): - lc_args['server_name'] = server_name - - if image != lc.get('image'): - lc_args['image'] = image - - if flavor != lc.get('flavor'): - lc_args['flavor'] = flavor - - disk_config = disk_config or 'AUTO' - if ((disk_config or lc.get('disk_config')) and - disk_config != lc.get('disk_config', 'AUTO')): - lc_args['disk_config'] = disk_config - - if (meta or lc.get('meta')) and meta != lc.get('metadata'): - lc_args['metadata'] = meta - - test_personality = [] - for p in personality: - test_personality.append({ - 'path': p['path'], - 'contents': base64.b64encode(p['contents']) - }) - if ((test_personality or lc.get('personality')) 
and - test_personality != lc.get('personality')): - lc_args['personality'] = personality - - if nics != lc.get('networks'): - lc_args['networks'] = nics - - if lbs != lc.get('load_balancers'): - # Work around for https://github.com/rackspace/pyrax/pull/393 - lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) - - if key_name != lc.get('key_name'): - lc_args['key_name'] = key_name - - if config_drive != lc.get('config_drive', False): - lc_args['config_drive'] = config_drive - - if (user_data and - base64.b64encode(user_data) != lc.get('user_data')): - lc_args['user_data'] = user_data - - if lc_args: - # Work around for https://github.com/rackspace/pyrax/pull/389 - if 'flavor' not in lc_args: - lc_args['flavor'] = lc.get('flavor') - changed = True - sg.update_launch_config(**lc_args) - - sg.get() - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - state = sg.get_state() - if state["pending_capacity"] == 0: - break - - time.sleep(5) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - else: - try: - sg = au.find(name=name) - sg.delete() - changed = True - except pyrax.exceptions.NotFound as e: - sg = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - config_drive=dict(default=False, type='bool'), - cooldown=dict(type='int', default=300), - disk_config=dict(choices=['auto', 'manual']), - files=dict(type='dict', default={}), - flavor=dict(required=True), - image=dict(required=True), - key_name=dict(), - loadbalancers=dict(type='list', elements='dict'), - meta=dict(type='dict', default={}), - min_entities=dict(type='int', required=True), - max_entities=dict(type='int', required=True), - name=dict(required=True), - networks=dict(type='list', elements='str', default=['public', 'private']), - server_name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - config_drive = module.params.get('config_drive') - cooldown = module.params.get('cooldown') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - files = module.params.get('files') - flavor = module.params.get('flavor') - image = module.params.get('image') - key_name = module.params.get('key_name') - loadbalancers = module.params.get('loadbalancers') - meta = module.params.get('meta') - min_entities = module.params.get('min_entities') - max_entities = module.params.get('max_entities') - name = module.params.get('name') - networks = module.params.get('networks') - server_name = module.params.get('server_name') - state = module.params.get('state') - user_data = module.params.get('user_data') - - if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: - module.fail_json(msg='min_entities and max_entities must be an ' - 'integer between 0 and 1000') - - if not 0 <= cooldown <= 86400: - module.fail_json(msg='cooldown must be an integer between 0 and 86400') - - setup_rax_module(module, pyrax) - - rax_asg(module, cooldown=cooldown, disk_config=disk_config, - 
files=files, flavor=flavor, image=image, meta=meta, - key_name=key_name, loadbalancers=loadbalancers, - min_entities=min_entities, max_entities=max_entities, - name=name, networks=networks, server_name=server_name, - state=state, config_drive=config_drive, user_data=user_data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/plugins/modules/cloud/rackspace/rax_scaling_policy.py deleted file mode 100644 index be46bd62a6..0000000000 --- a/plugins/modules/cloud/rackspace/rax_scaling_policy.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_policy -short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy -description: - - Manipulate Rackspace Cloud Autoscale Scaling Policy -options: - at: - type: str - description: - - The UTC time when this policy will be executed. The time must be - formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as - C(2013-05-19T08:07:08Z) - change: - type: int - description: - - The change, either as a number of servers or as a percentage, to make - in the scaling group. If this is a percentage, you must set - I(is_percent) to C(true) also. - cron: - type: str - description: - - The time when the policy will be executed, as a cron entry. For - example, if this is parameter is set to C(1 0 * * *) - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - desired_capacity: - type: int - description: - - The desired server capacity of the scaling the group; that is, how - many servers should be in the scaling group. - is_percent: - description: - - Whether the value in I(change) is a percent value - default: false - type: bool - name: - type: str - description: - - Name to give the policy - required: true - policy_type: - type: str - description: - - The type of policy that will be executed for the current release. 
- choices: - - webhook - - schedule - required: true - scaling_group: - type: str - description: - - Name of the scaling group that this policy will be added to - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - at: '2013-05-19T08:07:08Z' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - at - policy_type: schedule - scaling_group: ASG Test - register: asps_at - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cron: '1 0 * * *' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - cron - policy_type: schedule - scaling_group: ASG Test - register: asp_cron - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - desired_capacity: 5 - name: ASG Test Policy - webhook - policy_type: webhook - scaling_group: ASG Test - register: asp_webhook -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def rax_asp(module, at=None, change=0, cron=None, cooldown=300, - desired_capacity=0, is_percent=False, name=None, - policy_type=None, scaling_group=None, state='present'): - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate client. 
This '
-                             'typically indicates an invalid region or an '
-                             'incorrectly capitalized region name.')
-
-    try:
-        UUID(scaling_group)
-    except ValueError:
-        try:
-            sg = au.find(name=scaling_group)
-        except Exception as e:
-            module.fail_json(msg='%s' % e.message)
-    else:
-        try:
-            sg = au.get(scaling_group)
-        except Exception as e:
-            module.fail_json(msg='%s' % e.message)
-
-    if state == 'present':
-        policies = list(filter(lambda p: name == p.name, sg.list_policies()))
-        if len(policies) > 1:
-            module.fail_json(msg='No unique policy match found by name')
-        if at:
-            args = dict(at=at)
-        elif cron:
-            args = dict(cron=cron)
-        else:
-            args = None
-
-        if not policies:
-            try:
-                policy = sg.add_policy(name, policy_type=policy_type,
-                                       cooldown=cooldown, change=change,
-                                       is_percent=is_percent,
-                                       desired_capacity=desired_capacity,
-                                       args=args)
-                changed = True
-            except Exception as e:
-                module.fail_json(msg='%s' % e.message)
-
-        else:
-            policy = policies[0]
-            kwargs = {}
-            if policy_type != policy.type:
-                kwargs['policy_type'] = policy_type
-
-            if cooldown != policy.cooldown:
-                kwargs['cooldown'] = cooldown
-
-            if hasattr(policy, 'change') and change != policy.change:
-                kwargs['change'] = change
-
-            if hasattr(policy, 'changePercent') and is_percent is False:
-                kwargs['change'] = change
-                kwargs['is_percent'] = False
-            elif hasattr(policy, 'change') and is_percent is True:
-                kwargs['change'] = change
-                kwargs['is_percent'] = True
-
-            if hasattr(policy, 'desiredCapacity') and change:
-                kwargs['change'] = change
-            elif ((hasattr(policy, 'change') or
-                   hasattr(policy, 'changePercent')) and desired_capacity):
-                kwargs['desired_capacity'] = desired_capacity
-
-            if hasattr(policy, 'args') and args != policy.args:
-                kwargs['args'] = args
-
-            if kwargs:
-                policy.update(**kwargs)
-                changed = True
-
-        policy.get()
-
-        module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
-
-    else:
-        try:
-            policies = list(filter(lambda p: name == p.name, sg.list_policies()))
-            if len(policies) > 1:
-                module.fail_json(msg='No unique policy match found by name')
-            elif not policies:
-                policy = {}
-            else:
-                policy = policies[0]
-                policy.delete()
-                changed = True
-        except Exception as e:
-            module.fail_json(msg='%s' % e.message)
-
-        module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
-
-
-def main():
-    argument_spec = rax_argument_spec()
-    argument_spec.update(
-        dict(
-            at=dict(),
-            change=dict(type='int'),
-            cron=dict(),
-            cooldown=dict(type='int', default=300),
-            desired_capacity=dict(type='int'),
-            is_percent=dict(type='bool', default=False),
-            name=dict(required=True),
-            policy_type=dict(required=True, choices=['webhook', 'schedule']),
-            scaling_group=dict(required=True),
-            state=dict(default='present', choices=['present', 'absent']),
-        )
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        required_together=rax_required_together(),
-        mutually_exclusive=[
-            ['cron', 'at'],
-            ['change', 'desired_capacity'],
-        ]
-    )
-
-    if not HAS_PYRAX:
-        module.fail_json(msg='pyrax is required for this module')
-
-    at = module.params.get('at')
-    change = module.params.get('change')
-    cron = module.params.get('cron')
-    cooldown = module.params.get('cooldown')
-    desired_capacity = module.params.get('desired_capacity')
-    is_percent = module.params.get('is_percent')
-    name = module.params.get('name')
-    policy_type = module.params.get('policy_type')
-    scaling_group = module.params.get('scaling_group')
-    state = module.params.get('state')
-
-    if (at or cron) and policy_type == 'webhook':
-        module.fail_json(msg='policy_type=schedule is 
required for a time ' - 'based policy') - - setup_rax_module(module, pyrax) - - rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, - desired_capacity=desired_capacity, is_percent=is_percent, - name=name, policy_type=policy_type, scaling_group=scaling_group, - state=state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_compute.py b/plugins/modules/cloud/scaleway/scaleway_compute.py deleted file mode 100644 index c5d5af9177..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_compute.py +++ /dev/null @@ -1,673 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Compute management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_compute -short_description: Scaleway compute management module -author: Remy Leone (@sieben) -description: - - "This module manages compute instances on Scaleway." -extends_documentation_fragment: -- community.general.scaleway - - -options: - - public_ip: - type: str - description: - - Manage public IP on a Scaleway server - - Could be Scaleway IP address UUID - - C(dynamic) Means that IP is destroyed at the same time the host is destroyed - - C(absent) Means no public IP at all - default: absent - - enable_ipv6: - description: - - Enable public IPv6 connectivity on the instance - default: false - type: bool - - image: - type: str - description: - - Image identifier used to start the instance with - required: true - - name: - type: str - description: - - Name of the instance - - organization: - type: str - description: - - Organization identifier - required: true - - state: - type: str - description: - - Indicate desired state of the instance. - default: present - choices: - - present - - absent - - running - - restarted - - stopped - - tags: - type: list - elements: str - description: - - List of tags to apply to the instance (5 max) - required: false - default: [] - - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - commercial_type: - type: str - description: - - Commercial name of the compute node - required: true - - wait: - description: - - Wait for the instance to reach its desired state before returning. 
- type: bool - default: 'no' - - wait_timeout: - type: int - description: - - Time to wait for the server to reach the expected state - required: false - default: 300 - - wait_sleep_time: - type: int - description: - - Time to wait before every attempt to check the state of the server - required: false - default: 3 - - security_group: - type: str - description: - - Security group unique identifier - - If no value provided, the default security group or current security group will be used - required: false -''' - -EXAMPLES = ''' -- name: Create a server - community.general.scaleway_compute: - name: foobar - state: present - image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe - organization: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: ams1 - commercial_type: VC1S - tags: - - test - - www - -- name: Create a server attached to a security group - community.general.scaleway_compute: - name: foobar - state: present - image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe - organization: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: ams1 - commercial_type: VC1S - security_group: 4a31b633-118e-4900-bd52-facf1085fc8d - tags: - - test - - www - -- name: Destroy it right after - community.general.scaleway_compute: - name: foobar - state: absent - image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe - organization: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: ams1 - commercial_type: VC1S -''' - -RETURN = ''' -''' - -import datetime -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import quote as urlquote -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway - -SCALEWAY_SERVER_STATES = ( - 'stopped', - 'stopping', - 'starting', - 'running', - 'locked' -) - -SCALEWAY_TRANSITIONS_STATES = ( - "stopping", - "starting", - "pending" -) - - -def check_image_id(compute_api, image_id): - response = compute_api.get(path="images/%s" % image_id) - - if not response.ok: - msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json) - compute_api.module.fail_json(msg=msg) - - -def fetch_state(compute_api, server): - compute_api.module.debug("fetch_state of server: %s" % server["id"]) - response = compute_api.get(path="servers/%s" % server["id"]) - - if response.status_code == 404: - return "absent" - - if not response.ok: - msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - try: - compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"])) - return response.json["server"]["state"] - except KeyError: - compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json) - - -def wait_to_complete_state_transition(compute_api, server, wait=None): - if wait is None: - wait = compute_api.module.params["wait"] - if not wait: - return - - wait_timeout = compute_api.module.params["wait_timeout"] - wait_sleep_time = compute_api.module.params["wait_sleep_time"] - - start = datetime.datetime.utcnow() - end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: - compute_api.module.debug("We are going to wait for the server to finish its transition") - if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: - compute_api.module.debug("It seems that the server is not in transition anymore.") - compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, 
server)) - break - time.sleep(wait_sleep_time) - else: - compute_api.module.fail_json(msg="Server takes too long to finish its transition") - - -def public_ip_payload(compute_api, public_ip): - # We don't want a public ip - if public_ip in ("absent",): - return {"dynamic_ip_required": False} - - # IP is only attached to the instance and is released as soon as the instance terminates - if public_ip in ("dynamic", "allocated"): - return {"dynamic_ip_required": True} - - # We check that the IP we want to attach exists, if so its ID is returned - response = compute_api.get("ips") - if not response.ok: - msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - ip_list = [] - try: - ip_list = response.json["ips"] - except KeyError: - compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json) - - lookup = [ip["id"] for ip in ip_list] - if public_ip in lookup: - return {"public_ip": public_ip} - - -def create_server(compute_api, server): - compute_api.module.debug("Starting a create_server") - target_server = None - data = {"enable_ipv6": server["enable_ipv6"], - "tags": server["tags"], - "commercial_type": server["commercial_type"], - "image": server["image"], - "dynamic_ip_required": server["dynamic_ip_required"], - "name": server["name"], - "organization": server["organization"] - } - - if server["security_group"]: - data["security_group"] = server["security_group"] - - response = compute_api.post(path="servers", data=data) - - if not response.ok: - msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - try: - target_server = response.json["server"] - except KeyError: - compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - return target_server - - -def restart_server(compute_api, server): - return perform_action(compute_api=compute_api, server=server, action="reboot") - - -def stop_server(compute_api, server): - return perform_action(compute_api=compute_api, server=server, action="poweroff") - - -def start_server(compute_api, server): - return perform_action(compute_api=compute_api, server=server, action="poweron") - - -def perform_action(compute_api, server, action): - response = compute_api.post(path="servers/%s/action" % server["id"], - data={"action": action}) - if not response.ok: - msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - wait_to_complete_state_transition(compute_api=compute_api, server=server) - - return response - - -def remove_server(compute_api, server): - compute_api.module.debug("Starting remove server strategy") - response = compute_api.delete(path="servers/%s" % server["id"]) - if not response.ok: - msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - wait_to_complete_state_transition(compute_api=compute_api, server=server) - - return response - - -def present_strategy(compute_api, wished_server): - compute_api.module.debug("Starting present strategy") - changed = False - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - changed = True - if compute_api.module.check_mode: - return changed, {"status": "A server would be created."} - 
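
The removed helpers above (`perform_action`, `wait_to_complete_state_transition`) share one poll-until-stable pattern: trigger an action, then poll the resource until it leaves a set of transition states or a deadline expires. A minimal standalone sketch of that pattern, assuming only that the caller supplies a `fetch_state()` callable; `TRANSITION_STATES`, `wait_until_stable`, and the monotonic-clock deadline are illustrative choices, not the module's API:

```python
import time

# Assumed transition states; scaleway_compute polls "stopping", "starting", "pending".
TRANSITION_STATES = ("stopping", "starting", "pending")


def wait_until_stable(fetch_state, wait_timeout=300, wait_sleep_time=3):
    """Poll fetch_state() until the resource leaves its transition states.

    Returns the final state, or raises TimeoutError once wait_timeout
    seconds have elapsed. A wait_timeout of 0 means wait indefinitely.
    """
    deadline = time.monotonic() + wait_timeout
    while wait_timeout == 0 or time.monotonic() < deadline:
        state = fetch_state()
        if state not in TRANSITION_STATES:
            return state
        time.sleep(wait_sleep_time)
    raise TimeoutError("resource did not leave a transition state in time")


# Usage sketch: a fake server that needs two polls to finish starting.
states = iter(["starting", "starting", "running"])
print(wait_until_stable(lambda: next(states), wait_sleep_time=0))  # running
```
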
- target_server = create_server(compute_api=compute_api, server=wished_server) - else: - target_server = query_results[0] - - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s attributes would be changed." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - return changed, target_server - - -def absent_strategy(compute_api, wished_server): - compute_api.module.debug("Starting absent strategy") - changed = False - target_server = None - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - return changed, {"status": "Server already absent."} - else: - target_server = query_results[0] - - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s would be made absent." % target_server["id"]} - - # A server MUST be stopped to be deleted. - while fetch_state(compute_api=compute_api, server=target_server) != "stopped": - wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) - response = stop_server(compute_api=compute_api, server=target_server) - - if not response.ok: - err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code, - response.json) - compute_api.module.fail_json(msg=err_msg) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) - - response = remove_server(compute_api=compute_api, server=target_server) - - if not response.ok: - err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json) - compute_api.module.fail_json(msg=err_msg) - - return changed, {"status": "Server %s deleted" % target_server["id"]} - - -def running_strategy(compute_api, wished_server): - compute_api.module.debug("Starting running strategy") - changed = False - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - changed = True - if compute_api.module.check_mode: - return changed, {"status": "A server would be created before being run."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - else: - target_server = query_results[0] - - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - current_state = fetch_state(compute_api=compute_api, server=target_server) - if current_state not in ("running", "starting"): - compute_api.module.debug("running_strategy: Server in state: %s" % current_state) - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s attributes would be changed." 
% target_server["id"]} - - response = start_server(compute_api=compute_api, server=target_server) - if not response.ok: - msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - return changed, target_server - - -def stop_strategy(compute_api, wished_server): - compute_api.module.debug("Starting stop strategy") - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - changed = False - - if not query_results: - - if compute_api.module.check_mode: - return changed, {"status": "A server would be created before being stopped."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - changed = True - else: - target_server = query_results[0] - - compute_api.module.debug("stop_strategy: Servers are found.") - - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, { - "status": "Server %s attributes would be changed before stopping it." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - current_state = fetch_state(compute_api=compute_api, server=target_server) - if current_state not in ("stopped",): - compute_api.module.debug("stop_strategy: Server in state: %s" % current_state) - - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s would be stopped." % target_server["id"]} - - response = stop_server(compute_api=compute_api, server=target_server) - compute_api.module.debug(response.json) - compute_api.module.debug(response.ok) - - if not response.ok: - msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - return changed, target_server - - -def restart_strategy(compute_api, wished_server): - compute_api.module.debug("Starting restart strategy") - changed = False - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - changed = True - if compute_api.module.check_mode: - return changed, {"status": "A server would be created before being rebooted."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - else: - target_server = query_results[0] - - if server_attributes_should_be_changed(compute_api=compute_api, - target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, { - "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - changed = True - if compute_api.module.check_mode: - return changed, {"status": "Server %s would be rebooted." 
% target_server["id"]} - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - if fetch_state(compute_api=compute_api, server=target_server) in ("running",): - response = restart_server(compute_api=compute_api, server=target_server) - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - if not response.ok: - msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code, - response.json) - compute_api.module.fail_json(msg=msg) - - if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",): - response = restart_server(compute_api=compute_api, server=target_server) - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - if not response.ok: - msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code, - response.json) - compute_api.module.fail_json(msg=msg) - - return changed, target_server - - -state_strategy = { - "present": present_strategy, - "restarted": restart_strategy, - "stopped": stop_strategy, - "running": running_strategy, - "absent": absent_strategy -} - - -def find(compute_api, wished_server, per_page=1): - compute_api.module.debug("Getting inside find") - # Only the name attribute is accepted in the Compute query API - response = compute_api.get("servers", params={"name": wished_server["name"], - "per_page": per_page}) - - if not response.ok: - msg = 'Error during server search: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - search_results = response.json["servers"] - - return search_results - - -PATCH_MUTABLE_SERVER_ATTRIBUTES = ( - "ipv6", - "tags", - "name", - "dynamic_ip_required", - "security_group", -) - - -def server_attributes_should_be_changed(compute_api, target_server, wished_server): - compute_api.module.debug("Checking if server attributes should be changed") - compute_api.module.debug("Current Server: %s" % target_server) - compute_api.module.debug("Wished Server: %s" % wished_server) - debug_dict = dict((x, (target_server[x], wished_server[x])) - for x in PATCH_MUTABLE_SERVER_ATTRIBUTES - if x in target_server and x in wished_server) - compute_api.module.debug("Debug dict %s" % debug_dict) - try: - for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: - if key in target_server and key in wished_server: - # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook - if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys( - ) and target_server[key]["id"] != wished_server[key]: - return True - # Handling other structure compare simply the two objects content - elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]: - return True - return False - except AttributeError: - compute_api.module.fail_json(msg="Error while checking if attributes should be changed") - - -def server_change_attributes(compute_api, target_server, wished_server): - compute_api.module.debug("Starting patching server attributes") - patch_payload = dict() - - for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: - if key in target_server and key in wished_server: - # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook - if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: - # Setting all key to current value except ID - key_dict = dict((x, target_server[key][x]) for x in 
target_server[key].keys() if x != "id") - # Setting ID to the user specified ID - key_dict["id"] = wished_server[key] - patch_payload[key] = key_dict - elif not isinstance(target_server[key], dict): - patch_payload[key] = wished_server[key] - - response = compute_api.patch(path="servers/%s" % target_server["id"], - data=patch_payload) - if not response.ok: - msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - try: - target_server = response.json["server"] - except KeyError: - compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - return target_server - - -def core(module): - region = module.params["region"] - wished_server = { - "state": module.params["state"], - "image": module.params["image"], - "name": module.params["name"], - "commercial_type": module.params["commercial_type"], - "enable_ipv6": module.params["enable_ipv6"], - "tags": module.params["tags"], - "organization": module.params["organization"], - "security_group": module.params["security_group"] - } - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - compute_api = Scaleway(module=module) - - check_image_id(compute_api, wished_server["image"]) - - # IP parameters of the wished server depends on the configuration - ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"]) - wished_server.update(ip_payload) - - changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server) - module.exit_json(changed=changed, msg=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - image=dict(required=True), - name=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - commercial_type=dict(required=True), - enable_ipv6=dict(default=False, type="bool"), - public_ip=dict(default="absent"), - state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", elements="str", default=[]), - organization=dict(required=True), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - wait_sleep_time=dict(type="int", default=3), - security_group=dict(), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/plugins/modules/cloud/scaleway/scaleway_database_backup.py deleted file mode 100644 index 35f35f820a..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_database_backup.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway database backups management module -# -# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com). -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_database_backup -short_description: Scaleway database backups management module -version_added: 1.2.0 -author: Guillaume Rodriguez (@guillaume_ro_fr) -description: - - This module manages database backups on Scaleway account U(https://developer.scaleway.com). 
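
Like the other Scaleway modules in this diff, this module routes each requested state through a `state_strategy` dict that maps the state name (present, absent, exported, restored) to a handler function. A self-contained sketch of that dispatch-table shape, using a hypothetical in-memory store in place of the REST API; handler bodies and names are illustrative:

```python
def present_strategy(store, name):
    # Create the named resource unless it already exists (idempotent).
    if name in store:
        return False, store[name]
    store[name] = {"name": name, "status": "ready"}
    return True, store[name]


def absent_strategy(store, name):
    # Remove the named resource if present; report changed accordingly.
    if name not in store:
        return False, {}
    return True, store.pop(name)


# One lookup picks the handler; unsupported states fail fast with KeyError.
state_strategy = {
    "present": present_strategy,
    "absent": absent_strategy,
}

backups = {}
print(state_strategy["present"](backups, "my_backup"))  # (True, {...}) created
print(state_strategy["present"](backups, "my_backup"))  # (False, {...}) second run
print(state_strategy["absent"](backups, "my_backup"))   # (True, {...}) removed
```
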
-extends_documentation_fragment: - - community.general.scaleway -options: - state: - description: - - Indicate desired state of the database backup. - - C(present) creates a backup. - - C(absent) deletes the backup. - - C(exported) creates a download link for the backup. - - C(restored) restores the backup to a new database. - type: str - default: present - choices: - - present - - absent - - exported - - restored - - region: - description: - - Scaleway region to use (for example C(fr-par)). - type: str - required: true - choices: - - fr-par - - nl-ams - - pl-waw - - id: - description: - - UUID used to identify the database backup. - - Required for C(absent), C(exported) and C(restored) states. - type: str - - name: - description: - - Name used to identify the database backup. - - Required for C(present) state. - - Ignored when C(state=absent), C(state=exported) or C(state=restored). - type: str - required: false - - database_name: - description: - - Name used to identify the database. - - Required for C(present) and C(restored) states. - - Ignored when C(state=absent) or C(state=exported). - type: str - required: false - - instance_id: - description: - - UUID of the instance associated to the database backup. - - Required for C(present) and C(restored) states. - - Ignored when C(state=absent) or C(state=exported). - type: str - required: false - - expires_at: - description: - - Expiration datetime of the database backup (ISO 8601 format). - - Ignored when C(state=absent), C(state=exported) or C(state=restored). - type: str - required: false - - wait: - description: - - Wait for the instance to reach its desired state before returning. - type: bool - default: false - - wait_timeout: - description: - - Time to wait for the backup to reach the expected state. - type: int - required: false - default: 300 - - wait_sleep_time: - description: - - Time to wait before every attempt to check the state of the backup. - type: int - required: false - default: 3 -''' - -EXAMPLES = ''' - - name: Create a backup - community.general.scaleway_database_backup: - name: 'my_backup' - state: present - region: 'fr-par' - database_name: 'my-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - - name: Export a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: exported - region: 'fr-par' - - - name: Restore a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: restored - region: 'fr-par' - database_name: 'my-new-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - - name: Remove a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: absent - region: 'fr-par' -''' - -RETURN = ''' -metadata: - description: Backup metadata. 
- returned: when C(state=present), C(state=exported) or C(state=restored) - type: dict - sample: { - "metadata": { - "created_at": "2020-08-06T12:42:05.631049Z", - "database_name": "my-database", - "download_url": null, - "download_url_expires_at": null, - "expires_at": null, - "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", - "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", - "instance_name": "my-instance", - "name": "backup_name", - "region": "fr-par", - "size": 600000, - "status": "ready", - "updated_at": "2020-08-06T12:42:10.581649Z" - } - } -''' - -import datetime -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - scaleway_argument_spec, - SCALEWAY_REGIONS, -) - -stable_states = ( - 'ready', - 'deleting', -) - - -def wait_to_complete_state_transition(module, account_api, backup=None): - wait_timeout = module.params['wait_timeout'] - wait_sleep_time = module.params['wait_sleep_time'] - - if backup is None or backup['status'] in stable_states: - return backup - - start = datetime.datetime.utcnow() - end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: - module.debug('We are going to wait for the backup to finish its transition') - - response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) - if not response.ok: - module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json)) - break - response_json = response.json - - if response_json['status'] in stable_states: - module.debug('It seems that the backup is not in transition anymore.') - module.debug('Backup in state: %s' % response_json['status']) - return response_json - time.sleep(wait_sleep_time) - else: - module.fail_json(msg='Backup takes too long to finish its transition') - - -def present_strategy(module, account_api, backup): - name = module.params['name'] - database_name = module.params['database_name'] - instance_id = module.params['instance_id'] - expiration_date = module.params['expires_at'] - - if backup is not None: - if (backup['name'] == name or name is None) and ( - backup['expires_at'] == expiration_date or expiration_date is None): - wait_to_complete_state_transition(module, account_api, backup) - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - payload = {} - if name is not None: - payload['name'] = name - if expiration_date is not None: - payload['expires_at'] = expiration_date - - response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']), - payload) - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json)) - - if module.check_mode: - module.exit_json(changed=True) - - payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id} - if expiration_date is not None: - payload['expires_at'] = expiration_date - - response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload) - - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json)) - - -def 
absent_strategy(module, account_api, backup): - if backup is None: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json)) - - -def exported_strategy(module, account_api, backup): - if backup is None: - module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) - - if backup['download_url'] is not None: - module.exit_json(changed=False, metadata=backup) - - if module.check_mode: - module.exit_json(changed=True) - - backup = wait_to_complete_state_transition(module, account_api, backup) - response = account_api.post( - '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {}) - - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json)) - - -def restored_strategy(module, account_api, backup): - if backup is None: - module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) - - database_name = module.params['database_name'] - instance_id = module.params['instance_id'] - - if module.check_mode: - module.exit_json(changed=True) - - backup = wait_to_complete_state_transition(module, account_api, backup) - - payload = {'database_name': database_name, 'instance_id': instance_id} - response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']), - payload) - - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json)) - - -state_strategy = { - 'present': present_strategy, - 'absent': absent_strategy, - 'exported': exported_strategy, - 'restored': restored_strategy, -} - - -def core(module): - state = module.params['state'] - backup_id = module.params['id'] - - account_api = Scaleway(module) - - if backup_id is None: - backup_by_id = None - else: - response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id)) - status_code = response.status_code - backup_json = response.json - backup_by_id = None - if status_code == 404: - backup_by_id = None - elif response.ok: - backup_by_id = backup_json - else: - module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message'])) - - state_strategy[state](module, account_api, backup_by_id) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']), - region=dict(required=True, choices=SCALEWAY_REGIONS), - id=dict(), - name=dict(type='str'), - database_name=dict(required=False), - instance_id=dict(required=False), - expires_at=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - wait_sleep_time=dict(type='int', default=3), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_together=[ - 
['database_name', 'instance_id'], - ], - required_if=[ - ['state', 'present', ['name', 'database_name', 'instance_id']], - ['state', 'absent', ['id']], - ['state', 'exported', ['id']], - ['state', 'restored', ['id', 'database_name', 'instance_id']], - ], - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_image_info.py b/plugins/modules/cloud/scaleway/scaleway_image_info.py deleted file mode 100644 index 609ba3d1e8..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_image_info.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_image_info -short_description: Gather information about the Scaleway images available. -description: - - Gather information about the Scaleway images available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway images information - community.general.scaleway_image_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_image_info }}" -''' - -RETURN = r''' ---- -scaleway_image_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_image_info": [ - { - "arch": "x86_64", - "creation_date": "2018-07-17T16:18:49.276456+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": false, - "dtb": "", - "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.9.93 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", - "modification_date": "2018-07-17T16:42:06.319315+00:00", - "name": "Debian Stretch", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", - "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) - - -class ScalewayImageInfo(Scaleway): - - def __init__(self, module): - super(ScalewayImageInfo, self).__init__(module) - self.name = 'images' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_image_info=ScalewayImageInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip.py b/plugins/modules/cloud/scaleway/scaleway_ip.py deleted file mode 100644 index 135da120cf..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_ip.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway IP management module -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_ip -short_description: Scaleway IP management module -author: Remy Leone (@sieben) -description: - - This module manages IP on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the IP. - default: present - choices: - - present - - absent - - organization: - type: str - description: - - Scaleway organization identifier - required: true - - region: - type: str - description: - - Scaleway region to use (for example par1). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - id: - type: str - description: - - id of the Scaleway IP (UUID) - - server: - type: str - description: - - id of the server you want to attach an IP to. 
- - To unattach an IP don't specify this option - - reverse: - type: str - description: - - Reverse to assign to the IP -''' - -EXAMPLES = ''' -- name: Create an IP - community.general.scaleway_ip: - organization: '{{ scw_org }}' - state: present - region: par1 - register: ip_creation_task - -- name: Make sure IP deleted - community.general.scaleway_ip: - id: '{{ ip_creation_task.scaleway_ip.id }}' - state: absent - region: par1 -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "ips": [ - { - "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", - "reverse": null, - "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", - "server": { - "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", - "name": "ansible_tuto-1" - }, - "address": "212.47.232.136" - } - ] - } -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule - - -def ip_attributes_should_be_changed(api, target_ip, wished_ip): - patch_payload = {} - - if target_ip["reverse"] != wished_ip["reverse"]: - patch_payload["reverse"] = wished_ip["reverse"] - - # IP is assigned to a server - if target_ip["server"] is None and wished_ip["server"]: - patch_payload["server"] = wished_ip["server"] - - # IP is unassigned to a server - try: - if target_ip["server"]["id"] and wished_ip["server"] is None: - patch_payload["server"] = wished_ip["server"] - except (TypeError, KeyError): - pass - - # IP is migrated between 2 different servers - try: - if target_ip["server"]["id"] != wished_ip["server"]: - patch_payload["server"] = wished_ip["server"] - except (TypeError, KeyError): - pass - - return patch_payload - - -def payload_from_wished_ip(wished_ip): - return dict( - (k, v) - for k, v in wished_ip.items() - if k != 'id' and v is not None - ) - - -def present_strategy(api, wished_ip): - changed = False - - response = api.get('ips') - if not response.ok: - api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( - response.status_code, response.json['message'])) - - ips_list = response.json["ips"] - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) - - if wished_ip["id"] not in ip_lookup.keys(): - changed = True - if api.module.check_mode: - return changed, {"status": "An IP would be created."} - - # Create IP - creation_response = api.post('/ips', - data=payload_from_wished_ip(wished_ip)) - - if not creation_response.ok: - msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'], - creation_response.json['message'], - creation_response.json) - api.module.fail_json(msg=msg) - return changed, creation_response.json["ip"] - - target_ip = ip_lookup[wished_ip["id"]] - patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip) - - if not patch_payload: - return changed, target_ip - - changed = True - if api.module.check_mode: - return changed, {"status": "IP attributes would be changed."} - - ip_patch_response = api.patch(path="ips/%s" % target_ip["id"], - data=patch_payload) - - if not ip_patch_response.ok: - api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format( - ip_patch_response.status_code, ip_patch_response.json['message'])) - - return changed, ip_patch_response.json["ip"] - - -def absent_strategy(api, wished_ip): - response = api.get('ips') - changed = False - - status_code = response.status_code - ips_json = response.json - 
ips_list = ips_json["ips"] - - if not response.ok: - api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( - status_code, response.json['message'])) - - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) - if wished_ip["id"] not in ip_lookup.keys(): - return changed, {} - - changed = True - if api.module.check_mode: - return changed, {"status": "IP would be destroyed"} - - response = api.delete('/ips/' + wished_ip["id"]) - if not response.ok: - api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format( - response.status_code, response.json)) - - return changed, response.json - - -def core(module): - wished_ip = { - "organization": module.params['organization'], - "reverse": module.params["reverse"], - "id": module.params["id"], - "server": module.params["server"] - } - - region = module.params["region"] - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - api = Scaleway(module=module) - if module.params["state"] == "absent": - changed, summary = absent_strategy(api=api, wished_ip=wished_ip) - else: - changed, summary = present_strategy(api=api, wished_ip=wished_ip) - module.exit_json(changed=changed, scaleway_ip=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - organization=dict(required=True), - server=dict(), - reverse=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - id=dict() - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/plugins/modules/cloud/scaleway/scaleway_ip_info.py deleted file mode 100644 index e2e49557cc..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_ip_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_ip_info -short_description: Gather information about the Scaleway ips available. -description: - - Gather information about the Scaleway ips available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway ips information - community.general.scaleway_ip_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_ip_info }}" -''' - -RETURN = r''' ---- -scaleway_ip_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
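As a point of reference, the list this module returns maps to a single GET on the instance API. A minimal sketch using requests follows; the par1 endpoint URL and the X-Auth-Token header are assumptions modelled on how the collection's module_utils builds requests, not verified API documentation:

    import requests

    # Assumed regional endpoint; the module resolves this via SCALEWAY_LOCATION.
    API_URL = "https://cp-par1.scaleway.com"

    response = requests.get(
        "%s/ips" % API_URL,
        headers={"X-Auth-Token": "<secret token>"},  # auth header used by the collection
        timeout=30,
    )
    response.raise_for_status()
    for ip in response.json()["ips"]:
        print(ip["id"], ip["address"], ip.get("reverse"))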
- returned: success - type: list - elements: dict - sample: - "scaleway_ip_info": [ - { - "address": "163.172.170.243", - "id": "ea081794-a581-8899-8451-386ddaf0a451", - "organization": "3f709602-5e6c-4619-b80c-e324324324af", - "reverse": null, - "server": { - "id": "12f19bc7-109c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayIpInfo(Scaleway): - - def __init__(self, module): - super(ScalewayIpInfo, self).__init__(module) - self.name = 'ips' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_ip_info=ScalewayIpInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_lb.py b/plugins/modules/cloud/scaleway/scaleway_lb.py deleted file mode 100644 index 9761500ab9..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_lb.py +++ /dev/null @@ -1,358 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Load-balancer management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_lb -short_description: Scaleway load-balancer management module -author: Remy Leone (@sieben) -description: - - "This module manages load-balancers on Scaleway." -extends_documentation_fragment: -- community.general.scaleway - - -options: - - name: - type: str - description: - - Name of the load-balancer - required: true - - description: - type: str - description: - - Description of the load-balancer - required: true - - organization_id: - type: str - description: - - Organization identifier - required: true - - state: - type: str - description: - - Indicate desired state of the instance. - default: present - choices: - - present - - absent - - region: - type: str - description: - - Scaleway zone - required: true - choices: - - nl-ams - - fr-par - - pl-waw - - tags: - type: list - elements: str - description: - - List of tags to apply to the load-balancer - - wait: - description: - - Wait for the load-balancer to reach its desired state before returning. 
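The wait, wait_timeout and wait_sleep_time options drive a simple poll-until-stable loop; the module's own version, wait_to_complete_state_transition(), appears further down. Distilled into a standalone sketch (function and parameter names are illustrative):

    import datetime
    import time

    def wait_for_stable_state(fetch_state, stable_states=("ready", "absent"),
                              wait_timeout=300, wait_sleep_time=3):
        """Poll fetch_state() until a stable state is reached or the timeout expires."""
        deadline = datetime.datetime.utcnow() + datetime.timedelta(seconds=wait_timeout)
        while datetime.datetime.utcnow() < deadline:
            state = fetch_state()
            if state in stable_states:
                return state
            time.sleep(wait_sleep_time)
        raise RuntimeError("load-balancer took too long to finish its transition")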
- type: bool - default: 'no' - - wait_timeout: - type: int - description: - - Time to wait for the load-balancer to reach the expected state - required: false - default: 300 - - wait_sleep_time: - type: int - description: - - Time to wait before every attempt to check the state of the load-balancer - required: false - default: 3 -''' - -EXAMPLES = ''' -- name: Create a load-balancer - community.general.scaleway_lb: - name: foobar - state: present - organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: fr-par - tags: - - hello - -- name: Delete a load-balancer - community.general.scaleway_lb: - name: foobar - state: absent - organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: fr-par -''' - -RETURNS = ''' -{ - "scaleway_lb": { - "backend_count": 0, - "frontend_count": 0, - "description": "Description of my load-balancer", - "id": "00000000-0000-0000-0000-000000000000", - "instances": [ - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "10.0.0.1", - "region": "fr-par", - "status": "ready" - }, - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "10.0.0.2", - "region": "fr-par", - "status": "ready" - } - ], - "ip": [ - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "192.168.0.1", - "lb_id": "00000000-0000-0000-0000-000000000000", - "region": "fr-par", - "organization_id": "00000000-0000-0000-0000-000000000000", - "reverse": "" - } - ], - "name": "lb_ansible_test", - "organization_id": "00000000-0000-0000-0000-000000000000", - "region": "fr-par", - "status": "ready", - "tags": [ - "first_tag", - "second_tag" - ] - } -} -''' - -import datetime -import time -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway - -STABLE_STATES = ( - "ready", - "absent" -) - -MUTABLE_ATTRIBUTES = ( - "name", - "description" -) - - -def payload_from_wished_lb(wished_lb): - return { - "organization_id": wished_lb["organization_id"], - "name": wished_lb["name"], - "tags": wished_lb["tags"], - "description": wished_lb["description"] - } - - -def fetch_state(api, lb): - api.module.debug("fetch_state of load-balancer: %s" % lb["id"]) - response = api.get(path=api.api_path + "/%s" % lb["id"]) - - if response.status_code == 404: - return "absent" - - if not response.ok: - msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) - api.module.fail_json(msg=msg) - - try: - api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"])) - return response.json["status"] - except KeyError: - api.module.fail_json(msg="Could not fetch state in %s" % response.json) - - -def wait_to_complete_state_transition(api, lb, force_wait=False): - wait = api.module.params["wait"] - if not (wait or force_wait): - return - wait_timeout = api.module.params["wait_timeout"] - wait_sleep_time = api.module.params["wait_sleep_time"] - - start = datetime.datetime.utcnow() - end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: - api.module.debug("We are going to wait for the load-balancer to finish its transition") - state = fetch_state(api, lb) - if state in STABLE_STATES: - api.module.debug("It seems that the load-balancer is not in transition anymore.") - api.module.debug("load-balancer in state: %s" % fetch_state(api, lb)) - break - time.sleep(wait_sleep_time) - else: - api.module.fail_json(msg="Server takes too long to finish its 
transition") - - -def lb_attributes_should_be_changed(target_lb, wished_lb): - diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) - - if diff: - return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) - else: - return diff - - -def present_strategy(api, wished_lb): - changed = False - - response = api.get(path=api.api_path) - if not response.ok: - api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( - response.status_code, response.json['message'])) - - lbs_list = response.json["lbs"] - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) - - if wished_lb["name"] not in lb_lookup.keys(): - changed = True - if api.module.check_mode: - return changed, {"status": "A load-balancer would be created."} - - # Create Load-balancer - api.warn(payload_from_wished_lb(wished_lb)) - creation_response = api.post(path=api.api_path, - data=payload_from_wished_lb(wished_lb)) - - if not creation_response.ok: - msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'], - creation_response.json['message'], - creation_response.json) - api.module.fail_json(msg=msg) - - wait_to_complete_state_transition(api=api, lb=creation_response.json) - response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) - return changed, response.json - - target_lb = lb_lookup[wished_lb["name"]] - patch_payload = lb_attributes_should_be_changed(target_lb=target_lb, - wished_lb=wished_lb) - - if not patch_payload: - return changed, target_lb - - changed = True - if api.module.check_mode: - return changed, {"status": "Load-balancer attributes would be changed."} - - lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"], - data=patch_payload) - - if not lb_patch_response.ok: - api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format( - lb_patch_response.status_code, lb_patch_response.json['message'])) - - wait_to_complete_state_transition(api=api, lb=target_lb) - return changed, lb_patch_response.json - - -def absent_strategy(api, wished_lb): - response = api.get(path=api.api_path) - changed = False - - status_code = response.status_code - lbs_json = response.json - lbs_list = lbs_json["lbs"] - - if not response.ok: - api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( - status_code, response.json['message'])) - - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) - if wished_lb["name"] not in lb_lookup.keys(): - return changed, {} - - target_lb = lb_lookup[wished_lb["name"]] - changed = True - if api.module.check_mode: - return changed, {"status": "Load-balancer would be destroyed"} - - wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True) - response = api.delete(path=api.api_path + "/%s" % target_lb["id"]) - if not response.ok: - api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format( - response.status_code, response.json)) - - wait_to_complete_state_transition(api=api, lb=target_lb) - return changed, response.json - - -state_strategy = { - "present": present_strategy, - "absent": absent_strategy -} - - -def core(module): - region = module.params["region"] - wished_load_balancer = { - "state": module.params["state"], - "name": module.params["name"], - "description": module.params["description"], - "tags": module.params["tags"], - "organization_id": module.params["organization_id"] - } - module.params['api_url'] = SCALEWAY_ENDPOINT - api = Scaleway(module=module) - api.api_path = 
"lb/v1/regions/%s/lbs" % region - - changed, summary = state_strategy[wished_load_balancer["state"]](api=api, - wished_lb=wished_load_balancer) - module.exit_json(changed=changed, scaleway_lb=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - name=dict(required=True), - description=dict(required=True), - region=dict(required=True, choices=SCALEWAY_REGIONS), - state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", elements="str", default=[]), - organization_id=dict(required=True), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - wait_sleep_time=dict(type="int", default=3), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/plugins/modules/cloud/scaleway/scaleway_organization_info.py deleted file mode 100644 index f530dcb81a..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_organization_info.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_organization_info -short_description: Gather information about the Scaleway organizations available. -description: - - Gather information about the Scaleway organizations available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -options: - api_url: - description: - - Scaleway API URL - default: 'https://account.scaleway.com' - aliases: ['base_url'] -extends_documentation_fragment: -- community.general.scaleway - -''' - -EXAMPLES = r''' -- name: Gather Scaleway organizations information - community.general.scaleway_organization_info: - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_organization_info }}" -''' - -RETURN = r''' ---- -scaleway_organization_info: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_organization_info": [ - { - "address_city_name": "Paris", - "address_country_code": "FR", - "address_line1": "42 Rue de l'univers", - "address_line2": null, - "address_postal_code": "75042", - "address_subdivision_code": "FR-75", - "creation_date": "2018-08-06T13:43:28.508575+00:00", - "currency": "EUR", - "customer_class": "individual", - "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", - "locale": "fr_FR", - "modification_date": "2018-08-06T14:56:41.401685+00:00", - "name": "James Bond", - "support_id": "694324", - "support_level": "basic", - "support_pin": "9324", - "users": [], - "vat_number": null, - "warnings": [] - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec -) - - -class ScalewayOrganizationInfo(Scaleway): - - def __init__(self, module): - super(ScalewayOrganizationInfo, self).__init__(module) - self.name = 'organizations' - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), - )) - - module = AnsibleModule( - argument_spec=argument_spec, 
- supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group.py b/plugins/modules/cloud/scaleway/scaleway_security_group.py deleted file mode 100644 index f9faee6104..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_security_group.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Security Group management module -# -# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_security_group -short_description: Scaleway Security Group management module -author: Antoine Barbare (@abarbare) -description: - - This module manages Security Group on Scaleway account - U(https://developer.scaleway.com). -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - description: - - Indicate desired state of the Security Group. - type: str - choices: [ absent, present ] - default: present - - organization: - description: - - Organization identifier. - type: str - required: true - - region: - description: - - Scaleway region to use (for example C(par1)). - type: str - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - name: - description: - - Name of the Security Group. - type: str - required: true - - description: - description: - - Description of the Security Group. - type: str - - stateful: - description: - - Create a stateful security group which allows established connections in and out. - type: bool - required: true - - inbound_default_policy: - description: - - Default policy for incoming traffic. - type: str - choices: [ accept, drop ] - - outbound_default_policy: - description: - - Default policy for outcoming traffic. - type: str - choices: [ accept, drop ] - - organization_default: - description: - - Create security group to be the default one. 
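One detail worth flagging before the code below: in check mode the present strategy cannot know which id the API would assign to a new group, so it returns a synthetic one built with uuid4(), purely so dry runs can still reference an id. That pattern, in isolation (a simplified sketch, not the module's exact control flow):

    from uuid import uuid4

    def check_mode_present_result(existing_by_name, wished_name):
        """What present_strategy reports under check mode (simplified)."""
        if wished_name in existing_by_name:
            # Group already exists: nothing would change.
            return {"changed": False,
                    "scaleway_security_group": existing_by_name[wished_name]}
        # Group would be created; fabricate an id so follow-up tasks can template it.
        return {"changed": True, "scaleway_security_group": {"id": str(uuid4())}}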
- type: bool -''' - -EXAMPLES = ''' -- name: Create a Security Group - community.general.scaleway_security_group: - state: present - region: par1 - name: security_group - description: "my security group description" - organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9" - stateful: false - inbound_default_policy: accept - outbound_default_policy: accept - organization_default: false - register: security_group_creation_task -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "scaleway_security_group": { - "description": "my security group description", - "enable_default_security": true, - "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", - "inbound_default_policy": "accept", - "name": "security_group", - "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", - "organization_default": false, - "outbound_default_policy": "accept", - "servers": [], - "stateful": false - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule -from uuid import uuid4 - - -def payload_from_security_group(security_group): - return dict( - (k, v) - for k, v in security_group.items() - if k != 'id' and v is not None - ) - - -def present_strategy(api, security_group): - ret = {'changed': False} - - response = api.get('security_groups') - if not response.ok: - api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) - - if security_group['name'] not in security_group_lookup.keys(): - ret['changed'] = True - if api.module.check_mode: - # Help user when check mode is enabled by defining id key - ret['scaleway_security_group'] = {'id': str(uuid4())} - return ret - - # Create Security Group - response = api.post('/security_groups', - data=payload_from_security_group(security_group)) - - if not response.ok: - msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json) - api.module.fail_json(msg=msg) - ret['scaleway_security_group'] = response.json['security_group'] - - else: - ret['scaleway_security_group'] = security_group_lookup[security_group['name']] - - return ret - - -def absent_strategy(api, security_group): - response = api.get('security_groups') - ret = {'changed': False} - - if not response.ok: - api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) - if security_group['name'] not in security_group_lookup.keys(): - return ret - - ret['changed'] = True - if api.module.check_mode: - return ret - - response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id']) - if not response.ok: - api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - - return ret - - -def core(module): - security_group = { - 'organization': module.params['organization'], - 'name': module.params['name'], - 'description': module.params['description'], - 'stateful': module.params['stateful'], - 'inbound_default_policy': module.params['inbound_default_policy'], - 
'outbound_default_policy': module.params['outbound_default_policy'], - 'organization_default': module.params['organization_default'], - } - - region = module.params['region'] - module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] - - api = Scaleway(module=module) - if module.params['state'] == 'present': - summary = present_strategy(api=api, security_group=security_group) - else: - summary = absent_strategy(api=api, security_group=security_group) - module.exit_json(**summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - organization=dict(type='str', required=True), - name=dict(type='str', required=True), - description=dict(type='str'), - region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), - stateful=dict(type='bool', required=True), - inbound_default_policy=dict(type='str', choices=['accept', 'drop']), - outbound_default_policy=dict(type='str', choices=['accept', 'drop']), - organization_default=dict(type='bool'), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]] - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py deleted file mode 100644 index 1f5af7da53..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_security_group_info -short_description: Gather information about the Scaleway security groups available. -description: - - Gather information about the Scaleway security groups available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -extends_documentation_fragment: -- community.general.scaleway - -''' - -EXAMPLES = r''' -- name: Gather Scaleway security groups information - community.general.scaleway_security_group_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_security_group_info }}" -''' - -RETURN = r''' ---- -scaleway_security_group_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_security_group_info": [ - { - "description": "test-ams", - "enable_default_security": true, - "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", - "name": "test-ams", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "organization_default": false, - "servers": [ - { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - ] - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewaySecurityGroupInfo(Scaleway): - - def __init__(self, module): - super(ScalewaySecurityGroupInfo, self).__init__(module) - self.name = 'security_groups' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py deleted file mode 100644 index 9f95921202..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Security Group Rule management module -# -# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). -# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_security_group_rule -short_description: Scaleway Security Group Rule management module -author: Antoine Barbare (@abarbare) -description: - - This module manages Security Group Rule on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: - - community.general.scaleway -requirements: - - ipaddress - -options: - state: - type: str - description: - - Indicate desired state of the Security Group Rule. - default: present - choices: - - present - - absent - - region: - type: str - description: - - Scaleway region to use (for example C(par1)). 
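Unlike security groups, rules are not looked up by a user-facing name here: idempotency is decided by field-for-field comparison, and get_sgr_from_api() further down checks exactly five fields. Equivalently, as a sketch:

    RULE_FIELDS = ("ip_range", "dest_port_from", "direction", "action", "protocol")

    def find_matching_rule(existing_rules, wished_rule):
        """Return the first existing rule whose tracked fields all match, else None."""
        for rule in existing_rules:
            if all(rule[field] == wished_rule[field] for field in RULE_FIELDS):
                return rule
        return None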
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - protocol: - type: str - description: - - Network protocol to use - choices: - - TCP - - UDP - - ICMP - required: true - - port: - description: - - Port related to the rule, null value for all the ports - required: true - type: int - - ip_range: - type: str - description: - - IPV4 CIDR notation to apply to the rule - default: 0.0.0.0/0 - - direction: - type: str - description: - - Rule direction - choices: - - inbound - - outbound - required: true - - action: - type: str - description: - - Rule action - choices: - - accept - - drop - required: true - - security_group: - type: str - description: - - Security Group unique identifier - required: true -''' - -EXAMPLES = ''' - - name: Create a Security Group Rule - community.general.scaleway_security_group_rule: - state: present - region: par1 - protocol: TCP - port: 80 - ip_range: 0.0.0.0/0 - direction: inbound - action: accept - security_group: b57210ee-1281-4820-a6db-329f78596ecb - register: security_group_rule_creation_task -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "scaleway_security_group_rule": { - "direction": "inbound", - "protocol": "TCP", - "ip_range": "0.0.0.0/0", - "dest_port_from": 80, - "action": "accept", - "position": 2, - "dest_port_to": null, - "editable": null, - "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" - } - } -''' - -import traceback - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -try: - from ipaddress import ip_network -except ImportError: - IPADDRESS_IMP_ERR = traceback.format_exc() - HAS_IPADDRESS = False -else: - HAS_IPADDRESS = True - - -def get_sgr_from_api(security_group_rules, security_group_rule): - """ Check if a security_group_rule specs are present in security_group_rules - Return None if no rules match the specs - Return the rule if found - """ - for sgr in security_group_rules: - if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and - sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and - sgr['protocol'] == security_group_rule['protocol']): - return sgr - - return None - - -def present_strategy(api, security_group_id, security_group_rule): - ret = {'changed': False} - - response = api.get('security_groups/%s/rules' % security_group_id) - if not response.ok: - api.module.fail_json( - msg='Error getting security group rules "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - - existing_rule = get_sgr_from_api( - response.json['rules'], security_group_rule) - - if not existing_rule: - ret['changed'] = True - if api.module.check_mode: - return ret - - # Create Security Group Rule - response = api.post('/security_groups/%s/rules' % security_group_id, - data=payload_from_object(security_group_rule)) - - if not response.ok: - api.module.fail_json( - msg='Error during security group rule creation: "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - ret['scaleway_security_group_rule'] = response.json['rule'] - - else: - 
ret['scaleway_security_group_rule'] = existing_rule - - return ret - - -def absent_strategy(api, security_group_id, security_group_rule): - ret = {'changed': False} - - response = api.get('security_groups/%s/rules' % security_group_id) - if not response.ok: - api.module.fail_json( - msg='Error getting security group rules "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - - existing_rule = get_sgr_from_api( - response.json['rules'], security_group_rule) - - if not existing_rule: - return ret - - ret['changed'] = True - if api.module.check_mode: - return ret - - response = api.delete( - '/security_groups/%s/rules/%s' % - (security_group_id, existing_rule['id'])) - if not response.ok: - api.module.fail_json( - msg='Error deleting security group rule "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - - return ret - - -def core(module): - api = Scaleway(module=module) - - security_group_rule = { - 'protocol': module.params['protocol'], - 'dest_port_from': module.params['port'], - 'ip_range': module.params['ip_range'], - 'direction': module.params['direction'], - 'action': module.params['action'], - } - - region = module.params['region'] - module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] - - if module.params['state'] == 'present': - summary = present_strategy( - api=api, - security_group_id=module.params['security_group'], - security_group_rule=security_group_rule) - else: - summary = absent_strategy( - api=api, - security_group_id=module.params['security_group'], - security_group_rule=security_group_rule) - module.exit_json(**summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', choices=['absent', 'present']), - region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), - protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']), - port=dict(type='int', required=True), - ip_range=dict(type='str', default='0.0.0.0/0'), - direction=dict(type='str', required=True, choices=['inbound', 'outbound']), - action=dict(type='str', required=True, choices=['accept', 'drop']), - security_group=dict(type='str', required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - if not HAS_IPADDRESS: - module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_server_info.py b/plugins/modules/cloud/scaleway/scaleway_server_info.py deleted file mode 100644 index 61bd9de41b..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_server_info.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_server_info -short_description: Gather information about the Scaleway servers available. -description: - - Gather information about the Scaleway servers available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). 
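scaleway_server_info and its siblings in this diff all share one shape: subclass the Scaleway helper, pick a resource name, point api_url at the regional endpoint, and hand back get_resources(). A hypothetical generalisation of that pattern (the class and parameter below are illustrative, not part of the collection):

    from ansible_collections.community.general.plugins.module_utils.scaleway import (
        SCALEWAY_LOCATION,
        Scaleway,
    )

    class ScalewayResourceInfo(Scaleway):
        """Hypothetical generic form of the ScalewayXxxInfo classes being removed."""

        def __init__(self, module, resource_name):
            super(ScalewayResourceInfo, self).__init__(module)
            self.name = resource_name  # e.g. 'servers', 'ips', 'snapshots', 'volumes'
            region = module.params["region"]
            self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"]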
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway servers information - community.general.scaleway_server_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_server_info }}" -''' - -RETURN = r''' ---- -scaleway_server_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." - returned: success - type: list - elements: dict - sample: - "scaleway_server_info": [ - { - "arch": "x86_64", - "boot_type": "local", - "bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "commercial_type": "START1-XS", - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "dynamic_ip_required": false, - "enable_ipv6": false, - "extra_networks": [], - "hostname": "scw-e0d256", - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "image": { - "arch": "x86_64", - "creation_date": "2018-04-26T12:42:21.619844+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", - "modification_date": "2018-04-26T12:49:07.573004+00:00", - "name": "Ubuntu Xenial", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - }, - "ipv6": null, - "location": { - "cluster_id": "5", - "hypervisor_id": "412", - "node_id": "2", - "platform_id": "13", - "zone_id": "par1" - }, - "maintenances": [], - "modification_date": "2018-08-14T21:37:28.630882+00:00", - "name": "scw-e0d256", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "private_ip": "10.14.222.131", - "protected": false, - "public_ip": { - "address": "163.172.170.197", - "dynamic": false, - "id": "ea081794-a581-4495-8451-386ddaf0a451" - }, - "security_group": { - "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", - "name": "Default security group" - }, - "state": "running", - "state_detail": "booted", - "tags": [], - "volumes": { - "0": { - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "export_uri": "device://dev/vda", - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "modification_date": "2018-08-14T21:36:56.271545+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", 
- "name": "scw-e0d256" - }, - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayServerInfo(Scaleway): - - def __init__(self, module): - super(ScalewayServerInfo, self).__init__(module) - self.name = 'servers' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_server_info=ScalewayServerInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py deleted file mode 100644 index 95ec04d16f..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_snapshot_info -short_description: Gather information about the Scaleway snapshots available. -description: - - Gather information about the Scaleway snapshot available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway snapshots information - community.general.scaleway_snapshot_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_snapshot_info }}" -''' - -RETURN = r''' ---- -scaleway_snapshot_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_snapshot_info": [ - { - "base_volume": { - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" - }, - "creation_date": "2018-08-14T22:34:35.299461+00:00", - "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", - "modification_date": "2018-08-14T22:34:54.520560+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION -) - - -class ScalewaySnapshotInfo(Scaleway): - - def __init__(self, module): - super(ScalewaySnapshotInfo, self).__init__(module) - self.name = 'snapshots' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/plugins/modules/cloud/scaleway/scaleway_sshkey.py deleted file mode 100644 index bc15cefb20..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_sshkey.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway SSH keys management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_sshkey -short_description: Scaleway SSH keys management module -author: Remy Leone (@sieben) -description: - - This module manages SSH keys on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the SSH key. - default: present - choices: - - present - - absent - ssh_pub_key: - type: str - description: - - The public SSH key as a string to add. - required: true - api_url: - type: str - description: - - Scaleway API URL - default: 'https://account.scaleway.com' - aliases: ['base_url'] -''' - -EXAMPLES = ''' -- name: "Add SSH key" - community.general.scaleway_sshkey: - ssh_pub_key: "ssh-rsa AAAA..." - state: "present" - -- name: "Delete SSH key" - community.general.scaleway_sshkey: - ssh_pub_key: "ssh-rsa AAAA..." - state: "absent" - -- name: "Add SSH key with explicit token" - community.general.scaleway_sshkey: - ssh_pub_key: "ssh-rsa AAAA..." 
- state: "present" - oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "ssh_public_keys": [ - {"key": "ssh-rsa AAAA...."} - ] - } -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway - - -def extract_present_sshkeys(raw_organization_dict): - ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"] - ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list] - return ssh_key_lookup - - -def extract_user_id(raw_organization_dict): - return raw_organization_dict["organizations"][0]["users"][0]["id"] - - -def sshkey_user_patch(ssh_lookup): - ssh_list = {"ssh_public_keys": [{"key": key} - for key in ssh_lookup]} - return ssh_list - - -def core(module): - ssh_pub_key = module.params['ssh_pub_key'] - state = module.params["state"] - account_api = Scaleway(module) - response = account_api.get('organizations') - - status_code = response.status_code - organization_json = response.json - - if not response.ok: - module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format( - status_code, response.json['message'])) - - user_id = extract_user_id(organization_json) - present_sshkeys = [] - try: - present_sshkeys = extract_present_sshkeys(organization_json) - except (KeyError, IndexError) as e: - module.fail_json(changed=False, data="Error while extracting present SSH keys from API") - - if state in ('present',): - if ssh_pub_key in present_sshkeys: - module.exit_json(changed=False) - - # If key not found create it! - if module.check_mode: - module.exit_json(changed=True) - - present_sshkeys.append(ssh_pub_key) - payload = sshkey_user_patch(present_sshkeys) - - response = account_api.patch('/users/%s' % user_id, data=payload) - - if response.ok: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format( - response.status_code, response.json)) - - elif state in ('absent',): - if ssh_pub_key not in present_sshkeys: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - present_sshkeys.remove(ssh_pub_key) - payload = sshkey_user_patch(present_sshkeys) - - response = account_api.patch('/users/%s' % user_id, data=payload) - - if response.ok: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format( - response.status_code, response.json)) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - ssh_pub_key=dict(required=True), - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_user_data.py b/plugins/modules/cloud/scaleway/scaleway_user_data.py deleted file mode 100644 index d51d3e174d..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_user_data.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway user data management module -# -# Copyright (C) 2018 Online SAS. 
-# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_user_data -short_description: Scaleway user_data management module -author: Remy Leone (@sieben) -description: - - "This module manages user_data on compute instances on Scaleway." - - "It can be used to configure cloud-init for instance" -extends_documentation_fragment: -- community.general.scaleway - - -options: - - server_id: - type: str - description: - - Scaleway Compute instance ID of the server - required: true - - user_data: - type: dict - description: - - User defined data. Typically used with `cloud-init`. - - Pass your cloud-init script here as a string - required: false - - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = ''' -- name: Update the cloud-init - community.general.scaleway_user_data: - server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce' - region: ams1 - user_data: - cloud-init: 'final_message: "Hello World!"' -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway - - -def patch_user_data(compute_api, server_id, key, value): - compute_api.module.debug("Starting patching user_data attributes") - - path = "servers/%s/user_data/%s" % (server_id, key) - response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"}) - if not response.ok: - msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body) - compute_api.module.fail_json(msg=msg) - - return response - - -def delete_user_data(compute_api, server_id, key): - compute_api.module.debug("Starting deleting user_data attributes: %s" % key) - - response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key)) - - if not response.ok: - msg = 'Error during user_data deleting: (%s) %s' % response.status_code, response.body - compute_api.module.fail_json(msg=msg) - - return response - - -def get_user_data(compute_api, server_id, key): - compute_api.module.debug("Starting patching user_data attributes") - - path = "servers/%s/user_data/%s" % (server_id, key) - response = compute_api.get(path=path) - if not response.ok: - msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body) - compute_api.module.fail_json(msg=msg) - - return response.json - - -def core(module): - region = module.params["region"] - server_id = module.params["server_id"] - user_data = module.params["user_data"] - changed = False - - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - compute_api = Scaleway(module=module) - - user_data_list = compute_api.get(path="servers/%s/user_data" % server_id) - if not user_data_list.ok: - msg = 'Error during user_data fetching: %s %s' % user_data_list.status_code, user_data_list.body - compute_api.module.fail_json(msg=msg) - - present_user_data_keys = user_data_list.json["user_data"] - present_user_data = dict( - (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key)) - for key in present_user_data_keys - ) - - if present_user_data == user_data: - module.exit_json(changed=changed, msg=user_data_list.json) - - # 
First we remove keys that are not defined in the wished user_data - for key in present_user_data: - if key not in user_data: - - changed = True - if compute_api.module.check_mode: - module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id}) - - delete_user_data(compute_api=compute_api, server_id=server_id, key=key) - - # Then we patch keys that are different - for key, value in user_data.items(): - if key not in present_user_data or user_data[key] != present_user_data[key]: - - changed = True - if compute_api.module.check_mode: - module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id}) - - patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value) - - module.exit_json(changed=changed, msg=user_data) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - user_data=dict(type="dict"), - server_id=dict(required=True), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_volume.py b/plugins/modules/cloud/scaleway/scaleway_volume.py deleted file mode 100644 index a49e23c17d..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_volume.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway volumes management module -# -# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com). -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_volume -short_description: Scaleway volumes management module -author: Henryk Konsek (@hekonsek) -description: - - This module manages volumes on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the volume. - default: present - choices: - - present - - absent - region: - type: str - description: - - Scaleway region to use (for example par1). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - name: - type: str - description: - - Name used to identify the volume. - required: true - organization: - type: str - description: - - ScaleWay organization ID to which volume belongs. - size: - type: int - description: - - Size of the volume in bytes. - volume_type: - type: str - description: - - Type of the volume (for example 'l_ssd'). 
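In core() below, idempotency hinges on finding an existing volume by organization and name. The module's loop keeps the last match; condensed into a sketch:

    def find_volume(volumes, organization, name):
        """Return the last volume matching both organization and name, else None."""
        found = None
        for volume in volumes:
            if volume["organization"] == organization and volume["name"] == name:
                found = volume
        return found

    volumes = [{"organization": "org-1", "name": "my-volume", "id": "a"}]
    print(find_volume(volumes, "org-1", "my-volume"))  # -> the matching dict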
-''' - -EXAMPLES = ''' -- name: Create 10GB volume - community.general.scaleway_volume: - name: my-volume - state: present - region: par1 - organization: "{{ scw_org }}" - "size": 10000000000 - volume_type: l_ssd - register: server_creation_check_task - -- name: Make sure volume deleted - community.general.scaleway_volume: - name: my-volume - state: absent - region: par1 -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "volume": { - "export_uri": null, - "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd", - "name": "volume-0-3", - "organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a", - "server": null, - "size": 10000000000, - "volume_type": "l_ssd" - } -} -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule - - -def core(module): - state = module.params['state'] - name = module.params['name'] - organization = module.params['organization'] - size = module.params['size'] - volume_type = module.params['volume_type'] - - account_api = Scaleway(module) - response = account_api.get('volumes') - status_code = response.status_code - volumes_json = response.json - - if not response.ok: - module.fail_json(msg='Error getting volume [{0}: {1}]'.format( - status_code, response.json['message'])) - - volumeByName = None - for volume in volumes_json['volumes']: - if volume['organization'] == organization and volume['name'] == name: - volumeByName = volume - - if state in ('present',): - if volumeByName is not None: - module.exit_json(changed=False) - - payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type} - - response = account_api.post('/volumes', payload) - - if response.ok: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error creating volume [{0}: {1}]'.format( - response.status_code, response.json)) - - elif state in ('absent',): - if volumeByName is None: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - response = account_api.delete('/volumes/' + volumeByName['id']) - if response.status_code == 204: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error deleting volume [{0}: {1}]'.format( - response.status_code, response.json)) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - name=dict(required=True), - size=dict(type='int'), - organization=dict(), - volume_type=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/plugins/modules/cloud/scaleway/scaleway_volume_info.py deleted file mode 100644 index 0042146795..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_volume_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_volume_info -short_description: Gather information about the 
Scaleway volumes available. -description: - - Gather information about the Scaleway volumes available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway volumes information - community.general.scaleway_volume_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_volume_info }}" -''' - -RETURN = r''' ---- -scaleway_volume_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." - returned: success - type: list - elements: dict - sample: - "scaleway_volume_info": [ - { - "creation_date": "2018-08-14T20:56:24.949660+00:00", - "export_uri": null, - "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", - "modification_date": "2018-08-14T20:56:24.949660+00:00", - "name": "test-volume", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": null, - "size": 50000000000, - "state": "available", - "volume_type": "l_ssd" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, - SCALEWAY_LOCATION) - - -class ScalewayVolumeInfo(Scaleway): - - def __init__(self, module): - super(ScalewayVolumeInfo, self).__init__(module) - self.name = 'volumes' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_volume_info=ScalewayVolumeInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/smartos/imgadm.py b/plugins/modules/cloud/smartos/imgadm.py deleted file mode 100644 index 18a67d014a..0000000000 --- a/plugins/modules/cloud/smartos/imgadm.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, 2017 Jasper Lievisse Adriaanse -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: imgadm -short_description: Manage SmartOS images -description: - - Manage SmartOS virtual machine images through imgadm(1M). -author: Jasper Lievisse Adriaanse (@jasperla) -options: - force: - required: false - type: bool - description: - - Force a given operation (where supported by imgadm(1M)). - pool: - required: false - default: zones - description: - - zpool to import to or delete images from. - type: str - source: - required: false - description: - - URI for the image source. - type: str - state: - required: true - choices: [ present, absent, deleted, imported, updated, vacuumed ] - description: - - State the object operated on should be in. C(imported) is an alias - for C(present) and C(deleted) for C(absent).
When set to C(vacuumed) - and C(uuid) to C(*), it will remove all unused images. - type: str - - type: - required: false - choices: [ imgapi, docker, dsapi ] - default: imgapi - description: - - Type for image sources. - type: str - - uuid: - required: false - description: - - Image UUID. Can either be a full UUID or C(*) for all images. - type: str - -requirements: - - python >= 2.6 -''' - -EXAMPLES = ''' -- name: Import an image - community.general.imgadm: - uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' - state: imported - -- name: Delete an image - community.general.imgadm: - uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' - state: deleted - -- name: Update all images - community.general.imgadm: - uuid: '*' - state: updated - -- name: Update a single image - community.general.imgadm: - uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' - state: updated - -- name: Add a source - community.general.imgadm: - source: 'https://datasets.project-fifo.net' - state: present - -- name: Add a Docker source - community.general.imgadm: - source: 'https://docker.io' - type: docker - state: present - -- name: Remove a source - community.general.imgadm: - source: 'https://docker.io' - state: absent -''' - -RETURN = ''' -source: - description: Source that is managed. - returned: When not managing an image. - type: str - sample: https://datasets.project-fifo.net -uuid: - description: UUID for an image operated on. - returned: When not managing an image source. - type: str - sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 -state: - description: State of the target, after execution. - returned: success - type: str - sample: 'present' -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - -# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a -# -E option to return any errors in JSON, the generated JSON does not play well -# with the JSON parsers of Python. The returned message contains '\n' as part of -# the stacktrace, which breaks the parsers. - - -class Imgadm(object): - def __init__(self, module): - self.module = module - self.params = module.params - self.cmd = module.get_bin_path('imgadm', required=True) - self.changed = False - self.uuid = module.params['uuid'] - - # Since there are a number of (natural) aliases, prevent having to look - # them up every time we operate on `state`. - if self.params['state'] in ['present', 'imported', 'updated']: - self.present = True - else: - self.present = False - - # Perform basic UUID validation upfront. - if self.uuid and self.uuid != '*': - if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE): - module.fail_json(msg='Provided value for uuid option is not a valid UUID.') - - # Helper method to massage stderr - def errmsg(self, stderr): - match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr) - if match: - return match.groups()[0] - else: - return 'Unexpected failure' - - def update_images(self): - if self.uuid == '*': - cmd = '{0} update'.format(self.cmd) - else: - cmd = '{0} update {1}'.format(self.cmd, self.uuid) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr))) - - # There is no feedback from imgadm(1M) to determine if anything - # was actually changed. So treat this as an 'always-changes' operation. - # Note that 'imgadm -v' produces unparseable JSON...
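- # (This module does not support check mode, so unconditionally flagging - # a change here is safe.)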
- self.changed = True - - def manage_sources(self): - force = self.params['force'] - source = self.params['source'] - imgtype = self.params['type'] - - cmd = '{0} sources'.format(self.cmd) - - if force: - cmd += ' -f' - - if self.present: - cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype) - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr))) - - # Check the various responses. - # Note that trying to add a source with the wrong type is handled - # above as it results in a non-zero status. - - regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source) - if re.match(regex, stdout): - self.changed = False - - regex = 'Added "{0}" image source "{1}"'.format(imgtype, source) - if re.match(regex, stdout): - self.changed = True - else: - # Type is ignored by imgadm(1M) here - cmd += ' -d {0}'.format(source) - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr))) - - regex = 'Do not have image source "{0}", no change'.format(source) - if re.match(regex, stdout): - self.changed = False - - regex = 'Deleted ".*" image source "{0}"'.format(source) - if re.match(regex, stdout): - self.changed = True - - def manage_images(self): - pool = self.params['pool'] - state = self.params['state'] - - if state == 'vacuumed': - # Unconditionally pass '-f', otherwise we're prompted with 'y/N' - cmd = '{0} vacuum -f'.format(self.cmd) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr))) - else: - if stdout == '': - self.changed = False - else: - self.changed = True - if self.present: - cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr))) - - regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid) - if re.match(regex, stdout): - self.changed = False - - regex = '.*ActiveImageNotFound.*' - if re.match(regex, stderr): - self.changed = False - - regex = 'Imported image {0}.*'.format(self.uuid) - if re.match(regex, stdout.splitlines()[-1]): - self.changed = True - else: - cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - regex = '.*ImageNotInstalled.*' - if re.match(regex, stderr): - # Even if the 'rc' was non-zero (3), we handled the situation - # in order to determine if there was a change. - self.changed = False - - regex = 'Deleted image {0}'.format(self.uuid) - if re.match(regex, stdout): - self.changed = True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - force=dict(type='bool'), - pool=dict(default='zones'), - source=dict(), - state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']), - type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']), - uuid=dict() - ), - # This module relies largely on imgadm(1M) to enforce idempotency, which does not - # provide a "noop" (or equivalent) mode to do a dry-run. - supports_check_mode=False, - ) - - imgadm = Imgadm(module) - - uuid = module.params['uuid'] - source = module.params['source'] - state = module.params['state'] - - result = {'state': state} - - # Either manage sources or images.
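- # A task that provides 'source' manages image sources; any other task - # operates on images, addressed by 'uuid'.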
- if source: - result['source'] = source - imgadm.manage_sources() - else: - result['uuid'] = uuid - - if state == 'updated': - imgadm.update_images() - else: - # Make sure we operate on a single image for the following actions - if (uuid == '*') and (state != 'vacuumed'): - module.fail_json(msg='Can only specify uuid as "*" when updating or vacuuming image(s)') - imgadm.manage_images() - - result['changed'] = imgadm.changed - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/smartos/nictagadm.py b/plugins/modules/cloud/smartos/nictagadm.py deleted file mode 100644 index 05aba6f188..0000000000 --- a/plugins/modules/cloud/smartos/nictagadm.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Bruce Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: nictagadm -short_description: Manage nic tags on SmartOS systems -description: - - Create or delete nic tags on SmartOS systems. -author: -- Bruce Smith (@SmithX10) -options: - name: - description: - - Name of the nic tag. - required: true - type: str - mac: - description: - - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub). - - Parameters I(mac) and I(etherstub) are mutually exclusive. - type: str - etherstub: - description: - - Specifies that the nic tag will be attached to a created I(etherstub). - - Parameter I(etherstub) is mutually exclusive with both I(mtu) and I(mac). - type: bool - default: no - mtu: - description: - - Specifies the size of the I(mtu) of the desired nic tag. - - Parameters I(mtu) and I(etherstub) are mutually exclusive. - type: int - force: - description: - - When I(state) is set to C(absent), this switch uses the C(-f) parameter and deletes the nic tag regardless of existing VMs. - type: bool - default: no - state: - description: - - Create or delete a SmartOS nic tag. - type: str - choices: [ absent, present ] - default: present -''' - -EXAMPLES = r''' -- name: Create 'storage0' on '00:1b:21:a3:f5:4d' - community.general.nictagadm: - name: storage0 - mac: 00:1b:21:a3:f5:4d - mtu: 9000 - state: present - -- name: Remove 'storage0' nic tag - community.general.nictagadm: - name: storage0 - state: absent -''' - -RETURN = r''' -name: - description: Name of the nic tag. - returned: always - type: str - sample: storage0 -mac: - description: MAC Address that the nic tag was attached to. - returned: always - type: str - sample: 00:1b:21:a3:f5:4d -etherstub: - description: Specifies if the nic tag will create and attach to an etherstub. - returned: always - type: bool - sample: False -mtu: - description: Specifies which MTU size was passed during the nictagadm add command. I(mtu) and I(etherstub) are mutually exclusive.
- returned: always - type: int - sample: 1500 -force: - description: Shows if C(-f) was used during the deletion of a nic tag. - returned: always - type: bool - sample: False -state: - description: State of the target. - returned: always - type: str - sample: present -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.network import is_mac - - -class NicTag(object): - - def __init__(self, module): - self.module = module - - self.name = module.params['name'] - self.mac = module.params['mac'] - self.etherstub = module.params['etherstub'] - self.mtu = module.params['mtu'] - self.force = module.params['force'] - self.state = module.params['state'] - - self.nictagadm_bin = self.module.get_bin_path('nictagadm', True) - - def is_valid_mac(self): - return is_mac(self.mac.lower()) - - def nictag_exists(self): - cmd = [self.nictagadm_bin, 'exists', self.name] - (rc, dummy, dummy) = self.module.run_command(cmd) - - return rc == 0 - - def add_nictag(self): - cmd = [self.nictagadm_bin, '-v', 'add'] - - if self.etherstub: - cmd.append('-l') - - if self.mtu: - cmd.append('-p') - cmd.append('mtu=' + str(self.mtu)) - - if self.mac: - cmd.append('-p') - cmd.append('mac=' + str(self.mac)) - - cmd.append(self.name) - - return self.module.run_command(cmd) - - def delete_nictag(self): - cmd = [self.nictagadm_bin, '-v', 'delete'] - - if self.force: - cmd.append('-f') - - cmd.append(self.name) - - return self.module.run_command(cmd) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - mac=dict(type='str'), - etherstub=dict(type='bool', default=False), - mtu=dict(type='int'), - force=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - mutually_exclusive=[ - ['etherstub', 'mac'], - ['etherstub', 'mtu'], - ], - required_if=[ - ['etherstub', False, ['name', 'mac']], - ['state', 'absent', ['name', 'force']], - ], - supports_check_mode=True - ) - - nictag = NicTag(module) - - rc = None - out = '' - err = '' - result = dict( - changed=False, - etherstub=nictag.etherstub, - force=nictag.force, - name=nictag.name, - mac=nictag.mac, - mtu=nictag.mtu, - state=nictag.state, - ) - - if nictag.mac and not nictag.is_valid_mac(): - module.fail_json(msg='Invalid MAC Address Value', - name=nictag.name, - mac=nictag.mac, - etherstub=nictag.etherstub) - - if nictag.state == 'absent': - if nictag.nictag_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = nictag.delete_nictag() - if rc != 0: - module.fail_json(name=nictag.name, msg=err, rc=rc) - elif nictag.state == 'present': - if not nictag.nictag_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = nictag.add_nictag() - if rc is not None and rc != 0: - module.fail_json(name=nictag.name, msg=err, rc=rc) - - if rc is not None: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/cloud/smartos/smartos_image_info.py deleted file mode 100644 index 369559f52a..0000000000 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function
-__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: smartos_image_info -short_description: Get SmartOS image details -description: - - Retrieve information about all installed images on SmartOS. - - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)! -author: Adam Števko (@xen0l) -options: - filters: - description: - - Criteria for selecting an image. Can be any value from image - manifest and 'published_date', 'published', 'source', 'clones', - and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm) - under 'imgadm list'. - type: str -''' - -EXAMPLES = ''' -- name: Return information about all installed images - community.general.smartos_image_info: - register: result - -- name: Return all private active Linux images - community.general.smartos_image_info: - filters: "os=linux state=active public=false" - register: result - -- name: Show how many clones every image has - community.general.smartos_image_info: - register: result - -- name: Print information - ansible.builtin.debug: - msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} - has {{ result.smartos_images[item]['clones'] }} VM(s)" - with_items: "{{ result.smartos_images.keys() | list }}" - -- name: Print information - ansible.builtin.debug: - msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} - has {{ smartos_images[item]['clones'] }} VM(s)" - with_items: "{{ smartos_images.keys() | list }}" -''' - -RETURN = ''' -''' - -import json -from ansible.module_utils.basic import AnsibleModule - - -class ImageFacts(object): - - def __init__(self, module): - self.module = module - - self.filters = module.params['filters'] - - def return_all_installed_images(self): - cmd = [self.module.get_bin_path('imgadm', required=True), 'list', '-j'] - - if self.filters: - cmd.append(self.filters) - - (rc, out, err) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json( - msg='Failed to get all installed images', stderr=err) - - images = json.loads(out) - - result = {} - for image in images: - result[image['manifest']['uuid']] = image['manifest'] - # Merge additional attributes with the image manifest. - for attrib in ['clones', 'source', 'zpool']: - result[image['manifest']['uuid']][attrib] = image[attrib] - - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - filters=dict(default=None), - ), - supports_check_mode=True, - ) - - image_facts = ImageFacts(module) - - data = dict(smartos_images=image_facts.return_all_installed_images()) - - module.exit_json(**data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/smartos/vmadm.py b/plugins/modules/cloud/smartos/vmadm.py deleted file mode 100644 index 03a022423e..0000000000 --- a/plugins/modules/cloud/smartos/vmadm.py +++ /dev/null @@ -1,803 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Jasper Lievisse Adriaanse -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vmadm -short_description: Manage SmartOS virtual machines and zones -description: - - Manage SmartOS virtual machines through vmadm(1M).
-author: Jasper Lievisse Adriaanse (@jasperla) -options: - archive_on_delete: - required: false - description: - - When enabled, the zone dataset will be mounted on C(/zones/archive) - upon removal. - type: bool - autoboot: - required: false - description: - - Whether or not a VM is booted when the system is rebooted. - type: bool - brand: - choices: [ joyent, joyent-minimal, lx, kvm, bhyve ] - default: joyent - description: - - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0. - type: str - boot: - required: false - description: - - Set the boot order for KVM VMs. - type: str - cpu_cap: - required: false - description: - - Sets a limit on the amount of CPU time that can be used by a VM. - Use C(0) for no cap. - type: int - cpu_shares: - required: false - description: - - Sets a limit on the number of fair share scheduler (FSS) CPU shares for - a VM. This limit is relative to all other VMs on the system. - type: int - cpu_type: - required: false - choices: [ qemu64, host ] - default: qemu64 - description: - - Control the type of virtual CPU exposed to KVM VMs. - type: str - customer_metadata: - required: false - description: - - Metadata to be set and associated with this VM, this contains customer - modifiable keys. - type: dict - delegate_dataset: - required: false - description: - - Whether to delegate a ZFS dataset to an OS VM. - type: bool - disk_driver: - required: false - description: - - Default value for a virtual disk model for KVM guests. - type: str - disks: - required: false - description: - - A list of disks to add, valid properties are documented in vmadm(1M). - type: list - elements: dict - dns_domain: - required: false - description: - - Domain value for C(/etc/hosts). - type: str - docker: - required: false - description: - - Docker images need this flag enabled along with the I(brand) set to C(lx). - type: bool - filesystems: - required: false - description: - - Mount additional filesystems into an OS VM. - type: list - elements: dict - firewall_enabled: - required: false - description: - - Enables the firewall, allowing fwadm(1M) rules to be applied. - type: bool - force: - required: false - description: - - Force a particular action (i.e. stop or delete a VM). - type: bool - fs_allowed: - required: false - description: - - Comma separated list of filesystem types this zone is allowed to mount. - type: str - hostname: - required: false - description: - - Zone/VM hostname. - type: str - image_uuid: - required: false - description: - - Image UUID. - type: str - indestructible_delegated: - required: false - description: - - Adds an C(@indestructible) snapshot to delegated datasets. - type: bool - indestructible_zoneroot: - required: false - description: - - Adds an C(@indestructible) snapshot to zoneroot. - type: bool - internal_metadata: - required: false - description: - - Metadata to be set and associated with this VM, this contains operator - generated keys. - type: dict - internal_metadata_namespace: - required: false - description: - - List of namespaces to be set as I(internal_metadata-only); these namespaces - will come from I(internal_metadata) rather than I(customer_metadata). - type: str - kernel_version: - required: false - description: - - Kernel version to emulate for LX VMs. - type: str - limit_priv: - required: false - description: - - Set (comma separated) list of privileges the zone is allowed to use.
- type: str - maintain_resolvers: - required: false - description: - - Resolvers in C(/etc/resolv.conf) will be updated when updating - the I(resolvers) property. - type: bool - max_locked_memory: - required: false - description: - - Total amount of memory (in MiBs) on the host that can be locked by this VM. - type: int - max_lwps: - required: false - description: - - Maximum number of lightweight processes this VM is allowed to have running. - type: int - max_physical_memory: - required: false - description: - - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use. - type: int - max_swap: - required: false - description: - - Maximum amount of virtual memory (in MiBs) the VM is allowed to use. - type: int - mdata_exec_timeout: - required: false - description: - - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service - that runs user-scripts in the zone. - type: int - name: - required: false - aliases: [ alias ] - description: - - Name of the VM. vmadm(1M) uses this as an optional name. - type: str - nic_driver: - required: false - description: - - Default value for a virtual NIC model for KVM guests. - type: str - nics: - required: false - description: - - A list of nics to add, valid properties are documented in vmadm(1M). - type: list - elements: dict - nowait: - required: false - description: - - Consider the provisioning complete when the VM first starts, rather than - when the VM has rebooted. - type: bool - qemu_opts: - required: false - description: - - Additional qemu arguments for KVM guests. This overwrites the default arguments - provided by vmadm(1M) and should only be used for debugging. - type: str - qemu_extra_opts: - required: false - description: - - Additional qemu cmdline arguments for KVM guests. - type: str - quota: - required: false - description: - - Quota on zone filesystems (in MiBs). - type: int - ram: - required: false - description: - - Amount of virtual RAM for a KVM guest (in MiBs). - type: int - resolvers: - required: false - description: - - List of resolvers to be put into C(/etc/resolv.conf). - type: list - elements: str - routes: - required: false - description: - - Dictionary that maps destinations to gateways, these will be set as static - routes in the VM. - type: dict - spice_opts: - required: false - description: - - Additional options for SPICE-enabled KVM VMs. - type: str - spice_password: - required: false - description: - - Password required to connect to SPICE. By default no password is set. - Please note this can be read from the Global Zone. - type: str - state: - choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ] - default: running - description: - - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted) - operate on a VM that is currently provisioned. C(present) means that the VM will be - created if it was absent, and that it will be in a running state. C(absent) will - shut down the zone before removing it. - C(stopped) means the zone will be created if it doesn't exist already, before shutting - it down. - type: str - tmpfs: - required: false - description: - - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem. - type: int - uuid: - required: false - description: - - UUID of the VM. Can either be a full UUID or C(*) for all VMs. - type: str - vcpus: - required: false - description: - - Number of virtual CPUs for a KVM guest.
- type: int - vga: - required: false - description: - - Specify VGA emulation used by KVM VMs. - type: str - virtio_txburst: - required: false - description: - - Number of packets that can be sent in a single flush of the tx queue of virtio NICs. - type: int - virtio_txtimer: - required: false - description: - - Timeout (in nanoseconds) for the TX timer of virtio NICs. - type: int - vnc_password: - required: false - description: - - Password required to connect to VNC. By default no password is set. - Please note this can be read from the Global Zone. - type: str - vnc_port: - required: false - description: - - TCP port for the VNC server to listen on. Set C(0) for random, - or C(-1) to disable. - type: int - zfs_data_compression: - required: false - description: - - Specifies compression algorithm used for this VM's data dataset. This option - only has an effect on delegated datasets. - type: str - zfs_data_recsize: - required: false - description: - - Suggested block size (power of 2) for files in the delegated dataset's filesystem. - type: int - zfs_filesystem_limit: - required: false - description: - - Maximum number of filesystems the VM can have. - type: int - zfs_io_priority: - required: false - description: - - IO throttle priority value relative to other VMs. - type: int - zfs_root_compression: - required: false - description: - - Specifies compression algorithm used for this VM's root dataset. This option - only has an effect on the zoneroot dataset. - type: str - zfs_root_recsize: - required: false - description: - - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem. - type: int - zfs_snapshot_limit: - required: false - description: - - Number of snapshots the VM can have. - type: int - zpool: - required: false - description: - - ZFS pool the VM's zone dataset will be created in. - type: str -requirements: - - python >= 2.6 -''' - -EXAMPLES = ''' -- name: Create SmartOS zone - community.general.vmadm: - brand: joyent - state: present - alias: fw_zone - image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5 - firewall_enabled: yes - indestructible_zoneroot: yes - nics: - - nic_tag: admin - ip: dhcp - primary: true - internal_metadata: - root_pw: 'secret' - quota: 1 - -- name: Delete a zone - community.general.vmadm: - alias: test_zone - state: deleted - -- name: Stop all zones - community.general.vmadm: - uuid: '*' - state: stopped -''' - -RETURN = ''' -uuid: - description: UUID of the managed VM. - returned: always - type: str - sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33' -alias: - description: Alias of the managed VM. - returned: When addressing a VM by alias. - type: str - sample: 'dns-zone' -state: - description: State of the target, after execution. - returned: success - type: str - sample: 'running' -''' - -import json -import os -import re -import tempfile -import traceback - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -# While vmadm(1M) supports a -E option to return any errors in JSON, the -# generated JSON does not play well with the JSON parsers of Python. -# The returned message contains '\n' as part of the stacktrace, -# which breaks the parsers. - - -def get_vm_prop(module, uuid, prop): - # Lookup a property for the given VM. - # Returns the property, or None if not found.
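- # 'vmadm lookup -j -o <prop>' prints a JSON array with one object per - # matching VM; an empty array means no match.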
- cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid) - - (rc, stdout, stderr) = module.run_command(cmd) - - if rc != 0: - module.fail_json( - msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr) - - try: - stdout_json = json.loads(stdout) - except Exception as e: - module.fail_json( - msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop), - details=to_native(e), exception=traceback.format_exc()) - - if len(stdout_json) > 0 and prop in stdout_json[0]: - return stdout_json[0][prop] - else: - return None - - -def get_vm_uuid(module, alias): - # Lookup the uuid that goes with the given alias. - # Returns the uuid or '' if not found. - cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias) - - (rc, stdout, stderr) = module.run_command(cmd) - - if rc != 0: - module.fail_json( - msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr) - - # If no VM was found matching the given alias, we get back an empty array. - # That is not an error condition as we might be explicitly checking its - # absence. - if stdout.strip() == '[]': - return None - else: - try: - stdout_json = json.loads(stdout) - except Exception as e: - module.fail_json( - msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias), - details=to_native(e), exception=traceback.format_exc()) - - if len(stdout_json) > 0 and 'uuid' in stdout_json[0]: - return stdout_json[0]['uuid'] - - -def get_all_vm_uuids(module): - # Retrieve the UUIDs for all VMs. - cmd = '{0} lookup -j -o uuid'.format(module.vmadm) - - (rc, stdout, stderr) = module.run_command(cmd) - - if rc != 0: - module.fail_json(msg='Failed to get VMs list', exception=stderr) - - try: - stdout_json = json.loads(stdout) - return [v['uuid'] for v in stdout_json] - except Exception as e: - module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e), - exception=traceback.format_exc()) - - -def new_vm(module, uuid, vm_state): - payload_file = create_payload(module, uuid) - - (rc, stdout, stderr) = vmadm_create_vm(module, payload_file) - - if rc != 0: - changed = False - module.fail_json(msg='Could not create VM', exception=stderr) - else: - changed = True - # 'vmadm create' returns all output to stderr... - match = re.match('Successfully created VM (.*)', stderr) - if match: - vm_uuid = match.groups()[0] - if not is_valid_uuid(vm_uuid): - module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid)) - else: - module.fail_json(msg='Could not retrieve UUID of newly created(?) VM') - - # Now that the VM is created, ensure it is in the desired state (if not 'running') - if vm_state != 'running': - ret = set_vm_state(module, vm_uuid, vm_state) - if not ret: - module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state)) - - try: - os.unlink(payload_file) - except Exception as e: - # Since the payload may contain sensitive information, fail hard - # if we cannot remove the file so the operator knows about it. - module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)), - exception=traceback.format_exc()) - - return changed, vm_uuid - - -def vmadm_create_vm(module, payload_file): - # Create a new VM using the provided payload. - cmd = '{0} create -f {1}'.format(module.vmadm, payload_file) - - return module.run_command(cmd) - - -def set_vm_state(module, vm_uuid, vm_state): - p = module.params - - # Check if the VM is already in the desired state.
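- # (returning None tells vm_state_transition() that no state change was needed)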
- state = get_vm_prop(module, vm_uuid, 'state') - if state and (state == vm_state): - return None - - # Lookup table for the state to be in, and which command to use for that. - # vm_state: [vmadm command, forceable?] - cmds = { - 'stopped': ['stop', True], - 'running': ['start', False], - 'deleted': ['delete', True], - 'rebooted': ['reboot', False] - } - - if p['force'] and cmds[vm_state][1]: - force = '-F' - else: - force = '' - - cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid) - - (rc, stdout, stderr) = module.run_command(cmd) - - match = re.match('^Successfully.*', stderr) - if match: - return True - else: - return False - - -def create_payload(module, uuid): - # Create the JSON payload (vmdef) and return the filename. - - # Filter out the few options that are not valid VM properties. - module_options = ['debug', 'force', 'state'] - # @TODO make this a simple {} comprehension as soon as py2 is ditched - # @TODO {k: v for k, v in p.items() if k not in module_options} - vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v]) - - try: - vmdef_json = json.dumps(vmdef) - except Exception as e: - module.fail_json( - msg='Could not create valid JSON payload', exception=traceback.format_exc()) - - # Create the temporary file that contains our payload, and set tight - # permissions, as it may contain sensitive information. - try: - # XXX: When there's a way to get the current ansible temporary directory - # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain - # the payload (thus removing the `save_payload` option). - fname = tempfile.mkstemp()[1] - os.chmod(fname, 0o400) - with open(fname, 'w') as fh: - fh.write(vmdef_json) - except Exception as e: - module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc()) - - return fname - - -def vm_state_transition(module, uuid, vm_state): - ret = set_vm_state(module, uuid, vm_state) - - # Whether the VM changed state. - if ret is None: - return False - elif ret: - return True - else: - module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state)) - - -def is_valid_uuid(uuid): - if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE): - return True - else: - return False - - -def validate_uuids(module): - # Perform basic UUID validation. - failed = [] - - for u in [['uuid', module.params['uuid']], - ['image_uuid', module.params['image_uuid']]]: - if u[1] and u[1] != '*': - if not is_valid_uuid(u[1]): - failed.append(u[0]) - - if len(failed) > 0: - module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed))) - - -def manage_all_vms(module, vm_state): - # Handle operations for all VMs, which can by definition only - # be state transitions. - state = module.params['state'] - - if state == 'created': - module.fail_json(msg='State "created" is only valid for tasks with a single VM') - - # If any of the VMs has a change, the task as a whole has a change. - any_changed = False - - # First get all VM uuids and for each check their state, and adjust it if needed.
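- # A VM that no longer exists counts as unchanged when the desired state is 'deleted'.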
- for uuid in get_all_vm_uuids(module): - current_vm_state = get_vm_prop(module, uuid, 'state') - if not current_vm_state and vm_state == 'deleted': - any_changed = False - else: - if module.check_mode: - if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != vm_state): - any_changed = True - else: - any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed) - - return any_changed - - -def main(): - # In order to reduce the clutter and boilerplate for trivial options, - # abstract the vmadm properties and build the dict of arguments later. - # Dict of all options that are simple to define based on their type. - # They're not required and have a default of None. - properties = { - 'str': [ - 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname', - 'image_uuid', 'internal_metadata_namespace', 'kernel_version', - 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts', - 'spice_opts', 'uuid', 'vga', 'zfs_data_compression', - 'zfs_root_compression', 'zpool' - ], - 'bool': [ - 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset', - 'docker', 'firewall_enabled', 'force', 'indestructible_delegated', - 'indestructible_zoneroot', 'maintain_resolvers', 'nowait' - ], - 'int': [ - 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps', - 'max_physical_memory', 'max_swap', 'mdata_exec_timeout', - 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst', - 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize', - 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize', - 'zfs_snapshot_limit' - ], - 'dict': ['customer_metadata', 'internal_metadata', 'routes'], - } - - # Start with the options that are not as trivial as those above. - options = dict( - state=dict( - default='running', - type='str', - choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted'] - ), - name=dict( - default=None, type='str', - aliases=['alias'] - ), - brand=dict( - default='joyent', - type='str', - choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve'] - ), - cpu_type=dict( - default='qemu64', - type='str', - choices=['host', 'qemu64'] - ), - # Regular strings, however these require additional options. - spice_password=dict(type='str', no_log=True), - vnc_password=dict(type='str', no_log=True), - disks=dict(type='list', elements='dict'), - nics=dict(type='list', elements='dict'), - resolvers=dict(type='list', elements='str'), - filesystems=dict(type='list', elements='dict'), - ) - - # Add our 'simple' options to options dict. - for type in properties: - for p in properties[type]: - option = dict(default=None, type=type) - options[p] = option - - module = AnsibleModule( - argument_spec=options, - supports_check_mode=True, - required_one_of=[['name', 'uuid']] - ) - - module.vmadm = module.get_bin_path('vmadm', required=True) - - p = module.params - uuid = p['uuid'] - state = p['state'] - - # Translate the state parameter into something we can use later on. - if state in ['present', 'running']: - vm_state = 'running' - elif state in ['stopped', 'created']: - vm_state = 'stopped' - elif state in ['absent', 'deleted']: - vm_state = 'deleted' - elif state in ['restarted', 'rebooted']: - vm_state = 'rebooted' - - result = {'state': state} - - # While it's possible to refer to a given VM by its `alias`, it's easier - # to operate on VMs by their UUID. So if we're not given a `uuid`, look - # it up. - if not uuid: - uuid = get_vm_uuid(module, p['name']) - # Bit of a chicken and egg problem here for VMs with state == deleted.
- # If they're going to be removed in this play, we have to look up the - # uuid. If they're already deleted there's nothing to look up. - # So if state == deleted and get_vm_uuid() returned None, the VM is already - # deleted and there's nothing else to do. - if uuid is None and vm_state == 'deleted': - result['name'] = p['name'] - module.exit_json(**result) - - validate_uuids(module) - - if p['name']: - result['name'] = p['name'] - result['uuid'] = uuid - - if uuid == '*': - result['changed'] = manage_all_vms(module, vm_state) - module.exit_json(**result) - - # The general flow is as follows: - # - first the current state of the VM is obtained by its UUID. - # - If the state was not found and the desired state is 'deleted', return. - # - If the state was not found, it means the VM has to be created. - # Subsequently the VM will be set to the desired state (i.e. stopped) - # - Otherwise, it means the VM exists already and we operate on its - # state (i.e. reboot it.) - # - # In the future it should be possible to query the VM for a particular - # property as a valid state (i.e. queried) so the result can be - # registered. - # Also, VMs should be able to get their properties updated. - # Managing VM snapshots should be part of a standalone module. - - # First obtain the VM state to determine what needs to be done with it. - current_vm_state = get_vm_prop(module, uuid, 'state') - - # First handle the case where the VM should be deleted and is not present. - if not current_vm_state and vm_state == 'deleted': - result['changed'] = False - elif module.check_mode: - # Shortcut for check mode, if there is no VM yet, it will need to be created. - # Or, if the VM is not in the desired state yet, it needs to transition. - if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != vm_state): - result['changed'] = True - else: - result['changed'] = False - - module.exit_json(**result) - # No VM was found that matched the given ID (alias or uuid), so we create it. - elif not current_vm_state: - result['changed'], result['uuid'] = new_vm(module, uuid, vm_state) - else: - # VM was found, operate on its state directly. - result['changed'] = vm_state_transition(module, uuid, vm_state) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/cloud/softlayer/sl_vm.py deleted file mode 100644 index 825d82e173..0000000000 --- a/plugins/modules/cloud/softlayer/sl_vm.py +++ /dev/null @@ -1,430 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sl_vm -short_description: Create or cancel a virtual instance in SoftLayer -description: - - Creates or cancels SoftLayer instances. - - When created, optionally waits for it to be 'running'. -options: - instance_id: - description: - - Instance ID of the virtual instance to perform the action on. - type: str - hostname: - description: - - Hostname to be provided to a virtual instance. - type: str - domain: - description: - - Domain name to be provided to a virtual instance. - type: str - datacenter: - description: - - Datacenter for the virtual instance to be deployed.
- type: str - choices: - - ams01 - - ams03 - - che01 - - dal01 - - dal05 - - dal06 - - dal09 - - dal10 - - dal12 - - dal13 - - fra02 - - fra04 - - fra05 - - hkg02 - - hou02 - - lon02 - - lon04 - - lon06 - - mel01 - - mex01 - - mil01 - - mon01 - - osl01 - - par01 - - sao01 - - sea01 - - seo01 - - sjc01 - - sjc03 - - sjc04 - - sng01 - - syd01 - - syd04 - - tok02 - - tor01 - - wdc01 - - wdc04 - - wdc06 - - wdc07 - tags: - description: - - Tag or list of tags to be provided to a virtual instance. - type: str - hourly: - description: - - Flag to determine if the instance should be hourly billed. - type: bool - default: 'yes' - private: - description: - - Flag to determine if the instance should be private only. - type: bool - default: 'no' - dedicated: - description: - - Flag to determine if the instance should be deployed in dedicated space. - type: bool - default: 'no' - local_disk: - description: - - Flag to determine if local disk should be used for the new instance. - type: bool - default: 'yes' - cpus: - description: - - Count of cpus to be assigned to new virtual instance. - type: int - choices: [1, 2, 4, 8, 16, 32, 56] - memory: - description: - - Amount of memory to be assigned to new virtual instance. - type: int - choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] - flavor: - description: - - Specify which SoftLayer flavor template to use instead of cpus and memory. - version_added: '0.2.0' - type: str - disks: - description: - - List of disk sizes to be assigned to new virtual instance. - default: [ 25 ] - type: list - elements: int - os_code: - description: - - OS Code to be used for new virtual instance. - type: str - image_id: - description: - - Image Template to be used for new virtual instance. - type: str - nic_speed: - description: - - NIC Speed to be assigned to new virtual instance. - choices: [10, 100, 1000] - type: int - public_vlan: - description: - - VLAN by its Id to be assigned to the public NIC. - type: str - private_vlan: - description: - - VLAN by its Id to be assigned to the private NIC. - type: str - ssh_keys: - description: - - List of ssh keys by their Id to be assigned to a virtual instance. - type: list - elements: str - post_uri: - description: - - URL of a post provisioning script to be loaded and executed on virtual instance. - type: str - state: - description: - - Create, or cancel a virtual instance. - - Specify C(present) for create, C(absent) to cancel. - choices: [ absent, present ] - default: present - type: str - wait: - description: - - Flag used to wait for active status before returning. - type: bool - default: 'yes' - wait_time: - description: - - Time in seconds before wait returns. 
- default: 600 - type: int -requirements: - - python >= 2.6 - - softlayer >= 4.1.1 -author: -- Matt Colton (@mcltn) -''' - -EXAMPLES = ''' -- name: Build instance - hosts: localhost - gather_facts: no - tasks: - - name: Build instance request - community.general.sl_vm: - hostname: instance-1 - domain: anydomain.com - datacenter: dal09 - tags: ansible-module-test - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: [25] - os_code: UBUNTU_LATEST - wait: no - -- name: Build additional instances - hosts: localhost - gather_facts: no - tasks: - - name: Build instances request - community.general.sl_vm: - hostname: "{{ item.hostname }}" - domain: "{{ item.domain }}" - datacenter: "{{ item.datacenter }}" - tags: "{{ item.tags }}" - hourly: "{{ item.hourly }}" - private: "{{ item.private }}" - dedicated: "{{ item.dedicated }}" - local_disk: "{{ item.local_disk }}" - cpus: "{{ item.cpus }}" - memory: "{{ item.memory }}" - disks: "{{ item.disks }}" - os_code: "{{ item.os_code }}" - ssh_keys: "{{ item.ssh_keys }}" - wait: "{{ item.wait }}" - with_items: - - hostname: instance-2 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: True - - hostname: instance-3 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: yes - -- name: Cancel instances - hosts: localhost - gather_facts: no - tasks: - - name: Cancel by tag - community.general.sl_vm: - state: absent - tags: ansible-module-test -''' - -# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
-RETURN = '''# ''' - -import json -import time - -try: - import SoftLayer - from SoftLayer import VSManager - - HAS_SL = True - vsManager = VSManager(SoftLayer.create_client_from_env()) -except ImportError: - HAS_SL = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import string_types - - -# TODO: get this info from API -STATES = ['present', 'absent'] -DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02', - 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01', - 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04', - 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07'] -CPU_SIZES = [1, 2, 4, 8, 16, 32, 56] -MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] -INITIALDISK_SIZES = [25, 100] -LOCALDISK_SIZES = [25, 100, 150, 200, 300] -SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000] -NIC_SPEEDS = [10, 100, 1000] - - -def create_virtual_instance(module): - - instances = vsManager.list_instances( - hostname=module.params.get('hostname'), - domain=module.params.get('domain'), - datacenter=module.params.get('datacenter') - ) - - if instances: - return False, None - - # Check if OS or Image Template is provided (Can't be both, defaults to OS) - if (module.params.get('os_code') is not None and module.params.get('os_code') != ''): - module.params['image_id'] = '' - elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''): - module.params['os_code'] = '' - module.params['disks'] = [] # Blank out disks since it will use the template - else: - return False, None - - tags = module.params.get('tags') - if isinstance(tags, list): - tags = ','.join(map(str, module.params.get('tags'))) - - instance = vsManager.create_instance( - hostname=module.params.get('hostname'), - domain=module.params.get('domain'), - cpus=module.params.get('cpus'), - memory=module.params.get('memory'), - flavor=module.params.get('flavor'), - hourly=module.params.get('hourly'), - datacenter=module.params.get('datacenter'), - os_code=module.params.get('os_code'), - image_id=module.params.get('image_id'), - local_disk=module.params.get('local_disk'), - disks=module.params.get('disks'), - ssh_keys=module.params.get('ssh_keys'), - nic_speed=module.params.get('nic_speed'), - private=module.params.get('private'), - public_vlan=module.params.get('public_vlan'), - private_vlan=module.params.get('private_vlan'), - dedicated=module.params.get('dedicated'), - post_uri=module.params.get('post_uri'), - tags=tags, - ) - - if instance is not None and instance['id'] > 0: - return True, instance - else: - return False, None - - -def wait_for_instance(module, id): - instance = None - completed = False - wait_timeout = time.time() + module.params.get('wait_time') - while not completed and wait_timeout > time.time(): - try: - completed = vsManager.wait_for_ready(id, 10, 2) - if completed: - instance = vsManager.get_instance(id) - except Exception: - completed = False - - return completed, instance - - -def cancel_instance(module): - canceled = True - if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): - tags = module.params.get('tags') - if isinstance(tags, string_types): - tags = [module.params.get('tags')] - instances = 
vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain')) - for instance in instances: - try: - vsManager.cancel_instance(instance['id']) - except Exception: - canceled = False - elif module.params.get('instance_id') and module.params.get('instance_id') != 0: - try: - vsManager.cancel_instance(module.params.get('instance_id')) - except Exception: - canceled = False - else: - return False, None - - return canceled, None - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - instance_id=dict(type='str'), - hostname=dict(type='str'), - domain=dict(type='str'), - datacenter=dict(type='str', choices=DATACENTERS), - tags=dict(type='str'), - hourly=dict(type='bool', default=True), - private=dict(type='bool', default=False), - dedicated=dict(type='bool', default=False), - local_disk=dict(type='bool', default=True), - cpus=dict(type='int', choices=CPU_SIZES), - memory=dict(type='int', choices=MEMORY_SIZES), - flavor=dict(type='str'), - disks=dict(type='list', elements='int', default=[25]), - os_code=dict(type='str'), - image_id=dict(type='str'), - nic_speed=dict(type='int', choices=NIC_SPEEDS), - public_vlan=dict(type='str'), - private_vlan=dict(type='str'), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - post_uri=dict(type='str'), - state=dict(type='str', default='present', choices=STATES), - wait=dict(type='bool', default=True), - wait_time=dict(type='int', default=600), - ) - ) - - if not HAS_SL: - module.fail_json(msg='softlayer python library required for this module') - - if module.params.get('state') == 'absent': - (changed, instance) = cancel_instance(module) - - elif module.params.get('state') == 'present': - (changed, instance) = create_virtual_instance(module) - if module.params.get('wait') is True and instance: - (changed, instance) = wait_for_instance(module, instance['id']) - - module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py deleted file mode 100644 index da8f010229..0000000000 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ /dev/null @@ -1,1557 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -DOCUMENTATION = ''' ---- -module: spotinst_aws_elastigroup -short_description: Create, update or delete Spotinst AWS Elastigroups -author: Spotinst (@talzur) -description: - - Can create, update, or delete Spotinst AWS Elastigroups - Launch configuration is part of the elastigroup configuration, - so no additional modules are necessary for handling the launch configuration. - You will have to have a credentials file in this location - <home>/.spotinst/credentials - The credentials file must contain a row that looks like this - token = <YOUR TOKEN> - Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- -requirements: - - python >= 2.7 - - spotinst_sdk >= 1.0.38 -options: - - credentials_path: - description: - - Optional parameter that allows you to set a non-default credentials path. - default: ~/.spotinst/credentials - type: path - - account_id: - description: - - Optional parameter that allows you to set an account-id inside the module configuration.
- By default this is retrieved from the credentials path. - type: str - - availability_vs_cost: - description: - - The strategy orientation. - - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)." - required: true - type: str - - availability_zones: - description: - - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - name (String), - subnet_id (String), - placement_group_name (String), - required: true - type: list - elements: dict - - block_device_mappings: - description: - - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; - You can specify virtual devices and EBS volumes.; - '[{"key":"value", "key":"value"}]'; - keys allowed are - device_name (List of Strings), - virtual_name (String), - no_device (String), - ebs (Object, expects the following keys- - delete_on_termination(Boolean), - encrypted(Boolean), - iops (Integer), - snapshot_id(Integer), - volume_type(String), - volume_size(Integer)) - type: list - elements: dict - - chef: - description: - - The Chef integration configuration.; - Expects the following keys - chef_server (String), - organization (String), - user (String), - pem_key (String), - chef_version (String) - type: dict - - draining_timeout: - description: - - Time for instance to be drained from incoming requests and deregistered from ELB before termination. - type: int - - ebs_optimized: - description: - - Enable EBS optimization for supported instances which are not enabled by default.; - Note - additional charges will be applied. - type: bool - - ebs_volume_pool: - description: - - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - volume_ids (List of Strings), - device_name (String) - type: list - elements: dict - - ecs: - description: - - The ECS integration configuration.; - Expects the following key - - cluster_name (String) - type: dict - - elastic_ips: - description: - - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances - type: list - elements: str - - fallback_to_od: - description: - - In case of no spots available, Elastigroup will launch an On-demand instance instead - type: bool - - health_check_grace_period: - description: - - The amount of time, in seconds, after the instance has launched to start and check its health. - - If not specified, it defaults to C(300). - type: int - - health_check_unhealthy_duration_before_replacement: - description: - - Minimal amount of time an instance should be unhealthy for us to consider it unhealthy. - type: int - - health_check_type: - description: - - The service to use for the health check. - - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)." - type: str - - iam_role_name: - description: - - The instance profile iamRole name - - Only use iam_role_arn or iam_role_name - type: str - - iam_role_arn: - description: - - The instance profile iamRole arn - - Only use iam_role_arn or iam_role_name - type: str - - id: - description: - - The group id if it already exists and you want to update, or delete it. - This will not work unless the uniqueness_by field is set to id. - When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
- type: str - - image_id: - description: - - The image Id used to launch the instance.; - In case of conflict between Instance type and image type, an error will be returned - required: true - type: str - - key_pair: - description: - - Specify a Key Pair to attach to the instances - type: str - - kubernetes: - description: - - The Kubernetes integration configuration. - Expects the following keys - - api_server (String), - token (String) - type: dict - - lifetime_period: - description: - - Lifetime period - type: int - - load_balancers: - description: - - List of classic ELB names - type: list - elements: str - - max_size: - description: - - The upper limit number of instances that you can scale up to - required: true - type: int - - mesosphere: - description: - - The Mesosphere integration configuration. - Expects the following key - - api_server (String) - type: dict - - min_size: - description: - - The lower limit number of instances that you can scale down to - required: true - type: int - - monitoring: - description: - - Describes whether instance Enhanced Monitoring is enabled - type: str - - name: - description: - - Unique name for elastigroup to be created, updated or deleted - required: true - type: str - - network_interfaces: - description: - - A list of hash/dictionaries of network interfaces to add to the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - description (String), - device_index (Integer), - secondary_private_ip_address_count (Integer), - associate_public_ip_address (Boolean), - delete_on_termination (Boolean), - groups (List of Strings), - network_interface_id (String), - private_ip_address (String), - subnet_id (String), - associate_ipv6_address (Boolean), - private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) - type: list - elements: dict - - on_demand_count: - description: - - Required if risk is not set - - Number of on demand instances to launch. All other instances will be spot instances.; - Either set this parameter or the risk parameter - type: int - - on_demand_instance_type: - description: - - On-demand instance type that will be provisioned - type: str - - opsworks: - description: - - The elastigroup OpsWorks integration configration.; - Expects the following key - - layer_id (String) - type: dict - - persistence: - description: - - The Stateful elastigroup configration.; - Accepts the following keys - - should_persist_root_device (Boolean), - should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean) - type: dict - - product: - description: - - Operation system type. - - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))." - required: true - type: str - - rancher: - description: - - The Rancher integration configuration.; - Expects the following keys - - version (String), - access_key (String), - secret_key (String), - master_host (String) - type: dict - - right_scale: - description: - - The Rightscale integration configuration.; - Expects the following keys - - account_id (String), - refresh_token (String) - type: dict - - risk: - description: - - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100). - type: int - - roll_config: - description: - - Roll configuration.; - If you would like the group to roll after updating, please use this feature. 
- Accepts the following keys - - batch_size_percentage(Integer, Required), - grace_period - (Integer, Required), - health_check_type(String, Optional) - type: dict - - scheduled_tasks: - description: - - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - adjustment (Integer), - scale_target_capacity (Integer), - scale_min_capacity (Integer), - scale_max_capacity (Integer), - adjustment_percentage (Integer), - batch_size_percentage (Integer), - cron_expression (String), - frequency (String), - grace_period (Integer), - task_type (String, required), - is_enabled (Boolean) - type: list - elements: dict - - security_group_ids: - description: - - One or more security group IDs. ; - In case of update it will override the existing Security Group with the new given array - required: true - type: list - elements: str - - shutdown_script: - description: - - The Base64-encoded shutdown script that executes prior to instance termination. - Encode before setting. - type: str - - signals: - description: - - A list of hash/dictionaries of signals to configure in the elastigroup; - keys allowed are - - name (String, required), - timeout (Integer) - type: list - elements: dict - - spin_up_time: - description: - - Spin up time, in seconds, for the instance - type: int - - spot_instance_types: - description: - - Spot instance type that will be provisioned. - required: true - type: list - elements: str - - state: - choices: - - present - - absent - description: - - Create or delete the elastigroup - default: present - type: str - - tags: - description: - - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); - type: list - elements: dict - - target: - description: - - The number of instances to launch - required: true - type: int - - target_group_arns: - description: - - List of target group arns instances should be registered to - type: list - elements: str - - tenancy: - description: - - Dedicated vs shared tenancy. - - "The available choices are: C(default), C(dedicated)." - type: str - - terminate_at_end_of_billing_hour: - description: - - Terminate at the end of billing hour - type: bool - - unit: - description: - - The capacity unit to launch instances by. - - "The available choices are: C(instance), C(weight)." 
- type: str - - up_scaling_policies: - description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), - statistic (String, required) - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - min_target_capacity (String), - target (String), - maximum (String), - minimum (String) - type: list - elements: dict - - down_scaling_policies: - description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), - statistic (String, required), - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - max_target_capacity (String), - target (String), - maximum (String), - minimum (String) - type: list - elements: dict - - target_tracking_policies: - description: - - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - source (String, required), - metric_name (String, required), - statistic (String, required), - unit (String, required), - cooldown (String, required), - target (String, required) - type: list - elements: dict - - uniqueness_by: - choices: - - id - - name - description: - - If your group names are not unique, you may use this feature to update or delete a specific group. - Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. - default: name - type: str - - user_data: - description: - - Base64-encoded MIME user data. Encode before setting the value. - type: str - - utilize_reserved_instances: - description: - - In case of any available Reserved Instances, - Elastigroup will utilize your reservations before purchasing Spot instances. - type: bool - - wait_for_instances: - description: - - Whether or not the elastigroup creation / update actions should wait for the instances to spin - type: bool - default: false - - wait_timeout: - description: - - How long the module should wait for instances before failing the action.; - Only works if wait_for_instances is True. 
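# Editor's sketch (not module code): the create-vs-update decision described by the
# id/uniqueness_by options above, condensed from handle_elastigroup() further down.
# Group dicts are assumed to match the shape returned by client.get_elastigroups().
def should_create_group(uniqueness_by, external_id, groups, name):
    if uniqueness_by == 'id':
        # Only update/delete when an explicit id was passed; otherwise create.
        return external_id is None, external_id
    for group in groups:
        if group['name'] == name:
            return False, group.get('id')
    return True, None

print(should_create_group('name', None, [{'name': 'g1', 'id': 'sig-1'}], 'g1'))  # (False, 'sig-1')
print(should_create_group('id', None, [], 'g1'))                                 # (True, None)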
- type: int - -''' -EXAMPLES = ''' -# Basic configuration YAML example - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - -# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/sda1' - ebs: - volume_size: 100 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: True - wait_timeout: 600 - register: result - - - name: Store private ips to file - ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips - with_items: "{{ result.instances }}" - - ansible.builtin.debug: var=result - -# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id -# In organizations with more than one account, it is required to specify an account_id - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: True - wait_timeout: 600 - register: result - - - name: Store private ips to file - ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips - with_items: "{{ result.instances }}" - - ansible.builtin.debug: var=result - -# In this example we have set up block device mapping with ephemeral devices - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - 
unit: instance - block_device_mappings: - - device_name: '/dev/xvda' - virtual_name: ephemeral0 - - device_name: '/dev/xvdb/' - virtual_name: ephemeral1 - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - -# In this example we create a basic group configuration with a network interface defined. -# Each network interface must have a device index - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - network_interfaces: - - associate_public_ip_address: true - device_index: 0 - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - - -# In this example we create a basic group configuration with a target tracking scaling policy defined - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - account_id: act-92d45673 - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-79da021e - image_id: ami-f173cc91 - fallback_to_od: true - tags: - - Creator: ValueOfCreatorTag - - Environment: ValueOfEnvironmentTag - key_pair: spotinst-labs-oregon - max_size: 10 - min_size: 0 - target: 2 - unit: instance - monitoring: True - name: ansible-group-1 - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-46cdc13d - spot_instance_types: - - c3.large - target_tracking_policies: - - policy_name: target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - do_not_update: - - image_id - register: result - - ansible.builtin.debug: var=result -''' - -RETURN = ''' ---- -instances: - description: List of active elastigroup instances and their details. - returned: success - type: dict - sample: [ - { - "spotInstanceRequestId": "sir-regs25zp", - "instanceId": "i-09640ad8678234c", - "instanceType": "m4.large", - "product": "Linux/UNIX", - "availabilityZone": "us-west-2b", - "privateIp": "180.0.2.244", - "createdAt": "2017-07-17T12:46:18.000Z", - "status": "fulfilled" - } - ] -group_id: - description: Created / Updated group's ID. 
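# Editor's sketch of the wait_for_instances polling pattern that produces the
# instances list documented here (see retrieve_group_instances() below): poll an API
# callable until `target` instances report a private IP or wait_timeout elapses.
# `fetch` is a stand-in for client.get_elastigroup_active_instances.
import time

def poll_until_fulfilled(fetch, target, wait_timeout=300, interval=10):
    deadline = time.time() + wait_timeout
    fulfilled = []
    while time.time() < deadline:
        fulfilled = [i for i in fetch() if i.get('private_ip') is not None]
        if len(fulfilled) >= target:
            break
        time.sleep(interval)
    return fulfilled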
- returned: success - type: str - sample: "sig-12345" - -''' - -HAS_SPOTINST_SDK = False -__metaclass__ = type - -import os -import time -from ansible.module_utils.basic import AnsibleModule - -try: - import spotinst_sdk as spotinst - from spotinst_sdk import SpotinstClientException - - HAS_SPOTINST_SDK = True - -except ImportError: - pass - -eni_fields = ('description', - 'device_index', - 'secondary_private_ip_address_count', - 'associate_public_ip_address', - 'delete_on_termination', - 'groups', - 'network_interface_id', - 'private_ip_address', - 'subnet_id', - 'associate_ipv6_address') - -private_ip_fields = ('private_ip_address', - 'primary') - -capacity_fields = (dict(ansible_field_name='min_size', - spotinst_field_name='minimum'), - dict(ansible_field_name='max_size', - spotinst_field_name='maximum'), - 'target', - 'unit') - -lspec_fields = ('user_data', - 'key_pair', - 'tenancy', - 'shutdown_script', - 'monitoring', - 'ebs_optimized', - 'image_id', - 'health_check_type', - 'health_check_grace_period', - 'health_check_unhealthy_duration_before_replacement', - 'security_group_ids') - -iam_fields = (dict(ansible_field_name='iam_role_name', - spotinst_field_name='name'), - dict(ansible_field_name='iam_role_arn', - spotinst_field_name='arn')) - -scheduled_task_fields = ('adjustment', - 'adjustment_percentage', - 'batch_size_percentage', - 'cron_expression', - 'frequency', - 'grace_period', - 'task_type', - 'is_enabled', - 'scale_target_capacity', - 'scale_min_capacity', - 'scale_max_capacity') - -scaling_policy_fields = ('policy_name', - 'namespace', - 'metric_name', - 'dimensions', - 'statistic', - 'evaluation_periods', - 'period', - 'threshold', - 'cooldown', - 'unit', - 'operator') - -tracking_policy_fields = ('policy_name', - 'namespace', - 'source', - 'metric_name', - 'statistic', - 'unit', - 'cooldown', - 'target', - 'threshold') - -action_fields = (dict(ansible_field_name='action_type', - spotinst_field_name='type'), - 'adjustment', - 'min_target_capacity', - 'max_target_capacity', - 'target', - 'minimum', - 'maximum') - -signal_fields = ('name', - 'timeout') - -multai_lb_fields = ('balancer_id', - 'project_id', - 'target_set_id', - 'az_awareness', - 'auto_weight') - -persistence_fields = ('should_persist_root_device', - 'should_persist_block_devices', - 'should_persist_private_ip') - -strategy_fields = ('risk', - 'utilize_reserved_instances', - 'fallback_to_od', - 'on_demand_count', - 'availability_vs_cost', - 'draining_timeout', - 'spin_up_time', - 'lifetime_period') - -ebs_fields = ('delete_on_termination', - 'encrypted', - 'iops', - 'snapshot_id', - 'volume_type', - 'volume_size') - -bdm_fields = ('device_name', - 'virtual_name', - 'no_device') - -kubernetes_fields = ('api_server', - 'token') - -right_scale_fields = ('account_id', - 'refresh_token') - -rancher_fields = ('access_key', - 'secret_key', - 'master_host', - 'version') - -chef_fields = ('chef_server', - 'organization', - 'user', - 'pem_key', - 'chef_version') - -az_fields = ('name', - 'subnet_id', - 'placement_group_name') - -opsworks_fields = ('layer_id',) - -scaling_strategy_fields = ('terminate_at_end_of_billing_hour',) - -mesosphere_fields = ('api_server',) - -ecs_fields = ('cluster_name',) - -multai_fields = ('multai_token',) - - -def handle_elastigroup(client, module): - has_changed = False - group_id = None - message = 'None' - - name = module.params.get('name') - state = module.params.get('state') - uniqueness_by = module.params.get('uniqueness_by') - external_group_id = module.params.get('id') - - if 
uniqueness_by == 'id': - if external_group_id is None: - should_create = True - else: - should_create = False - group_id = external_group_id - else: - groups = client.get_elastigroups() - should_create, group_id = find_group_with_same_name(groups, name) - - if should_create is True: - if state == 'present': - eg = expand_elastigroup(module, is_update=False) - module.debug(str(" [INFO] " + message + "\n")) - group = client.create_elastigroup(group=eg) - group_id = group['id'] - message = 'Created group Successfully.' - has_changed = True - - elif state == 'absent': - message = 'Cannot delete non-existent group.' - has_changed = False - else: - eg = expand_elastigroup(module, is_update=True) - - if state == 'present': - group = client.update_elastigroup(group_update=eg, group_id=group_id) - message = 'Updated group successfully.' - - try: - roll_config = module.params.get('roll_config') - if roll_config: - eg_roll = spotinst.aws_elastigroup.Roll( - batch_size_percentage=roll_config.get('batch_size_percentage'), - grace_period=roll_config.get('grace_period'), - health_check_type=roll_config.get('health_check_type') - ) - roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id) - message = 'Updated and started rolling the group successfully.' - - except SpotinstClientException as exc: - message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc) - has_changed = True - - elif state == 'absent': - try: - client.delete_elastigroup(group_id=group_id) - except SpotinstClientException as exc: - if "GROUP_DOESNT_EXIST" in exc.message: - pass - else: - module.fail_json(msg="Error while attempting to delete group : " + exc.message) - - message = 'Deleted group successfully.' - has_changed = True - - return group_id, message, has_changed - - -def retrieve_group_instances(client, module, group_id): - wait_timeout = module.params.get('wait_timeout') - wait_for_instances = module.params.get('wait_for_instances') - - health_check_type = module.params.get('health_check_type') - - if wait_timeout is None: - wait_timeout = 300 - - wait_timeout = time.time() + wait_timeout - target = module.params.get('target') - state = module.params.get('state') - instances = list() - - if state == 'present' and group_id is not None and wait_for_instances is True: - - is_amount_fulfilled = False - while is_amount_fulfilled is False and wait_timeout > time.time(): - instances = list() - amount_of_fulfilled_instances = 0 - - if health_check_type is not None: - healthy_instances = client.get_instance_healthiness(group_id=group_id) - - for healthy_instance in healthy_instances: - if healthy_instance.get('healthStatus') == 'HEALTHY': - amount_of_fulfilled_instances += 1 - instances.append(healthy_instance) - - else: - active_instances = client.get_elastigroup_active_instances(group_id=group_id) - - for active_instance in active_instances: - if active_instance.get('private_ip') is not None: - amount_of_fulfilled_instances += 1 - instances.append(active_instance) - - if amount_of_fulfilled_instances >= target: - is_amount_fulfilled = True - - time.sleep(10) - - return instances - - -def find_group_with_same_name(groups, name): - for group in groups: - if group['name'] == name: - return False, group.get('id') - - return True, None - - -def expand_elastigroup(module, is_update): - do_not_update = module.params['do_not_update'] - name = module.params.get('name') - - eg = spotinst.aws_elastigroup.Elastigroup() - description = module.params.get('description') - - if name is not None: - eg.name = 
name - if description is not None: - eg.description = description - - # Capacity - expand_capacity(eg, module, is_update, do_not_update) - # Strategy - expand_strategy(eg, module) - # Scaling - expand_scaling(eg, module) - # Third party integrations - expand_integrations(eg, module) - # Compute - expand_compute(eg, module, is_update, do_not_update) - # Multai - expand_multai(eg, module) - # Scheduling - expand_scheduled_tasks(eg, module) - - return eg - - -def expand_compute(eg, module, is_update, do_not_update): - elastic_ips = module.params['elastic_ips'] - on_demand_instance_type = module.params.get('on_demand_instance_type') - spot_instance_types = module.params['spot_instance_types'] - ebs_volume_pool = module.params['ebs_volume_pool'] - availability_zones_list = module.params['availability_zones'] - product = module.params.get('product') - - eg_compute = spotinst.aws_elastigroup.Compute() - - if product is not None: - # Only put product on group creation - if is_update is not True: - eg_compute.product = product - - if elastic_ips is not None: - eg_compute.elastic_ips = elastic_ips - - if on_demand_instance_type or spot_instance_types is not None: - eg_instance_types = spotinst.aws_elastigroup.InstanceTypes() - - if on_demand_instance_type is not None: - eg_instance_types.spot = spot_instance_types - if spot_instance_types is not None: - eg_instance_types.ondemand = on_demand_instance_type - - if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None: - eg_compute.instance_types = eg_instance_types - - expand_ebs_volume_pool(eg_compute, ebs_volume_pool) - - eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone') - - expand_launch_spec(eg_compute, module, is_update, do_not_update) - - eg.compute = eg_compute - - -def expand_ebs_volume_pool(eg_compute, ebs_volumes_list): - if ebs_volumes_list is not None: - eg_volumes = [] - - for volume in ebs_volumes_list: - eg_volume = spotinst.aws_elastigroup.EbsVolume() - - if volume.get('device_name') is not None: - eg_volume.device_name = volume.get('device_name') - if volume.get('volume_ids') is not None: - eg_volume.volume_ids = volume.get('volume_ids') - - if eg_volume.device_name is not None: - eg_volumes.append(eg_volume) - - if len(eg_volumes) > 0: - eg_compute.ebs_volume_pool = eg_volumes - - -def expand_launch_spec(eg_compute, module, is_update, do_not_update): - eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification') - - if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None: - eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole') - - tags = module.params['tags'] - load_balancers = module.params['load_balancers'] - target_group_arns = module.params['target_group_arns'] - block_device_mappings = module.params['block_device_mappings'] - network_interfaces = module.params['network_interfaces'] - - if is_update is True: - if 'image_id' in do_not_update: - delattr(eg_launch_spec, 'image_id') - - expand_tags(eg_launch_spec, tags) - - expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns) - - expand_block_device_mappings(eg_launch_spec, block_device_mappings) - - expand_network_interfaces(eg_launch_spec, network_interfaces) - - eg_compute.launch_specification = eg_launch_spec - - -def expand_integrations(eg, module): - rancher = module.params.get('rancher') - mesosphere = module.params.get('mesosphere') - ecs = module.params.get('ecs') - kubernetes = 
module.params.get('kubernetes') - right_scale = module.params.get('right_scale') - opsworks = module.params.get('opsworks') - chef = module.params.get('chef') - - integration_exists = False - - eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations() - - if mesosphere is not None: - eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere') - integration_exists = True - - if ecs is not None: - eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration') - integration_exists = True - - if kubernetes is not None: - eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration') - integration_exists = True - - if right_scale is not None: - eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration') - integration_exists = True - - if opsworks is not None: - eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration') - integration_exists = True - - if rancher is not None: - eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher') - integration_exists = True - - if chef is not None: - eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration') - integration_exists = True - - if integration_exists: - eg.third_parties_integration = eg_integrations - - -def expand_capacity(eg, module, is_update, do_not_update): - eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity') - - if is_update is True: - delattr(eg_capacity, 'unit') - - if 'target' in do_not_update: - delattr(eg_capacity, 'target') - - eg.capacity = eg_capacity - - -def expand_strategy(eg, module): - persistence = module.params.get('persistence') - signals = module.params.get('signals') - - eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy') - - terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour') - - if terminate_at_end_of_billing_hour is not None: - eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, - module.params, 'ScalingStrategy') - - if persistence is not None: - eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence') - - if signals is not None: - eg_signals = expand_list(signals, signal_fields, 'Signal') - - if len(eg_signals) > 0: - eg_strategy.signals = eg_signals - - eg.strategy = eg_strategy - - -def expand_multai(eg, module): - multai_load_balancers = module.params.get('multai_load_balancers') - - eg_multai = expand_fields(multai_fields, module.params, 'Multai') - - if multai_load_balancers is not None: - eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer') - - if len(eg_multai_load_balancers) > 0: - eg_multai.balancers = eg_multai_load_balancers - eg.multai = eg_multai - - -def expand_scheduled_tasks(eg, module): - scheduled_tasks = module.params.get('scheduled_tasks') - - if scheduled_tasks is not None: - eg_scheduling = spotinst.aws_elastigroup.Scheduling() - - eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask') - - if len(eg_tasks) > 0: - eg_scheduling.tasks = eg_tasks - eg.scheduling = eg_scheduling - - -def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): - if load_balancers is not None or target_group_arns is not None: - eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() - eg_total_lbs = [] - - if load_balancers is not None: - for elb_name in load_balancers: - eg_elb = 
spotinst.aws_elastigroup.LoadBalancer() - if elb_name is not None: - eg_elb.name = elb_name - eg_elb.type = 'CLASSIC' - eg_total_lbs.append(eg_elb) - - if target_group_arns is not None: - for target_arn in target_group_arns: - eg_elb = spotinst.aws_elastigroup.LoadBalancer() - if target_arn is not None: - eg_elb.arn = target_arn - eg_elb.type = 'TARGET_GROUP' - eg_total_lbs.append(eg_elb) - - if len(eg_total_lbs) > 0: - eg_load_balancers_config.load_balancers = eg_total_lbs - eg_launchspec.load_balancers_config = eg_load_balancers_config - - -def expand_tags(eg_launchspec, tags): - if tags is not None: - eg_tags = [] - - for tag in tags: - eg_tag = spotinst.aws_elastigroup.Tag() - if tag: - eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0] - - eg_tags.append(eg_tag) - - if len(eg_tags) > 0: - eg_launchspec.tags = eg_tags - - -def expand_block_device_mappings(eg_launchspec, bdms): - if bdms is not None: - eg_bdms = [] - - for bdm in bdms: - eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping') - - if bdm.get('ebs') is not None: - eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS') - - eg_bdms.append(eg_bdm) - - if len(eg_bdms) > 0: - eg_launchspec.block_device_mappings = eg_bdms - - -def expand_network_interfaces(eg_launchspec, enis): - if enis is not None: - eg_enis = [] - - for eni in enis: - eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface') - - eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress') - - if eg_pias is not None: - eg_eni.private_ip_addresses = eg_pias - - eg_enis.append(eg_eni) - - if len(eg_enis) > 0: - eg_launchspec.network_interfaces = eg_enis - - -def expand_scaling(eg, module): - up_scaling_policies = module.params['up_scaling_policies'] - down_scaling_policies = module.params['down_scaling_policies'] - target_tracking_policies = module.params['target_tracking_policies'] - - eg_scaling = spotinst.aws_elastigroup.Scaling() - - if up_scaling_policies is not None: - eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies) - if len(eg_up_scaling_policies) > 0: - eg_scaling.up = eg_up_scaling_policies - - if down_scaling_policies is not None: - eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies) - if len(eg_down_scaling_policies) > 0: - eg_scaling.down = eg_down_scaling_policies - - if target_tracking_policies is not None: - eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies) - if len(eg_target_tracking_policies) > 0: - eg_scaling.target = eg_target_tracking_policies - - if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None: - eg.scaling = eg_scaling - - -def expand_list(items, fields, class_name): - if items is not None: - new_objects_list = [] - for item in items: - new_obj = expand_fields(fields, item, class_name) - new_objects_list.append(new_obj) - - return new_objects_list - - -def expand_fields(fields, item, class_name): - class_ = getattr(spotinst.aws_elastigroup, class_name) - new_obj = class_() - - # Handle primitive fields - if item is not None: - for field in fields: - if isinstance(field, dict): - ansible_field_name = field['ansible_field_name'] - spotinst_field_name = field['spotinst_field_name'] - else: - ansible_field_name = field - spotinst_field_name = field - if item.get(ansible_field_name) is not None: - setattr(new_obj, spotinst_field_name, item.get(ansible_field_name)) - - return new_obj - - -def expand_scaling_policies(scaling_policies): - eg_scaling_policies = [] - - for 
policy in scaling_policies: - eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy') - eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction') - eg_scaling_policies.append(eg_policy) - - return eg_scaling_policies - - -def expand_target_tracking_policies(tracking_policies): - eg_tracking_policies = [] - - for policy in tracking_policies: - eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy') - eg_tracking_policies.append(eg_policy) - - return eg_tracking_policies - - -def main(): - fields = dict( - account_id=dict(type='str'), - availability_vs_cost=dict(type='str', required=True), - availability_zones=dict(type='list', elements='dict', required=True), - block_device_mappings=dict(type='list', elements='dict'), - chef=dict(type='dict'), - credentials_path=dict(type='path', default="~/.spotinst/credentials"), - do_not_update=dict(default=[], type='list'), - down_scaling_policies=dict(type='list', elements='dict'), - draining_timeout=dict(type='int'), - ebs_optimized=dict(type='bool'), - ebs_volume_pool=dict(type='list', elements='dict'), - ecs=dict(type='dict'), - elastic_beanstalk=dict(type='dict'), - elastic_ips=dict(type='list', elements='str'), - fallback_to_od=dict(type='bool'), - id=dict(type='str'), - health_check_grace_period=dict(type='int'), - health_check_type=dict(type='str'), - health_check_unhealthy_duration_before_replacement=dict(type='int'), - iam_role_arn=dict(type='str'), - iam_role_name=dict(type='str'), - image_id=dict(type='str', required=True), - key_pair=dict(type='str', no_log=False), - kubernetes=dict(type='dict'), - lifetime_period=dict(type='int'), - load_balancers=dict(type='list', elements='str'), - max_size=dict(type='int', required=True), - mesosphere=dict(type='dict'), - min_size=dict(type='int', required=True), - monitoring=dict(type='str'), - multai_load_balancers=dict(type='list'), - multai_token=dict(type='str', no_log=True), - name=dict(type='str', required=True), - network_interfaces=dict(type='list', elements='dict'), - on_demand_count=dict(type='int'), - on_demand_instance_type=dict(type='str'), - opsworks=dict(type='dict'), - persistence=dict(type='dict'), - product=dict(type='str', required=True), - rancher=dict(type='dict'), - right_scale=dict(type='dict'), - risk=dict(type='int'), - roll_config=dict(type='dict'), - scheduled_tasks=dict(type='list', elements='dict'), - security_group_ids=dict(type='list', elements='str', required=True), - shutdown_script=dict(type='str'), - signals=dict(type='list', elements='dict'), - spin_up_time=dict(type='int'), - spot_instance_types=dict(type='list', elements='str', required=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', elements='dict'), - target=dict(type='int', required=True), - target_group_arns=dict(type='list', elements='str'), - tenancy=dict(type='str'), - terminate_at_end_of_billing_hour=dict(type='bool'), - token=dict(type='str', no_log=True), - unit=dict(type='str'), - user_data=dict(type='str'), - utilize_reserved_instances=dict(type='bool'), - uniqueness_by=dict(default='name', choices=['name', 'id']), - up_scaling_policies=dict(type='list', elements='dict'), - target_tracking_policies=dict(type='list', elements='dict'), - wait_for_instances=dict(type='bool', default=False), - wait_timeout=dict(type='int') - ) - - module = AnsibleModule(argument_spec=fields) - - if not HAS_SPOTINST_SDK: - module.fail_json(msg="the Spotinst SDK library is required. 
(pip install spotinst_sdk)") - - # Retrieve creds file variables - creds_file_loaded_vars = dict() - - credentials_path = module.params.get('credentials_path') - - try: - with open(credentials_path, "r") as creds: - for line in creds: - eq_index = line.find('=') - var_name = line[:eq_index].strip() - string_value = line[eq_index + 1:].strip() - creds_file_loaded_vars[var_name] = string_value - except IOError: - pass - # End of creds file retrieval - - token = module.params.get('token') - if not token: - token = os.environ.get('SPOTINST_TOKEN') - if not token: - token = creds_file_loaded_vars.get("token") - - account = module.params.get('account_id') - if not account: - account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT') - if not account: - account = creds_file_loaded_vars.get("account") - - client = spotinst.SpotinstClient(auth_token=token, print_output=False) - - if account is not None: - client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account) - - group_id, message, has_changed = handle_elastigroup(client=client, module=module) - - instances = retrieve_group_instances(client=client, module=module, group_id=group_id) - - module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py deleted file mode 100644 index 4e7aa70b32..0000000000 --- a/plugins/modules/cloud/univention/udm_dns_record.py +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_dns_record -author: -- Tobias Rüetschi (@keachi) -short_description: Manage dns entries on a univention corporate server -description: - - "This module allows to manage dns records on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 - - Univention - - ipaddress (for I(type=ptr_record)) -options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns record is present or not. - name: - type: str - required: true - description: - - "Name of the record, this is also the DNS record. E.g. www for - www.example.com." - - For PTR records this has to be the IP address. - zone: - type: str - required: true - description: - - Corresponding DNS zone for this record, e.g. example.com. - - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)). - type: - type: str - required: true - description: - - "Define the record type. C(host_record) is a A or AAAA record, - C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record) - is a SRV record and C(txt_record) is a TXT record." - - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)." - data: - type: dict - default: {} - description: - - "Additional data for this record, e.g. ['a': '192.0.2.1']. - Required if C(state=present)." 
-''' - - -EXAMPLES = ''' -- name: Create a DNS record on a UCS - community.general.udm_dns_record: - name: www - zone: example.com - type: host_record - data: - a: - - 192.0.2.1 - - 2001:0db8::42 - -- name: Create a DNS v4 PTR record on a UCS - community.general.udm_dns_record: - name: 192.0.2.1 - zone: 2.0.192.in-addr.arpa - type: ptr_record - data: - ptr_record: "www.example.com." - -- name: Create a DNS v6 PTR record on a UCS - community.general.udm_dns_record: - name: 2001:db8:0:0:0:ff00:42:8329 - zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa - type: ptr_record - data: - ptr_record: "www.example.com." -''' - - -RETURN = '''#''' - -HAVE_UNIVENTION = False -HAVE_IPADDRESS = False -try: - from univention.admin.handlers.dns import ( - forward_zone, - reverse_zone, - ) - HAVE_UNIVENTION = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.basic import missing_required_lib -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, - config, - uldap, -) -try: - import ipaddress - HAVE_IPADDRESS = True -except ImportError: - pass - - -def main(): - module = AnsibleModule( - argument_spec=dict( - type=dict(required=True, - type='str'), - zone=dict(required=True, - type='str'), - name=dict(required=True, - type='str'), - data=dict(default={}, - type='dict'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['data']) - ]) - ) - - if not HAVE_UNIVENTION: - module.fail_json(msg="This module requires univention python bindings") - - type = module.params['type'] - zone = module.params['zone'] - name = module.params['name'] - data = module.params['data'] - state = module.params['state'] - changed = False - diff = None - - workname = name - if type == 'ptr_record': - if not HAVE_IPADDRESS: - module.fail_json(msg=missing_required_lib('ipaddress')) - try: - if 'arpa' not in zone: - raise Exception("Zone must be reversed zone for ptr_record. (e.g. 
1.1.192.in-addr.arpa)") - ipaddr_rev = ipaddress.ip_address(name).reverse_pointer - subnet_offset = ipaddr_rev.find(zone) - if subnet_offset == -1: - raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev)) - workname = ipaddr_rev[0:subnet_offset - 1] - except Exception as e: - module.fail_json( - msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e) - ) - - obj = list(ldap_search( - '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname), - attr=['dNSZone'] - )) - exists = bool(len(obj)) - container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn()) - dn = 'relativeDomainName={0},{1}'.format(workname, container) - - if state == 'present': - try: - if not exists: - so = forward_zone.lookup( - config(), - uldap(), - '(zone={0})'.format(zone), - scope='domain', - ) or reverse_zone.lookup( - config(), - uldap(), - '(zoneName={0})'.format(zone), - scope='domain', - ) - if len(so) == 0: - raise Exception("Did not find zone '{0}' in Univention".format(zone)) - obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0]) - else: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - - if type == 'ptr_record': - obj['ip'] = name - obj['address'] = workname - else: - obj['name'] = name - - for k, v in data.items(): - obj[k] = v - diff = obj.diff() - changed = obj.diff() != [] - if not module.check_mode: - if not exists: - obj.create() - else: - obj.modify() - except Exception as e: - module.fail_json( - msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception as e: - module.fail_json( - msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e) - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/univention/udm_dns_zone.py b/plugins/modules/cloud/univention/udm_dns_zone.py deleted file mode 100644 index f1cea87e4f..0000000000 --- a/plugins/modules/cloud/univention/udm_dns_zone.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_dns_zone -author: -- Tobias Rüetschi (@keachi) -short_description: Manage dns zones on a univention corporate server -description: - - "This module allows to manage dns zones on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns zone is present or not. - type: - type: str - required: true - description: - - Define if the zone is a forward or reverse DNS zone. - - "The available choices are: C(forward_zone), C(reverse_zone)." - zone: - type: str - required: true - description: - - DNS zone name, e.g. C(example.com). - aliases: [name] - nameserver: - type: list - elements: str - description: - - List of appropriate name servers. Required if C(state=present). 
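# Editor's note on udm_dns_record above: a standalone illustration of its PTR name
# computation, using only the stdlib ipaddress module. The reversed address must end
# with the zone; the leading remainder becomes the relative domain name.
import ipaddress

zone = '2.0.192.in-addr.arpa'
rev = ipaddress.ip_address('192.0.2.1').reverse_pointer  # '1.2.0.192.in-addr.arpa'
offset = rev.find(zone)                                  # -1 would mean: not in this zone
workname = rev[0:offset - 1]
print(workname)                                          # '1' -> relativeDomainName inside the zone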
- interfaces: - type: list - elements: str - description: - - List of interface IP addresses, on which the server should - response this zone. Required if C(state=present). - - refresh: - type: int - default: 3600 - description: - - Interval before the zone should be refreshed. - retry: - type: int - default: 1800 - description: - - Interval that should elapse before a failed refresh should be retried. - expire: - type: int - default: 604800 - description: - - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. - ttl: - type: int - default: 600 - description: - - Minimum TTL field that should be exported with any RR from this zone. - - contact: - type: str - default: '' - description: - - Contact person in the SOA record. - mx: - type: list - elements: str - default: [] - description: - - List of MX servers. (Must declared as A or AAAA records). -''' - - -EXAMPLES = ''' -- name: Create a DNS zone on a UCS - community.general.udm_dns_zone: - zone: example.com - type: forward_zone - nameserver: - - ucs.example.com - interfaces: - - 192.0.2.1 -''' - - -RETURN = '''# ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def convert_time(time): - """Convert a time in seconds into the biggest unit""" - units = [ - (24 * 60 * 60, 'days'), - (60 * 60, 'hours'), - (60, 'minutes'), - (1, 'seconds'), - ] - - if time == 0: - return ('0', 'seconds') - for unit in units: - if time >= unit[0]: - return ('{0}'.format(time // unit[0]), unit[1]) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - type=dict(required=True, - type='str'), - zone=dict(required=True, - aliases=['name'], - type='str'), - nameserver=dict(default=[], - type='list', - elements='str'), - interfaces=dict(default=[], - type='list', - elements='str'), - refresh=dict(default=3600, - type='int'), - retry=dict(default=1800, - type='int'), - expire=dict(default=604800, - type='int'), - ttl=dict(default=600, - type='int'), - contact=dict(default='', - type='str'), - mx=dict(default=[], - type='list', - elements='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['nameserver', 'interfaces']) - ]) - ) - type = module.params['type'] - zone = module.params['zone'] - nameserver = module.params['nameserver'] - interfaces = module.params['interfaces'] - refresh = module.params['refresh'] - retry = module.params['retry'] - expire = module.params['expire'] - ttl = module.params['ttl'] - contact = module.params['contact'] - mx = module.params['mx'] - state = module.params['state'] - changed = False - diff = None - - obj = list(ldap_search( - '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone), - attr=['dNSZone'] - )) - - exists = bool(len(obj)) - container = 'cn=dns,{0}'.format(base_dn()) - dn = 'zoneName={0},{1}'.format(zone, container) - if contact == '': - contact = 'root@{0}.'.format(zone) - - if state == 'present': - try: - if not exists: - obj = umc_module_for_add('dns/{0}'.format(type), container) - else: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - obj['zone'] = zone - obj['nameserver'] = nameserver - obj['a'] = interfaces - obj['refresh'] = convert_time(refresh) - obj['retry'] = convert_time(retry) - obj['expire'] = convert_time(expire) - obj['ttl'] = convert_time(ttl) - 
obj['contact'] = contact - obj['mx'] = mx - diff = obj.diff() - if exists: - for k in obj.keys(): - if obj.hasChanged(k): - changed = True - else: - changed = True - if not module.check_mode: - if not exists: - obj.create() - elif changed: - obj.modify() - except Exception as e: - module.fail_json( - msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception as e: - module.fail_json( - msg='Removing dns zone {0} failed: {1}'.format(zone, e) - ) - - module.exit_json( - changed=changed, - diff=diff, - zone=zone - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/univention/udm_group.py b/plugins/modules/cloud/univention/udm_group.py deleted file mode 100644 index d20187c628..0000000000 --- a/plugins/modules/cloud/univention/udm_group.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_group -author: -- Tobias Rüetschi (@keachi) -short_description: Manage of the posix group -description: - - "This module allows to manage user groups on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the group is present or not. - type: str - name: - required: true - description: - - Name of the posix group. - type: str - description: - required: false - description: - - Group description. - type: str - position: - required: false - description: - - define the whole ldap position of the group, e.g. - C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com). - type: str - ou: - required: false - description: - - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com). - type: str - subpath: - required: false - description: - - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups). 
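# Editor's note on udm_dns_zone above: convert_time() picks the largest whole unit for
# an interval because UDM stores the SOA timers as (value, unit) pairs. Below is a
# renamed copy of the helper so the illustration runs standalone; note that any
# remainder is truncated.
def convert_time(seconds):
    units = [(24 * 60 * 60, 'days'), (60 * 60, 'hours'), (60, 'minutes'), (1, 'seconds')]
    if seconds == 0:
        return ('0', 'seconds')
    for factor, name in units:
        if seconds >= factor:
            return ('{0}'.format(seconds // factor), name)

print(convert_time(3600))    # ('1', 'hours')   -> the refresh default
print(convert_time(604800))  # ('7', 'days')    -> the expire default
print(convert_time(5400))    # ('1', 'hours')   -> 90 minutes truncated to 1 hour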
- type: str - default: "cn=groups" -''' - - -EXAMPLES = ''' -- name: Create a POSIX group - community.general.udm_group: - name: g123m-1A - -# Create a POSIX group with the exact DN -# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com) -- name: Create a POSIX group with a DN - community.general.udm_group: - name: g123m-1A - subpath: 'cn=classes,cn=students,cn=groups' - ou: school - -# or -- name: Create a POSIX group with a DN - community.general.udm_group: - name: g123m-1A - position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com' -''' - - -RETURN = '''# ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, - type='str'), - description=dict(default=None, - type='str'), - position=dict(default='', - type='str'), - ou=dict(default='', - type='str'), - subpath=dict(default='cn=groups', - type='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True - ) - name = module.params['name'] - description = module.params['description'] - position = module.params['position'] - ou = module.params['ou'] - subpath = module.params['subpath'] - state = module.params['state'] - changed = False - diff = None - - groups = list(ldap_search( - '(&(objectClass=posixGroup)(cn={0}))'.format(name), - attr=['cn'] - )) - if position != '': - container = position - else: - if ou != '': - ou = 'ou={0},'.format(ou) - if subpath != '': - subpath = '{0},'.format(subpath) - container = '{0}{1}{2}'.format(subpath, ou, base_dn()) - group_dn = 'cn={0},{1}'.format(name, container) - - exists = bool(len(groups)) - - if state == 'present': - try: - if not exists: - grp = umc_module_for_add('groups/group', container) - else: - grp = umc_module_for_edit('groups/group', group_dn) - grp['name'] = name - grp['description'] = description - diff = grp.diff() - changed = grp.diff() != [] - if not module.check_mode: - if not exists: - grp.create() - else: - grp.modify() - except Exception: - module.fail_json( - msg="Creating/editing group {0} in {1} failed".format(name, container) - ) - - if state == 'absent' and exists: - try: - grp = umc_module_for_edit('groups/group', group_dn) - if not module.check_mode: - grp.remove() - changed = True - except Exception: - module.fail_json( - msg="Removing group {0} failed".format(name) - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/univention/udm_share.py b/plugins/modules/cloud/univention/udm_share.py deleted file mode 100644 index fb86d83666..0000000000 --- a/plugins/modules/cloud/univention/udm_share.py +++ /dev/null @@ -1,576 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_share -author: -- Tobias Rüetschi (@keachi) -short_description: Manage samba shares on a univention corporate server -description: - - "This module allows to manage samba shares on a univention corporate - server (UCS). 
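# Editor's sketch (not module code) of how udm_group above resolves the target
# container: an explicit position wins outright; otherwise subpath and ou are
# prepended to the LDAP base DN. The base value here is a stand-in for base_dn().
def build_container(position, ou, subpath, base='dc=example,dc=com'):
    if position:
        return position
    ou_part = 'ou={0},'.format(ou) if ou else ''
    sub_part = '{0},'.format(subpath) if subpath else ''
    return '{0}{1}{2}'.format(sub_part, ou_part, base)

print(build_container('', 'school', 'cn=groups'))
# -> cn=groups,ou=school,dc=example,dc=com ; the group DN is then cn=<name>,<container>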
- It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the share is present or not. - type: str - name: - required: true - description: - - Name - type: str - host: - required: false - description: - - Host FQDN (server which provides the share), e.g. C({{ - ansible_fqdn }}). Required if C(state=present). - type: str - path: - required: false - description: - - Directory on the providing server, e.g. C(/home). Required if C(state=present). - type: path - sambaName: - required: false - description: - - Windows name. Required if C(state=present). - type: str - aliases: [ samba_name ] - ou: - required: true - description: - - Organisational unit, inside the LDAP Base DN. - type: str - owner: - default: '0' - description: - - Directory owner of the share's root directory. - type: str - group: - default: '0' - description: - - Directory owner group of the share's root directory. - type: str - directorymode: - default: '00755' - description: - - Permissions for the share's root directory. - type: str - root_squash: - default: true - description: - - Modify user ID for root user (root squashing). - type: bool - subtree_checking: - default: true - description: - - Subtree checking. - type: bool - sync: - default: 'sync' - description: - - NFS synchronisation. - type: str - writeable: - default: true - description: - - NFS write access. - type: bool - sambaBlockSize: - description: - - Blocking size. - type: str - aliases: [ samba_block_size ] - sambaBlockingLocks: - default: true - description: - - Blocking locks. - type: bool - aliases: [ samba_blocking_locks ] - sambaBrowseable: - description: - - Show in Windows network environment. - type: bool - default: True - aliases: [ samba_browsable ] - sambaCreateMode: - default: '0744' - description: - - File mode. - type: str - aliases: [ samba_create_mode ] - sambaCscPolicy: - default: 'manual' - description: - - Client-side caching policy. - type: str - aliases: [ samba_csc_policy ] - sambaCustomSettings: - default: [] - description: - - Option name in smb.conf and its value. - type: list - aliases: [ samba_custom_settings ] - sambaDirectoryMode: - default: '0755' - description: - - Directory mode. - type: str - aliases: [ samba_directory_mode ] - sambaDirectorySecurityMode: - default: '0777' - description: - - Directory security mode. - type: str - aliases: [ samba_directory_security_mode ] - sambaDosFilemode: - default: false - description: - - Users with write access may modify permissions. - type: bool - aliases: [ samba_dos_filemode ] - sambaFakeOplocks: - default: false - description: - - Fake oplocks. - type: bool - aliases: [ samba_fake_oplocks ] - sambaForceCreateMode: - default: false - description: - - Force file mode. - type: bool - aliases: [ samba_force_create_mode ] - sambaForceDirectoryMode: - default: false - description: - - Force directory mode. - type: bool - aliases: [ samba_force_directory_mode ] - sambaForceDirectorySecurityMode: - default: false - description: - - Force directory security mode. - type: bool - aliases: [ samba_force_directory_security_mode ] - sambaForceGroup: - description: - - Force group. - type: str - aliases: [ samba_force_group ] - sambaForceSecurityMode: - default: false - description: - - Force security mode. - type: bool - aliases: [ samba_force_security_mode ] - sambaForceUser: - description: - - Force user. 
- type: str - aliases: [ samba_force_user ] - sambaHideFiles: - description: - - Hide files. - type: str - aliases: [ samba_hide_files ] - sambaHideUnreadable: - default: false - description: - - Hide unreadable files/directories. - type: bool - aliases: [ samba_hide_unreadable ] - sambaHostsAllow: - default: [] - description: - - Allowed host/network. - type: list - aliases: [ samba_hosts_allow ] - sambaHostsDeny: - default: [] - description: - - Denied host/network. - type: list - aliases: [ samba_hosts_deny ] - sambaInheritAcls: - default: true - description: - - Inherit ACLs. - type: bool - aliases: [ samba_inherit_acls ] - sambaInheritOwner: - default: false - description: - - Create files/directories with the owner of the parent directory. - type: bool - aliases: [ samba_inherit_owner ] - sambaInheritPermissions: - default: false - description: - - Create files/directories with permissions of the parent directory. - type: bool - aliases: [ samba_inherit_permissions ] - sambaInvalidUsers: - description: - - Invalid users or groups. - type: str - aliases: [ samba_invalid_users ] - sambaLevel2Oplocks: - default: true - description: - - Level 2 oplocks. - type: bool - aliases: [ samba_level_2_oplocks ] - sambaLocking: - default: true - description: - - Locking. - type: bool - aliases: [ samba_locking ] - sambaMSDFSRoot: - default: false - description: - - MSDFS root. - type: bool - aliases: [ samba_msdfs_root ] - sambaNtAclSupport: - default: true - description: - - NT ACL support. - type: bool - aliases: [ samba_nt_acl_support ] - sambaOplocks: - default: true - description: - - Oplocks. - type: bool - aliases: [ samba_oplocks ] - sambaPostexec: - description: - - Postexec script. - type: str - aliases: [ samba_postexec ] - sambaPreexec: - description: - - Preexec script. - type: str - aliases: [ samba_preexec ] - sambaPublic: - default: false - description: - - Allow anonymous read-only access with a guest user. - type: bool - aliases: [ samba_public ] - sambaSecurityMode: - default: '0777' - description: - - Security mode. - type: str - aliases: [ samba_security_mode ] - sambaStrictLocking: - default: 'Auto' - description: - - Strict locking. - type: str - aliases: [ samba_strict_locking ] - sambaVFSObjects: - description: - - VFS objects. - type: str - aliases: [ samba_vfs_objects ] - sambaValidUsers: - description: - - Valid users or groups. - type: str - aliases: [ samba_valid_users ] - sambaWriteList: - description: - - Restrict write access to these users/groups. - type: str - aliases: [ samba_write_list ] - sambaWriteable: - default: true - description: - - Samba write access. - type: bool - aliases: [ samba_writeable ] - nfs_hosts: - default: [] - description: - - Only allow access for this host, IP address or network. - type: list - nfsCustomSettings: - default: [] - description: - - Option name in exports file. 
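-      # For example, raw export option strings such as C(rw) or
-      # C(no_root_squash) (typical NFS export options) that should be written
-      # to this share's entry in the exports file.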
- type: list - aliases: [ nfs_custom_settings ] -''' - - -EXAMPLES = ''' -- name: Create a share named home on the server ucs.example.com with the path /home - community.general.udm_share: - name: home - path: /home - host: ucs.example.com - sambaName: Home -''' - - -RETURN = '''# ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, - type='str'), - ou=dict(required=True, - type='str'), - owner=dict(type='str', - default='0'), - group=dict(type='str', - default='0'), - path=dict(type='path', - default=None), - directorymode=dict(type='str', - default='00755'), - host=dict(type='str', - default=None), - root_squash=dict(type='bool', - default=True), - subtree_checking=dict(type='bool', - default=True), - sync=dict(type='str', - default='sync'), - writeable=dict(type='bool', - default=True), - sambaBlockSize=dict(type='str', - aliases=['samba_block_size'], - default=None), - sambaBlockingLocks=dict(type='bool', - aliases=['samba_blocking_locks'], - default=True), - sambaBrowseable=dict(type='bool', - aliases=['samba_browsable'], - default=True), - sambaCreateMode=dict(type='str', - aliases=['samba_create_mode'], - default='0744'), - sambaCscPolicy=dict(type='str', - aliases=['samba_csc_policy'], - default='manual'), - sambaCustomSettings=dict(type='list', - aliases=['samba_custom_settings'], - default=[]), - sambaDirectoryMode=dict(type='str', - aliases=['samba_directory_mode'], - default='0755'), - sambaDirectorySecurityMode=dict(type='str', - aliases=['samba_directory_security_mode'], - default='0777'), - sambaDosFilemode=dict(type='bool', - aliases=['samba_dos_filemode'], - default=False), - sambaFakeOplocks=dict(type='bool', - aliases=['samba_fake_oplocks'], - default=False), - sambaForceCreateMode=dict(type='bool', - aliases=['samba_force_create_mode'], - default=False), - sambaForceDirectoryMode=dict(type='bool', - aliases=['samba_force_directory_mode'], - default=False), - sambaForceDirectorySecurityMode=dict(type='bool', - aliases=['samba_force_directory_security_mode'], - default=False), - sambaForceGroup=dict(type='str', - aliases=['samba_force_group'], - default=None), - sambaForceSecurityMode=dict(type='bool', - aliases=['samba_force_security_mode'], - default=False), - sambaForceUser=dict(type='str', - aliases=['samba_force_user'], - default=None), - sambaHideFiles=dict(type='str', - aliases=['samba_hide_files'], - default=None), - sambaHideUnreadable=dict(type='bool', - aliases=['samba_hide_unreadable'], - default=False), - sambaHostsAllow=dict(type='list', - aliases=['samba_hosts_allow'], - default=[]), - sambaHostsDeny=dict(type='list', - aliases=['samba_hosts_deny'], - default=[]), - sambaInheritAcls=dict(type='bool', - aliases=['samba_inherit_acls'], - default=True), - sambaInheritOwner=dict(type='bool', - aliases=['samba_inherit_owner'], - default=False), - sambaInheritPermissions=dict(type='bool', - aliases=['samba_inherit_permissions'], - default=False), - sambaInvalidUsers=dict(type='str', - aliases=['samba_invalid_users'], - default=None), - sambaLevel2Oplocks=dict(type='bool', - aliases=['samba_level_2_oplocks'], - default=True), - sambaLocking=dict(type='bool', - aliases=['samba_locking'], - default=True), - sambaMSDFSRoot=dict(type='bool', - aliases=['samba_msdfs_root'], - default=False), - 
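-            # Every camelCase samba* option here mirrors a UDM share property
-            # and carries a snake_case alias (samba_msdfs_root above,
-            # samba_name next), so playbooks can use either spelling.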
sambaName=dict(type='str', - aliases=['samba_name'], - default=None), - sambaNtAclSupport=dict(type='bool', - aliases=['samba_nt_acl_support'], - default=True), - sambaOplocks=dict(type='bool', - aliases=['samba_oplocks'], - default=True), - sambaPostexec=dict(type='str', - aliases=['samba_postexec'], - default=None), - sambaPreexec=dict(type='str', - aliases=['samba_preexec'], - default=None), - sambaPublic=dict(type='bool', - aliases=['samba_public'], - default=False), - sambaSecurityMode=dict(type='str', - aliases=['samba_security_mode'], - default='0777'), - sambaStrictLocking=dict(type='str', - aliases=['samba_strict_locking'], - default='Auto'), - sambaVFSObjects=dict(type='str', - aliases=['samba_vfs_objects'], - default=None), - sambaValidUsers=dict(type='str', - aliases=['samba_valid_users'], - default=None), - sambaWriteList=dict(type='str', - aliases=['samba_write_list'], - default=None), - sambaWriteable=dict(type='bool', - aliases=['samba_writeable'], - default=True), - nfs_hosts=dict(type='list', - default=[]), - nfsCustomSettings=dict(type='list', - aliases=['nfs_custom_settings'], - default=[]), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['path', 'host', 'sambaName']) - ]) - ) - name = module.params['name'] - state = module.params['state'] - changed = False - diff = None - - obj = list(ldap_search( - '(&(objectClass=univentionShare)(cn={0}))'.format(name), - attr=['cn'] - )) - - exists = bool(len(obj)) - container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn()) - dn = 'cn={0},{1}'.format(name, container) - - if state == 'present': - try: - if not exists: - obj = umc_module_for_add('shares/share', container) - else: - obj = umc_module_for_edit('shares/share', dn) - - module.params['printablename'] = '{0} ({1})'.format(name, module.params['host']) - for k in obj.keys(): - if module.params[k] is True: - module.params[k] = '1' - elif module.params[k] is False: - module.params[k] = '0' - obj[k] = module.params[k] - - diff = obj.diff() - if exists: - for k in obj.keys(): - if obj.hasChanged(k): - changed = True - else: - changed = True - if not module.check_mode: - if not exists: - obj.create() - elif changed: - obj.modify() - except Exception as err: - module.fail_json( - msg='Creating/editing share {0} in {1} failed: {2}'.format( - name, - container, - err, - ) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('shares/share', dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception as err: - module.fail_json( - msg='Removing share {0} in {1} failed: {2}'.format( - name, - container, - err, - ) - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/univention/udm_user.py b/plugins/modules/cloud/univention/udm_user.py deleted file mode 100644 index b0d6138fda..0000000000 --- a/plugins/modules/cloud/univention/udm_user.py +++ /dev/null @@ -1,542 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: udm_user -author: -- Tobias Rüetschi (@keachi) -short_description: Manage posix users on a univention corporate 
server -description: - - "This module allows to manage posix users on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the user is present or not. - type: str - username: - required: true - description: - - User name - aliases: ['name'] - type: str - firstname: - description: - - First name. Required if C(state=present). - type: str - lastname: - description: - - Last name. Required if C(state=present). - type: str - password: - description: - - Password. Required if C(state=present). - type: str - birthday: - description: - - Birthday - type: str - city: - description: - - City of users business address. - type: str - country: - description: - - Country of users business address. - type: str - department_number: - description: - - Department number of users business address. - aliases: [ departmentNumber ] - type: str - description: - description: - - Description (not gecos) - type: str - display_name: - description: - - Display name (not gecos) - aliases: [ displayName ] - type: str - email: - default: [''] - description: - - A list of e-mail addresses. - type: list - employee_number: - description: - - Employee number - aliases: [ employeeNumber ] - type: str - employee_type: - description: - - Employee type - aliases: [ employeeType ] - type: str - gecos: - description: - - GECOS - type: str - groups: - default: [] - description: - - "POSIX groups, the LDAP DNs of the groups will be found with the - LDAP filter for each group as $GROUP: - C((&(objectClass=posixGroup)(cn=$GROUP)))." - type: list - home_share: - description: - - "Home NFS share. Must be a LDAP DN, e.g. - C(cn=home,cn=shares,ou=school,dc=example,dc=com)." - aliases: [ homeShare ] - type: str - home_share_path: - description: - - Path to home NFS share, inside the homeShare. - aliases: [ homeSharePath ] - type: str - home_telephone_number: - default: [] - description: - - List of private telephone numbers. - aliases: [ homeTelephoneNumber ] - type: list - homedrive: - description: - - Windows home drive, e.g. C("H:"). - type: str - mail_alternative_address: - default: [] - description: - - List of alternative e-mail addresses. - aliases: [ mailAlternativeAddress ] - type: list - mail_home_server: - description: - - FQDN of mail server - aliases: [ mailHomeServer ] - type: str - mail_primary_address: - description: - - Primary e-mail address - aliases: [ mailPrimaryAddress ] - type: str - mobile_telephone_number: - default: [] - description: - - Mobile phone number - aliases: [ mobileTelephoneNumber ] - type: list - organisation: - description: - - Organisation - aliases: [ organization ] - type: str - overridePWHistory: - type: bool - default: 'no' - description: - - Override password history - aliases: [ override_pw_history ] - overridePWLength: - type: bool - default: 'no' - description: - - Override password check - aliases: [ override_pw_length ] - pager_telephonenumber: - default: [] - description: - - List of pager telephone numbers. - aliases: [ pagerTelephonenumber ] - type: list - phone: - description: - - List of telephone numbers. - type: list - postcode: - description: - - Postal code of users business address. - type: str - primary_group: - description: - - Primary group. This must be the group LDAP DN. - - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN). 
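-      # For a base DN of dc=example,dc=com this default expands to
-      # cn=Domain Users,cn=groups,dc=example,dc=com.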
- aliases: [ primaryGroup ] - type: str - profilepath: - description: - - Windows profile directory - type: str - pwd_change_next_login: - choices: [ '0', '1' ] - description: - - Change password on next login. - aliases: [ pwdChangeNextLogin ] - type: str - room_number: - description: - - Room number of users business address. - aliases: [ roomNumber ] - type: str - samba_privileges: - description: - - "Samba privilege, like allow printer administration, do domain - join." - aliases: [ sambaPrivileges ] - type: list - samba_user_workstations: - description: - - Allow the authentication only on this Microsoft Windows host. - aliases: [ sambaUserWorkstations ] - type: list - sambahome: - description: - - Windows home path, e.g. C('\\$FQDN\$USERNAME'). - type: str - scriptpath: - description: - - Windows logon script. - type: str - secretary: - default: [] - description: - - A list of superiors as LDAP DNs. - type: list - serviceprovider: - default: [''] - description: - - Enable user for the following service providers. - type: list - shell: - default: '/bin/bash' - description: - - Login shell - type: str - street: - description: - - Street of users business address. - type: str - title: - description: - - Title, e.g. C(Prof.). - type: str - unixhome: - description: - - Unix home directory - - If not specified, it defaults to C(/home/$USERNAME). - type: str - userexpiry: - description: - - Account expiry date, e.g. C(1999-12-31). - - If not specified, it defaults to the current day plus one year. - type: str - position: - default: '' - description: - - "Define the whole position of users object inside the LDAP tree, - e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)." - type: str - update_password: - default: always - choices: [ always, on_create ] - description: - - "C(always) will update passwords if they differ. - C(on_create) will only set the password for newly created users." - type: str - ou: - default: '' - description: - - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for - LDAP OU C(ou=school,dc=example,dc=com)." - type: str - subpath: - default: 'cn=users' - description: - - "LDAP subpath inside the organizational unit, e.g. - C(cn=teachers,cn=users) for LDAP container - C(cn=teachers,cn=users,dc=example,dc=com)." 
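-      # Combined with I(ou) and the LDAP base DN this gives the container for
-      # the user object, for example
-      # cn=teachers,cn=users,ou=school,dc=example,dc=com.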
- type: str -''' - - -EXAMPLES = ''' -- name: Create a user on a UCS - community.general.udm_user: - name: FooBar - password: secure_password - firstname: Foo - lastname: Bar - -- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) - community.general.udm_user: - name: foo - password: secure_password - firstname: Foo - lastname: Bar - ou: school - subpath: 'cn=teachers,cn=users' - -# or define the position -- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) - community.general.udm_user: - name: foo - password: secure_password - firstname: Foo - lastname: Bar - position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' -''' - - -RETURN = '''# ''' - -import crypt -from datetime import date, timedelta - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def main(): - expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") - module = AnsibleModule( - argument_spec=dict( - birthday=dict(type='str'), - city=dict(type='str'), - country=dict(type='str'), - department_number=dict(type='str', - aliases=['departmentNumber']), - description=dict(type='str'), - display_name=dict(type='str', - aliases=['displayName']), - email=dict(default=[''], - type='list'), - employee_number=dict(type='str', - aliases=['employeeNumber']), - employee_type=dict(type='str', - aliases=['employeeType']), - firstname=dict(type='str'), - gecos=dict(type='str'), - groups=dict(default=[], - type='list'), - home_share=dict(type='str', - aliases=['homeShare']), - home_share_path=dict(type='str', - aliases=['homeSharePath']), - home_telephone_number=dict(default=[], - type='list', - aliases=['homeTelephoneNumber']), - homedrive=dict(type='str'), - lastname=dict(type='str'), - mail_alternative_address=dict(default=[], - type='list', - aliases=['mailAlternativeAddress']), - mail_home_server=dict(type='str', - aliases=['mailHomeServer']), - mail_primary_address=dict(type='str', - aliases=['mailPrimaryAddress']), - mobile_telephone_number=dict(default=[], - type='list', - aliases=['mobileTelephoneNumber']), - organisation=dict(type='str', - aliases=['organization']), - overridePWHistory=dict(default=False, - type='bool', - aliases=['override_pw_history']), - overridePWLength=dict(default=False, - type='bool', - aliases=['override_pw_length']), - pager_telephonenumber=dict(default=[], - type='list', - aliases=['pagerTelephonenumber']), - password=dict(type='str', - no_log=True), - phone=dict(default=[], - type='list'), - postcode=dict(type='str'), - primary_group=dict(type='str', - aliases=['primaryGroup']), - profilepath=dict(type='str'), - pwd_change_next_login=dict(type='str', - choices=['0', '1'], - aliases=['pwdChangeNextLogin']), - room_number=dict(type='str', - aliases=['roomNumber']), - samba_privileges=dict(default=[], - type='list', - aliases=['sambaPrivileges']), - samba_user_workstations=dict(default=[], - type='list', - aliases=['sambaUserWorkstations']), - sambahome=dict(type='str'), - scriptpath=dict(type='str'), - secretary=dict(default=[], - type='list'), - serviceprovider=dict(default=[''], - type='list'), - shell=dict(default='/bin/bash', - type='str'), - street=dict(type='str'), - title=dict(type='str'), - unixhome=dict(type='str'), - userexpiry=dict(type='str'), - username=dict(required=True, - aliases=['name'], - 
type='str'), - position=dict(default='', - type='str'), - update_password=dict(default='always', - choices=['always', 'on_create'], - type='str'), - ou=dict(default='', - type='str'), - subpath=dict(default='cn=users', - type='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['firstname', 'lastname', 'password']) - ]) - ) - username = module.params['username'] - position = module.params['position'] - ou = module.params['ou'] - subpath = module.params['subpath'] - state = module.params['state'] - changed = False - diff = None - - users = list(ldap_search( - '(&(objectClass=posixAccount)(uid={0}))'.format(username), - attr=['uid'] - )) - if position != '': - container = position - else: - if ou != '': - ou = 'ou={0},'.format(ou) - if subpath != '': - subpath = '{0},'.format(subpath) - container = '{0}{1}{2}'.format(subpath, ou, base_dn()) - user_dn = 'uid={0},{1}'.format(username, container) - - exists = bool(len(users)) - - if state == 'present': - try: - if not exists: - obj = umc_module_for_add('users/user', container) - else: - obj = umc_module_for_edit('users/user', user_dn) - - if module.params['displayName'] is None: - module.params['displayName'] = '{0} {1}'.format( - module.params['firstname'], - module.params['lastname'] - ) - if module.params['unixhome'] is None: - module.params['unixhome'] = '/home/{0}'.format( - module.params['username'] - ) - for k in obj.keys(): - if (k != 'password' and - k != 'groups' and - k != 'overridePWHistory' and - k in module.params and - module.params[k] is not None): - obj[k] = module.params[k] - # handle some special values - obj['e-mail'] = module.params['email'] - if 'userexpiry' in obj and obj.get('userexpiry') is None: - obj['userexpiry'] = expiry - password = module.params['password'] - if obj['password'] is None: - obj['password'] = password - if module.params['update_password'] == 'always': - old_password = obj['password'].split('}', 2)[1] - if crypt.crypt(password, old_password) != old_password: - obj['overridePWHistory'] = module.params['overridePWHistory'] - obj['overridePWLength'] = module.params['overridePWLength'] - obj['password'] = password - - diff = obj.diff() - if exists: - for k in obj.keys(): - if obj.hasChanged(k): - changed = True - else: - changed = True - if not module.check_mode: - if not exists: - obj.create() - elif changed: - obj.modify() - except Exception: - module.fail_json( - msg="Creating/editing user {0} in {1} failed".format( - username, - container - ) - ) - try: - groups = module.params['groups'] - if groups: - filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format( - ')(cn='.join(groups) - ) - group_dns = list(ldap_search(filter, attr=['dn'])) - for dn in group_dns: - grp = umc_module_for_edit('groups/group', dn[0]) - if user_dn not in grp['users']: - grp['users'].append(user_dn) - if not module.check_mode: - grp.modify() - changed = True - except Exception: - module.fail_json( - msg="Adding groups to user {0} failed".format(username) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('users/user', user_dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception: - module.fail_json( - msg="Removing user {0} failed".format(username) - ) - - module.exit_json( - changed=changed, - username=username, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_app.py 
b/plugins/modules/cloud/webfaction/webfaction_app.py deleted file mode 100644 index 1839db3810..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_app.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create a Webfaction application using Ansible and the Webfaction API -# -# Valid application types can be found by looking here: -# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_app -short_description: Add or remove applications on a Webfaction host -description: - - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction). -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - name: - description: - - The name of the application - required: true - type: str - - state: - description: - - Whether the application should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. 
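-      # For example C(mod_wsgi35-python27), as used in the EXAMPLES section
-      # below.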
- required: true - type: str - - autostart: - description: - - Whether the app should restart with an C(autostart.cgi) script - type: bool - default: 'no' - - extra_info: - description: - - Any extra parameters required by the app - default: '' - type: str - - port_open: - description: - - IF the port should be opened - type: bool - default: 'no' - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str - -''' - -EXAMPLES = ''' - - name: Create a test app - community.general.webfaction_app: - name: "my_wsgi_app1" - state: present - type: mod_wsgi35-python27 - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - type=dict(required=True), - autostart=dict(required=False, type='bool', default=False), - extra_info=dict(required=False, default=""), - port_open=dict(required=False, type='bool', default=False), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(required=False, default=None), - ), - supports_check_mode=True - ) - app_name = module.params['name'] - app_type = module.params['type'] - app_state = module.params['state'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - app_list = webfaction.list_apps(session_id) - app_map = dict([(i['name'], i) for i in app_list]) - existing_app = app_map.get(app_name) - - result = {} - - # Here's where the real stuff happens - - if app_state == 'present': - - # Does an app with this name already exist? - if existing_app: - if existing_app['type'] != app_type: - module.fail_json(msg="App already exists with different type. Please fix by hand.") - - # If it exists with the right type, we don't change it - # Should check other parameters. - module.exit_json( - changed=False, - result=existing_app, - ) - - if not module.check_mode: - # If this isn't a dry run, create the app - result.update( - webfaction.create_app( - session_id, app_name, app_type, - module.boolean(module.params['autostart']), - module.params['extra_info'], - module.boolean(module.params['port_open']) - ) - ) - - elif app_state == 'absent': - - # If the app's already not there, nothing changed. 
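-        # This early exit keeps the module idempotent: the final
-        # exit_json(changed=True) below is only reached when a delete
-        # (or create) was actually requested.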
- if not existing_app: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_app(session_id, app_name) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(app_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_db.py b/plugins/modules/cloud/webfaction/webfaction_db.py deleted file mode 100644 index 11563426d7..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_db.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create a webfaction database using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_db -short_description: Add or remove a database on Webfaction -description: - - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. -options: - - name: - description: - - The name of the database - required: true - type: str - - state: - description: - - Whether the database should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of database to create. - required: true - choices: ['mysql', 'postgresql'] - type: str - - password: - description: - - The password for the new database user. - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str -''' - -EXAMPLES = ''' - # This will also create a default DB user with the same - # name as the database, and the specified password. - - - name: Create a database - community.general.webfaction_db: - name: "{{webfaction_user}}_db1" - password: mytestsql - type: mysql - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" - - # Note that, for symmetry's sake, deleting a database using - # 'state: absent' will also delete the matching user. - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. 
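-            # Only the two database engines Webfaction provisions are
-            # accepted; a default DB user named after the database is created
-            # (and removed) together with it, as noted in the EXAMPLES above.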
- type=dict(required=True, choices=['mysql', 'postgresql']), - password=dict(required=False, default=None, no_log=True), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(required=False, default=None), - ), - supports_check_mode=True - ) - db_name = module.params['name'] - db_state = module.params['state'] - db_type = module.params['type'] - db_passwd = module.params['password'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - db_list = webfaction.list_dbs(session_id) - db_map = dict([(i['name'], i) for i in db_list]) - existing_db = db_map.get(db_name) - - user_list = webfaction.list_db_users(session_id) - user_map = dict([(i['username'], i) for i in user_list]) - existing_user = user_map.get(db_name) - - result = {} - - # Here's where the real stuff happens - - if db_state == 'present': - - # Does a database with this name already exist? - if existing_db: - # Yes, but of a different type - fail - if existing_db['db_type'] != db_type: - module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") - - # If it exists with the right type, we don't change anything. - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, create the db - # and default user. - result.update( - webfaction.create_db( - session_id, db_name, db_type, db_passwd - ) - ) - - elif db_state == 'absent': - - # If this isn't a dry run... - if not module.check_mode: - - if not (existing_db or existing_user): - module.exit_json(changed=False,) - - if existing_db: - # Delete the db if it exists - result.update( - webfaction.delete_db(session_id, db_name, db_type) - ) - - if existing_user: - # Delete the default db user if it exists - result.update( - webfaction.delete_db_user(session_id, db_name, db_type) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(db_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_domain.py b/plugins/modules/cloud/webfaction/webfaction_domain.py deleted file mode 100644 index f9c3b7db7a..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_domain.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create Webfaction domains and subdomains using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_domain -short_description: Add or remove domains and subdomains on Webfaction -description: - - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. - If you don't specify subdomains, the domain will be deleted. 
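-  # For example, C(state=absent) with C(subdomains: [www]) removes only
-  # C(www.mydomain.com) and leaves the domain itself in place.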
- - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - - name: - description: - - The name of the domain - required: true - type: str - - state: - description: - - Whether the domain should exist - choices: ['present', 'absent'] - default: "present" - type: str - - subdomains: - description: - - Any subdomains to create. - default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a test domain - community.general.webfaction_domain: - name: mydomain.com - state: present - subdomains: - - www - - blog - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - - - name: Delete test domain and any subdomains - community.general.webfaction_domain: - name: mydomain.com - state: absent - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - subdomains=dict(default=[], type='list', elements='str'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - domain_name = module.params['name'] - domain_state = module.params['state'] - domain_subdomains = module.params['subdomains'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - domain_list = webfaction.list_domains(session_id) - domain_map = dict([(i['domain'], i) for i in domain_list]) - existing_domain = domain_map.get(domain_name) - - result = {} - - # Here's where the real stuff happens - - if domain_state == 'present': - - # Does an app with this name already exist? - if existing_domain: - - if set(existing_domain['subdomains']) >= set(domain_subdomains): - # If it exists with the right subdomains, we don't change anything. - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, create the app - # print positional_args - result.update( - webfaction.create_domain( - *positional_args - ) - ) - - elif domain_state == 'absent': - - # If the app's already not there, nothing changed. 
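-        # delete_domain takes the same positional argument list as
-        # create_domain: session id, domain name, then any subdomains,
-        # which is why positional_args is rebuilt below.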
- if not existing_domain: - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_domain(*positional_args) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(domain_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/plugins/modules/cloud/webfaction/webfaction_mailbox.py deleted file mode 100644 index 37755763a2..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_mailbox.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create webfaction mailbox using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_mailbox -short_description: Add or remove mailboxes on Webfaction -description: - - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. 
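-# Both password options below are declared with no_log=True in the module
-# code, so their values are masked in Ansible's task output.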
-options: - - mailbox_name: - description: - - The name of the mailbox - required: true - type: str - - mailbox_password: - description: - - The password for the mailbox - required: true - type: str - - state: - description: - - Whether the mailbox should exist - choices: ['present', 'absent'] - default: "present" - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a mailbox - community.general.webfaction_mailbox: - mailbox_name="mybox" - mailbox_password="myboxpw" - state=present - login_name={{webfaction_user}} - login_password={{webfaction_passwd}} -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - mailbox_name=dict(required=True), - mailbox_password=dict(required=True, no_log=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - - mailbox_name = module.params['mailbox_name'] - site_state = module.params['state'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)] - existing_mailbox = mailbox_name in mailbox_list - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a mailbox with this name already exist? - if existing_mailbox: - module.exit_json(changed=False,) - - positional_args = [session_id, mailbox_name] - - if not module.check_mode: - # If this isn't a dry run, create the mailbox - result.update(webfaction.create_mailbox(*positional_args)) - - elif site_state == 'absent': - - # If the mailbox is already not there, nothing changed. - if not existing_mailbox: - module.exit_json(changed=False) - - if not module.check_mode: - # If this isn't a dry run, delete the mailbox - result.update(webfaction.delete_mailbox(session_id, mailbox_name)) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json(changed=True, result=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_site.py b/plugins/modules/cloud/webfaction/webfaction_site.py deleted file mode 100644 index 87faade3e2..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_site.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create Webfaction website using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_site -short_description: Add or remove a website on a Webfaction host -description: - - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. 
But at least, unlike the API, you don't need to know the IP - address. You can use a DNS name. - - If a site of the same name exists in the account but on a different host, the operation will exit. - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - - name: - description: - - The name of the website - required: true - type: str - - state: - description: - - Whether the website should exist - choices: ['present', 'absent'] - default: "present" - type: str - - host: - description: - - The webfaction host on which the site should be created. - required: true - type: str - - https: - description: - - Whether or not to use HTTPS - type: bool - default: 'no' - - site_apps: - description: - - A mapping of URLs to apps - default: [] - type: list - elements: list - - subdomains: - description: - - A list of subdomains associated with this site. - default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create website - community.general.webfaction_site: - name: testsite1 - state: present - host: myhost.webfaction.com - subdomains: - - 'testsite1.my_domain.org' - site_apps: - - ['testapp1', '/'] - https: no - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" -''' - -import socket - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. - host=dict(required=True), - https=dict(required=False, type='bool', default=False), - subdomains=dict(type='list', elements='str', default=[]), - site_apps=dict(type='list', elements='list', default=[]), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - site_name = module.params['name'] - site_state = module.params['state'] - site_host = module.params['host'] - site_ip = socket.gethostbyname(site_host) - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - site_list = webfaction.list_websites(session_id) - site_map = dict([(i['name'], i) for i in site_list]) - existing_site = site_map.get(site_name) - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a site with this name already exist? - if existing_site: - - # If yes, but it's on a different IP address, then fail. - # If we wanted to allow relocation, we could add a 'relocate=true' option - # which would get the existing IP address, delete the site there, and create it - # at the new address. A bit dangerous, perhaps, so for now we'll require manual - # deletion if it's on another host. - - if existing_site['ip'] != site_ip: - module.fail_json(msg="Website already exists with a different IP address. 
Please fix by hand.") - - # If it's on this host and the key parameters are the same, nothing needs to be done. - - if (existing_site['https'] == module.boolean(module.params['https'])) and \ - (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ - (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): - module.exit_json( - changed=False - ) - - positional_args = [ - session_id, site_name, site_ip, - module.boolean(module.params['https']), - module.params['subdomains'], - ] - for a in module.params['site_apps']: - positional_args.append((a[0], a[1])) - - if not module.check_mode: - # If this isn't a dry run, create or modify the site - result.update( - webfaction.create_website( - *positional_args - ) if not existing_site else webfaction.update_website( - *positional_args - ) - ) - - elif site_state == 'absent': - - # If the site's already not there, nothing changed. - if not existing_site: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the site - result.update( - webfaction.delete_website(session_id, site_name, site_ip) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest.py b/plugins/modules/cloud/xenserver/xenserver_guest.py deleted file mode 100644 index b90b380c3f..0000000000 --- a/plugins/modules/cloud/xenserver/xenserver_guest.py +++ /dev/null @@ -1,2026 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest -short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to create new virtual machines from templates or other virtual machines, - modify various virtual machine components like network and disk, rename a virtual machine and - remove a virtual machine with associated components. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in I(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for I(hostname) you have to either import host certificate to your OS certificate store or use I(validate_certs): C(no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. 
parameters, is supported on - XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to - detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest - agent only support None and Static types of network configuration, where None means DHCP configured interface, I(networks.type) and I(networks.type6) - values C(none) and C(dhcp) have same effect. More info here: - U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)' -- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore - C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or trough - WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user - to implement a boot time scripts or custom agent that will read the parameters from xenstore and configure network with given parameters. - Take note that for xenstore data to become available inside a guest, a VM restart is needed hence module will require VM restart if any - parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration trough xenstore is most - useful for bootstraping newly deployed VMs, much less for reconfiguring existing ones. More info here: - U(https://support.citrix.com/article/CTX226713)' -requirements: -- python >= 2.6 -- XenAPI -options: - state: - description: - - Specify the state VM should be in. - - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters. - - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters. - - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components. - - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. - type: str - default: present - choices: [ present, absent, poweredon ] - name: - description: - - Name of the VM to work with. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use I(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - name_desc: - description: - - VM description. - type: str - uuid: - description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. - - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally. - type: str - template: - description: - - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM. - - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found. - - In case of multiple templates/VMs/snapshots with same name, use I(template_uuid) to uniquely specify source template. - - If VM already exists, this setting will be ignored. - - This parameter is case sensitive. 
- type: str - aliases: [ template_src ] - template_uuid: - description: - - UUID of a template, an existing VM or a snapshot that should be used to create VM. - - It is required if template name is not unique. - type: str - is_template: - description: - - Convert VM to template. - type: bool - default: no - folder: - description: - - Destination folder for VM. - - This parameter is case sensitive. - - 'Example:' - - ' folder: /folder1/folder2' - type: str - hardware: - description: - - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters. - type: dict - suboptions: - num_cpus: - description: - - Number of CPUs. - type: int - num_cpu_cores_per_socket: - description: - - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket). - type: int - memory_mb: - description: - - Amount of memory in MB. - type: int - disks: - description: - - A list of disks to add to VM. - - All parameters are case sensitive. - - Removing or detaching existing disks of VM is not supported. - - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified. - - VM needs to be shut down to reconfigure disk size. - type: list - elements: dict - aliases: [ disk ] - suboptions: - size: - description: - - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.' - - If no unit is specified, size is assumed to be in bytes. - type: str - size_b: - description: - - Disk size in bytes. - type: str - size_kb: - description: - - Disk size in kilobytes. - type: str - size_mb: - description: - - Disk size in megabytes. - type: str - size_gb: - description: - - Disk size in gigabytes. - type: str - size_tb: - description: - - Disk size in terabytes. - type: str - name: - description: - - Disk name. - type: str - aliases: [ name_label ] - name_desc: - description: - - Disk description. - type: str - sr: - description: - - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR. - type: str - sr_uuid: - description: - - UUID of a SR to create disk on. Use if SR name is not unique. - type: str - cdrom: - description: - - A CD-ROM configuration for the VM. - - All parameters are case sensitive. - type: dict - suboptions: - type: - description: - - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty. - type: str - choices: [ none, iso ] - iso_name: - description: - - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).' - - Required if I(type) is set to C(iso). - type: str - networks: - description: - - A list of networks (in the order of the NICs). - - All parameters are case sensitive. - - Name is required for new NICs. Other parameters are optional in all cases. - type: list - elements: dict - aliases: [ network ] - suboptions: - name: - description: - - Name of a XenServer network to attach the network interface to. - type: str - aliases: [ name_label ] - mac: - description: - - Customize MAC address of the interface. - type: str - type: - description: - - Type of IPv4 assignment. Value C(none) means whatever is default for OS. - - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux). - type: str - choices: [ none, dhcp, static ] - ip: - description: - - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(/) instead of using C(netmask).' 
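-        # For example C(ip: 192.168.1.100/24), as in the EXAMPLES section
-        # below.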
- type: str - netmask: - description: - - Static IPv4 netmask required for I(ip) if prefix is not specified. - type: str - gateway: - description: - - Static IPv4 gateway. - type: str - type6: - description: - - Type of IPv6 assignment. Value C(none) means whatever is the default for the OS. - type: str - choices: [ none, dhcp, static ] - ip6: - description: - - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(<IPv6 address>/<prefix>).' - type: str - gateway6: - description: - - Static IPv6 gateway. - type: str - home_server: - description: - - Name of a XenServer host that will be a Home Server for the VM. - - This parameter is case sensitive. - type: str - custom_params: - description: - - Define a list of custom VM params to set on VM. - - Useful for advanced users familiar with managing VM params through the xe CLI. - - A custom value object takes two fields I(key) and I(value) (see example below). - type: list - elements: dict - suboptions: - key: - description: - - VM param name. - type: str - required: yes - value: - description: - - VM param value. - type: raw - required: yes - wait_for_ip_address: - description: - - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored. - - This requires XenServer Tools to be preinstalled on the VM to work properly. - type: bool - default: no - state_change_timeout: - description: - - 'By default, the module will wait indefinitely for the VM to acquire an IP address if I(wait_for_ip_address): C(yes).' - - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change. - - In case of timeout, the module will generate an error message. - type: int - default: 0 - linked_clone: - description: - - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy. - - This is equivalent to the C(Use storage-level fast disk clone) option in XenCenter. - type: bool - default: no - force: - description: - - Ignore warnings and complete the actions. - - This parameter is useful for removing a VM in running state or reconfiguring VM params that require VM to be shut down.
- type: bool - default: no -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Create a VM from a template - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - validate_certs: no - folder: /testvms - name: testvm_2 - state: poweredon - template: CentOS 7 - disks: - - size_gb: 10 - sr: my_sr - hardware: - num_cpus: 6 - num_cpu_cores_per_socket: 3 - memory_mb: 512 - cdrom: - type: iso - iso_name: guest-tools.iso - networks: - - name: VM Network - mac: aa:bb:dd:aa:00:14 - wait_for_ip_address: yes - delegate_to: localhost - register: deploy - -- name: Create a VM template - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - validate_certs: no - folder: /testvms - name: testvm_6 - is_template: yes - disk: - - size_gb: 10 - sr: my_sr - hardware: - memory_mb: 512 - num_cpus: 1 - delegate_to: localhost - register: deploy - -- name: Rename a VM (requires the VM's UUID) - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - uuid: 421e4592-c069-924d-ce20-7e7533fab926 - name: new_name - state: present - delegate_to: localhost - -- name: Remove a VM by UUID - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - uuid: 421e4592-c069-924d-ce20-7e7533fab926 - state: absent - delegate_to: localhost - -- name: Modify custom params (boot order) - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_8 - state: present - custom_params: - - key: HVM_boot_params - value: { "order": "ndc" } - delegate_to: localhost - -- name: Customize network parameters - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_10 - networks: - - name: VM Network - ip: 192.168.1.100/24 - gateway: 192.168.1.1 - - type: dhcp - delegate_to: localhost -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - 
"other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -changes: - description: Detected or made changes to VM - returned: always - type: list - sample: [ - { - "hardware": [ - "num_cpus" - ] - }, - { - "disks_changed": [ - [], - [ - "size" - ] - ] - }, - { - "disks_new": [ - { - "name": "new-disk", - "name_desc": "", - "position": 2, - "size_gb": "4", - "vbd_userdevice": "2" - } - ] - }, - { - "cdrom": [ - "type", - "iso_name" - ] - }, - { - "networks_changed": [ - [ - "mac" - ], - ] - }, - { - "networks_new": [ - { - "name": "Pool-wide network associated with eth2", - "position": 1, - "vif_device": "1" - } - ] - }, - "need_poweredoff" - ] -''' - -import re - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.network import is_mac -from ansible.module_utils import six -from ansible_collections.community.general.plugins.module_utils.xenserver import ( - xenserver_common_argument_spec, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask, - is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix, - is_valid_ip6_addr, is_valid_ip6_prefix) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to Ansible module object. - """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ") - self.gather_params() - - def exists(self): - """Returns True if VM exists, else False.""" - return True if self.vm_ref is not None else False - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - def set_power_state(self, power_state): - """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) - - # If state has changed, update vm_params. 
- if state_changed: - self.vm_params['power_state'] = current_state.capitalize() - - return state_changed - - def wait_for_ip_address(self): - """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) - - def deploy(self): - """Deploys new VM from template.""" - # Safety check. - if self.exists(): - self.module.fail_json(msg="Called deploy on existing VM!") - - try: - templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True, - msg_prefix="VM deploy: ") - - # Is this an existing running VM? - if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted': - self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!") - - # Find an SR we can use for VM.copy(). We use SR of the first disk - # if specified or default SR if not specified. - disk_params_list = self.module.params['disks'] - - sr_ref = None - - if disk_params_list: - disk_params = disk_params_list[0] - - disk_sr_uuid = disk_params.get('sr_uuid') - disk_sr = disk_params.get('sr') - - if disk_sr_uuid is not None or disk_sr is not None: - sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, - msg_prefix="VM deploy disks[0]: ") - - if not sr_ref: - if self.default_sr_ref != "OpaqueRef:NULL": - sr_ref = self.default_sr_ref - else: - self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.") - - # VM name could be an empty string which is bad. - if self.module.params['name'] is not None and not self.module.params['name']: - self.module.fail_json(msg="VM deploy: VM name must not be an empty string!") - - # Support for Ansible check mode. - if self.module.check_mode: - return - - # Now we can instantiate VM. We use VM.clone for linked_clone and - # VM.copy for non linked_clone. - if self.module.params['linked_clone']: - self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name']) - else: - self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref) - - # Description is copied over from template so we reset it. - self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "") - - # If template is one of built-in XenServer templates, we have to - # do some additional steps. - # Note: VM.get_is_default_template() is supported from XenServer 7.2 - # onward so we use an alternative way. - templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref) - - if "default_template" in templ_other_config and templ_other_config['default_template']: - # other_config of built-in XenServer templates has a key called - # 'disks' whose value is an XML <provision> disk specification. - # This value of other_config is copied to cloned or copied VM and - # it prevents provisioning of VM because sr is not specified and - # XAPI returns an error. To get around this, we remove the - # 'disks' key and add disks to VM later ourselves. - vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref) - - if "disks" in vm_other_config: - del vm_other_config['disks'] - - self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config) - - # At this point we have VM ready for provisioning. - self.xapi_session.xenapi.VM.provision(self.vm_ref) - - # After provisioning we can prepare vm_params for reconfigure(). - self.gather_params() - - # VM is almost ready.
We just need to reconfigure it... - self.reconfigure() - - # Power on VM if needed. - if self.module.params['state'] == "poweredon": - self.set_power_state("poweredon") - - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) - - def reconfigure(self): - """Reconfigures an existing VM. - - Returns: - list: parameters that were reconfigured. - """ - # Safety check. - if not self.exists(): - self.module.fail_json(msg="Called reconfigure on non existing VM!") - - config_changes = self.get_changes() - - vm_power_state_save = self.vm_params['power_state'].lower() - - if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']: - self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!") - - # Support for Ansible check mode. - if self.module.check_mode: - return config_changes - - if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']: - self.set_power_state("shutdownguest") - - try: - for change in config_changes: - if isinstance(change, six.string_types): - if change == "name": - self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name']) - elif change == "name_desc": - self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc']) - elif change == "folder": - self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder') - - if self.module.params['folder']: - self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder']) - elif change == "home_server": - if self.module.params['home_server']: - host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0] - else: - host_ref = "OpaqueRef:NULL" - - self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref) - elif isinstance(change, dict): - if change.get('hardware'): - for hardware_change in change['hardware']: - if hardware_change == "num_cpus": - num_cpus = int(self.module.params['hardware']['num_cpus']) - - if num_cpus < int(self.vm_params['VCPUs_at_startup']): - self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) - self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) - else: - self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) - self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) - elif hardware_change == "num_cpu_cores_per_socket": - self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket') - num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket']) - - if num_cpu_cores_per_socket > 1: - self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket)) - elif hardware_change == "memory_mb": - memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576) - vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min']))) - - self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b) - elif change.get('disks_changed'): - vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] - position = 0 - - for disk_change_list in change['disks_changed']: - for disk_change in disk_change_list: - vdi_ref = 
self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid']) - - if disk_change == "name": - self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name']) - elif disk_change == "name_desc": - self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc']) - elif disk_change == "size": - self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position], - "VM reconfigure disks[%s]: " % position))) - - position += 1 - elif change.get('disks_new'): - for position, disk_userdevice in change['disks_new']: - disk_params = self.module.params['disks'][position] - - disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position) - disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else "" - - if disk_params.get('sr_uuid'): - sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid']) - elif disk_params.get('sr'): - sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0] - else: - sr_ref = self.default_sr_ref - - disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position)) - - new_disk_vdi = { - "name_label": disk_name, - "name_description": disk_name_desc, - "SR": sr_ref, - "virtual_size": disk_size, - "type": "user", - "sharable": False, - "read_only": False, - "other_config": {}, - } - - new_disk_vbd = { - "VM": self.vm_ref, - "VDI": None, - "userdevice": disk_userdevice, - "bootable": False, - "mode": "RW", - "type": "Disk", - "empty": False, - "other_config": {}, - "qos_algorithm_type": "", - "qos_algorithm_params": {}, - } - - new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi) - vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd) - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VBD.plug(vbd_ref_new) - - elif change.get('cdrom'): - vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] - - # If there is no CD present, we have to create one. - if not vm_cdrom_params_list: - # We will try to place cdrom at userdevice position - # 3 (which is default) if it is not already occupied - # else we will place it at first allowed position. - cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) - - if "3" in cdrom_userdevices_allowed: - cdrom_userdevice = "3" - else: - cdrom_userdevice = cdrom_userdevices_allowed[0] - - cdrom_vbd = { - "VM": self.vm_ref, - "VDI": "OpaqueRef:NULL", - "userdevice": cdrom_userdevice, - "bootable": False, - "mode": "RO", - "type": "CD", - "empty": True, - "other_config": {}, - "qos_algorithm_type": "", - "qos_algorithm_params": {}, - } - - cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd) - else: - cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid']) - - cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref) - - for cdrom_change in change['cdrom']: - if cdrom_change == "type": - cdrom_type = self.module.params['cdrom']['type'] - - if cdrom_type == "none" and not cdrom_is_empty: - self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) - elif cdrom_type == "host": - # Unimplemented! 
- pass - - elif cdrom_change == "iso_name": - if not cdrom_is_empty: - self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) - - cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0] - self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref) - elif change.get('networks_changed'): - position = 0 - - for network_change_list in change['networks_changed']: - if network_change_list: - vm_vif_params = self.vm_params['VIFs'][position] - network_params = self.module.params['networks'][position] - - vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid']) - network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid']) - - vif_recreated = False - - if "name" in network_change_list or "mac" in network_change_list: - # To change network or MAC, we destroy old - # VIF and then create a new one with changed - # parameters. That's how XenCenter does it. - - # Copy all old parameters to new VIF record. - vif = { - "device": vm_vif_params['device'], - "network": network_ref, - "VM": vm_vif_params['VM'], - "MAC": vm_vif_params['MAC'], - "MTU": vm_vif_params['MTU'], - "other_config": vm_vif_params['other_config'], - "qos_algorithm_type": vm_vif_params['qos_algorithm_type'], - "qos_algorithm_params": vm_vif_params['qos_algorithm_params'], - "locking_mode": vm_vif_params['locking_mode'], - "ipv4_allowed": vm_vif_params['ipv4_allowed'], - "ipv6_allowed": vm_vif_params['ipv6_allowed'], - } - - if "name" in network_change_list: - network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] - vif['network'] = network_ref_new - vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new) - - if "mac" in network_change_list: - vif['MAC'] = network_params['mac'].lower() - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VIF.unplug(vif_ref) - - self.xapi_session.xenapi.VIF.destroy(vif_ref) - vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VIF.plug(vif_ref_new) - - vif_ref = vif_ref_new - vif_recreated = True - - if self.vm_params['customization_agent'] == "native": - vif_reconfigure_needed = False - - if "type" in network_change_list: - network_type = network_params['type'].capitalize() - vif_reconfigure_needed = True - else: - network_type = vm_vif_params['ipv4_configuration_mode'] - - if "ip" in network_change_list: - network_ip = network_params['ip'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv4_addresses']: - network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0] - else: - network_ip = "" - - if "prefix" in network_change_list: - network_prefix = "/%s" % network_params['prefix'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: - network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1] - else: - network_prefix = "" - - if "gateway" in network_change_list: - network_gateway = network_params['gateway'] - vif_reconfigure_needed = True - else: - network_gateway = vm_vif_params['ipv4_gateway'] - - if vif_recreated or vif_reconfigure_needed: - self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type, - "%s%s" % (network_ip, network_prefix), network_gateway) - - vif_reconfigure_needed = False - - if "type6" in network_change_list: - network_type6 = network_params['type6'].capitalize() - vif_reconfigure_needed = True - else: - network_type6 = 
vm_vif_params['ipv6_configuration_mode'] - - if "ip6" in network_change_list: - network_ip6 = network_params['ip6'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv6_addresses']: - network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0] - else: - network_ip6 = "" - - if "prefix6" in network_change_list: - network_prefix6 = "/%s" % network_params['prefix6'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: - network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1] - else: - network_prefix6 = "" - - if "gateway6" in network_change_list: - network_gateway6 = network_params['gateway6'] - vif_reconfigure_needed = True - else: - network_gateway6 = vm_vif_params['ipv6_gateway'] - - if vif_recreated or vif_reconfigure_needed: - self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6, - "%s%s" % (network_ip6, network_prefix6), network_gateway6) - - elif self.vm_params['customization_agent'] == "custom": - vif_device = vm_vif_params['device'] - - # A user could have manually changed network - # or mac e.g. through XenCenter and then also - # make those changes in the playbook manually. - # In that case, the module will not detect any - # changes and the info in xenstore_data will - # become stale. For that reason we always - # update name and mac in xenstore_data. - - # Since we handle name and mac differently, - # we have to remove them from - # network_change_list. - network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']] - - for network_change in network_change_list_tmp + ['name', 'mac']: - self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, network_change)) - - if network_params.get('name'): - network_name = network_params['name'] - else: - network_name = vm_vif_params['network']['name_label'] - - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name) - - if network_params.get('mac'): - network_mac = network_params['mac'].lower() - else: - network_mac = vm_vif_params['MAC'].lower() - - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac) - - for network_change in network_change_list_tmp: - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, network_change), - network_params[network_change]) - - position += 1 - elif change.get('networks_new'): - for position, vif_device in change['networks_new']: - network_params = self.module.params['networks'][position] - - network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] - - network_name = network_params['name'] - network_mac = network_params['mac'] if network_params.get('mac') else "" - network_type = network_params.get('type') - network_ip = network_params['ip'] if network_params.get('ip') else "" - network_prefix = network_params['prefix'] if network_params.get('prefix') else "" - network_netmask = network_params['netmask'] if network_params.get('netmask') else "" - network_gateway = network_params['gateway'] if network_params.get('gateway') else "" - network_type6 = network_params.get('type6') - network_ip6 = network_params['ip6'] if network_params.get('ip6') else "" - network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else "" - network_gateway6 = network_params['gateway6'] if
network_params.get('gateway6') else "" - - vif = { - "device": vif_device, - "network": network_ref, - "VM": self.vm_ref, - "MAC": network_mac, - "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref), - "other_config": {}, - "qos_algorithm_type": "", - "qos_algorithm_params": {}, - } - - vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VIF.plug(vif_ref_new) - - if self.vm_params['customization_agent'] == "native": - if network_type and network_type == "static": - self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static", - "%s/%s" % (network_ip, network_prefix), network_gateway) - - if network_type6 and network_type6 == "static": - self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static", - "%s/%s" % (network_ip6, network_prefix6), network_gateway6) - elif self.vm_params['customization_agent'] == "custom": - # We first have to remove any existing data - # from xenstore_data because there could be - # some old leftover data from some interface - # that once occupied same device location as - # our new interface. - for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: - self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param)) - - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name) - - # We get MAC from VIF itself instead of - # networks.mac because it could be - # autogenerated. - vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac) - - if network_type: - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type) - - if network_type == "static": - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/ip" % vif_device, network_ip) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/prefix" % vif_device, network_prefix) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/netmask" % vif_device, network_netmask) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/gateway" % vif_device, network_gateway) - - if network_type6: - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6) - - if network_type6 == "static": - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/ip6" % vif_device, network_ip6) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/prefix6" % vif_device, network_prefix6) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/gateway6" % vif_device, network_gateway6) - - elif change.get('custom_params'): - for position in change['custom_params']: - custom_param_key = self.module.params['custom_params'][position]['key'] - custom_param_value = self.module.params['custom_params'][position]['value'] - self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value)) - - if self.module.params['is_template']: - self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True) - elif "need_poweredoff" in config_changes and self.module.params['force'] and 
vm_power_state_save != 'halted': - self.set_power_state("poweredon") - - # Gather new params after reconfiguration. - self.gather_params() - - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) - - return config_changes - - def destroy(self): - """Removes an existing VM with associated disks.""" - # Safety check. - if not self.exists(): - self.module.fail_json(msg="Called destroy on non existing VM!") - - if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']: - self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!") - - # Support for Ansible check mode. - if self.module.check_mode: - return - - # Make sure that VM is poweredoff before we can destroy it. - self.set_power_state("poweredoff") - - try: - # Destroy VM! - self.xapi_session.xenapi.VM.destroy(self.vm_ref) - - vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] - - # Destroy all VDIs associated with VM! - for vm_disk_params in vm_disk_params_list: - vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid']) - - self.xapi_session.xenapi.VDI.destroy(vdi_ref) - - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) - - def get_changes(self): - """Finds VM parameters that differ from specified ones. - - This method builds a list describing the hierarchy of VM parameters - that differ from those specified in module parameters. - - Returns: - list: VM parameters that differ from those specified in - module parameters. - """ - # Safety check. - if not self.exists(): - self.module.fail_json(msg="Called get_changes on non existing VM!") - - need_poweredoff = False - - if self.module.params['is_template']: - need_poweredoff = True - - try: - # This VM could be a template or a snapshot. In that case we fail - # because we can't reconfigure them or it would just be too - # dangerous. - if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']: - self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.") - - if self.vm_params['is_a_snapshot']: - self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.") - - # Let's build a list of parameters that changed. - config_changes = [] - - # Name could only differ if we found an existing VM by uuid. - if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']: - if self.module.params['name']: - config_changes.append('name') - else: - self.module.fail_json(msg="VM check name: VM name cannot be an empty string!") - - if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']: - config_changes.append('name_desc') - - # Folder parameter is found in other_config. - vm_other_config = self.vm_params['other_config'] - vm_folder = vm_other_config.get('folder', '') - - if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder: - config_changes.append('folder') - - if self.module.params['home_server'] is not None: - if (self.module.params['home_server'] and - (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])): - - # Check existence only. Ignore return value.
- get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True, - msg_prefix="VM check home_server: ") - - config_changes.append('home_server') - elif not self.module.params['home_server'] and self.vm_params['affinity']: - config_changes.append('home_server') - - config_changes_hardware = [] - - if self.module.params['hardware']: - num_cpus = self.module.params['hardware'].get('num_cpus') - - if num_cpus is not None: - # Kept for compatibility with older Ansible versions that - # do not support subargument specs. - try: - num_cpus = int(num_cpus) - except ValueError as e: - self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!") - - if num_cpus < 1: - self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!") - - # We can use VCPUs_at_startup or VCPUs_max parameter. I'd - # say the former is the way to go but this needs - # confirmation and testing. - if num_cpus != int(self.vm_params['VCPUs_at_startup']): - config_changes_hardware.append('num_cpus') - # For now, we don't support hotplugging so VM has to be in - # poweredoff state to reconfigure. - need_poweredoff = True - - num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket') - - if num_cpu_cores_per_socket is not None: - # Kept for compatibility with older Ansible versions that - # do not support subargument specs. - try: - num_cpu_cores_per_socket = int(num_cpu_cores_per_socket) - except ValueError as e: - self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!") - - if num_cpu_cores_per_socket < 1: - self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!") - - if num_cpus and num_cpus % num_cpu_cores_per_socket != 0: - self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!") - - vm_platform = self.vm_params['platform'] - vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1)) - - if num_cpu_cores_per_socket != vm_cores_per_socket: - config_changes_hardware.append('num_cpu_cores_per_socket') - # For now, we don't support hotplugging so VM has to be - # in poweredoff state to reconfigure. - need_poweredoff = True - - memory_mb = self.module.params['hardware'].get('memory_mb') - - if memory_mb is not None: - # Kept for compatibility with older Ansible versions that - # do not support subargument specs. - try: - memory_mb = int(memory_mb) - except ValueError as e: - self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!") - - if memory_mb < 1: - self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!") - - # There are multiple memory parameters: - # - memory_dynamic_max - # - memory_dynamic_min - # - memory_static_max - # - memory_static_min - # - memory_target - # - # memory_target seems like a good candidate but it returns 0 for - # halted VMs so we can't use it. - # - # I decided to use memory_dynamic_max and memory_static_max - # and use whichever is larger. This strategy needs validation - # and testing. - # - # XenServer stores memory size in bytes so we need to divide - # it by 1024*1024 = 1048576.
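- # For example (illustrative values): a VM with memory_static_max of
- # 8589934592 bytes compares as 8589934592 / 1048576 = 8192 MB against
- # hardware.memory_mb.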
- if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576): - config_changes_hardware.append('memory_mb') - # For now, we don't support hotplugging so VM has to be in - # poweredoff state to reconfigure. - need_poweredoff = True - - if config_changes_hardware: - config_changes.append({"hardware": config_changes_hardware}) - - config_changes_disks = [] - config_new_disks = [] - - # Find allowed userdevices. - vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) - - if self.module.params['disks']: - # Get the list of all disks. Filter out any CDs found. - vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] - - # The number of disks defined in module params has to be the same as or - # higher than the number of existing disks attached to the VM. - # We don't support removal or detachment of disks. - if len(self.module.params['disks']) < len(vm_disk_params_list): - self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" % - (len(self.module.params['disks']), len(vm_disk_params_list))) - - # Find the highest occupied disk userdevice. - if not vm_disk_params_list: - vm_disk_userdevice_highest = "-1" - else: - vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice'] - - for position in range(len(self.module.params['disks'])): - if position < len(vm_disk_params_list): - vm_disk_params = vm_disk_params_list[position] - else: - vm_disk_params = None - - disk_params = self.module.params['disks'][position] - - disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position) - - disk_name = disk_params.get('name') - - if disk_name is not None and not disk_name: - self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position) - - # If this is an existing disk. - if vm_disk_params and vm_disk_params['VDI']: - disk_changes = [] - - if disk_name and disk_name != vm_disk_params['VDI']['name_label']: - disk_changes.append('name') - - disk_name_desc = disk_params.get('name_desc') - - if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']: - disk_changes.append('name_desc') - - if disk_size: - if disk_size > int(vm_disk_params['VDI']['virtual_size']): - disk_changes.append('size') - need_poweredoff = True - elif disk_size < int(vm_disk_params['VDI']['virtual_size']): - self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). " - "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size'])) - - config_changes_disks.append(disk_changes) - # If this is a new disk. - else: - if not disk_size: - self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position) - - disk_sr_uuid = disk_params.get('sr_uuid') - disk_sr = disk_params.get('sr') - - if disk_sr_uuid is not None or disk_sr is not None: - # Check existence only. Ignore return value. - get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, - msg_prefix="VM check disks[%s]: " % position) - elif self.default_sr_ref == 'OpaqueRef:NULL': - self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position) - - if not vbd_userdevices_allowed: - self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!"
% position) - - disk_userdevice = None - - # We need to place a new disk right above the highest - # placed existing disk to maintain relative disk - # positions pairable with disk specifications in - # module params. That place must not be occupied by - # some other device like CD-ROM. - for userdevice in vbd_userdevices_allowed: - if int(userdevice) > int(vm_disk_userdevice_highest): - disk_userdevice = userdevice - vbd_userdevices_allowed.remove(userdevice) - vm_disk_userdevice_highest = userdevice - break - - # If no place was found. - if disk_userdevice is None: - # Highest occupied place could be a CD-ROM device - # so we have to include all devices regardless of - # type when calculating out-of-bound position. - disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1) - self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice)) - - # For new disks we only track their position. - config_new_disks.append((position, disk_userdevice)) - - # We should append config_changes_disks to config_changes only - # if there is at least one changed disk, else skip. - for disk_change in config_changes_disks: - if disk_change: - config_changes.append({"disks_changed": config_changes_disks}) - break - - if config_new_disks: - config_changes.append({"disks_new": config_new_disks}) - - config_changes_cdrom = [] - - if self.module.params['cdrom']: - # Get the list of all CD-ROMs. Filter out any regular disks - # found. If we found no existing CD-ROM, we will create it - # later, else take the first one found. - vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] - - # If no existing CD-ROM is found, we will need to add one. - # We need to check if there is any userdevice allowed. - if not vm_cdrom_params_list and not vbd_userdevices_allowed: - self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!") - - cdrom_type = self.module.params['cdrom'].get('type') - cdrom_iso_name = self.module.params['cdrom'].get('iso_name') - - # If cdrom.iso_name is specified but cdrom.type is not, - # then set cdrom.type to 'iso', unless cdrom.iso_name is - # an empty string, in which case set cdrom.type to 'none'. - if not cdrom_type: - if cdrom_iso_name: - cdrom_type = "iso" - elif cdrom_iso_name is not None: - cdrom_type = "none" - - self.module.params['cdrom']['type'] = cdrom_type - - # If type changed. - if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])): - config_changes_cdrom.append('type') - - if cdrom_type == "iso": - # Check if ISO exists. - # Check existence only. Ignore return value. - get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True, - msg_prefix="VM check cdrom.iso_name: ") - - # Is ISO image changed? - if (cdrom_iso_name and - (not vm_cdrom_params_list or - not vm_cdrom_params_list[0]['VDI'] or - cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])): - config_changes_cdrom.append('iso_name') - - if config_changes_cdrom: - config_changes.append({"cdrom": config_changes_cdrom}) - - config_changes_networks = [] - config_new_networks = [] - - # Find allowed devices. - vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref) - - if self.module.params['networks']: - # The number of VIFs defined in module params has to be the same as or - # higher than the number of existing VIFs attached to the VM. - # We don't support removal of VIFs.
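- # (For example, a VM that already has 2 VIFs needs at least 2 entries in
- # the networks parameter; any extra entries describe new VIFs to add.)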
- if len(self.module.params['networks']) < len(self.vm_params['VIFs']): - self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" % - (len(self.module.params['networks']), len(self.vm_params['VIFs']))) - - # Find the highest occupied device. - if not self.vm_params['VIFs']: - vif_device_highest = "-1" - else: - vif_device_highest = self.vm_params['VIFs'][-1]['device'] - - for position in range(len(self.module.params['networks'])): - if position < len(self.vm_params['VIFs']): - vm_vif_params = self.vm_params['VIFs'][position] - else: - vm_vif_params = None - - network_params = self.module.params['networks'][position] - - network_name = network_params.get('name') - - if network_name is not None and not network_name: - self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position) - - if network_name: - # Check existence only. Ignore return value. - get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True, - msg_prefix="VM check networks[%s]: " % position) - - network_mac = network_params.get('mac') - - if network_mac is not None: - network_mac = network_mac.lower() - - if not is_mac(network_mac): - self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac)) - - # IPv4 reconfiguration. - network_type = network_params.get('type') - network_ip = network_params.get('ip') - network_netmask = network_params.get('netmask') - network_prefix = None - - # If networks.ip is specified and networks.type is not, - # then set networks.type to 'static'. - if not network_type and network_ip: - network_type = "static" - - # XenServer natively supports only 'none' and 'static' - # types with 'none' being the same as 'dhcp'. - if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp": - network_type = "none" - - if network_type and network_type == "static": - if network_ip is not None: - network_ip_split = network_ip.split('/') - network_ip = network_ip_split[0] - - if network_ip and not is_valid_ip_addr(network_ip): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip)) - - if len(network_ip_split) > 1: - network_prefix = network_ip_split[1] - - if not is_valid_ip_prefix(network_prefix): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix)) - - if network_netmask is not None: - if not is_valid_ip_netmask(network_netmask): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask)) - - network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True) - elif network_prefix is not None: - network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True) - - # If any parameter is overridden at this point, update it. - if network_type: - network_params['type'] = network_type - - if network_ip: - network_params['ip'] = network_ip - - if network_netmask: - network_params['netmask'] = network_netmask - - if network_prefix: - network_params['prefix'] = network_prefix - - network_gateway = network_params.get('gateway') - - # Gateway can be an empty string (when removing gateway - # configuration) but if it is not, it should be validated.
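- # (For example, gateway: '' in module params clears the gateway
- # configuration, so the validation below is skipped for it.)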
- if network_gateway and not is_valid_ip_addr(network_gateway): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway)) - - # IPv6 reconfiguration. - network_type6 = network_params.get('type6') - network_ip6 = network_params.get('ip6') - network_prefix6 = None - - # If networks.ip6 is specified and networks.type6 is not, - # then set networks.type6 to 'static'. - if not network_type6 and network_ip6: - network_type6 = "static" - - # XenServer natively supports only 'none' and 'static' - # type with 'none' being the same as 'dhcp'. - if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp": - network_type6 = "none" - - if network_type6 and network_type6 == "static": - if network_ip6 is not None: - network_ip6_split = network_ip6.split('/') - network_ip6 = network_ip6_split[0] - - if network_ip6 and not is_valid_ip6_addr(network_ip6): - self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6)) - - if len(network_ip6_split) > 1: - network_prefix6 = network_ip6_split[1] - - if not is_valid_ip6_prefix(network_prefix6): - self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6)) - - # If any parameter is overridden at this point, update it. - if network_type6: - network_params['type6'] = network_type6 - - if network_ip6: - network_params['ip6'] = network_ip6 - - if network_prefix6: - network_params['prefix6'] = network_prefix6 - - network_gateway6 = network_params.get('gateway6') - - # Gateway can be an empty string (when removing gateway - # configuration) but if it is not, it should be validated. - if network_gateway6 and not is_valid_ip6_addr(network_gateway6): - self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6)) - - # If this is an existing VIF. 
- if vm_vif_params and vm_vif_params['network']: - network_changes = [] - - if network_name and network_name != vm_vif_params['network']['name_label']: - network_changes.append('name') - - if network_mac and network_mac != vm_vif_params['MAC'].lower(): - network_changes.append('mac') - - if self.vm_params['customization_agent'] == "native": - if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower(): - network_changes.append('type') - - if network_type and network_type == "static": - if network_ip and (not vm_vif_params['ipv4_addresses'] or - not vm_vif_params['ipv4_addresses'][0] or - network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]): - network_changes.append('ip') - - if network_prefix and (not vm_vif_params['ipv4_addresses'] or - not vm_vif_params['ipv4_addresses'][0] or - network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]): - network_changes.append('prefix') - network_changes.append('netmask') - - if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']: - network_changes.append('gateway') - - if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower(): - network_changes.append('type6') - - if network_type6 and network_type6 == "static": - if network_ip6 and (not vm_vif_params['ipv6_addresses'] or - not vm_vif_params['ipv6_addresses'][0] or - network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]): - network_changes.append('ip6') - - if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or - not vm_vif_params['ipv6_addresses'][0] or - network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]): - network_changes.append('prefix6') - - if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']: - network_changes.append('gateway6') - - elif self.vm_params['customization_agent'] == "custom": - vm_xenstore_data = self.vm_params['xenstore_data'] - - if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"): - network_changes.append('type') - need_poweredoff = True - - if network_type and network_type == "static": - if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""): - network_changes.append('ip') - need_poweredoff = True - - if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""): - network_changes.append('prefix') - network_changes.append('netmask') - need_poweredoff = True - - if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' % - vm_vif_params['device'], ""): - network_changes.append('gateway') - need_poweredoff = True - - if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"): - network_changes.append('type6') - need_poweredoff = True - - if network_type6 and network_type6 == "static": - if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""): - network_changes.append('ip6') - need_poweredoff = True - - if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""): - network_changes.append('prefix6') - need_poweredoff = True - - if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' % - vm_vif_params['device'], ""): - network_changes.append('gateway6') - 
need_poweredoff = True - - config_changes_networks.append(network_changes) - # If this is a new VIF. - else: - if not network_name: - self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position) - - if network_type and network_type == "static" and network_ip and not network_netmask: - self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position) - - if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6: - self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position) - - # Restart is needed if we are adding a new network - # interface with IP/gateway parameters specified - # and custom agent is used. - if self.vm_params['customization_agent'] == "custom": - for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: - if network_params.get(parameter): - need_poweredoff = True - break - - if not vif_devices_allowed: - self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position) - - # We need to place a new network interface right above the - # highest placed existing interface to maintain relative - # positions pairable with network interface specifications - # in module params. - vif_device = str(int(vif_device_highest) + 1) - - if vif_device not in vif_devices_allowed: - self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device)) - - vif_devices_allowed.remove(vif_device) - vif_device_highest = vif_device - - # For new VIFs we only track their position. - config_new_networks.append((position, vif_device)) - - # We should append config_changes_networks to config_changes only - # if there is at least one changed network, else skip. - for network_change in config_changes_networks: - if network_change: - config_changes.append({"networks_changed": config_changes_networks}) - break - - if config_new_networks: - config_changes.append({"networks_new": config_new_networks}) - - config_changes_custom_params = [] - - if self.module.params['custom_params']: - for position in range(len(self.module.params['custom_params'])): - custom_param = self.module.params['custom_params'][position] - - custom_param_key = custom_param['key'] - custom_param_value = custom_param['value'] - - if custom_param_key not in self.vm_params: - self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key)) - - if custom_param_value != self.vm_params[custom_param_key]: - # We only need to track custom param position. - config_changes_custom_params.append(position) - - if config_changes_custom_params: - config_changes.append({"custom_params": config_changes_custom_params}) - - if need_poweredoff: - config_changes.append('need_poweredoff') - - return config_changes - - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) - - def get_normalized_disk_size(self, disk_params, msg_prefix=""): - """Parses disk size parameters and returns disk size in bytes. - - This method tries to parse disk size module parameters. It fails - with an error message if size cannot be parsed. - - Args: - disk_params (dict): A dictionary with disk parameters. - msg_prefix (str): A string that error messages should be prefixed - with (default: "").
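- Example (illustrative values): disk_params of {'size': '10.5 gb'} is
- parsed as int(10.5 * 1024**3) = 11274289152 bytes.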
- - Returns: - int: disk size in bytes if disk size is successfully parsed or - None if no disk size parameters were found. - """ - # There should be only single size spec but we make a list of all size - # specs just in case. Priority is given to 'size' but if not found, we - # check for 'size_tb', 'size_gb', 'size_mb' etc. and use first one - # found. - disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')] - - if disk_size_spec: - try: - # size - if "size" in disk_size_spec: - size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)') - disk_size_m = size_regex.match(disk_params['size']) - - if disk_size_m: - size = disk_size_m.group(1) - unit = disk_size_m.group(2) - else: - raise ValueError - # size_tb, size_gb, size_mb, size_kb, size_b - else: - size = disk_params[disk_size_spec[0]] - unit = disk_size_spec[0].split('_')[-1] - - if not unit: - unit = "b" - else: - unit = unit.lower() - - if re.match(r'\d+\.\d+', size): - # We found float value in string, let's typecast it. - if unit == "b": - # If we found float but unit is bytes, we get the integer part only. - size = int(float(size)) - else: - size = float(size) - else: - # We found int value in string, let's typecast it. - size = int(size) - - if not size or size < 0: - raise ValueError - - except (TypeError, ValueError, NameError): - # Common failure - self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix) - - disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0) - - if unit in disk_units: - return int(size * (1024 ** disk_units[unit])) - else: - self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." % - (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key])))) - else: - return None - - @staticmethod - def get_cdrom_type(vm_cdrom_params): - """Returns VM CD-ROM type.""" - # TODO: implement support for detecting type host. No server to test - # this on at the moment. 
- if vm_cdrom_params['empty']: - return "none" - else: - return "iso" - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', - choices=['present', 'absent', 'poweredon']), - name=dict(type='str', aliases=['name_label']), - name_desc=dict(type='str'), - uuid=dict(type='str'), - template=dict(type='str', aliases=['template_src']), - template_uuid=dict(type='str'), - is_template=dict(type='bool', default=False), - folder=dict(type='str'), - hardware=dict( - type='dict', - options=dict( - num_cpus=dict(type='int'), - num_cpu_cores_per_socket=dict(type='int'), - memory_mb=dict(type='int'), - ), - ), - disks=dict( - type='list', - elements='dict', - options=dict( - size=dict(type='str'), - size_tb=dict(type='str'), - size_gb=dict(type='str'), - size_mb=dict(type='str'), - size_kb=dict(type='str'), - size_b=dict(type='str'), - name=dict(type='str', aliases=['name_label']), - name_desc=dict(type='str'), - sr=dict(type='str'), - sr_uuid=dict(type='str'), - ), - aliases=['disk'], - mutually_exclusive=[ - ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'], - ['sr', 'sr_uuid'], - ], - ), - cdrom=dict( - type='dict', - options=dict( - type=dict(type='str', choices=['none', 'iso']), - iso_name=dict(type='str'), - ), - required_if=[ - ['type', 'iso', ['iso_name']], - ], - ), - networks=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', aliases=['name_label']), - mac=dict(type='str'), - type=dict(type='str', choices=['none', 'dhcp', 'static']), - ip=dict(type='str'), - netmask=dict(type='str'), - gateway=dict(type='str'), - type6=dict(type='str', choices=['none', 'dhcp', 'static']), - ip6=dict(type='str'), - gateway6=dict(type='str'), - ), - aliases=['network'], - required_if=[ - ['type', 'static', ['ip']], - ['type6', 'static', ['ip6']], - ], - ), - home_server=dict(type='str'), - custom_params=dict( - type='list', - elements='dict', - options=dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='raw', required=True), - ), - ), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), - linked_clone=dict(type='bool', default=False), - force=dict(type='bool', default=False), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - mutually_exclusive=[ - ['template', 'template_uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - vm = XenServerVM(module) - - # Find existing VM - if vm.exists(): - if module.params['state'] == "absent": - vm.destroy() - result['changed'] = True - elif module.params['state'] == "present": - config_changes = vm.reconfigure() - - if config_changes: - result['changed'] = True - - # Make new disk and network changes more user friendly - # and informative. 
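# A condensed, hypothetical sketch of the validation pattern the
# argument_spec above relies on: options declared with type='list' and
# elements='dict' carry their own sub-spec, and constraints such as
# mutually_exclusive are applied by AnsibleModule to every list element
# separately. Illustration only, not part of the module.
from ansible.module_utils.basic import AnsibleModule

def demo_main():
    module = AnsibleModule(
        argument_spec=dict(
            disks=dict(
                type='list',
                elements='dict',
                options=dict(
                    size=dict(type='str'),
                    size_gb=dict(type='str'),
                    sr=dict(type='str'),
                    sr_uuid=dict(type='str'),
                ),
                # At most one size spec and one SR reference per disk.
                mutually_exclusive=[
                    ['size', 'size_gb'],
                    ['sr', 'sr_uuid'],
                ],
            ),
        ),
        supports_check_mode=True,
    )
    module.exit_json(changed=False, disks=module.params['disks'])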
- for change in config_changes: - if isinstance(change, dict): - if change.get('disks_new'): - disks_new = [] - - for position, userdevice in change['disks_new']: - disk_new_params = {"position": position, "vbd_userdevice": userdevice} - disk_params = module.params['disks'][position] - - for k in disk_params.keys(): - if disk_params[k] is not None: - disk_new_params[k] = disk_params[k] - - disks_new.append(disk_new_params) - - if disks_new: - change['disks_new'] = disks_new - - elif change.get('networks_new'): - networks_new = [] - - for position, device in change['networks_new']: - network_new_params = {"position": position, "vif_device": device} - network_params = module.params['networks'][position] - - for k in network_params.keys(): - if network_params[k] is not None: - network_new_params[k] = network_params[k] - - networks_new.append(network_new_params) - - if networks_new: - change['networks_new'] = networks_new - - result['changes'] = config_changes - - elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]: - result['changed'] = vm.set_power_state(module.params['state']) - elif module.params['state'] != "absent": - vm.deploy() - result['changed'] = True - - if module.params['wait_for_ip_address'] and module.params['state'] != "absent": - vm.wait_for_ip_address() - - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/plugins/modules/cloud/xenserver/xenserver_guest_info.py deleted file mode 100644 index a2e777253e..0000000000 --- a/plugins/modules/cloud/xenserver/xenserver_guest_info.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest_info -short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to gather essential VM facts. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- This module was called C(xenserver_guest_facts) before Ansible 2.9. 
The usage did not change. -requirements: -- python >= 2.6 -- XenAPI -options: - name: - description: - - Name of the VM to gather facts from. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - uuid: - description: - - UUID of the VM to gather fact of. This is XenServer's unique identifier. - - It is required if name is not unique. - type: str -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Gather facts - community.general.xenserver_guest_info: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_11 - delegate_to: localhost - register: facts -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -''' - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to AnsibleModule object. 
- """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") - self.gather_params() - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - # Module will exit with an error message if no VM is found. - vm = XenServerVM(module) - - # Gather facts. - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py deleted file mode 100644 index 4a195ff50a..0000000000 --- a/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest_powerstate -short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -requirements: -- python >= 2.6 -- XenAPI -options: - state: - description: - - Specify the state VM should be in. - - If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned. - - If C(state) is set to C(present), then VM is just checked for existence and facts are returned. 
- type: str - default: present - choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] - name: - description: - - Name of the VM to manage. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - uuid: - description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. - type: str - wait_for_ip_address: - description: - - Wait until XenServer detects an IP address for the VM. - - This requires XenServer Tools to be preinstalled on the VM to work properly. - type: bool - default: no - state_change_timeout: - description: - - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).' - - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - - In case of timeout, module will generate an error message. - type: int - default: 0 -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Power on VM - community.general.xenserver_guest_powerstate: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_11 - state: powered-on - delegate_to: localhost - register: facts -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "windows-template-testing-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "windows-template-testing-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "windows-template-testing", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -''' - -import 
re - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to Ansible module object. - """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") - self.gather_params() - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - def set_power_state(self, power_state): - """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) - - # If state has changed, update vm_params. - if state_changed: - self.vm_params['power_state'] = current_state.capitalize() - - return state_changed - - def wait_for_ip_address(self): - """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', - choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - # Module will exit with an error message if no VM is found. - vm = XenServerVM(module) - - # Set VM power state. 
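# A hypothetical sketch of how the module states above map onto XAPI calls;
# the real logic lives in set_vm_power_state() in module_utils and also
# handles timeouts, current-state checks and resuming suspended VMs
# (VM.resume). Illustration only.
XAPI_POWER_CALLS = {
    'powered-on': lambda xenapi, ref: xenapi.VM.start(ref, False, False),  # start_paused, force
    'powered-off': lambda xenapi, ref: xenapi.VM.hard_shutdown(ref),
    'shutdown-guest': lambda xenapi, ref: xenapi.VM.clean_shutdown(ref),
    'restarted': lambda xenapi, ref: xenapi.VM.hard_reboot(ref),
    'reboot-guest': lambda xenapi, ref: xenapi.VM.clean_reboot(ref),
    'suspended': lambda xenapi, ref: xenapi.VM.suspend(ref),
}

def sketch_set_power_state(session, vm_ref, state):
    XAPI_POWER_CALLS[state](session.xenapi, vm_ref)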
- if module.params['state'] != "present": - result['changed'] = vm.set_power_state(module.params['state']) - - if module.params['wait_for_ip_address']: - vm.wait_for_ip_address() - - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py deleted file mode 120000 index 278240056a..0000000000 --- a/plugins/modules/cloud_init_data_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/cloud_init_data_facts.py \ No newline at end of file diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py new file mode 100644 index 0000000000..8da427fa2e --- /dev/null +++ b/plugins/modules/cloud_init_data_facts.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# Copyright (c) 2018, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: cloud_init_data_facts +short_description: Retrieve facts of cloud-init +description: + - Gathers facts by reading the C(status.json) and C(result.json) of cloud-init. +author: René Moser (@resmo) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + filter: + description: + - Filter facts. + type: str + choices: [status, result] +notes: + - See http://cloudinit.readthedocs.io/ for more information about cloud-init. +""" + +EXAMPLES = r""" +- name: Gather all facts of cloud init + community.general.cloud_init_data_facts: + register: result + +- ansible.builtin.debug: + var: result + +- name: Wait for cloud init to finish + community.general.cloud_init_data_facts: + filter: status + register: res + until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" + retries: 50 + delay: 5 +""" + +RETURN = r""" +cloud_init_data_facts: + description: Facts of result and status. 
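# A minimal standalone sketch of what the facts above are built from:
# cloud-init writes status.json and result.json under /var/lib/cloud/data
# (the module's CLOUD_INIT_PATH), and status.json's v1.stage is empty once
# all stages have finished, which is what the wait-until example above
# polls for. Path and keys follow the cloud-init documentation.
import json
import os

def cloud_init_finished(path="/var/lib/cloud/data/status.json"):
    if not os.path.exists(path):
        return False
    with open(path) as f:
        status = json.load(f)
    return not status.get("v1", {}).get("stage")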
+ returned: success + type: dict + sample: + { + "status": { + "v1": { + "datasource": "DataSourceCloudStack", + "errors": [] + } + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + } + } +""" + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + + +CLOUD_INIT_PATH = "/var/lib/cloud/data" + + +def gather_cloud_init_data_facts(module): + res = { + 'cloud_init_data_facts': dict() + } + + for i in ['result', 'status']: + filter = module.params.get('filter') + if filter is None or filter == i: + res['cloud_init_data_facts'][i] = dict() + json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') + + if os.path.exists(json_file): + with open(json_file, 'rb') as f: + contents = to_text(f.read(), errors='surrogate_or_strict') + + if contents: + res['cloud_init_data_facts'][i] = module.from_json(contents) + return res + + +def main(): + module = AnsibleModule( + argument_spec=dict( + filter=dict(choices=['result', 'status']), + ), + supports_check_mode=True, + ) + + facts = gather_cloud_init_data_facts(module) + result = dict(changed=False, ansible_facts=facts, **facts) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py deleted file mode 120000 index 164171df24..0000000000 --- a/plugins/modules/cloudflare_dns.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/cloudflare_dns.py \ No newline at end of file diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py new file mode 100644 index 0000000000..df10d0a0b6 --- /dev/null +++ b/plugins/modules/cloudflare_dns.py @@ -0,0 +1,999 @@ +#!/usr/bin/python + +# Copyright (c) 2016 Michael Gruener +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: cloudflare_dns +author: + - Michael Gruener (@mgruener) +short_description: Manage Cloudflare DNS records +description: + - 'Manages DNS records using the Cloudflare API, see the docs: U(https://api.cloudflare.com/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_token: + description: + - API token. + - Required for API token authentication. + - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. + type: str + version_added: '0.2.0' + account_api_key: + description: + - Account API key. + - Required for API keys authentication. + - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + type: str + aliases: [account_api_token] + account_email: + description: + - Account email. Required for API keys authentication. 
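# A hedged sketch of the two authentication styles the options above
# describe, mirroring the headers the module builds internally in
# _cf_simple_api_call(): a bearer API token, or the legacy account API key
# plus account email.
def cloudflare_headers(api_token=None, account_email=None, account_api_key=None):
    if api_token:
        return {
            'Authorization': 'Bearer {0}'.format(api_token),
            'Content-Type': 'application/json',
        }
    return {
        'X-Auth-Email': account_email,
        'X-Auth-Key': account_api_key,
        'Content-Type': 'application/json',
    }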
+ type: str + algorithm: + description: + - Algorithm number. + - Required for O(type=DS) and O(type=SSHFP) when O(state=present). + type: int + cert_usage: + description: + - Certificate usage number. + - Required for O(type=TLSA) when O(state=present). + type: int + choices: [0, 1, 2, 3] + comment: + description: + - Comments or notes about the DNS record. + type: str + version_added: 10.1.0 + flag: + description: + - Issuer Critical Flag. + - Required for O(type=CAA) when O(state=present). + type: int + choices: [0, 1] + version_added: 8.0.0 + tag: + description: + - CAA issue restriction. + - Required for O(type=CAA) when O(state=present). + type: str + choices: [issue, issuewild, iodef] + version_added: 8.0.0 + hash_type: + description: + - Hash type number. + - Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present). + type: int + choices: [1, 2] + key_tag: + description: + - DNSSEC key tag. + - Needed for O(type=DS) when O(state=present). + type: int + port: + description: + - Service port. + - Required for O(type=SRV) and O(type=TLSA). + type: int + priority: + description: + - Record priority. + - Required for O(type=MX) and O(type=SRV). + default: 1 + type: int + proto: + description: + - Service protocol. Required for O(type=SRV) and O(type=TLSA). + - Common values are TCP and UDP. + type: str + proxied: + description: + - Proxy through Cloudflare network or just use DNS. + type: bool + default: false + record: + description: + - Record to add. + - Required if O(state=present). + - Default is V(@) (that is, the zone name). + type: str + default: '@' + aliases: [name] + selector: + description: + - Selector number. + - Required for O(type=TLSA) when O(state=present). + choices: [0, 1] + type: int + service: + description: + - Record service. + - Required for O(type=SRV). + type: str + solo: + description: + - Whether the record should be the only one for that record type and record name. + - Only use with O(state=present). + - This deletes all other records with the same record name and type. + type: bool + state: + description: + - Whether the record(s) should exist or not. + type: str + choices: [absent, present] + default: present + tags: + description: + - Custom tags for the DNS record. + type: list + elements: str + version_added: 10.1.0 + timeout: + description: + - Timeout for Cloudflare API calls. + type: int + default: 30 + ttl: + description: + - The TTL to give the new record. + - Must be between V(120) and V(2,147,483,647) seconds, or V(1) for automatic. + type: int + default: 1 + type: + description: + - The type of DNS record to create. Required if O(state=present). + - Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by + CloudFlare. + - Support for V(PTR) has been added in community.general 11.1.0. + type: str + choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT, PTR] + value: + description: + - The record value. + - Required for O(state=present). + type: str + aliases: [content] + weight: + description: + - Service weight. + - Required for O(type=SRV). + type: int + default: 1 + zone: + description: + - The name of the Zone to work with (for example V(example.com)). + - The Zone must already exist. 
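# A minimal sketch of the SRV naming convention implied by the service and
# proto options above and implemented in CloudflareAPI.__init__(): both
# parts get a leading underscore and the zone is appended when missing.
# The helper name is hypothetical.
def srv_record_name(service, proto, record, zone):
    service = service if service.startswith('_') else '_' + service
    proto = proto if proto.startswith('_') else '_' + proto
    if not record.endswith(zone):
        record = '{0}.{1}'.format(record, zone)
    return '.'.join([service, proto, record]).lower()

# srv_record_name('foo', 'tcp', 'example.net', 'example.net')
# -> '_foo._tcp.example.net'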
+ type: str + required: true + aliases: [domain] +""" + +EXAMPLES = r""" +- name: Create a test.example.net A record to point to 127.0.0.1 + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + account_email: test@example.com + account_api_key: dummyapitoken + register: record + +- name: Create a record using api token + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + api_token: dummyapitoken + +- name: Create a record with comment and tags + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + comment: Local test website + tags: + - test + - local + api_token: dummyapitoken + +- name: Create a example.net CNAME record to example.com + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Change its TTL + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + ttl: 600 + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Delete the record + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + account_email: test@example.com + account_api_key: dummyapitoken + state: absent + +- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + proxied: true + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +# This deletes all other TXT records named "test.example.net" +- name: Create TXT record "test.example.net" with value "unique value" + community.general.cloudflare_dns: + domain: example.net + record: test + type: TXT + value: unique value + solo: true + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Create an SRV record _foo._tcp.example.net + community.general.cloudflare_dns: + domain: example.net + service: foo + proto: tcp + port: 3500 + priority: 10 + weight: 20 + type: SRV + value: fooserver.example.net + +- name: Create a SSHFP record login.example.com + community.general.cloudflare_dns: + zone: example.com + record: login + type: SSHFP + algorithm: 4 + hash_type: 2 + value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1 + +- name: Create a TLSA record _25._tcp.mail.example.com + community.general.cloudflare_dns: + zone: example.com + record: mail + port: 25 + proto: tcp + type: TLSA + cert_usage: 3 + selector: 1 + hash_type: 1 + value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 + +- name: Create a CAA record subdomain.example.com + community.general.cloudflare_dns: + zone: example.com + record: subdomain + type: CAA + flag: 0 + tag: issue + value: ca.example.com + +- name: Create a DS record for subdomain.example.com + community.general.cloudflare_dns: + zone: example.com + record: subdomain + type: DS + key_tag: 5464 + algorithm: 8 + hash_type: 2 + value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB + +- name: Create PTR record "1.2.0.192.in-addr.arpa" with value "test.example.com" + community.general.cloudflare_dns: + zone: 2.0.192.in-addr.arpa + record: 1 + type: PTR + value: test.example.com + state: present +""" + +RETURN = r""" +record: + description: A dictionary containing the record data. 
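# A hedged, standalone sketch of the zone lookup that backs the examples
# above: before touching records, the module resolves the zone name to a
# Cloudflare zone ID via GET /zones?name=<zone> and fails unless exactly
# one zone matches. The module itself uses Ansible's fetch_url(); plain
# urllib is used here only to keep the sketch self-contained.
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

def lookup_zone_id(zone, api_token):
    url = 'https://api.cloudflare.com/client/v4/zones?{0}'.format(urlencode({'name': zone}))
    req = Request(url, headers={'Authorization': 'Bearer ' + api_token})
    zones = json.load(urlopen(req))['result']
    if len(zones) != 1:
        raise LookupError('expected exactly one zone named {0}'.format(zone))
    return zones[0]['id']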
+ returned: success, except on record deletion + type: complex + contains: + comment: + description: Comments or notes about the DNS record. + returned: success + type: str + sample: Domain verification record + version_added: 10.1.0 + comment_modified_on: + description: When the record comment was last modified. Omitted if there is no comment. + returned: success + type: str + sample: "2024-01-01T05:20:00.12345Z" + version_added: 10.1.0 + content: + description: The record content (details depend on record type). + returned: success + type: str + sample: 192.0.2.91 + created_on: + description: The record creation date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + data: + description: Additional record data. + returned: success, if type is SRV, DS, SSHFP TLSA or CAA + type: dict + sample: + { + "name": "jabber", + "port": 8080, + "priority": 10, + "proto": "_tcp", + "service": "_xmpp", + "target": "jabberhost.sample.com", + "weight": 5 + } + id: + description: The record ID. + returned: success + type: str + sample: f9efb0549e96abcb750de63b38c9576e + locked: + description: No documentation available. + returned: success + type: bool + sample: false + meta: + description: Extra Cloudflare-specific information about the record. + returned: success + type: dict + sample: {"auto_added": false} + modified_on: + description: Record modification date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + name: + description: The record name as FQDN (including _service and _proto for SRV). + returned: success + type: str + sample: www.sample.com + priority: + description: Priority of the MX record. + returned: success, if type is MX + type: int + sample: 10 + proxiable: + description: Whether this record can be proxied through Cloudflare. + returned: success + type: bool + sample: false + proxied: + description: Whether the record is proxied through Cloudflare. + returned: success + type: bool + sample: false + tags: + description: Custom tags for the DNS record. + returned: success + type: list + elements: str + sample: ["production", "app"] + version_added: 10.1.0 + tags_modified_on: + description: When the record tags were last modified. Omitted if there are no tags. + returned: success + type: str + sample: "2025-01-01T05:20:00.12345Z" + version_added: 10.1.0 + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + zone_id: + description: The ID of the zone containing the record. + returned: success + type: str + sample: abcede0bf9f0066f94029d2e6b73856a + zone_name: + description: The name of the zone containing the record. 
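# A minimal sketch of the page-following scheme used by _cf_api_call()
# further below: Cloudflare list responses carry a result_info block, and
# later pages are fetched by incrementing the page query parameter until
# total_pages is reached. fetch_page() is a hypothetical callable returning
# (results, result_info) for one page.
def fetch_all(fetch_page):
    data, info = fetch_page(1)
    page = 2
    while page <= info['total_pages']:
        chunk, info = fetch_page(page)
        data += chunk
        page += 1
    return data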
+ returned: success + type: str + sample: sample.com +""" + +import json +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.urls import fetch_url + + +def lowercase_string(param): + return param.lower() if isinstance(param, str) else param + + +def join_str(sep, *args): + return sep.join([str(arg) for arg in args]) + + +class CloudflareAPI(object): + + cf_api_endpoint = 'https://api.cloudflare.com/client/v4' + changed = False + + def __init__(self, module): + self.module = module + self.api_token = module.params['api_token'] + self.account_api_key = module.params['account_api_key'] + self.account_email = module.params['account_email'] + self.algorithm = module.params['algorithm'] + self.cert_usage = module.params['cert_usage'] + self.comment = module.params['comment'] + self.hash_type = module.params['hash_type'] + self.flag = module.params['flag'] + self.tag = module.params['tag'] + self.tags = module.params['tags'] + self.key_tag = module.params['key_tag'] + self.port = module.params['port'] + self.priority = module.params['priority'] + self.proto = lowercase_string(module.params['proto']) + self.proxied = module.params['proxied'] + self.selector = module.params['selector'] + self.record = lowercase_string(module.params['record']) + self.service = lowercase_string(module.params['service']) + self.is_solo = module.params['solo'] + self.state = module.params['state'] + self.timeout = module.params['timeout'] + self.ttl = module.params['ttl'] + self.type = module.params['type'] + self.value = module.params['value'] + self.weight = module.params['weight'] + self.zone = lowercase_string(module.params['zone']) + + if self.record == '@': + self.record = self.zone + + if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None): + self.value = self.value.rstrip('.').lower() + + if (self.type == 'AAAA') and (self.value is not None): + self.value = self.value.lower() + + if self.type == 'SRV': + if (self.proto is not None) and (not self.proto.startswith('_')): + self.proto = '_{0}'.format(self.proto) + if (self.service is not None) and (not self.service.startswith('_')): + self.service = '_{0}'.format(self.service) + + if self.type == 'TLSA': + if (self.proto is not None) and (not self.proto.startswith('_')): + self.proto = '_{0}'.format(self.proto) + if (self.port is not None): + self.port = '_{0}'.format(self.port) + + if not self.record.endswith(self.zone): + self.record = join_str('.', self.record, self.zone) + + if self.type == 'DS': + if self.record == self.zone: + self.module.fail_json(msg="DS records only apply to subdomains.") + + def _cf_simple_api_call(self, api_call, method='GET', payload=None): + if self.api_token: + headers = { + 'Authorization': 'Bearer {0}'.format(self.api_token), + 'Content-Type': 'application/json', + } + else: + headers = { + 'X-Auth-Email': self.account_email, + 'X-Auth-Key': self.account_api_key, + 'Content-Type': 'application/json', + } + data = None + if payload: + try: + data = json.dumps(payload) + except Exception as e: + self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + + resp, info = fetch_url(self.module, + self.cf_api_endpoint + api_call, + headers=headers, + data=data, + method=method, + timeout=self.timeout) + + if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]: + self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP 
code {1}: {2}".format(api_call, info['status'], info.get('msg'))) + + error_msg = '' + if info['status'] == 401: + # Unauthorized + error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 403: + # Forbidden + error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 429: + # Too many requests + error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 405: + # Method not allowed + error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 415: + # Unsupported Media Type + error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 400: + # Bad Request + error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + + result = None + try: + content = resp.read() + except AttributeError: + content = None + + if not content: + if info['body']: + content = info['body'] + else: + error_msg += "; The API response was empty" + + if content: + try: + result = json.loads(to_text(content, errors='surrogate_or_strict')) + except (getattr(json, 'JSONDecodeError', ValueError)) as e: + error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + + # Without a valid/parsed JSON response no more error processing can be done + if result is None: + self.module.fail_json(msg=error_msg) + + if 'success' not in result: + error_msg += "; Unexpected error details: {0}".format(result.get('error')) + self.module.fail_json(msg=error_msg) + + if not result['success']: + error_msg += "; Error details: " + for error in result['errors']: + error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message']) + if 'error_chain' in error: + for chain_error in error['error_chain']: + error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message']) + self.module.fail_json(msg=error_msg) + + return result, info['status'] + + def _cf_api_call(self, api_call, method='GET', payload=None): + result, status = self._cf_simple_api_call(api_call, method, payload) + + data = result['result'] + + if 'result_info' in result: + pagination = result['result_info'] + if pagination['total_pages'] > 1: + next_page = int(pagination['page']) + 1 + parameters = ['page={0}'.format(next_page)] + # strip "page" parameter from call parameters (if there are any) + if '?' 
in api_call: + raw_api_call, query = api_call.split('?', 1) + parameters += [param for param in query.split('&') if not param.startswith('page')] + else: + raw_api_call = api_call + while next_page <= pagination['total_pages']: + raw_api_call += '?{0}'.format('&'.join(parameters)) + result, status = self._cf_simple_api_call(raw_api_call, method, payload) + data += result['result'] + next_page += 1 + + return data, status + + def _get_zone_id(self, zone=None): + if not zone: + zone = self.zone + + zones = self.get_zones(zone) + if len(zones) > 1: + self.module.fail_json(msg="More than one zone matches {0}".format(zone)) + + if len(zones) < 1: + self.module.fail_json(msg="No zone found with name {0}".format(zone)) + + return zones[0]['id'] + + def get_zones(self, name=None): + if not name: + name = self.zone + param = '' + if name: + param = '?{0}'.format(urlencode({'name': name})) + zones, status = self._cf_api_call('/zones{0}'.format(param)) + return zones + + def get_dns_records(self, zone_name=None, type=None, record=None, value=''): + if not zone_name: + zone_name = self.zone + if not type: + type = self.type + if not record: + record = self.record + # necessary because None as value means to override user + # set module value + if (not value) and (value is not None): + value = self.value + + zone_id = self._get_zone_id() + api_call = '/zones/{0}/dns_records'.format(zone_id) + query = {} + if type: + query['type'] = type + if record: + query['name'] = record + if value: + query['content'] = value + if query: + api_call += '?{0}'.format(urlencode(query)) + + records, status = self._cf_api_call(api_call) + return records + + def delete_dns_records(self, solo): + records = [] + content = self.value + search_record = self.record + if self.type == 'SRV': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + elif self.type == 'DS': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) + elif self.type == 'SSHFP': + if not (self.value is None or self.value == ''): + content = join_str(' ', self.algorithm, self.hash_type, self.value.upper()) + elif self.type == 'TLSA': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) + search_record = join_str('.', self.port, self.proto, self.record) + if solo: + search_value = None + else: + search_value = content + + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) + + for rr in records: + if solo: + if not ((rr['type'] == self.type) and (rr['name'] == search_record) and (rr['content'] == content)): + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') + else: + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') + return self.changed + + def ensure_dns_record(self): + search_value = self.value + search_record = self.record + new_record = None + + if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']: + if not self.value: + self.module.fail_json(msg="You must provide a non-empty value to create this record type") + + # there can only be one CNAME per record + # ignoring the 
value when searching for existing + # CNAME records allows us to update the value if it + # changes + if self.type == 'CNAME': + search_value = None + + new_record = { + "type": self.type, + "name": self.record, + "content": self.value, + "ttl": self.ttl + } + + if self.type in ['A', 'AAAA', 'CNAME']: + new_record["proxied"] = self.proxied + + if self.type == 'MX': + for attr in [self.priority, self.value]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide priority and a value to create this record type") + new_record = { + "type": self.type, + "name": self.record, + "content": self.value, + "priority": self.priority, + "ttl": self.ttl + } + + if self.type == 'SRV': + for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") + srv_data = { + "target": self.value, + "port": self.port, + "weight": self.weight, + "priority": self.priority, + } + + new_record = { + "type": self.type, + "name": join_str('.', self.service, self.proto, self.record), + "ttl": self.ttl, + 'data': srv_data, + } + search_value = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + + if self.type == 'DS': + for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") + ds_data = { + "key_tag": self.key_tag, + "algorithm": self.algorithm, + "digest_type": self.hash_type, + "digest": self.value, + } + new_record = { + "type": self.type, + "name": self.record, + 'data': ds_data, + "ttl": self.ttl, + } + search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) + + if self.type == 'SSHFP': + for attr in [self.algorithm, self.hash_type, self.value]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") + sshfp_data = { + "fingerprint": self.value.upper(), + "type": self.hash_type, + "algorithm": self.algorithm, + } + new_record = { + "type": self.type, + "name": self.record, + 'data': sshfp_data, + "ttl": self.ttl, + } + search_value = join_str(' ', self.algorithm, self.hash_type, self.value) + + if self.type == 'TLSA': + for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") + search_record = join_str('.', self.port, self.proto, self.record) + tlsa_data = { + "usage": self.cert_usage, + "selector": self.selector, + "matching_type": self.hash_type, + "certificate": self.value, + } + new_record = { + "type": self.type, + "name": search_record, + 'data': tlsa_data, + "ttl": self.ttl, + } + search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) + + if self.type == 'CAA': + for attr in [self.flag, self.tag, self.value]: + if attr == '': + self.module.fail_json(msg="You must provide flag, tag and a value to create this record type") + caa_data = { + "flags": self.flag, + "tag": self.tag, + "value": self.value, + } + new_record = { + "type": self.type, + "name": self.record, + 'data': caa_data, + "ttl": self.ttl, 
+ } + search_value = None + + new_record['comment'] = self.comment or None + new_record['tags'] = self.tags or [] + + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) + # in theory this should be impossible as cloudflare does not allow + # the creation of duplicate records but lets cover it anyways + if len(records) > 1: + # As Cloudflare API cannot filter record containing quotes + # CAA records must be compared locally + if self.type == 'CAA': + for rr in records: + if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']: + return rr, self.changed + else: + self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!") + # record already exists, check if it must be updated + if len(records) == 1: + cur_record = records[0] + do_update = False + if (self.ttl is not None) and (cur_record['ttl'] != self.ttl): + do_update = True + if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority): + do_update = True + if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied): + do_update = True + if ('data' in new_record) and ('data' in cur_record): + if cur_record['data'] != new_record['data']: + do_update = True + if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']): + do_update = True + if cur_record['comment'] != new_record['comment']: + do_update = True + if sorted(cur_record['tags']) != sorted(new_record['tags']): + do_update = True + if do_update: + if self.module.check_mode: + result = new_record + else: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record) + self.changed = True + return result, self.changed + else: + return records, self.changed + if self.module.check_mode: + result = new_record + else: + result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record) + self.changed = True + return result, self.changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])), + account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']), + account_email=dict(type='str'), + algorithm=dict(type='int'), + cert_usage=dict(type='int', choices=[0, 1, 2, 3]), + comment=dict(type='str'), + hash_type=dict(type='int', choices=[1, 2]), + key_tag=dict(type='int', no_log=False), + port=dict(type='int'), + flag=dict(type='int', choices=[0, 1]), + tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']), + tags=dict(type='list', elements='str'), + priority=dict(type='int', default=1), + proto=dict(type='str'), + proxied=dict(type='bool', default=False), + record=dict(type='str', default='@', aliases=['name']), + selector=dict(type='int', choices=[0, 1]), + service=dict(type='str'), + solo=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present']), + timeout=dict(type='int', default=30), + ttl=dict(type='int', default=1), + type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']), + value=dict(type='str', aliases=['content']), + weight=dict(type='int', default=1), + zone=dict(type='str', required=True, aliases=['domain']), + ), + supports_check_mode=True, + 
required_if=[ + ('state', 'present', ['record', 'type', 'value']), + ('state', 'absent', ['record']), + ('type', 'SRV', ['proto', 'service']), + ('type', 'TLSA', ['proto', 'port']), + ('type', 'CAA', ['flag', 'tag', 'value']), + ], + required_together=[ + ('account_api_key', 'account_email'), + ], + required_one_of=[ + ['api_token', 'account_api_key'], + ], + ) + + if module.params['type'] == 'SRV': + if not ((module.params['weight'] is not None and module.params['port'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['weight'] is None and module.params['port'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.") + + if module.params['type'] == 'SSHFP': + if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['algorithm'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.") + + if module.params['type'] == 'TLSA': + if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") + + if module.params['type'] == 'CAA': + if not module.params['value'] == '': + module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined.") + + if module.params['type'] == 'DS': + if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.") + + changed = False + cf_api = CloudflareAPI(module) + + # sanity checks + if cf_api.is_solo and cf_api.state == 'absent': + module.fail_json(msg="solo=true can only be used with state=present") + + # perform add, delete or update (only the TTL can be updated) of one or + # more records + if cf_api.state == 'present': + # delete all records matching record name + type + if cf_api.is_solo: + changed = cf_api.delete_dns_records(solo=cf_api.is_solo) + result, changed = cf_api.ensure_dns_record() + if isinstance(result, list): + module.exit_json(changed=changed, result={'record': result[0]}) + + module.exit_json(changed=changed, result={'record': result}) + else: + # force solo to False, just to be sure + changed = cf_api.delete_dns_records(solo=False) + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/consul/consul.py 
b/plugins/modules/clustering/consul/consul.py deleted file mode 100644 index f85e1cc729..0000000000 --- a/plugins/modules/clustering/consul/consul.py +++ /dev/null @@ -1,606 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul -short_description: "Add, modify & delete services within a consul cluster." -description: - - Registers services and checks for an agent with a consul cluster. - A service is some process running on the agent node that should be advertised by - consul's discovery mechanism. It may optionally supply a check definition, - a periodic service test to notify the consul cluster of service's health. - - "Checks may also be registered per node e.g. disk usage, or cpu usage and - notify the health of the entire node to the cluster. - Service level checks do not require a check name or id as these are derived - by Consul from the Service name and id respectively by appending 'service:' - Node level checks require a I(check_name) and optionally a I(check_id)." - - Currently, there is no complete way to retrieve the script, interval or ttl - metadata for a registered check. Without this metadata it is not possible to - tell if the data supplied with ansible represents a change to a check. As a - result this does not attempt to determine changes and will always report a - changed occurred. An API method is planned to supply this metadata so at that - stage change management will be added. - - "See U(http://consul.io) for more details." -requirements: - - python-consul - - requests -author: "Steve Gargan (@sgargan)" -options: - state: - type: str - description: - - register or deregister the consul service, defaults to present - default: present - choices: ['present', 'absent'] - service_name: - type: str - description: - - Unique name for the service on a node, must be unique per node, - required if registering a service. May be omitted if registering - a node level check - service_id: - type: str - description: - - the ID for the service, must be unique per node. If I(state=absent), - defaults to the service name if supplied. - host: - type: str - description: - - host of the consul agent defaults to localhost - default: localhost - port: - type: int - description: - - the port on which the consul agent is running - default: 8500 - scheme: - type: str - description: - - the protocol scheme on which the consul agent is running - default: http - validate_certs: - description: - - whether to verify the TLS certificate of the consul agent - type: bool - default: 'yes' - notes: - type: str - description: - - Notes to attach to check when registering it. - service_port: - type: int - description: - - the port on which the service is listening. Can optionally be supplied for - registration of a service, i.e. if I(service_name) or I(service_id) is set - service_address: - type: str - description: - - the address to advertise that the service will be listening on. - This value will be passed as the I(address) parameter to Consul's - U(/v1/agent/service/register) API method, so refer to the Consul API - documentation for further details. - tags: - type: list - elements: str - description: - - tags that will be attached to the service registration. 
- script: - type: str - description: - - the script/command that will be run periodically to check the health - of the service. Scripts require I(interval) and vice versa. - interval: - type: str - description: - - the interval at which the service check will be run. This is a number - with a s or m suffix to signify the units of seconds or minutes e.g - C(15s) or C(1m). If no suffix is supplied, m will be used by default e.g. - C(1) will be C(1m). Required if the I(script) parameter is specified. - check_id: - type: str - description: - - an ID for the service check. If I(state=absent), defaults to - I(check_name). Ignored if part of a service definition. - check_name: - type: str - description: - - a name for the service check. Required if standalone, ignored if - part of service definition. - ttl: - type: str - description: - - checks can be registered with a ttl instead of a I(script) and I(interval) - this means that the service will check in with the agent before the - ttl expires. If it doesn't the check will be considered failed. - Required if registering a check and the script an interval are missing - Similar to the interval this is a number with a s or m suffix to - signify the units of seconds or minutes e.g C(15s) or C(1m). If no suffix - is supplied, C(m) will be used by default e.g. C(1) will be C(1m) - tcp: - type: str - description: - - Checks can be registered with a TCP port. This means that consul - will check if the connection attempt to that port is successful (that is, the port is currently accepting connections). - The format is C(host:port), for example C(localhost:80). - I(interval) must also be provided with this option. - version_added: '1.3.0' - http: - type: str - description: - - checks can be registered with an HTTP endpoint. This means that consul - will check that the http endpoint returns a successful HTTP status. - I(interval) must also be provided with this option. - timeout: - type: str - description: - - A custom HTTP check timeout. The consul default is 10 seconds. - Similar to the interval this is a number with a C(s) or C(m) suffix to - signify the units of seconds or minutes, e.g. C(15s) or C(1m). - token: - type: str - description: - - the token key identifying an ACL rule set. May be required to register services. 
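For orientation, the script/interval, ttl, http and tcp options described above correspond one-to-one to python-consul's consul.Check factory helpers, which this module itself calls further down (see ConsulCheck). A minimal sketch of that mapping, with illustrative commands, URLs and intervals:

```python
import consul

# Each check style the documentation describes, expressed with the
# consul.Check helpers the module uses internally:
script_check = consul.Check.script('curl http://localhost', '60s')   # script + interval
ttl_check = consul.Check.ttl('30s')                                  # service must check in before the TTL expires
http_check = consul.Check.http('http://localhost:80/status', '60s')  # HTTP status probe
tcp_check = consul.Check.tcp('localhost', 80, '60s')                 # TCP connect probe

# Registering a service with one of these checks attached:
c = consul.Consul()  # defaults to the local agent on port 8500
c.agent.service.register('nginx', port=80, check=http_check)
```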
-''' - -EXAMPLES = ''' -- name: Register nginx service with the local consul agent - community.general.consul: - service_name: nginx - service_port: 80 - -- name: Register nginx service with curl check - community.general.consul: - service_name: nginx - service_port: 80 - script: curl http://localhost - interval: 60s - -- name: register nginx with a tcp check - community.general.consul: - service_name: nginx - service_port: 80 - interval: 60s - tcp: localhost:80 - -- name: Register nginx with an http check - community.general.consul: - service_name: nginx - service_port: 80 - interval: 60s - http: http://localhost:80/status - -- name: Register external service nginx available at 10.1.5.23 - community.general.consul: - service_name: nginx - service_port: 80 - service_address: 10.1.5.23 - -- name: Register nginx with some service tags - community.general.consul: - service_name: nginx - service_port: 80 - tags: - - prod - - webservers - -- name: Remove nginx service - community.general.consul: - service_name: nginx - state: absent - -- name: Register celery worker service - community.general.consul: - service_name: celery-worker - tags: - - prod - - worker - -- name: Create a node level check to test disk usage - community.general.consul: - check_name: Disk usage - check_id: disk_usage - script: /opt/disk_usage.py - interval: 5m - -- name: Register an http check against a service that's already registered - community.general.consul: - check_name: nginx-check2 - check_id: nginx-check2 - service_id: nginx - interval: 60s - http: http://localhost:80/morestatus -''' - -try: - import consul - from requests.exceptions import ConnectionError - - class PatchedConsulAgentService(consul.Consul.Agent.Service): - def deregister(self, service_id, token=None): - params = {} - if token: - params['token'] = token - return self.agent.http.put(consul.base.CB.bool(), - '/v1/agent/service/deregister/%s' % service_id, - params=params) - - python_consul_installed = True -except ImportError: - python_consul_installed = False - -import re -from ansible.module_utils.basic import AnsibleModule - - -def register_with_consul(module): - state = module.params.get('state') - - if state == 'present': - add(module) - else: - remove(module) - - -def add(module): - ''' adds a service or a check depending on supplied configuration''' - check = parse_check(module) - service = parse_service(module) - - if not service and not check: - module.fail_json(msg='a name and port are required to register a service') - - if service: - if check: - service.add_check(check) - add_service(module, service) - elif check: - add_check(module, check) - - -def remove(module): - ''' removes a service or a check ''' - service_id = module.params.get('service_id') or module.params.get('service_name') - check_id = module.params.get('check_id') or module.params.get('check_name') - if not (service_id or check_id): - module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name') - if service_id: - remove_service(module, service_id) - else: - remove_check(module, check_id) - - -def add_check(module, check): - ''' registers a check with the given agent. currently there is no way - retrieve the full metadata of an existing check through the consul api. - Without this we can't compare to the supplied check and so we must assume - a change. 
''' - if not check.name and not check.service_id: - module.fail_json(msg='a check name is required for a node level check, one not attached to a service') - - consul_api = get_consul_api(module) - check.register(consul_api) - - module.exit_json(changed=True, - check_id=check.check_id, - check_name=check.name, - script=check.script, - interval=check.interval, - ttl=check.ttl, - tcp=check.tcp, - http=check.http, - timeout=check.timeout, - service_id=check.service_id) - - -def remove_check(module, check_id): - ''' removes a check using its id ''' - consul_api = get_consul_api(module) - - if check_id in consul_api.agent.checks(): - consul_api.agent.check.deregister(check_id) - module.exit_json(changed=True, id=check_id) - - module.exit_json(changed=False, id=check_id) - - -def add_service(module, service): - ''' registers a service with the current agent ''' - result = service - changed = False - - consul_api = get_consul_api(module) - existing = get_service_by_id_or_name(consul_api, service.id) - - # there is no way to retrieve the details of checks so if a check is present - # in the service it must be re-registered - if service.has_checks() or not existing or not existing == service: - - service.register(consul_api) - # check that it registered correctly - registered = get_service_by_id_or_name(consul_api, service.id) - if registered: - result = registered - changed = True - - module.exit_json(changed=changed, - service_id=result.id, - service_name=result.name, - service_port=result.port, - checks=[check.to_dict() for check in service.checks], - tags=result.tags) - - -def remove_service(module, service_id): - ''' deregister a service from the given agent using its service id ''' - consul_api = get_consul_api(module) - service = get_service_by_id_or_name(consul_api, service_id) - if service: - consul_api.agent.service.deregister(service_id, token=module.params.get('token')) - module.exit_json(changed=True, id=service_id) - - module.exit_json(changed=False, id=service_id) - - -def get_consul_api(module): - consulClient = consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token')) - consulClient.agent.service = PatchedConsulAgentService(consulClient) - return consulClient - - -def get_service_by_id_or_name(consul_api, service_id_or_name): - ''' iterate the registered services and find one with the given id ''' - for name, service in consul_api.agent.services().items(): - if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name: - return ConsulService(loaded=service) - - -def parse_check(module): - if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1: - module.fail_json( - msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense') - - if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'): - - return ConsulCheck( - module.params.get('check_id'), - module.params.get('check_name'), - module.params.get('check_node'), - module.params.get('check_host'), - module.params.get('script'), - module.params.get('interval'), - module.params.get('ttl'), - module.params.get('notes'), - module.params.get('tcp'), - module.params.get('http'), - module.params.get('timeout'), - module.params.get('service_id'), - ) - - -def 
parse_service(module): - if module.params.get('service_name'): - return ConsulService( - module.params.get('service_id'), - module.params.get('service_name'), - module.params.get('service_address'), - module.params.get('service_port'), - module.params.get('tags'), - ) - elif not module.params.get('service_name'): - module.fail_json(msg="service_name is required to configure a service.") - - -class ConsulService(object): - - def __init__(self, service_id=None, name=None, address=None, port=-1, - tags=None, loaded=None): - self.id = self.name = name - if service_id: - self.id = service_id - self.address = address - self.port = port - self.tags = tags - self.checks = [] - if loaded: - self.id = loaded['ID'] - self.name = loaded['Service'] - self.port = loaded['Port'] - self.tags = loaded['Tags'] - - def register(self, consul_api): - optional = {} - - if self.port: - optional['port'] = self.port - - if len(self.checks) > 0: - optional['check'] = self.checks[0].check - - consul_api.agent.service.register( - self.name, - service_id=self.id, - address=self.address, - tags=self.tags, - **optional) - - def add_check(self, check): - self.checks.append(check) - - def checks(self): - return self.checks - - def has_checks(self): - return len(self.checks) > 0 - - def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.id == other.id and - self.name == other.name and - self.port == other.port and - self.tags == other.tags) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_dict(self): - data = {'id': self.id, "name": self.name} - if self.port: - data['port'] = self.port - if self.tags and len(self.tags) > 0: - data['tags'] = self.tags - if len(self.checks) > 0: - data['check'] = self.checks[0].to_dict() - return data - - -class ConsulCheck(object): - - def __init__(self, check_id, name, node=None, host='localhost', - script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None): - self.check_id = self.name = name - if check_id: - self.check_id = check_id - self.service_id = service_id - self.notes = notes - self.node = node - self.host = host - - self.interval = self.validate_duration('interval', interval) - self.ttl = self.validate_duration('ttl', ttl) - self.script = script - self.tcp = tcp - self.http = http - self.timeout = self.validate_duration('timeout', timeout) - - self.check = None - - if script: - self.check = consul.Check.script(script, self.interval) - - if ttl: - self.check = consul.Check.ttl(self.ttl) - - if http: - if interval is None: - raise Exception('http check must specify interval') - - self.check = consul.Check.http(http, self.interval, self.timeout) - - if tcp: - if interval is None: - raise Exception('tcp check must specify interval') - - regex = r"(?P<host>.*)(?::)(?P<port>(?:[0-9]+))$" - match = re.match(regex, tcp) - - if match is None: - raise Exception('tcp check must be in host:port format') - - self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval) - - def validate_duration(self, name, duration): - if duration: - duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] - if not any((duration.endswith(suffix) for suffix in duration_units)): - duration = "{0}s".format(duration) - return duration - - def register(self, consul_api): - consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id, - notes=self.notes, - check=self.check) - - def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.check_id ==
other.check_id and - self.service_id == other.service_id and - self.name == other.name and - self.script == other.script and - self.interval == other.interval) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_dict(self): - data = {} - self._add(data, 'id', attr='check_id') - self._add(data, 'name', attr='check_name') - self._add(data, 'script') - self._add(data, 'node') - self._add(data, 'notes') - self._add(data, 'host') - self._add(data, 'interval') - self._add(data, 'ttl') - self._add(data, 'tcp') - self._add(data, 'http') - self._add(data, 'timeout') - self._add(data, 'service_id') - return data - - def _add(self, data, key, attr=None): - try: - if attr is None: - attr = key - data[key] = getattr(self, attr) - except Exception: - pass - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(default='localhost'), - port=dict(default=8500, type='int'), - scheme=dict(default='http'), - validate_certs=dict(default=True, type='bool'), - check_id=dict(), - check_name=dict(), - check_node=dict(), - check_host=dict(), - notes=dict(), - script=dict(), - service_id=dict(), - service_name=dict(), - service_address=dict(type='str'), - service_port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - interval=dict(type='str'), - ttl=dict(type='str'), - tcp=dict(type='str'), - http=dict(type='str'), - timeout=dict(type='str'), - tags=dict(type='list', elements='str'), - token=dict(no_log=True) - ), - supports_check_mode=False, - ) - - test_dependencies(module) - - try: - register_with_consul(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), str(e))) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py deleted file mode 100644 index 1e01e58af5..0000000000 --- a/plugins/modules/clustering/consul/consul_acl.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_acl -short_description: Manipulate Consul ACL keys and rules -description: - - Allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/guides/acl.html. 
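The rules this module manages exist in three equivalent shapes: the YAML list a task supplies, Consul's HCL rule documents, and the JSON that pyhcl derives from HCL (the encode/decode helpers below convert between them). A minimal round-trip sketch using the required pyhcl library, with an illustrative rule:

```python
import hcl

# One ACL rule as a Consul HCL document, scoped to a key pattern:
rule_text = 'key "foo" {\n  policy = "read"\n}\n'

parsed = hcl.loads(rule_text)
# -> {'key': {'foo': {'policy': 'read'}}}
# which corresponds to the module's YAML form:
#   rules:
#     - key: "foo"
#       policy: read
print(parsed['key']['foo']['policy'])  # read
```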
-author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -options: - mgmt_token: - description: - - a management token is required to manipulate the acl lists - required: true - type: str - state: - description: - - whether the ACL pair should be present or absent - required: false - choices: ['present', 'absent'] - default: present - type: str - token_type: - description: - - the type of token that should be created - choices: ['client', 'management'] - default: client - type: str - name: - description: - - the name that should be associated with the acl key, this is opaque - to Consul - required: false - type: str - token: - description: - - the token key identifying an ACL rule set. If generated by consul - this will be a UUID - required: false - type: str - rules: - type: list - elements: dict - description: - - rules that should be associated with a given token - required: false - host: - description: - - host of the consul agent defaults to localhost - required: false - default: localhost - type: str - port: - type: int - description: - - the port on which the consul agent is running - required: false - default: 8500 - scheme: - description: - - the protocol scheme on which the consul agent is running - required: false - default: http - type: str - validate_certs: - type: bool - description: - - whether to verify the tls certificate of the consul agent - required: false - default: True -requirements: - - python-consul - - pyhcl - - requests -''' - -EXAMPLES = """ -- name: Create an ACL with rules - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - rules: - - key: "foo" - policy: read - - key: "private/foo" - policy: deny - -- name: Create an ACL with a specific token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: my-token - rules: - - key: "foo" - policy: read - -- name: Update the rules associated to an ACL token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: some_client_token - rules: - - event: "bbq" - policy: write - - key: "foo" - policy: read - - key: "private" - policy: deny - - keyring: write - - node: "hgs4" - policy: write - - operator: read - - query: "" - policy: write - - service: "consul" - policy: write - - session: "standup" - policy: write - -- name: Remove a token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e - state: absent -""" - -RETURN = """ -token: - description: the token associated to the ACL (the ACL's ID) - returned: success - type: str - sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da -rules: - description: the HCL JSON representation of the rules associated to the ACL, in the format described in the - Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). 
- returned: I(status) == "present" - type: str - sample: { - "key": { - "foo": { - "policy": "write" - }, - "bar": { - "policy": "deny" - } - } - } -operation: - description: the operation performed on the ACL - returned: changed - type: str - sample: update -""" - - -try: - import consul - python_consul_installed = True -except ImportError: - python_consul_installed = False - -try: - import hcl - pyhcl_installed = True -except ImportError: - pyhcl_installed = False - -try: - from requests.exceptions import ConnectionError - has_requests = True -except ImportError: - has_requests = False - -from collections import defaultdict -from ansible.module_utils.basic import to_text, AnsibleModule - - -RULE_SCOPES = [ - "agent", - "agent_prefix", - "event", - "event_prefix", - "key", - "key_prefix", - "keyring", - "node", - "node_prefix", - "operator", - "query", - "query_prefix", - "service", - "service_prefix", - "session", - "session_prefix", -] - -MANAGEMENT_PARAMETER_NAME = "mgmt_token" -HOST_PARAMETER_NAME = "host" -SCHEME_PARAMETER_NAME = "scheme" -VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" -NAME_PARAMETER_NAME = "name" -PORT_PARAMETER_NAME = "port" -RULES_PARAMETER_NAME = "rules" -STATE_PARAMETER_NAME = "state" -TOKEN_PARAMETER_NAME = "token" -TOKEN_TYPE_PARAMETER_NAME = "token_type" - -PRESENT_STATE_VALUE = "present" -ABSENT_STATE_VALUE = "absent" - -CLIENT_TOKEN_TYPE_VALUE = "client" -MANAGEMENT_TOKEN_TYPE_VALUE = "management" - -REMOVE_OPERATION = "remove" -UPDATE_OPERATION = "update" -CREATE_OPERATION = "create" - -_POLICY_JSON_PROPERTY = "policy" -_RULES_JSON_PROPERTY = "Rules" -_TOKEN_JSON_PROPERTY = "ID" -_TOKEN_TYPE_JSON_PROPERTY = "Type" -_NAME_JSON_PROPERTY = "Name" -_POLICY_YML_PROPERTY = "policy" -_POLICY_HCL_PROPERTY = "policy" - -_ARGUMENT_SPEC = { - MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), - HOST_PARAMETER_NAME: dict(default='localhost'), - SCHEME_PARAMETER_NAME: dict(default='http'), - VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), - NAME_PARAMETER_NAME: dict(), - PORT_PARAMETER_NAME: dict(default=8500, type='int'), - RULES_PARAMETER_NAME: dict(type='list', elements='dict'), - STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(no_log=False), - TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], - default=CLIENT_TOKEN_TYPE_VALUE) -} - - -def set_acl(consul_client, configuration): - """ - Sets an ACL based on the given configuration. 
- :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of setting the ACL - """ - acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) - existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) - if None in existing_acls_mapped_by_token: - raise AssertionError("expecting ACL list to be associated to a token: %s" % - existing_acls_mapped_by_token[None]) - - if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: - # No token but name given so can get token from name - configuration.token = existing_acls_mapped_by_name[configuration.name].token - - if configuration.token and configuration.token in existing_acls_mapped_by_token: - return update_acl(consul_client, configuration) - else: - if configuration.token in existing_acls_mapped_by_token: - raise AssertionError() - if configuration.name in existing_acls_mapped_by_name: - raise AssertionError() - return create_acl(consul_client, configuration) - - -def update_acl(consul_client, configuration): - """ - Updates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the update - """ - existing_acl = load_acl_with_token(consul_client, configuration.token) - changed = existing_acl.rules != configuration.rules - - if changed: - name = configuration.name if configuration.name is not None else existing_acl.name - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) - updated_token = consul_client.acl.update( - configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) - if updated_token != configuration.token: - raise AssertionError() - - return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) - - -def create_acl(consul_client, configuration): - """ - Creates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the creation - """ - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None - token = consul_client.acl.create( - name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) - rules = configuration.rules - return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) - - -def remove_acl(consul, configuration): - """ - Removes an ACL. - :param consul: the consul client - :param configuration: the run configuration - :return: the output of the removal - """ - token = configuration.token - changed = consul.acl.info(token) is not None - if changed: - consul.acl.destroy(token) - return Output(changed=changed, token=token, operation=REMOVE_OPERATION) - - -def load_acl_with_token(consul, token): - """ - Loads the ACL with the given token (token == rule ID). - :param consul: the consul client - :param token: the ACL "token"/ID (not name) - :return: the ACL associated to the given token - :exception ConsulACLTokenNotFoundException: raised if the given token does not exist - """ - acl_as_json = consul.acl.info(token) - if acl_as_json is None: - raise ConsulACLNotFoundException(token) - return decode_acl_as_json(acl_as_json) - - -def encode_rules_as_hcl_string(rules): - """ - Converts the given rules into the equivalent HCL (string) representation. 
- :param rules: the rules - :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal - note for justification) - """ - if len(rules) == 0: - # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty - # string if there is no rules... - return None - rules_as_hcl = "" - for rule in rules: - rules_as_hcl += encode_rule_as_hcl_string(rule) - return rules_as_hcl - - -def encode_rule_as_hcl_string(rule): - """ - Converts the given rule into the equivalent HCL (string) representation. - :param rule: the rule - :return: the equivalent HCL (string) representation of the rule - """ - if rule.pattern is not None: - return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) - else: - return '%s = "%s"\n' % (rule.scope, rule.policy) - - -def decode_rules_as_hcl_string(rules_as_hcl): - """ - Converts the given HCL (string) representation of rules into a list of rule domain models. - :param rules_as_hcl: the HCL (string) representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules_as_hcl = to_text(rules_as_hcl) - rules_as_json = hcl.loads(rules_as_hcl) - return decode_rules_as_json(rules_as_json) - - -def decode_rules_as_json(rules_as_json): - """ - Converts the given JSON representation of rules into a list of rule domain models. - :param rules_as_json: the JSON representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - for scope in rules_as_json: - if not isinstance(rules_as_json[scope], dict): - rules.add(Rule(scope, rules_as_json[scope])) - else: - for pattern, policy in rules_as_json[scope].items(): - rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) - return rules - - -def encode_rules_as_json(rules): - """ - Converts the given rules into the equivalent JSON representation according to the documentation: - https://www.consul.io/docs/guides/acl.html#rule-specification. - :param rules: the rules - :return: JSON representation of the given rules - """ - rules_as_json = defaultdict(dict) - for rule in rules: - if rule.pattern is not None: - if rule.pattern in rules_as_json[rule.scope]: - raise AssertionError() - rules_as_json[rule.scope][rule.pattern] = { - _POLICY_JSON_PROPERTY: rule.policy - } - else: - if rule.scope in rules_as_json: - raise AssertionError() - rules_as_json[rule.scope] = rule.policy - return rules_as_json - - -def decode_rules_as_yml(rules_as_yml): - """ - Converts the given YAML representation of rules into a list of rule domain models. - :param rules_as_yml: the YAML representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - if rules_as_yml: - for rule_as_yml in rules_as_yml: - rule_added = False - for scope in RULE_SCOPES: - if scope in rule_as_yml: - if rule_as_yml[scope] is None: - raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) - policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ - else rule_as_yml[scope] - pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None - rules.add(Rule(scope, policy, pattern)) - rule_added = True - break - if not rule_added: - raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) - return rules - - -def decode_acl_as_json(acl_as_json): - """ - Converts the given JSON representation of an ACL into the equivalent domain model. - :param acl_as_json: the JSON representation of an ACL - :return: the equivalent domain model to the given ACL - """ - rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] - rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ - else RuleCollection() - return ACL( - rules=rules, - token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], - token=acl_as_json[_TOKEN_JSON_PROPERTY], - name=acl_as_json[_NAME_JSON_PROPERTY] - ) - - -def decode_acls_as_json(acls_as_json): - """ - Converts the given JSON representation of ACLs into a list of ACL domain models. - :param acls_as_json: the JSON representation of a collection of ACLs - :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) - """ - return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] - - -class ConsulACLNotFoundException(Exception): - """ - Exception raised if an ACL with is not found. - """ - - -class Configuration: - """ - Configuration for this module. - """ - - def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, - rules=None, state=None, token=None, token_type=None): - self.management_token = management_token # type: str - self.host = host # type: str - self.scheme = scheme # type: str - self.validate_certs = validate_certs # type: bool - self.name = name # type: str - self.port = port # type: int - self.rules = rules # type: RuleCollection - self.state = state # type: str - self.token = token # type: str - self.token_type = token_type # type: str - - -class Output: - """ - Output of an action of this module. - """ - - def __init__(self, changed=None, token=None, rules=None, operation=None): - self.changed = changed # type: bool - self.token = token # type: str - self.rules = rules # type: RuleCollection - self.operation = operation # type: str - - -class ACL: - """ - Consul ACL. See: https://www.consul.io/docs/guides/acl.html. - """ - - def __init__(self, rules, token_type, token, name): - self.rules = rules - self.token_type = token_type - self.token = token - self.name = name - - def __eq__(self, other): - return other \ - and isinstance(other, self.__class__) \ - and self.rules == other.rules \ - and self.token_type == other.token_type \ - and self.token == other.token \ - and self.name == other.name - - def __hash__(self): - return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) - - -class Rule: - """ - ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. - """ - - def __init__(self, scope, policy, pattern=None): - self.scope = scope - self.policy = policy - self.pattern = pattern - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and self.scope == other.scope \ - and self.policy == other.policy \ - and self.pattern == other.pattern - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) - - def __str__(self): - return encode_rule_as_hcl_string(self) - - -class RuleCollection: - """ - Collection of ACL rules, which are part of a Consul ACL. 
- """ - - def __init__(self): - self._rules = {} - for scope in RULE_SCOPES: - self._rules[scope] = {} - - def __iter__(self): - all_rules = [] - for scope, pattern_keyed_rules in self._rules.items(): - for pattern, rule in pattern_keyed_rules.items(): - all_rules.append(rule) - return iter(all_rules) - - def __len__(self): - count = 0 - for scope in RULE_SCOPES: - count += len(self._rules[scope]) - return count - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and set(self) == set(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return encode_rules_as_hcl_string(self) - - def add(self, rule): - """ - Adds the given rule to this collection. - :param rule: model of a rule - :raises ValueError: raised if there already exists a rule for a given scope and pattern - """ - if rule.pattern in self._rules[rule.scope]: - patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" - raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) - self._rules[rule.scope][rule.pattern] = rule - - -def get_consul_client(configuration): - """ - Gets a Consul client for the given configuration. - - Does not check if the Consul client can connect. - :param configuration: the run configuration - :return: Consul client - """ - token = configuration.management_token - if token is None: - token = configuration.token - if token is None: - raise AssertionError("Expecting the management token to always be set") - return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, - verify=configuration.validate_certs, token=token) - - -def check_dependencies(): - """ - Checks that the required dependencies have been imported. - :exception ImportError: if it is detected that any of the required dependencies have not been imported - """ - if not python_consul_installed: - raise ImportError("python-consul required for this module. " - "See: https://python-consul.readthedocs.io/en/latest/#installation") - - if not pyhcl_installed: - raise ImportError("pyhcl required for this module. " - "See: https://pypi.org/project/pyhcl/") - - if not has_requests: - raise ImportError("requests required for this module. See https://pypi.org/project/requests/") - - -def main(): - """ - Main method. 
- """ - module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) - - try: - check_dependencies() - except ImportError as e: - module.fail_json(msg=str(e)) - - configuration = Configuration( - management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), - host=module.params.get(HOST_PARAMETER_NAME), - scheme=module.params.get(SCHEME_PARAMETER_NAME), - validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), - name=module.params.get(NAME_PARAMETER_NAME), - port=module.params.get(PORT_PARAMETER_NAME), - rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), - state=module.params.get(STATE_PARAMETER_NAME), - token=module.params.get(TOKEN_PARAMETER_NAME), - token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) - ) - consul_client = get_consul_client(configuration) - - try: - if configuration.state == PRESENT_STATE_VALUE: - output = set_acl(consul_client, configuration) - else: - output = remove_acl(consul_client, configuration) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - configuration.host, configuration.port, str(e))) - raise - - return_values = dict(changed=output.changed, token=output.token, operation=output.operation) - if output.rules is not None: - return_values["rules"] = encode_rules_as_json(output.rules) - module.exit_json(**return_values) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/clustering/consul/consul_kv.py deleted file mode 100644 index f7b33b856e..0000000000 --- a/plugins/modules/clustering/consul/consul_kv.py +++ /dev/null @@ -1,328 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Steve Gargan -# (c) 2018 Genome Research Ltd. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_kv -short_description: Manipulate entries in the key/value store of a consul cluster -description: - - Allows the retrieval, addition, modification and deletion of key/value entries in a - consul cluster via the agent. The entire contents of the record, including - the indices, flags and session are returned as C(value). - - If the C(key) represents a prefix then note that when a value is removed, the existing - value if any is returned as part of the results. - - See http://www.consul.io/docs/agent/http.html#kv for more details. -requirements: - - python-consul - - requests -author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -options: - state: - description: - - The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key - contents will be set to the value supplied and `changed` will be set to `true` only if the value was - different to the current contents. If the state is 'present' and `value` is not set, the existing value - associated to the key will be returned. The state 'absent' will remove the key/value pair, - again 'changed' will be set to true only if the key actually existed - prior to the removal. An attempt can be made to obtain or free the - lock associated with a key/value pair with the states 'acquire' or - 'release' respectively. a valid session must be supplied to make the - attempt changed will be true if the attempt is successful, false - otherwise. 
- type: str - choices: [ absent, acquire, present, release ] - default: present - key: - description: - - The key at which the value should be stored. - type: str - required: yes - value: - description: - - The value should be associated with the given key, required if C(state) - is C(present). - type: str - recurse: - description: - - If the key represents a prefix, each entry with the prefix can be - retrieved by setting this to C(yes). - type: bool - retrieve: - description: - - If the I(state) is C(present) and I(value) is set, perform a - read after setting the value and return this value. - default: True - type: bool - session: - description: - - The session that should be used to acquire or release a lock - associated with a key/value pair. - type: str - token: - description: - - The token key identifying an ACL rule set that controls access to - the key value pair - type: str - cas: - description: - - Used when acquiring a lock with a session. If the C(cas) is C(0), then - Consul will only put the key if it does not already exist. If the - C(cas) value is non-zero, then the key is only set if the index matches - the ModifyIndex of that key. - type: str - flags: - description: - - Opaque positive integer value that can be passed when setting a value. - type: str - host: - description: - - Host of the consul agent. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the tls certificate of the consul agent. - type: bool - default: 'yes' -''' - - -EXAMPLES = ''' -# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None` -# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None` -- name: Retrieve a value from the key/value store - community.general.consul_kv: - key: somekey - register: retrieved_key - -- name: Add or update the value associated with a key in the key/value store - community.general.consul_kv: - key: somekey - value: somevalue - -- name: Remove a key from the store - community.general.consul_kv: - key: somekey - state: absent - -- name: Add a node to an arbitrary group via consul inventory (see consul.ini) - community.general.consul_kv: - key: ansible/groups/dc1/somenode - value: top_secret - -- name: Register a key/value pair with an associated session - community.general.consul_kv: - key: stg/node/server_birthday - value: 20160509 - session: "{{ sessionid }}" - state: acquire -''' - -from ansible.module_utils.common.text.converters import to_text - -try: - import consul - from requests.exceptions import ConnectionError - python_consul_installed = True -except ImportError: - python_consul_installed = False - -from ansible.module_utils.basic import AnsibleModule - -# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a -# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call, -# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key) -NOT_SET = None - - -def _has_value_changed(consul_client, key, target_value): - """ - Uses the given Consul client to determine if the value associated to the given key is different to the given target - value. 
- :param consul_client: Consul connected client - :param key: key in Consul - :param target_value: value to be associated to the key - :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the - value has changed (i.e. the stored value is not the target value) - """ - index, existing = consul_client.kv.get(key) - if not existing: - return index, True - try: - changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value - return index, changed - except UnicodeError: - # Existing value was not decodable but all values we set are valid utf-8 - return index, True - - -def execute(module): - state = module.params.get('state') - - if state == 'acquire' or state == 'release': - lock(module, state) - elif state == 'present': - if module.params.get('value') is NOT_SET: - get_value(module) - else: - set_value(module) - elif state == 'absent': - remove_value(module) - else: - module.exit_json(msg="Unsupported state: %s" % (state, )) - - -def lock(module, state): - - consul_api = get_consul_api(module) - - session = module.params.get('session') - key = module.params.get('key') - value = module.params.get('value') - - if not session: - module.fail( - msg='%s of lock for %s requested but no session supplied' % - (state, key)) - - index, changed = _has_value_changed(consul_api, key, value) - - if changed and not module.check_mode: - if state == 'acquire': - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - acquire=session, - flags=module.params.get('flags')) - else: - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - release=session, - flags=module.params.get('flags')) - - module.exit_json(changed=changed, - index=index, - key=key) - - -def get_value(module): - consul_api = get_consul_api(module) - key = module.params.get('key') - - index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse')) - - module.exit_json(changed=False, index=index, data=existing_value) - - -def set_value(module): - consul_api = get_consul_api(module) - - key = module.params.get('key') - value = module.params.get('value') - - if value is NOT_SET: - raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key) - - index, changed = _has_value_changed(consul_api, key, value) - - if changed and not module.check_mode: - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - flags=module.params.get('flags')) - - stored = None - if module.params.get('retrieve'): - index, stored = consul_api.kv.get(key) - - module.exit_json(changed=changed, - index=index, - key=key, - data=stored) - - -def remove_value(module): - ''' remove the value associated with the given key. if the recurse parameter - is set then any key prefixed with the given key will be removed. 
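The acquire and release states handled in lock() above ride on Consul's session mechanism: a kv.put() with acquire= (or release=) succeeds only if the named session can take (or currently holds) the lock. A minimal standalone sketch with python-consul, using an illustrative key and session name:

```python
import consul

c = consul.Consul()

# A session is the lock holder's identity; if its checks fail or its
# TTL lapses, Consul releases any locks it holds.
session_id = c.session.create(name='deploy-lock', ttl=60)

# The put returns True only if this session acquired the lock.
got_lock = c.kv.put('locks/deploy', 'node1', acquire=session_id)

if got_lock:
    try:
        pass  # ... do the work the lock protects ...
    finally:
        c.kv.put('locks/deploy', 'node1', release=session_id)
```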
''' - consul_api = get_consul_api(module) - - key = module.params.get('key') - - index, existing = consul_api.kv.get( - key, recurse=module.params.get('recurse')) - - changed = existing is not None - if changed and not module.check_mode: - consul_api.kv.delete(key, module.params.get('recurse')) - - module.exit_json(changed=changed, - index=index, - key=key, - data=existing) - - -def get_consul_api(module, token=None): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token')) - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. " - "see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - cas=dict(type='str'), - flags=dict(type='str'), - key=dict(type='str', required=True, no_log=False), - host=dict(type='str', default='localhost'), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), - port=dict(type='int', default=8500), - recurse=dict(type='bool'), - retrieve=dict(type='bool', default=True), - state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']), - token=dict(type='str', no_log=True), - value=dict(type='str', default=NOT_SET), - session=dict(type='str'), - ), - supports_check_mode=True - ) - - test_dependencies(module) - - try: - execute(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), e)) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clustering/consul/consul_session.py b/plugins/modules/clustering/consul/consul_session.py deleted file mode 100644 index 7ace1f89a8..0000000000 --- a/plugins/modules/clustering/consul/consul_session.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: consul_session -short_description: Manipulate consul sessions -description: - - Allows the addition, modification and deletion of sessions in a consul - cluster. These sessions can then be used in conjunction with key value pairs - to implement distributed locks. In depth documentation for working with - sessions can be found at http://www.consul.io/docs/internals/sessions.html -requirements: - - python-consul - - requests -author: -- Steve Gargan (@sgargan) -options: - id: - description: - - ID of the session, required when I(state) is either C(info) or - C(remove). - type: str - state: - description: - - Whether the session should be present i.e. created if it doesn't - exist, or absent, removed if present. If created, the I(id) for the - session is returned in the output. If C(absent), I(id) is - required to remove the session. Info for a single session, all the - sessions for a node or all available sessions can be retrieved by - specifying C(info), C(node) or C(list) for the I(state); for C(node) - or C(info), the node I(name) or session I(id) is required as parameter. 
- choices: [ absent, info, list, node, present ] - type: str - default: present - name: - description: - - The name that should be associated with the session. Required when - I(state=node) is used. - type: str - delay: - description: - - The optional lock delay that can be attached to the session when it - is created. Locks for invalidated sessions ar blocked from being - acquired until this delay has expired. Durations are in seconds. - type: int - default: 15 - node: - description: - - The name of the node that with which the session will be associated. - by default this is the name of the agent. - type: str - datacenter: - description: - - The name of the datacenter in which the session exists or should be - created. - type: str - checks: - description: - - Checks that will be used to verify the session health. If - all the checks fail, the session will be invalidated and any locks - associated with the session will be release and can be acquired once - the associated lock delay has expired. - type: list - elements: str - host: - description: - - The host of the consul agent defaults to localhost. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the TLS certificate of the consul agent. - type: bool - default: True - behavior: - description: - - The optional behavior that can be attached to the session when it - is created. This controls the behavior when a session is invalidated. - choices: [ delete, release ] - type: str - default: release -''' - -EXAMPLES = ''' -- name: Register basic session with consul - community.general.consul_session: - name: session1 - -- name: Register a session with an existing check - community.general.consul_session: - name: session_with_check - checks: - - existing_check_name - -- name: Register a session with lock_delay - community.general.consul_session: - name: session_with_delay - delay: 20s - -- name: Retrieve info about session by id - community.general.consul_session: - id: session_id - state: info - -- name: Retrieve active sessions - community.general.consul_session: - state: list -''' - -try: - import consul - from requests.exceptions import ConnectionError - python_consul_installed = True -except ImportError: - python_consul_installed = False - -from ansible.module_utils.basic import AnsibleModule - - -def execute(module): - - state = module.params.get('state') - - if state in ['info', 'list', 'node']: - lookup_sessions(module) - elif state == 'present': - update_session(module) - else: - remove_session(module) - - -def lookup_sessions(module): - - datacenter = module.params.get('datacenter') - - state = module.params.get('state') - consul_client = get_consul_api(module) - try: - if state == 'list': - sessions_list = consul_client.session.list(dc=datacenter) - # Ditch the index, this can be grabbed from the results - if sessions_list and len(sessions_list) >= 2: - sessions_list = sessions_list[1] - module.exit_json(changed=True, - sessions=sessions_list) - elif state == 'node': - node = module.params.get('node') - sessions = consul_client.session.node(node, dc=datacenter) - module.exit_json(changed=True, - node=node, - sessions=sessions) - elif state == 'info': - session_id = module.params.get('id') - - session_by_id = consul_client.session.info(session_id, dc=datacenter) - 
module.exit_json(changed=True, - session_id=session_id, - sessions=session_by_id) - - except Exception as e: - module.fail_json(msg="Could not retrieve session info %s" % e) - - -def update_session(module): - - name = module.params.get('name') - delay = module.params.get('delay') - checks = module.params.get('checks') - datacenter = module.params.get('datacenter') - node = module.params.get('node') - behavior = module.params.get('behavior') - - consul_client = get_consul_api(module) - - try: - session = consul_client.session.create( - name=name, - behavior=behavior, - node=node, - lock_delay=delay, - dc=datacenter, - checks=checks - ) - module.exit_json(changed=True, - session_id=session, - name=name, - behavior=behavior, - delay=delay, - checks=checks, - node=node) - except Exception as e: - module.fail_json(msg="Could not create/update session %s" % e) - - -def remove_session(module): - session_id = module.params.get('id') - - consul_client = get_consul_api(module) - - try: - consul_client.session.destroy(session_id) - - module.exit_json(changed=True, - session_id=session_id) - except Exception as e: - module.fail_json(msg="Could not remove session with id '%s' %s" % ( - session_id, e)) - - -def get_consul_api(module): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs')) - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. " - "see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - argument_spec = dict( - checks=dict(type='list', elements='str'), - delay=dict(type='int', default='15'), - behavior=dict(type='str', default='release', choices=['release', 'delete']), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8500), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), - id=dict(type='str'), - name=dict(type='str'), - node=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']), - datacenter=dict(type='str'), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_if=[ - ('state', 'node', ['name']), - ('state', 'info', ['id']), - ('state', 'remove', ['id']), - ], - supports_check_mode=False - ) - - test_dependencies(module) - - try: - execute(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), e)) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py deleted file mode 100644 index 6a09513364..0000000000 --- a/plugins/modules/clustering/etcd3.py +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Jean-Philippe Evrard -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: etcd3 -short_description: "Set or delete key value pairs from an etcd3 cluster" -requirements: - - etcd3 -description: - - Sets or deletes values in etcd3 cluster using its v3 api. 
- - Needs python etcd3 lib to work -options: - key: - type: str - description: - - the key where the information is stored in the cluster - required: true - value: - type: str - description: - - the information stored - required: true - host: - type: str - description: - - the IP address of the cluster - default: 'localhost' - port: - type: int - description: - - the port number used to connect to the cluster - default: 2379 - state: - type: str - description: - - the state of the value for the key. - - can be present or absent - required: true - choices: [ present, absent ] - user: - type: str - description: - - The etcd user to authenticate with. - password: - type: str - description: - - The password to use for authentication. - - Required if I(user) is defined. - ca_cert: - type: path - description: - - The Certificate Authority to use to verify the etcd host. - - Required if I(client_cert) and I(client_key) are defined. - client_cert: - type: path - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - Required if I(client_key) is defined. - client_key: - type: path - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - Required if I(client_cert) is defined. - timeout: - type: int - description: - - The socket level timeout in seconds. -author: - - Jean-Philippe Evrard (@evrardjp) - - Victor Fauth (@vfauth) -''' - -EXAMPLES = """ -- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" - community.general.etcd3: - key: "foo" - value: "baz3" - host: "localhost" - port: 2379 - state: "present" - -- name: Authenticate using user/password combination with a timeout of 10 seconds - community.general.etcd3: - key: "foo" - value: "baz3" - state: "present" - user: "someone" - password: "password123" - timeout: 10 - -- name: Authenticate using TLS certificates - community.general.etcd3: - key: "foo" - value: "baz3" - state: "present" - ca_cert: "/etc/ssl/certs/CA_CERT.pem" - client_cert: "/etc/ssl/certs/cert.crt" - client_key: "/etc/ssl/private/key.pem" -""" - -RETURN = ''' -key: - description: The key that was queried - returned: always - type: str -old_value: - description: The previous value in the cluster - returned: always - type: str -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -try: - import etcd3 - HAS_ETCD = True -except ImportError: - ETCD_IMP_ERR = traceback.format_exc() - HAS_ETCD = False - - -def run_module(): - # define the available arguments/parameters that a user can pass to - # the module - module_args = dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=True), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=2379), - state=dict(type='str', required=True, choices=['present', 'absent']), - user=dict(type='str'), - password=dict(type='str', no_log=True), - ca_cert=dict(type='path'), - client_cert=dict(type='path'), - client_key=dict(type='path'), - timeout=dict(type='int'), - ) - - # seed the result dict in the object - # we primarily care about changed and state - # change is if this module effectively modified the target - # state will include any data that you want your module to pass back - # for consumption, for example, in a subsequent task - result = dict( - changed=False, - ) - - # the AnsibleModule object will be our abstraction 
working with Ansible - # this includes instantiation, a couple of common attr would be the - # args/params passed to the execution, as well as if the module - # supports check mode - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_together=[['client_cert', 'client_key'], ['user', 'password']], - ) - - # It is possible to set `ca_cert` to verify the server identity without - # setting `client_cert` or `client_key` to authenticate the client - # so required_together is enough - # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence - # of either `client_cert` or `client_key` is enough - if module.params['ca_cert'] is None and module.params['client_cert'] is not None: - module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.") - - result['key'] = module.params.get('key') - module.params['cert_cert'] = module.params.pop('client_cert') - module.params['cert_key'] = module.params.pop('client_key') - - if not HAS_ETCD: - module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR) - - allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', - 'timeout', 'user', 'password'] - # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is - # the minimum supported version - # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} - client_params = dict() - for key, value in module.params.items(): - if key in allowed_keys: - client_params[key] = value - try: - etcd = etcd3.client(**client_params) - except Exception as exp: - module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)), - exception=traceback.format_exc()) - try: - cluster_value = etcd.get(module.params['key']) - except Exception as exp: - module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)), - exception=traceback.format_exc()) - - # Make the cluster_value[0] a string for string comparisons - result['old_value'] = to_native(cluster_value[0]) - - if module.params['state'] == 'absent': - if cluster_value[0] is not None: - if module.check_mode: - result['changed'] = True - else: - try: - etcd.delete(module.params['key']) - except Exception as exp: - module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)), - exception=traceback.format_exc()) - else: - result['changed'] = True - elif module.params['state'] == 'present': - if result['old_value'] != module.params['value']: - if module.check_mode: - result['changed'] = True - else: - try: - etcd.put(module.params['key'], module.params['value']) - except Exception as exp: - module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)), - exception=traceback.format_exc()) - else: - result['changed'] = True - else: - module.fail_json(msg="State not recognized") - - # manipulate or modify the state as needed (this is going to be the - # part where your module will do what it needs to do) - - # during the execution of the module, if there is an exception or a - # conditional state that effectively causes a failure, run - # AnsibleModule.fail_json() to pass in the message and the result - - # in the event of a successful module execution, you will want to - # simple AnsibleModule.exit_json(), passing the key/value results - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clustering/nomad/nomad_job.py 
b/plugins/modules/clustering/nomad/nomad_job.py deleted file mode 100644 index 341592be50..0000000000 --- a/plugins/modules/clustering/nomad/nomad_job.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2020, FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: nomad_job -author: FERREIRA Christophe (@chris93111) -version_added: "1.3.0" -short_description: Launch a Nomad Job -description: - - Launch a Nomad job. - - Stop a Nomad job. - - Force start a Nomad job. -requirements: - - python-nomad -extends_documentation_fragment: - - community.general.nomad -options: - name: - description: - - Name of job for delete, stop and start job without source. - - Either this or I(content) must be specified. - type: str - state: - description: - - Deploy or remove job. - choices: ["present", "absent"] - required: true - type: str - force_start: - description: - - Force the job to start. - type: bool - default: false - content: - description: - - Content of Nomad job. - - Either this or I(name) must be specified. - type: str - content_format: - description: - - Type of content of Nomad job. - choices: ["hcl", "json"] - default: hcl - type: str -notes: - - C(check_mode) is supported. -seealso: - - name: Nomad jobs documentation - description: Complete documentation for Nomad API jobs. - link: https://www.nomadproject.io/api-docs/jobs/ -''' - -EXAMPLES = ''' -- name: Create job - community.general.nomad_job: - host: localhost - state: present - content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}" - timeout: 120 - -- name: Stop job - community.general.nomad_job: - host: localhost - state: absent - name: api - -- name: Force job to start - community.general.nomad_job: - host: localhost - state: present - name: api - timeout: 120 - force_start: true -''' - -import json - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -import_nomad = None -try: - import nomad - import_nomad = True -except ImportError: - import_nomad = False - - -def run(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True, type='str'), - state=dict(required=True, choices=['present', 'absent']), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path', default=None), - client_key=dict(type='path', default=None), - namespace=dict(type='str', default=None), - name=dict(type='str', default=None), - content_format=dict(choices=['hcl', 'json'], default='hcl'), - content=dict(type='str', default=None), - force_start=dict(type='bool', default=False), - token=dict(type='str', default=None, no_log=True) - ), - supports_check_mode=True, - mutually_exclusive=[ - ["name", "content"] - ], - required_one_of=[ - ['name', 'content'] - ] - ) - - if not import_nomad: - module.fail_json(msg=missing_required_lib("python-nomad")) - - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) - - nomad_client = nomad.Nomad( - host=module.params.get('host'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), - cert=certificate_ssl, -
namespace=module.params.get('namespace'), - token=module.params.get('token') - ) - - if module.params.get('state') == "present": - - if module.params.get('name') and not module.params.get('force_start'): - module.fail_json(msg='For start job with name, force_start is needed') - - changed = False - if module.params.get('content'): - - if module.params.get('content_format') == 'json': - - job_json = module.params.get('content') - try: - job_json = json.loads(job_json) - except ValueError as e: - module.fail_json(msg=to_native(e)) - job = dict() - job['job'] = job_json - try: - job_id = job_json.get('ID') - if job_id is None: - module.fail_json(msg="Cannot retrieve job with ID None") - plan = nomad_client.job.plan_job(job_id, job, diff=True) - if not plan['Diff'].get('Type') == "None": - changed = True - if not module.check_mode: - result = nomad_client.jobs.register_job(job) - else: - result = plan - else: - result = plan - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('content_format') == 'hcl': - - try: - job_hcl = module.params.get('content') - job_json = nomad_client.jobs.parse(job_hcl) - job = dict() - job['job'] = job_json - except nomad.api.exceptions.BadRequestNomadException as err: - msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text) - module.fail_json(msg=to_native(msg)) - try: - job_id = job_json.get('ID') - plan = nomad_client.job.plan_job(job_id, job, diff=True) - if not plan['Diff'].get('Type') == "None": - changed = True - if not module.check_mode: - result = nomad_client.jobs.register_job(job) - else: - result = plan - else: - result = plan - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('force_start'): - - try: - job = dict() - if module.params.get('name'): - job_name = module.params.get('name') - else: - job_name = job_json['Name'] - job_json = nomad_client.job.get_job(job_name) - if job_json['Status'] == 'running': - result = job_json - else: - job_json['Status'] = 'running' - job_json['Stop'] = False - job['job'] = job_json - if not module.check_mode: - result = nomad_client.jobs.register_job(job) - else: - result = nomad_client.validate.validate_job(job) - if not result.status_code == 200: - module.fail_json(msg=to_native(result.text)) - result = json.loads(result.text) - changed = True - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('state') == "absent": - - try: - if not module.params.get('name') is None: - job_name = module.params.get('name') - else: - if module.params.get('content_format') == 'hcl': - job_json = nomad_client.jobs.parse(module.params.get('content')) - job_name = job_json['Name'] - if module.params.get('content_format') == 'json': - job_json = module.params.get('content') - job_name = job_json['Name'] - job = nomad_client.job.get_job(job_name) - if job['Status'] == 'dead': - changed = False - result = job - else: - if not module.check_mode: - result = nomad_client.job.deregister_job(job_name) - else: - result = job - changed = True - except Exception as e: - module.fail_json(msg=to_native(e)) - - module.exit_json(changed=changed, result=result) - - -def main(): - - run() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py deleted file mode 100644 index d49111bb4f..0000000000 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 
2020, FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: nomad_job_info -author: FERREIRA Christophe (@chris93111) -version_added: "1.3.0" -short_description: Get Nomad Jobs info -description: - - Get info for one Nomad job. - - List Nomad jobs. -requirements: - - python-nomad -extends_documentation_fragment: - - community.general.nomad -options: - name: - description: - - Name of job for Get info. - - If not specified, lists all jobs. - type: str -notes: - - C(check_mode) is supported. -seealso: - - name: Nomad jobs documentation - description: Complete documentation for Nomad API jobs. - link: https://www.nomadproject.io/api-docs/jobs/ -''' - -EXAMPLES = ''' -- name: Get info for job awx - community.general.nomad_job_info: - host: localhost - name: awx - register: result - -- name: List Nomad jobs - community.general.nomad_job_info: - host: localhost - register: result - -''' - -RETURN = ''' -result: - description: List with dictionary contains jobs info - returned: success - type: list - sample: [ - { - "Affinities": null, - "AllAtOnce": false, - "Constraints": null, - "ConsulToken": "", - "CreateIndex": 13, - "Datacenters": [ - "dc1" - ], - "Dispatched": false, - "ID": "example", - "JobModifyIndex": 13, - "Meta": null, - "ModifyIndex": 13, - "Multiregion": null, - "Name": "example", - "Namespace": "default", - "NomadTokenID": "", - "ParameterizedJob": null, - "ParentID": "", - "Payload": null, - "Periodic": null, - "Priority": 50, - "Region": "global", - "Spreads": null, - "Stable": false, - "Status": "pending", - "StatusDescription": "", - "Stop": false, - "SubmitTime": 1602244370615307000, - "TaskGroups": [ - { - "Affinities": null, - "Constraints": null, - "Count": 1, - "EphemeralDisk": { - "Migrate": false, - "SizeMB": 300, - "Sticky": false - }, - "Meta": null, - "Migrate": { - "HealthCheck": "checks", - "HealthyDeadline": 300000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000 - }, - "Name": "cache", - "Networks": null, - "ReschedulePolicy": { - "Attempts": 0, - "Delay": 30000000000, - "DelayFunction": "exponential", - "Interval": 0, - "MaxDelay": 3600000000000, - "Unlimited": true - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Scaling": null, - "Services": null, - "ShutdownDelay": null, - "Spreads": null, - "StopAfterClientDisconnect": null, - "Tasks": [ - { - "Affinities": null, - "Artifacts": null, - "CSIPluginConfig": null, - "Config": { - "image": "redis:3.2", - "port_map": [ - { - "db": 6379.0 - } - ] - }, - "Constraints": null, - "DispatchPayload": null, - "Driver": "docker", - "Env": null, - "KillSignal": "", - "KillTimeout": 5000000000, - "Kind": "", - "Leader": false, - "Lifecycle": null, - "LogConfig": { - "MaxFileSizeMB": 10, - "MaxFiles": 10 - }, - "Meta": null, - "Name": "redis", - "Resources": { - "CPU": 500, - "Devices": null, - "DiskMB": 0, - "IOPS": 0, - "MemoryMB": 256, - "Networks": [ - { - "CIDR": "", - "DNS": null, - "Device": "", - "DynamicPorts": [ - { - "HostNetwork": "default", - "Label": "db", - "To": 0, - "Value": 0 - } - ], - "IP": "", - "MBits": 10, - "Mode": "", - "ReservedPorts": null - } - ] - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Services": [ - { - "AddressMode": "auto", - "CanaryMeta": null, - 
"CanaryTags": null, - "Checks": [ - { - "AddressMode": "", - "Args": null, - "CheckRestart": null, - "Command": "", - "Expose": false, - "FailuresBeforeCritical": 0, - "GRPCService": "", - "GRPCUseTLS": false, - "Header": null, - "InitialStatus": "", - "Interval": 10000000000, - "Method": "", - "Name": "alive", - "Path": "", - "PortLabel": "", - "Protocol": "", - "SuccessBeforePassing": 0, - "TLSSkipVerify": false, - "TaskName": "", - "Timeout": 2000000000, - "Type": "tcp" - } - ], - "Connect": null, - "EnableTagOverride": false, - "Meta": null, - "Name": "redis-cache", - "PortLabel": "db", - "Tags": [ - "global", - "cache" - ], - "TaskName": "" - } - ], - "ShutdownDelay": 0, - "Templates": null, - "User": "", - "Vault": null, - "VolumeMounts": null - } - ], - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "checks", - "HealthyDeadline": 180000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000, - "ProgressDeadline": 600000000000, - "Stagger": 30000000000 - }, - "Volumes": null - } - ], - "Type": "service", - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "", - "HealthyDeadline": 0, - "MaxParallel": 1, - "MinHealthyTime": 0, - "ProgressDeadline": 0, - "Stagger": 30000000000 - }, - "VaultNamespace": "", - "VaultToken": "", - "Version": 0 - } - ] - -''' - - -import os -import json - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -import_nomad = None -try: - import nomad - import_nomad = True -except ImportError: - import_nomad = False - - -def run(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True, type='str'), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path', default=None), - client_key=dict(type='path', default=None), - namespace=dict(type='str', default=None), - name=dict(type='str', default=None), - token=dict(type='str', default=None, no_log=True) - ), - supports_check_mode=True - ) - - if not import_nomad: - module.fail_json(msg=missing_required_lib("python-nomad")) - - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) - - nomad_client = nomad.Nomad( - host=module.params.get('host'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), - cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') - ) - - changed = False - result = list() - try: - job_list = nomad_client.jobs.get_jobs() - for job in job_list: - result.append(nomad_client.job.get_job(job.get('ID'))) - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('name'): - filter = list() - try: - for job in result: - if job.get('ID') == module.params.get('name'): - filter.append(job) - result = filter - if not filter: - module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name'))) - except Exception as e: - module.fail_json(msg=to_native(e)) - - module.exit_json(changed=changed, result=result) - - -def main(): - - run() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/clustering/pacemaker_cluster.py b/plugins/modules/clustering/pacemaker_cluster.py deleted file mode 100644 index 4ec6010f53..0000000000 --- a/plugins/modules/clustering/pacemaker_cluster.py +++ /dev/null @@ -1,222 +0,0 
@@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Mathieu Bultel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pacemaker_cluster -short_description: Manage pacemaker clusters -author: -- Mathieu Bultel (@matbu) -description: - - This module can manage a pacemaker cluster and nodes from Ansible using - the pacemaker cli. -options: - state: - description: - - Indicate desired state of the cluster - choices: [ cleanup, offline, online, restart ] - type: str - node: - description: - - Specify which node of the cluster you want to manage. None == the - cluster status itself, 'all' == check the status of all nodes. - type: str - timeout: - description: - - Timeout when the module should considered that the action has failed - default: 300 - type: int - force: - description: - - Force the change of the cluster state - type: bool - default: 'yes' -''' -EXAMPLES = ''' ---- -- name: Set cluster Online - hosts: localhost - gather_facts: no - tasks: - - name: Get cluster state - community.general.pacemaker_cluster: - state: online -''' - -RETURN = ''' -changed: - description: True if the cluster state has changed - type: bool - returned: always -out: - description: The output of the current state of the cluster. It return a - list of the nodes state. - type: str - sample: 'out: [[" overcloud-controller-0", " Online"]]}' - returned: always -rc: - description: exit code of the module - type: bool - returned: always -''' - -import time - -from ansible.module_utils.basic import AnsibleModule - - -_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node" - - -def get_cluster_status(module): - cmd = "pcs cluster status" - rc, out, err = module.run_command(cmd) - if out in _PCS_CLUSTER_DOWN: - return 'offline' - else: - return 'online' - - -def get_node_status(module, node='all'): - if node == 'all': - cmd = "pcs cluster pcsd-status %s" % node - else: - cmd = "pcs cluster pcsd-status" - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - status = [] - for o in out.splitlines(): - status.append(o.split(':')) - return status - - -def clean_cluster(module, timeout): - cmd = "pcs resource cleanup" - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - -def set_cluster(module, state, timeout, force): - if state == 'online': - cmd = "pcs cluster start" - if state == 'offline': - cmd = "pcs cluster stop" - if force: - cmd = "%s --force" % cmd - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - t = time.time() - ready = False - while time.time() < t + timeout: - cluster_state = get_cluster_status(module) - if cluster_state == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) - - -def set_node(module, state, timeout, force, node='all'): - # map states - if state == 'online': - cmd = "pcs cluster start" - if state == 'offline': - cmd = "pcs cluster stop" - if force: - cmd = "%s --force" % cmd - - nodes_state = get_node_status(module, node) - for node in nodes_state: - if node[1].strip().lower() != state: - cmd = "%s %s" % (cmd, node[0].strip()) - 
rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - t = time.time() - ready = False - while time.time() < t + timeout: - nodes_state = get_node_status(module) - for node in nodes_state: - if node[1].strip().lower() == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) - - -def main(): - argument_spec = dict( - state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']), - node=dict(type='str'), - timeout=dict(type='int', default=300), - force=dict(type='bool', default=True), - ) - - module = AnsibleModule( - argument_spec, - supports_check_mode=True, - ) - changed = False - state = module.params['state'] - node = module.params['node'] - force = module.params['force'] - timeout = module.params['timeout'] - - if state in ['online', 'offline']: - # Get cluster status - if node is None: - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=changed, out=cluster_state) - else: - set_cluster(module, state, timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Fail to bring the cluster %s" % state) - else: - cluster_state = get_node_status(module, node) - # Check cluster state - for node_state in cluster_state: - if node_state[1].strip().lower() == state: - module.exit_json(changed=changed, out=cluster_state) - else: - # Set cluster status if needed - set_cluster(module, state, timeout, force) - cluster_state = get_node_status(module, node) - module.exit_json(changed=True, out=cluster_state) - - if state in ['restart']: - set_cluster(module, 'offline', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'offline': - set_cluster(module, 'online', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'online': - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started") - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped") - - if state in ['cleanup']: - clean_cluster(module, timeout) - cluster_state = get_cluster_status(module) - module.exit_json(changed=True, - out=cluster_state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py deleted file mode 100644 index d55a502b15..0000000000 --- a/plugins/modules/clustering/znode.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright 2015 WP Engine, Inc. All rights reserved. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: znode -short_description: Create, delete, retrieve, and update znodes using ZooKeeper -description: - - Create, delete, retrieve, and update znodes using ZooKeeper. -options: - hosts: - description: - - A list of ZooKeeper servers (format '[server]:[port]'). - required: true - type: str - name: - description: - - The path of the znode. - required: true - type: str - value: - description: - - The value assigned to the znode. - type: str - op: - description: - - An operation to perform. 
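The op and state choices described here are thin wrappers over kazoo client calls. A minimal sketch of the underlying operations, assuming a ZooKeeper ensemble at localhost:2181 (path and value are illustrative):

    from kazoo.client import KazooClient

    zk = KazooClient(hosts='localhost:2181')
    zk.start()

    # state=present: create the node (with parents) or update it in place
    if zk.exists('/mypath'):
        current, _stat = zk.get('/mypath')
        if current != b'myvalue':
            zk.set('/mypath', b'myvalue')
    else:
        zk.create('/mypath', b'myvalue', makepath=True)

    # op=list: enumerate the children of a path
    children = zk.get_children('/zookeeper')

    zk.stop()
    zk.close()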
Mutually exclusive with state. - choices: [ get, wait, list ] - type: str - state: - description: - - The state to enforce. Mutually exclusive with op. - choices: [ present, absent ] - type: str - timeout: - description: - - The amount of time to wait for a node to appear. - default: 300 - type: int - recursive: - description: - - Recursively delete node and all its children. - type: bool - default: 'no' -requirements: - - kazoo >= 2.1 - - python >= 2.6 -author: "Trey Perry (@treyperry)" -''' - -EXAMPLES = """ -- name: Creating or updating a znode with a given value - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - value: myvalue - state: present - -- name: Getting the value and stat structure for a znode - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - op: get - -- name: Listing a particular znode's children - community.general.znode: - hosts: 'localhost:2181' - name: /zookeeper - op: list - -- name: Waiting 20 seconds for a znode to appear at path /mypath - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - op: wait - timeout: 20 - -- name: Deleting a znode at path /mypath - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - state: absent - -- name: Creating or updating a znode with a given value on a remote Zookeeper - community.general.znode: - hosts: 'my-zookeeper-node:2181' - name: /mypath - value: myvalue - state: present - delegate_to: 127.0.0.1 -""" - -import time -import traceback - -KAZOO_IMP_ERR = None -try: - from kazoo.client import KazooClient - from kazoo.handlers.threading import KazooTimeoutError - KAZOO_INSTALLED = True -except ImportError: - KAZOO_IMP_ERR = traceback.format_exc() - KAZOO_INSTALLED = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes - - -def main(): - module = AnsibleModule( - argument_spec=dict( - hosts=dict(required=True, type='str'), - name=dict(required=True, type='str'), - value=dict(type='str'), - op=dict(choices=['get', 'wait', 'list']), - state=dict(choices=['present', 'absent']), - timeout=dict(default=300, type='int'), - recursive=dict(default=False, type='bool') - ), - supports_check_mode=False - ) - - if not KAZOO_INSTALLED: - module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR) - - check = check_params(module.params) - if not check['success']: - module.fail_json(msg=check['msg']) - - zoo = KazooCommandProxy(module) - try: - zoo.start() - except KazooTimeoutError: - module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.') - - command_dict = { - 'op': { - 'get': zoo.get, - 'list': zoo.list, - 'wait': zoo.wait - }, - 'state': { - 'present': zoo.present, - 'absent': zoo.absent - } - } - - command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state' - method = module.params[command_type] - result, result_dict = command_dict[command_type][method]() - zoo.shutdown() - - if result: - module.exit_json(**result_dict) - else: - module.fail_json(**result_dict) - - -def check_params(params): - if not params['state'] and not params['op']: - return {'success': False, 'msg': 'Please define an operation (op) or a state.'} - - if params['state'] and params['op']: - return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'} - - return {'success': True} - - -class KazooCommandProxy(): - def __init__(self, module): - self.module = module - self.zk = 
KazooClient(module.params['hosts']) - - def absent(self): - return self._absent(self.module.params['name']) - - def exists(self, znode): - return self.zk.exists(znode) - - def list(self): - children = self.zk.get_children(self.module.params['name']) - return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.', - 'znode': self.module.params['name']} - - def present(self): - return self._present(self.module.params['name'], self.module.params['value']) - - def get(self): - return self._get(self.module.params['name']) - - def shutdown(self): - self.zk.stop() - self.zk.close() - - def start(self): - self.zk.start() - - def wait(self): - return self._wait(self.module.params['name'], self.module.params['timeout']) - - def _absent(self, znode): - if self.exists(znode): - self.zk.delete(znode, recursive=self.module.params['recursive']) - return True, {'changed': True, 'msg': 'The znode was deleted.'} - else: - return True, {'changed': False, 'msg': 'The znode does not exist.'} - - def _get(self, path): - if self.exists(path): - value, zstat = self.zk.get(path) - stat_dict = {} - for i in dir(zstat): - if not i.startswith('_'): - attr = getattr(zstat, i) - if isinstance(attr, (int, str)): - stat_dict[i] = attr - result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value, - 'stat': stat_dict} - else: - result = False, {'msg': 'The requested node does not exist.'} - - return result - - def _present(self, path, value): - if self.exists(path): - (current_value, zstat) = self.zk.get(path) - if value != current_value: - self.zk.set(path, to_bytes(value)) - return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path, - 'value': value} - else: - return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value} - else: - self.zk.create(path, to_bytes(value), makepath=True) - return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value} - - def _wait(self, path, timeout, interval=5): - lim = time.time() + timeout - - while time.time() < lim: - if self.exists(path): - return True, {'msg': 'The node appeared before the configured timeout.', - 'znode': path, 'timeout': timeout} - else: - time.sleep(interval) - - return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout, - 'znode': path} - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py deleted file mode 120000 index 0634d57b6a..0000000000 --- a/plugins/modules/cobbler_sync.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/cobbler/cobbler_sync.py \ No newline at end of file diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py new file mode 100644 index 0000000000..158f6ee3d6 --- /dev/null +++ b/plugins/modules/cobbler_sync.py @@ -0,0 +1,148 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: cobbler_sync +short_description: Sync Cobbler +description: + - Sync Cobbler to commit changes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + host: + description: + - The name or IP address of the Cobbler system. 
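The Cobbler modules talk to this host over Cobbler's XML-RPC API. A minimal sketch of the login-then-sync exchange that cobbler_sync performs, with an illustrative endpoint and placeholder credentials:

    import xmlrpc.client

    conn = xmlrpc.client.ServerProxy('https://cobbler01:443/cobbler_api')
    token = conn.login('cobbler', 'MySuperSecureP4sswOrd')  # placeholder credentials
    conn.sync(token)  # commit pending changes; concurrent syncs tend to fail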
+ default: 127.0.0.1 + type: str + port: + description: + - Port number to be used for REST connection. + - The default value depends on parameter O(use_ssl). + type: int + username: + description: + - The username to log in to Cobbler. + default: cobbler + type: str + password: + description: + - The password to log in to Cobbler. + type: str + use_ssl: + description: + - If V(false), an HTTP connection is used instead of the default HTTPS connection. + type: bool + default: true + validate_certs: + description: + - If V(false), SSL certificates are not validated. + - This should only set to V(false) when used on personally controlled sites using self-signed certificates. + type: bool + default: true +author: + - Dag Wieers (@dagwieers) +todo: +notes: + - Concurrently syncing Cobbler is bound to fail with weird errors. +""" + +EXAMPLES = r""" +- name: Commit Cobbler changes + community.general.cobbler_sync: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + run_once: true + delegate_to: localhost +""" + +RETURN = r""" +# Default return values +""" + +import ssl +import xmlrpc.client as xmlrpc_client + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', default='127.0.0.1'), + port=dict(type='int'), + username=dict(type='str', default='cobbler'), + password=dict(type='str', no_log=True), + use_ssl=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + username = module.params['username'] + password = module.params['password'] + port = module.params['port'] + use_ssl = module.params['use_ssl'] + validate_certs = module.params['validate_certs'] + + module.params['proto'] = 'https' if use_ssl else 'http' + if not port: + module.params['port'] = '443' if use_ssl else '80' + + result = dict( + changed=True, + ) + + start = now() + + ssl_context = None + if not validate_certs: + try: + ssl_context = ssl._create_unverified_context() + except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by default + pass + else: + # Handle target environment that doesn't support HTTPS verification + ssl._create_default_https_context = ssl._create_unverified_context + + url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + if ssl_context: + conn = xmlrpc_client.ServerProxy(url, context=ssl_context) + else: + conn = xmlrpc_client.Server(url) + + try: + token = conn.login(username, password) + except xmlrpc_client.Fault as e: + module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + except Exception as e: + module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e))) + + if not module.check_mode: + try: + conn.sync(token) + except Exception as e: + module.fail_json(msg="Failed to sync Cobbler. 
{error}".format(error=to_text(e))) + + elapsed = now() - start + module.exit_json(elapsed=elapsed.seconds, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py deleted file mode 120000 index 6d8e74fc29..0000000000 --- a/plugins/modules/cobbler_system.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/cobbler/cobbler_system.py \ No newline at end of file diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py new file mode 100644 index 0000000000..80a45854c9 --- /dev/null +++ b/plugins/modules/cobbler_system.py @@ -0,0 +1,350 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: cobbler_system +short_description: Manage system objects in Cobbler +description: + - Add, modify or remove systems in Cobbler. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + host: + description: + - The name or IP address of the Cobbler system. + default: 127.0.0.1 + type: str + port: + description: + - Port number to be used for REST connection. + - The default value depends on parameter O(use_ssl). + type: int + username: + description: + - The username to log in to Cobbler. + default: cobbler + type: str + password: + description: + - The password to log in to Cobbler. + type: str + use_ssl: + description: + - If V(false), an HTTP connection is used instead of the default HTTPS connection. + type: bool + default: true + validate_certs: + description: + - If V(false), SSL certificates are not validated. + - This should only set to V(false) when used on personally controlled sites using self-signed certificates. + type: bool + default: true + name: + description: + - The system name to manage. + type: str + properties: + description: + - A dictionary with system properties. + type: dict + interfaces: + description: + - A list of dictionaries containing interface options. + type: dict + sync: + description: + - Sync on changes. + - Concurrently syncing Cobbler is bound to fail. + type: bool + default: false + state: + description: + - Whether the system should be present, absent or a query is made. + choices: [absent, present, query] + default: present + type: str +author: + - Dag Wieers (@dagwieers) +notes: + - Concurrently syncing Cobbler is bound to fail with weird errors. +""" + +EXAMPLES = r""" +- name: Ensure the system exists in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: myhost + properties: + profile: CentOS6-x86_64 + name_servers: [2.3.4.5, 3.4.5.6] + name_servers_search: foo.com, bar.com + interfaces: + eth0: + macaddress: 00:01:02:03:04:05 + ipaddress: 1.2.3.4 + delegate_to: localhost + +- name: Enable network boot in Cobbler + community.general.cobbler_system: + host: bdsol-aci-cobbler-01 + username: cobbler + password: ins3965! 
+ name: bdsol-aci51-apic1.cisco.com + properties: + netboot_enabled: true + state: present + delegate_to: localhost + +- name: Query all systems in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + state: query + register: cobbler_systems + delegate_to: localhost + +- name: Query a specific system in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: '{{ inventory_hostname }}' + state: query + register: cobbler_properties + delegate_to: localhost + +- name: Ensure the system does not exist in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: myhost + state: absent + delegate_to: localhost +""" + +RETURN = r""" +systems: + description: List of systems. + returned: O(state=query) and O(name) is not provided + type: list +system: + description: (Resulting) information about the system we are working with. + returned: when O(name) is provided + type: dict +""" + +import ssl +import xmlrpc.client as xmlrpc_client + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +IFPROPS_MAPPING = dict( + bondingopts='bonding_opts', + bridgeopts='bridge_opts', + connected_mode='connected_mode', + cnames='cnames', + dhcptag='dhcp_tag', + dnsname='dns_name', + ifgateway='if_gateway', + interfacetype='interface_type', + interfacemaster='interface_master', + ipaddress='ip_address', + ipv6address='ipv6_address', + ipv6defaultgateway='ipv6_default_gateway', + ipv6mtu='ipv6_mtu', + ipv6prefix='ipv6_prefix', + ipv6secondaries='ipv6_secondariesu', + ipv6staticroutes='ipv6_static_routes', + macaddress='mac_address', + management='management', + mtu='mtu', + netmask='netmask', + static='static', + staticroutes='static_routes', + virtbridge='virt_bridge', +) + + +def getsystem(conn, name, token): + system = dict() + if name: + # system = conn.get_system(name, token) + systems = conn.find_system(dict(name=name), token) + if systems: + system = systems[0] + return system + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', default='127.0.0.1'), + port=dict(type='int'), + username=dict(type='str', default='cobbler'), + password=dict(type='str', no_log=True), + use_ssl=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + name=dict(type='str'), + interfaces=dict(type='dict'), + properties=dict(type='dict'), + sync=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + ), + supports_check_mode=True, + ) + + username = module.params['username'] + password = module.params['password'] + port = module.params['port'] + use_ssl = module.params['use_ssl'] + validate_certs = module.params['validate_certs'] + + name = module.params['name'] + state = module.params['state'] + + module.params['proto'] = 'https' if use_ssl else 'http' + if not port: + module.params['port'] = '443' if use_ssl else '80' + + result = dict( + changed=False, + ) + + start = now() + + ssl_context = None + if not validate_certs: + try: + ssl_context = ssl._create_unverified_context() + except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by 
default + pass + else: + # Handle target environment that doesn't support HTTPS verification + ssl._create_default_https_context = ssl._create_unverified_context + + url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + if ssl_context: + conn = xmlrpc_client.ServerProxy(url, context=ssl_context) + else: + conn = xmlrpc_client.Server(url) + + try: + token = conn.login(username, password) + except xmlrpc_client.Fault as e: + module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + except Exception as e: + module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params)) + + system = getsystem(conn, name, token) + # result['system'] = system + + if state == 'query': + if name: + result['system'] = system + else: + # Turn it into a dictionary of dictionaries + # all_systems = conn.get_systems() + # result['systems'] = { system['name']: system for system in all_systems } + + # Return a list of dictionaries + result['systems'] = conn.get_systems() + + elif state == 'present': + + if system: + # Update existing entry + system_id = '' + if LooseVersion(str(conn.version())) >= LooseVersion('3.4'): + system_id = conn.get_system_handle(name) + else: + system_id = conn.get_system_handle(name, token) + + for key, value in module.params['properties'].items(): + if key not in system: + module.warn("Property '{0}' is not a valid system property.".format(key)) + if system[key] != value: + try: + conn.modify_system(system_id, key, value, token) + result['changed'] = True + except Exception as e: + module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) + + else: + # Create a new entry + system_id = conn.new_system(token) + conn.modify_system(system_id, 'name', name, token) + result['changed'] = True + + if module.params['properties']: + for key, value in module.params['properties'].items(): + try: + conn.modify_system(system_id, key, value, token) + except Exception as e: + module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) + + # Add interface properties + interface_properties = dict() + if module.params['interfaces']: + for device, values in module.params['interfaces'].items(): + for key, value in values.items(): + if key == 'name': + continue + if key not in IFPROPS_MAPPING: + module.warn("Property '{0}' is not a valid system property.".format(key)) + if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value: + result['changed'] = True + interface_properties['{0}-{1}'.format(key, device)] = value + + if result['changed'] is True: + conn.modify_system(system_id, "modify_interface", interface_properties, token) + + # Only save when the entry was changed + if not module.check_mode and result['changed']: + conn.save_system(system_id, token) + + elif state == 'absent': + + if system: + if not module.check_mode: + conn.remove_system(name, token) + result['changed'] = True + + if not module.check_mode and module.params['sync'] and result['changed']: + try: + conn.sync(token) + except Exception as e: + module.fail_json(msg="Failed to sync Cobbler. 
{0}".format(to_text(e))) + + if state in ('absent', 'present'): + result['system'] = getsystem(conn, name, token) + + if module._diff: + result['diff'] = dict(before=system, after=result['system']) + + elapsed = now() - start + module.exit_json(elapsed=elapsed.seconds, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py deleted file mode 120000 index b203078dd0..0000000000 --- a/plugins/modules/composer.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/composer.py \ No newline at end of file diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py new file mode 100644 index 0000000000..8301e3174f --- /dev/null +++ b/plugins/modules/composer.py @@ -0,0 +1,279 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Dimitrios Tydeas Mengidis +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: composer +author: + - "Dimitrios Tydeas Mengidis (@dmtrs)" + - "René Moser (@resmo)" +short_description: Dependency Manager for PHP +description: + - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs + and it installs them in your project for you. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + command: + type: str + description: + - Composer command like V(install), V(update) and so on. + default: install + arguments: + type: str + description: + - Composer arguments like required package, version and so on. + default: '' + executable: + type: path + description: + - Path to PHP executable on the remote host, if PHP is not in E(PATH). + aliases: [php_path] + working_dir: + type: path + description: + - Directory of your project (see C(--working-dir)). This is required when the command is not run globally. + - This is ignored if O(global_command=true). + global_command: + description: + - Runs the specified command globally. + type: bool + default: false + prefer_source: + description: + - Forces installation from package sources when possible (see C(--prefer-source)). + default: false + type: bool + prefer_dist: + description: + - Forces installation from package dist even for dev versions (see C(--prefer-dist)). + default: false + type: bool + no_dev: + description: + - Disables installation of require-dev packages (see C(--no-dev)). + default: true + type: bool + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json (see C(--no-scripts)). + default: false + type: bool + no_plugins: + description: + - Disables all plugins (see C(--no-plugins)). + default: false + type: bool + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump (see C(--optimize-autoloader)). + - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: true + type: bool + classmap_authoritative: + description: + - Autoload classes from classmap only. + - Implicitly enable optimize_autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: false + type: bool + apcu_autoloader: + description: + - Uses APCu to cache found/not-found classes. 
+ default: false + type: bool + ignore_platform_reqs: + description: + - Ignore C(php), C(hhvm), C(lib-*) and C(ext-*) requirements and force the installation even if the local machine does not fulfill + these. + default: false + type: bool + composer_executable: + type: path + description: + - Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed. + version_added: 3.2.0 +requirements: + - php + - composer installed in bin path (recommended C(/usr/local/bin)) or specified in O(composer_executable) +notes: + - Default options that are always appended in each execution are C(--no-ansi), C(--no-interaction) and C(--no-progress) + if available. + - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method + to avoid issues. +""" + +EXAMPLES = r""" +- name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock + community.general.composer: + command: install + working_dir: /path/to/project + +- name: Install a new package + community.general.composer: + command: require + arguments: my/package + working_dir: /path/to/project + +- name: Clone and install a project with all dependencies + community.general.composer: + command: create-project + arguments: package/package /path/to/project ~1.0 + working_dir: /path/to/project + prefer_dist: true + +- name: Install a package globally + community.general.composer: + command: require + global_command: true + arguments: my/package +""" + +import re +import shlex +from ansible.module_utils.basic import AnsibleModule + + +def parse_out(string): + return re.sub(r"\s+", " ", string).strip() + + +def has_changed(string): + for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]: + if no_change in string: + return False + + return True + + +def get_available_options(module, command='install'): + # get all available options from a composer command using composer help to json + rc, out, err = composer_command(module, ["help", command], arguments=["--no-interaction", "--format=json"]) + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output) + + command_help_json = module.from_json(out) + return command_help_json['definition']['options'] + + +def composer_command(module, command, arguments=None, options=None): + if options is None: + options = [] + if arguments is None: + arguments = [] + + global_command = module.params['global_command'] + + if global_command: + global_arg = ["global"] + else: + global_arg = [] + options.extend(['--working-dir', module.params['working_dir']]) + + if module.params['executable'] is None: + php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) + else: + php_path = module.params['executable'] + + if module.params['composer_executable'] is None: + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + else: + composer_path = module.params['composer_executable'] + + cmd = [php_path, composer_path] + global_arg + command + options + arguments + return module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(default="install", type="str"), + arguments=dict(default="", type="str"), + executable=dict(type="path", aliases=["php_path"]), + working_dir=dict(type="path"), + global_command=dict(default=False, type="bool"), + prefer_source=dict(default=False, type="bool"), + prefer_dist=dict(default=False, type="bool"), + no_dev=dict(default=True, type="bool"), + 
no_scripts=dict(default=False, type="bool"), + no_plugins=dict(default=False, type="bool"), + apcu_autoloader=dict(default=False, type="bool"), + optimize_autoloader=dict(default=True, type="bool"), + classmap_authoritative=dict(default=False, type="bool"), + ignore_platform_reqs=dict(default=False, type="bool"), + composer_executable=dict(type="path"), + ), + required_if=[('global_command', False, ['working_dir'])], + supports_check_mode=True + ) + + # Get composer command with fallback to default + command = module.params['command'] + if re.search(r"\s", command): + module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'") + + arguments = shlex.split(module.params['arguments']) + available_options = get_available_options(module=module, command=command) + + options = [] + + # Default options + default_options = [ + 'no-ansi', + 'no-interaction', + 'no-progress', + ] + + for option in default_options: + if option in available_options: + option = "--%s" % option + options.append(option) + + option_params = { + 'prefer_source': 'prefer-source', + 'prefer_dist': 'prefer-dist', + 'no_dev': 'no-dev', + 'no_scripts': 'no-scripts', + 'no_plugins': 'no-plugins', + 'apcu_autoloader': 'apcu-autoloader', + 'optimize_autoloader': 'optimize-autoloader', + 'classmap_authoritative': 'classmap-authoritative', + 'ignore_platform_reqs': 'ignore-platform-reqs', + } + + for param, option in option_params.items(): + if module.params.get(param) and option in available_options: + option = "--%s" % option + options.append(option) + + if module.check_mode: + if 'dry-run' in available_options: + options.append('--dry-run') + else: + module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command) + + rc, out, err = composer_command(module, [command], arguments, options) + + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output, stdout=err) + else: + # Composer versions > 1.0.0-alpha9 use stderr for standard notification messages + output = parse_out(out + err) + module.exit_json(changed=has_changed(output), msg=output, stdout=out + err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py deleted file mode 120000 index 50411e09f0..0000000000 --- a/plugins/modules/consul.py +++ /dev/null @@ -1 +0,0 @@ -./clustering/consul/consul.py \ No newline at end of file diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py new file mode 100644 index 0000000000..456335babf --- /dev/null +++ b/plugins/modules/consul.py @@ -0,0 +1,612 @@ +#!/usr/bin/python +# +# Copyright (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul +short_description: Add, modify & delete services within a Consul cluster +description: + - Registers services and checks for an agent with a Consul cluster. A service is some process running on the agent node + that should be advertised by Consul's discovery mechanism. It may optionally supply a check definition, a periodic service + test to notify the Consul cluster of the service's health. + - Checks may also be registered per node, for example disk usage, or CPU usage, and notify the health of the entire node + to the cluster.
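In py-consul terms, the registration described above reduces to a few agent calls. A minimal sketch, assuming a local agent on localhost:8500 (service name, port, tags and check URL are illustrative):

    import consul

    c = consul.Consul(host='localhost', port=8500)

    # state=present: register a service with an HTTP health check
    check = consul.Check.http('http://localhost:80/status', interval='60s')
    c.agent.service.register('nginx', service_id='nginx', port=80,
                             tags=['prod', 'webservers'], check=check)

    # state=absent: deregister by service ID
    c.agent.service.deregister('nginx')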
Service level checks do not require a check name or ID as these are derived by Consul from the Service + name and ID respectively by appending V(service:). Node level checks require a O(check_name) and optionally a O(check_id). + - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this + metadata it is not possible to tell if the data supplied with Ansible represents a change to a check. As a result this + module does not attempt to determine changes and always reports that a change occurred. An API method is planned to supply this + metadata, at which point change management can be added. + - See U(http://consul.io) for more details. +requirements: + - py-consul + - requests +author: "Steve Gargan (@sgargan)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Register or deregister the Consul service, defaults to present. + default: present + choices: ['present', 'absent'] + service_name: + type: str + description: + - Unique name for the service on a node, must be unique per node, required if registering a service. May be omitted + if registering a node level check. + service_id: + type: str + description: + - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied. + host: + type: str + description: + - Host of the Consul agent, defaults to localhost. + default: localhost + port: + type: int + description: + - The port on which the Consul agent is running. + default: 8500 + scheme: + type: str + description: + - The protocol scheme on which the Consul agent is running. + default: http + validate_certs: + description: + - Whether to verify the TLS certificate of the Consul agent. + type: bool + default: true + notes: + type: str + description: + - Notes to attach to check when registering it. + service_port: + type: int + description: + - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(service_name) + or O(service_id) is set. + service_address: + type: str + description: + - The address to advertise that the service is listening on. This value is passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details. + tags: + type: list + elements: str + description: + - Tags that are attached to the service registration. + script: + type: str + description: + - The script/command that is run periodically to check the health of the service. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + interval: + type: str + description: + - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of + seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10) + is V(10s). + - Required if one of the parameters O(script), O(http), or O(tcp) is specified. + check_id: + type: str + description: + - An ID for the service check. If O(state=absent), defaults to O(check_name). Ignored if part of a service definition. + check_name: + type: str + description: + - Name for the service check. Required if standalone, ignored if part of service definition. + check_node: + description: + - Node name. + type: str + check_host: + description: + - Host name.
+    type: str
+  ttl:
+    type: str
+    description:
+      - Checks can be registered with a TTL instead of a O(script) and O(interval). This means that the service checks in
+        with the agent before the TTL expires; if it does not, the check is considered failed. Required if registering a
+        check when O(script) and O(interval) are missing. Similar to the interval, this is a number with a V(s) or V(m)
+        suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied, V(s) is
+        used by default, for example V(10) is equivalent to V(10s).
+      - Mutually exclusive with O(script), O(tcp) and O(http).
+  tcp:
+    type: str
+    description:
+      - Checks can be registered with a TCP port. This means that Consul checks if the connection attempt to that port is
+        successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
+      - Requires O(interval) to be provided.
+      - Mutually exclusive with O(script), O(ttl) and O(http).
+    version_added: '1.3.0'
+  http:
+    type: str
+    description:
+      - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful
+        HTTP status.
+      - Requires O(interval) to be provided.
+      - Mutually exclusive with O(script), O(ttl) and O(tcp).
+  timeout:
+    type: str
+    description:
+      - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval, this is a number with a V(s)
+        or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied,
+        V(s) is used by default, for example V(10) is equivalent to V(10s).
+  token:
+    type: str
+    description:
+      - The token key identifying an ACL rule set. May be required to register services.
+"""

+EXAMPLES = r"""
+- name: Register nginx service with the local Consul agent
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+
+- name: Register nginx service with curl check
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    script: curl http://localhost
+    interval: 60s
+
+- name: Register nginx with a TCP check
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    interval: 60s
+    tcp: localhost:80
+
+- name: Register nginx with an HTTP check
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    interval: 60s
+    http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    tags:
+      - prod
+      - webservers
+
+- name: Remove nginx service
+  community.general.consul:
+    service_name: nginx
+    state: absent
+
+- name: Register celery worker service
+  community.general.consul:
+    service_name: celery-worker
+    tags:
+      - prod
+      - worker
+
+- name: Create a node level check to test disk usage
+  community.general.consul:
+    check_name: Disk usage
+    check_id: disk_usage
+    script: /opt/disk_usage.py
+    interval: 5m
+
+- name: Register an HTTP check against a service that's already registered
+  community.general.consul:
+    check_name: nginx-check2
+    check_id: nginx-check2
+    service_id: nginx
+    interval: 60s
+    http: http://localhost:80/morestatus
+"""
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+
+    class PatchedConsulAgentService(consul.Consul.Agent.Service):
+        def deregister(self, service_id, token=None):
+            params = {}
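+            # py-consul's stock Agent.Service.deregister does not forward an
+            # ACL token, so this subclass passes it as the request's 'token'
+            # query parameter instead.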
+            if token:
+                params['token'] = token
+            return self.agent.http.put(consul.base.CB.bool(),
+                                       '/v1/agent/service/deregister/%s' % service_id,
+                                       params=params)
+
+    python_consul_installed = True
+except ImportError:
+    python_consul_installed = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def register_with_consul(module):
+    state = module.params['state']
+
+    if state == 'present':
+        add(module)
+    else:
+        remove(module)
+
+
+def add(module):
+    ''' Adds a service or a check depending on the supplied configuration. '''
+    check = parse_check(module)
+    service = parse_service(module)
+
+    if not service and not check:
+        module.fail_json(msg='a name and port are required to register a service')
+
+    if service:
+        if check:
+            service.add_check(check)
+        add_service(module, service)
+    elif check:
+        add_check(module, check)
+
+
+def remove(module):
+    ''' Removes a service or a check. '''
+    service_id = module.params['service_id'] or module.params['service_name']
+    check_id = module.params['check_id'] or module.params['check_name']
+    if service_id:
+        remove_service(module, service_id)
+    else:
+        remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' Registers a check with the given agent. Currently there is no way to
+    retrieve the full metadata of an existing check through the Consul API.
+    Without this we cannot compare to the supplied check, so we must assume
+    a change. '''
+    if not check.name and not check.service_id:
+        module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+    consul_api = get_consul_api(module)
+    check.register(consul_api)
+
+    module.exit_json(changed=True,
+                     check_id=check.check_id,
+                     check_name=check.name,
+                     script=check.script,
+                     interval=check.interval,
+                     ttl=check.ttl,
+                     tcp=check.tcp,
+                     http=check.http,
+                     timeout=check.timeout,
+                     service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+    ''' Removes a check using its ID. '''
+    consul_api = get_consul_api(module)
+
+    if check_id in consul_api.agent.checks():
+        consul_api.agent.check.deregister(check_id)
+        module.exit_json(changed=True, id=check_id)
+
+    module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+    ''' Registers a service with the current agent. '''
+    result = service
+    changed = False
+
+    consul_api = get_consul_api(module)
+    existing = get_service_by_id_or_name(consul_api, service.id)
+
+    # there is no way to retrieve the details of checks, so if a check is present
+    # in the service it must be re-registered
+    if service.has_checks() or not existing or not existing == service:
+
+        service.register(consul_api)
+        # check that it registered correctly
+        registered = get_service_by_id_or_name(consul_api, service.id)
+        if registered:
+            result = registered
+            changed = True
+
+    module.exit_json(changed=changed,
+                     service_id=result.id,
+                     service_name=result.name,
+                     service_port=result.port,
+                     checks=[check.to_dict() for check in service.checks()],
+                     tags=result.tags)
+
+
+def remove_service(module, service_id):
+    ''' Deregisters a service from the given agent using its service ID. '''
+    consul_api = get_consul_api(module)
+    service = get_service_by_id_or_name(consul_api, service_id)
+    if service:
+        consul_api.agent.service.deregister(service_id, token=module.params['token'])
+        module.exit_json(changed=True, id=service_id)
+
+    module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module):
+    consulClient = consul.Consul(host=module.params['host'],
+                                 port=module.params['port'],
+                                 scheme=module.params['scheme'],
+                                 verify=module.params['validate_certs'],
+                                 token=module.params['token'])
+    consulClient.agent.service = PatchedConsulAgentService(consulClient)
+    return consulClient
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+    ''' Iterate the registered services and find one with the given ID or name. '''
+    for dummy, service in consul_api.agent.services().items():
+        if service_id_or_name in (service['ID'], service['Service']):
+            return ConsulService(loaded=service)
+
+
+def parse_check(module):
+    if module.params['check_id'] or any(module.params[p] is not None for p in ('script', 'ttl', 'tcp', 'http')):
+        return ConsulCheck(
+            module.params['check_id'],
+            module.params['check_name'],
+            module.params['check_node'],
+            module.params['check_host'],
+            module.params['script'],
+            module.params['interval'],
+            module.params['ttl'],
+            module.params['notes'],
+            module.params['tcp'],
+            module.params['http'],
+            module.params['timeout'],
+            module.params['service_id'],
+        )
+
+
+def parse_service(module):
+    return ConsulService(
+        module.params['service_id'],
+        module.params['service_name'],
+        module.params['service_address'],
+        module.params['service_port'],
+        module.params['tags'],
+    )
+
+
+class ConsulService(object):
+
+    def __init__(self, service_id=None, name=None, address=None, port=-1,
+                 tags=None, loaded=None):
+        self.id = self.name = name
+        if service_id:
+            self.id = service_id
+        self.address = address
+        self.port = port
+        self.tags = tags
+        self._checks = []
+        if loaded:
+            self.id = loaded['ID']
+            self.name = loaded['Service']
+            self.port = loaded['Port']
+            self.tags = loaded['Tags']
+
+    def register(self, consul_api):
+        optional = {}
+
+        if self.port:
+            optional['port'] = self.port
+
+        if len(self._checks) > 0:
+            optional['check'] = self._checks[0].check
+
+        consul_api.agent.service.register(
+            self.name,
+            service_id=self.id,
+            address=self.address,
+            tags=self.tags,
+            **optional)
+
+    def add_check(self, check):
+        self._checks.append(check)
+
+    def checks(self):
+        return self._checks
+
+    def has_checks(self):
+        return len(self._checks) > 0
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__) and
+                self.id == other.id and
+                self.name == other.name and
+                self.port == other.port and
+                self.tags == other.tags)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {'id': self.id, "name": self.name}
+        if self.port:
+            data['port'] = self.port
+        if self.tags and len(self.tags) > 0:
+            data['tags'] = self.tags
+        if len(self._checks) > 0:
+            data['check'] = self._checks[0].to_dict()
+        return data
+
+
+class ConsulCheck(object):
+
+    def __init__(self, check_id, name, node=None, host='localhost',
+                 script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
+        self.check_id = self.name = name
+        if check_id:
+            self.check_id = check_id
+        self.service_id = service_id
+        self.notes = notes
+        self.node = node
+        self.host = host
+
+        self.interval = self.validate_duration('interval', interval)
+        self.ttl = self.validate_duration('ttl', ttl)
+        self.script = script
+        self.tcp = tcp
+        self.http = http
+        self.timeout = self.validate_duration('timeout', timeout)
+
+        self.check = None
+
+        if script:
+            self.check = consul.Check.script(script, self.interval)
+
+        if ttl:
+            self.check = consul.Check.ttl(self.ttl)
+
+        if http:
+            self.check = consul.Check.http(http, self.interval, self.timeout)
+
+        if tcp:
+            regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
+            match = re.match(regex, tcp)
+
+            if not match:
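+                # the pattern above requires an explicit ':port'; a bare
+                # hostname without a port does not match and is rejected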
+                raise Exception('tcp check must be in host:port format')
+
+            self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
+
+    def validate_duration(self, name, duration):
+        if duration:
+            duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+            if not any(duration.endswith(suffix) for suffix in duration_units):
+                duration = "{0}s".format(duration)
+        return duration
+
+    def register(self, consul_api):
+        consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+                                        notes=self.notes,
+                                        check=self.check)
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__) and
+                self.check_id == other.check_id and
+                self.service_id == other.service_id and
+                self.name == other.name and
+                self.script == other.script and
+                self.interval == other.interval)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {}
+        self._add(data, 'id', attr='check_id')
+        self._add(data, 'name')
+        self._add(data, 'script')
+        self._add(data, 'node')
+        self._add(data, 'notes')
+        self._add(data, 'host')
+        self._add(data, 'interval')
+        self._add(data, 'ttl')
+        self._add(data, 'tcp')
+        self._add(data, 'http')
+        self._add(data, 'timeout')
+        self._add(data, 'service_id')
+        return data
+
+    def _add(self, data, key, attr=None):
+        try:
+            if attr is None:
+                attr = key
+            data[key] = getattr(self, attr)
+        except Exception:
+            pass
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="py-consul required for this module. See https://github.com/criteo/py-consul?tab=readme-ov-file#installation")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(default='localhost'),
+            port=dict(default=8500, type='int'),
+            scheme=dict(default='http'),
+            validate_certs=dict(default=True, type='bool'),
+            check_id=dict(),
+            check_name=dict(),
+            check_node=dict(),
+            check_host=dict(),
+            notes=dict(),
+            script=dict(),
+            service_id=dict(),
+            service_name=dict(),
+            service_address=dict(type='str'),
+            service_port=dict(type='int'),
+            state=dict(default='present', choices=['present', 'absent']),
+            interval=dict(type='str'),
+            ttl=dict(type='str'),
+            tcp=dict(type='str'),
+            http=dict(type='str'),
+            timeout=dict(type='str'),
+            tags=dict(type='list', elements='str'),
+            token=dict(no_log=True),
+        ),
+        mutually_exclusive=[
+            ('script', 'ttl', 'tcp', 'http'),
+        ],
+        required_if=[
+            ('state', 'present', ['service_name']),
+            ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True),
+        ],
+        required_by={
+            'script': 'interval',
+            'http': 'interval',
+            'tcp': 'interval',
+        },
+        supports_check_mode=False,
+    )
+    p = module.params
+
+    test_dependencies(module)
+    if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']):
+        module.fail_json(
+            msg="The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is no longer allowed."
+        )
+
+    try:
+        register_with_consul(module)
+    except SystemExit:
+        raise
+    except ConnectionError as e:
+        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (p['host'], p['port'], str(e)))
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/consul_acl.py b/plugins/modules/consul_acl.py
deleted file mode 120000
index a7ae95f502..0000000000
--- a/plugins/modules/consul_acl.py
+++ /dev/null
@@ -1 +0,0 @@
-./clustering/consul/consul_acl.py
\ No newline at end of file
diff --git a/plugins/modules/consul_acl_bootstrap.py b/plugins/modules/consul_acl_bootstrap.py
new file mode 100644
index 0000000000..d7d474e9c6
--- /dev/null
+++ b/plugins/modules/consul_acl_bootstrap.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: consul_acl_bootstrap
+short_description: Bootstrap ACLs in Consul
+version_added: 8.3.0
+description:
+  - Allows bootstrapping of ACLs in a Consul cluster, see U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls)
+    for details.
+author:
+  - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+  - community.general.consul
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Whether the ACL system should be bootstrapped.
+    choices: ['present', 'bootstrapped']
+    default: present
+    type: str
+  bootstrap_secret:
+    description:
+      - The secret to be used as secret ID for the initial token.
+      - Needs to be a UUID.
+    type: str
+"""
+
+EXAMPLES = r"""
+- name: Bootstrap the ACL system
+  community.general.consul_acl_bootstrap:
+    bootstrap_secret: 22eaeed1-bdbd-4651-724e-42ae6c43e387
+"""
+
+RETURN = r"""
+result:
+  description:
+    - The bootstrap result as returned by the Consul HTTP API.
+    - B(Note:) If O(bootstrap_secret) has been specified, the C(SecretID) and C(ID) do not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
+      If you pass O(bootstrap_secret), make sure your playbook/role does not depend on this return value!
+  returned: changed
+  type: dict
+  sample:
+    AccessorID: 834a5881-10a9-a45b-f63c-490e28743557
+    CreateIndex: 25
+    CreateTime: '2024-01-21T20:26:27.114612038+01:00'
+    Description: Bootstrap Token (Global Management)
+    Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY=
+    ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+    Local: false
+    ModifyIndex: 25
+    Policies:
+      - ID: 00000000-0000-0000-0000-000000000001
+        Name: global-management
+    SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+    AUTH_ARGUMENTS_SPEC,
+    RequestError,
+    _ConsulModule,
+)
+
+_ARGUMENT_SPEC = {
+    "state": dict(type="str", choices=["present", "bootstrapped"], default="present"),
+    "bootstrap_secret": dict(type="str", no_log=True),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+_ARGUMENT_SPEC.pop("token")
+
+
+def main():
+    module = AnsibleModule(_ARGUMENT_SPEC)
+    consul_module = _ConsulModule(module)
+
+    data = {}
+    if "bootstrap_secret" in module.params:
+        data["BootstrapSecret"] = module.params["bootstrap_secret"]
+
+    try:
+        response = consul_module.put("acl/bootstrap", data=data)
+    except RequestError as e:
+        if e.status == 403 and b"ACL bootstrap no longer allowed" in e.response_data:
+            return module.exit_json(changed=False)
+        raise
+    else:
+        return module.exit_json(changed=True, result=response)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py
new file mode 100644
index 0000000000..e241c8ddf4
--- /dev/null
+++ b/plugins/modules/consul_agent_check.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# Copyright (c) 2024, Michael Ilg
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: consul_agent_check
+short_description: Add, modify, and delete checks within a Consul cluster
+version_added: 9.1.0
+description:
+  - Allows the addition, modification and deletion of checks in a Consul cluster using the agent. For more details on using
+    and configuring checks, see U(https://developer.hashicorp.com/consul/api-docs/agent/check).
+  - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without
+    this metadata it is not possible to tell if the data supplied to Ansible represents a change to a check. As a result,
+    the module does not attempt to determine changes and it always reports that a change occurred. An API method is planned
+    to supply this metadata, at which point change management can be added.
+author:
+  - Michael Ilg (@Ilgmi)
+extends_documentation_fragment:
+  - community.general.consul
+  - community.general.consul.actiongroup_consul
+  - community.general.consul.token
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+    details:
+      - The result is the object as it is defined in the module options and not the object structure of the Consul API. For
+        a better overview of what the object structure looks like, take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks).
+  diff_mode:
+    support: partial
+    details:
+      - In check mode the diff shows the object as it is defined in the module options and not the object structure of the
+        Consul API.
+options:
+  state:
+    description:
+      - Whether the check should be present or absent.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  name:
+    description:
+      - Required name for the service check.
+    type: str
+  id:
+    description:
+      - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary
+        to provide an ID for uniqueness. This value is returned in the response as V(CheckId).
+    type: str
+  interval:
+    description:
+      - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units
+        of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied, V(s) is used by default, for example
+        V(10) is equivalent to V(10s).
+      - Required if one of the parameters O(args), O(http), or O(tcp) is specified.
+    type: str
+  notes:
+    description:
+      - Notes to attach to check when registering it.
+    type: str
+  args:
+    description:
+      - Specifies command arguments to run to update the status of the check.
+      - Requires O(interval) to be provided.
+      - Mutually exclusive with O(ttl), O(tcp) and O(http).
+    type: list
+    elements: str
+  ttl:
+    description:
+      - Checks can be registered with a TTL instead of a O(args) and O(interval). This means that the service checks in
+        with the agent before the TTL expires; if it does not, the check is considered failed. Required if registering a
+        check when O(args) and O(interval) are missing. Similar to the interval, this is a number with a V(s) or V(m) suffix
+        to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied, V(s) is used
+        by default, for example V(10) is equivalent to V(10s).
+      - Mutually exclusive with O(args), O(tcp) and O(http).
+    type: str
+  tcp:
+    description:
+      - Checks can be registered with a TCP port. This means that Consul checks if the connection attempt to that port
+        is successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
+      - Requires O(interval) to be provided.
+      - Mutually exclusive with O(args), O(ttl) and O(http).
+    type: str
+  http:
+    description:
+      - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful
+        HTTP status.
+      - Requires O(interval) to be provided.
+      - Mutually exclusive with O(args), O(ttl) and O(tcp).
+    type: str
+  timeout:
+    description:
+      - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval, this is a number with a V(s)
+        or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied,
+        V(s) is used by default, for example V(10) is equivalent to V(10s).
+    type: str
+  service_id:
+    description:
+      - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied.
+    type: str
+"""
+
+EXAMPLES = r"""
+- name: Register tcp check for service 'nginx'
+  community.general.consul_agent_check:
+    name: nginx_tcp_check
+    service_id: nginx
+    interval: 60s
+    tcp: localhost:80
+    notes: "Nginx Check"
+
+- name: Register http check for service 'nginx'
+  community.general.consul_agent_check:
+    name: nginx_http_check
+    service_id: nginx
+    interval: 60s
+    http: http://localhost:80/status
+    notes: "Nginx Check"
+
+- name: Remove check for service 'nginx'
+  community.general.consul_agent_check:
+    state: absent
+    id: nginx_http_check
+    service_id: "{{ nginx_service.ID }}"
+"""
+
+RETURN = r"""
+check:
+  description: The check as returned by the Consul HTTP API.
+ returned: always + type: dict + sample: + CheckID: nginx_check + ServiceID: nginx + Interval: 30s + Type: http + Notes: Nginx Check +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + OPERATION_READ, + _ConsulModule, + validate_check, +) + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "interval": dict(type='str'), + "notes": dict(type='str'), + "args": dict(type='list', elements='str'), + "http": dict(type='str'), + "tcp": dict(type='str'), + "ttl": dict(type='str'), + "timeout": dict(type='str'), + "service_id": dict(type='str'), +} + +_MUTUALLY_EXCLUSIVE = [ + ('args', 'ttl', 'tcp', 'http'), +] + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentCheckModule(_ConsulModule): + api_endpoint = "agent/check" + result_key = "check" + unique_identifiers = ["id", "name"] + operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags", + "Status", "Type", "ExposedPort", "Definition"} + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return "agent/checks" + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentCheckModule, self).endpoint_url(operation, identifier) + + def read_object(self): + url = self.endpoint_url(OPERATION_READ) + checks = self.get(url) + identifier = self.id_from_obj(self.params) + if identifier in checks: + return checks[identifier] + return None + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentCheckModule, self).prepare_object(existing, obj) + validate_check(existing) + return existing + + def delete_object(self, obj): + if not self._module.check_mode: + self.put(self.endpoint_url(OPERATION_DELETE, obj.get("CheckID"))) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + mutually_exclusive=_MUTUALLY_EXCLUSIVE, + required_if=_REQUIRED_IF, + required_by=_REQUIRED_BY, + supports_check_mode=True, + ) + + consul_module = ConsulAgentCheckModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py new file mode 100644 index 0000000000..7d7c94c05a --- /dev/null +++ b/plugins/modules/consul_agent_service.py @@ -0,0 +1,281 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_agent_service +short_description: Add, modify and delete services within a Consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of services in a Consul cluster using the agent. + - There are currently no plans to create services and checks in one. 
This is because the Consul API does not provide checks + for a service and the checks themselves do not match the module parameters. Therefore, only a service without checks can + be created in this module. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. +options: + state: + description: + - Whether the service should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Unique name for the service on a node, must be unique per node, required if registering a service. + type: str + id: + description: + - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not + provided. If O(state=absent), defaults to the service name if supplied. + type: str + tags: + description: + - Tags that are attached to the service registration. + type: list + elements: str + address: + description: + - The address to advertise that the service listens on. This value is passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details. + type: str + meta: + description: + - Optional meta data used for filtering. For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed. Not + allowed characters are replaced with underscores. + type: dict + service_port: + description: + - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(name) + or O(id) is set. + type: int + enable_tag_override: + description: + - Specifies to disable the anti-entropy feature for this service's tags. If C(EnableTagOverride) is set to true then + external agents can update this service in the catalog and modify the tags. + type: bool + default: false + weights: + description: + - Specifies weights for the service. + type: dict + suboptions: + passing: + description: + - Weights for passing. + type: int + default: 1 + warning: + description: + - Weights for warning. 
+        type: int
+        default: 1
+    default: {"passing": 1, "warning": 1}
+"""
+
+EXAMPLES = r"""
+- name: Register nginx service with the local Consul agent
+  community.general.consul_agent_service:
+    host: consul1.example.com
+    token: some_management_acl
+    name: nginx
+    service_port: 80
+
+- name: Register external service nginx available at 10.1.5.23
+  community.general.consul_agent_service:
+    host: consul1.example.com
+    token: some_management_acl
+    name: nginx
+    service_port: 80
+    address: 10.1.5.23
+
+- name: Register nginx with some service tags
+  community.general.consul_agent_service:
+    host: consul1.example.com
+    token: some_management_acl
+    name: nginx
+    service_port: 80
+    tags:
+      - prod
+      - webservers
+
+- name: Register nginx with some service meta
+  community.general.consul_agent_service:
+    host: consul1.example.com
+    token: some_management_acl
+    name: nginx
+    service_port: 80
+    meta:
+      nginx_version: 1.25.3
+
+- name: Remove nginx service
+  community.general.consul_agent_service:
+    host: consul1.example.com
+    token: some_management_acl
+    id: nginx
+    state: absent
+
+- name: Register celery worker service
+  community.general.consul_agent_service:
+    host: consul1.example.com
+    token: some_management_acl
+    name: celery-worker
+    tags:
+      - prod
+      - worker
+"""
+
+RETURN = r"""
+service:
+  description: The service as returned by the Consul HTTP API.
+  returned: always
+  type: dict
+  sample:
+    ID: nginx
+    Service: nginx
+    Address: localhost
+    Port: 80
+    Tags:
+      - http
+    Meta:
+      nginx_version: 1.23.3
+    Datacenter: dc1
+    Weights:
+      Passing: 1
+      Warning: 1
+    ContentHash: 61a245cd985261ac
+    EnableTagOverride: false
+operation:
+  description: The operation performed.
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + _ConsulModule +) + +_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')] +_CHECK_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "tags": dict(type='list', elements='str'), + "address": dict(type='str'), + "meta": dict(type='dict'), + "service_port": dict(type='int'), + "enable_tag_override": dict(type='bool', default=False), + "weights": dict(type='dict', options=dict( + passing=dict(type='int', default=1, no_log=False), + warning=dict(type='int', default=1) + ), default={"passing": 1, "warning": 1}) +} + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentServiceModule(_ConsulModule): + api_endpoint = "agent/service" + result_key = "service" + unique_identifiers = ["id", "name"] + operational_attributes = {"Service", "ContentHash", "Datacenter"} + + def endpoint_url(self, operation, identifier=None): + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentServiceModule, self).endpoint_url(operation, identifier) + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentServiceModule, self).prepare_object(existing, obj) + if "ServicePort" in existing: + existing["Port"] = existing.pop("ServicePort") + + if "ID" not in existing: + existing["ID"] = existing["Name"] + + return existing + + def needs_update(self, api_obj, module_obj): + obj = {} + if "Service" in api_obj: + obj["Service"] = api_obj["Service"] + api_obj = self.prepare_object(api_obj, obj) + + if "Name" in module_obj: + module_obj["Service"] = module_obj.pop("Name") + if "ServicePort" in module_obj: + module_obj["Port"] = module_obj.pop("ServicePort") + + return super(ConsulAgentServiceModule, self).needs_update(api_obj, module_obj) + + def delete_object(self, obj): + if not self._module.check_mode: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + self.put(url) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=_REQUIRED_IF, + supports_check_mode=True, + ) + + consul_module = ConsulAgentServiceModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py new file mode 100644 index 0000000000..88842662bb --- /dev/null +++ b/plugins/modules/consul_auth_method.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_auth_method +short_description: Manipulate Consul auth methods +version_added: 8.3.0 +description: + - Allows the addition, modification and deletion of auth methods in a Consul cluster using the agent. 
For more details on + using and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). +author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Specifies a name for the ACL auth method. + - The name can contain alphanumeric characters, dashes C(-), and underscores C(_). + type: str + required: true + type: + description: + - The type of auth method being configured. + - This field is immutable. + - Required when the auth method is created. + type: str + choices: ['kubernetes', 'jwt', 'oidc', 'aws-iam'] + description: + description: + - Free form human readable description of the auth method. + type: str + display_name: + description: + - An optional name to use instead of O(name) when displaying information about this auth method. + type: str + max_token_ttl: + description: + - This specifies the maximum life of any token created by this auth method. + - Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes, respectively). + type: str + token_locality: + description: + - Defines the kind of token that this auth method should produce. + type: str + choices: ['local', 'global'] + config: + description: + - The raw configuration to use for the chosen auth method. + - Contents vary depending upon the O(type) chosen. + - Required when the auth method is created. + type: dict +""" + +EXAMPLES = r""" +- name: Create an auth method + community.general.consul_auth_method: + name: test + type: jwt + config: + jwt_validation_pubkeys: + - | + -----BEGIN PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo + 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u + +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh + kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ + 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg + cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc + mwIDAQAB + -----END PUBLIC KEY----- + token: "{{ consul_management_token }}" + +- name: Delete auth method + community.general.consul_auth_method: + name: test + state: absent + token: "{{ consul_management_token }}" +""" + +RETURN = r""" +auth_method: + description: The auth method as returned by the Consul HTTP API. + returned: always + type: dict + sample: + Config: + JWTValidationPubkeys: + - |- + -----BEGIN PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo + 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u + +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh + kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ + 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg + cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc + mwIDAQAB + -----END PUBLIC KEY----- + CreateIndex: 416 + ModifyIndex: 487 + Name: test + Type: jwt +operation: + description: The operation performed. 
+  returned: changed
+  type: str
+  sample: update
+"""
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+    AUTH_ARGUMENTS_SPEC,
+    _ConsulModule,
+    camel_case_key,
+)
+
+
+def normalize_ttl(ttl):
+    matches = re.findall(r"(\d+)([hms])", ttl)
+    ttl = 0
+    for value, unit in matches:
+        value = int(value)
+        if unit == "m":
+            value *= 60
+        elif unit == "h":
+            value *= 60 * 60
+        ttl += value
+
+    new_ttl = ""
+    hours, remainder = divmod(ttl, 3600)
+    if hours:
+        new_ttl += "{0}h".format(hours)
+    minutes, seconds = divmod(remainder, 60)
+    if minutes:
+        new_ttl += "{0}m".format(minutes)
+    if seconds:
+        new_ttl += "{0}s".format(seconds)
+    return new_ttl
+
+
+class ConsulAuthMethodModule(_ConsulModule):
+    api_endpoint = "acl/auth-method"
+    result_key = "auth_method"
+    unique_identifiers = ["name"]
+
+    def map_param(self, k, v, is_update):
+        if k == "config" and v:
+            v = {camel_case_key(k2): v2 for k2, v2 in v.items()}
+        return super(ConsulAuthMethodModule, self).map_param(k, v, is_update)
+
+    def needs_update(self, api_obj, module_obj):
+        if "MaxTokenTTL" in module_obj:
+            module_obj["MaxTokenTTL"] = normalize_ttl(module_obj["MaxTokenTTL"])
+        return super(ConsulAuthMethodModule, self).needs_update(api_obj, module_obj)
+
+
+_ARGUMENT_SPEC = {
+    "name": dict(type="str", required=True),
+    "type": dict(type="str", choices=["kubernetes", "jwt", "oidc", "aws-iam"]),
+    "description": dict(type="str"),
+    "display_name": dict(type="str"),
+    "max_token_ttl": dict(type="str", no_log=False),
+    "token_locality": dict(type="str", choices=["local", "global"]),
+    "config": dict(type="dict"),
+    "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+    module = AnsibleModule(
+        _ARGUMENT_SPEC,
+        supports_check_mode=True,
+    )
+    consul_module = ConsulAuthMethodModule(module)
+    consul_module.execute()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py
new file mode 100644
index 0000000000..de1fae9357
--- /dev/null
+++ b/plugins/modules/consul_binding_rule.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: consul_binding_rule
+short_description: Manipulate Consul binding rules
+version_added: 8.3.0
+description:
+  - Allows the addition, modification and deletion of binding rules in a Consul cluster using the agent. For more details
+    on using and configuring binding rules, see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules).
+author:
+  - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+  - community.general.consul
+  - community.general.consul.actiongroup_consul
+  - community.general.consul.token
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: partial
+    details:
+      - In check mode the diff misses operational attributes.
+options:
+  state:
+    description:
+      - Whether the binding rule should be present or absent.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  name:
+    description:
+      - Specifies a name for the binding rule.
+      - 'Note: This is used to identify the binding rule.
But since the API does not support a name, it is prefixed to the + description.' + type: str + required: true + description: + description: + - Free form human readable description of the binding rule. + type: str + auth_method: + description: + - The name of the auth method that this rule applies to. + type: str + required: true + selector: + description: + - Specifies the expression used to match this rule against valid identities returned from an auth method validation. + - If empty this binding rule matches all valid identities returned from the auth method. + type: str + bind_type: + description: + - Specifies the way the binding rule affects a token created at login. + type: str + choices: [service, node, role, templated-policy] + bind_name: + description: + - The name to bind to a token at login-time. + - What it binds to can be adjusted with different values of the O(bind_type) parameter. + type: str + bind_vars: + description: + - Specifies the templated policy variables when O(bind_type) is set to V(templated-policy). + type: dict +""" + +EXAMPLES = r""" +- name: Create a binding rule + community.general.consul_binding_rule: + name: my_name + description: example rule + auth_method: minikube + bind_type: service + bind_name: "{{ serviceaccount.name }}" + token: "{{ consul_management_token }}" + +- name: Remove a binding rule + community.general.consul_binding_rule: + name: my_name + auth_method: minikube + state: absent +""" + +RETURN = r""" +binding_rule: + description: The binding rule as returned by the Consul HTTP API. + returned: always + type: dict + sample: + Description: "my_name: example rule" + AuthMethod: minikube + Selector: serviceaccount.namespace==default + BindType: service + BindName: "{{ serviceaccount.name }}" + CreateIndex: 30 + ID: 59c8a237-e481-4239-9202-45f117950c5f + ModifyIndex: 33 +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + RequestError, + _ConsulModule, +) + + +class ConsulBindingRuleModule(_ConsulModule): + api_endpoint = "acl/binding-rule" + result_key = "binding_rule" + unique_identifiers = ["id"] + + def read_object(self): + url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"]) + try: + results = self.get(url) + for result in results: + if result.get("Description").startswith( + "{0}: ".format(self.params["name"]) + ): + return result + except RequestError as e: + if e.status == 404: + return + elif e.status == 403 and b"ACL not found" in e.response_data: + return + raise + + def module_to_obj(self, is_update): + obj = super(ConsulBindingRuleModule, self).module_to_obj(is_update) + del obj["Name"] + return obj + + def prepare_object(self, existing, obj): + final = super(ConsulBindingRuleModule, self).prepare_object(existing, obj) + name = self.params["name"] + description = final.pop("Description", "").split(": ", 1)[-1] + final["Description"] = "{0}: {1}".format(name, description) + return final + + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "description": dict(type="str"), + "auth_method": dict(type="str", required=True), + "selector": dict(type="str"), + "bind_type": dict( + type="str", choices=["service", "node", "role", "templated-policy"] + ), + "bind_name": dict(type="str"), + "bind_vars": dict(type="dict"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulBindingRuleModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py deleted file mode 120000 index 105d774f6d..0000000000 --- a/plugins/modules/consul_kv.py +++ /dev/null @@ -1 +0,0 @@ -./clustering/consul/consul_kv.py \ No newline at end of file diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py new file mode 100644 index 0000000000..d9354e62c5 --- /dev/null +++ b/plugins/modules/consul_kv.py @@ -0,0 +1,330 @@ +#!/usr/bin/python +# +# Copyright (c) 2015, Steve Gargan +# Copyright (c) 2018 Genome Research Ltd. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_kv +short_description: Manipulate entries in the key/value store of a Consul cluster +description: + - Allows the retrieval, addition, modification and deletion of key/value entries in a Consul cluster using the agent. The + entire contents of the record, including the indices, flags and session are returned as C(value). + - If the O(key) represents a prefix then note that when a value is removed, the existing value if any is returned as part + of the results. + - See http://www.consul.io/docs/agent/http.html#kv for more details. 
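
An aside on the consul_binding_rule module above: because the Consul binding-rule API has no name field, the module encodes its O(name) option as a C(name: description) prefix on C(Description) and later looks rules up by that prefix. A standalone sketch of the convention (the helper names here are illustrative, not the module's own):

```python
def encode_description(name, description):
    """Store the logical rule name as a 'name: ...' prefix on Description."""
    return "{0}: {1}".format(name, description)


def find_rule(rules, name):
    """Pick the rule whose Description carries the given name prefix."""
    prefix = "{0}: ".format(name)
    for rule in rules:
        if rule.get("Description", "").startswith(prefix):
            return rule
    return None


rules = [{"Description": encode_description("my_name", "example rule"),
          "BindType": "service"}]
assert find_rule(rules, "my_name")["BindType"] == "service"
assert find_rule(rules, "other") is None
```

One consequence of this design: renaming a rule changes its Description prefix, so the old rule is no longer found and a new one is created.
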
+requirements:
+  - py-consul
+  - requests
+author:
+  - Steve Gargan (@sgargan)
+  - Colin Nolan (@colin-nolan)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key's contents
+        are set to the value supplied and C(changed) is set to V(true) only if the value was different to the current contents.
+        If the state is V(present) and O(value) is not set, the existing value associated to the key is returned. The state
+        V(absent) is used to remove the key/value pair, again C(changed) is set to V(true) only if the key actually existed
+        prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with the
+        states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt; C(changed) is
+        V(true) if the attempt is successful, V(false) otherwise.
+    type: str
+    choices: [absent, acquire, present, release]
+    default: present
+  key:
+    description:
+      - The key at which the value should be stored.
+    type: str
+    required: true
+  value:
+    description:
+      - The value to be associated with the given key, required if O(state) is V(present).
+    type: str
+  recurse:
+    description:
+      - If the key represents a prefix, each entry with the prefix can be retrieved by setting this to V(true).
+    type: bool
+  retrieve:
+    description:
+      - If the O(state) is V(present) and O(value) is set, perform a read after setting the value and return this value.
+    default: true
+    type: bool
+  session:
+    description:
+      - The session that should be used to acquire or release a lock associated with a key/value pair.
+    type: str
+  token:
+    description:
+      - The token key identifying an ACL rule set that controls access to the key value pair.
+    type: str
+  cas:
+    description:
+      - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul only puts the key if it does not already
+        exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that key.
+    type: str
+  flags:
+    description:
+      - Opaque positive integer value that can be passed when setting a value.
+    type: str
+  host:
+    description:
+      - Host of the Consul agent.
+    type: str
+    default: localhost
+  port:
+    description:
+      - The port on which the Consul agent is running.
+    type: int
+    default: 8500
+  scheme:
+    description:
+      - The protocol scheme on which the Consul agent is running.
+    type: str
+    default: http
+  validate_certs:
+    description:
+      - Whether to verify the TLS certificate of the Consul agent.
+    type: bool
+    default: true
+  datacenter:
+    description:
+      - The name of the datacenter to query. If unspecified, the query defaults to the datacenter of the Consul agent on O(host).
+ type: str + version_added: 10.0.0 +""" + + +EXAMPLES = r""" +# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None` +# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None` +- name: Retrieve a value from the key/value store + community.general.consul_kv: + key: somekey + register: retrieved_key + +- name: Add or update the value associated with a key in the key/value store + community.general.consul_kv: + key: somekey + value: somevalue + +- name: Remove a key from the store + community.general.consul_kv: + key: somekey + state: absent + +- name: Add a node to an arbitrary group using Consul inventory (see consul.ini) + community.general.consul_kv: + key: ansible/groups/dc1/somenode + value: top_secret + +- name: Register a key/value pair with an associated session + community.general.consul_kv: + key: stg/node/server_birthday + value: 20160509 + session: "{{ sessionid }}" + state: acquire +""" + +from ansible.module_utils.common.text.converters import to_text + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError: + python_consul_installed = False + +from ansible.module_utils.basic import AnsibleModule + +# Note: although the py-consul implementation implies that using a key with a value of `None` with `put` has a special +# meaning (https://github.com/criteo/py-consul/blob/master/consul/api/kv.py), if not set in the subsequently API call, +# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key) +NOT_SET = None + + +def _has_value_changed(consul_client, key, target_value): + """ + Uses the given Consul client to determine if the value associated to the given key is different to the given target + value. + :param consul_client: Consul connected client + :param key: key in Consul + :param target_value: value to be associated to the key + :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the + value has changed (i.e. 
+    the stored value is not the target value)
+    """
+    index, existing = consul_client.kv.get(key)
+    if not existing:
+        return index, True
+    try:
+        changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+        return index, changed
+    except UnicodeError:
+        # Existing value was not decodable but all values we set are valid utf-8
+        return index, True
+
+
+def execute(module):
+    state = module.params.get('state')
+
+    if state == 'acquire' or state == 'release':
+        lock(module, state)
+    elif state == 'present':
+        if module.params.get('value') is NOT_SET:
+            get_value(module)
+        else:
+            set_value(module)
+    elif state == 'absent':
+        remove_value(module)
+    else:
+        module.exit_json(msg="Unsupported state: %s" % (state, ))
+
+
+def lock(module, state):
+
+    consul_api = get_consul_api(module)
+
+    session = module.params.get('session')
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    if not session:
+        module.fail_json(
+            msg='%s of lock for %s requested but no session supplied' %
+            (state, key))
+
+    index, changed = _has_value_changed(consul_api, key, value)
+
+    if changed and not module.check_mode:
+        if state == 'acquire':
+            changed = consul_api.kv.put(key, value,
+                                        cas=module.params.get('cas'),
+                                        acquire=session,
+                                        flags=module.params.get('flags'))
+        else:
+            changed = consul_api.kv.put(key, value,
+                                        cas=module.params.get('cas'),
+                                        release=session,
+                                        flags=module.params.get('flags'))
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key)
+
+
+def get_value(module):
+    consul_api = get_consul_api(module)
+    key = module.params.get('key')
+
+    index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+    module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+    consul_api = get_consul_api(module)
+
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    if value is NOT_SET:
+        raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+    index, changed = _has_value_changed(consul_api, key, value)
+
+    if changed and not module.check_mode:
+        changed = consul_api.kv.put(key, value,
+                                    cas=module.params.get('cas'),
+                                    flags=module.params.get('flags'))
+
+    stored = None
+    if module.params.get('retrieve'):
+        index, stored = consul_api.kv.get(key)
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key,
+                     data=stored)
+
+
+def remove_value(module):
+    ''' Remove the value associated with the given key. If the recurse parameter
+    is set, then any key prefixed with the given key is removed. '''
+    consul_api = get_consul_api(module)
+
+    key = module.params.get('key')
+
+    index, existing = consul_api.kv.get(
+        key, recurse=module.params.get('recurse'))
+
+    changed = existing is not None
+    if changed and not module.check_mode:
+        consul_api.kv.delete(key, module.params.get('recurse'))
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key,
+                     data=existing)
+
+
+def get_consul_api(module):
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         scheme=module.params.get('scheme'),
+                         verify=module.params.get('validate_certs'),
+                         token=module.params.get('token'),
+                         dc=module.params.get('datacenter'))
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="python-consul required for this module.
" + "see https://python-consul.readthedocs.io/en/latest/#installation") + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cas=dict(type='str'), + datacenter=dict(type='str'), + flags=dict(type='str'), + key=dict(type='str', required=True, no_log=False), + host=dict(type='str', default='localhost'), + scheme=dict(type='str', default='http'), + validate_certs=dict(type='bool', default=True), + port=dict(type='int', default=8500), + recurse=dict(type='bool'), + retrieve=dict(type='bool', default=True), + state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']), + token=dict(type='str', no_log=True), + value=dict(type='str', default=NOT_SET), + session=dict(type='str'), + ), + supports_check_mode=True + ) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), e)) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py new file mode 100644 index 0000000000..95d2ac48d0 --- /dev/null +++ b/plugins/modules/consul_policy.py @@ -0,0 +1,162 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_policy +short_description: Manipulate Consul policies +version_added: 7.2.0 +description: + - Allows the addition, modification and deletion of policies in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). +author: + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 8.3.0 + diff_mode: + support: partial + version_added: 8.3.0 + details: + - In check mode the diff misses operational attributes. + action_group: + version_added: 8.3.0 +options: + state: + description: + - Whether the policy should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + valid_datacenters: + description: + - Valid datacenters for the policy. All if list is empty. + type: list + elements: str + name: + description: + - The name that should be associated with the policy, this is opaque to Consul. + required: true + type: str + description: + description: + - Description of the policy. + type: str + rules: + type: str + description: + - Rule document that should be associated with the current policy. 
+""" + +EXAMPLES = r""" +- name: Create a policy with rules + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + rules: | + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } + +- name: Update the rules associated to a policy + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + rules: | + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } + event "bbq" { + policy = "write" + } + +- name: Remove a policy + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + state: absent +""" + +RETURN = r""" +policy: + description: The policy as returned by the Consul HTTP API. + returned: always + type: dict + sample: + CreateIndex: 632 + Description: Testing + Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A= + Name: foo-access + Rules: |- + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_READ, + _ConsulModule, +) + +_ARGUMENT_SPEC = { + "name": dict(required=True), + "description": dict(type="str"), + "rules": dict(type="str"), + "valid_datacenters": dict(type="list", elements="str"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulPolicyModule(_ConsulModule): + api_endpoint = "acl/policy" + result_key = "policy" + unique_identifiers = ["id"] + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return [self.api_endpoint, "name", self.params["name"]] + return super(ConsulPolicyModule, self).endpoint_url(operation, identifier) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulPolicyModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py new file mode 100644 index 0000000000..968de022a2 --- /dev/null +++ b/plugins/modules/consul_role.py @@ -0,0 +1,283 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_role +short_description: Manipulate Consul roles +version_added: 7.5.0 +description: + - Allows the addition, modification and deletion of roles in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). +author: + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.token + - community.general.consul.actiongroup_consul + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. + version_added: 8.3.0 + action_group: + version_added: 8.3.0 +options: + name: + description: + - A name used to identify the role. + required: true + type: str + state: + description: + - Whether the role should be present or absent. 
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  description:
+    description:
+      - Description of the role.
+      - If not specified, the assigned description is not changed.
+    type: str
+  policies:
+    type: list
+    elements: dict
+    description:
+      - List of policies to attach to the role. Each policy is a dict.
+      - If the parameter is left blank, any policies currently assigned are not changed.
+      - An empty array (V([])) clears any policies previously set.
+    suboptions:
+      name:
+        description:
+          - The name of the policy to attach to this role; see M(community.general.consul_policy) for more info.
+          - Either this or O(policies[].id) must be specified.
+        type: str
+      id:
+        description:
+          - The ID of the policy to attach to this role; see M(community.general.consul_policy) for more info.
+          - Either this or O(policies[].name) must be specified.
+        type: str
+  templated_policies:
+    description:
+      - The list of templated policies that should be applied to the role.
+    type: list
+    elements: dict
+    version_added: 8.3.0
+    suboptions:
+      template_name:
+        description:
+          - The templated policy name.
+        type: str
+        required: true
+      template_variables:
+        description:
+          - The templated policy variables.
+          - Not all templated policies require variables.
+        type: dict
+  service_identities:
+    type: list
+    elements: dict
+    description:
+      - List of service identities to attach to the role.
+      - If not specified, any service identities currently assigned are not changed.
+      - If the parameter is an empty array (V([])), any service identities assigned are unassigned.
+    suboptions:
+      service_name:
+        description:
+          - The name of the service.
+          - Must not be longer than 256 characters, and must start and end with a lowercase alphanumeric character.
+          - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+          - This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name) in community.general
+            8.3.0. The old name can still be used.
+        type: str
+        required: true
+        aliases:
+          - name
+      datacenters:
+        description:
+          - The datacenters where the policies are effective.
+          - This results in the effective policy only being valid in these datacenters.
+          - If an empty array (V([])) is specified, the policies are valid in all datacenters, including those which do not
+            yet exist but may in the future.
+        type: list
+        elements: str
+  node_identities:
+    type: list
+    elements: dict
+    description:
+      - List of node identities to attach to the role.
+      - If not specified, any node identities currently assigned are not changed.
+      - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
+    suboptions:
+      node_name:
+        description:
+          - The name of the node.
+          - Must not be longer than 256 characters, and must start and end with a lowercase alphanumeric character.
+          - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+          - This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name) in community.general
+            8.3.0. The old name can still be used.
+        type: str
+        required: true
+        aliases:
+          - name
+      datacenter:
+        description:
+          - The node's datacenter.
+          - This results in the effective policy only being valid in this datacenter.
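Taken together, these options map almost one-to-one onto the JSON body of PUT /v1/acl/role. A hedged sketch of that shaping (the helper is hypothetical; field names follow Consul's documented role API, and omitting a key is what implements the "leave unset to keep the current value" semantics described above):

```python
def role_payload(name, policies=None, service_identities=None, node_identities=None):
    body = {"Name": name}
    if policies is not None:
        # Each policy link may carry a Name, an ID, or both.
        body["Policies"] = [
            {key: val for key, val in (("Name", p.get("name")), ("ID", p.get("id"))) if val}
            for p in policies
        ]
    if service_identities is not None:
        body["ServiceIdentities"] = [
            {"ServiceName": s["service_name"], "Datacenters": s.get("datacenters")}
            for s in service_identities
        ]
    if node_identities is not None:
        body["NodeIdentities"] = [
            {"NodeName": n["node_name"], "Datacenter": n["datacenter"]}
            for n in node_identities
        ]
    return body
```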
+ type: str + required: true +""" + +EXAMPLES = r""" +- name: Create a role with 2 policies + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role + policies: + - id: 783beef3-783f-f41f-7422-7087dc272765 + - name: "policy-1" + +- name: Create a role with service identity + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-2 + service_identities: + - name: web + datacenters: + - dc1 + +- name: Create a role with node identity + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-3 + node_identities: + - name: node-1 + datacenter: dc2 + +- name: Remove a role + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-3 + state: absent +""" + +RETURN = r""" +role: + description: The role object. + returned: success + type: dict + sample: + { + "CreateIndex": 39, + "Description": "", + "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=", + "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5", + "ModifyIndex": 39, + "Name": "foo-role", + "Policies": [ + { + "ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", + "Name": "foo-access" + } + ] + } +operation: + description: The operation performed on the role. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_READ, + _ConsulModule, +) + + +class ConsulRoleModule(_ConsulModule): + api_endpoint = "acl/role" + result_key = "role" + unique_identifiers = ["id"] + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return [self.api_endpoint, "name", self.params["name"]] + return super(ConsulRoleModule, self).endpoint_url(operation, identifier) + + +NAME_ID_SPEC = dict( + name=dict(type="str"), + id=dict(type="str"), +) + +NODE_ID_SPEC = dict( + node_name=dict(type="str", required=True, aliases=["name"]), + datacenter=dict(type="str", required=True), +) + +SERVICE_ID_SPEC = dict( + service_name=dict(type="str", required=True, aliases=["name"]), + datacenters=dict(type="list", elements="str"), +) + +TEMPLATE_POLICY_SPEC = dict( + template_name=dict(type="str", required=True), + template_variables=dict(type="dict"), +) + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "description": dict(type="str"), + "policies": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "templated_policies": dict( + type="list", + elements="dict", + options=TEMPLATE_POLICY_SPEC, + ), + "node_identities": dict( + type="list", + elements="dict", + options=NODE_ID_SPEC, + ), + "service_identities": dict( + type="list", + elements="dict", + options=SERVICE_ID_SPEC, + ), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulRoleModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py deleted file mode 120000 index ca266c8e6f..0000000000 --- a/plugins/modules/consul_session.py +++ /dev/null @@ -1 +0,0 @@ -./clustering/consul/consul_session.py \ No newline at end of file diff --git 
a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py
new file mode 100644
index 0000000000..acfb8e5504
--- /dev/null
+++ b/plugins/modules/consul_session.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015, Steve Gargan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: consul_session
+short_description: Manipulate Consul sessions
+description:
+  - Allows the addition, modification and deletion of sessions in a Consul cluster. These sessions can then be used in conjunction
+    with key value pairs to implement distributed locks. In-depth documentation for working with sessions can be found at
+    U(http://www.consul.io/docs/internals/sessions.html).
+author:
+  - Steve Gargan (@sgargan)
+  - Håkon Lerring (@Hakon)
+extends_documentation_fragment:
+  - community.general.consul
+  - community.general.consul.actiongroup_consul
+  - community.general.consul.token
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+  action_group:
+    version_added: 8.3.0
+options:
+  id:
+    description:
+      - ID of the session, required when O(state) is either V(info) or V(absent).
+    type: str
+  state:
+    description:
+      - Whether the session should be present (created if it does not exist) or absent (removed if present). If created,
+        the O(id) for the session is returned in the output. If V(absent), O(id) is required to remove the session. Info
+        for a single session, all the sessions for a node, or all available sessions can be retrieved by specifying V(info),
+        V(node) or V(list) for the O(state); for V(node) or V(info), the node O(name) or session O(id) is required as parameter.
+    choices: [absent, info, list, node, present]
+    type: str
+    default: present
+  name:
+    description:
+      - The name that should be associated with the session. Required when O(state=node) is used.
+    type: str
+  delay:
+    description:
+      - The optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions are
+        blocked from being acquired until this delay has expired. Durations are in seconds.
+    type: int
+    default: 15
+  node:
+    description:
+      - The name of the node with which the session is associated. By default this is the name of the agent.
+    type: str
+  datacenter:
+    description:
+      - The name of the datacenter in which the session exists or should be created.
+    type: str
+  checks:
+    description:
+      - Checks that are used to verify the session health. If all the checks fail, the session is invalidated and any locks
+        associated with the session are released and can be acquired once the associated lock delay has expired.
+    type: list
+    elements: str
+  behavior:
+    description:
+      - The optional behavior that can be attached to the session when it is created. This controls the behavior when a session
+        is invalidated.
+    choices: [delete, release]
+    type: str
+    default: release
+  ttl:
+    description:
+      - Specifies the duration of a session in seconds (between 10 and 86400).
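O(ttl) is taken as a plain integer of seconds, but the session HTTP API wants a duration string, so the module appends an V(s) suffix when it builds the request body (see create_session further down). A small sketch of that shaping, with the documented 10-86400 bounds made explicit; the helper name is my own:

```python
def session_body(name, behavior="release", lock_delay=15, ttl=None, node=None, checks=None):
    # Mirrors the dict assembled in create_session() below.
    body = {
        "Name": name,
        "Behavior": behavior,    # what happens to held locks on invalidation
        "LockDelay": lock_delay,
        "Node": node,
        "Checks": checks or [],
    }
    if ttl is not None:
        if not 10 <= ttl <= 86400:  # documented bounds for session TTLs
            raise ValueError("ttl must be between 10 and 86400 seconds")
        body["TTL"] = "{0}s".format(ttl)  # the API expects a duration string
    return body
```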
+    type: int
+    version_added: 5.4.0
+  token:
+    version_added: 5.6.0
+"""
+
+EXAMPLES = r"""
+- name: Register basic session with Consul
+  community.general.consul_session:
+    name: session1
+
+- name: Register a session with an existing check
+  community.general.consul_session:
+    name: session_with_check
+    checks:
+      - existing_check_name
+
+- name: Register a session with lock_delay
+  community.general.consul_session:
+    name: session_with_delay
+    delay: 20
+
+- name: Retrieve info about session by id
+  community.general.consul_session:
+    id: session_id
+    state: info
+
+- name: Retrieve active sessions
+  community.general.consul_session:
+    state: list
+
+- name: Register session with a ttl
+  community.general.consul_session:
+    name: session-with-ttl
+    ttl: 600  # sec
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+    AUTH_ARGUMENTS_SPEC, _ConsulModule
+)
+
+
+def execute(module, consul_module):
+
+    state = module.params.get('state')
+
+    if state in ['info', 'list', 'node']:
+        lookup_sessions(module, consul_module)
+    elif state == 'present':
+        update_session(module, consul_module)
+    else:
+        remove_session(module, consul_module)
+
+
+def list_sessions(consul_module, datacenter):
+    return consul_module.get(
+        'session/list',
+        params={'dc': datacenter})
+
+
+def list_sessions_for_node(consul_module, node, datacenter):
+    return consul_module.get(
+        ('session', 'node', node),
+        params={'dc': datacenter})
+
+
+def get_session_info(consul_module, session_id, datacenter):
+    return consul_module.get(
+        ('session', 'info', session_id),
+        params={'dc': datacenter})
+
+
+def lookup_sessions(module, consul_module):
+
+    datacenter = module.params.get('datacenter')
+
+    state = module.params.get('state')
+    try:
+        if state == 'list':
+            sessions_list = list_sessions(consul_module, datacenter)
+            # Ditch the index; it can be grabbed from the results
+            if sessions_list and len(sessions_list) >= 2:
+                sessions_list = sessions_list[1]
+            module.exit_json(changed=True,
+                             sessions=sessions_list)
+        elif state == 'node':
+            node = module.params.get('node')
+            sessions = list_sessions_for_node(consul_module, node, datacenter)
+            module.exit_json(changed=True,
+                             node=node,
+                             sessions=sessions)
+        elif state == 'info':
+            session_id = module.params.get('id')
+
+            session_by_id = get_session_info(consul_module, session_id, datacenter)
+            module.exit_json(changed=True,
+                             session_id=session_id,
+                             sessions=session_by_id)
+
+    except Exception as e:
+        module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def create_session(consul_module, name, behavior, ttl, node,
+                   lock_delay, datacenter, checks):
+    create_data = {
+        "LockDelay": lock_delay,
+        "Node": node,
+        "Name": name,
+        "Checks": checks,
+        "Behavior": behavior,
+    }
+    if ttl is not None:
+        create_data["TTL"] = "%ss" % str(ttl)  # TTL is in seconds
+    create_session_response_dict = consul_module.put(
+        'session/create',
+        params={
+            'dc': datacenter},
+        data=create_data)
+    return create_session_response_dict["ID"]
+
+
+def update_session(module, consul_module):
+
+    name = module.params.get('name')
+    delay = module.params.get('delay')
+    checks = module.params.get('checks')
+    datacenter = module.params.get('datacenter')
+    node = module.params.get('node')
+    behavior = module.params.get('behavior')
+    ttl = module.params.get('ttl')
+
+    try:
+        session = create_session(consul_module,
+                                 name=name,
+                                 behavior=behavior,
+                                 ttl=ttl,
+                                 node=node,
+                                 lock_delay=delay,
+                                 datacenter=datacenter,
+                                 checks=checks
+                                 )
+        module.exit_json(changed=True,
+                         session_id=session,
+                         name=name,
+                         behavior=behavior,
+                         ttl=ttl,
+                         delay=delay,
+                         checks=checks,
+                         node=node)
+    except Exception as e:
+        module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def destroy_session(consul_module, session_id):
+    return consul_module.put(('session', 'destroy', session_id))
+
+
+def remove_session(module, consul_module):
+    session_id = module.params.get('id')
+
+    try:
+        destroy_session(consul_module, session_id)
+
+        module.exit_json(changed=True,
+                         session_id=session_id)
+    except Exception as e:
+        module.fail_json(msg="Could not remove session with id '%s' %s" % (
+            session_id, e))
+
+
+def main():
+    argument_spec = dict(
+        checks=dict(type='list', elements='str'),
+        delay=dict(type='int', default=15),
+        behavior=dict(
+            type='str',
+            default='release',
+            choices=[
+                'release',
+                'delete']),
+        ttl=dict(type='int'),
+        id=dict(type='str'),
+        name=dict(type='str'),
+        node=dict(type='str'),
+        state=dict(
+            type='str',
+            default='present',
+            choices=[
+                'absent',
+                'info',
+                'list',
+                'node',
+                'present']),
+        datacenter=dict(type='str'),
+        **AUTH_ARGUMENTS_SPEC
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ('state', 'node', ['name']),
+            ('state', 'info', ['id']),
+            ('state', 'absent', ['id']),
+        ],
+        supports_check_mode=False
+    )
+    consul_module = _ConsulModule(module)
+
+    try:
+        execute(module, consul_module)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py
new file mode 100644
index 0000000000..cbe49ee2af
--- /dev/null
+++ b/plugins/modules/consul_token.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: consul_token
+short_description: Manipulate Consul tokens
+version_added: 8.3.0
+description:
+  - Allows the addition, modification and deletion of tokens in a Consul cluster using the agent. For more details on using
+    and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
+author:
+  - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+  - community.general.consul
+  - community.general.consul.token
+  - community.general.consul.actiongroup_consul
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: partial
+    details:
+      - In check mode the diff misses operational attributes.
+  action_group:
+    version_added: 8.3.0
+options:
+  state:
+    description:
+      - Whether the token should be present or absent.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  accessor_id:
+    description:
+      - Specifies a UUID to use as the token's Accessor ID. If not specified, a UUID is generated for this field.
+    type: str
+  secret_id:
+    description:
+      - Specifies a UUID to use as the token's Secret ID. If not specified, a UUID is generated for this field.
+    type: str
+  description:
+    description:
+      - Free-form human-readable description of the token.
+    type: str
+  policies:
+    type: list
+    elements: dict
+    description:
+      - List of policies to attach to the token. Each policy is a dict.
+      - If the parameter is left blank, any policies currently assigned are not changed.
+      - An empty array (V([])) clears any policies previously set.
+    suboptions:
+      name:
+        description:
+          - The name of the policy to attach to this token; see M(community.general.consul_policy) for more info.
+          - Either this or O(policies[].id) must be specified.
+        type: str
+      id:
+        description:
+          - The ID of the policy to attach to this token; see M(community.general.consul_policy) for more info.
+          - Either this or O(policies[].name) must be specified.
+        type: str
+  roles:
+    type: list
+    elements: dict
+    description:
+      - List of roles to attach to the token. Each role is a dict.
+      - If the parameter is left blank, any roles currently assigned are not changed.
+      - An empty array (V([])) clears any roles previously set.
+    suboptions:
+      name:
+        description:
+          - The name of the role to attach to this token; see M(community.general.consul_role) for more info.
+          - Either this or O(roles[].id) must be specified.
+        type: str
+      id:
+        description:
+          - The ID of the role to attach to this token; see M(community.general.consul_role) for more info.
+          - Either this or O(roles[].name) must be specified.
+        type: str
+  templated_policies:
+    description:
+      - The list of templated policies that should be applied to the token.
+    type: list
+    elements: dict
+    suboptions:
+      template_name:
+        description:
+          - The templated policy name.
+        type: str
+        required: true
+      template_variables:
+        description:
+          - The templated policy variables.
+          - Not all templated policies require variables.
+        type: dict
+  service_identities:
+    type: list
+    elements: dict
+    description:
+      - List of service identities to attach to the token.
+      - If not specified, any service identities currently assigned are not changed.
+      - If the parameter is an empty array (V([])), any service identities assigned are unassigned.
+    suboptions:
+      service_name:
+        description:
+          - The name of the service.
+          - Must not be longer than 256 characters, and must start and end with a lowercase alphanumeric character.
+          - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+        type: str
+        required: true
+      datacenters:
+        description:
+          - The datacenters where the token is effective.
+          - If an empty array (V([])) is specified, the token is valid in all datacenters, including those which do not
+            yet exist but may in the future.
+        type: list
+        elements: str
+  node_identities:
+    type: list
+    elements: dict
+    description:
+      - List of node identities to attach to the token.
+      - If not specified, any node identities currently assigned are not changed.
+      - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
+    suboptions:
+      node_name:
+        description:
+          - The name of the node.
+          - Must not be longer than 256 characters, and must start and end with a lowercase alphanumeric character.
+          - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+        type: str
+        required: true
+      datacenter:
+        description:
+          - The node's datacenter.
+          - This results in the effective token only being valid in this datacenter.
+        type: str
+        required: true
+  local:
+    description:
+      - If V(true), indicates that the token should not be replicated globally and instead be local to the current datacenter.
+    type: bool
+  expiration_ttl:
+    description:
+      - This is a convenience field; if set, it initializes the C(expiration_time). Can be specified in the form of V(60s)
+        or V(5m) (that is, 60 seconds or 5 minutes, respectively). Ignored when the token is updated!
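The duration strings above are parsed by Consul itself, so the module simply forwards them. If a playbook ever needs the equivalent number of seconds client-side, a tiny hypothetical converter (not part of the module) is enough for the single-unit forms shown in the documentation:

```python
import re

_UNITS = {"s": 1, "m": 60, "h": 3600}


def ttl_seconds(ttl):
    # "90s" -> 90, "5m" -> 300, "1h" -> 3600; compound forms like "1h30m"
    # are deliberately out of scope for this sketch.
    match = re.fullmatch(r"(\d+)([smh])", ttl)
    if not match:
        raise ValueError("unsupported duration: {0!r}".format(ttl))
    return int(match.group(1)) * _UNITS[match.group(2)]
```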
+ type: str +""" + +EXAMPLES = r""" +- name: Create / Update a token by accessor_id + community.general.consul_token: + state: present + accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 + token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8 + roles: + - name: role1 + - name: role2 + service_identities: + - service_name: service1 + datacenters: [dc1, dc2] + node_identities: + - node_name: node1 + datacenter: dc1 + expiration_ttl: 50m + +- name: Delete a token + community.general.consul_token: + state: absent + accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 + token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8 +""" + +RETURN = r""" +token: + description: The token as returned by the Consul HTTP API. + returned: always + type: dict + sample: + AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21 + CreateIndex: 632 + CreateTime: "2024-01-14T21:53:01.402749174+01:00" + Description: Testing + Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A= + Local: false + ModifyIndex: 633 + SecretID: bd380fba-da17-7cee-8576-8d6427c6c930 + ServiceIdentities: ["ServiceName": "test"] +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + _ConsulModule, +) + + +def normalize_link_obj(api_obj, module_obj, key): + api_objs = api_obj.get(key) + module_objs = module_obj.get(key) + if api_objs is None or module_objs is None: + return + name_to_id = {i["Name"]: i["ID"] for i in api_objs} + id_to_name = {i["ID"]: i["Name"] for i in api_objs} + + for obj in module_objs: + identifier = obj.get("ID") + name = obj.get("Name") + if identifier and not name and identifier in id_to_name: + obj["Name"] = id_to_name[identifier] + if not identifier and name and name in name_to_id: + obj["ID"] = name_to_id[name] + + +class ConsulTokenModule(_ConsulModule): + api_endpoint = "acl/token" + result_key = "token" + unique_identifiers = ["accessor_id"] + + create_only_fields = {"expiration_ttl"} + + def read_object(self): + # if `accessor_id` is not supplied we can only create objects and are not idempotent + if not self.id_from_obj(self.params): + return None + return super(ConsulTokenModule, self).read_object() + + def needs_update(self, api_obj, module_obj): + # SecretID is usually not supplied + if "SecretID" not in module_obj and "SecretID" in api_obj: + del api_obj["SecretID"] + normalize_link_obj(api_obj, module_obj, "Roles") + normalize_link_obj(api_obj, module_obj, "Policies") + # ExpirationTTL is only supported on create, not for update + # it writes to ExpirationTime, so we need to remove that as well + if "ExpirationTTL" in module_obj: + del module_obj["ExpirationTTL"] + return super(ConsulTokenModule, self).needs_update(api_obj, module_obj) + + +NAME_ID_SPEC = dict( + name=dict(type="str"), + id=dict(type="str"), +) + +NODE_ID_SPEC = dict( + node_name=dict(type="str", required=True), + datacenter=dict(type="str", required=True), +) + +SERVICE_ID_SPEC = dict( + service_name=dict(type="str", required=True), + datacenters=dict(type="list", elements="str"), +) + +TEMPLATE_POLICY_SPEC = dict( + template_name=dict(type="str", required=True), + template_variables=dict(type="dict"), +) + + +_ARGUMENT_SPEC = { + "description": dict(), + "accessor_id": dict(), + "secret_id": dict(no_log=True), + "roles": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + 
),
+    "policies": dict(
+        type="list",
+        elements="dict",
+        options=NAME_ID_SPEC,
+        mutually_exclusive=[("name", "id")],
+        required_one_of=[("name", "id")],
+    ),
+    "templated_policies": dict(
+        type="list",
+        elements="dict",
+        options=TEMPLATE_POLICY_SPEC,
+    ),
+    "node_identities": dict(
+        type="list",
+        elements="dict",
+        options=NODE_ID_SPEC,
+    ),
+    "service_identities": dict(
+        type="list",
+        elements="dict",
+        options=SERVICE_ID_SPEC,
+    ),
+    "local": dict(type="bool"),
+    "expiration_ttl": dict(type="str"),
+    "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+    module = AnsibleModule(
+        _ARGUMENT_SPEC,
+        required_if=[("state", "absent", ["accessor_id"])],
+        supports_check_mode=True,
+    )
+    consul_module = ConsulTokenModule(module)
+    consul_module.execute()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py
deleted file mode 120000
index f98edc454a..0000000000
--- a/plugins/modules/copr.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/copr.py
\ No newline at end of file
diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py
new file mode 100644
index 0000000000..4d627ceb8f
--- /dev/null
+++ b/plugins/modules/copr.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+
+# Copyright (c) 2020, Silvie Chlupova
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: copr
+short_description: Manage one of the Copr repositories
+version_added: 2.0.0
+description: This module can enable, disable or remove the specified repository.
+author: Silvie Chlupova (@schlupov)
+requirements:
+  - dnf
+  - dnf-plugins-core
+notes:
+  - Supports C(check_mode).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  host:
+    description: The Copr host to work with.
+    default: copr.fedorainfracloud.org
+    type: str
+  protocol:
+    description: This indicates which protocol to use with the host.
+    default: https
+    type: str
+  name:
+    description: Copr directory name, for example C(@copr/copr-dev).
+    required: true
+    type: str
+  state:
+    description:
+      - Whether to set this project as V(enabled), V(disabled), or V(absent).
+    default: enabled
+    type: str
+    choices: [absent, enabled, disabled]
+  chroot:
+    description:
+      - The name of the chroot that you want to enable/disable/remove in the project, for example V(epel-7-x86_64). The default
+        chroot is determined by the operating system, version of the operating system, and architecture on which the module
+        is run.
+    type: str
+  includepkgs:
+    description: List of packages to include.
+    required: false
+    type: list
+    elements: str
+    version_added: 9.4.0
+  excludepkgs:
+    description: List of packages to exclude.
+ required: false + type: list + elements: str + version_added: 9.4.0 +""" + +EXAMPLES = r""" +- name: Enable project Test of the user schlupov + community.general.copr: + host: copr.fedorainfracloud.org + state: enabled + name: schlupov/Test + chroot: fedora-31-x86_64 + +- name: Remove project integration_tests of the group copr + community.general.copr: + state: absent + name: '@copr/integration_tests' + +- name: Install Caddy + community.general.copr: + name: '@caddy/caddy' + chroot: fedora-rawhide-{{ ansible_facts.architecture }} + includepkgs: + - caddy +""" + +RETURN = r""" +repo_filename: + description: The name of the repo file in which the copr project information is stored. + returned: success + type: str + sample: _copr:copr.fedorainfracloud.org:group_copr:integration_tests.repo + +repo: + description: Path to the project on the host. + returned: success + type: str + sample: copr.fedorainfracloud.org/group_copr/integration_tests +""" + +import stat +import os +import traceback +from urllib.error import HTTPError + +try: + import dnf + import dnf.cli + import dnf.repodict + from dnf.conf import Conf + HAS_DNF_PACKAGES = True + DNF_IMP_ERR = None +except ImportError: + DNF_IMP_ERR = traceback.format_exc() + HAS_DNF_PACKAGES = False + +from ansible.module_utils.common import respawn +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils import distro +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url + + +def _respawn_dnf(): + if respawn.has_respawned(): + return + system_interpreters = ( + "/usr/libexec/platform-python", + "/usr/bin/python3", + "/usr/bin/python", + ) + interpreter = respawn.probe_interpreters_for_module(system_interpreters, "dnf") + if interpreter: + respawn.respawn_module(interpreter) + + +class CoprModule(object): + """The class represents a copr module. + + The class contains methods that take care of the repository state of a project, + whether the project is enabled, disabled or missing. + """ + + ansible_module = None + + def __init__(self, host, name, state, protocol, chroot=None, check_mode=False): + self.host = host + self.name = name + self.state = state + self.chroot = chroot + self.protocol = protocol + self.check_mode = check_mode + if not chroot: + self.chroot = self.chroot_conf() + else: + self.chroot = chroot + self.get_base() + + @property + def short_chroot(self): + """str: Chroot (distribution-version-architecture) shorten to distribution-version.""" + return self.chroot.rsplit('-', 1)[0] + + @property + def arch(self): + """str: Target architecture.""" + chroot_parts = self.chroot.split("-") + return chroot_parts[-1] + + @property + def user(self): + """str: Copr user (this can also be the name of the group).""" + return self._sanitize_username(self.name.split("/")[0]) + + @property + def project(self): + """str: The name of the copr project.""" + return self.name.split("/")[1] + + @classmethod + def need_root(cls): + """Check if the module was run as root.""" + if os.geteuid() != 0: + cls.raise_exception("This command has to be run under the root user.") + + @classmethod + def get_base(cls): + """Initialize the configuration from dnf. + + Returns: + An instance of the BaseCli class. + """ + cls.base = dnf.cli.cli.BaseCli(Conf()) + return cls.base + + @classmethod + def raise_exception(cls, msg): + """Raise either an ansible exception or a python exception. + + Args: + msg: The message to be displayed when an exception is thrown. 
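The short_chroot and arch properties above slice a chroot string such as fedora-31-x86_64 from opposite ends, so multi-part versions like centos-stream-9 survive intact. A standalone illustration of the same splits:

```python
def split_chroot(chroot):
    # rsplit from the right isolates the architecture; the remainder is
    # the distribution-version prefix used for repo lookups.
    short, arch = chroot.rsplit("-", 1)
    distribution, version = short.split("-", 1)
    return distribution, version, arch


print(split_chroot("fedora-31-x86_64"))         # ('fedora', '31', 'x86_64')
print(split_chroot("epel-8-aarch64"))           # ('epel', '8', 'aarch64')
print(split_chroot("centos-stream-9-ppc64le"))  # ('centos', 'stream-9', 'ppc64le')
```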
+ """ + if cls.ansible_module: + raise cls.ansible_module.fail_json(msg=msg, changed=False) + raise Exception(msg) + + def _get(self, chroot): + """Send a get request to the server to obtain the necessary data. + + Args: + chroot: Chroot in the form of distribution-version. + + Returns: + Info about a repository and status code of the get request. + """ + repo_info = None + url = "{0}://{1}/coprs/{2}/repo/{3}/dnf.repo?arch={4}".format( + self.protocol, self.host, self.name, chroot, self.arch + ) + try: + r = open_url(url) + status_code = r.getcode() + repo_info = r.read().decode("utf-8") + except HTTPError as e: + status_code = e.getcode() + return repo_info, status_code + + def _download_repo_info(self): + """Download information about the repository. + + Returns: + Information about the repository. + """ + distribution, version = self.short_chroot.split('-', 1) + chroot = self.short_chroot + while True: + repo_info, status_code = self._get(chroot) + if repo_info: + return repo_info + if distribution == "rhel": + chroot = "centos-stream-8" + distribution = "centos" + elif distribution == "centos": + if version == "stream-8": + version = "8" + elif version == "stream-9": + version = "9" + chroot = "epel-{0}".format(version) + distribution = "epel" + else: + if str(status_code) != "404": + self.raise_exception( + "This repository does not have any builds yet so you cannot enable it now." + ) + else: + self.raise_exception( + "Chroot {0} does not exist in {1}".format(self.chroot, self.name) + ) + + def _enable_repo(self, repo_filename_path, repo_content=None): + """Write information to a repo file. + + Args: + repo_filename_path: Path to repository. + repo_content: Repository information from the host. + + Returns: + True, if the information in the repo file matches that stored on the host, + False otherwise. + """ + if not repo_content: + repo_content = self._download_repo_info() + if self.ansible_module.params["includepkgs"]: + includepkgs_value = ','.join(self.ansible_module.params['includepkgs']) + repo_content = repo_content.rstrip('\n') + '\nincludepkgs={0}\n'.format(includepkgs_value) + if self.ansible_module.params["excludepkgs"]: + excludepkgs_value = ','.join(self.ansible_module.params['excludepkgs']) + repo_content = repo_content.rstrip('\n') + '\nexcludepkgs={0}\n'.format(excludepkgs_value) + if self._compare_repo_content(repo_filename_path, repo_content): + return False + if not self.check_mode: + with open(repo_filename_path, "w+") as file: + file.write(repo_content) + os.chmod( + repo_filename_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, + ) + return True + + def _get_repo_with_old_id(self): + """Try to get a repository with the old name.""" + repo_id = "{0}-{1}".format(self.user, self.project) + if repo_id in self.base.repos and "_copr" in self.base.repos[repo_id].repofile: + file_name = self.base.repos[repo_id].repofile.split("/")[-1] + try: + copr_hostname = file_name.rsplit(":", 2)[0].split(":", 1)[1] + if copr_hostname != self.host: + return None + return file_name + except IndexError: + return file_name + return None + + def _read_all_repos(self, repo_id=None): + """The method is used to initialize the base variable by + repositories using the RepoReader class from dnf. + + Args: + repo_id: Repo id of the repository we want to work with. 
+ """ + reader = dnf.conf.read.RepoReader(self.base.conf, None) + for repo in reader: + try: + if repo_id: + if repo.id == repo_id: + self.base.repos.add(repo) + break + else: + self.base.repos.add(repo) + except dnf.exceptions.ConfigError as e: + self.raise_exception(str(e)) + + def _get_copr_repo(self): + """Return one specific repository from all repositories on the system. + + Returns: + The repository that a user wants to enable, disable, or remove. + """ + repo_id = "copr:{0}:{1}:{2}".format(self.host, self.user, self.project) + if repo_id not in self.base.repos: + if self._get_repo_with_old_id() is None: + return None + return self.base.repos[repo_id] + + def _disable_repo(self, repo_filename_path): + """Disable the repository. + + Args: + repo_filename_path: Path to repository. + + Returns: + False, if the repository is already disabled on the system, + True otherwise. + """ + self._read_all_repos() + repo = self._get_copr_repo() + if repo is None: + if self.check_mode: + return True + self._enable_repo(repo_filename_path) + self._read_all_repos("copr:{0}:{1}:{2}".format(self.host, self.user, self.project)) + repo = self._get_copr_repo() + for repo_id in repo.cfg.sections(): + repo_content_api = self._download_repo_info() + with open(repo_filename_path, "r") as file: + repo_content_file = file.read() + if repo_content_file != repo_content_api: + if not self.resolve_differences( + repo_content_file, repo_content_api, repo_filename_path + ): + return False + if not self.check_mode: + self.base.conf.write_raw_configfile( + repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"}, + ) + return True + + def resolve_differences(self, repo_content_file, repo_content_api, repo_filename_path): + """Detect differences between the contents of the repository stored on the + system and the information about the repository on the server. + + Args: + repo_content_file: The contents of the repository stored on the system. + repo_content_api: The information about the repository from the server. + repo_filename_path: Path to repository. + + Returns: + False, if the contents of the repo file and the information on the server match, + True otherwise. + """ + repo_file_lines = repo_content_file.split("\n") + repo_api_lines = repo_content_api.split("\n") + repo_api_lines.remove("enabled=1") + if "enabled=0" in repo_file_lines: + repo_file_lines.remove("enabled=0") + if " ".join(repo_api_lines) == " ".join(repo_file_lines): + return False + if not self.check_mode: + os.remove(repo_filename_path) + self._enable_repo(repo_filename_path, repo_content_api) + else: + repo_file_lines.remove("enabled=1") + if " ".join(repo_api_lines) != " ".join(repo_file_lines): + if not self.check_mode: + os.remove(repo_filename_path) + self._enable_repo(repo_filename_path, repo_content_api) + return True + + def _remove_repo(self): + """Remove the required repository. + + Returns: + True, if the repository has been removed, False otherwise. + """ + self._read_all_repos() + repo = self._get_copr_repo() + if not repo: + return False + if not self.check_mode: + try: + os.remove(repo.repofile) + except OSError as e: + self.raise_exception(str(e)) + return True + + def run(self): + """The method uses methods of the CoprModule class to change the state of the repository. + + Returns: + Dictionary with information that the ansible module displays to the user at the end of the run. 
+ """ + self.need_root() + state = dict() + repo_filename = "_copr:{0}:{1}:{2}.repo".format(self.host, self.user, self.project) + state["repo"] = "{0}/{1}/{2}".format(self.host, self.user, self.project) + state["repo_filename"] = repo_filename + repo_filename_path = "{0}/_copr:{1}:{2}:{3}.repo".format( + self.base.conf.get_reposdir, self.host, self.user, self.project + ) + if self.state == "enabled": + enabled = self._enable_repo(repo_filename_path) + state["msg"] = "enabled" + state["state"] = bool(enabled) + elif self.state == "disabled": + disabled = self._disable_repo(repo_filename_path) + state["msg"] = "disabled" + state["state"] = bool(disabled) + elif self.state == "absent": + removed = self._remove_repo() + state["msg"] = "absent" + state["state"] = bool(removed) + return state + + @staticmethod + def _compare_repo_content(repo_filename_path, repo_content_api): + """Compare the contents of the stored repository with the information from the server. + + Args: + repo_filename_path: Path to repository. + repo_content_api: The information about the repository from the server. + + Returns: + True, if the information matches, False otherwise. + """ + if not os.path.isfile(repo_filename_path): + return False + with open(repo_filename_path, "r") as file: + repo_content_file = file.read() + return repo_content_file == repo_content_api + + @staticmethod + def chroot_conf(): + """Obtain information about the distribution, version, and architecture of the target. + + Returns: + Chroot info in the form of distribution-version-architecture. + """ + (distribution, version, codename) = distro.linux_distribution(full_distribution_name=False) + base = CoprModule.get_base() + return "{0}-{1}-{2}".format(distribution, version, base.conf.arch) + + @staticmethod + def _sanitize_username(user): + """Modify the group name. + + Args: + user: User name. + + Returns: + Modified user name if it is a group name with @. 
+ """ + if user[0] == "@": + return "group_{0}".format(user[1:]) + return user + + +def run_module(): + """The function takes care of the functioning of the whole ansible copr module.""" + module_args = dict( + host=dict(type="str", default="copr.fedorainfracloud.org"), + protocol=dict(type="str", default="https"), + name=dict(type="str", required=True), + state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), + chroot=dict(type="str"), + includepkgs=dict(type='list', elements="str"), + excludepkgs=dict(type='list', elements="str"), + ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + params = module.params + + if not HAS_DNF_PACKAGES: + _respawn_dnf() + module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR) + + CoprModule.ansible_module = module + copr_module = CoprModule( + host=params["host"], + name=params["name"], + state=params["state"], + protocol=params["protocol"], + chroot=params["chroot"], + check_mode=module.check_mode, + ) + state = copr_module.run() + + info = "Please note that this repository is not part of the main distribution" + + if params["state"] == "enabled" and state["state"]: + module.exit_json( + changed=state["state"], + msg=state["msg"], + repo=state["repo"], + repo_filename=state["repo_filename"], + info=info, + ) + module.exit_json( + changed=state["state"], + msg=state["msg"], + repo=state["repo"], + repo_filename=state["repo_filename"], + ) + + +def main(): + """Launches ansible Copr module.""" + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py deleted file mode 120000 index 64d9fe3f62..0000000000 --- a/plugins/modules/cpanm.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/cpanm.py \ No newline at end of file diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py new file mode 100644 index 0000000000..39844d5f74 --- /dev/null +++ b/plugins/modules/cpanm.py @@ -0,0 +1,294 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Franck Cuny +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: cpanm +short_description: Manages Perl library dependencies +description: + - Manage Perl library dependencies using cpanminus. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - The Perl library to install. Valid values change according to the O(mode), see notes for more details. + - Note that for installing from a local path the parameter O(from_path) should be used. + aliases: [pkg] + from_path: + type: path + description: + - The local directory or C(tar.gz) file to install from. + notest: + description: + - Do not run unit tests. + type: bool + default: false + locallib: + description: + - Specify the install base to install modules. + type: path + mirror: + description: + - Specifies the base URL for the CPAN mirror to use. + type: str + mirror_only: + description: + - Use the mirror's index file instead of the CPAN Meta DB. + type: bool + default: false + installdeps: + description: + - Only install dependencies. + type: bool + default: false + install_recommendations: + description: + - If V(true), installs dependencies declared as recommends per META spec. 
+      - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier
+        in E(PERL_CPANM_OPT).
+      - If the parameter is not set, C(cpanm) uses its existing defaults.
+      - When these dependencies fail to install, cpanm continues the installation, since they are just recommendations.
+    type: bool
+    version_added: 10.3.0
+  install_suggestions:
+    description:
+      - If V(true), installs dependencies declared as suggests per META spec.
+      - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier
+        in E(PERL_CPANM_OPT).
+      - If the parameter is not set, C(cpanm) uses its existing defaults.
+      - When these dependencies fail to install, cpanm continues the installation, since they are just suggestions.
+    type: bool
+    version_added: 10.3.0
+  version:
+    description:
+      - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
+    type: str
+  executable:
+    description:
+      - Override the path to the cpanm executable.
+    type: path
+  mode:
+    description:
+      - Controls the module behavior. See notes below for more details.
+      - The default changed from V(compatibility) to V(new) in community.general 9.0.0.
+      - 'O(mode=new): The O(name) parameter may refer to a module name, a distribution file, an HTTP URL or a git repository
+        URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. This is the default mode
+        from community.general 9.0.0 onwards.'
+      - 'O(mode=compatibility): This was the default mode before community.general 9.0.0. O(name) must be either a module
+        name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) when specified),
+        then nothing happens. Otherwise, it is installed using the C(cpanm) executable. O(name) cannot be a URL or a git
+        URL. C(cpanm) version specifiers do not work in this mode.'
+      - 'B(ATTENTION): V(compatibility) mode is deprecated and will be removed in community.general 13.0.0.'
+    type: str
+    choices: [compatibility, new]
+    default: new
+    version_added: 3.0.0
+  name_check:
+    description:
+      - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when
+        specified).
+    type: str
+    version_added: 3.0.0
+notes:
+  - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
+seealso:
+  - name: C(cpanm) command manual page
+    description: Manual page for the command.
+ link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm +author: + - "Franck Cuny (@fcuny)" + - "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = r""" +- name: Install Dancer perl package + community.general.cpanm: + name: Dancer + +- name: Install version 0.99_05 of the Plack perl package + community.general.cpanm: + name: MIYAGAWA/Plack-0.99_05.tar.gz + +- name: Install Dancer into the specified locallib + community.general.cpanm: + name: Dancer + locallib: /srv/webapps/my_app/extlib + +- name: Install perl dependencies from local directory + community.general.cpanm: + from_path: /srv/webapps/my_app/src/ + +- name: Install Dancer perl package without running the unit tests in indicated locallib + community.general.cpanm: + name: Dancer + notest: true + locallib: /srv/webapps/my_app/extlib + +- name: Install Dancer perl package from a specific mirror + community.general.cpanm: + name: Dancer + mirror: 'http://cpan.cpantesters.org/' + +- name: Install Dancer perl package into the system root path + become: true + community.general.cpanm: + name: Dancer + +- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0 + community.general.cpanm: + name: Dancer + version: '1.0' +""" + +RETURN = r""" +cpanm_version: + description: Version of CPANMinus. + type: str + returned: always + sample: "1.7047" + version_added: 10.0.0 +""" + + +import os +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class CPANMinus(ModuleHelper): + output_params = ['name', 'version'] + module = dict( + argument_spec=dict( + name=dict(type='str', aliases=['pkg']), + version=dict(type='str'), + from_path=dict(type='path'), + notest=dict(type='bool', default=False), + locallib=dict(type='path'), + mirror=dict(type='str'), + mirror_only=dict(type='bool', default=False), + installdeps=dict(type='bool', default=False), + install_recommendations=dict(type='bool'), + install_suggestions=dict(type='bool'), + executable=dict(type='path'), + mode=dict(type='str', default='new', choices=['compatibility', 'new']), + name_check=dict(type='str') + ), + required_one_of=[('name', 'from_path')], + + ) + command = 'cpanm' + command_args_formats = dict( + notest=cmd_runner_fmt.as_bool("--notest"), + locallib=cmd_runner_fmt.as_opt_val('--local-lib'), + mirror=cmd_runner_fmt.as_opt_val('--mirror'), + mirror_only=cmd_runner_fmt.as_bool("--mirror-only"), + installdeps=cmd_runner_fmt.as_bool("--installdeps"), + install_recommendations=cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True), + install_suggestions=cmd_runner_fmt.as_bool("--with-suggests", "--without-suggests", ignore_none=True), + pkg_spec=cmd_runner_fmt.as_list(), + cpanm_version=cmd_runner_fmt.as_fixed("--version"), + ) + + def __init_module__(self): + v = self.vars + if v.mode == "compatibility": + if v.name_check: + self.do_raise("Parameter name_check can only be used with mode=new") + self.deprecate("'mode=compatibility' is deprecated, use 'mode=new' instead", version='13.0.0', collection_name="community.general") + else: + if v.name and v.from_path: + self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") + + self.command = v.executable if v.executable else self.command + self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) + self.vars.binary = 
self.runner.binary
+
+        with self.runner("cpanm_version") as ctx:
+            rc, out, err = ctx.run()
+            line = out.split('\n')[0]
+            match = re.search(r"version\s+([\d\.]+)\s+", line)
+            if not match:
+                self.do_raise("Failed to determine version number. First line of output: {0}".format(line))
+            self.vars.cpanm_version = match.group(1)
+
+    def _is_package_installed(self, name, locallib, version):
+        def process(rc, out, err):
+            return rc == 0
+
+        if name is None or name.endswith('.tar.gz'):
+            return False
+        version = "" if version is None else " " + version
+
+        env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {}
+        runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env)
+        with runner("mod", output_process=process) as ctx:
+            return ctx.run(mod='use %s%s;' % (name, version))
+
+    def sanitize_pkg_spec_version(self, pkg_spec, version):
+        if version is None:
+            return pkg_spec
+        if pkg_spec.endswith('.tar.gz'):
+            self.do_raise(msg="parameter 'version' must not be used when installing from a file")
+        if os.path.isdir(pkg_spec):
+            self.do_raise(msg="parameter 'version' must not be used when installing from a directory")
+        if pkg_spec.endswith('.git'):
+            if version.startswith('~'):
+                self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository")
+            version = version if version.startswith('@') else '@' + version
+        elif version[0] not in ('@', '~'):
+            version = '~' + version
+        return pkg_spec + version
+
+    def __run__(self):
+        def process(rc, out, err):
+            if self.vars.mode == "compatibility" and rc != 0:
+                self.do_raise(msg=err, cmd=self.vars.cmd_args)
+            return 'is up to date' not in err and 'is up to date' not in out
+
+        v = self.vars
+        pkg_param = 'from_path' if v.from_path else 'name'
+
+        if v.mode == 'compatibility':
+            if self._is_package_installed(v.name, v.locallib, v.version):
+                return
+            pkg_spec = v[pkg_param]
+        else:
+            installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False
+            if installed:
+                return
+            pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
+
+        with self.runner([
+            'notest',
+            'locallib',
+            'mirror',
+            'mirror_only',
+            'installdeps',
+            'install_recommendations',
+            'install_suggestions',
+            'pkg_spec'
+        ], output_process=process) as ctx:
+            self.changed = ctx.run(pkg_spec=pkg_spec)
+
+
+def main():
+    CPANMinus.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py
deleted file mode 120000
index bb4ab4c54d..0000000000
--- a/plugins/modules/cronvar.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/cronvar.py
\ No newline at end of file
diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py
new file mode 100644
index 0000000000..b67b94fe95
--- /dev/null
+++ b/plugins/modules/cronvar.py
@@ -0,0 +1,432 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded with a comment describing the
+# variable so that it can be found later.
This is required to be +# present in order for this plugin to find/modify the variable + +# This module is based on the crontab module. + +from __future__ import annotations + +DOCUMENTATION = r""" +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. + - This module allows you to create, update, or delete cron variable definitions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the crontab variable. + type: str + required: true + value: + description: + - The value to set this variable to. + - Required if O(state=present). + type: str + insertafter: + description: + - If specified, the variable is inserted after the variable specified. + - Used with O(state=present). + type: str + insertbefore: + description: + - Used with O(state=present). If specified, the variable is inserted just before the variable specified. + type: str + state: + description: + - Whether to ensure that the variable is present or absent. + type: str + choices: [absent, present] + default: present + user: + description: + - The specific user whose crontab should be modified. + - This parameter defaults to V(root) when unset. + type: str + cron_file: + description: + - If specified, uses this file instead of an individual user's crontab. + - Without a leading V(/), this is assumed to be in C(/etc/cron.d). + - With a leading V(/), this is taken as absolute. + type: str + backup: + description: + - If set, create a backup of the crontab before it is modified. The location of the backup is returned in the C(backup) + variable by this module. + type: bool + default: false +requirements: + - cron +author: + - Doug Luce (@dougluce) +""" + +EXAMPLES = r""" +- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists + community.general.cronvar: + name: EMAIL + value: doug@ansibmod.con.com + +- name: Ensure a variable does not exist. This may remove any variable named "LEGACY" + community.general.cronvar: + name: LEGACY + state: absent + +- name: Add a variable to a file under /etc/cron.d + community.general.cronvar: + name: LOGFILE + value: /var/log/yum-autoupdate.log + user: root + cron_file: ansible_yum-autoupdate +""" + +import os +import platform +import pwd +import re +import shlex +import sys +import tempfile +from shlex import quote as shlex_quote + +from ansible.module_utils.basic import AnsibleModule + + +class CronVarError(Exception): + pass + + +class CronVar(object): + """ + CronVar object to write variables to crontabs. 
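Variable lines are recognised with shlex rather than a regex: a token, an C(=), and everything that follows (parse_for_var appears a little further down). A hypothetical round-trip showing the idea; the real module widens wordchars to nearly every character, while this sketch only adds a few:

```python
import shlex


def parse_var(line):
    # Tokenise like the module does: name, then '=', then the remainder.
    lexer = shlex.shlex(line)
    lexer.wordchars += '-./:@'  # assumption: keep common value chars in one token
    name = lexer.get_token()
    if lexer.get_token() != '=':
        raise ValueError("not a variable line")
    return name, ''.join(lexer)


print(parse_var('MAILTO=admin@example.com'))  # ('MAILTO', 'admin@example.com')
```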
+ + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) + self.cron_cmd = self.module.get_bin_path('crontab', required=True) + + if cron_file: + self.cron_file = "" + if os.path.isabs(cron_file): + self.cron_file = cron_file + else: + self.cron_file = os.path.join('/etc/cron.d', cron_file) + parent_dir = os.path.dirname(self.cron_file) + if parent_dir and not os.path.isdir(parent_dir): + module.fail_json(msg="Parent directory '{}' does not exist for cron_file: '{}'".format(parent_dir, cron_file)) + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + with open(self.cron_file, 'r') as f: + self.lines = f.read().splitlines() + except IOError: + # cron file does not exist + return + except Exception: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. + raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l + ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + self.module.debug('ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. + """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + path = None + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. 
FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError: + # cron file does not exist + return False + except Exception: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + var_name, dummy = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. + self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + varname, dummy = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + varname, dummy = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. 
+ if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd)) + elif platform.system() == 'AIX': + return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user)) + elif pwd.getpwuid(os.getuid())[0] != self.user: + user = '-u %s' % shlex_quote(self.user) + return "%s %s %s" % (self.cron_cmd, user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % ( + shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path)) + elif pwd.getpwuid(os.getuid())[0] != self.user: + user = '-u %s' % shlex_quote(self.user) + return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path)) + + +# ================================================== + +def main(): + # The following example playbooks: + # + # - community.general.cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # community.general.cronvar: name="NEW_HOST" state=absent + # + # Would produce: + # SHELL = /bin/bash + # EMAILTO = doug@ansibmod.con.com + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + value=dict(type='str'), + user=dict(type='str'), + cron_file=dict(type='str'), + insertafter=dict(type='str'), + insertbefore=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + backup=dict(type='bool', default=False), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + supports_check_mode=False, + ) + + name = module.params['name'] + value = module.params['value'] + user = module.params['user'] + cron_file = module.params['cron_file'] + insertafter = module.params['insertafter'] + insertbefore = module.params['insertbefore'] + state = module.params['state'] + backup = module.params['backup'] + ensure_present = state == 'present' + + changed = False + res_args = dict() + + # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
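Before the `os.umask` call that follows, a quick illustration of its effect. This is a POSIX-only demonstration with an arbitrary scratch path, not module code: with a umask of `0o022`, files created via `open()` come out without group or world write bits.

```python
# Quick illustration of umask 0o022 (POSIX only): new files lose
# group/world write. The scratch path is hypothetical.
import os
import stat

os.umask(0o022)                             # same value the module sets below
path = '/tmp/umask-demo-%d' % os.getpid()   # arbitrary scratch file
with open(path, 'w') as f:
    f.write('x')
mode = stat.S_IMODE(os.stat(path).st_mode)
assert mode == 0o644                        # 0o666 & ~0o022
os.unlink(path)
```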
+ os.umask(int('022', 8)) + cronvar = CronVar(module, user, cron_file) + + module.debug('cronvar instantiated - name: "%s"' % name) + + # --- user input validation --- + + if name is None and ensure_present: + module.fail_json(msg="You must specify 'name' to insert a new cron variable") + + if value is None and ensure_present: + module.fail_json(msg="You must specify 'value' to insert a new cron variable") + + if name is None and not ensure_present: + module.fail_json(msg="You must specify 'name' to remove a cron variable") + + # if requested make a backup before making a change + if backup: + dummy, backup_file = tempfile.mkstemp(prefix='cronvar') + cronvar.write(backup_file) + + if cronvar.cron_file and not name and not ensure_present: + changed = cronvar.remove_job_file() + module.exit_json(changed=changed, cron_file=cron_file, state=state) + + old_value = cronvar.find_variable(name) + + if ensure_present: + if value == "" and old_value != "": + value = '""' + if old_value is None: + cronvar.add_variable(name, value, insertbefore, insertafter) + changed = True + elif old_value != value: + cronvar.update_variable(name, value) + changed = True + else: + if old_value is not None: + cronvar.remove_variable(name) + changed = True + + res_args = { + "vars": cronvar.get_var_names(), + "changed": changed + } + + if changed: + cronvar.write() + + # retain the backup only if crontab or cron file have changed + if backup: + if changed: + res_args['backup_file'] = backup_file + else: + os.unlink(backup_file) + + if cron_file: + res_args['cron_file'] = cron_file + + module.exit_json(**res_args) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py deleted file mode 120000 index a85e46be75..0000000000 --- a/plugins/modules/crypttab.py +++ /dev/null @@ -1 +0,0 @@ -./system/crypttab.py \ No newline at end of file diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py new file mode 100644 index 0000000000..4eb8e4b6c2 --- /dev/null +++ b/plugins/modules/crypttab.py @@ -0,0 +1,357 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Steve +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: crypttab +short_description: Encrypted Linux block devices +description: + - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or optionally prefixed with V(/dev/mapper/), + as it appears in the filesystem. V(/dev/mapper/) is stripped from O(name). + type: str + required: true + state: + description: + - Use V(present) to add a line to C(/etc/crypttab) or update its definition if already present. + - Use V(absent) to remove a line with matching O(name). + - Use V(opts_present) to add options to those already present; options with different values are updated. + - Use V(opts_absent) to remove options from the existing set. + type: str + required: true + choices: [absent, opts_absent, opts_present, present] + backing_device: + description: + - Path to the underlying block device or file, or the UUID of a block-device prefixed with V(UUID=). 
+ type: str + password: + description: + - Encryption password, the path to a file containing the password, or V(-) or unset if the password should be entered + at boot. + type: path + opts: + description: + - A comma-delimited list of options. See V(crypttab(5\)) for details. + type: str + path: + description: + - Path to file to use instead of V(/etc/crypttab). + - This might be useful in a chroot environment. + type: path + default: /etc/crypttab +author: + - Steve (@groks) +""" + +EXAMPLES = r""" +- name: Set the options explicitly a device which must already exist + community.general.crypttab: + name: luks-home + state: present + opts: discard,cipher=aes-cbc-essiv:sha256 + +- name: Add the 'discard' option to any existing options for all devices + community.general.crypttab: + name: '{{ item.device }}' + state: opts_present + opts: discard + loop: '{{ ansible_mounts }}' + when: "'/dev/mapper/luks-' in item.device" + +- name: Add entry to /etc/crypttab for luks-home with password file + community.general.crypttab: + name: luks-home + backing_device: UUID=123e4567-e89b-12d3-a456-426614174000 + password: /root/keys/luks-home.key + opts: discard,cipher=aes-cbc-essiv:sha256 + state: present +""" + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']), + backing_device=dict(type='str'), + password=dict(type='path'), + opts=dict(type='str'), + path=dict(type='path', default='/etc/crypttab') + ), + supports_check_mode=True, + ) + + backing_device = module.params['backing_device'] + password = module.params['password'] + opts = module.params['opts'] + state = module.params['state'] + path = module.params['path'] + name = module.params['name'] + if name.startswith('/dev/mapper/'): + name = name[len('/dev/mapper/'):] + + if state != 'absent' and backing_device is None and password is None and opts is None: + module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", + **module.params) + + if 'opts' in state and (backing_device is not None or password is not None): + module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state, + **module.params) + + for arg_name, arg in (('name', name), + ('backing_device', backing_device), + ('password', password), + ('opts', opts)): + if arg is not None and (' ' in arg or '\t' in arg or arg == ''): + module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name, + **module.params) + + try: + crypttab = Crypttab(path) + existing_line = crypttab.match(name) + except Exception as e: + module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e), + exception=traceback.format_exc(), **module.params) + + if 'present' in state and existing_line is None and backing_device is None: + module.fail_json(msg="'backing_device' required to add a new entry", + **module.params) + + changed, reason = False, '?' 
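As background for the `Line` class defined below, a crypttab entry is simply two to four whitespace-separated fields, with the password and options fields optional. A minimal sketch of that decomposition (illustrative only):

```python
# Sketch of how an /etc/crypttab entry decomposes into the four fields the
# Line class below tracks; fields 3 and 4 (password, options) are optional.
def split_crypttab_line(line):
    fields = line.split()
    if not 2 <= len(fields) <= 4:
        raise ValueError('not a valid crypttab entry')
    name, backing_device = fields[0], fields[1]
    password = fields[2] if len(fields) > 2 else None
    opts = fields[3] if len(fields) > 3 else None
    return name, backing_device, password, opts


assert split_crypttab_line('luks-home UUID=123e4567 none discard') == \
    ('luks-home', 'UUID=123e4567', 'none', 'discard')
```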
+ + if state == 'absent': + if existing_line is not None: + changed, reason = existing_line.remove() + + elif state == 'present': + if existing_line is not None: + changed, reason = existing_line.set(backing_device, password, opts) + else: + changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) + + elif state == 'opts_present': + if existing_line is not None: + changed, reason = existing_line.opts.add(opts) + else: + changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) + + elif state == 'opts_absent': + if existing_line is not None: + changed, reason = existing_line.opts.remove(opts) + + if changed and not module.check_mode: + with open(path, 'wb') as f: + f.write(to_bytes(crypttab, errors='surrogate_or_strict')) + + module.exit_json(changed=changed, msg=reason, **module.params) + + +class Crypttab(object): + _lines = [] + + def __init__(self, path): + self.path = path + if not os.path.exists(path): + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + open(path, 'a').close() + + with open(path, 'r') as f: + for line in f.readlines(): + self._lines.append(Line(line)) + + def add(self, line): + self._lines.append(line) + return True, 'added line' + + def lines(self): + for line in self._lines: + if line.valid(): + yield line + + def match(self, name): + for line in self.lines(): + if line.name == name: + return line + return None + + def __str__(self): + lines = [] + for line in self._lines: + lines.append(str(line)) + crypttab = '\n'.join(lines) + if len(crypttab) == 0: + crypttab += '\n' + if crypttab[-1] != '\n': + crypttab += '\n' + return crypttab + + +class Line(object): + def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None): + self.line = line + self.name = name + self.backing_device = backing_device + self.password = password + self.opts = Options(opts) + + if line is not None: + self.line = self.line.rstrip('\n') + if self._line_valid(line): + self.name, backing_device, password, opts = self._split_line(line) + + self.set(backing_device, password, opts) + + def set(self, backing_device, password, opts): + changed = False + + if backing_device is not None and self.backing_device != backing_device: + self.backing_device = backing_device + changed = True + + if password is not None and self.password != password: + self.password = password + changed = True + + if opts is not None: + opts = Options(opts) + if opts != self.opts: + self.opts = opts + changed = True + + return changed, 'updated line' + + def _line_valid(self, line): + if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4): + return False + return True + + def _split_line(self, line): + fields = line.split() + try: + field2 = fields[2] + except IndexError: + field2 = None + try: + field3 = fields[3] + except IndexError: + field3 = None + + return (fields[0], + fields[1], + field2, + field3) + + def remove(self): + self.line, self.name, self.backing_device = '', None, None + return True, 'removed line' + + def valid(self): + if self.name is not None and self.backing_device is not None: + return True + return False + + def __str__(self): + if self.valid(): + fields = [self.name, self.backing_device] + if self.password is not None or self.opts: + if self.password is not None: + fields.append(self.password) + else: + fields.append('none') + if self.opts: + fields.append(str(self.opts)) + return ' '.join(fields) + return self.line + + +class Options(dict): + """opts_string looks 
like: 'discard,foo=bar,baz=greeble' """ + + def __init__(self, opts_string): + super(Options, self).__init__() + self.itemlist = [] + if opts_string is not None: + for opt in opts_string.split(','): + kv = opt.split('=') + if len(kv) > 1: + k, v = (kv[0], kv[1]) + else: + k, v = (kv[0], None) + self[k] = v + + def add(self, opts_string): + changed = False + for k, v in Options(opts_string).items(): + if k in self: + if self[k] != v: + changed = True + else: + changed = True + self[k] = v + return changed, 'updated options' + + def remove(self, opts_string): + changed = False + for k in Options(opts_string): + if k in self: + del self[k] + changed = True + return changed, 'removed options' + + def keys(self): + return self.itemlist + + def values(self): + return [self[key] for key in self] + + def items(self): + return [(key, self[key]) for key in self] + + def __iter__(self): + return iter(self.itemlist) + + def __setitem__(self, key, value): + if key not in self: + self.itemlist.append(key) + super(Options, self).__setitem__(key, value) + + def __delitem__(self, key): + self.itemlist.remove(key) + super(Options, self).__delitem__(key) + + def __ne__(self, obj): + return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items())) + + def __str__(self): + ret = [] + for k, v in self.items(): + if v is None: + ret.append(k) + else: + ret.append('%s=%s' % (k, v)) + return ','.join(ret) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py deleted file mode 100644 index 27b979ad1f..0000000000 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ /dev/null @@ -1,521 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -"""short_description: Check or wait for migrations between nodes""" - -# Copyright: (c) 2018, Albert Autin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: aerospike_migrations -short_description: Check or wait for migrations between nodes -description: - - This can be used to check for migrations in a cluster. - This makes it easy to do a rolling upgrade/update on Aerospike nodes. - - If waiting for migrations is not desired, simply just poll until - port 3000 if available or asinfo -v status returns ok -author: "Albert Autin (@Alb0t)" -options: - host: - description: - - Which host do we use as seed for info connection - required: False - type: str - default: localhost - port: - description: - - Which port to connect to Aerospike on (service port) - required: False - type: int - default: 3000 - connect_timeout: - description: - - How long to try to connect before giving up (milliseconds) - required: False - type: int - default: 1000 - consecutive_good_checks: - description: - - How many times should the cluster report "no migrations" - consecutively before returning OK back to ansible? - required: False - type: int - default: 3 - sleep_between_checks: - description: - - How long to sleep between each check (seconds). - required: False - type: int - default: 60 - tries_limit: - description: - - How many times do we poll before giving up and failing? 
- default: 300 - required: False - type: int - local_only: - description: - - Do you wish to only check for migrations on the local node - before returning, or do you want all nodes in the cluster - to finish before returning? - required: True - type: bool - min_cluster_size: - description: - - Check will return bad until cluster size is met - or until tries is exhausted - required: False - type: int - default: 1 - fail_on_cluster_change: - description: - - Fail if the cluster key changes - if something else is changing the cluster, we may want to fail - required: False - type: bool - default: True - migrate_tx_key: - description: - - The metric key used to determine if we have tx migrations - remaining. Changeable due to backwards compatibility. - required: False - type: str - default: migrate_tx_partitions_remaining - migrate_rx_key: - description: - - The metric key used to determine if we have rx migrations - remaining. Changeable due to backwards compatibility. - required: False - type: str - default: migrate_rx_partitions_remaining - target_cluster_size: - description: - - When all aerospike builds in the cluster are greater than - version 4.3, then the C(cluster-stable) info command will be used. - Inside this command, you can optionally specify what the target - cluster size is - but it is not necessary. You can still rely on - min_cluster_size if you don't want to use this option. - - If this option is specified on a cluster that has at least 1 - host <4.3 then it will be ignored until the min version reaches - 4.3. - required: False - type: int -''' -EXAMPLES = ''' -# check for migrations on local node -- name: Wait for migrations on local node before proceeding - community.general.aerospike_migrations: - host: "localhost" - connect_timeout: 2000 - consecutive_good_checks: 5 - sleep_between_checks: 15 - tries_limit: 600 - local_only: False - -# example playbook: -- name: Upgrade aerospike - hosts: all - become: true - serial: 1 - tasks: - - name: Install dependencies - ansible.builtin.apt: - name: - - python - - python-pip - - python-setuptools - state: latest - - name: Setup aerospike - ansible.builtin.pip: - name: aerospike -# check for migrations every (sleep_between_checks) -# If at least (consecutive_good_checks) checks come back OK in a row, then return OK. -# Will exit if any exception, which can be caused by bad nodes, -# nodes not returning data, or other reasons. -# Maximum runtime before giving up in this case will be: -# Tries Limit * Sleep Between Checks * delay * retries - - name: Wait for aerospike migrations - community.general.aerospike_migrations: - local_only: True - sleep_between_checks: 1 - tries_limit: 5 - consecutive_good_checks: 3 - fail_on_cluster_change: true - min_cluster_size: 3 - target_cluster_size: 4 - register: migrations_check - until: migrations_check is succeeded - changed_when: false - delay: 60 - retries: 120 - - name: Another thing - ansible.builtin.shell: | - echo foo - - name: Reboot - ansible.builtin.reboot: -''' - -RETURN = ''' -# Returns only a success/failure result. Changed is always false. 
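As the comment in the example playbook above notes, the outer `until`/`retries` loop multiplies with the module's own polling. A back-of-the-envelope computation using the formula given there, with the example's values (an upper bound, not an exact runtime):

```python
# Upper bound on wall-clock time for the example above, per the playbook
# comment: tries_limit * sleep_between_checks * delay * retries.
tries_limit = 5            # module-level polling attempts
sleep_between_checks = 1   # seconds between module polls
delay = 60                 # seconds between Ansible retries
retries = 120              # Ansible until-loop retries

worst_case_seconds = tries_limit * sleep_between_checks * delay * retries
print(worst_case_seconds)  # 36000 seconds, i.e. 10 hours
```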
-''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -LIB_FOUND_ERR = None -try: - import aerospike - from time import sleep - import re -except ImportError as ie: - LIB_FOUND = False - LIB_FOUND_ERR = traceback.format_exc() -else: - LIB_FOUND = True - - -def run_module(): - """run ansible module""" - module_args = dict( - host=dict(type='str', required=False, default='localhost'), - port=dict(type='int', required=False, default=3000), - connect_timeout=dict(type='int', required=False, default=1000), - consecutive_good_checks=dict(type='int', required=False, default=3), - sleep_between_checks=dict(type='int', required=False, default=60), - tries_limit=dict(type='int', required=False, default=300), - local_only=dict(type='bool', required=True), - min_cluster_size=dict(type='int', required=False, default=1), - target_cluster_size=dict(type='int', required=False, default=None), - fail_on_cluster_change=dict(type='bool', required=False, default=True), - migrate_tx_key=dict(type='str', required=False, no_log=False, - default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', required=False, no_log=False, - default="migrate_rx_partitions_remaining") - ) - - result = dict( - changed=False, - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - if not LIB_FOUND: - module.fail_json(msg=missing_required_lib('aerospike'), - exception=LIB_FOUND_ERR) - - try: - if module.check_mode: - has_migrations, skip_reason = False, None - else: - migrations = Migrations(module) - has_migrations, skip_reason = migrations.has_migs( - module.params['local_only'] - ) - - if has_migrations: - module.fail_json(msg="Failed.", skip_reason=skip_reason) - except Exception as e: - module.fail_json(msg="Error: {0}".format(e)) - - module.exit_json(**result) - - -class Migrations: - """ Check or wait for migrations between nodes """ - - def __init__(self, module): - self.module = module - self._client = self._create_client().connect() - self._nodes = {} - self._update_nodes_list() - self._cluster_statistics = {} - self._update_cluster_statistics() - self._namespaces = set() - self._update_cluster_namespace_list() - self._build_list = set() - self._update_build_list() - self._start_cluster_key = \ - self._cluster_statistics[self._nodes[0]]['cluster_key'] - - def _create_client(self): - """ TODO: add support for auth, tls, and other special features - I won't use those features, so I'll wait until somebody complains - or does it for me (Cross fingers) - create the client object""" - config = { - 'hosts': [ - (self.module.params['host'], self.module.params['port']) - ], - 'policies': { - 'timeout': self.module.params['connect_timeout'] - } - } - return aerospike.client(config) - - def _info_cmd_helper(self, cmd, node=None, delimiter=';'): - """delimiter is for separate stats that come back, NOT for kv - separation which is =""" - if node is None: # If no node passed, use the first one (local) - node = self._nodes[0] - data = self._client.info_node(cmd, node) - data = data.split("\t") - if len(data) != 1 and len(data) != 2: - self.module.fail_json( - msg="Unexpected number of values returned in info command: " + - str(len(data)) - ) - # data will be in format 'command\touput' - data = data[-1] - data = data.rstrip("\n\r") - data_arr = data.split(delimiter) - - # some commands don't return in kv format - # so we dont want a dict from those. 
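The branch below decides between dict and list output. The same handling, reduced to a standalone sketch over a raw info response (illustrative; it assumes the command, a tab, then the payload, as described in the comment above):

```python
# Standalone sketch of the info-response handling below: responses arrive as
# "command<TAB>payload"; the payload is either "k=v;k=v" pairs or a bare list.
def parse_info(raw, delimiter=';'):
    payload = raw.split('\t')[-1].rstrip('\n\r')
    parts = payload.split(delimiter)
    if '=' in payload:
        return dict(p.split('=', 1) for p in parts)
    return parts[0] if len(parts) == 1 else parts


assert parse_info('statistics\tcluster_size=3;cluster_key=abc') == \
    {'cluster_size': '3', 'cluster_key': 'abc'}
assert parse_info('namespaces\ttest;bar') == ['test', 'bar']
assert parse_info('build\t4.9.0.3') == '4.9.0.3'
```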
- if '=' in data: - retval = dict( - metric.split("=", 1) for metric in data_arr - ) - else: - # if only 1 element found, and not kv, return just the value. - if len(data_arr) == 1: - retval = data_arr[0] - else: - retval = data_arr - return retval - - def _update_build_list(self): - """creates self._build_list which is a unique list - of build versions.""" - self._build_list = set() - for node in self._nodes: - build = self._info_cmd_helper('build', node) - self._build_list.add(build) - - # just checks to see if the version is 4.3 or greater - def _can_use_cluster_stable(self): - # if version <4.3 we can't use cluster-stable info cmd - # regex hack to check for versions beginning with 0-3 or - # beginning with 4.0,4.1,4.2 - if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)): - return False - return True - - def _update_cluster_namespace_list(self): - """ make a unique list of namespaces - TODO: does this work on a rolling namespace add/deletion? - thankfully if it doesn't, we dont need this on builds >=4.3""" - self._namespaces = set() - for node in self._nodes: - namespaces = self._info_cmd_helper('namespaces', node) - for namespace in namespaces: - self._namespaces.add(namespace) - - def _update_cluster_statistics(self): - """create a dict of nodes with their related stats """ - self._cluster_statistics = {} - for node in self._nodes: - self._cluster_statistics[node] = \ - self._info_cmd_helper('statistics', node) - - def _update_nodes_list(self): - """get a fresh list of all the nodes""" - self._nodes = self._client.get_nodes() - if not self._nodes: - self.module.fail_json("Failed to retrieve at least 1 node.") - - def _namespace_has_migs(self, namespace, node=None): - """returns a True or False. - Does the namespace have migrations for the node passed? - If no node passed, uses the local node or the first one in the list""" - namespace_stats = self._info_cmd_helper("namespace/" + namespace, node) - try: - namespace_tx = \ - int(namespace_stats[self.module.params['migrate_tx_key']]) - namespace_rx = \ - int(namespace_stats[self.module.params['migrate_rx_key']]) - except KeyError: - self.module.fail_json( - msg="Did not find partition remaining key:" + - self.module.params['migrate_tx_key'] + - " or key:" + - self.module.params['migrate_rx_key'] + - " in 'namespace/" + - namespace + - "' output." - ) - except TypeError: - self.module.fail_json( - msg="namespace stat returned was not numerical" - ) - return namespace_tx != 0 or namespace_rx != 0 - - def _node_has_migs(self, node=None): - """just calls namespace_has_migs and - if any namespace has migs returns true""" - migs = 0 - self._update_cluster_namespace_list() - for namespace in self._namespaces: - if self._namespace_has_migs(namespace, node): - migs += 1 - return migs != 0 - - def _cluster_key_consistent(self): - """create a dictionary to store what each node - returns the cluster key as. 
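(The docstring resumes below.) The counting this method performs can also be expressed with `collections.Counter`; a sketch under the same assumptions, not the module's code:

```python
# Equivalent consistency check with collections.Counter (a sketch):
# the cluster is consistent when every node reports the same, original key.
from collections import Counter


def cluster_key_consistent(stats_by_node, start_key):
    keys = Counter(stats['cluster_key'] for stats in stats_by_node.values())
    return len(keys) == 1 and start_key in keys


stats = {'node1': {'cluster_key': 'k1'}, 'node2': {'cluster_key': 'k1'}}
assert cluster_key_consistent(stats, 'k1')
assert not cluster_key_consistent(stats, 'k2')
```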
we should end up with only 1 dict key, - with the key being the cluster key.""" - cluster_keys = {} - for node in self._nodes: - cluster_key = self._cluster_statistics[node][ - 'cluster_key'] - if cluster_key not in cluster_keys: - cluster_keys[cluster_key] = 1 - else: - cluster_keys[cluster_key] += 1 - if len(cluster_keys.keys()) == 1 and \ - self._start_cluster_key in cluster_keys: - return True - return False - - def _cluster_migrates_allowed(self): - """ensure all nodes have 'migrate_allowed' in their stats output""" - for node in self._nodes: - node_stats = self._info_cmd_helper('statistics', node) - allowed = node_stats['migrate_allowed'] - if allowed == "false": - return False - return True - - def _cluster_has_migs(self): - """calls node_has_migs for each node""" - migs = 0 - for node in self._nodes: - if self._node_has_migs(node): - migs += 1 - if migs == 0: - return False - return True - - def _has_migs(self, local): - if local: - return self._local_node_has_migs() - return self._cluster_has_migs() - - def _local_node_has_migs(self): - return self._node_has_migs(None) - - def _is_min_cluster_size(self): - """checks that all nodes in the cluster are returning the - minimum cluster size specified in their statistics output""" - sizes = set() - for node in self._cluster_statistics: - sizes.add(int(self._cluster_statistics[node]['cluster_size'])) - - if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no - return False - if (min(sizes)) >= self.module.params['min_cluster_size']: - return True - return False - - def _cluster_stable(self): - """Added 4.3: - cluster-stable:size=;ignore-migrations=;namespace= - Returns the current 'cluster_key' when the following are satisfied: - - If 'size' is specified then the target node's 'cluster-size' - must match size. - If 'ignore-migrations' is either unspecified or 'false' then - the target node's migrations counts must be zero for the provided - 'namespace' or all namespaces if 'namespace' is not provided.""" - cluster_key = set() - cluster_key.add(self._info_cmd_helper('statistics')['cluster_key']) - cmd = "cluster-stable:" - target_cluster_size = self.module.params['target_cluster_size'] - if target_cluster_size is not None: - cmd = cmd + "size=" + str(target_cluster_size) + ";" - for node in self._nodes: - try: - cluster_key.add(self._info_cmd_helper(cmd, node)) - except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception - if 'unstable-cluster' in e.msg: - return False - raise e - if len(cluster_key) == 1: - return True - return False - - def _cluster_good_state(self): - """checks a few things to make sure we're OK to say the cluster - has no migs. It could be in a unhealthy condition that does not allow - migs, or a split brain""" - if self._cluster_key_consistent() is not True: - return False, "Cluster key inconsistent." - if self._is_min_cluster_size() is not True: - return False, "Cluster min size not reached." - if self._cluster_migrates_allowed() is not True: - return False, "migrate_allowed is false somewhere." - return True, "OK." 
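The `has_migs` loop that follows implements a common stabilization pattern: a check must succeed several consecutive times, and any single failure resets the streak. A generic, self-contained sketch of that shape (the names are illustrative, not the module's):

```python
# Generic shape of the polling loop in has_migs() below: a check must pass
# several consecutive times before the cluster is declared migration-free.
import time


def wait_until_stable(check, tries_limit, consecutive_needed, sleep_seconds):
    good, tries = 0, 0
    while tries < tries_limit and good < consecutive_needed:
        good = good + 1 if check() else 0   # any failure resets the streak
        tries += 1
        if good < consecutive_needed:
            time.sleep(sleep_seconds)
    return good >= consecutive_needed


# e.g. require 3 clean checks in a row, polling up to 10 times
assert wait_until_stable(lambda: True, tries_limit=10,
                         consecutive_needed=3, sleep_seconds=0)
```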
- - def has_migs(self, local=True): - """returns a boolean, False if no migrations otherwise True""" - consecutive_good = 0 - try_num = 0 - skip_reason = list() - while \ - try_num < int(self.module.params['tries_limit']) and \ - consecutive_good < \ - int(self.module.params['consecutive_good_checks']): - - self._update_nodes_list() - self._update_cluster_statistics() - - # These checks are outside of the while loop because - # we probably want to skip & sleep instead of failing entirely - stable, reason = self._cluster_good_state() - if stable is not True: - skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + reason - ) - else: - if self._can_use_cluster_stable(): - if self._cluster_stable(): - consecutive_good += 1 - else: - consecutive_good = 0 - skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + " cluster_stable" - ) - elif self._has_migs(local): - # print("_has_migs") - skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + " migrations" - ) - consecutive_good = 0 - else: - consecutive_good += 1 - if consecutive_good == self.module.params[ - 'consecutive_good_checks']: - break - try_num += 1 - sleep(self.module.params['sleep_between_checks']) - # print(skip_reason) - if consecutive_good == self.module.params['consecutive_good_checks']: - return False, None - return True, skip_reason - - -def main(): - """main method for ansible module""" - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/influxdb/influxdb_database.py b/plugins/modules/database/influxdb/influxdb_database.py deleted file mode 100644 index 6601b30124..0000000000 --- a/plugins/modules/database/influxdb/influxdb_database.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kamil Szczygiel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: influxdb_database -short_description: Manage InfluxDB databases -description: - - Manage InfluxDB databases. -author: "Kamil Szczygiel (@kamsz)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests -options: - database_name: - description: - - Name of the database. - required: true - type: str - state: - description: - - Determines if the database should be created or destroyed. 
- choices: [ absent, present ] - default: present - type: str -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -# Example influxdb_database command from Ansible Playbooks -- name: Create database - community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - -- name: Destroy database - community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - state: absent - -- name: Create database using custom credentials - community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - username: "{{influxdb_username}}" - password: "{{influxdb_password}}" - database_name: "{{influxdb_database_name}}" - ssl: yes - validate_certs: yes -''' - -RETURN = r''' -# only defaults -''' - -try: - import requests.exceptions - from influxdb import exceptions -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb - - -def find_database(module, client, database_name): - database = None - - try: - databases = client.get_list_database() - for db in databases: - if db['name'] == database_name: - database = db - break - except requests.exceptions.ConnectionError as e: - module.fail_json(msg=str(e)) - return database - - -def create_database(module, client, database_name): - if not module.check_mode: - try: - client.create_database(database_name) - except requests.exceptions.ConnectionError as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=True) - - -def drop_database(module, client, database_name): - if not module.check_mode: - try: - client.drop_database(database_name) - except exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - - module.exit_json(changed=True) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - database_name=dict(required=True, type='str'), - state=dict(default='present', type='str', choices=['present', 'absent']) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - state = module.params['state'] - - influxdb = InfluxDb(module) - client = influxdb.connect_to_influxdb() - database_name = influxdb.database_name - database = find_database(module, client, database_name) - - if state == 'present': - if database: - module.exit_json(changed=False) - else: - create_database(module, client, database_name) - - if state == 'absent': - if database: - drop_database(module, client, database_name) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/influxdb/influxdb_query.py b/plugins/modules/database/influxdb/influxdb_query.py deleted file mode 100644 index bff6fa989b..0000000000 --- a/plugins/modules/database/influxdb/influxdb_query.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: influxdb_query -short_description: Query data points from InfluxDB -description: - - Query data points from InfluxDB. -author: "René Moser (@resmo)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" -options: - query: - description: - - Query to be executed. 
- required: true - type: str - database_name: - description: - - Name of the database. - required: true - type: str -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -- name: Query connections - community.general.influxdb_query: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - query: "select mean(value) from connections" - register: connection - -- name: Query connections with tags filters - community.general.influxdb_query: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - query: "select mean(value) from connections where region='zue01' and host='server01'" - register: connection - -- name: Print results from the query - ansible.builtin.debug: - var: connection.query_results -''' - -RETURN = r''' -query_results: - description: Result from the query - returned: success - type: list - sample: - - mean: 1245.5333333333333 - time: "1970-01-01T00:00:00Z" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb - - -class AnsibleInfluxDBRead(InfluxDb): - - def read_by_query(self, query): - client = self.connect_to_influxdb() - try: - rs = client.query(query) - if rs: - return list(rs.get_points()) - except Exception as e: - self.module.fail_json(msg=to_native(e)) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - query=dict(type='str', required=True), - database_name=dict(required=True, type='str'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - influx = AnsibleInfluxDBRead(module) - query = module.params.get('query') - results = influx.read_by_query(query) - module.exit_json(changed=True, query_results=results) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py deleted file mode 100644 index 6cb45229cd..0000000000 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ /dev/null @@ -1,343 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kamil Szczygiel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: influxdb_retention_policy -short_description: Manage InfluxDB retention policies -description: - - Manage InfluxDB retention policies. -author: "Kamil Szczygiel (@kamsz)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests -options: - database_name: - description: - - Name of the database. - required: true - type: str - policy_name: - description: - - Name of the retention policy. - required: true - type: str - state: - description: - - State of the retention policy. - choices: [ absent, present ] - default: present - type: str - version_added: 3.1.0 - duration: - description: - - Determines how long InfluxDB should keep the data. If specified, it - should be C(INF) or at least one hour. If not specified, C(INF) is - assumed. Supports complex duration expressions with multiple units. - - Required only if I(state) is set to C(present). - type: str - replication: - description: - - Determines how many independent copies of each point are stored in the cluster. 
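(The option list continues below.) The duration literals this module accepts, such as C(5d1h30m), are summed into nanoseconds by `parse_duration_literal` further down. A simplified sketch of that arithmetic; it omits the C(µ) microsecond alias, the unit values follow the module's own table, and C(INF) maps to zero as in the module:

```python
# Sketch of the duration arithmetic this module performs further down:
# "5d1h30m"-style literals are summed into nanoseconds, INF meaning 0.
import re

UNIT_NS = {'ns': 1, 'u': 10**3, 'ms': 10**6, 's': 10**9,
           'm': 60 * 10**9, 'h': 3600 * 10**9, 'd': 86400 * 10**9,
           'w': 7 * 86400 * 10**9}
DURATION = re.compile(r'(\d+)(ns|u|ms|s|m|h|d|w)')


def duration_ns(literal):
    if literal == 'INF':
        return 0
    return sum(int(n) * UNIT_NS[u] for n, u in DURATION.findall(literal))


assert duration_ns('1h') == 3600 * 10**9
assert duration_ns('5d1h30m') == (5 * 86400 + 3600 + 30 * 60) * 10**9
```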
- - Required only if I(state) is set to C(present). - type: int - default: - description: - - Sets the retention policy as default retention policy. - type: bool - default: false - shard_group_duration: - description: - - Determines the time range covered by a shard group. If specified it - must be at least one hour. If none, it's determined by InfluxDB by - the rentention policy's duration. Supports complex duration expressions - with multiple units. - type: str - version_added: '2.0.0' -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -# Example influxdb_retention_policy command from Ansible Playbooks -- name: Create 1 hour retention policy - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1h - replication: 1 - ssl: yes - validate_certs: yes - state: present - -- name: Create 1 day retention policy with 1 hour shard group duration - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1d - replication: 1 - shard_group_duration: 1h - state: present - -- name: Create 1 week retention policy with 1 day shard group duration - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1w - replication: 1 - shard_group_duration: 1d - state: present - -- name: Create infinite retention policy with 1 week of shard group duration - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: INF - replication: 1 - ssl: no - validate_certs: no - shard_group_duration: 1w - state: present - -- name: Create retention policy with complex durations - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 5d1h30m - replication: 1 - ssl: no - validate_certs: no - shard_group_duration: 1d10h30m - state: present - -- name: Drop retention policy - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - state: absent -''' - -RETURN = r''' -# only defaults -''' - -import re - -try: - import requests.exceptions - from influxdb import exceptions -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb -from ansible.module_utils.common.text.converters import to_native - - -VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') - -DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') -EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') - -DURATION_UNIT_NANOSECS = { - 'ns': 1, - 'u': 1000, - 'µ': 1000, - 'ms': 1000 * 1000, - 's': 1000 * 1000 * 1000, - 'm': 1000 * 1000 * 1000 * 60, - 'h': 1000 * 1000 * 1000 * 60 * 60, - 'd': 1000 * 1000 * 1000 * 60 * 60 * 24, - 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, -} - -MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] -MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] - - -def check_duration_literal(value): - return VALID_DURATION_REGEX.search(value) is not None - - -def parse_duration_literal(value, 
extended=False): - duration = 0.0 - - if value == "INF": - return duration - - lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) - - for duration_literal in lookup: - filtered_literal = list(filter(None, duration_literal)) - duration_val = float(filtered_literal[0]) - duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] - - return duration - - -def find_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - hostname = module.params['hostname'] - retention_policy = None - - try: - retention_policies = client.get_list_retention_policies(database=database_name) - for policy in retention_policies: - if policy['name'] == policy_name: - retention_policy = policy - break - except requests.exceptions.ConnectionError as e: - module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e))) - - if retention_policy is not None: - retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True) - retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True) - - return retention_policy - - -def create_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - duration = module.params['duration'] - replication = module.params['replication'] - default = module.params['default'] - shard_group_duration = module.params['shard_group_duration'] - - if not check_duration_literal(duration): - module.fail_json(msg="Failed to parse value of duration") - - influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: - module.fail_json(msg="duration value must be at least 1h") - - if shard_group_duration is not None: - if not check_duration_literal(shard_group_duration): - module.fail_json(msg="Failed to parse value of shard_group_duration") - - influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: - module.fail_json(msg="shard_group_duration value must be finite and at least 1h") - - if not module.check_mode: - try: - if shard_group_duration: - client.create_retention_policy(policy_name, duration, replication, database_name, default, - shard_group_duration) - else: - client.create_retention_policy(policy_name, duration, replication, database_name, default) - except exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - module.exit_json(changed=True) - - -def alter_retention_policy(module, client, retention_policy): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - duration = module.params['duration'] - replication = module.params['replication'] - default = module.params['default'] - shard_group_duration = module.params['shard_group_duration'] - - changed = False - - if not check_duration_literal(duration): - module.fail_json(msg="Failed to parse value of duration") - - influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: - module.fail_json(msg="duration value must be at least 1h") - - if shard_group_duration is None: - influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"] - else: - if not 
check_duration_literal(shard_group_duration): - module.fail_json(msg="Failed to parse value of shard_group_duration") - - influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: - module.fail_json(msg="shard_group_duration value must be finite and at least 1h") - - if (retention_policy['duration'] != influxdb_duration_format or - retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or - retention_policy['replicaN'] != int(replication) or - retention_policy['default'] != default): - if not module.check_mode: - try: - client.alter_retention_policy(policy_name, database_name, duration, replication, default, - shard_group_duration) - except exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - changed = True - module.exit_json(changed=changed) - - -def drop_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - - if not module.check_mode: - try: - client.drop_retention_policy(policy_name, database_name) - except exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - module.exit_json(changed=True) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - state=dict(default='present', type='str', choices=['present', 'absent']), - database_name=dict(required=True, type='str'), - policy_name=dict(required=True, type='str'), - duration=dict(type='str'), - replication=dict(type='int'), - default=dict(default=False, type='bool'), - shard_group_duration=dict(type='str'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=( - ('state', 'present', ['duration', 'replication']), - ), - ) - - state = module.params['state'] - - influxdb = InfluxDb(module) - client = influxdb.connect_to_influxdb() - - retention_policy = find_retention_policy(module, client) - - if state == 'present': - if retention_policy: - alter_retention_policy(module, client, retention_policy) - else: - create_retention_policy(module, client) - - if state == 'absent': - if retention_policy: - drop_retention_policy(module, client) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py deleted file mode 100644 index 76524d8613..0000000000 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Vitaliy Zhhuta -# insipred by Kamil Szczygiel influxdb_database module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: influxdb_user -short_description: Manage InfluxDB users -description: - - Manage InfluxDB users. -author: "Vitaliy Zhhuta (@zhhuta)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" -options: - user_name: - description: - - Name of the user. - required: True - type: str - user_password: - description: - - Password to be set for the user. - required: false - type: str - admin: - description: - - Whether the user should be in the admin role or not. - - Since version 2.8, the role will also be updated. - default: no - type: bool - state: - description: - - State of the user. 
- choices: [ absent, present ] - default: present - type: str - grants: - description: - - Privileges to grant to this user. - - Takes a list of dicts containing the "database" and "privilege" keys. - - If this argument is not provided, the current grants will be left alone. - - If an empty list is provided, all grants for the user will be removed. - type: list - elements: dict -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -- name: Create a user on localhost using default login credentials - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - -- name: Create a user on localhost using custom login credentials - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - -- name: Create an admin user on a remote host using custom login credentials - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - admin: yes - hostname: "{{ influxdb_hostname }}" - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - -- name: Create a user on localhost with privileges - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - grants: - - database: 'collectd' - privilege: 'WRITE' - - database: 'graphite' - privilege: 'READ' - -- name: Destroy a user using custom login credentials - community.general.influxdb_user: - user_name: john - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - state: absent -''' - -RETURN = r''' -#only defaults -''' - -import json - -from ansible.module_utils.urls import ConnectionError -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -import ansible_collections.community.general.plugins.module_utils.influxdb as influx - - -def find_user(module, client, user_name): - user_result = None - - try: - users = client.get_list_users() - for user in users: - if user['user'] == user_name: - user_result = user - break - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - return user_result - - -def check_user_password(module, client, user_name, user_password): - try: - client.switch_user(user_name, user_password) - client.get_list_users() - except influx.exceptions.InfluxDBClientError as e: - if e.code == 401: - return False - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - finally: - # restore previous user - client.switch_user(module.params['username'], module.params['password']) - return True - - -def set_user_password(module, client, user_name, user_password): - if not module.check_mode: - try: - client.set_user_password(user_name, user_password) - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - - -def create_user(module, client, user_name, user_password, admin): - if not module.check_mode: - try: - client.create_user(user_name, user_password, admin) - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - - -def drop_user(module, client, user_name): - if not module.check_mode: - try: - client.drop_user(user_name) - except influx.exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - - module.exit_json(changed=True) - - -def set_user_grants(module, client, user_name, grants): - changed = False - - try: - current_grants = client.get_list_privileges(user_name) - 
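The surrounding function compares current and desired grants item by item. Reduced to plain data, the reconciliation plan is two list differences; a sketch (not the module's code, which also normalizes C(ALL PRIVILEGES) and filters C(NO PRIVILEGES) first):

```python
# The reconciliation around this point, reduced to plain data (a sketch):
# revoke anything present-but-undesired, grant anything desired-but-missing.
def plan_grant_changes(current, desired):
    to_revoke = [g for g in current if g not in desired]
    to_grant = [g for g in desired if g not in current]
    return to_revoke, to_grant


current = [{'database': 'collectd', 'privilege': 'WRITE'}]
desired = [{'database': 'collectd', 'privilege': 'WRITE'},
           {'database': 'graphite', 'privilege': 'READ'}]
assert plan_grant_changes(current, desired) == \
    ([], [{'database': 'graphite', 'privilege': 'READ'}])
```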
parsed_grants = [] - # Fix privileges wording - for i, v in enumerate(current_grants): - if v['privilege'] != 'NO PRIVILEGES': - if v['privilege'] == 'ALL PRIVILEGES': - v['privilege'] = 'ALL' - parsed_grants.append(v) - - # check if the current grants are included in the desired ones - for current_grant in parsed_grants: - if current_grant not in grants: - if not module.check_mode: - client.revoke_privilege(current_grant['privilege'], - current_grant['database'], - user_name) - changed = True - - # check if the desired grants are included in the current ones - for grant in grants: - if grant not in parsed_grants: - if not module.check_mode: - client.grant_privilege(grant['privilege'], - grant['database'], - user_name) - changed = True - - except influx.exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - - return changed - - -INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication" - - -def main(): - argument_spec = influx.InfluxDb.influxdb_argument_spec() - argument_spec.update( - state=dict(default='present', type='str', choices=['present', 'absent']), - user_name=dict(required=True, type='str'), - user_password=dict(required=False, type='str', no_log=True), - admin=dict(default='False', type='bool'), - grants=dict(type='list', elements='dict'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - state = module.params['state'] - user_name = module.params['user_name'] - user_password = module.params['user_password'] - admin = module.params['admin'] - grants = module.params['grants'] - influxdb = influx.InfluxDb(module) - client = influxdb.connect_to_influxdb() - - user = None - try: - user = find_user(module, client, user_name) - except influx.exceptions.InfluxDBClientError as e: - if e.code == 403: - reason = None - try: - msg = json.loads(e.content) - reason = msg["error"] - except (KeyError, ValueError): - module.fail_json(msg=to_native(e)) - - if reason != INFLUX_AUTH_FIRST_USER_REQUIRED: - module.fail_json(msg=to_native(e)) - else: - module.fail_json(msg=to_native(e)) - - changed = False - - if state == 'present': - if user: - if not check_user_password(module, client, user_name, user_password) and user_password is not None: - set_user_password(module, client, user_name, user_password) - changed = True - - try: - if admin and not user['admin']: - if not module.check_mode: - client.grant_admin_privileges(user_name) - changed = True - elif not admin and user['admin']: - if not module.check_mode: - client.revoke_admin_privileges(user_name) - changed = True - except influx.exceptions.InfluxDBClientError as e: - module.fail_json(msg=to_native(e)) - - else: - user_password = user_password or '' - create_user(module, client, user_name, user_password, admin) - changed = True - - if grants is not None: - if set_user_grants(module, client, user_name, grants): - changed = True - - module.exit_json(changed=changed) - - if state == 'absent': - if user: - drop_user(module, client, user_name) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/influxdb/influxdb_write.py b/plugins/modules/database/influxdb/influxdb_write.py deleted file mode 100644 index e34fe9c2cf..0000000000 --- a/plugins/modules/database/influxdb/influxdb_write.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, René Moser -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: influxdb_write -short_description: Write data points into InfluxDB -description: - - Write data points into InfluxDB. -author: "René Moser (@resmo)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" -options: - data_points: - description: - - Data points as dict to write into the database. - required: true - type: list - elements: dict - database_name: - description: - - Name of the database. - required: true - type: str -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -- name: Write points into database - community.general.influxdb_write: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - data_points: - - measurement: connections - tags: - host: server01 - region: us-west - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 2000 - - measurement: connections - tags: - host: server02 - region: us-east - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 3000 -''' - -RETURN = r''' -# only defaults -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb - - -class AnsibleInfluxDBWrite(InfluxDb): - - def write_data_point(self, data_points): - client = self.connect_to_influxdb() - - try: - client.write_points(data_points) - except Exception as e: - self.module.fail_json(msg=to_native(e)) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - data_points=dict(required=True, type='list', elements='dict'), - database_name=dict(required=True, type='str'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - ) - - influx = AnsibleInfluxDBWrite(module) - data_points = module.params.get('data_points') - influx.write_data_point(data_points) - module.exit_json(changed=True) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/elasticsearch_plugin.py b/plugins/modules/database/misc/elasticsearch_plugin.py deleted file mode 100644 index bc7df931b6..0000000000 --- a/plugins/modules/database/misc/elasticsearch_plugin.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Mathew Davies -# (c) 2017, Sam Doran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: elasticsearch_plugin -short_description: Manage Elasticsearch plugins -description: - - Manages Elasticsearch plugins. -author: - - Mathew Davies (@ThePixelDeveloper) - - Sam Doran (@samdoran) -options: - name: - description: - - Name of the plugin to install. - required: True - type: str - state: - description: - - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - src: - description: - - Optionally set the source location to retrieve the plugin from. This can be a file:// - URL to install from a local file, or a remote URL. If this is not set, the plugin - location is just based on the name. - - The name parameter must match the descriptor in the plugin ZIP specified. - - Is only used if the state would change, which is solely checked based on the name - parameter. 
If, for example, the plugin is already installed, changing this has no - effect. - - For ES 1.x use url. - required: False - type: str - url: - description: - - Set exact URL to download the plugin from (Only works for ES 1.x). - - For ES 2.x and higher, use src. - required: False - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h..." - - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. - default: 1m - type: str - force: - description: - - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails." - default: False - type: bool - plugin_bin: - description: - - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. - - The default changed in Ansible 2.4 to None. - type: path - plugin_dir: - description: - - Your configured plugin directory specified in Elasticsearch - default: /usr/share/elasticsearch/plugins/ - type: path - proxy_host: - description: - - Proxy host to use during plugin installation - type: str - proxy_port: - description: - - Proxy port to use during plugin installation - type: str - version: - description: - - Version of the plugin to be installed. - If plugin exists with previous version, it will NOT be updated - type: str -''' - -EXAMPLES = ''' -- name: Install Elasticsearch Head plugin in Elasticsearch 2.x - community.general.elasticsearch_plugin: - name: mobz/elasticsearch-head - state: present - -- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x - community.general.elasticsearch_plugin: - name: mobz/elasticsearch-head - version: 2.0.0 - -- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x - community.general.elasticsearch_plugin: - name: mobz/elasticsearch-head - state: absent - -- name: Install a specific plugin in Elasticsearch >= 5.0 - community.general.elasticsearch_plugin: - name: analysis-icu - state: present - -- name: Install the ingest-geoip plugin with a forced installation - community.general.elasticsearch_plugin: - name: ingest-geoip - state: present - force: yes -''' - -import os - -from ansible.module_utils.basic import AnsibleModule - - -PACKAGE_STATE_MAP = dict( - present="install", - absent="remove" -) - -PLUGIN_BIN_PATHS = tuple([ - '/usr/share/elasticsearch/bin/elasticsearch-plugin', - '/usr/share/elasticsearch/bin/plugin' -]) - - -def parse_plugin_repo(string): - elements = string.split("/") - - # We first consider the simplest form: pluginname - repo = elements[0] - - # We consider the form: username/pluginname - if len(elements) > 1: - repo = elements[1] - - # remove elasticsearch- prefix - # remove es- prefix - for string in ("elasticsearch-", "es-"): - if repo.startswith(string): - return repo[len(string):] - - return repo - - -def is_plugin_present(plugin_name, plugin_dir): - return os.path.isdir(os.path.join(plugin_dir, plugin_name)) - - -def parse_error(string): - reason = "ERROR: " - try: - return string[string.index(reason) + len(reason):].strip() - except ValueError: - return string - - -def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]] - is_old_command = (os.path.basename(plugin_bin) == 'plugin') - - # Timeout and version are only valid for plugin, not elasticsearch-plugin - if is_old_command: - if timeout: - cmd_args.append("--timeout %s" % timeout) - - if version: - plugin_name = plugin_name + '/' + 
version - cmd_args[2] = plugin_name - - if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) - - # Legacy ES 1.x - if url: - cmd_args.append("--url %s" % url) - - if force: - cmd_args.append("--batch") - if src: - cmd_args.append(src) - else: - cmd_args.append(plugin_name) - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err) - - return True, cmd, out, err - - -def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err) - - return True, cmd, out, err - - -def get_plugin_bin(module, plugin_bin=None): - # Use the plugin_bin that was supplied first before trying other options - valid_plugin_bin = None - if plugin_bin and os.path.isfile(plugin_bin): - valid_plugin_bin = plugin_bin - - else: - # Add the plugin_bin passed into the module to the top of the list of paths to test, - # testing for that binary name first before falling back to the default paths. - bin_paths = list(PLUGIN_BIN_PATHS) - if plugin_bin and plugin_bin not in bin_paths: - bin_paths.insert(0, plugin_bin) - - # Get separate lists of dirs and binary names from the full paths to the - # plugin binaries. - plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths])) - plugin_bins = list(set([os.path.basename(x) for x in bin_paths])) - - # Check for the binary names in the default system paths as well as the path - # specified in the module arguments. - for bin_file in plugin_bins: - valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs) - if valid_plugin_bin: - break - - if not valid_plugin_bin: - module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' 
% plugin_bin) - - return valid_plugin_bin - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - src=dict(default=None), - url=dict(default=None), - timeout=dict(default="1m"), - force=dict(type='bool', default=False), - plugin_bin=dict(type="path"), - plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), - proxy_host=dict(default=None), - proxy_port=dict(default=None), - version=dict(default=None) - ), - mutually_exclusive=[("src", "url")], - supports_check_mode=True - ) - - name = module.params["name"] - state = module.params["state"] - url = module.params["url"] - src = module.params["src"] - timeout = module.params["timeout"] - force = module.params["force"] - plugin_bin = module.params["plugin_bin"] - plugin_dir = module.params["plugin_dir"] - proxy_host = module.params["proxy_host"] - proxy_port = module.params["proxy_port"] - version = module.params["version"] - - # Search provided path and system paths for valid binary - plugin_bin = get_plugin_bin(module, plugin_bin) - - repo = parse_plugin_repo(name) - present = is_plugin_present(repo, plugin_dir) - - # skip if the state is correct - if (present and state == "present") or (state == "absent" and not present): - module.exit_json(changed=False, name=name, state=state) - - if state == "present": - changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force) - - elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name) - - module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/kibana_plugin.py b/plugins/modules/database/misc/kibana_plugin.py deleted file mode 100644 index 25d7719353..0000000000 --- a/plugins/modules/database/misc/kibana_plugin.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2016, Thierno IB. BARRY @barryib -# Sponsored by Polyconseil http://polyconseil.fr. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: kibana_plugin -short_description: Manage Kibana plugins -description: - - This module can be used to manage Kibana plugins. -author: Thierno IB. BARRY (@barryib) -options: - name: - description: - - Name of the plugin to install. - required: True - type: str - state: - description: - - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - url: - description: - - Set exact URL to download the plugin from. - - For local file, prefix its absolute path with file:// - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h etc." - default: 1m - type: str - plugin_bin: - description: - - Location of the Kibana binary. - default: /opt/kibana/bin/kibana - type: path - plugin_dir: - description: - - Your configured plugin directory specified in Kibana. - default: /opt/kibana/installedPlugins/ - type: path - version: - description: - - Version of the plugin to be installed. - - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes. - type: str - force: - description: - - Delete and re-install the plugin. Can be useful for plugins update. 
- type: bool - default: false - allow_root: - description: - - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands. - type: bool - default: false - version_added: 2.3.0 -''' - -EXAMPLES = ''' -- name: Install Elasticsearch head plugin - community.general.kibana_plugin: - state: present - name: elasticsearch/marvel - -- name: Install specific version of a plugin - community.general.kibana_plugin: - state: present - name: elasticsearch/marvel - version: '2.3.3' - -- name: Uninstall Elasticsearch head plugin - community.general.kibana_plugin: - state: absent - name: elasticsearch/marvel -''' - -RETURN = ''' -cmd: - description: the launched command during plugin management (install / remove) - returned: success - type: str -name: - description: the plugin name to install or remove - returned: success - type: str -url: - description: the url from where the plugin is installed from - returned: success - type: str -timeout: - description: the timeout for plugin download - returned: success - type: str -stdout: - description: the command stdout - returned: success - type: str -stderr: - description: the command stderr - returned: success - type: str -state: - description: the state for the managed plugin - returned: success - type: str -''' - -import os -from distutils.version import LooseVersion -from ansible.module_utils.basic import AnsibleModule - - -PACKAGE_STATE_MAP = dict( - present="--install", - absent="--remove" -) - - -def parse_plugin_repo(string): - elements = string.split("/") - - # We first consider the simplest form: pluginname - repo = elements[0] - - # We consider the form: username/pluginname - if len(elements) > 1: - repo = elements[1] - - # remove elasticsearch- prefix - # remove es- prefix - for string in ("elasticsearch-", "es-"): - if repo.startswith(string): - return repo[len(string):] - - return repo - - -def is_plugin_present(plugin_dir, working_dir): - return os.path.isdir(os.path.join(working_dir, plugin_dir)) - - -def parse_error(string): - reason = "reason: " - try: - return string[string.index(reason) + len(reason):].strip() - except ValueError: - return string - - -def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'): - if LooseVersion(kibana_version) > LooseVersion('4.6'): - kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') - cmd_args = [kibana_plugin_bin, "install"] - if url: - cmd_args.append(url) - else: - cmd_args.append(plugin_name) - else: - cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] - - if url: - cmd_args.extend(["--url", url]) - - if timeout: - cmd_args.extend(["--timeout", timeout]) - - if allow_root: - cmd_args.append('--allow-root') - - if module.check_mode: - return True, " ".join(cmd_args), "check mode", "" - - rc, out, err = module.run_command(cmd_args) - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, " ".join(cmd_args), out, err - - -def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): - if LooseVersion(kibana_version) > LooseVersion('4.6'): - kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') - cmd_args = [kibana_plugin_bin, "remove", plugin_name] - else: - cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] - - if allow_root: - cmd_args.append('--allow-root') - - if module.check_mode: - return True, " ".join(cmd_args), "check mode", "" - - rc, out, 
err = module.run_command(cmd_args) - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, " ".join(cmd_args), out, err - - -def get_kibana_version(module, plugin_bin, allow_root): - cmd_args = [plugin_bin, '--version'] - - if allow_root: - cmd_args.append('--allow-root') - - rc, out, err = module.run_command(cmd_args) - if rc != 0: - module.fail_json(msg="Failed to get Kibana version : %s" % err) - - return out.strip() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - url=dict(default=None), - timeout=dict(default="1m"), - plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), - plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), - version=dict(default=None), - force=dict(default=False, type="bool"), - allow_root=dict(default=False, type="bool"), - ), - supports_check_mode=True, - ) - - name = module.params["name"] - state = module.params["state"] - url = module.params["url"] - timeout = module.params["timeout"] - plugin_bin = module.params["plugin_bin"] - plugin_dir = module.params["plugin_dir"] - version = module.params["version"] - force = module.params["force"] - allow_root = module.params["allow_root"] - - changed, cmd, out, err = False, '', '', '' - - kibana_version = get_kibana_version(module, plugin_bin, allow_root) - - present = is_plugin_present(parse_plugin_repo(name), plugin_dir) - - # skip if the state is correct - if (present and state == "present" and not force) or (state == "absent" and not present and not force): - module.exit_json(changed=False, name=name, state=state) - - if version: - name = name + '/' + version - - if state == "present": - if force: - remove_plugin(module, plugin_bin, name, allow_root, kibana_version) - changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version) - - elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version) - - module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/odbc.py b/plugins/modules/database/misc/odbc.py deleted file mode 100644 index 5d1cdf884b..0000000000 --- a/plugins/modules/database/misc/odbc.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, John Westcott -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: odbc -author: "John Westcott IV (@john-westcott-iv)" -version_added: "1.0.0" -short_description: Execute SQL via ODBC -description: - - Read/Write info via ODBC drivers. -options: - dsn: - description: - - The connection string passed into ODBC. - required: yes - type: str - query: - description: - - The SQL query to perform. - required: yes - type: str - params: - description: - - Parameters to pass to the SQL query. - type: list - elements: str - commit: - description: - - Perform a commit after the execution of the SQL query. - - Some databases allow a commit after a select whereas others raise an exception. - - Default is C(true) to support legacy module behavior. 
- type: bool - default: yes - version_added: 1.3.0 -requirements: - - "python >= 2.6" - - "pyodbc" - -notes: - - "Like the command module, this module always returns changed = yes whether or not the query would change the database." - - "To alter this behavior you can use C(changed_when): [yes or no]." - - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)." -''' - -EXAMPLES = ''' -- name: Set some values in the test db - community.general.odbc: - dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;" - query: "Select * from table_a where column1 = ?" - params: - - "value1" - commit: false - changed_when: no -''' - -RETURN = ''' -results: - description: List of lists of strings containing selected rows, likely empty for DDL statements. - returned: success - type: list - elements: list -description: - description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes." - returned: success - type: list - elements: dict -row_count: - description: "The number of rows selected or modified according to the cursor defaults to -1. See notes." - returned: success - type: str -''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -HAS_PYODBC = None -try: - import pyodbc - HAS_PYODBC = True -except ImportError as e: - HAS_PYODBC = False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - dsn=dict(type='str', required=True, no_log=True), - query=dict(type='str', required=True), - params=dict(type='list', elements='str'), - commit=dict(type='bool', default=True), - ), - ) - - dsn = module.params.get('dsn') - query = module.params.get('query') - params = module.params.get('params') - commit = module.params.get('commit') - - if not HAS_PYODBC: - module.fail_json(msg=missing_required_lib('pyodbc')) - - # Try to make a connection with the DSN - connection = None - try: - connection = pyodbc.connect(dsn) - except Exception as e: - module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e))) - - result = dict( - changed=True, - description=[], - row_count=-1, - results=[], - ) - - try: - cursor = connection.cursor() - - if params: - cursor.execute(query, params) - else: - cursor.execute(query) - if commit: - cursor.commit() - try: - # Get the rows out into an 2d array - for row in cursor.fetchall(): - new_row = [] - for column in row: - new_row.append("{0}".format(column)) - result['results'].append(new_row) - - # Return additional information from the cursor - for row_description in cursor.description: - description = {} - description['name'] = row_description[0] - description['type'] = row_description[1].__name__ - description['display_size'] = row_description[2] - description['internal_size'] = row_description[3] - description['precision'] = row_description[4] - description['scale'] = row_description[5] - description['nullable'] = row_description[6] - result['description'].append(description) - - result['row_count'] = cursor.rowcount - except pyodbc.ProgrammingError as pe: - pass - except Exception as e: - module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e))) - - cursor.close() - except Exception as e: - module.fail_json(msg="Failed to execute query: {0}".format(to_native(e))) - finally: - connection.close() - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/database/misc/redis.py b/plugins/modules/database/misc/redis.py deleted file mode 100644 index 960b072fea..0000000000 --- a/plugins/modules/database/misc/redis.py +++ /dev/null @@ -1,330 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redis -short_description: Various redis commands, replica and flush -description: - - Unified utility to interact with redis instances. -options: - command: - description: - - The selected redis command - - C(config) ensures a configuration setting on an instance. - - C(flush) flushes all the instance or a specified db. - - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).) - choices: [ config, flush, replica, slave ] - type: str - login_password: - description: - - The password used to authenticate with (usually not used) - type: str - login_host: - description: - - The host running the database - default: localhost - type: str - login_port: - description: - - The port to connect to - default: 6379 - type: int - master_host: - description: - - The host of the master instance [replica command] - type: str - master_port: - description: - - The port of the master instance [replica command] - type: int - replica_mode: - description: - - The mode of the redis instance [replica command] - - C(slave) is an alias for C(replica). - default: replica - choices: [ master, replica, slave ] - type: str - aliases: - - slave_mode - db: - description: - - The database to flush (used in db mode) [flush command] - type: int - flush_mode: - description: - - Type of flush (all the dbs in a redis instance or a specific one) - [flush command] - default: all - choices: [ all, db ] - type: str - name: - description: - - A redis config key. - type: str - value: - description: - - A redis config value. When memory size is needed, it is possible - to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024. - Units are case insensitive i.e. 1m = 1mb = 1M = 1MB. - type: str - -notes: - - Requires the redis-py Python package on the remote host. You can - install it with pip (pip install redis) or with a package manager.
- https://github.com/andymccurdy/redis-py - - If the redis master instance we are making replica of is password protected - this needs to be in the redis.conf in the masterauth variable - -seealso: - - module: community.general.redis_info -requirements: [ redis ] -author: "Xabier Larrakoetxea (@slok)" -''' - -EXAMPLES = ''' -- name: Set local redis instance to be a replica of melee.island on port 6377 - community.general.redis: - command: replica - master_host: melee.island - master_port: 6377 - -- name: Deactivate replica mode - community.general.redis: - command: replica - replica_mode: master - -- name: Flush all the redis db - community.general.redis: - command: flush - flush_mode: all - -- name: Flush only one db in a redis instance - community.general.redis: - command: flush - db: 1 - flush_mode: db - -- name: Configure local redis to have 10000 max clients - community.general.redis: - command: config - name: maxclients - value: 10000 - -- name: Configure local redis maxmemory to 4GB - community.general.redis: - command: config - name: maxmemory - value: 4GB - -- name: Configure local redis to have lua time limit of 100 ms - community.general.redis: - command: config - name: lua-time-limit - value: 100 -''' - -import traceback - -REDIS_IMP_ERR = None -try: - import redis -except ImportError: - REDIS_IMP_ERR = traceback.format_exc() - redis_found = False -else: - redis_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils.common.text.converters import to_native -import re - - -# Redis module specific support methods. -def set_replica_mode(client, master_host, master_port): - try: - return client.slaveof(master_host, master_port) - except Exception: - return False - - -def set_master_mode(client): - try: - return client.slaveof() - except Exception: - return False - - -def flush(client, db=None): - try: - if not isinstance(db, int): - return client.flushall() - else: - # The passed client has been connected to the database already - return client.flushdb() - except Exception: - return False - - -# Module execution. 
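# A minimal standalone sketch, not part of the original module: the C(value)
# option documented above accepts memory sizes such as 1KB, 2M or 400MB
# (base 1024, case insensitive), and this snippet mirrors the normalization
# that main() below applies before comparing the desired value with what
# CONFIG GET returns. The helper name _normalize_config_value is hypothetical
# and used for illustration only.
import re
from ansible.module_utils.common.text.formatters import human_to_bytes


def _normalize_config_value(raw):
    # Parse the value as a memory size when possible; anything that is not
    # one (for example 'noeviction') falls back to the raw string, because
    # human_to_bytes raises ValueError for non-numeric input.
    try:
        if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', raw.upper()):
            return str(human_to_bytes(raw.upper()))
    except ValueError:
        pass
    return raw

# _normalize_config_value('4GB')        -> '4294967296'
# _normalize_config_value('noeviction') -> 'noeviction'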
-def main(): - module = AnsibleModule( - argument_spec=dict( - command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), - login_password=dict(type='str', no_log=True), - login_host=dict(type='str', default='localhost'), - login_port=dict(type='int', default=6379), - master_host=dict(type='str'), - master_port=dict(type='int'), - replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], aliases=["slave_mode"]), - db=dict(type='int'), - flush_mode=dict(type='str', default='all', choices=['all', 'db']), - name=dict(type='str'), - value=dict(type='str') - ), - supports_check_mode=True, - ) - - if not redis_found: - module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR) - - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - command = module.params['command'] - if command == "slave": - command = "replica" - - # Replica Command section ----------- - if command == "replica": - master_host = module.params['master_host'] - master_port = module.params['master_port'] - mode = module.params['replica_mode'] - if mode == "slave": - mode = "replica" - - # Check if we have all the data - if mode == "replica": # Only need data if we want to be replica - if not master_host: - module.fail_json(msg='In replica mode master host must be provided') - - if not master_port: - module.fail_json(msg='In replica mode master port must be provided') - - # Connect and check - r = redis.StrictRedis(host=login_host, port=login_port, password=login_password) - try: - r.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - # Check if we are already in the mode that we want - info = r.info() - if mode == "master" and info["role"] == "master": - module.exit_json(changed=False, mode=mode) - - elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: - status = dict( - status=mode, - master_host=master_host, - master_port=master_port, - ) - module.exit_json(changed=False, mode=status) - else: - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "replica": - if module.check_mode or set_replica_mode(r, master_host, master_port): - info = r.info() - status = { - 'status': mode, - 'master_host': master_host, - 'master_port': master_port, - } - module.exit_json(changed=True, mode=status) - else: - module.fail_json(msg='Unable to set replica mode') - - else: - if module.check_mode or set_master_mode(r): - module.exit_json(changed=True, mode=mode) - else: - module.fail_json(msg='Unable to set master mode') - - # flush Command section ----------- - elif command == "flush": - db = module.params['db'] - mode = module.params['flush_mode'] - - # Check if we have all the data - if mode == "db": - if db is None: - module.fail_json(msg="In db mode the db number must be provided") - - # Connect and check - r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db) - try: - r.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "all": - if module.check_mode or flush(r): - module.exit_json(changed=True, flushed=True) - else: # 
Flush never fails :) - module.fail_json(msg="Unable to flush all databases") - - else: - if module.check_mode or flush(r, db): - module.exit_json(changed=True, flushed=True, db=db) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush '%d' database" % db) - elif command == 'config': - name = module.params['name'] - - try: # try to parse the value as if it were the memory size - if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()): - value = str(human_to_bytes(module.params['value'].upper())) - else: - value = module.params['value'] - except ValueError: - value = module.params['value'] - - r = redis.StrictRedis(host=login_host, port=login_port, password=login_password) - - try: - r.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - try: - old_value = r.config_get(name)[name] - except Exception as e: - module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc()) - changed = old_value != value - - if module.check_mode or not changed: - module.exit_json(changed=changed, name=name, value=value) - else: - try: - r.config_set(name, value) - except Exception as e: - module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=changed, name=name, value=value) - else: - module.fail_json(msg='A valid command must be provided') - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/redis_data.py b/plugins/modules/database/misc/redis_data.py deleted file mode 100644 index 88102b98b1..0000000000 --- a/plugins/modules/database/misc/redis_data.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redis_data -short_description: Set key value pairs in Redis -version_added: 3.7.0 -description: - - Set key value pairs in Redis database. -author: "Andreas Botzner (@paginabianca)" -options: - key: - description: - - Database key. - required: true - type: str - value: - description: - - Value that key should be set to. - required: false - type: str - expiration: - description: - - Expiration time in milliseconds. - Setting this flag will always result in a change in the database. - required: false - type: int - non_existing: - description: - - Only set key if it does not already exist. - required: false - type: bool - existing: - description: - - Only set key if it already exists. - required: false - type: bool - keep_ttl: - description: - - Retain the time to live associated with the key. - required: false - type: bool - state: - description: - - State of the key. 
- default: present - type: str - choices: - - present - - absent - -extends_documentation_fragment: - - community.general.redis.documentation - -seealso: - - module: community.general.redis_data_info - - module: community.general.redis -''' - -EXAMPLES = ''' -- name: Set key foo=bar on localhost with no username - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - value: bar - state: present - -- name: Set key foo=bar if non existing with expiration of 30s - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - value: bar - non_existing: true - expiration: 30000 - state: present - -- name: Set key foo=bar if existing and keep current TTL - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - value: bar - existing: true - keep_ttl: true - -- name: Set key foo=bar on redishost with custom ca-cert file - community.general.redis_data: - login_host: redishost - login_password: supersecret - login_user: someuser - validate_certs: true - ssl_ca_certs: /path/to/ca/certs - key: foo - value: bar - -- name: Delete key foo on localhost with no username - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - state: absent -''' - -RETURN = ''' -old_value: - description: Value of key before setting. - returned: on_success if state is C(present) and key exists in database. - type: str - sample: 'old_value_of_key' -value: - description: Value key was set to. - returned: on success if state is C(present). - type: str - sample: 'new_value_of_key' -msg: - description: A short message. - returned: always - type: str - sample: 'Set key: foo to bar' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) - - -def main(): - redis_auth_args = redis_auth_argument_spec() - module_args = dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=False), - expiration=dict(type='int', required=False), - non_existing=dict(type='bool', required=False), - existing=dict(type='bool', required=False), - keep_ttl=dict(type='bool', required=False), - state=dict(type='str', default='present', - choices=['present', 'absent']), - ) - module_args.update(redis_auth_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_if=[('state', 'present', ('value',))], - mutually_exclusive=[['non_existing', 'existing'], - ['keep_ttl', 'expiration']],) - fail_imports(module) - - redis = RedisAnsible(module) - - key = module.params['key'] - value = module.params['value'] - px = module.params['expiration'] - nx = module.params['non_existing'] - xx = module.params['existing'] - keepttl = module.params['keep_ttl'] - state = module.params['state'] - set_args = {'name': key, 'value': value, 'px': px, - 'nx': nx, 'xx': xx, 'keepttl': keepttl} - - result = {'changed': False} - - old_value = None - try: - old_value = redis.connection.get(key) - except Exception as e: - msg = 'Failed to get value of key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - if state == 'absent': - if module.check_mode: - if old_value is None: - msg = 'Key: {0} not present'.format(key) - result['msg'] = msg - module.exit_json(**result) - else: - msg = 'Deleted key: {0}'.format(key) - result['msg'] = msg - module.exit_json(**result) - 
try: - ret = redis.connection.delete(key) - if ret == 0: - msg = 'Key: {0} not present'.format(key) - result['msg'] = msg - module.exit_json(**result) - else: - msg = 'Deleted key: {0}'.format(key) - result['msg'] = msg - result['changed'] = True - module.exit_json(**result) - except Exception as e: - msg = 'Failed to delete key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - old_value = None - try: - old_value = redis.connection.get(key) - except Exception as e: - msg = 'Failed to get value of key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - result['old_value'] = old_value - if old_value == value and keepttl is not False and px is None: - msg = 'Key {0} already has desired value'.format(key) - result['msg'] = msg - result['value'] = value - module.exit_json(**result) - if module.check_mode: - result['msg'] = 'Set key: {0}'.format(key) - result['value'] = value - module.exit_json(**result) - try: - ret = redis.connection.set(**set_args) - if ret is None: - if nx: - msg = 'Could not set key: {0}. Key already present.'.format( - key) - else: - msg = 'Could not set key: {0}. Key not present.'.format(key) - result['msg'] = msg - module.fail_json(**result) - msg = 'Set key: {0}'.format(key) - result['msg'] = msg - result['changed'] = True - result['value'] = value - module.exit_json(**result) - except Exception as e: - msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/redis_data_incr.py b/plugins/modules/database/misc/redis_data_incr.py deleted file mode 100644 index 008cd183e9..0000000000 --- a/plugins/modules/database/misc/redis_data_incr.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redis_data_incr -short_description: Increment keys in Redis -version_added: 4.0.0 -description: - - Increment integers or float keys in Redis database and get new value. - - Default increment for all keys is 1. For specific increments use the - I(increment_int) and I(increment_float) options. - - When using I(check_mode) the module will try to calculate the value that - Redis would return. If the key is not present, 0.0 is used as value. -author: "Andreas Botzner (@paginabianca)" -options: - key: - description: - - Database key. - type: str - required: true - increment_int: - description: - - Integer amount to increment the key by. - required: false - type: int - increment_float: - description: - - Float amount to increment the key by. - - This only works with keys that contain float values - in their string representation. - type: float - required: false - - -extends_documentation_fragment: - - community.general.redis.documentation - -notes: - - For C(check_mode) to work, the specified I(redis_user) needs permission to - run the C(GET) command on the key, otherwise the module will fail.
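# (A hedged illustration, not part of the original notes: on Redis 6 or newer
#  the C(GET) permission mentioned above could be granted with an ACL rule
#  along the lines of
#  C(ACL SETUSER ansible on >secret ~foo +get +incr +incrby +incrbyfloat);
#  the user name, password and key pattern here are placeholders.)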
- -seealso: - - module: community.general.redis_set - - module: community.general.redis_data_info - - module: community.general.redis -''' - -EXAMPLES = ''' -- name: Increment integer key foo on localhost with no username and print new value - community.general.redis_data_incr: - login_host: localhost - login_password: supersecret - key: foo - increment_int: 1 - register: result -- name: Print new value - debug: - var: result.value - -- name: Increment float key foo by 20.4 - community.general.redis_data_incr: - login_host: redishost - login_user: redisuser - login_password: somepass - key: foo - increment_float: '20.4' -''' - -RETURN = ''' -value: - description: Incremented value of key - returned: on success - type: float - sample: '4039.4' -msg: - description: A short message. - returned: always - type: str - sample: 'Incremented key: foo by 20.4 to 65.9' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) - - -def main(): - redis_auth_args = redis_auth_argument_spec() - module_args = dict( - key=dict(type='str', required=True, no_log=False), - increment_int=dict(type='int', required=False), - increment_float=dict(type='float', required=False), - ) - module_args.update(redis_auth_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - mutually_exclusive=[['increment_int', 'increment_float']], - ) - fail_imports(module) - - redis = RedisAnsible(module) - key = module.params['key'] - increment_float = module.params['increment_float'] - increment_int = module.params['increment_int'] - increment = 1 - if increment_float is not None: - increment = increment_float - elif increment_int is not None: - increment = increment_int - - result = {'changed': False} - if module.check_mode: - value = 0.0 - try: - res = redis.connection.get(key) - if res is not None: - value = float(res) - except ValueError as e: - msg = 'Value: {0} of key: {1} is not incrementable(int or float)'.format( - res, key) - result['msg'] = msg - module.fail_json(**result) - except Exception as e: - msg = 'Failed to get value of key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - msg = 'Incremented key: {0} by {1} to {2}'.format( - key, increment, value + increment) - result['msg'] = msg - result['value'] = float(value + increment) - module.exit_json(**result) - - if increment_float is not None: - try: - value = redis.connection.incrbyfloat(key, increment) - msg = 'Incremented key: {0} by {1} to {2}'.format( - key, increment, value) - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True - module.exit_json(**result) - except Exception as e: - msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( - key, increment, str(e)) - result['msg'] = msg - module.fail_json(**result) - elif increment_int is not None: - try: - value = redis.connection.incrby(key, increment) - msg = 'Incremented key: {0} by {1} to {2}'.format( - key, increment, value) - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True - module.exit_json(**result) - except Exception as e: - msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( - key, increment, str(e)) - result['msg'] = msg - module.fail_json(**result) - else: - try: - value = redis.connection.incr(key) - msg = 'Incremented key: {0} to {1}'.format(key, value) - result['msg'] = msg - 
result['value'] = float(value) - result['changed'] = True - module.exit_json(**result) - except Exception as e: - msg = 'Failed to increment key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/redis_data_info.py b/plugins/modules/database/misc/redis_data_info.py deleted file mode 100644 index 866bda62d1..0000000000 --- a/plugins/modules/database/misc/redis_data_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redis_data_info -short_description: Get value of key in Redis database -version_added: 3.7.0 -description: - - Get value of keys in Redis database. -author: "Andreas Botzner (@paginabianca)" -options: - key: - description: - - Database key. - type: str - required: true - -extends_documentation_fragment: - - community.general.redis - -seealso: - - module: community.general.redis_info - - module: community.general.redis -''' - -EXAMPLES = ''' -- name: Get key foo=bar from localhost with no username - community.general.redis_data_info: - login_host: localhost - login_password: supersecret - key: foo - -- name: Get key foo=bar on redishost with custom ca-cert file - community.general.redis_data_info: - login_host: redishost - login_password: supersecret - login_user: someuser - validate_certs: true - ssl_ca_certs: /path/to/ca/certs - key: foo -''' - -RETURN = ''' -exists: - description: If the key exists in the database. - returned: on success - type: bool -value: - description: Value key was set to. - returned: if existing - type: str - sample: 'value_of_some_key' -msg: - description: A short message.
- returned: always - type: str - sample: 'Got key: foo with value: bar' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) - - -def main(): - redis_auth_args = redis_auth_argument_spec() - module_args = dict( - key=dict(type='str', required=True, no_log=False), - ) - module_args.update(redis_auth_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - ) - fail_imports(module) - - redis = RedisAnsible(module) - - key = module.params['key'] - result = {'changed': False} - - value = None - try: - value = redis.connection.get(key) - except Exception as e: - msg = 'Failed to get value of key "{0}" with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - if value is None: - msg = 'Key "{0}" does not exist in database'.format(key) - result['exists'] = False - else: - msg = 'Got key "{0}"'.format(key) - result['value'] = value - result['exists'] = True - result['msg'] = msg - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/redis_info.py b/plugins/modules/database/misc/redis_info.py deleted file mode 100644 index 9762b03c98..0000000000 --- a/plugins/modules/database/misc/redis_info.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: redis_info -short_description: Gather information about Redis servers -version_added: '0.2.0' -description: -- Gathers information and statistics about Redis servers. -options: - login_host: - description: - - The host running the database. - type: str - default: localhost - login_port: - description: - - The port to connect to. - type: int - default: 6379 - login_password: - description: - - The password used to authenticate with, when authentication is enabled for the Redis server. - type: str -notes: -- Requires the redis-py Python package on the remote host. You can - install it with pip (C(pip install redis)) or with a package manager. - U(https://github.com/andymccurdy/redis-py) -seealso: -- module: community.general.redis -requirements: [ redis ] -author: "Pavlo Bashynskyi (@levonet)" -''' - -EXAMPLES = r''' -- name: Get server information - community.general.redis_info: - register: result - -- name: Print server information - ansible.builtin.debug: - var: result.info -''' - -RETURN = r''' -info: - description: The default set of server information sections U(https://redis.io/commands/info). 
- returned: success - type: dict - sample: { - "active_defrag_hits": 0, - "active_defrag_key_hits": 0, - "active_defrag_key_misses": 0, - "active_defrag_misses": 0, - "active_defrag_running": 0, - "allocator_active": 932409344, - "allocator_allocated": 932062792, - "allocator_frag_bytes": 346552, - "allocator_frag_ratio": 1.0, - "allocator_resident": 947253248, - "allocator_rss_bytes": 14843904, - "allocator_rss_ratio": 1.02, - "aof_current_rewrite_time_sec": -1, - "aof_enabled": 0, - "aof_last_bgrewrite_status": "ok", - "aof_last_cow_size": 0, - "aof_last_rewrite_time_sec": -1, - "aof_last_write_status": "ok", - "aof_rewrite_in_progress": 0, - "aof_rewrite_scheduled": 0, - "arch_bits": 64, - "atomicvar_api": "atomic-builtin", - "blocked_clients": 0, - "client_recent_max_input_buffer": 4, - "client_recent_max_output_buffer": 0, - "cluster_enabled": 0, - "config_file": "", - "configured_hz": 10, - "connected_clients": 4, - "connected_slaves": 0, - "db0": { - "avg_ttl": 1945628530, - "expires": 16, - "keys": 3341411 - }, - "evicted_keys": 0, - "executable": "/data/redis-server", - "expired_keys": 9, - "expired_stale_perc": 1.72, - "expired_time_cap_reached_count": 0, - "gcc_version": "9.2.0", - "hz": 10, - "instantaneous_input_kbps": 0.0, - "instantaneous_ops_per_sec": 0, - "instantaneous_output_kbps": 0.0, - "keyspace_hits": 0, - "keyspace_misses": 0, - "latest_fork_usec": 0, - "lazyfree_pending_objects": 0, - "loading": 0, - "lru_clock": 11603632, - "master_repl_offset": 118831417, - "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e", - "master_replid2": "0000000000000000000000000000000000000000", - "maxmemory": 0, - "maxmemory_human": "0B", - "maxmemory_policy": "noeviction", - "mem_allocator": "jemalloc-5.1.0", - "mem_aof_buffer": 0, - "mem_clients_normal": 49694, - "mem_clients_slaves": 0, - "mem_fragmentation_bytes": 12355480, - "mem_fragmentation_ratio": 1.01, - "mem_not_counted_for_evict": 0, - "mem_replication_backlog": 1048576, - "migrate_cached_sockets": 0, - "multiplexing_api": "epoll", - "number_of_cached_scripts": 0, - "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64", - "process_id": 1, - "pubsub_channels": 0, - "pubsub_patterns": 0, - "rdb_bgsave_in_progress": 0, - "rdb_changes_since_last_save": 671, - "rdb_current_bgsave_time_sec": -1, - "rdb_last_bgsave_status": "ok", - "rdb_last_bgsave_time_sec": -1, - "rdb_last_cow_size": 0, - "rdb_last_save_time": 1588702236, - "redis_build_id": "a31260535f820267", - "redis_git_dirty": 0, - "redis_git_sha1": 0, - "redis_mode": "standalone", - "redis_version": "999.999.999", - "rejected_connections": 0, - "repl_backlog_active": 1, - "repl_backlog_first_byte_offset": 118707937, - "repl_backlog_histlen": 123481, - "repl_backlog_size": 1048576, - "role": "master", - "rss_overhead_bytes": -3051520, - "rss_overhead_ratio": 1.0, - "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4", - "second_repl_offset": 118830003, - "slave_expires_tracked_keys": 0, - "sync_full": 0, - "sync_partial_err": 0, - "sync_partial_ok": 0, - "tcp_port": 6379, - "total_commands_processed": 885, - "total_connections_received": 10, - "total_net_input_bytes": 802709255, - "total_net_output_bytes": 31754, - "total_system_memory": 135029538816, - "total_system_memory_human": "125.76G", - "uptime_in_days": 53, - "uptime_in_seconds": 4631778, - "used_cpu_sys": 4.668282, - "used_cpu_sys_children": 0.002191, - "used_cpu_user": 4.21088, - "used_cpu_user_children": 0.0, - "used_memory": 931908760, - "used_memory_dataset": 910774306, - "used_memory_dataset_perc": 
"97.82%", - "used_memory_human": "888.74M", - "used_memory_lua": 37888, - "used_memory_lua_human": "37.00K", - "used_memory_overhead": 21134454, - "used_memory_peak": 932015216, - "used_memory_peak_human": "888.84M", - "used_memory_peak_perc": "99.99%", - "used_memory_rss": 944201728, - "used_memory_rss_human": "900.46M", - "used_memory_scripts": 0, - "used_memory_scripts_human": "0B", - "used_memory_startup": 791264 - } -''' - -import traceback - -REDIS_IMP_ERR = None -try: - from redis import StrictRedis - HAS_REDIS_PACKAGE = True -except ImportError: - REDIS_IMP_ERR = traceback.format_exc() - HAS_REDIS_PACKAGE = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def redis_client(**client_params): - return StrictRedis(**client_params) - - -# Module execution. -def main(): - module = AnsibleModule( - argument_spec=dict( - login_host=dict(type='str', default='localhost'), - login_port=dict(type='int', default=6379), - login_password=dict(type='str', no_log=True), - ), - supports_check_mode=True, - ) - - if not HAS_REDIS_PACKAGE: - module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR) - - login_host = module.params['login_host'] - login_port = module.params['login_port'] - login_password = module.params['login_password'] - - # Connect and check - client = redis_client(host=login_host, port=login_port, password=login_password) - try: - client.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - info = client.info() - module.exit_json(changed=False, info=info) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/misc/riak.py b/plugins/modules/database/misc/riak.py deleted file mode 100644 index 4ee7b5b674..0000000000 --- a/plugins/modules/database/misc/riak.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Martin , Drew Kerrigan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: riak -short_description: This module handles some common Riak operations -description: - - This module can be used to join nodes to a cluster, check - the status of the cluster. -author: - - "James Martin (@jsmartin)" - - "Drew Kerrigan (@drewkerrigan)" -options: - command: - description: - - The command you would like to perform against the cluster. - choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] - type: str - config_dir: - description: - - The path to the riak configuration directory - default: /etc/riak - type: path - http_conn: - description: - - The ip address and port that is listening for Riak HTTP queries - default: 127.0.0.1:8098 - type: str - target_node: - description: - - The target node for certain operations (join, ping) - default: riak@127.0.0.1 - type: str - wait_for_handoffs: - description: - - Number of seconds to wait for handoffs to complete. - type: int - default: 0 - wait_for_ring: - description: - - Number of seconds to wait for all nodes to agree on the ring. - type: int - default: 0 - wait_for_service: - description: - - Waits for a riak service to come online before continuing. - choices: ['kv'] - type: str - validate_certs: - description: - - If C(no), SSL certificates will not be validated. 
This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' - -EXAMPLES = ''' -- name: "Join a Riak node to another node" - community.general.riak: - command: join - target_node: riak@10.1.1.1 - -- name: Wait for handoffs to finish. Use with async and poll. - community.general.riak: - wait_for_handoffs: yes - -- name: Wait for riak_kv service to start up - community.general.riak: - wait_for_service: kv -''' - -import json -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def ring_check(module, riak_admin_bin): - cmd = '%s ringready' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0 and 'TRUE All nodes agree on the ring' in out: - return True - else: - return False - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - command=dict(required=False, default=None, choices=[ - 'ping', 'kv_test', 'join', 'plan', 'commit']), - config_dir=dict(default='/etc/riak', type='path'), - http_conn=dict(required=False, default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1', required=False), - wait_for_handoffs=dict(default=0, type='int'), - wait_for_ring=dict(default=0, type='int'), - wait_for_service=dict( - required=False, default=None, choices=['kv']), - validate_certs=dict(default=True, type='bool')) - ) - - command = module.params.get('command') - http_conn = module.params.get('http_conn') - target_node = module.params.get('target_node') - wait_for_handoffs = module.params.get('wait_for_handoffs') - wait_for_ring = module.params.get('wait_for_ring') - wait_for_service = module.params.get('wait_for_service') - - # make sure riak commands are on the path - riak_bin = module.get_bin_path('riak') - riak_admin_bin = module.get_bin_path('riak-admin') - - timeout = time.time() + 120 - while True: - if time.time() > timeout: - module.fail_json(msg='Timeout, could not fetch Riak stats.') - (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5) - if info['status'] == 200: - stats_raw = response.read() - break - time.sleep(5) - - # here we attempt to parse those stats - try: - stats = json.loads(stats_raw) - except Exception: - module.fail_json(msg='Could not parse Riak stats.') - - node_name = stats['nodename'] - nodes = stats['ring_members'] - ring_size = stats['ring_creation_size'] - rc, out, err = module.run_command([riak_bin, 'version']) - version = out.strip() - - result = dict(node_name=node_name, - nodes=nodes, - ring_size=ring_size, - version=version) - - if command == 'ping': - cmd = '%s ping %s' % (riak_bin, target_node) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['ping'] = out - else: - module.fail_json(msg=out) - - elif command == 'kv_test': - cmd = '%s test' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['kv_test'] = out - else: - module.fail_json(msg=out) - - elif command == 'join': - if nodes.count(node_name) == 1 and len(nodes) > 1: - result['join'] = 'Node is already in cluster or staged to be in cluster.'
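- # otherwise the node is not in the ring yet: stage a cluster join to the target node via riak-admin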
- else: - cmd = '%s cluster join %s' % (riak_admin_bin, target_node) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['join'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'plan': - cmd = '%s cluster plan' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['plan'] = out - if 'Staged Changes' in out: - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'commit': - cmd = '%s cluster commit' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['commit'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - -# this could take a while; recommended to run in async mode - if wait_for_handoffs: - timeout = time.time() + wait_for_handoffs - while True: - cmd = '%s transfers' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if 'No transfers active' in out: - result['handoffs'] = 'No transfers active.' - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for handoffs.') - - if wait_for_service: - cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name] - rc, out, err = module.run_command(cmd) - result['service'] = out - - if wait_for_ring: - timeout = time.time() + wait_for_ring - while True: - if ring_check(module, riak_admin_bin): - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for nodes to agree on ring.') - - result['ring_ready'] = ring_check(module, riak_admin_bin) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/mssql/mssql_db.py b/plugins/modules/database/mssql/mssql_db.py deleted file mode 100644 index e6c5f183fa..0000000000 --- a/plugins/modules/database/mssql/mssql_db.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Vedit Firat Arig -# Outline and parts are reused from Mark Theunissen's mysql_db module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: mssql_db -short_description: Add or remove MSSQL databases from a remote host. -description: - - Add or remove MSSQL databases from a remote host. -options: - name: - description: - - Name of the database to add or remove - required: true - aliases: [ db ] - type: str - login_user: - description: - - The username used to authenticate with - type: str - login_password: - description: - - The password used to authenticate with - type: str - login_host: - description: - - Host running the database - type: str - required: true - login_port: - description: - - Port of the MSSQL server. Requires login_host to be defined as something other than localhost if login_port is used - default: '1433' - type: str - state: - description: - - The database state - default: present - choices: [ "present", "absent", "import" ] - type: str - target: - description: - - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - (C(.sql)) files are supported. - type: str - autocommit: - description: - - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed - within a transaction. - type: bool - default: 'no' -notes: - - Requires the pymssql Python package on the remote host.
For Ubuntu, this - is as easy as pip install pymssql (See M(ansible.builtin.pip).) -requirements: - - python >= 2.7 - - pymssql -author: Vedit Firat Arig (@vedit) -''' - -EXAMPLES = ''' -- name: Create a new database with name 'jackdata' - community.general.mssql_db: - name: jackdata - state: present - -# Copy database dump file to remote host and restore it to database 'my_db' -- name: Copy database dump file to remote host - ansible.builtin.copy: - src: dump.sql - dest: /tmp - -- name: Restore the dump file to database 'my_db' - community.general.mssql_db: - name: my_db - state: import - target: /tmp/dump.sql -''' - -RETURN = ''' -# -''' - -import os -import traceback - -PYMSSQL_IMP_ERR = None -try: - import pymssql -except ImportError: - PYMSSQL_IMP_ERR = traceback.format_exc() - mssql_found = False -else: - mssql_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def db_exists(conn, cursor, db): - cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db) - conn.commit() - return bool(cursor.rowcount) - - -def db_create(conn, cursor, db): - cursor.execute("CREATE DATABASE [%s]" % db) - return db_exists(conn, cursor, db) - - -def db_delete(conn, cursor, db): - try: - cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db) - except Exception: - pass - cursor.execute("DROP DATABASE [%s]" % db) - return not db_exists(conn, cursor, db) - - -def db_import(conn, cursor, module, db, target): - if os.path.isfile(target): - with open(target, 'r') as backup: - sqlQuery = "USE [%s]\n" % db - for line in backup: - if line is None: - break - elif line.startswith('GO'): - cursor.execute(sqlQuery) - sqlQuery = "USE [%s]\n" % db - else: - sqlQuery += line - cursor.execute(sqlQuery) - conn.commit() - return 0, "import successful", "" - else: - return 1, "cannot find target file", "cannot find target file" - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['db']), - login_user=dict(default=''), - login_password=dict(default='', no_log=True), - login_host=dict(required=True), - login_port=dict(default='1433'), - target=dict(default=None), - autocommit=dict(type='bool', default=False), - state=dict( - default='present', choices=['present', 'absent', 'import']) - ) - ) - - if not mssql_found: - module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR) - - db = module.params['name'] - state = module.params['state'] - autocommit = module.params['autocommit'] - target = module.params["target"] - - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - - login_querystring = login_host - if login_port != "1433": - login_querystring = "%s:%s" % (login_host, login_port) - - if login_user != "" and login_password == "": - module.fail_json(msg="when supplying the login_user argument, login_password must also be provided") - - try: - conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') - cursor = conn.cursor() - except Exception as e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") - - conn.autocommit(True) - changed = False - - if
db_exists(conn, cursor, db): - if state == "absent": - try: - changed = db_delete(conn, cursor, db) - except Exception as e: - module.fail_json(msg="error deleting database: " + str(e)) - elif state == "import": - conn.autocommit(autocommit) - rc, stdout, stderr = db_import(conn, cursor, module, db, target) - - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - else: - if state == "present": - try: - changed = db_create(conn, cursor, db) - except Exception as e: - module.fail_json(msg="error creating database: " + str(e)) - elif state == "import": - try: - changed = db_create(conn, cursor, db) - except Exception as e: - module.fail_json(msg="error creating database: " + str(e)) - - conn.autocommit(autocommit) - rc, stdout, stderr = db_import(conn, cursor, module, db, target) - - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - - module.exit_json(changed=changed, db=db) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/mssql/mssql_script.py b/plugins/modules/database/mssql/mssql_script.py deleted file mode 100644 index bb80607ccf..0000000000 --- a/plugins/modules/database/mssql/mssql_script.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python - -# Copyright: (c) 2021, Kris Budde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: mssql_script -short_description: Execute SQL scripts on a MSSQL database -description: - - Execute SQL scripts on a MSSQL database. -options: - name: - description: Database to run the script against. - aliases: [ db ] - default: '' - type: str - login_user: - description: The username used to authenticate with. - type: str - login_password: - description: The password used to authenticate with. - type: str - login_host: - description: Host running the database. - type: str - required: true - login_port: - description: Port of the MSSQL server. - default: 1433 - type: int - script: - description: The SQL script to be executed. - required: true - type: str - output: - description: - - With C(default) each row is returned as a list of values. See C(query_results). - - With C(dict) each row is returned as a dictionary keyed by column name. See C(query_results_dict). - choices: [ "dict", "default" ] - default: 'default' - type: str - params: - description: Parameters passed to the script as SQL parameters. - type: dict -notes: - - Requires the pymssql Python package on the remote host. -requirements: - - python >= 2.7 - - pymssql - -author: - - Kris Budde (@kbudde) -''' - -EXAMPLES = r''' -- name: Check DB connection - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - db: master - script: "SELECT 1" - -- name: Query with parameter - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - script: | - SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s - params: - dbname: msdb - register: result_params -- assert: - that: - - result_params.query_results[0][0][0][0] == 'msdb' - - result_params.query_results[0][0][0][1] == 'ONLINE' - -- name: two batches with default output - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - script: | - SELECT 'Batch 0 - Select 0' - SELECT 'Batch 0 - Select 1' - GO - SELECT 'Batch 1 - Select 0' - register: result_batches -- assert: - that: - - result_batches.query_results | length == 2 # two batch results - - result_batches.query_results[0] | length == 2 # two selects in first batch - - result_batches.query_results[0][0] | length == 1 # one row in first select - - result_batches.query_results[0][0][0] | length == 1 # one column in first row - - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values.
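-# The assertions above index query_results as [batch][select][row][column]; the dict output variant below keys each row by column name instead.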
- -- name: two batches with dict output - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - output: dict - script: | - SELECT 'Batch 0 - Select 0' as b0s0 - SELECT 'Batch 0 - Select 1' as b0s1 - GO - SELECT 'Batch 1 - Select 0' as b1s0 - register: result_batches_dict -- assert: - that: - - result_batches_dict.query_results_dict | length == 2 # two batch results - - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch - - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select - - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row -''' - -RETURN = r''' -query_results: - description: List of batches (queries separated by C(GO) keyword). - type: list - elements: list - returned: success and I(output=default) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - - Use the C(GO) keyword in I(script) to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_value: - description: - - List of column values. - - Any non-standard JSON type is converted to string. - type: list - example: ["Batch 0 - Select 0"] - returned: success, if output is default -query_results_dict: - description: List of batches (queries separated by C(GO) keyword). - type: list - elements: list - returned: success and I(output=dict) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - Use 'GO' keyword to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_dict: - description: - - Dictionary of column names and values. - - Any non-standard JSON type is converted to string. 
- type: dict - example: {"col_name": "Batch 0 - Select 0"} - returned: success, if output is dict -''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import traceback -import json -PYMSSQL_IMP_ERR = None -try: - import pymssql -except ImportError: - PYMSSQL_IMP_ERR = traceback.format_exc() - MSSQL_FOUND = False -else: - MSSQL_FOUND = True - - -def clean_output(o): - return str(o) - - -def run_module(): - module_args = dict( - name=dict(required=False, aliases=['db'], default=''), - login_user=dict(), - login_password=dict(no_log=True), - login_host=dict(required=True), - login_port=dict(type='int', default=1433), - script=dict(required=True), - output=dict(default='default', choices=['dict', 'default']), - params=dict(type='dict'), - ) - - result = dict( - changed=False, - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - if not MSSQL_FOUND: - module.fail_json(msg=missing_required_lib( - 'pymssql'), exception=PYMSSQL_IMP_ERR) - - db = module.params['name'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - script = module.params['script'] - output = module.params['output'] - sql_params = module.params['params'] - - login_querystring = login_host - if login_port != 1433: - login_querystring = "%s:%s" % (login_host, login_port) - - if login_user is not None and login_password is None: - module.fail_json( - msg="when supplying login_user argument, login_password must also be provided") - - try: - conn = pymssql.connect( - user=login_user, password=login_password, host=login_querystring, database=db) - cursor = conn.cursor() - except Exception as e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") - - conn.autocommit(True) - - query_results_key = 'query_results' - if output == 'dict': - cursor = conn.cursor(as_dict=True) - query_results_key = 'query_results_dict' - - queries = script.split('\nGO\n') - result['changed'] = True - if module.check_mode: - module.exit_json(**result) - - query_results = [] - try: - for query in queries: - cursor.execute(query, sql_params) - qry_result = [] - rows = cursor.fetchall() - while rows: - qry_result.append(rows) - rows = cursor.fetchall() - query_results.append(qry_result) - except Exception as e: - return module.fail_json(msg="query failed", query=query, error=str(e), **result) - - # ensure that the result is json serializable - qry_results = json.loads(json.dumps(query_results, default=clean_output)) - - result[query_results_key] = qry_results - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py deleted file mode 100644 index ac026d5adc..0000000000 --- a/plugins/modules/database/saphana/hana_query.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: hana_query 
-short_description: Execute SQL on HANA -version_added: 3.2.0 -description: This module executes SQL statements on HANA with hdbsql. -options: - sid: - description: The system ID. - type: str - required: true - instance: - description: The instance number. - type: str - required: true - user: - description: A dedicated username. The user could also be in hdbuserstore. Defaults to C(SYSTEM). - type: str - default: SYSTEM - userstore: - description: If C(true) the user must be in hdbuserstore. - type: bool - default: false - version_added: 3.5.0 - password: - description: - - The password to connect to the database. - - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should - be used whenever possible, as command line arguments can be seen by other users - on the same machine." - type: str - autocommit: - description: Autocommit the statement. - type: bool - default: true - host: - description: The host IP address. The port can be defined as well. - type: str - database: - description: Define the database on which to connect. - type: str - encrypted: - description: Use encrypted connection. Defaults to C(false). - type: bool - default: false - filepath: - description: - - One or more files each containing one SQL query to run. - - Must be a string or list containing strings. - type: list - elements: path - query: - description: - - SQL query to run. - - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. - It is better to supply a one-element list instead to avoid mangled input. - type: list - elements: str -notes: - - Does not support C(check_mode). -author: - - Rainer Leber (@rainerleber) -''' - -EXAMPLES = r''' -- name: Simple select query - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: "select user_name from users" - -- name: Run several queries - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: - - "select user_name from users;" - - select * from SYSTEM; - host: "localhost" - autocommit: False - -- name: Run several queries from file - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - filepath: - - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt - - /tmp/HANA.txt - host: "localhost" - -- name: Run several queries from user store - community.general.hana_query: - sid: "hdb" - instance: "01" - user: hdbstoreuser - userstore: true - query: - - "select user_name from users;" - - select * from users; - autocommit: False -''' - -RETURN = r''' -query_result: - description: List containing results of all queries executed (one sublist for every query).
- returned: on success - type: list - elements: list - sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] -''' - -import csv -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import StringIO -from ansible.module_utils.common.text.converters import to_native - - -def csv_to_list(rawcsv): - reader_raw = csv.DictReader(StringIO(rawcsv)) - reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] - return list(reader) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - sid=dict(type='str', required=True), - instance=dict(type='str', required=True), - encrypted=dict(type='bool', default=False), - host=dict(type='str', required=False), - user=dict(type='str', default="SYSTEM"), - userstore=dict(type='bool', default=False), - password=dict(type='str', no_log=True), - database=dict(type='str', required=False), - query=dict(type='list', elements='str', required=False), - filepath=dict(type='list', elements='path', required=False), - autocommit=dict(type='bool', default=True), - ), - required_one_of=[('query', 'filepath')], - required_if=[('userstore', False, ['password'])], - supports_check_mode=False, - ) - rc, out, err, out_raw = [0, [], "", ""] - - params = module.params - - sid = (params['sid']).upper() - instance = params['instance'] - user = params['user'] - userstore = params['userstore'] - password = params['password'] - autocommit = params['autocommit'] - host = params['host'] - database = params['database'] - encrypted = params['encrypted'] - - filepath = params['filepath'] - query = params['query'] - - bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) - - try: - command = [module.get_bin_path(bin_path, required=True)] - except Exception as e: - module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) - - if encrypted is True: - command.extend(['-attemptencrypt']) - if autocommit is False: - command.extend(['-z']) - if host is not None: - command.extend(['-n', host]) - if database is not None: - command.extend(['-d', database]) - # -x Suppresses additional output, such as the number of selected rows in a result set. - if userstore: - command.extend(['-x', '-U', user]) - else: - command.extend(['-x', '-i', instance, '-u', user, '-p', password]) - - if filepath is not None: - command.extend(['-I']) - for p in filepath: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, - # iterates through files and appends the output to var out. - query_command = command + [p] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - if query is not None: - for q in query: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", - # iterates through multiple commands and appends the output to var out.
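- # csv_to_list() parses the CSV stdout of each hdbsql call into a list of row dicts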
- query_command = command + [q] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - changed = True - - module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/vertica/vertica_configuration.py b/plugins/modules/database/vertica/vertica_configuration.py deleted file mode 100644 index b210e3f6f0..0000000000 --- a/plugins/modules/database/vertica/vertica_configuration.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_configuration -short_description: Updates Vertica configuration parameters. -description: - - Updates Vertica configuration parameters. -options: - parameter: - description: - - Name of the parameter to update. - required: true - aliases: [name] - type: str - value: - description: - - Value of the parameter to be set. - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: '5433' - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Updating failovertostandbyafter - community.general.vertica_configuration: name=failovertostandbyafter value='8 hours' -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_configuration_facts(cursor, parameter_name=''): - facts = {} - cursor.execute(""" - select c.parameter_name, c.current_value, c.default_value - from configuration_parameters c - where c.node_name = 'ALL' - and (? = '' or c.parameter_name ilike ?)
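- -- an empty bind value matches all parameters; otherwise ILIKE filters the name case-insensitively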
- """, parameter_name, parameter_name) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.parameter_name.lower()] = { - 'parameter_name': row.parameter_name, - 'current_value': row.current_value, - 'default_value': row.default_value} - return facts - - -def check(configuration_facts, parameter_name, current_value): - parameter_key = parameter_name.lower() - if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): - return False - return True - - -def present(configuration_facts, cursor, parameter_name, current_value): - parameter_key = parameter_name.lower() - changed = False - if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): - cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) - changed = True - if changed: - configuration_facts.update(get_configuration_facts(cursor, parameter_name)) - return changed - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - parameter=dict(required=True, aliases=['name']), - value=dict(default=None), - db=dict(default=None), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - parameter_name = module.params['parameter'] - current_value = module.params['value'] - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)), - exception=traceback.format_exc()) - - try: - configuration_facts = get_configuration_facts(cursor) - if module.check_mode: - changed = not check(configuration_facts, parameter_name, current_value) - else: - try: - changed = present(configuration_facts, cursor, parameter_name, current_value) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/vertica/vertica_info.py b/plugins/modules/database/vertica/vertica_info.py deleted file mode 100644 index feaebecbdc..0000000000 --- a/plugins/modules/database/vertica/vertica_info.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_info -short_description: Gathers Vertica database facts. -description: - - Gathers Vertica database information. - - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)! -options: - cluster: - description: - - Name of the cluster running the schema. - default: localhost - type: str - port: - description: - Database port to connect to. - default: 5433 - type: str - db: - description: - - Name of the database running the schema. - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Gathering vertica facts - community.general.vertica_info: db=db_name - register: result - -- name: Print schemas - ansible.builtin.debug: - msg: "{{ result.vertica_schemas }}" -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - -# module specific functions - - -def get_schema_facts(cursor, schema=''): - facts = {} - cursor.execute(""" - select schema_name, schema_owner, create_time - from schemata - where not is_system_schema and schema_name not in ('public') - and (? = '' or schema_name ilike ?) - """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.schema_name.lower()] = { - 'name': row.schema_name, - 'owner': row.schema_owner, - 'create_time': str(row.create_time), - 'usage_roles': [], - 'create_roles': []} - cursor.execute(""" - select g.object_name as schema_name, r.name as role_name, - lower(g.privileges_description) privileges_description - from roles r join grants g - on g.grantee = r.name and g.object_type='SCHEMA' - and g.privileges_description like '%USAGE%' - and g.grantee not in ('public', 'dbadmin') - and (? = '' or g.object_name ilike ?) 
- """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - schema_key = row.schema_name.lower() - if 'create' in row.privileges_description: - facts[schema_key]['create_roles'].append(row.role_name) - else: - facts[schema_key]['usage_roles'].append(row.role_name) - return facts - - -def get_user_facts(cursor, user=''): - facts = {} - cursor.execute(""" - select u.user_name, u.is_locked, u.lock_time, - p.password, p.acctexpired as is_expired, - u.profile_name, u.resource_pool, - u.all_roles, u.default_roles - from users u join password_auditor p on p.user_id = u.user_id - where not u.is_super_user - and (? = '' or u.user_name ilike ?) - """, user, user) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - user_key = row.user_name.lower() - facts[user_key] = { - 'name': row.user_name, - 'locked': str(row.is_locked), - 'password': row.password, - 'expired': str(row.is_expired), - 'profile': row.profile_name, - 'resource_pool': row.resource_pool, - 'roles': [], - 'default_roles': []} - if row.is_locked: - facts[user_key]['locked_time'] = str(row.lock_time) - if row.all_roles: - facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') - if row.default_roles: - facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') - return facts - - -def get_role_facts(cursor, role=''): - facts = {} - cursor.execute(""" - select r.name, r.assigned_roles - from roles r - where (? = '' or r.name ilike ?) - """, role, role) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - role_key = row.name.lower() - facts[role_key] = { - 'name': row.name, - 'assigned_roles': []} - if row.assigned_roles: - facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') - return facts - - -def get_configuration_facts(cursor, parameter=''): - facts = {} - cursor.execute(""" - select c.parameter_name, c.current_value, c.default_value - from configuration_parameters c - where c.node_name = 'ALL' - and (? = '' or c.parameter_name ilike ?) 
- """, parameter, parameter) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.parameter_name.lower()] = { - 'parameter_name': row.parameter_name, - 'current_value': row.current_value, - 'default_value': row.default_value} - return facts - - -def get_node_facts(cursor, schema=''): - facts = {} - cursor.execute(""" - select node_name, node_address, export_address, node_state, node_type, - catalog_path - from nodes - """) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.node_address] = { - 'node_name': row.node_name, - 'export_address': row.export_address, - 'node_state': row.node_state, - 'node_type': row.node_type, - 'catalog_path': row.catalog_path} - return facts - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - cluster=dict(default='localhost'), - port=dict(default='5433'), - db=dict(default=None), - login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - db = '' - if module.params['db']: - db = module.params['db'] - - try: - dsn = ( - "Driver=Vertica;" - "Server=%s;" - "Port=%s;" - "Database=%s;" - "User=%s;" - "Password=%s;" - "ConnectionLoadBalance=%s" - ) % (module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc()) - - try: - schema_facts = get_schema_facts(cursor) - user_facts = get_user_facts(cursor) - role_facts = get_role_facts(cursor) - configuration_facts = get_configuration_facts(cursor) - node_facts = get_node_facts(cursor) - - module.exit_json(changed=False, - vertica_schemas=schema_facts, - vertica_users=user_facts, - vertica_roles=role_facts, - vertica_configuration=configuration_facts, - vertica_nodes=node_facts) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/vertica/vertica_role.py b/plugins/modules/database/vertica/vertica_role.py deleted file mode 100644 index 06dd218ed0..0000000000 --- a/plugins/modules/database/vertica/vertica_role.py +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_role -short_description: Adds or removes Vertica database roles and assigns roles to them. -description: - - Adds or removes Vertica database role and, optionally, assign other roles. -options: - role: - description: - - Name of the role to add or remove. - required: true - type: str - aliases: ['name'] - assigned_roles: - description: - - Comma separated list of roles to assign to the role. - aliases: ['assigned_role'] - type: str - state: - description: - - Whether to create C(present), drop C(absent) or lock C(locked) a role. 
- choices: ['present', 'absent'] - default: present - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: 5433 - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Creating a new vertica role - community.general.vertica_role: name=role_name db=db_name state=present - -- name: Creating a new vertica role with other role assigned - community.general.vertica_role: name=role_name assigned_role=other_role_name state=present -""" -import traceback - -PYODBC_IMP_ERR = None
try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_role_facts(cursor, role=''): - facts = {} - cursor.execute(""" - select r.name, r.assigned_roles - from roles r - where (? = '' or r.name ilike ?)
- """, role, role) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - role_key = row.name.lower() - facts[role_key] = { - 'name': row.name, - 'assigned_roles': []} - if row.assigned_roles: - facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') - return facts - - -def update_roles(role_facts, cursor, role, - existing, required): - for assigned_role in set(existing) - set(required): - cursor.execute("revoke {0} from {1}".format(assigned_role, role)) - for assigned_role in set(required) - set(existing): - cursor.execute("grant {0} to {1}".format(assigned_role, role)) - - -def check(role_facts, role, assigned_roles): - role_key = role.lower() - if role_key not in role_facts: - return False - if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']): - return False - return True - - -def present(role_facts, cursor, role, assigned_roles): - role_key = role.lower() - if role_key not in role_facts: - cursor.execute("create role {0}".format(role)) - update_roles(role_facts, cursor, role, [], assigned_roles) - role_facts.update(get_role_facts(cursor, role)) - return True - else: - changed = False - if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])): - update_roles(role_facts, cursor, role, - role_facts[role_key]['assigned_roles'], assigned_roles) - changed = True - if changed: - role_facts.update(get_role_facts(cursor, role)) - return changed - - -def absent(role_facts, cursor, role, assigned_roles): - role_key = role.lower() - if role_key in role_facts: - update_roles(role_facts, cursor, role, - role_facts[role_key]['assigned_roles'], []) - cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) - del role_facts[role_key] - return True - else: - return False - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - role=dict(required=True, aliases=['name']), - assigned_roles=dict(default=None, aliases=['assigned_role']), - state=dict(default='present', choices=['absent', 'present']), - db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - role = module.params['role'] - assigned_roles = [] - if module.params['assigned_roles']: - assigned_roles = module.params['assigned_roles'].split(',') - assigned_roles = filter(None, assigned_roles) - state = module.params['state'] - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e))) - - try: - role_facts = get_role_facts(cursor) - if module.check_mode: - changed = not check(role_facts, role, assigned_roles) - elif state == 'absent': - try: - changed = absent(role_facts, cursor, role, assigned_roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state == 'present': - try: - changed = 
present(role_facts, cursor, role, assigned_roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/vertica/vertica_schema.py b/plugins/modules/database/vertica/vertica_schema.py deleted file mode 100644 index 749234add0..0000000000 --- a/plugins/modules/database/vertica/vertica_schema.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_schema -short_description: Adds or removes Vertica database schema and roles. -description: - - Adds or removes a Vertica database schema and, optionally, roles - with schema access privileges. - - A schema will not be removed until all the objects have been dropped. - - In such a situation, if the module tries to remove the schema, it - will fail and only remove roles created for the schema if they have - no dependencies. -options: - schema: - description: - - Name of the schema to add or remove. - required: true - aliases: ['name'] - type: str - usage_roles: - description: - - Comma separated list of roles to create and grant usage access to the schema. - aliases: ['usage_role'] - type: str - create_roles: - description: - - Comma separated list of roles to create and grant usage and create access to the schema. - aliases: ['create_role'] - type: str - owner: - description: - - Name of the user to set as owner of the schema. - type: str - state: - description: - - Whether to create C(present) or drop C(absent) a schema. - default: present - choices: ['present', 'absent'] - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: 5433 - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
-requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Creating a new vertica schema - community.general.vertica_schema: name=schema_name db=db_name state=present - -- name: Creating a new schema with specific schema owner - community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present - -- name: Creating a new schema with roles - community.general.vertica_schema: - name=schema_name - create_roles=schema_name_all - usage_roles=schema_name_ro,schema_name_rw - db=db_name - state=present -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_schema_facts(cursor, schema=''): - facts = {} - cursor.execute(""" - select schema_name, schema_owner, create_time - from schemata - where not is_system_schema and schema_name not in ('public', 'TxtIndex') - and (? = '' or schema_name ilike ?) - """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.schema_name.lower()] = { - 'name': row.schema_name, - 'owner': row.schema_owner, - 'create_time': str(row.create_time), - 'usage_roles': [], - 'create_roles': []} - cursor.execute(""" - select g.object_name as schema_name, r.name as role_name, - lower(g.privileges_description) privileges_description - from roles r join grants g - on g.grantee_id = r.role_id and g.object_type='SCHEMA' - and g.privileges_description like '%USAGE%' - and g.grantee not in ('public', 'dbadmin') - and (? = '' or g.object_name ilike ?) 
- """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - schema_key = row.schema_name.lower() - if 'create' in row.privileges_description: - facts[schema_key]['create_roles'].append(row.role_name) - else: - facts[schema_key]['usage_roles'].append(row.role_name) - return facts - - -def update_roles(schema_facts, cursor, schema, - existing, required, - create_existing, create_required): - for role in set(existing + create_existing) - set(required + create_required): - cursor.execute("drop role {0} cascade".format(role)) - for role in set(create_existing) - set(create_required): - cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) - for role in set(required + create_required) - set(existing + create_existing): - cursor.execute("create role {0}".format(role)) - cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) - for role in set(create_required) - set(create_existing): - cursor.execute("grant create on schema {0} to {1}".format(schema, role)) - - -def check(schema_facts, schema, usage_roles, create_roles, owner): - schema_key = schema.lower() - if schema_key not in schema_facts: - return False - if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): - return False - if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']): - return False - if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): - return False - return True - - -def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): - schema_key = schema.lower() - if schema_key not in schema_facts: - query_fragments = ["create schema {0}".format(schema)] - if owner: - query_fragments.append("authorization {0}".format(owner)) - cursor.execute(' '.join(query_fragments)) - update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) - schema_facts.update(get_schema_facts(cursor, schema)) - return True - else: - changed = False - if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): - raise NotSupportedError(( - "Changing schema owner is not supported. " - "Current owner: {0}." 
- ).format(schema_facts[schema_key]['owner'])) - if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \ - sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): - - update_roles(schema_facts, cursor, schema, - schema_facts[schema_key]['usage_roles'], usage_roles, - schema_facts[schema_key]['create_roles'], create_roles) - changed = True - if changed: - schema_facts.update(get_schema_facts(cursor, schema)) - return changed - - -def absent(schema_facts, cursor, schema, usage_roles, create_roles): - schema_key = schema.lower() - if schema_key in schema_facts: - update_roles(schema_facts, cursor, schema, - schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) - try: - cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) - except pyodbc.Error: - raise CannotDropError("Dropping schema failed due to dependencies.") - del schema_facts[schema_key] - return True - else: - return False - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - schema=dict(required=True, aliases=['name']), - usage_roles=dict(aliases=['usage_role']), - create_roles=dict(aliases=['create_role']), - owner=dict(), - state=dict(default='present', choices=['absent', 'present']), - db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - schema = module.params['schema'] - usage_roles = [] - if module.params['usage_roles']: - usage_roles = module.params['usage_roles'].split(',') - usage_roles = filter(None, usage_roles) - create_roles = [] - if module.params['create_roles']: - create_roles = module.params['create_roles'].split(',') - create_roles = filter(None, create_roles) - owner = module.params['owner'] - state = module.params['state'] - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e))) - - try: - schema_facts = get_schema_facts(cursor) - if module.check_mode: - changed = not check(schema_facts, schema, usage_roles, create_roles, owner) - elif state == 'absent': - try: - changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state == 'present': - try: - changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - 
module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/database/vertica/vertica_user.py b/plugins/modules/database/vertica/vertica_user.py
deleted file mode 100644
index fed3a2a56f..0000000000
--- a/plugins/modules/database/vertica/vertica_user.py
+++ /dev/null
@@ -1,385 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: vertica_user
-short_description: Adds or removes Vertica database users and assigns roles.
-description:
-  - Adds or removes Vertica database user and, optionally, assigns roles.
-  - A user will not be removed until all the dependencies have been dropped.
-  - In such a situation, if the module tries to remove the user it
-    will fail and only remove roles granted to the user.
-options:
-  user:
-    description:
-      - Name of the user to add or remove.
-    required: true
-    type: str
-    aliases: ['name']
-  profile:
-    description:
-      - Sets the user's profile.
-    type: str
-  resource_pool:
-    description:
-      - Sets the user's resource pool.
-    type: str
-  password:
-    description:
-      - The user's password encrypted by the MD5 algorithm.
-      - The password must be generated with the format C("md5" + md5[password + username]),
-        resulting in a total of 35 characters. An easy way to do this is by querying
-        the Vertica database with select 'md5'||md5('<password><username>').
-    type: str
-  expired:
-    description:
-      - Sets the user's password expiration.
-    type: bool
-  ldap:
-    description:
-      - Set to true if users are authenticated via LDAP.
-      - The user will be created with password expired and set to I($ldap$).
-    type: bool
-  roles:
-    description:
-      - Comma separated list of roles to assign to the user.
-    aliases: ['role']
-    type: str
-  state:
-    description:
-      - Whether to create C(present), drop C(absent) or lock C(locked) a user.
-    choices: ['present', 'absent', 'locked']
-    default: present
-    type: str
-  db:
-    description:
-      - Name of the Vertica database.
-    type: str
-  cluster:
-    description:
-      - Name of the Vertica cluster.
-    default: localhost
-    type: str
-  port:
-    description:
-      - Vertica cluster port to connect to.
-    default: 5433
-    type: str
-  login_user:
-    description:
-      - The username used to authenticate with.
-    default: dbadmin
-    type: str
-  login_password:
-    description:
-      - The password used to authenticate with.
-    type: str
-notes:
-  - The default authentication assumes that you are either logging in as or sudo'ing
-    to the C(dbadmin) account on the host.
-  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
-    that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
-  - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
-    to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
-    and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
-    to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
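The O(password) description in the documentation above spells out an exact recipe, so a minimal sketch of producing such a pre-hash outside the database may help. It assumes only Python's standard C(hashlib); the credentials are illustrative placeholders.

```python
# Sketch: build the pre-hashed Vertica password described above,
# that is "md5" + md5(password + username), 35 characters in total.
import hashlib


def vertica_md5_password(password, username):
    digest = hashlib.md5((password + username).encode("utf-8")).hexdigest()
    return "md5" + digest  # 3 + 32 = 35 characters


print(vertica_md5_password("secret", "user_name"))
```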
-requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Creating a new vertica user with password - community.general.vertica_user: name=user_name password=md5 db=db_name state=present - -- name: Creating a new vertica user authenticated via ldap with roles assigned - community.general.vertica_user: - name=user_name - ldap=true - db=db_name - roles=schema_name_ro - state=present -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_user_facts(cursor, user=''): - facts = {} - cursor.execute(""" - select u.user_name, u.is_locked, u.lock_time, - p.password, p.acctexpired as is_expired, - u.profile_name, u.resource_pool, - u.all_roles, u.default_roles - from users u join password_auditor p on p.user_id = u.user_id - where not u.is_super_user - and (? = '' or u.user_name ilike ?) - """, user, user) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - user_key = row.user_name.lower() - facts[user_key] = { - 'name': row.user_name, - 'locked': str(row.is_locked), - 'password': row.password, - 'expired': str(row.is_expired), - 'profile': row.profile_name, - 'resource_pool': row.resource_pool, - 'roles': [], - 'default_roles': []} - if row.is_locked: - facts[user_key]['locked_time'] = str(row.lock_time) - if row.all_roles: - facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') - if row.default_roles: - facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') - return facts - - -def update_roles(user_facts, cursor, user, - existing_all, existing_default, required): - del_roles = list(set(existing_all) - set(required)) - if del_roles: - cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) - new_roles = list(set(required) - set(existing_all)) - if new_roles: - cursor.execute("grant {0} to {1}".format(','.join(new_roles), user)) - if required: - cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) - - -def check(user_facts, user, profile, resource_pool, - locked, password, expired, ldap, roles): - user_key = user.lower() - if user_key not in user_facts: - return False - if profile and profile != user_facts[user_key]['profile']: - return False - if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: - return False - if locked != (user_facts[user_key]['locked'] == 'True'): - return False - if password and password != user_facts[user_key]['password']: - return False - if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or - ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')): - return False - if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or - sorted(roles) != sorted(user_facts[user_key]['default_roles'])): - return False - return True - - -def present(user_facts, cursor, user, profile, resource_pool, - locked, password, expired, ldap, roles): - user_key = user.lower() - if user_key not in user_facts: - query_fragments = ["create user {0}".format(user)] - if locked: - query_fragments.append("account lock") - if password or ldap: 
- if password: - query_fragments.append("identified by '{0}'".format(password)) - else: - query_fragments.append("identified by '$ldap$'") - if expired or ldap: - query_fragments.append("password expire") - if profile: - query_fragments.append("profile {0}".format(profile)) - if resource_pool: - query_fragments.append("resource pool {0}".format(resource_pool)) - cursor.execute(' '.join(query_fragments)) - if resource_pool and resource_pool != 'general': - cursor.execute("grant usage on resource pool {0} to {1}".format( - resource_pool, user)) - update_roles(user_facts, cursor, user, [], [], roles) - user_facts.update(get_user_facts(cursor, user)) - return True - else: - changed = False - query_fragments = ["alter user {0}".format(user)] - if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): - if locked: - state = 'lock' - else: - state = 'unlock' - query_fragments.append("account {0}".format(state)) - changed = True - if password and password != user_facts[user_key]['password']: - query_fragments.append("identified by '{0}'".format(password)) - changed = True - if ldap: - if ldap != (user_facts[user_key]['expired'] == 'True'): - query_fragments.append("password expire") - changed = True - elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): - if expired: - query_fragments.append("password expire") - changed = True - else: - raise NotSupportedError("Unexpiring user password is not supported.") - if profile and profile != user_facts[user_key]['profile']: - query_fragments.append("profile {0}".format(profile)) - changed = True - if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: - query_fragments.append("resource pool {0}".format(resource_pool)) - if user_facts[user_key]['resource_pool'] != 'general': - cursor.execute("revoke usage on resource pool {0} from {1}".format( - user_facts[user_key]['resource_pool'], user)) - if resource_pool != 'general': - cursor.execute("grant usage on resource pool {0} to {1}".format( - resource_pool, user)) - changed = True - if changed: - cursor.execute(' '.join(query_fragments)) - if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or - sorted(roles) != sorted(user_facts[user_key]['default_roles'])): - update_roles(user_facts, cursor, user, - user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) - changed = True - if changed: - user_facts.update(get_user_facts(cursor, user)) - return changed - - -def absent(user_facts, cursor, user, roles): - user_key = user.lower() - if user_key in user_facts: - update_roles(user_facts, cursor, user, - user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) - try: - cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) - except pyodbc.Error: - raise CannotDropError("Dropping user failed due to dependencies.") - del user_facts[user_key] - return True - else: - return False - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True, aliases=['name']), - profile=dict(), - resource_pool=dict(), - password=dict(no_log=True), - expired=dict(type='bool'), - ldap=dict(type='bool'), - roles=dict(aliases=['role']), - state=dict(default='present', choices=['absent', 'present', 'locked']), - db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - 
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - user = module.params['user'] - profile = module.params['profile'] - if profile: - profile = profile.lower() - resource_pool = module.params['resource_pool'] - if resource_pool: - resource_pool = resource_pool.lower() - password = module.params['password'] - expired = module.params['expired'] - ldap = module.params['ldap'] - roles = [] - if module.params['roles']: - roles = module.params['roles'].split(',') - roles = filter(None, roles) - state = module.params['state'] - if state == 'locked': - locked = True - else: - locked = False - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(e)) - - try: - user_facts = get_user_facts(cursor) - if module.check_mode: - changed = not check(user_facts, user, profile, resource_pool, - locked, password, expired, ldap, roles) - elif state == 'absent': - try: - changed = absent(user_facts, cursor, user, roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state in ['present', 'locked']: - try: - changed = present(user_facts, cursor, user, profile, resource_pool, - locked, password, expired, ldap, roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py deleted file mode 120000 index feaa6d50c2..0000000000 --- a/plugins/modules/datadog_downtime.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/datadog/datadog_downtime.py \ No newline at end of file diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py new file mode 100644 index 0000000000..82365ff06a --- /dev/null +++ b/plugins/modules/datadog_downtime.py @@ -0,0 +1,314 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Datadog, Inc +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: datadog_downtime +short_description: Manages Datadog downtimes +version_added: 2.0.0 +description: + - Manages downtimes within Datadog. + - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/). 
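The module drives this API through the C(datadog-api-client) library, as its implementation further down shows. A rough standalone sketch of the same create call follows; it assumes placeholder keys and passes the model fields as constructor keyword arguments, where the module instead sets them attribute by attribute.

```python
# Sketch of the underlying API call, using the same datadog_api_client v1
# classes the module imports below. The keys are placeholders.
from datadog_api_client.v1 import ApiClient, Configuration
from datadog_api_client.v1.api.downtimes_api import DowntimesApi
from datadog_api_client.v1.model.downtime import Downtime

configuration = Configuration(
    host="https://api.datadoghq.com",
    api_key={"apiKeyAuth": "<api_key>", "appKeyAuth": "<app_key>"},
)
with ApiClient(configuration) as api_client:
    api = DowntimesApi(api_client)
    created = api.create_downtime(Downtime(monitor_tags=["foo:bar"], scope=["test"]))
    print(created.id)  # the identifier to persist for idempotent reruns
```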
+author:
+  - Datadog (@Datadog)
+requirements:
+  - datadog-api-client
+  - Python 3.6+
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  api_key:
+    description:
+      - Your Datadog API key.
+    required: true
+    type: str
+  api_host:
+    description:
+      - The URL to the Datadog API.
+      - This value can also be set with the E(DATADOG_HOST) environment variable.
+    required: false
+    default: https://api.datadoghq.com
+    type: str
+  app_key:
+    description:
+      - Your Datadog app key.
+    required: true
+    type: str
+  state:
+    description:
+      - The designated state of the downtime.
+    required: false
+    choices: ["present", "absent"]
+    default: present
+    type: str
+  id:
+    description:
+      - The identifier of the downtime.
+      - If empty, a new downtime gets created, otherwise it is either updated or deleted depending on the O(state).
+      - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
+    type: int
+  monitor_tags:
+    description:
+      - A list of monitor tags to which the downtime applies.
+      - The resulting downtime applies to monitors that match ALL provided monitor tags.
+    type: list
+    elements: str
+  scope:
+    description:
+      - A list of scopes to which the downtime applies.
+      - The resulting downtime applies to sources that match ALL provided scopes.
+    type: list
+    elements: str
+  monitor_id:
+    description:
+      - The ID of the monitor to mute. If not provided, the downtime applies to all monitors.
+    type: int
+  downtime_message:
+    description:
+      - A message to include with notifications for this downtime.
+      - Email notifications can be sent to specific users by using the same "@username" notation as events.
+    type: str
+  start:
+    type: int
+    description:
+      - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
+  end:
+    type: int
+    description:
+      - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it.
+  timezone:
+    description:
+      - The timezone for the downtime.
+    type: str
+  rrule:
+    description:
+      - The C(RRULE) standard for defining recurring events.
+      - For example, to have a recurring event on the first day of each month, select a type of rrule and set the C(FREQ)
+        to C(MONTHLY) and C(BYMONTHDAY) to C(1).
+      - Most common rrule options from the iCalendar Spec are supported.
+      - Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)).
+    type: str
+"""

+EXAMPLES = r"""
+- name: Create a downtime
+  register: downtime_var
+  community.general.datadog_downtime:
+    state: present
+    monitor_tags:
+      - "foo:bar"
+    downtime_message: "Downtime for foo:bar"
+    scope: "test"
+    api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+    app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+    # Look up the id in the file and ignore errors if the file doesn't exist, so the downtime gets created
+    id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}"
+- name: Save downtime id to file for later updates and idempotence
+  delegate_to: localhost
+  copy:
+    content: "{{ downtime_var.downtime.id }}"
+    dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}"
+"""

+RETURN = r"""
+# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
+# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
+downtime:
+  description: The downtime returned by the API.
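+  # The C(id) field of this dictionary is what the EXAMPLES above persist to disk for idempotent reruns.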
+ type: dict + returned: always + sample: + { + "active": true, + "canceled": null, + "creator_id": 1445416, + "disabled": false, + "downtime_type": 2, + "end": null, + "id": 1055751000, + "message": "Downtime for foo:bar", + "monitor_id": null, + "monitor_tags": [ + "foo:bar" + ], + "parent_id": null, + "recurrence": null, + "scope": [ + "test" + ], + "start": 1607015009, + "timezone": "UTC", + "updater_id": null + } +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +# Import Datadog + +DATADOG_IMP_ERR = None +HAS_DATADOG = True +try: + from datadog_api_client.v1 import Configuration, ApiClient, ApiException + from datadog_api_client.v1.api.downtimes_api import DowntimesApi + from datadog_api_client.v1.model.downtime import Downtime + from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence +except ImportError: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_host=dict(default="https://api.datadoghq.com"), + app_key=dict(required=True, no_log=True), + state=dict(choices=["present", "absent"], default="present"), + monitor_tags=dict(type="list", elements="str"), + scope=dict(type="list", elements="str"), + monitor_id=dict(type="int"), + downtime_message=dict(no_log=True), + start=dict(type="int"), + end=dict(type="int"), + timezone=dict(type="str"), + rrule=dict(type="str"), + id=dict(type="int"), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR) + + configuration = Configuration( + host=module.params["api_host"], + api_key={ + "apiKeyAuth": module.params["api_key"], + "appKeyAuth": module.params["app_key"] + } + ) + with ApiClient(configuration) as api_client: + api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format( + api_client.user_agent + ) + api_instance = DowntimesApi(api_client) + + # Validate api and app keys + try: + api_instance.list_downtimes(current_only=True) + except ApiException as e: + module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key: {0}".format(e)) + + if module.params["state"] == "present": + schedule_downtime(module, api_client) + elif module.params["state"] == "absent": + cancel_downtime(module, api_client) + + +def _get_downtime(module, api_client): + api = DowntimesApi(api_client) + downtime = None + if module.params["id"]: + try: + downtime = api.get_downtime(module.params["id"]) + except ApiException as e: + module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e)) + return downtime + + +def build_downtime(module): + downtime = Downtime() + if module.params["monitor_tags"]: + downtime.monitor_tags = module.params["monitor_tags"] + if module.params["scope"]: + downtime.scope = module.params["scope"] + if module.params["monitor_id"]: + downtime.monitor_id = module.params["monitor_id"] + if module.params["downtime_message"]: + downtime.message = module.params["downtime_message"] + if module.params["start"]: + downtime.start = module.params["start"] + if module.params["end"]: + downtime.end = module.params["end"] + if module.params["timezone"]: + downtime.timezone = module.params["timezone"] + if module.params["rrule"]: + downtime.recurrence = DowntimeRecurrence( + rrule=module.params["rrule"], + type="rrule", + ) + return downtime + + +def 
_post_downtime(module, api_client):
+    api = DowntimesApi(api_client)
+    downtime = build_downtime(module)
+    try:
+        resp = api.create_downtime(downtime)
+        module.params["id"] = resp.id
+        module.exit_json(changed=True, downtime=resp.to_dict())
+    except ApiException as e:
+        module.fail_json(msg="Failed to create downtime: {0}".format(e))
+
+
+def _equal_dicts(a, b, ignore_keys):
+    ka = set(a).difference(ignore_keys)
+    kb = set(b).difference(ignore_keys)
+    return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_downtime(module, current_downtime, api_client):
+    api = DowntimesApi(api_client)
+    downtime = build_downtime(module)
+    try:
+        if current_downtime.disabled:
+            resp = api.create_downtime(downtime)
+        else:
+            resp = api.update_downtime(module.params["id"], downtime)
+        if _equal_dicts(
+            resp.to_dict(),
+            current_downtime.to_dict(),
+            ["active", "creator_id", "updater_id"]
+        ):
+            module.exit_json(changed=False, downtime=resp.to_dict())
+        else:
+            module.exit_json(changed=True, downtime=resp.to_dict())
+    except ApiException as e:
+        module.fail_json(msg="Failed to update downtime: {0}".format(e))
+
+
+def schedule_downtime(module, api_client):
+    downtime = _get_downtime(module, api_client)
+    if downtime is None:
+        _post_downtime(module, api_client)
+    else:
+        _update_downtime(module, downtime, api_client)
+
+
+def cancel_downtime(module, api_client):
+    downtime = _get_downtime(module, api_client)
+    api = DowntimesApi(api_client)
+    if downtime is None:
+        module.exit_json(changed=False)
+    try:
+        api.cancel_downtime(downtime["id"])
+    except ApiException as e:
+        module.fail_json(msg="Failed to cancel downtime: {0}".format(e))
+
+    module.exit_json(changed=True)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py
deleted file mode 120000
index 29ea3609bd..0000000000
--- a/plugins/modules/datadog_event.py
+++ /dev/null
@@ -1 +0,0 @@
-./monitoring/datadog/datadog_event.py
\ No newline at end of file
diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py
new file mode 100644
index 0000000000..c34951992e
--- /dev/null
+++ b/plugins/modules/datadog_event.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: datadog_event
+short_description: Posts events to Datadog service
+description:
+  - Allows posting events to the Datadog (www.datadoghq.com) service.
+  - Uses the U(http://docs.datadoghq.com/api/#events) API.
+author:
+  - "Artūras 'arturaz' Šlajus (@arturaz)"
+  - "Naoya Nakazawa (@n0ts)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  api_key:
+    type: str
+    description:
+      - Your DataDog API key.
+    required: true
+  app_key:
+    type: str
+    description:
+      - Your DataDog app key.
+    required: true
+  title:
+    type: str
+    description:
+      - The event title.
+    required: true
+  text:
+    type: str
+    description:
+      - The body of the event.
+    required: true
+  date_happened:
+    type: int
+    description:
+      - POSIX timestamp of the event.
+      - Default value is now.
+ priority: + type: str + description: + - The priority of the event. + default: normal + choices: [normal, low] + host: + type: str + description: + - Host name to associate with the event. + - If not specified, it defaults to the remote system's hostname. + api_host: + type: str + description: + - DataDog API endpoint URL. + version_added: '3.3.0' + tags: + type: list + elements: str + description: + - Comma separated list of tags to apply to the event. + alert_type: + type: str + description: + - Type of alert. + default: info + choices: ['error', 'warning', 'info', 'success'] + aggregation_key: + type: str + description: + - An arbitrary string to use for aggregation. + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: Post an event with low priority + community.general.datadog_event: + title: Testing from ansible + text: Test + priority: low + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + +- name: Post an event with several tags + community.general.datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + tags: 'aa,bb,#host:{{ inventory_hostname }}' + +- name: Post an event with several tags to another endpoint + community.general.datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + api_host: 'https://example.datadoghq.eu' + tags: + - aa + - b + - '#host:{{ inventory_hostname }}' +""" + +import platform +import traceback + +# Import Datadog +DATADOG_IMP_ERR = None +try: + from datadog import initialize, api + HAS_DATADOG = True +except Exception: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + app_key=dict(required=True, no_log=True), + api_host=dict(type='str'), + title=dict(required=True), + text=dict(required=True), + date_happened=dict(type='int'), + priority=dict(default='normal', choices=['normal', 'low']), + host=dict(), + tags=dict(type='list', elements='str'), + alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), + aggregation_key=dict(no_log=False), + validate_certs=dict(default=True, type='bool'), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + + options = { + 'api_key': module.params['api_key'], + 'app_key': module.params['app_key'], + } + if module.params['api_host'] is not None: + options['api_host'] = module.params['api_host'] + + initialize(**options) + + _post_event(module) + + +def _post_event(module): + try: + if module.params['host'] is None: + module.params['host'] = platform.node().split('.')[0] + msg = api.Event.create(title=module.params['title'], + text=module.params['text'], + host=module.params['host'], + tags=module.params['tags'], + priority=module.params['priority'], + alert_type=module.params['alert_type'], + aggregation_key=module.params['aggregation_key'], + source_type_name='ansible') + if msg['status'] != 'ok': + 
module.fail_json(msg=msg) + + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py deleted file mode 120000 index 53fbe2a81f..0000000000 --- a/plugins/modules/datadog_monitor.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/datadog/datadog_monitor.py \ No newline at end of file diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py new file mode 100644 index 0000000000..2b84d7dbd8 --- /dev/null +++ b/plugins/modules/datadog_monitor.py @@ -0,0 +1,462 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Sebastian Kornehl +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: datadog_monitor +short_description: Manages Datadog monitors +description: + - Manages monitors within Datadog. + - Options as described on https://docs.datadoghq.com/api/. +author: Sebastian Kornehl (@skornehl) +requirements: [datadog] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. Default value is V(https://api.datadoghq.com). + - This value can also be set with the E(DATADOG_HOST) environment variable. + required: false + type: str + version_added: '0.2.0' + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the monitor. + required: true + choices: ['present', 'absent', 'mute', 'unmute'] + type: str + tags: + description: + - A list of tags to associate with your monitor when creating or updating. + - This can help you categorize and filter monitors. + type: list + elements: str + type: + description: + - The type of the monitor. + - The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0. + - The type V(composite) was added in community.general 3.4.0. + - The type V(event-v2 alert) was added in community.general 4.8.0. + choices: + - metric alert + - service check + - event alert + - event-v2 alert + - process alert + - log alert + - query alert + - trace-analytics alert + - rum alert + - composite + type: str + query: + description: + - The monitor query to notify on. + - Syntax varies depending on what type of monitor you are creating. + type: str + name: + description: + - The name of the alert. + required: true + type: str + notification_message: + description: + - A message to include with notifications for this monitor. + - Email notifications can be sent to specific users by using the same '@username' notation as events. + - Monitor message template variables can be accessed by using double square brackets, in other words C([[) and C(]]). + type: str + silenced: + type: dict + description: + - Dictionary of scopes to silence, with timestamps or None. + - Each scope is muted until the given POSIX timestamp or forever if the value is V(None). + notify_no_data: + description: + - Whether this monitor notifies when data stops reporting. 
+ type: bool + default: false + no_data_timeframe: + description: + - The number of minutes before a monitor notifies when data stops reporting. + - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. + - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. + type: str + timeout_h: + description: + - The number of hours of the monitor not reporting data before it automatically resolves from a triggered state. + type: str + renotify_interval: + description: + - The number of minutes after the last notification before a monitor re-notifies on the current status. + - It only re-notifies if it is not resolved. + type: str + escalation_message: + description: + - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. + - Not applicable if O(renotify_interval=none). + type: str + notify_audit: + description: + - Whether tagged users are notified on changes to this monitor. + type: bool + default: false + thresholds: + type: dict + description: + - A dictionary of thresholds by status. + - Only available for service checks and metric alerts. + - Because each of them can have multiple thresholds, we do not define them directly in the query. + - "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})." + locked: + description: + - Whether changes to this monitor should be restricted to the creator or admins. + type: bool + default: false + require_full_window: + description: + - Whether this monitor needs a full window of data before it gets evaluated. + - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations are skipped. + type: bool + new_host_delay: + description: + - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. + - This gives the host time to fully initialize. + type: str + evaluation_delay: + description: + - Time to delay evaluation (in seconds). + - Effective for sparse values. + type: str + id: + description: + - The ID of the alert. + - If set, it is used instead of O(name) to locate the alert. + type: str + include_tags: + description: + - Whether notifications from this monitor automatically inserts its triggering tags into the title. + type: bool + default: true + version_added: 1.3.0 + priority: + description: + - Integer from V(1) (high) to V(5) (low) indicating alert severity. + type: int + version_added: 4.6.0 + notification_preset_name: + description: + - Toggles the display of additional content sent in the monitor notification. + choices: + - show_all + - hide_query + - hide_handles + - hide_all + type: str + version_added: 7.1.0 + renotify_occurrences: + description: + - The number of times re-notification messages should be sent on the current status at the provided re-notification + interval. + type: int + version_added: 7.1.0 + renotify_statuses: + description: + - The types of monitor statuses for which re-notification messages are sent. 
+ choices: + - alert + - warn + - no data + type: list + elements: str + version_added: 7.1.0 +""" + +EXAMPLES = r""" +- name: Create a metric monitor + community.general.datadog_monitor: + type: "metric alert" + name: "Test monitor" + state: "present" + renotify_interval: 30 + renotify_occurrences: 1 + renotify_statuses: ["warn"] + notification_preset_name: "show_all" + query: "datadog.agent.up.over('host:host1').last(2).count_by_status()" + notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog." + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Deletes a monitor + community.general.datadog_monitor: + name: "Test monitor" + state: "absent" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Mutes a monitor + community.general.datadog_monitor: + name: "Test monitor" + state: "mute" + silenced: '{"*":None}' + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Unmutes a monitor + community.general.datadog_monitor: + name: "Test monitor" + state: "unmute" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Use datadoghq.eu platform instead of datadoghq.com + community.general.datadog_monitor: + name: "Test monitor" + state: "absent" + api_host: https://api.datadoghq.eu + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" +""" + +import traceback + +# Import Datadog +DATADOG_IMP_ERR = None +try: + from datadog import initialize, api + HAS_DATADOG = True +except Exception: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_host=dict(), + app_key=dict(required=True, no_log=True), + state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), + type=dict(choices=['metric alert', 'service check', 'event alert', 'event-v2 alert', 'process alert', + 'log alert', 'query alert', 'trace-analytics alert', + 'rum alert', 'composite']), + name=dict(required=True), + query=dict(), + notification_message=dict(no_log=True), + silenced=dict(type='dict'), + notify_no_data=dict(default=False, type='bool'), + no_data_timeframe=dict(), + timeout_h=dict(), + renotify_interval=dict(), + escalation_message=dict(), + notify_audit=dict(default=False, type='bool'), + thresholds=dict(type='dict'), + tags=dict(type='list', elements='str'), + locked=dict(default=False, type='bool'), + require_full_window=dict(type='bool'), + new_host_delay=dict(), + evaluation_delay=dict(), + id=dict(), + include_tags=dict(default=True, type='bool'), + priority=dict(type='int'), + notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']), + renotify_occurrences=dict(type='int'), + renotify_statuses=dict(type='list', elements='str', choices=['alert', 'warn', 'no data']), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + + options = { + 'api_key': module.params['api_key'], + 'api_host': module.params['api_host'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + # Check if api_key and app_key is 
correct or not + # if not, then fail here. + response = api.Monitor.get_all() + if isinstance(response, dict): + msg = response.get('errors', None) + if msg: + module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0])) + + if module.params['state'] == 'present': + install_monitor(module) + elif module.params['state'] == 'absent': + delete_monitor(module) + elif module.params['state'] == 'mute': + mute_monitor(module) + elif module.params['state'] == 'unmute': + unmute_monitor(module) + + +def _fix_template_vars(message): + if message: + return message.replace('[[', '{{').replace(']]', '}}') + return message + + +def _get_monitor(module): + if module.params['id'] is not None: + monitor = api.Monitor.get(module.params['id']) + if 'errors' in monitor: + module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors']))) + return monitor + else: + monitors = api.Monitor.get_all() + for monitor in monitors: + if monitor['name'] == _fix_template_vars(module.params['name']): + return monitor + return {} + + +def _post_monitor(module, options): + try: + kwargs = dict(type=module.params['type'], query=module.params['query'], + name=_fix_template_vars(module.params['name']), + message=_fix_template_vars(module.params['notification_message']), + escalation_message=_fix_template_vars(module.params['escalation_message']), + priority=module.params['priority'], + options=options) + if module.params['tags'] is not None: + kwargs['tags'] = module.params['tags'] + msg = api.Monitor.create(**kwargs) + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + else: + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +def _equal_dicts(a, b, ignore_keys): + ka = set(a).difference(ignore_keys) + kb = set(b).difference(ignore_keys) + return ka == kb and all(a[k] == b[k] for k in ka) + + +def _update_monitor(module, monitor, options): + try: + kwargs = dict(id=monitor['id'], query=module.params['query'], + name=_fix_template_vars(module.params['name']), + message=_fix_template_vars(module.params['notification_message']), + escalation_message=_fix_template_vars(module.params['escalation_message']), + priority=module.params['priority'], + options=options) + if module.params['tags'] is not None: + kwargs['tags'] = module.params['tags'] + msg = api.Monitor.update(**kwargs) + + if 'errors' in msg: + module.fail_json(msg=str(msg['errors'])) + elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']): + module.exit_json(changed=False, msg=msg) + else: + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +def install_monitor(module): + options = { + "silenced": module.params['silenced'], + "notify_no_data": module.boolean(module.params['notify_no_data']), + "no_data_timeframe": module.params['no_data_timeframe'], + "timeout_h": module.params['timeout_h'], + "renotify_interval": module.params['renotify_interval'], + "escalation_message": module.params['escalation_message'], + "notify_audit": module.boolean(module.params['notify_audit']), + "locked": module.boolean(module.params['locked']), + "require_full_window": module.params['require_full_window'], + "new_host_delay": module.params['new_host_delay'], + "evaluation_delay": module.params['evaluation_delay'], + 
"include_tags": module.params['include_tags'], + "notification_preset_name": module.params['notification_preset_name'], + "renotify_occurrences": module.params['renotify_occurrences'], + "renotify_statuses": module.params['renotify_statuses'], + } + + if module.params['type'] == "service check": + options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1} + if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None: + options["thresholds"] = module.params['thresholds'] + + monitor = _get_monitor(module) + if not monitor: + _post_monitor(module, options) + else: + _update_monitor(module, monitor, options) + + +def delete_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.exit_json(changed=False) + try: + msg = api.Monitor.delete(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +def mute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif monitor['options']['silenced']: + module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") + elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0: + module.exit_json(changed=False) + try: + if module.params['silenced'] is None or module.params['silenced'] == "": + msg = api.Monitor.mute(id=monitor['id']) + else: + msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +def unmute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif not monitor['options']['silenced']: + module.exit_json(changed=False) + try: + msg = api.Monitor.unmute(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py deleted file mode 120000 index b84e5b8885..0000000000 --- a/plugins/modules/dconf.py +++ /dev/null @@ -1 +0,0 @@ -./system/dconf.py \ No newline at end of file diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py new file mode 100644 index 0000000000..e9e9d82514 --- /dev/null +++ b/plugins/modules/dconf.py @@ -0,0 +1,470 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Branko Majic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: dconf +author: + - "Branko Majic (@azaghal)" +short_description: Modify and read dconf database +description: + - This module allows modifications and reading of C(dconf) database. The module is implemented as a wrapper around C(dconf) + tool. Please see the dconf(1) man page for more details. + - Since C(dconf) requires a running D-Bus session to change values, the module tries to detect an existing session and reuse + it, or run the tool using C(dbus-run-session). 
+requirements: + - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this is to become + a non-optional requirement in a future major release of community.general. +notes: + - This module depends on C(psutil) Python library (version 4.0.0 and upwards), C(dconf), C(dbus-send), and C(dbus-run-session) + binaries. Depending on distribution you are using, you may need to install additional packages to have these available. + - This module uses the C(gi.repository) Python library when available for accurate comparison of values in C(dconf) to values + specified in Ansible code. C(gi.repository) is likely to be present on most systems which have C(dconf) but may not be + present everywhere. When it is missing, a simple string comparison between values is used, and there may be false positives, + that is, Ansible may think that a value is being changed when it is not. This fallback is to be removed in a future version + of this module, at which point the module will stop working on hosts without C(gi.repository). + - Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to + implementation details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly + if options are changed using Ansible and C(dbus-run-session). + - Keep in mind that the C(dconf) CLI tool, which this module wraps around, utilises an unusual syntax for the values (GVariant). + For example, if you wanted to provide a string value, the correct syntax would be O(value="'myvalue'") - with single quotes + as part of the Ansible parameter value. + - When using loops in combination with a value like V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible + type conversions. Applying a filter V({{ item.value | string }}) to the parameter variable can avoid potential conversion + problems. + - The easiest way to figure out exact syntax/value you need to provide for a key is by making the configuration change in + application affected by the key, and then having a look at value set using commands C(dconf dump /path/to/dir/) or C(dconf + read /path/to/key). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + type: str + required: true + description: + - A dconf key to modify or read from the dconf database. + value: + type: raw + required: false + description: + - Value to set for the specified dconf key. Value should be specified in GVariant format. Due to complexity of this + format, it is best to have a look at existing values in the dconf database. + - Required for O(state=present). + - Although the type is specified as "raw", it should typically be specified as a string. However, boolean values in + particular are handled properly even when specified as booleans rather than strings (in fact, handling booleans properly + is why the type of this parameter is "raw"). + state: + type: str + required: false + default: present + choices: ['read', 'present', 'absent'] + description: + - The action to take upon the key/value. +""" + +RETURN = r""" +value: + description: Value associated with the requested key. 
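+  # The value is returned in its GVariant text form, hence the embedded quotes in the sample below.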
+ returned: success, state was "read" + type: str + sample: "'Default'" +""" + +EXAMPLES = r""" +- name: Configure available keyboard layouts in Gnome + community.general.dconf: + key: "/org/gnome/desktop/input-sources/sources" + value: "[('xkb', 'us'), ('xkb', 'se')]" + state: present + +- name: Read currently available keyboard layouts in Gnome + community.general.dconf: + key: "/org/gnome/desktop/input-sources/sources" + state: read + register: keyboard_layouts + +- name: Reset the available keyboard layouts in Gnome + community.general.dconf: + key: "/org/gnome/desktop/input-sources/sources" + state: absent + +- name: Configure available keyboard layouts in Cinnamon + community.general.dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + value: "['us', 'se']" + state: present + +- name: Read currently available keyboard layouts in Cinnamon + community.general.dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + state: read + register: keyboard_layouts + +- name: Reset the available keyboard layouts in Cinnamon + community.general.dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + state: absent + +- name: Disable desktop effects in Cinnamon + community.general.dconf: + key: "/org/cinnamon/desktop-effects" + value: "false" + state: present +""" + + +import os +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.respawn import ( + has_respawned, + probe_interpreters_for_module, + respawn_module, +) +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +glib_module_name = 'gi.repository.GLib' + +try: + from gi.repository.GLib import Variant, GError +except ImportError: + Variant = None + GError = AttributeError + +with deps.declare("psutil"): + import psutil + + +class DBusWrapper(object): + """ + Helper class that can be used for running a command with a working D-Bus + session. + + If possible, command will be run against an existing D-Bus session, + otherwise the session will be spawned via dbus-run-session. + + Example usage: + + dbus_wrapper = DBusWrapper(ansible_module) + dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"]) + """ + + def __init__(self, module): + """ + Initialises an instance of the class. + + :param module: Ansible module instance used to signal failures and run commands. + :type module: AnsibleModule + """ + + # Store passed-in arguments and set-up some defaults. + self.module = module + + # Try to extract existing D-Bus session address. + self.dbus_session_bus_address = self._get_existing_dbus_session() + + # If no existing D-Bus session was detected, check if dbus-run-session + # is available. + if self.dbus_session_bus_address is None: + self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True) + + def _get_existing_dbus_session(self): + """ + Detects and returns an existing D-Bus session bus address. + + :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None. + """ + + # We'll be checking the processes of current user only. + uid = os.getuid() + + # Go through all the pids for this user, try to extract the D-Bus + # session bus address from environment, and ensure it is possible to + # connect to it. 
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid) + + for pid in psutil.pids(): + try: + process = psutil.Process(pid) + process_real_uid, dummy, dummy = process.uids() + if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): + dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] + self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate) + dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True) + command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test'] + rc, dummy, dummy = self.module.run_command(command) + + if rc == 0: + self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate) + + return dbus_session_bus_address_candidate + + # This can happen with things like SSH sessions etc. + except psutil.AccessDenied: + pass + # Process has disappeared while inspecting it + except psutil.NoSuchProcess: + pass + + self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session") + + return None + + def run_command(self, command): + """ + Runs the specified command within a functional D-Bus session. Command is + effectively passed-on to AnsibleModule.run_command() method, with + modification for using dbus-run-session if necessary. + + :param command: Command to run, including parameters. Each element of the list should be a string. + :type module: list + + :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command. + """ + + if self.dbus_session_bus_address is None: + self.module.debug("Using dbus-run-session wrapper for running commands.") + command = [self.dbus_run_session_cmd] + command + rc, out, err = self.module.run_command(command) + + if self.dbus_session_bus_address is None and rc == 127: + self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err) + else: + extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address} + rc, out, err = self.module.run_command(command, environ_update=extra_environment) + + return rc, out, err + + +class DconfPreference(object): + + def __init__(self, module, check_mode=False): + """ + Initialises instance of the class. + + :param module: Ansible module instance used to signal failures and run commands. + :type module: AnsibleModule + + :param check_mode: Specify whether to only check if a change should be made or if to actually make a change. + :type check_mode: bool + """ + + self.module = module + self.check_mode = check_mode + # Check if dconf binary exists + self.dconf_bin = self.module.get_bin_path('dconf', required=True) + + @staticmethod + def variants_are_equal(canonical_value, user_value): + """Compare two string GVariant representations for equality. + + Assumes `canonical_value` is "canonical" in the sense that the type of + the variant is specified explicitly if it cannot be inferred; this is + true for textual representations of variants generated by the `dconf` + command. The type of `canonical_value` is used to parse `user_value`, + so the latter does not need to be explicitly typed. + + Returns True if the two values are equal. + """ + if canonical_value is None: + # It's unset in dconf database, so anything the user is trying to + # set is a change. 
return False
+        try:
+            variant1 = Variant.parse(None, canonical_value)
+            variant2 = Variant.parse(variant1.get_type(), user_value)
+            return variant1 == variant2
+        except GError:
+            return canonical_value == user_value
+
+    def read(self, key):
+        """
+        Retrieves the current value associated with the dconf key.
+
+        If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+        :returns: string -- Value assigned to the provided key. If the value is not set for the specified key, returns None.
+        """
+        command = [self.dconf_bin, "read", key]
+
+        rc, out, err = self.module.run_command(command)
+
+        if rc != 0:
+            self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err,
+                                  out=out,
+                                  err=err)
+
+        if out == '':
+            value = None
+        else:
+            value = out.rstrip('\n')
+
+        return value
+
+    def write(self, key, value):
+        """
+        Writes the value for the specified key.
+
+        If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+        :param key: dconf key for which the value should be set. Should be a full path.
+        :type key: str
+
+        :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
+        :type value: str
+
+        :returns: bool -- True if a change was made, False if no change was required.
+        """
+        # If no change is needed (or won't be done due to check_mode), notify
+        # caller straight away.
+        if self.variants_are_equal(self.read(key), value):
+            return False
+        elif self.check_mode:
+            return True
+
+        # Set-up command to run. Since D-Bus is needed for the write operation,
+        # run the dconf command through DBusWrapper.
+        command = [self.dconf_bin, "write", key, value]
+
+        # Run the command and fetch standard return code, stdout, and stderr.
+        dbus_wrapper = DBusWrapper(self.module)
+        rc, out, err = dbus_wrapper.run_command(command)
+
+        if rc != 0:
+            self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err),
+                                  out=out,
+                                  err=err)
+
+        # Value was changed.
+        return True
+
+    def reset(self, key):
+        """
+        Resets value for the specified key (removes it from user configuration).
+
+        If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+        :param key: dconf key to reset. Should be a full path.
+        :type key: str
+
+        :returns: bool -- True if a change was made, False if no change was required.
+        """
+
+        # Read the current value first.
+        current_value = self.read(key)
+
+        # No change is needed if the key is not set at all; in check mode, just
+        # report whether a change would be made.
+        if current_value is None:
+            return False
+        elif self.check_mode:
+            return True
+
+        # Set-up command to run. Since D-Bus is needed for the reset operation,
+        # run the dconf command through DBusWrapper.
+        command = [self.dconf_bin, "reset", key]
+
+        # Run the command and fetch standard return code, stdout, and stderr.
+        dbus_wrapper = DBusWrapper(self.module)
+        rc, out, err = dbus_wrapper.run_command(command)
+
+        if rc != 0:
+            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err,
+                                  out=out,
+                                  err=err)
+
+        # Value was changed.
+        return True
+
+
+def main():
+    # Setup the Ansible module
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent', 'read']),
+            key=dict(required=True, type='str', no_log=False),
+            # Converted to str below after special handling of bool.
+            value=dict(type='raw'),
+        ),
+        supports_check_mode=True,
+        required_if=[
+            ('state', 'present', ['value']),
+        ],
+    )
+
+    if Variant is None:
+        # This interpreter can't see the GLib module. 
To try to fix that, we'll
+        # look in common locations for system-owned interpreters that can see
+        # it; if we find one, we'll respawn under it. Otherwise we'll proceed
+        # with degraded functionality, without the ability to parse GVariants.
+        # Later (in a different PR) we'll actually deprecate this degraded
+        # mode and fail with an error if the library can't be found.
+
+        if has_respawned():
+            # This shouldn't be possible; short-circuit early if it happens.
+            module.fail_json(
+                msg="%s must be installed and visible from %s." %
+                (glib_module_name, sys.executable))
+
+        interpreters = ['/usr/bin/python3', '/usr/bin/python']
+
+        interpreter = probe_interpreters_for_module(
+            interpreters, glib_module_name)
+
+        if interpreter:
+            # Found the Python bindings; respawn this module under the
+            # interpreter where we found them.
+            respawn_module(interpreter)
+            # This is the end of the line for this process; it will exit here
+            # once the respawned module has completed.
+
+    # Try to be forgiving about the user specifying a boolean as the value, or
+    # more accurately about the fact that YAML and Ansible are quite insistent
+    # about converting strings that look like booleans into booleans. Convert
+    # the boolean into a string that dconf will understand. Any type for the
+    # value other than boolean is just converted into a string directly.
+    if module.params['value'] is not None:
+        if isinstance(module.params['value'], bool):
+            module.params['value'] = 'true' if module.params['value'] else 'false'
+        else:
+            module.params['value'] = to_native(
+                module.params['value'], errors='surrogate_or_strict')
+
+    if Variant is None:
+        module.warn(
+            'WARNING: The gi.repository Python library is not available; '
+            'using string comparison to check value equality. This fallback '
+            'will be deprecated in a future version of community.general.')
+
+    deps.validate(module)
+
+    # Create wrapper instance.
+    dconf = DconfPreference(module, module.check_mode)
+
+    # Process based on different states.
+    if module.params['state'] == 'read':
+        value = dconf.read(module.params['key'])
+        module.exit_json(changed=False, value=value)
+    elif module.params['state'] == 'present':
+        changed = dconf.write(module.params['key'], module.params['value'])
+        module.exit_json(changed=changed)
+    elif module.params['state'] == 'absent':
+        changed = dconf.reset(module.params['key'])
+        module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py
new file mode 100644
index 0000000000..3746810ca9
--- /dev/null
+++ b/plugins/modules/decompress.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+
+# Copyright (c) 2024, Stanislav Shamilov
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: decompress
+short_description: Decompresses compressed files
+version_added: 10.1.0
+description:
+  - Decompresses compressed files.
+  - The source (compressed) file and destination (decompressed) files are on the remote host.
+  - The source file can be deleted after decompression.
+extends_documentation_fragment:
+  - ansible.builtin.files
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  src:
+    description:
+      - Remote absolute path for the file to decompress.
+    type: path
+    required: true
+  dest:
+    description:
+      - The file name of the destination file that the compressed file is decompressed into.
+      - If the destination file exists, it is truncated and overwritten.
+      - If not specified, the destination filename is derived from O(src) by removing the compression format extension. For
+        example, when O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) is V(/path/to/file.txt). If the O(src)
+        file does not have an extension for the current O(format), the O(dest) filename is made by appending C(_decompressed)
+        to the O(src) filename. For instance, when O(src) is V(/path/to/file.myextension), the O(dest) filename is V(/path/to/file.myextension_decompressed).
+    type: path
+  format:
+    description:
+      - The type of compression to use to decompress.
+    type: str
+    choices: [gz, bz2, xz]
+    default: gz
+  remove:
+    description:
+      - Remove original compressed file after decompression.
+    type: bool
+    default: false
+requirements:
+  - Requires C(lzma) (standard library of Python 3) if using C(xz) format.
+author:
+  - Stanislav Shamilov (@shamilovstas)
+"""

+EXAMPLES = r"""
+- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt (gz compression is used by default)
+  community.general.decompress:
+    src: /path/to/file.txt.gz
+    dest: /path/to/file.txt
+
+- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt
+  community.general.decompress:
+    src: /path/to/file.txt.gz
+
+- name: Decompress file compressed with bzip2
+  community.general.decompress:
+    src: /path/to/file.txt.bz2
+    dest: /path/to/file.txt
+    format: bz2
+
+- name: Decompress file and delete the compressed file afterwards
+  community.general.decompress:
+    src: /path/to/file.txt.gz
+    dest: /path/to/file.txt
+    remove: true
+"""
+
+RETURN = r"""
+dest:
+  description: Path to the decompressed file.
+ type: str + returned: success + sample: /path/to/file.txt +""" + +import bz2 +import filecmp +import gzip +import os +import shutil +import tempfile + +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ModuleHelper +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("lzma"): + import lzma + + +def lzma_decompress(src): + return lzma.open(src, "rb") + + +def bz2_decompress(src): + return bz2.open(src, "rb") + + +def gzip_decompress(src): + return gzip.open(src, "rb") + + +def decompress(b_src, b_dest, handler): + with handler(b_src) as src_file: + with open(b_dest, "wb") as dest_file: + shutil.copyfileobj(src_file, dest_file) + + +class Decompress(ModuleHelper): + destination_filename_template = "%s_decompressed" + output_params = 'dest' + + module = dict( + argument_spec=dict( + src=dict(type='path', required=True), + dest=dict(type='path'), + format=dict(type='str', default='gz', choices=['gz', 'bz2', 'xz']), + remove=dict(type='bool', default=False) + ), + add_file_common_args=True, + supports_check_mode=True + ) + + def __init_module__(self): + self.handlers = {"gz": gzip_decompress, "bz2": bz2_decompress, "xz": lzma_decompress} + if self.vars.dest is None: + self.vars.dest = self.get_destination_filename() + deps.validate(self.module) + self.configure() + + def configure(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + if not os.path.exists(b_src): + if self.vars.remove and os.path.exists(b_dest): + self.module.exit_json(changed=False) + else: + self.do_raise(msg="Path does not exist: '%s'" % b_src) + if os.path.isdir(b_src): + self.do_raise(msg="Cannot decompress directory '%s'" % b_src) + if os.path.isdir(b_dest): + self.do_raise(msg="Destination is a directory, cannot decompress: '%s'" % b_dest) + + def __run__(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + + file_args = self.module.load_file_common_arguments(self.module.params, path=self.vars.dest) + handler = self.handlers[self.vars.format] + try: + tempfd, temppath = tempfile.mkstemp(dir=self.module.tmpdir) + self.module.add_cleanup_file(temppath) + b_temppath = to_bytes(temppath, errors='surrogate_or_strict') + decompress(b_src, b_temppath, handler) + except OSError as e: + self.do_raise(msg="Unable to create temporary file '%s'" % to_native(e)) + + if os.path.exists(b_dest): + self.changed = not filecmp.cmp(b_temppath, b_dest, shallow=False) + else: + self.changed = True + + if self.changed and not self.module.check_mode: + try: + self.module.atomic_move(b_temppath, b_dest) + except OSError: + self.do_raise(msg="Unable to move temporary file '%s' to '%s'" % (b_temppath, self.vars.dest)) + + if self.vars.remove and not self.check_mode: + os.remove(b_src) + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + def get_destination_filename(self): + src = self.vars.src + fmt_extension = ".%s" % self.vars.format + if src.endswith(fmt_extension) and len(src) > len(fmt_extension): + filename = src[:-len(fmt_extension)] + else: + filename = Decompress.destination_filename_template % src + return filename + + +def main(): + Decompress.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py 
deleted file mode 120000
index 5253781aef..0000000000
--- a/plugins/modules/deploy_helper.py
+++ /dev/null
@@ -1 +0,0 @@
-./web_infrastructure/deploy_helper.py
\ No newline at end of file
diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py
new file mode 100644
index 0000000000..d9380d36f4
--- /dev/null
+++ b/plugins/modules/deploy_helper.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+
+# Copyright (c) 2014, Jasper N. Brouwer
+# Copyright (c) 2014, Ramon de la Fuente
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: deploy_helper
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects
+description:
+  - The Deploy Helper manages some of the steps common in deploying software. It creates a folder structure, manages a symlink
+    for the current release and cleans up old releases.
+  - Running it with O(state=query) or O(state=present) returns the C(deploy_helper) fact. It contains C(project_path) (whatever
+    you set in the O(path) parameter), C(current_path) (the path to the symlink that points to the active release), C(releases_path)
+    (the path to the folder that keeps the releases), C(shared_path) (the path to the folder that keeps shared resources),
+    C(unfinished_filename) (the file to check for to recognize unfinished builds), C(previous_release) (the release the 'current'
+    symlink is pointing to), C(previous_release_path) (the full path to the 'current' symlink target), C(new_release) (either
+    the O(release) parameter or a generated timestamp), and C(new_release_path) (the path to the new release folder, not created
+    by the module).
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  path:
+    type: path
+    required: true
+    aliases: ['dest']
+    description:
+      - The root path of the project. Returned in the C(deploy_helper.project_path) fact.
+  state:
+    type: str
+    description:
+      - The state of the project.
+      - V(query) gathers facts.
+      - V(present) creates the project C(root) folder, and in it the C(releases) and C(shared) folders.
+      - V(finalize) removes the O(unfinished_filename) file, creates a symlink to the newly deployed release, and optionally
+        cleans old releases.
+      - V(clean) removes failed and old releases.
+      - V(absent) removes the project folder (equivalent to the M(ansible.builtin.file) module with O(state=absent)).
+    choices: [present, finalize, absent, clean, query]
+    default: present
+
+  release:
+    type: str
+    description:
+      - The release version that is being deployed. Defaults to a timestamp format C(%Y%m%d%H%M%S) (for example V(20141119223359)).
+        This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize). You can
+        use the generated fact C(release={{ deploy_helper.new_release }}).
+  releases_path:
+    type: str
+    description:
+      - The name of the folder that holds the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path)
+        fact.
+    default: releases
+
+  shared_path:
+    type: path
+    description:
+      - The name of the folder that holds the shared resources. This can be relative to O(path) or absolute. If this is set
+        to an empty string, no shared folder is created. Returned in the C(deploy_helper.shared_path) fact.
+ default: shared + + current_path: + type: path + description: + - The name of the symlink that is created when the deploy is finalized. Used in O(state=finalize) and O(state=clean). + Returned in the C(deploy_helper.current_path) fact. + default: current + + unfinished_filename: + type: str + description: + - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that contain this + file are deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted from + the C(new_release_path) during O(state=finalize). + default: DEPLOY_UNFINISHED + + clean: + description: + - Whether to run the clean procedure in case of O(state=finalize). + type: bool + default: true + + keep_releases: + type: int + description: + - The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds + are deleted first, so only correct releases count. The current version does not count. + default: 5 + +notes: + - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden parameters + to both calls, otherwise the second call overwrites the facts of the first one. + - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a new naming + strategy without problems. + - Because of the default behaviour of generating the C(new_release) fact, this module is not idempotent unless you pass + your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +""" + +EXAMPLES = r""" +# General explanation, starting with an example folder structure for a project: + +# root: +# releases: +# - 20140415234508 +# - 20140415235146 +# - 20140416082818 +# +# shared: +# - sessions +# - uploads +# +# current: releases/20140416082818 + + +# The 'releases' folder holds all the available releases. A release is a complete build of the application being +# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem. +# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like +# git tags or commit hashes. +# +# During a deploy, a new folder should be created in the releases folder and any build steps required should be +# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink +# with a link to this build. +# +# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server +# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release +# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps. +# +# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress. +# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new +# release is reduced to the time it takes to switch the link. +# +# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release +# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated +# procedure to remove it during cleanup. 
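+# A note on how the switch is implemented (based on the module's create_link
+# logic below, not on extra configuration): the module first creates a temporary
+# symlink next to 'current' and then renames it over 'current', so on POSIX
+# filesystems the swap is atomic and the web-server never observes a missing link.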
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+  community.general.deploy_helper:
+    path: /path/to/root
+- name: Clone the project to the new release folder
+  ansible.builtin.git:
+    repo: git://foosball.example.org/path/to/repo.git
+    dest: '{{ deploy_helper.new_release_path }}'
+    version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+  ansible.builtin.file:
+    path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+    state: touch
+- name: Perform some build steps, like running your dependency manager for example
+  community.general.composer:
+    command: install
+    working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+  ansible.builtin.file:
+    path: '{{ deploy_helper.shared_path }}/{{ item }}'
+    state: directory
+  with_items:
+    - sessions
+    - uploads
+- name: Add symlinks from the new release to the shared folder
+  ansible.builtin.file:
+    path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+    src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+    state: link
+  with_items:
+    - path: app/sessions
+      src: sessions
+    - path: web/uploads
+      src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+  community.general.deploy_helper:
+    path: /path/to/root
+    release: '{{ deploy_helper.new_release }}'
+    state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+  community.general.deploy_helper:
+    path: /path/to/root
+    state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+  community.general.deploy_helper:
+    path: /path/to/root
+    release: '{{ deploy_helper.new_release }}'
+    state: present
+
+# All paths can be absolute or relative (to the 'path' parameter)
+- community.general.deploy_helper:
+    path: /path/to/root
+    releases_path: /var/www/project/releases
+    shared_path: /var/www/shared
+    current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- community.general.deploy_helper:
+    path: /path/to/root
+    release: v1.1.1
+    state: present
+- community.general.deploy_helper:
+    path: /path/to/root
+    release: '{{ deploy_helper.new_release }}'
+    state: finalize
+
+# Using a different unfinished_filename:
+- community.general.deploy_helper:
+    path: /path/to/root
+    unfinished_filename: README.md
+    release: '{{ deploy_helper.new_release }}'
+    state: finalize
+
+# Postponing the cleanup of older builds:
+- community.general.deploy_helper:
+    path: /path/to/root
+    release: '{{ deploy_helper.new_release }}'
+    state: finalize
+    clean: false
+- community.general.deploy_helper:
+    path: /path/to/root
+    state: clean
+# Or running the cleanup ahead of the new deploy
+- community.general.deploy_helper:
+    path: /path/to/root
+    state: clean
+- community.general.deploy_helper:
+    path: /path/to/root
+    state: present
+
+# Keeping more old releases:
+- community.general.deploy_helper:
+    path: /path/to/root
+    release: '{{ deploy_helper.new_release }}'
+    state: finalize
+    keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- community.general.deploy_helper:
+    path: /path/to/root
+    state: clean
+    keep_releases: 10
+
+# Removing the entire project root folder
+- community.general.deploy_helper:
+    path: /path/to/root
+    state: absent
+
+# Debugging the facts returned by the module
+- community.general.deploy_helper:
+    path: /path/to/root
+- ansible.builtin.debug:
+    var: deploy_helper
+"""
+
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class DeployHelper(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.file_args = module.load_file_common_arguments(module.params)
+
+        self.clean = module.params['clean']
+        self.current_path = module.params['current_path']
+        self.keep_releases = module.params['keep_releases']
+        self.path = module.params['path']
+        self.release = module.params['release']
+        self.releases_path = module.params['releases_path']
+        self.shared_path = module.params['shared_path']
+        self.state = module.params['state']
+        self.unfinished_filename = module.params['unfinished_filename']
+
+    def gather_facts(self):
+        current_path = os.path.join(self.path, self.current_path)
+        releases_path = os.path.join(self.path, self.releases_path)
+        if self.shared_path:
+            shared_path = os.path.join(self.path, self.shared_path)
+        else:
+            shared_path = None
+
+        previous_release, previous_release_path = self._get_last_release(current_path)
+
+        if not self.release and (self.state == 'query' or self.state == 'present'):
+            self.release = time.strftime("%Y%m%d%H%M%S")
+
+        if self.release:
+            new_release_path = os.path.join(releases_path, self.release)
+        else:
+            new_release_path = None
+
+        return {
+            'project_path': self.path,
+            'current_path': current_path,
+            'releases_path': releases_path,
+            'shared_path': shared_path,
+            'previous_release': previous_release,
+            'previous_release_path': previous_release_path,
+            'new_release': self.release,
+            'new_release_path': new_release_path,
+            'unfinished_filename': self.unfinished_filename
+        }
+
+    def delete_path(self, path):
+        if not os.path.lexists(path):
+            return False
+
+        if not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        if not self.module.check_mode:
+            try:
+                shutil.rmtree(path, ignore_errors=False)
+            except Exception as e:
+                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+        return True
+
+    def create_path(self, path):
+        changed = False
+
+        if not os.path.lexists(path):
+            changed = True
+            if not self.module.check_mode:
+                os.makedirs(path)
+
+        elif not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+        return changed
+
+    def check_link(self, path):
+        if os.path.lexists(path):
+            if not os.path.islink(path):
+                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+    def create_link(self, source, link_name):
+        if os.path.islink(link_name):
+            norm_link = os.path.normpath(os.path.realpath(link_name))
+            norm_source = os.path.normpath(os.path.realpath(source))
+            if norm_link == norm_source:
+                changed = False
+            else:
+                changed = True
+                if not self.module.check_mode:
+                    if not os.path.lexists(source):
+                        self.module.fail_json(msg="the symlink target %s does not exist" % source)
+                    tmp_link_name = link_name + '.'
+ self.unfinished_filename + if os.path.islink(tmp_link_name): + os.unlink(tmp_link_name) + os.symlink(source, tmp_link_name) + os.rename(tmp_link_name, link_name) + else: + changed = True + if not self.module.check_mode: + os.symlink(source, link_name) + + return changed + + def remove_unfinished_file(self, new_release_path): + changed = False + unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename) + if os.path.lexists(unfinished_file_path): + changed = True + if not self.module.check_mode: + os.remove(unfinished_file_path) + + return changed + + def remove_unfinished_builds(self, releases_path): + changes = 0 + + for release in os.listdir(releases_path): + if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)): + if self.module.check_mode: + changes += 1 + else: + changes += self.delete_path(os.path.join(releases_path, release)) + + return changes + + def remove_unfinished_link(self, path): + changed = False + + if not self.release: + return changed + + tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename) + if not self.module.check_mode and os.path.exists(tmp_link_name): + changed = True + os.remove(tmp_link_name) + + return changed + + def cleanup(self, releases_path, reserve_version): + changes = 0 + + if os.path.lexists(releases_path): + releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))] + try: + releases.remove(reserve_version) + except ValueError: + pass + + if not self.module.check_mode: + releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True) + for release in releases[self.keep_releases:]: + changes += self.delete_path(os.path.join(releases_path, release)) + elif len(releases) > self.keep_releases: + changes += (len(releases) - self.keep_releases) + + return changes + + def _get_file_args(self, path): + file_args = self.file_args.copy() + file_args['path'] = path + return file_args + + def _get_last_release(self, current_path): + previous_release = None + previous_release_path = None + + if os.path.lexists(current_path): + previous_release_path = os.path.realpath(current_path) + previous_release = os.path.basename(previous_release_path) + + return previous_release, previous_release_path + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(aliases=['dest'], required=True, type='path'), + release=dict(type='str'), + releases_path=dict(type='str', default='releases'), + shared_path=dict(type='path', default='shared'), + current_path=dict(type='path', default='current'), + keep_releases=dict(type='int', default=5), + clean=dict(type='bool', default=True), + unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'), + state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + ), + required_if=[ + ('state', 'finalize', ['release']), + ], + add_file_common_args=True, + supports_check_mode=True + ) + + deploy_helper = DeployHelper(module) + facts = deploy_helper.gather_facts() + + result = { + 'state': deploy_helper.state + } + + changes = 0 + + if deploy_helper.state == 'query': + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'present': + deploy_helper.check_link(facts['current_path']) + changes += deploy_helper.create_path(facts['project_path']) + changes += deploy_helper.create_path(facts['releases_path']) + if deploy_helper.shared_path: + changes += deploy_helper.create_path(facts['shared_path']) + + 
result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'finalize': + if deploy_helper.keep_releases <= 0: + module.fail_json(msg="'keep_releases' should be at least 1") + + changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) + changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + if deploy_helper.clean: + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'clean': + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'absent': + # destroy the facts + result['ansible_facts'] = {'deploy_helper': []} + changes += deploy_helper.delete_path(facts['project_path']) + + if changes > 0: + result['changed'] = True + else: + result['changed'] = False + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py deleted file mode 120000 index da7a3bcfc0..0000000000 --- a/plugins/modules/dimensiondata_network.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/dimensiondata/dimensiondata_network.py \ No newline at end of file diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py new file mode 100644 index 0000000000..80ac17d47d --- /dev/null +++ b/plugins/modules/dimensiondata_network.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Dimension Data +# Authors: +# - Aimon Bustardo +# - Bert Diwa +# - Adam Friedman +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: dimensiondata_network +short_description: Create, update, and delete MCP 1.0 & 2.0 networks +extends_documentation_fragment: + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes + +description: + - Create, update, and delete MCP 1.0 & 2.0 networks. +author: 'Aimon Bustardo (@aimonb)' +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The name of the network domain to create. + required: true + type: str + description: + description: + - Additional description of the network domain. + required: false + type: str + service_plan: + description: + - The service plan, either "ESSENTIALS" or "ADVANCED". + - MCP 2.0 Only. + choices: [ESSENTIALS, ADVANCED] + default: ESSENTIALS + type: str + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present + type: str +""" + +EXAMPLES = r""" +- name: Create an MCP 1.0 network + community.general.dimensiondata_network: + region: na + location: NA5 + name: mynet + +- name: Create an MCP 2.0 network + community.general.dimensiondata_network: + region: na + mcp_user: my_user + mcp_password: my_password + location: NA9 + name: mynet + service_plan: ADVANCED + +- name: Delete a network + community.general.dimensiondata_network: + region: na + location: NA1 + name: mynet + state: absent +""" + +RETURN = r""" +network: + description: Dictionary describing the network. + returned: On success when O(state=present). + type: complex + contains: + id: + description: Network ID. + type: str + sample: "8c787000-a000-4050-a215-280893411a7d" + name: + description: Network name. + type: str + sample: "My network" + description: + description: Network description. + type: str + sample: "My network description" + location: + description: Datacenter location. + type: str + sample: NA3 + status: + description: Network status. (MCP 2.0 only). + type: str + sample: NORMAL + private_net: + description: Private network subnet. (MCP 1.0 only). + type: str + sample: "10.2.3.0" + multicast: + description: Multicast enabled? (MCP 1.0 only). + type: bool + sample: false +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule +from ansible.module_utils.common.text.converters import to_native + +if HAS_LIBCLOUD: + from libcloud.compute.base import NodeLocation + from libcloud.common.dimensiondata import DimensionDataAPIException + + +class DimensionDataNetworkModule(DimensionDataModule): + """ + The dimensiondata_network module for Ansible. + """ + + def __init__(self): + """ + Create a new Dimension Data network module. + """ + + super(DimensionDataNetworkModule, self).__init__( + module=AnsibleModule( + argument_spec=DimensionDataModule.argument_spec_with_wait( + name=dict(type='str', required=True), + description=dict(type='str'), + service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), + state=dict(default='present', choices=['present', 'absent']) + ), + required_together=DimensionDataModule.required_together() + ) + ) + + self.name = self.module.params['name'] + self.description = self.module.params['description'] + self.service_plan = self.module.params['service_plan'] + self.state = self.module.params['state'] + + def state_present(self): + network = self._get_network() + + if network: + self.module.exit_json( + changed=False, + msg='Network already exists', + network=self._network_to_dict(network) + ) + + network = self._create_network() + + self.module.exit_json( + changed=True, + msg='Created network "%s" in datacenter "%s".' 
% (self.name, self.location),
+            network=self._network_to_dict(network)
+        )
+
+    def state_absent(self):
+        network = self._get_network()
+
+        if not network:
+            self.module.exit_json(
+                changed=False,
+                msg='Network "%s" does not exist' % self.name,
+                network=self._network_to_dict(network)
+            )
+
+        self._delete_network(network)
+
+    def _get_network(self):
+        if self.mcp_version == '1.0':
+            networks = self.driver.list_networks(location=self.location)
+        else:
+            networks = self.driver.ex_list_network_domains(location=self.location)
+
+        matched_network = [network for network in networks if network.name == self.name]
+        if matched_network:
+            return matched_network[0]
+
+        return None
+
+    def _network_to_dict(self, network):
+        network_dict = dict(
+            id=network.id,
+            name=network.name,
+            description=network.description
+        )
+
+        if isinstance(network.location, NodeLocation):
+            network_dict['location'] = network.location.id
+        else:
+            network_dict['location'] = network.location
+
+        if self.mcp_version == '1.0':
+            network_dict['private_net'] = network.private_net
+            network_dict['multicast'] = network.multicast
+            network_dict['status'] = None
+        else:
+            network_dict['private_net'] = None
+            network_dict['multicast'] = None
+            network_dict['status'] = network.status
+
+        return network_dict
+
+    def _create_network(self):
+
+        # Make sure the service_plan argument is defined
+        if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
+            self.module.fail_json(
+                msg='service_plan required when creating network and location is MCP 2.0'
+            )
+
+        # Create network
+        try:
+            if self.mcp_version == '1.0':
+                network = self.driver.ex_create_network(
+                    self.location,
+                    self.name,
+                    description=self.description
+                )
+            else:
+                network = self.driver.ex_create_network_domain(
+                    self.location,
+                    self.name,
+                    self.module.params['service_plan'],
+                    description=self.description
+                )
+        except DimensionDataAPIException as e:
+
+            self.module.fail_json(
+                msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
+            )
+
+        if self.module.params['wait'] is True:
+            network = self._wait_for_network_state(network.id, 'NORMAL')
+
+        return network
+
+    def _delete_network(self, network):
+        try:
+            if self.mcp_version == '1.0':
+                deleted = self.driver.ex_delete_network(network)
+            else:
+                deleted = self.driver.ex_delete_network_domain(network)
+
+            if deleted:
+                self.module.exit_json(
+                    changed=True,
+                    msg="Deleted network with id %s" % network.id
+                )
+
+            self.module.fail_json(
+                msg="Unexpected failure deleting network with id %s" % network.id
+            )
+
+        except DimensionDataAPIException as e:
+            self.module.fail_json(
+                msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
+            )
+
+    def _wait_for_network_state(self, net_id, state_to_wait_for):
+        try:
+            return self.driver.connection.wait_for_state(
+                state_to_wait_for,
+                self.driver.ex_get_network_domain,
+                self.module.params['wait_poll_interval'],
+                self.module.params['wait_time'],
+                net_id
+            )
+        except DimensionDataAPIException as e:
+            self.module.fail_json(
+                msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
+                exception=traceback.format_exc()
+            )
+
+
+def main():
+    module = DimensionDataNetworkModule()
+    if module.state == 'present':
+        module.state_present()
+    elif module.state == 'absent':
+        module.state_absent()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py
deleted file mode 120000
index 9f6930aa0c..0000000000
--- 
a/plugins/modules/dimensiondata_vlan.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/dimensiondata/dimensiondata_vlan.py \ No newline at end of file diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py new file mode 100644 index 0000000000..8f3de75b25 --- /dev/null +++ b/plugins/modules/dimensiondata_vlan.py @@ -0,0 +1,562 @@ +#!/usr/bin/python +# Copyright (c) 2016 Dimension Data +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Authors: +# - Adam Friedman + +from __future__ import annotations + +DOCUMENTATION = r""" +module: dimensiondata_vlan +short_description: Manage a VLAN in a Cloud Control network domain +extends_documentation_fragment: + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes + +description: + - Manage VLANs in Cloud Control network domains. +author: 'Adam Friedman (@tintoy)' +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The name of the target VLAN. + type: str + required: true + description: + description: + - A description of the VLAN. + type: str + default: '' + network_domain: + description: + - The ID or name of the target network domain. + required: true + type: str + private_ipv4_base_address: + description: + - The base address for the VLAN's IPv4 network (for example V(192.168.1.0)). + type: str + default: '' + private_ipv4_prefix_size: + description: + - The size of the IPv4 address space, for example V(24). + - Required, if O(private_ipv4_base_address) is specified. + type: int + default: 0 + state: + description: + - The desired state for the target VLAN. + - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist). + choices: [present, absent, readonly] + default: present + type: str + allow_expand: + description: + - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently + possesses. + - If V(false), the module fails under these conditions. + - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Add or update VLAN + community.general.dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan1 + description: A test VLAN + private_ipv4_base_address: 192.168.23.0 + private_ipv4_prefix_size: 24 + state: present + wait: true + +- name: Read / get VLAN details + community.general.dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan1 + state: readonly + wait: true + +- name: Delete a VLAN + community.general.dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan_1 + state: absent + wait: true +""" + +RETURN = r""" +vlan: + description: Dictionary describing the VLAN. + returned: On success when O(state=present) + type: complex + contains: + id: + description: VLAN ID. + type: str + sample: "aaaaa000-a000-4050-a215-2808934ccccc" + name: + description: VLAN name. + type: str + sample: "My VLAN" + description: + description: VLAN description. + type: str + sample: "My VLAN description" + location: + description: Datacenter location. 
+      type: str
+      sample: NA3
+    private_ipv4_base_address:
+      description: The base address for the VLAN's private IPv4 network.
+      type: str
+      sample: 192.168.23.0
+    private_ipv4_prefix_size:
+      description: The prefix size for the VLAN's private IPv4 network.
+      type: int
+      sample: 24
+    private_ipv4_gateway_address:
+      description: The gateway address for the VLAN's private IPv4 network.
+      type: str
+      sample: 192.168.23.1
+    ipv6_base_address:
+      description: The base address for the VLAN's IPv6 network.
+      type: str
+      sample: 2402:9900:111:1195:0:0:0:0
+    ipv6_prefix_size:
+      description: The prefix size for the VLAN's IPv6 network.
+      type: int
+      sample: 64
+    ipv6_gateway_address:
+      description: The gateway address for the VLAN's IPv6 network.
+      type: str
+      sample: 2402:9900:111:1195:0:0:0:1
+    status:
+      description: VLAN status.
+      type: str
+      sample: NORMAL
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
+
+try:
+    from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
+
+    HAS_LIBCLOUD = True
+
+except ImportError:
+    DimensionDataVlan = None
+
+    HAS_LIBCLOUD = False
+
+
+class DimensionDataVlanModule(DimensionDataModule):
+    """
+    The dimensiondata_vlan module for Ansible.
+    """
+
+    def __init__(self):
+        """
+        Create a new Dimension Data VLAN module.
+        """
+
+        super(DimensionDataVlanModule, self).__init__(
+            module=AnsibleModule(
+                argument_spec=DimensionDataModule.argument_spec_with_wait(
+                    name=dict(required=True, type='str'),
+                    description=dict(default='', type='str'),
+                    network_domain=dict(required=True, type='str'),
+                    private_ipv4_base_address=dict(default='', type='str'),
+                    private_ipv4_prefix_size=dict(default=0, type='int'),
+                    allow_expand=dict(default=False, type='bool'),
+                    state=dict(default='present', choices=['present', 'absent', 'readonly'])
+                ),
+                required_together=DimensionDataModule.required_together()
+            )
+        )
+
+        self.name = self.module.params['name']
+        self.description = self.module.params['description']
+        self.network_domain_selector = self.module.params['network_domain']
+        self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
+        self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
+        self.state = self.module.params['state']
+        self.allow_expand = self.module.params['allow_expand']
+
+        if self.wait and self.state != 'present':
+            self.module.fail_json(
+                msg='The wait parameter is only supported when state is "present".'
+            )
+
+    def state_present(self):
+        """
+        Ensure that the target VLAN is present.
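+
+        Creates the VLAN when it does not exist yet; otherwise compares it with
+        the module parameters and performs an Edit and/or Expand operation when
+        the two differ.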
+ """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if not vlan: + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format( + self.name, self.network_domain_selector + ), + changed=True + ) + + vlan = self._create_vlan(network_domain) + self.module.exit_json( + msg='Created VLAN "{0}" in network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + else: + diff = VlanDiff(vlan, self.module.params) + if not diff.has_changes(): + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=False + ) + + return + + try: + diff.ensure_legal_change() + except InvalidVlanChangeError as invalid_vlan_change: + self.module.fail_json( + msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format( + self.name, self.network_domain_selector, invalid_vlan_change + ) + ) + + if diff.needs_expand() and not self.allow_expand: + self.module.fail_json( + msg='The configured private IPv4 network size ({0}-bit prefix) for '.format( + self.private_ipv4_prefix_size + ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format( + vlan.private_ipv4_range_size + ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.' + ) + + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + if diff.needs_edit(): + vlan.name = self.name + vlan.description = self.description + + self.driver.ex_update_vlan(vlan) + + if diff.needs_expand(): + vlan.private_ipv4_range_size = self.private_ipv4_prefix_size + self.driver.ex_expand_vlan(vlan) + + self.module.exit_json( + msg='Updated VLAN "{0}" in network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + def state_readonly(self): + """ + Read the target VLAN's state. + """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if vlan: + self.module.exit_json( + vlan=vlan_to_dict(vlan), + changed=False + ) + else: + self.module.fail_json( + msg='VLAN "{0}" does not exist in network domain "{1}".'.format( + self.name, self.network_domain_selector + ) + ) + + def state_absent(self): + """ + Ensure that the target VLAN is not present. + """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if not vlan: + self.module.exit_json( + msg='VLAN "{0}" is absent from network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + changed=False + ) + + return + + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + self._delete_vlan(vlan) + + self.module.exit_json( + msg='Deleted VLAN "{0}" from network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + changed=True + ) + + def _get_vlan(self, network_domain): + """ + Retrieve the target VLAN details from CloudControl. + + :param network_domain: The target network domain. + :return: The VLAN, or None if the target VLAN was not found. 
+ :rtype: DimensionDataVlan + """ + + vlans = self.driver.ex_list_vlans( + location=self.location, + network_domain=network_domain + ) + matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] + if matching_vlans: + return matching_vlans[0] + + return None + + def _create_vlan(self, network_domain): + vlan = self.driver.ex_create_vlan( + network_domain, + self.name, + self.private_ipv4_base_address, + self.description, + self.private_ipv4_prefix_size + ) + + if self.wait: + vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL') + + return vlan + + def _delete_vlan(self, vlan): + try: + self.driver.ex_delete_vlan(vlan) + + # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). + if self.wait: + self._wait_for_vlan_state(vlan, 'NOT_FOUND') + + except DimensionDataAPIException as api_exception: + self.module.fail_json( + msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format( + vlan.id, api_exception.msg + ) + ) + + def _wait_for_vlan_state(self, vlan, state_to_wait_for): + network_domain = self._get_network_domain() + + wait_poll_interval = self.module.params['wait_poll_interval'] + wait_time = self.module.params['wait_time'] + + # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. + + try: + return self.driver.connection.wait_for_state( + state_to_wait_for, + self.driver.ex_get_vlan, + wait_poll_interval, + wait_time, + vlan + ) + + except DimensionDataAPIException as api_exception: + if api_exception.code != 'RESOURCE_NOT_FOUND': + raise + + return DimensionDataVlan( + id=vlan.id, + status='NOT_FOUND', + name='', + description='', + private_ipv4_range_address='', + private_ipv4_range_size=0, + ipv4_gateway='', + ipv6_range_address='', + ipv6_range_size=0, + ipv6_gateway='', + location=self.location, + network_domain=network_domain + ) + + def _get_network_domain(self): + """ + Retrieve the target network domain from the Cloud Control API. + + :return: The network domain. + """ + + try: + return self.get_network_domain( + self.network_domain_selector, self.location + ) + except UnknownNetworkError: + self.module.fail_json( + msg='Cannot find network domain "{0}" in datacenter "{1}".'.format( + self.network_domain_selector, self.location + ) + ) + + return None + + +class InvalidVlanChangeError(Exception): + """ + Error raised when an illegal change to VLAN state is attempted. + """ + + pass + + +class VlanDiff(object): + """ + Represents differences between VLAN information (from CloudControl) and module parameters. + """ + + def __init__(self, vlan, module_params): + """ + + :param vlan: The VLAN information from CloudControl. + :type vlan: DimensionDataVlan + :param module_params: The module parameters. + :type module_params: dict + """ + + self.vlan = vlan + self.module_params = module_params + + self.name_changed = module_params['name'] != vlan.name + self.description_changed = module_params['description'] != vlan.description + self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address + self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size + + # Is configured prefix size greater than or less than the actual prefix size? 
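+        # (For example: an existing prefix of 24 and a requested prefix of 23
+        # give a difference of -1. A smaller prefix means a larger network, so
+        # a negative difference marks the VLAN for an Expand operation.)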
+        private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
+        self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
+        self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
+
+    def has_changes(self):
+        """
+        Does the VlanDiff represent any changes between the VLAN and module configuration?
+
+        :return: True, if there are changes; otherwise, False.
+        """
+
+        return self.needs_edit() or self.needs_expand()
+
+    def ensure_legal_change(self):
+        """
+        Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
+
+        - private_ipv4_base_address cannot be changed
+        - private_ipv4_prefix_size must be less than or equal to the VLAN's existing private_ipv4_range_size
+
+        :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
+        """
+
+        # Cannot change base address for private IPv4 network.
+        if self.private_ipv4_base_address_changed:
+            raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
+
+        # Cannot shrink private IPv4 network (by increasing prefix size).
+        if self.private_ipv4_prefix_size_increased:
+            raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
+
+    def needs_edit(self):
+        """
+        Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
+
+        :return: True, if an Edit operation is required; otherwise, False.
+        """
+
+        return self.name_changed or self.description_changed
+
+    def needs_expand(self):
+        """
+        Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
+
+        The VLAN's network is expanded by reducing the size of its network prefix.
+
+        :return: True, if an Expand operation is required; otherwise, False.
+ """ + + return self.private_ipv4_prefix_size_decreased + + +def vlan_to_dict(vlan): + return { + 'id': vlan.id, + 'name': vlan.name, + 'description': vlan.description, + 'location': vlan.location.id, + 'private_ipv4_base_address': vlan.private_ipv4_range_address, + 'private_ipv4_prefix_size': vlan.private_ipv4_range_size, + 'private_ipv4_gateway_address': vlan.ipv4_gateway, + 'ipv6_base_address': vlan.ipv6_range_address, + 'ipv6_prefix_size': vlan.ipv6_range_size, + 'ipv6_gateway_address': vlan.ipv6_gateway, + 'status': vlan.status + } + + +def main(): + module = DimensionDataVlanModule() + + if module.state == 'present': + module.state_present() + elif module.state == 'readonly': + module.state_readonly() + elif module.state == 'absent': + module.state_absent() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py deleted file mode 120000 index 1acf222f94..0000000000 --- a/plugins/modules/discord.py +++ /dev/null @@ -1 +0,0 @@ -./notification/discord.py \ No newline at end of file diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py new file mode 100644 index 0000000000..9cb732eb02 --- /dev/null +++ b/plugins/modules/discord.py @@ -0,0 +1,220 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Christian Wollinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: discord +short_description: Send Discord messages +version_added: 3.1.0 +description: + - Sends a message to a Discord channel using the Discord webhook API. +author: Christian Wollinger (@cwollinger) +seealso: + - name: API documentation + description: Documentation for Discord API. + link: https://discord.com/developers/docs/resources/webhook#execute-webhook +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + webhook_id: + description: + - The webhook ID. + - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).' + required: true + type: str + webhook_token: + description: + - The webhook token. + - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).' + required: true + type: str + content: + description: + - Content of the message to the Discord channel. + - At least one of O(content) and O(embeds) must be specified. + type: str + username: + description: + - Overrides the default username of the webhook. + type: str + avatar_url: + description: + - Overrides the default avatar of the webhook. + type: str + tts: + description: + - Set this to V(true) if this is a TTS (Text to Speech) message. + type: bool + default: false + embeds: + description: + - Send messages as Embeds to the Discord channel. + - Embeds can have a colored border, embedded images, text fields and more. + - 'Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object).' + - At least one of O(content) and O(embeds) must be specified. 
+    type: list
+    elements: dict
+"""
+
+EXAMPLES = r"""
+- name: Send a message to the Discord channel
+  community.general.discord:
+    webhook_id: "00000"
+    webhook_token: "XXXYYY"
+    content: "This is a message from ansible"
+
+- name: Send a message to the Discord channel with specific username and avatar
+  community.general.discord:
+    webhook_id: "00000"
+    webhook_token: "XXXYYY"
+    content: "This is a message from ansible"
+    username: Ansible
+    avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+
+- name: Send an embedded message to the Discord channel
+  community.general.discord:
+    webhook_id: "00000"
+    webhook_token: "XXXYYY"
+    embeds:
+      - title: "Embedded message"
+        description: "This is an embedded message"
+        footer:
+          text: "Author: Ansible"
+        image:
+          url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+
+- name: Send two embedded messages
+  community.general.discord:
+    webhook_id: "00000"
+    webhook_token: "XXXYYY"
+    embeds:
+      - title: "First message"
+        description: "This is my first embedded message"
+        footer:
+          text: "Author: Ansible"
+        image:
+          url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+      - title: "Second message"
+        description: "This is my second embedded message"
+        footer:
+          text: "Author: Ansible"
+          icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+        fields:
+          - name: "Field 1"
+            value: "Value of my first field"
+          - name: "Field 2"
+            value: "Value of my second field"
+        timestamp: "{{ ansible_date_time.iso8601 }}"
+"""
+
+RETURN = r"""
+http_code:
+  description:
+    - Response code returned by the Discord API.
+  returned: always
+  type: int
+  sample: 204
+"""
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+
+
+def discord_check_mode(module):
+
+    webhook_id = module.params['webhook_id']
+    webhook_token = module.params['webhook_token']
+
+    headers = {
+        'content-type': 'application/json'
+    }
+
+    url = "https://discord.com/api/webhooks/%s/%s" % (
+        webhook_id, webhook_token)
+
+    response, info = fetch_url(module, url, method='GET', headers=headers)
+    return response, info
+
+
+def discord_text_msg(module):
+
+    webhook_id = module.params['webhook_id']
+    webhook_token = module.params['webhook_token']
+    content = module.params['content']
+    user = module.params['username']
+    avatar_url = module.params['avatar_url']
+    tts = module.params['tts']
+    embeds = module.params['embeds']
+
+    headers = {
+        'content-type': 'application/json'
+    }
+
+    url = "https://discord.com/api/webhooks/%s/%s" % (
+        webhook_id, webhook_token)
+
+    payload = {
+        'content': content,
+        'username': user,
+        'avatar_url': avatar_url,
+        'tts': tts,
+        'embeds': embeds,
+    }
+
+    payload = module.jsonify(payload)
+
+    response, info = fetch_url(module, url, data=payload, headers=headers, method='POST')
+    return response, info
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            webhook_id=dict(type='str', required=True),
+            webhook_token=dict(type='str', required=True, no_log=True),
+            content=dict(type='str'),
+            username=dict(type='str'),
+            avatar_url=dict(type='str'),
+            tts=dict(type='bool', default=False),
+            embeds=dict(type='list', elements='dict'),
+        ),
+        required_one_of=[['content', 'embeds']],
+        supports_check_mode=True
+    )
+
+    result = dict(
+        changed=False,
+        http_code='',
+    )
+
+    if module.check_mode:
+        response, info = discord_check_mode(module)
+        if info['status'] != 200:
+            try:
+                module.fail_json(http_code=info['status'],
+                                 msg=info['msg'], response=module.from_json(info['body']), info=info)
+            except Exception:
+                module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
+        else:
+            module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read()))
+    else:
+        response, info = discord_text_msg(module)
+        if info['status'] != 204:
+            try:
+                module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
+            except Exception:
+                module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
+        else:
+            module.exit_json(msg=info['msg'], changed=True, http_code=info['status'])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py
new file mode 100644
index 0000000000..f2ee357072
--- /dev/null
+++ b/plugins/modules/django_check.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: django_check
+author:
+  - Alexei Znamensky (@russoz)
+short_description: Wrapper for C(django-admin check)
+version_added: 9.1.0
+description:
+  - This module is a wrapper for the execution of C(django-admin check).
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.django
+options:
+  databases:
+    description:
+      - Specify databases to run checks against.
+      - If not specified, Django does not run database tests.
+      - The parameter has been renamed to O(databases) in community.general 11.3.0. The old name is still available as an alias.
+    type: list
+    elements: str
+    aliases: ["database"]
+  deploy:
+    description:
+      - Include additional checks relevant in a deployment setting.
+    type: bool
+    default: false
+  fail_level:
+    description:
+      - Message level that triggers failure.
+      - Default is the Django default value. Check the documentation for the version being used.
+    type: str
+    choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG]
+  tags:
+    description:
+      - Restrict checks to specific tags.
+    type: list
+    elements: str
+  apps:
+    description:
+      - Restrict checks to specific applications.
+      - Default is to check all applications.
+    type: list
+    elements: str
+notes:
+  - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc).
+  - The module fails if RV(ignore:rc) is not zero.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+"""

+EXAMPLES = r"""
+- name: Check the entire project
+  community.general.django_check:
+    settings: myproject.settings
+
+- name: Check the project using specific databases
+  community.general.django_check:
+    database:
+      - somedb
+      - myotherdb
+    settings: fancysite.settings
+    pythonpath: /home/joedoe/project/fancysite
+    venv: /home/joedoe/project/fancysite/venv
+"""
+
+RETURN = r"""
+run_info:
+  description: Command-line execution information.
+  type: dict
+  returned: success and O(verbosity) >= 3
+version:
+  description: Version of Django.
+ type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCheck(DjangoModuleHelper): + module = dict( + argument_spec=dict( + databases=dict(type="list", elements="str", aliases=["database"]), + deploy=dict(type="bool", default=False), + fail_level=dict(type="str", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]), + tags=dict(type="list", elements="str"), + apps=dict(type="list", elements="str"), + ), + supports_check_mode=True, + ) + django_admin_cmd = "check" + django_admin_arg_order = "database_stacked_dash deploy fail_level tags apps" + + def __init_module__(self): + self.vars.set("database_stacked_dash", self.vars.databases, output=False) + + +def main(): + DjangoCheck.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py new file mode 100644 index 0000000000..a6c3f409e5 --- /dev/null +++ b/plugins/modules/django_command.py @@ -0,0 +1,92 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_command +author: + - Alexei Znamensky (@russoz) +short_description: Run Django admin commands +version_added: 9.0.0 +description: + - This module allows the execution of arbitrary Django admin commands. +extends_documentation_fragment: + - community.general.attributes + - community.general.django +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - Django admin command. It must be a valid command accepted by C(python -m django) at the target system. + type: str + required: true + extra_args: + type: list + elements: str + description: + - List of extra arguments passed to the django admin command. +""" + +EXAMPLES = r""" +- name: Check the project + community.general.django_command: + command: check + settings: myproject.settings + +- name: Check the project in specified python path, using virtual environment + community.general.django_command: + command: check + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +import shlex + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + +class DjangoCommand(DjangoModuleHelper): + module = dict( + argument_spec=dict( + command=dict(type="str", required=True), + extra_args=dict(type="list", elements="str"), + ), + supports_check_mode=False, + ) + arg_formats = dict( + extra_args=cmd_runner_fmt.as_list(), + ) + django_admin_arg_order = "extra_args" + + def __init_module__(self): + self.vars.command = shlex.split(self.vars.command) + + +def main(): + DjangoCommand.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py new file mode 100644 index 0000000000..76a31ab0b1 --- /dev/null +++ b/plugins/modules/django_createcachetable.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_createcachetable +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin createcachetable) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin createcachetable). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = r""" +- name: Create cache table in the default database + community.general.django_createcachetable: + settings: myproject.settings + +- name: Create cache table in the other database + community.general.django_createcachetable: + database: myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCreateCacheTable(DjangoModuleHelper): + module = dict( + supports_check_mode=True, + ) + django_admin_cmd = "createcachetable" + django_admin_arg_order = "noinput database_dash dry_run" + _django_args = ["database_dash"] + _check_mode_arg = "dry_run" + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoCreateCacheTable.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_dumpdata.py b/plugins/modules/django_dumpdata.py new file mode 100644 index 0000000000..5c819b2755 --- /dev/null +++ b/plugins/modules/django_dumpdata.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_dumpdata +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin dumpdata) +version_added: 11.3.0 +description: + - This module is a wrapper for the execution of C(django-admin dumpdata). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database + - community.general.django.data +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + all: + description: Dump all records, including those which might otherwise be filtered or modified by a custom manager. + type: bool + indent: + description: + - Indentation size for the output. + - Default is not to indent, so the output is generated in one single line. + type: int + natural_foreign: + description: Use natural keys when serializing for foreign keys. + type: bool + natural_primary: + description: Omit primary keys when serializing. + type: bool + primary_keys: + description: + - List of primary keys to include in the dump. + - Only available when dumping one single model. + type: list + elements: str + aliases: ["pks"] + fixture: + description: + - Path to the output file. + - The fixture filename may end with V(.bz2), V(.gz), V(.lzma) or V(.xz), in which case the corresponding + compression format will be used. + - This corresponds to the C(--output) parameter for the C(django-admin dumpdata) command. + type: path + aliases: [output] + required: true + apps_models: + description: + - Dump only the applications and models listed in the dump. + - Format must be either V(app_label) or V(app_label.ModelName). + - If not passed, all applications and models are to be dumped. + type: list + elements: str +""" + +EXAMPLES = r""" +- name: Dump all data + community.general.django_dumpdata: + settings: myproject.settings + fixture: /tmp/mydata.json + +- name: Dump data excluding certain apps, into a compressed JSON file + community.general.django_dumpdata: + settings: myproject.settings + database: myotherdb + excludes: + - auth + - contenttypes + fixture: /tmp/mydata.json.gz +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+  type: str
+  returned: always
+  sample: 5.1.2
+"""
+
+from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
+
+
+class DjangoDumpData(DjangoModuleHelper):
+    module = dict(
+        argument_spec=dict(
+            all=dict(type="bool"),
+            indent=dict(type="int"),
+            natural_foreign=dict(type="bool"),
+            natural_primary=dict(type="bool"),
+            primary_keys=dict(type="list", elements="str", aliases=["pks"], no_log=False),
+            # the underlying vardict does not allow the name "output"
+            fixture=dict(type="path", required=True, aliases=["output"]),
+            apps_models=dict(type="list", elements="str"),
+        ),
+        supports_check_mode=False,
+    )
+    django_admin_cmd = "dumpdata"
+    django_admin_arg_order = "all format indent excludes database_dash natural_foreign natural_primary primary_keys fixture apps_models"
+    _django_args = ["data", "database_dash"]
+
+    def __init_module__(self):
+        self.vars.set("database_dash", self.vars.database, output=False)
+
+
+def main():
+    DjangoDumpData.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/django_loaddata.py b/plugins/modules/django_loaddata.py
new file mode 100644
index 0000000000..75b388de9a
--- /dev/null
+++ b/plugins/modules/django_loaddata.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+# Copyright (c) 2025, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: django_loaddata
+author:
+  - Alexei Znamensky (@russoz)
+short_description: Wrapper for C(django-admin loaddata)
+version_added: 11.3.0
+description:
+  - This module is a wrapper for the execution of C(django-admin loaddata).
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.django
+  - community.general.django.database
+  - community.general.django.data
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  app:
+    description: Specifies a single app to look for fixtures in rather than looking in all apps.
+    type: str
+  ignore_non_existent:
+    description: Ignores fields and models that may have been removed since the fixture was originally generated.
+    type: bool
+  fixtures:
+    description:
+      - List of paths to the fixture files.
+    type: list
+    elements: path
+"""
+
+EXAMPLES = r"""
+- name: Load fixtures into the default database
+  community.general.django_loaddata:
+    settings: myproject.settings
+    fixtures:
+      - /tmp/mydata.json
+
+- name: Load fixtures into the other database, using virtual environment
+  community.general.django_loaddata:
+    database: myotherdb
+    settings: fancysite.settings
+    pythonpath: /home/joedoe/project/fancysite
+    venv: /home/joedoe/project/fancysite/venv
+    fixtures:
+      - /tmp/mydata.json
+"""
+
+RETURN = r"""
+run_info:
+  description: Command-line execution information.
+  type: dict
+  returned: success and O(verbosity) >= 3
+version:
+  description: Version of Django.
+ type: str + returned: always + sample: 5.1.2 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoLoadData(DjangoModuleHelper): + module = dict( + argument_spec=dict( + app=dict(type="str"), + ignore_non_existent=dict(type="bool"), + fixtures=dict(type="list", elements="path"), + ), + supports_check_mode=False, + ) + django_admin_cmd = "loaddata" + django_admin_arg_order = "database_dash ignore_non_existent app format excludes fixtures" + _django_args = ["data", "database_dash"] + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoLoadData.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py deleted file mode 120000 index f9a9c23022..0000000000 --- a/plugins/modules/django_manage.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/django_manage.py \ No newline at end of file diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py new file mode 100644 index 0000000000..ddda99849e --- /dev/null +++ b/plugins/modules/django_manage.py @@ -0,0 +1,352 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Alexei Znamensky +# Copyright (c) 2013, Scott Anderson +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: django_manage +short_description: Manages a Django application +description: + - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the O(virtualenv) parameter, + all management commands are executed by the given C(virtualenv) installation. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - The name of the Django management command to run. The commands listed below are built in this module and have some + basic parameter validation. + - V(collectstatic) - Collects the static files into C(STATIC_ROOT). + - V(createcachetable) - Creates the cache tables for use with the database cache backend. + - V(flush) - Removes all data from the database. + - V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database. + - V(migrate) - Synchronizes the database state with models and migrations. + - V(test) - Runs tests for all installed apps. + - Custom commands can be entered, but they fail unless they are known to Django. Custom commands that may prompt for + user input should be run with the C(--noinput) flag. + - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. See note about supported + versions of Django. + type: str + required: true + project_path: + description: + - The path to the root of the Django application where C(manage.py) lives. + type: path + required: true + aliases: [app_path, chdir] + settings: + description: + - The Python path to the application's settings module, such as V(myapp.settings). + type: path + required: false + pythonpath: + description: + - A directory to add to the Python path. Typically used to include the settings module if it is located external to + the application directory. + - This would be equivalent to adding O(pythonpath)'s value to the E(PYTHONPATH) environment variable. 
+    type: path
+    required: false
+    aliases: [python_path]
+  virtualenv:
+    description:
+      - An optional path to a C(virtualenv) installation to use while running the manage application.
+      - The virtual environment must exist, otherwise the module fails.
+    type: path
+    aliases: [virtual_env]
+  apps:
+    description:
+      - A space-delimited list of apps to target. Used by the V(test) command.
+    type: str
+    required: false
+  cache_table:
+    description:
+      - The name of the table used for database-backed caching. Used by the V(createcachetable) command.
+    type: str
+    required: false
+  clear:
+    description:
+      - Clear the existing files before trying to copy or link the original file.
+      - Used only with the V(collectstatic) command. The C(--noinput) argument is added automatically.
+    required: false
+    default: false
+    type: bool
+  database:
+    description:
+      - The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb), and V(migrate) commands.
+    type: str
+    required: false
+  failfast:
+    description:
+      - Fail the command immediately if a test fails. Used by the V(test) command.
+    required: false
+    default: false
+    type: bool
+    aliases: [fail_fast]
+  fixtures:
+    description:
+      - A space-delimited list of fixture file names to load in the database. B(Required) by the V(loaddata) command.
+    type: str
+    required: false
+  skip:
+    description:
+      - Skips over out-of-order missing migrations. This parameter can only be used with the V(migrate) command.
+    required: false
+    type: bool
+  merge:
+    description:
+      - Runs out-of-order or missing migrations as they are not rollback migrations. This parameter can only be used with
+        the V(migrate) command.
+    required: false
+    type: bool
+  link:
+    description:
+      - Creates links to the files instead of copying them. This parameter can only be used with the V(collectstatic) command.
+    required: false
+    type: bool
+  testrunner:
+    description:
+      - Controls the test runner class that is used to execute tests.
+      - This parameter is passed as-is to C(manage.py).
+    type: str
+    required: false
+    aliases: [test_runner]
+
+notes:
+  - 'B(ATTENTION): Support for Django releases older than 4.1 has been removed in community.general version 9.0.0. While the
+    module allows for free-form commands, not verifying the version of Django being used, it is B(strongly recommended) to
+    use a more recent version of the framework.'
+  - Please note that Django 4.1 requires Python 3.8 or greater.
+  - This module does not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not
+    already exist at the given location. This behavior changed in community.general version 9.0.0.
+  - The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip).
+  - Unfortunately, this module relies on English error messages from the V(createcachetable) command to detect whether the table already exists.
+  - To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings.
+  - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang, for example C(#!/usr/bin/env
+    python), for invoking the appropriate Python interpreter.
+seealso:
+  - name: django-admin and manage.py Reference
+    description: Reference for C(django-admin) or C(manage.py) commands.
+    link: https://docs.djangoproject.com/en/4.1/ref/django-admin/
+  - name: Django Download page
+    description: The page showing how to get Django and the timeline of supported releases.
+ link: https://www.djangoproject.com/download/ + - name: What Python version can I use with Django? + description: From the Django FAQ, the response to Python requirements for the framework. + link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django +requirements: ["django >= 4.1"] +author: + - Alexei Znamensky (@russoz) + - Scott Anderson (@tastychutney) +""" + +EXAMPLES = r""" +- name: Run cleanup on the application installed in django_dir + community.general.django_manage: + command: clearsessions + project_path: "{{ django_dir }}" + +- name: Load the initial_data fixture into the application + community.general.django_manage: + command: loaddata + project_path: "{{ django_dir }}" + fixtures: "{{ initial_data }}" + +- name: Run syncdb on the application + community.general.django_manage: + command: migrate + project_path: "{{ django_dir }}" + settings: "{{ settings_app_name }}" + pythonpath: "{{ settings_dir }}" + virtualenv: "{{ virtualenv_dir }}" + +- name: Run the SmokeTest test case from the main app. Useful for testing deploys + community.general.django_manage: + command: test + project_path: "{{ django_dir }}" + apps: main.SmokeTest + +- name: Create an initial superuser + community.general.django_manage: + command: "createsuperuser --noinput --username=admin --email=admin@example.com" + project_path: "{{ django_dir }}" +""" + +import os +import sys +import shlex + +from ansible.module_utils.basic import AnsibleModule + + +def _fail(module, cmd, out, err, **kwargs): + msg = '' + if out: + msg += "stdout: %s" % (out, ) + if err: + msg += "\n:stderr: %s" % (err, ) + module.fail_json(cmd=cmd, msg=msg, **kwargs) + + +def _ensure_virtualenv(module): + + venv_param = module.params['virtualenv'] + if venv_param is None: + return + + vbin = os.path.join(venv_param, 'bin') + activate = os.path.join(vbin, 'activate') + + if not os.path.exists(activate): + module.fail_json(msg='%s does not point to a valid virtual environment' % venv_param) + + os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) + os.environ["VIRTUAL_ENV"] = venv_param + + +def createcachetable_check_changed(output): + return "already exists" not in output + + +def flush_filter_output(line): + return "Installed" in line and "Installed 0 object" not in line + + +def loaddata_filter_output(line): + return "Installed" in line and "Installed 0 object" not in line + + +def migrate_filter_output(line): + return ("Migrating forwards " in line) \ + or ("Installed" in line and "Installed 0 object" not in line) \ + or ("Applying" in line) + + +def collectstatic_filter_output(line): + return line and "0 static files" not in line + + +def main(): + command_allowed_param_map = dict( + createcachetable=('cache_table', 'database', ), + flush=('database', ), + loaddata=('database', 'fixtures', ), + test=('failfast', 'testrunner', 'apps', ), + migrate=('apps', 'skip', 'merge', 'database',), + collectstatic=('clear', 'link', ), + ) + + command_required_param_map = dict( + loaddata=('fixtures', ), + ) + + # forces --noinput on every command that needs it + noinput_commands = ( + 'flush', + 'migrate', + 'test', + 'collectstatic', + ) + + # These params are allowed for certain commands only + specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner') + + # These params are automatically added to the command if present + general_params = ('settings', 'pythonpath', 'database',) + specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link') + 
end_of_command_params = ('apps', 'cache_table', 'fixtures') + + module = AnsibleModule( + argument_spec=dict( + command=dict(required=True, type='str'), + project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']), + settings=dict(type='path'), + pythonpath=dict(type='path', aliases=['python_path']), + virtualenv=dict(type='path', aliases=['virtual_env']), + + apps=dict(), + cache_table=dict(type='str'), + clear=dict(default=False, type='bool'), + database=dict(type='str'), + failfast=dict(default=False, type='bool', aliases=['fail_fast']), + fixtures=dict(type='str'), + testrunner=dict(type='str', aliases=['test_runner']), + skip=dict(type='bool'), + merge=dict(type='bool'), + link=dict(type='bool'), + ), + ) + + command_split = shlex.split(module.params['command']) + command_bin = command_split[0] + project_path = module.params['project_path'] + virtualenv = module.params['virtualenv'] + + for param in specific_params: + value = module.params[param] + if value and param not in command_allowed_param_map[command_bin]: + module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin)) + + for param in command_required_param_map.get(command_bin, ()): + if not module.params[param]: + module.fail_json(msg='%s param is required for command=%s' % (param, command_bin)) + + _ensure_virtualenv(module) + + run_cmd_args = ["./manage.py"] + command_split + + if command_bin in noinput_commands and '--noinput' not in command_split: + run_cmd_args.append("--noinput") + + for param in general_params: + if module.params[param]: + run_cmd_args.append('--%s=%s' % (param, module.params[param])) + + for param in specific_boolean_params: + if module.params[param]: + run_cmd_args.append('--%s' % param) + + # these params always get tacked on the end of the command + for param in end_of_command_params: + if module.params[param]: + if param in ('fixtures', 'apps'): + run_cmd_args.extend(shlex.split(module.params[param])) + else: + run_cmd_args.append(module.params[param]) + + rc, out, err = module.run_command(run_cmd_args, cwd=project_path) + if rc != 0: + if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err: + out = 'already exists.' 
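+        # A createcachetable run that fails only because the table already
+        # exists is not an error: the sentinel output set here makes
+        # createcachetable_check_changed() report changed=False further down.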
+ else: + if "Unknown command:" in err: + _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin) + _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path) + + changed = False + + lines = out.split('\n') + filt = globals().get(command_bin + "_filter_output", None) + if filt: + filtered_output = list(filter(filt, lines)) + if len(filtered_output): + changed = True + check_changed = globals().get("{0}_check_changed".format(command_bin), None) + if check_changed: + changed = check_changed(out) + + module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path, + virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py new file mode 100644 index 0000000000..847e912115 --- /dev/null +++ b/plugins/modules/dnf_config_manager.py @@ -0,0 +1,225 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Andrew Hyatt +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: dnf_config_manager +short_description: Enable or disable dnf repositories using config-manager +version_added: 8.2.0 +description: + - This module enables or disables repositories using the C(dnf config-manager) sub-command. +author: Andrew Hyatt (@ahyattdev) +requirements: + - dnf + - dnf-plugins-core +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Repository ID, for example V(crb). + default: [] + required: false + type: list + elements: str + state: + description: + - Whether the repositories should be V(enabled) or V(disabled). + default: enabled + required: false + type: str + choices: [enabled, disabled] +notes: + - Does not work with C(dnf5). +seealso: + - module: ansible.builtin.dnf + - module: ansible.builtin.yum_repository +""" + +EXAMPLES = r""" +- name: Ensure the crb repository is enabled + community.general.dnf_config_manager: + name: crb + state: enabled + +- name: Ensure the appstream and zfs repositories are disabled + community.general.dnf_config_manager: + name: + - appstream + - zfs + state: disabled +""" + +RETURN = r""" +repo_states_pre: + description: Repo IDs before action taken. + returned: success + type: dict + contains: + enabled: + description: Enabled repository IDs. + returned: success + type: list + elements: str + disabled: + description: Disabled repository IDs. + returned: success + type: list + elements: str + sample: + enabled: + - appstream + - baseos + - crb + disabled: + - appstream-debuginfo + - appstream-source + - baseos-debuginfo + - baseos-source + - crb-debug + - crb-source +repo_states_post: + description: Repository states after action taken. + returned: success + type: dict + contains: + enabled: + description: Enabled repository IDs. + returned: success + type: list + elements: str + disabled: + description: Disabled repository IDs. + returned: success + type: list + elements: str + sample: + enabled: + - appstream + - baseos + - crb + disabled: + - appstream-debuginfo + - appstream-source + - baseos-debuginfo + - baseos-source + - crb-debug + - crb-source +changed_repos: + description: Repositories changed. 
+ returned: success + type: list + elements: str + sample: ["crb"] +""" + +from ansible.module_utils.basic import AnsibleModule +import os +import re + +DNF_BIN = "/usr/bin/dnf" +REPO_ID_RE = re.compile(r'^Repo-id\s*:\s*(\S+)$') +REPO_STATUS_RE = re.compile(r'^Repo-status\s*:\s*(disabled|enabled)$') + + +def get_repo_states(module): + rc, out, err = module.run_command([DNF_BIN, 'repolist', '--all', '--verbose'], check_rc=True) + + repos = dict() + last_repo = '' + for i, line in enumerate(out.split('\n')): + m = REPO_ID_RE.match(line) + if m: + if len(last_repo) > 0: + module.fail_json(msg='dnf repolist parse failure: parsed another repo id before next status') + last_repo = m.group(1) + continue + m = REPO_STATUS_RE.match(line) + if m: + if len(last_repo) == 0: + module.fail_json(msg='dnf repolist parse failure: parsed status before repo id') + repos[last_repo] = m.group(1) + last_repo = '' + return repos + + +def set_repo_states(module, repo_ids, state): + module.run_command([DNF_BIN, 'config-manager', '--assumeyes', '--set-{0}'.format(state)] + repo_ids, check_rc=True) + + +def pack_repo_states_for_return(states): + enabled = [] + disabled = [] + for repo_id in states: + if states[repo_id] == 'enabled': + enabled.append(repo_id) + else: + disabled.append(repo_id) + + # Sort for consistent results + enabled.sort() + disabled.sort() + + return {'enabled': enabled, 'disabled': disabled} + + +def main(): + module_args = dict( + name=dict(type='list', elements='str', default=[]), + state=dict(type='str', choices=['enabled', 'disabled'], default='enabled') + ) + + result = dict( + changed=False + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + + if not os.path.exists(DNF_BIN): + module.fail_json(msg="%s was not found" % DNF_BIN) + + repo_states = get_repo_states(module) + result['repo_states_pre'] = pack_repo_states_for_return(repo_states) + + desired_repo_state = module.params['state'] + names = module.params['name'] + + to_change = [] + for repo_id in names: + if repo_id not in repo_states: + module.fail_json(msg="did not find repo with ID '{0}' in dnf repolist --all --verbose".format(repo_id)) + if repo_states[repo_id] != desired_repo_state: + to_change.append(repo_id) + result['changed'] = len(to_change) > 0 + result['changed_repos'] = to_change + + if module.check_mode: + module.exit_json(**result) + + if len(to_change) > 0: + set_repo_states(module, to_change, desired_repo_state) + + repo_states_post = get_repo_states(module) + result['repo_states_post'] = pack_repo_states_for_return(repo_states_post) + + for repo_id in to_change: + if repo_states_post[repo_id] != desired_repo_state: + module.fail_json(msg="dnf config-manager failed to make '{0}' {1}".format(repo_id, desired_repo_state)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py deleted file mode 120000 index df7d478242..0000000000 --- a/plugins/modules/dnf_versionlock.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/dnf_versionlock.py \ No newline at end of file diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py new file mode 100644 index 0000000000..e6fa546107 --- /dev/null +++ b/plugins/modules/dnf_versionlock.py @@ -0,0 +1,377 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Roberto Moreda +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: dnf_versionlock
+version_added: '4.0.0'
+short_description: Locks package versions in C(dnf) based systems
+description:
+  - Locks package versions using the C(versionlock) plugin in C(dnf) based systems. This plugin takes a set of names and versions
+    for packages and excludes all other versions of those packages. This allows you to, for example, protect packages from
+    being updated to newer versions. The state of the plugin that reflects the locking of packages is the C(locklist).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - The logic of the C(versionlock) plugin for corner cases can be confusing, so please take into account that this module
+        does its best to give a C(check_mode) prediction of what is going to happen. In case of doubt, check the documentation
+        of the plugin.
+      - Sometimes the module could predict changes in C(check_mode) that are not fulfilled because C(versionlock) concludes
+        that there is already an entry in the C(locklist) that matches.
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Package name spec to add to, exclude in, or delete from the C(locklist), using the format expected by the C(dnf repoquery)
+        command.
+      - This parameter is mutually exclusive with O(state=clean).
+    type: list
+    required: false
+    elements: str
+    default: []
+  raw:
+    description:
+      - Do not resolve package name specs to NEVRAs to find the specific versions to lock to. Instead, the package name specs are
+        used as they are. This enables locking to not yet available versions of the package.
+    type: bool
+    default: false
+  state:
+    description:
+      - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or V(clean)) from the C(locklist).
+      - V(present) adds a package name spec to the C(locklist). If there is an installed package that matches, then only that
+        version is added. Otherwise, all available package versions are added.
+      - V(excluded) adds a package name spec as excluded to the C(locklist). It means that packages represented by the package
+        name spec are excluded from transaction operations. All available package versions are added.
+      - V(absent) deletes entries in the C(locklist) that match the package name spec.
+      - V(clean) deletes all entries in the C(locklist). This option is mutually exclusive with O(name).
+    choices: ['absent', 'clean', 'excluded', 'present']
+    type: str
+    default: present
+notes:
+  - In an ideal world, the C(versionlock) plugin would have a dry-run option to know for sure what is going to happen. So
+    far we have to work with a best guess as close as possible to the behaviour inferred from its code.
+  - For most cases where you want to lock and unlock specific versions of a package, this works fairly well.
+  - Does not work with C(dnf5).
+  - This module requires Python 3.6 or greater to run, which should not be a problem for most systems that use C(dnf).
+requirements:
+  - dnf
+  - dnf-plugin-versionlock
+author:
+  - Roberto Moreda (@moreda)
+"""
+
+EXAMPLES = r"""
+- name: Prevent installed nginx from being updated
+  community.general.dnf_versionlock:
+    name: nginx
+    state: present
+
+- name: Prevent multiple packages from being updated
+  community.general.dnf_versionlock:
+    name:
+      - nginx
+      - haproxy
+    state: present
+
+- name: Remove lock from nginx to be updated again
+  community.general.dnf_versionlock:
+    name: nginx
+    state: absent
+
+- name: Exclude bind 32:9.11 from installs or updates
+  community.general.dnf_versionlock:
+    name: bind-32:9.11*
+    state: excluded
+
+- name: Keep bash package in major version 4
+  community.general.dnf_versionlock:
+    name: bash-0:4.*
+    raw: true
+    state: present
+
+- name: Delete all entries in the locklist of versionlock
+  community.general.dnf_versionlock:
+    state: clean
+"""
+
+RETURN = r"""
+locklist_pre:
+  description: Locklist before module execution.
+  returned: success
+  type: list
+  elements: str
+  sample: ["bash-0:4.4.20-1.el8_4.*", "!bind-32:9.11.26-4.el8_4.*"]
+locklist_post:
+  description: Locklist after module execution.
+  returned: success and (not check mode or state is clean)
+  type: list
+  elements: str
+  sample: ["bash-0:4.4.20-1.el8_4.*"]
+specs_toadd:
+  description: Package name specs meant to be added by versionlock.
+  returned: success
+  type: list
+  elements: str
+  sample: ["bash"]
+specs_todelete:
+  description: Package name specs meant to be deleted by versionlock.
+  returned: success
+  type: list
+  elements: str
+  sample: ["bind"]
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import fnmatch
+import os
+import re
+
+DNF_BIN = "/usr/bin/dnf"
+VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf"
+# NEVRA regex.
+NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-(?P<release>.+)\.(?P<arch>.+)$")
+
+
+def do_versionlock(module, command, patterns=None, raw=False):
+    patterns = [] if not patterns else patterns
+    raw_parameter = ["--raw"] if raw else []
+    # Call dnf versionlock using just one full NEVR package-name-spec each
+    # time, because multiple package-name-specs and globs are not well supported.
+    #
+    # This is a workaround for two alleged bugs in the dnf versionlock plugin:
+    # * Multiple package-name-spec arguments don't lock correctly
+    #   (https://bugzilla.redhat.com/show_bug.cgi?id=2013324).
+    # * Locking a version of a not-installed package disallows locking other
+    #   versions later (https://bugzilla.redhat.com/show_bug.cgi?id=2013332)
+    #
+    # NOTE: This is suboptimal in terms of performance if there are more than a
+    # few package-name-spec patterns to lock, because there is one command
+    # execution per pattern. This will improve by changing the strategy once the
+    # mentioned alleged bugs in the dnf versionlock plugin are fixed.
+    if patterns:
+        outs = []
+        for p in patterns:
+            rc, out, err = module.run_command(
+                [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p],
+                check_rc=True)
+            outs.append(out)
+        out = "\n".join(outs)
+    else:
+        rc, out, err = module.run_command(
+            [DNF_BIN, "-q", "versionlock", command], check_rc=True)
+    return out
+
+
+# This is equivalent to the _match function of the versionlock plugin.
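+# For example, the locklist entry "bash-0:4.4.20-1.el8_4.x86_64" matches the
+# patterns "bash", "bash-4.4.20-1.el8_4" or "bash-0:4.4*", since every name
+# form built from the parsed NEVRA fields is tried with fnmatch.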
+def match(entry, pattern): + entry = entry.lstrip('!') + if entry == pattern: + return True + m = NEVRA_RE.match(entry) + if not m: + return False + # indexing a match object with [] is a Python 3.6+ construct + for name in ( + '%s' % m["name"], + '%s.%s' % (m["name"], m["arch"]), + '%s-%s' % (m["name"], m["version"]), + '%s-%s-%s' % (m["name"], m["version"], m["release"]), + '%s-%s:%s' % (m["name"], m["epoch"], m["version"]), + '%s-%s-%s.%s' % (m["name"], m["version"], m["release"], m["arch"]), + '%s-%s:%s-%s' % (m["name"], m["epoch"], m["version"], m["release"]), + '%s:%s-%s-%s.%s' % (m["epoch"], m["name"], m["version"], m["release"], + m["arch"]), + '%s-%s:%s-%s.%s' % (m["name"], m["epoch"], m["version"], m["release"], + m["arch"]) + ): + if fnmatch.fnmatch(name, pattern): + return True + return False + + +def get_packages(module, patterns, only_installed=False): + packages_available_map_name_evrs = {} + rc, out, err = module.run_command( + [DNF_BIN, "-q", "repoquery"] + + (["--installed"] if only_installed else []) + + patterns, + check_rc=True) + + for p in out.split(): + # Extract the NEVRA pattern. + m = NEVRA_RE.match(p) + if not m: + module.fail_json( + msg="failed to parse nevra for %s" % p, + rc=rc, out=out, err=err) + + evr = "%s:%s-%s" % (m["epoch"], + m["version"], + m["release"]) + + packages_available_map_name_evrs.setdefault(m["name"], set()) + packages_available_map_name_evrs[m["name"]].add(evr) + return packages_available_map_name_evrs + + +def get_package_mgr(): + for bin_path in (DNF_BIN,): + if os.path.exists(bin_path): + return "dnf5" if os.path.realpath(bin_path) == "/usr/bin/dnf5" else "dnf" + # fallback to dnf + return "dnf" + + +def get_package_list(module, package_mgr="dnf"): + if package_mgr == "dnf": + return do_versionlock(module, "list").split() + + package_list = [] + if package_mgr == "dnf5": + stanza_start = False + package_name = None + for line in do_versionlock(module, "list").splitlines(): + if line.startswith(("#", " ")): + continue + if line.startswith("Package name:"): + stanza_start = True + dummy, name = line.split(":", 1) + name = name.strip() + pkg_name = get_packages(module, patterns=[name]) + package_name = "%s-%s.*" % (name, pkg_name[name].pop()) + if package_name and package_name not in package_list: + package_list.append(package_name) + if line.startswith("evr"): + dummy, package_version = line.split("=", 1) + package_version = package_version.strip() + if stanza_start: + if package_name and package_name not in package_list: + package_list.append(package_name) + stanza_start = False + return package_list + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="list", elements="str", default=[]), + raw=dict(type="bool", default=False), + state=dict(type="str", default="present", + choices=["present", "absent", "excluded", "clean"]), + ), + supports_check_mode=True, + ) + + patterns = module.params["name"] + raw = module.params["raw"] + state = module.params["state"] + changed = False + msg = "" + + # Check module pre-requisites. + global DNF_BIN + DNF_BIN = module.get_bin_path('dnf', True) + package_mgr = get_package_mgr() + if package_mgr == "dnf" and not os.path.exists(VERSIONLOCK_CONF): + module.fail_json(msg="plugin versionlock is required") + + # Check incompatible options. 
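+    # "clean" wipes the whole locklist and therefore takes no name list; every
+    # other state needs at least one package name spec to operate on.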
+    if state == "clean" and patterns:
+        module.fail_json(msg="clean state is incompatible with a name list")
+    if state != "clean" and not patterns:
+        module.fail_json(msg="name list is required for %s state" % state)
+
+    locklist_pre = get_package_list(module, package_mgr=package_mgr)
+
+    specs_toadd = []
+    specs_todelete = []
+
+    if state in ["present", "excluded"]:
+
+        if raw:
+            # Add raw patterns as specs to add.
+            for p in patterns:
+                if (p if state == "present" else "!" + p) not in locklist_pre:
+                    specs_toadd.append(p)
+        else:
+            # Get available packages that match the patterns.
+            packages_map_name_evrs = get_packages(
+                module,
+                patterns)
+
+            # Get installed packages that match the patterns.
+            packages_installed_map_name_evrs = get_packages(
+                module,
+                patterns,
+                only_installed=True)
+
+            # Obtain the list of package specs that require an entry in the
+            # locklist. This list is composed of:
+            #   a) the non-installed packages list with all available
+            #      versions
+            #   b) the installed packages list
+            packages_map_name_evrs.update(packages_installed_map_name_evrs)
+            for name in packages_map_name_evrs:
+                for evr in packages_map_name_evrs[name]:
+                    locklist_entry = "%s-%s.*" % (name, evr)
+
+                    if (locklist_entry if state == "present" else "!%s" % locklist_entry) not in locklist_pre:
+                        specs_toadd.append(locklist_entry)
+
+        if specs_toadd and not module.check_mode:
+            cmd = "add" if state == "present" else "exclude"
+            msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw)
+
+    elif state == "absent":
+
+        if raw:
+            # Add raw patterns as specs to delete.
+            for p in patterns:
+                if p in locklist_pre:
+                    specs_todelete.append(p)
+
+        else:
+            # Get patterns that match some line in the locklist.
+            for p in patterns:
+                for e in locklist_pre:
+                    if match(e, p):
+                        specs_todelete.append(p)
+
+        if specs_todelete and not module.check_mode:
+            msg = do_versionlock(
+                module, "delete", patterns=specs_todelete, raw=raw)
+
+    elif state == "clean":
+        specs_todelete = locklist_pre
+
+        if specs_todelete and not module.check_mode:
+            msg = do_versionlock(module, "clear")
+
+    if specs_toadd or specs_todelete:
+        changed = True
+
+    response = {
+        "changed": changed,
+        "msg": msg,
+        "locklist_pre": locklist_pre,
+        "specs_toadd": specs_toadd,
+        "specs_todelete": specs_todelete
+    }
+    if not module.check_mode:
+        response["locklist_post"] = get_package_list(module, package_mgr=package_mgr)
+    else:
+        if state == "clean":
+            response["locklist_post"] = []
+
+    module.exit_json(**response)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py
deleted file mode 120000
index b63174a66f..0000000000
--- a/plugins/modules/dnsimple.py
+++ /dev/null
@@ -1 +0,0 @@
-./net_tools/dnsimple.py
\ No newline at end of file
diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py
new file mode 100644
index 0000000000..1e9fc8f317
--- /dev/null
+++ b/plugins/modules/dnsimple.py
@@ -0,0 +1,447 @@
+#!/usr/bin/python
+#
+# Copyright Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: dnsimple
+short_description: Interface with dnsimple.com (a DNS hosting service)
+description:
+  - 'Manages domains and records using the DNSimple API, see the docs: U(http://developer.dnsimple.com/).'
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  account_email:
+    description:
+      - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) are looked for.
+      - 'If those variables are not found, a C(.dnsimple) file is looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).'
+      - C(.dnsimple) config files are only supported in dnsimple-python<2.0.0.
+    type: str
+  account_api_token:
+    description:
+      - Account API token. See O(account_email) for more information.
+    type: str
+  domain:
+    description:
+      - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNSimple.
+      - If omitted, a list of domains is returned.
+      - If domain is present but the domain does not exist, it is created.
+    type: str
+  record:
+    description:
+      - Record to add. If blank, a record for the domain itself is created. Supports the wildcard (*).
+    type: str
+  record_ids:
+    description:
+      - List of record IDs to ensure exist (O(state=present)) or do not exist (O(state=absent)).
+    type: list
+    elements: str
+  type:
+    description:
+      - The type of DNS record to create.
+    choices:
+      - A
+      - ALIAS
+      - CNAME
+      - MX
+      - SPF
+      - URL
+      - TXT
+      - NS
+      - SRV
+      - NAPTR
+      - PTR
+      - AAAA
+      - SSHFP
+      - HINFO
+      - POOL
+      - CAA
+    type: str
+  ttl:
+    description:
+      - The TTL to give the new record in seconds.
+    default: 3600
+    type: int
+  value:
+    description:
+      - Record value.
+      - Must be specified when trying to ensure a record exists.
+    type: str
+  priority:
+    description:
+      - Record priority.
+    type: int
+  state:
+    description:
+      - Whether the record should exist or not.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  solo:
+    description:
+      - Whether the record should be the only one for that record type and record name.
+      - Only use when O(state) is set to V(present) on a record.
+    type: 'bool'
+    default: false
+  sandbox:
+    description:
+      - Use the DNSimple sandbox environment.
+      - Requires a dedicated account in the dnsimple sandbox environment.
+      - Check U(https://developer.dnsimple.com/sandbox/) for more information.
+ type: 'bool' + default: false + version_added: 3.5.0 +requirements: + - "dnsimple >= 2.0.0" +author: "Alex Coomans (@drcapulet)" +""" + +EXAMPLES = r""" +- name: Authenticate using email and API token and fetch all domains + community.general.dnsimple: + account_email: test@example.com + account_api_token: dummyapitoken + delegate_to: localhost + +- name: Delete a domain + community.general.dnsimple: + domain: my.com + state: absent + delegate_to: localhost + +- name: Create a test.my.com A record to point to 127.0.0.1 + community.general.dnsimple: + domain: my.com + record: test + type: A + value: 127.0.0.1 + delegate_to: localhost + register: record + +- name: Delete record using record_ids + community.general.dnsimple: + domain: my.com + record_ids: '{{ record["id"] }}' + state: absent + delegate_to: localhost + +- name: Create a my.com CNAME record to example.com + community.general.dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + state: present + delegate_to: localhost + +- name: Change TTL value for a record + community.general.dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + ttl: 600 + state: present + delegate_to: localhost + +- name: Delete the record + community.general.dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + state: absent + delegate_to: localhost +""" + +RETURN = r"""#""" + +import traceback +import re + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class DNSimpleV2(): + """class which uses dnsimple-python >= 2""" + + def __init__(self, account_email, account_api_token, sandbox, module): + """init""" + self.module = module + self.account_email = account_email + self.account_api_token = account_api_token + self.sandbox = sandbox + self.pagination_per_page = 30 + self.dnsimple_client() + self.dnsimple_account() + + def dnsimple_client(self): + """creates a dnsimple client object""" + if self.account_email and self.account_api_token: + client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general") + else: + msg = "Option account_email or account_api_token not provided. " \ + "Dnsimple authentication with a .dnsimple config file is not " \ + "supported with dnsimple-python>=2.0.0" + raise DNSimpleException(msg) + client.identity.whoami() + self.client = client + + def dnsimple_account(self): + """select a dnsimple account. If a user token is used for authentication, + this user must only have access to a single account""" + account = self.client.identity.whoami().data.account + # user supplied a user token instead of account api token + if not account: + accounts = Accounts(self.client).list_accounts().data + if len(accounts) != 1: + msg = "The provided dnsimple token is a user token with multiple accounts." \ + "Use an account token or a user token with access to a single account." 
\ + "See https://support.dnsimple.com/articles/api-access-token/" + raise DNSimpleException(msg) + account = accounts[0] + self.account = account + + def get_all_domains(self): + """returns a list of all domains""" + domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id) + return [d.__dict__ for d in domain_list] + + def get_domain(self, domain): + """returns a single domain by name or id""" + try: + dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__ + except DNSimpleException as e: + exception_string = str(e.message) + if re.match(r"^Domain .+ not found$", exception_string): + dr = None + else: + raise + return dr + + def create_domain(self, domain): + """create a single domain""" + return self.client.domains.create_domain(self.account.id, domain).data.__dict__ + + def delete_domain(self, domain): + """delete a single domain""" + self.client.domains.delete_domain(self.account.id, domain) + + def get_records(self, zone, dnsimple_filter=None): + """return dns resource records which match a specified filter""" + records_list = self._get_paginated_result(self.client.zones.list_records, + account_id=self.account.id, + zone=zone, filter=dnsimple_filter) + return [d.__dict__ for d in records_list] + + def delete_record(self, domain, rid): + """delete a single dns resource record""" + self.client.zones.delete_record(self.account.id, domain, rid) + + def update_record(self, domain, rid, ttl=None, priority=None): + """update a single dns resource record""" + zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) + result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ + return result + + def create_record(self, domain, name, record_type, content, ttl=None, priority=None): + """create a single dns resource record""" + zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) + return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ + + def _get_paginated_result(self, operation, **options): + """return all results of a paginated api response""" + records_pagination = operation(per_page=self.pagination_per_page, **options).pagination + result_list = [] + for page in range(1, records_pagination.total_pages + 1): + page_data = operation(per_page=self.pagination_per_page, page=page, **options).data + result_list.extend(page_data) + return result_list + + +DNSIMPLE_IMP_ERR = [] +HAS_DNSIMPLE = False +try: + # try to import dnsimple >= 2.0.0 + from dnsimple import Client, DNSimpleException + from dnsimple.service import Accounts + from dnsimple.version import version as dnsimple_version + from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput + HAS_DNSIMPLE = True +except ImportError: + DNSIMPLE_IMP_ERR.append(traceback.format_exc()) + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), + account_api_token=dict(type='str', + no_log=True, + fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), + domain=dict(type='str'), + record=dict(type='str'), + record_ids=dict(type='list', elements='str'), + type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', + 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', + 'PTR', 'AAAA', 'SSHFP', 'HINFO', + 'POOL', 'CAA']), + ttl=dict(type='int', default=3600), + value=dict(type='str'), + 
priority=dict(type='int'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + solo=dict(type='bool', default=False), + sandbox=dict(type='bool', default=False), + ), + required_together=[ + ['record', 'value'] + ], + supports_check_mode=True, + ) + + if not HAS_DNSIMPLE: + module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) + + account_email = module.params.get('account_email') + account_api_token = module.params.get('account_api_token') + domain = module.params.get('domain') + record = module.params.get('record') + record_ids = module.params.get('record_ids') + record_type = module.params.get('type') + ttl = module.params.get('ttl') + value = module.params.get('value') + priority = module.params.get('priority') + state = module.params.get('state') + is_solo = module.params.get('solo') + sandbox = module.params.get('sandbox') + + DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] + + try: + if DNSIMPLE_MAJOR_VERSION < 2: + module.fail_json( + msg='Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0.') + ds = DNSimpleV2(account_email, account_api_token, sandbox, module) + # Let's figure out what operation we want to do + # No domain, return a list + if not domain: + all_domains = ds.get_all_domains() + module.exit_json(changed=False, result=all_domains) + + # Domain & No record + if record is None and not record_ids: + if domain.isdigit(): + typed_domain = int(domain) + else: + typed_domain = str(domain) + dr = ds.get_domain(typed_domain) + # domain does not exist + if state == 'present': + if dr: + module.exit_json(changed=False, result=dr) + else: + if module.check_mode: + module.exit_json(changed=True) + else: + response = ds.create_domain(domain) + module.exit_json(changed=True, result=response) + # state is absent + else: + if dr: + if not module.check_mode: + ds.delete_domain(domain) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + # need the not none check since record could be an empty string + if record is not None: + if not record_type: + module.fail_json(msg="Missing the record type") + if not value: + module.fail_json(msg="Missing the record value") + + records_list = ds.get_records(domain, dnsimple_filter={'name': record}) + rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) + if state == 'present': + changed = False + if is_solo: + # delete any records that have the same name and record type + same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] + if rr: + same_type = [rid for rid in same_type if rid != rr['id']] + if same_type: + if not module.check_mode: + for rid in same_type: + ds.delete_record(domain, rid) + changed = True + if rr: + # check if we need to update + if rr['ttl'] != ttl or rr['priority'] != priority: + if module.check_mode: + module.exit_json(changed=True) + else: + response = ds.update_record(domain, rr['id'], ttl, priority) + module.exit_json(changed=True, result=response) + else: + module.exit_json(changed=changed, result=rr) + else: + # create it + if module.check_mode: + module.exit_json(changed=True) + else: + response = ds.create_record(domain, record, record_type, value, ttl, priority) + module.exit_json(changed=True, result=response) + # state is absent + else: + if rr: + if not module.check_mode: + ds.delete_record(domain, rr['id']) + module.exit_json(changed=True) 
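+                # no record matched the given name, type and value; nothing to delete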
+                else:
+                    module.exit_json(changed=False)
+
+        # Make sure these record_ids either all exist or none
+        if record_ids:
+            current_records = ds.get_records(domain, dnsimple_filter=None)
+            current_record_ids = [str(d['id']) for d in current_records]
+            wanted_record_ids = [str(r) for r in record_ids]
+            if state == 'present':
+                difference = list(set(wanted_record_ids) - set(current_record_ids))
+                if difference:
+                    module.fail_json(msg="Missing the following records: %s" % difference)
+                else:
+                    module.exit_json(changed=False)
+            # state is absent
+            else:
+                difference = list(set(wanted_record_ids) & set(current_record_ids))
+                if difference:
+                    if not module.check_mode:
+                        for rid in difference:
+                            ds.delete_record(domain, rid)
+                    module.exit_json(changed=True)
+                else:
+                    module.exit_json(changed=False)
+
+    except DNSimpleException as e:
+        if DNSIMPLE_MAJOR_VERSION > 1:
+            module.fail_json(msg="DNSimple exception: %s" % e.message)
+        else:
+            module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message']))
+    module.fail_json(msg="Unable to determine what you wanted me to do")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py
new file mode 100644
index 0000000000..64cc4527a6
--- /dev/null
+++ b/plugins/modules/dnsimple_info.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+
+# Copyright Edward Hilgendorf,
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: dnsimple_info
+
+short_description: Pull basic info from DNSimple API
+
+version_added: "4.2.0"
+
+description: Retrieve existing records and domains from DNSimple API.
+
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+options:
+  name:
+    description:
+      - The domain name to retrieve info from.
+      - Returns all associated records for this domain if specified.
+      - If not specified, returns all domains associated with the account ID.
+    type: str
+
+  account_id:
+    description: The account ID to query.
+    required: true
+    type: str
+
+  api_key:
+    description: The API key to use.
+    required: true
+    type: str
+
+  record:
+    description:
+      - The record to find.
+      - If specified, only this record is returned instead of all records.
+    required: false
+    type: str
+
+  sandbox:
+    description: Whether or not to use sandbox environment.
+    required: false
+    default: false
+    type: bool
+
+author:
+  - Edward Hilgendorf (@edhilgendorf)
+"""
+
+EXAMPLES = r"""
+- name: Get all domains from an account
+  community.general.dnsimple_info:
+    account_id: "1234"
+    api_key: "1234"
+
+- name: Get all records from a domain
+  community.general.dnsimple_info:
+    name: "example.com"
+    account_id: "1234"
+    api_key: "1234"
+
+- name: Get all info from a matching record
+  community.general.dnsimple_info:
+    name: "example.com"
+    record: "subdomain"
+    account_id: "1234"
+    api_key: "1234"
+"""
+
+RETURN = r"""
+dnsimple_domain_info:
+  description: Returns a list of dictionaries of all domains associated with the supplied account ID.
+  type: list
+  elements: dict
+  returned: success when O(name) is not specified
+  sample:
+    - account_id: 1234
+      created_at: '2021-10-16T21:25:42Z'
+      id: 123456
+      last_transferred_at:
+      name: example.com
+      reverse: false
+      secondary: false
+      updated_at: '2021-11-10T20:22:50Z'
+  contains:
+    account_id:
+      description: The account ID.
+ type: int + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + last_transferred_at: + description: Date the domain was transferred, or empty if not. + type: str + name: + description: Name of the record. + type: str + reverse: + description: Whether or not it is a reverse zone record. + type: bool + updated_at: + description: When the domain entry was updated. + type: str + +dnsimple_records_info: + description: Returns a list of dictionaries with all records for the domain supplied. + type: list + elements: dict + returned: success when O(name) is specified, but O(record) is not + sample: + - content: ns1.dnsimple.com admin.dnsimple.com + created_at: '2021-10-16T19:07:34Z' + id: 12345 + name: 'catheadbiscuit' + parent_id: + priority: + regions: + - global + system_record: true + ttl: 3600 + type: SOA + updated_at: '2021-11-15T23:55:51Z' + zone_id: example.com + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. + type: str +dnsimple_record_info: + description: Returns a list of dictionaries that match the record supplied. + returned: success when O(name) and O(record) are specified + type: list + elements: dict + sample: + - content: 1.2.3.4 + created_at: '2021-11-15T23:55:51Z' + id: 123456 + name: catheadbiscuit + parent_id: + priority: + regions: + - global + system_record: false + ttl: 3600 + type: A + updated_at: '2021-11-15T23:55:51Z' + zone_id: example.com + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. 
+ type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("requests"): + from requests import Request, Session + + +def build_url(account, key, is_sandbox): + headers = {'Accept': 'application/json', + 'Authorization': 'Bearer {0}'.format(key)} + sandbox = '.sandbox' if is_sandbox else '' + url = 'https://api{sandbox}.dnsimple.com/v2/{account}'.format(sandbox=sandbox, account=account) + req = Request(url=url, headers=headers) + prepped_request = req.prepare() + return prepped_request + + +def iterate_data(module, request_object): + base_url = request_object.url + response = Session().send(request_object) + if 'pagination' not in response.json(): + module.fail_json('API Call failed, check ID, key and sandbox values') + + data = response.json()["data"] + total_pages = response.json()["pagination"]["total_pages"] + page = 1 + + while page < total_pages: + page = page + 1 + request_object.url = '{url}&page={page}'.format(url=base_url, page=page) + new_results = Session().send(request_object) + data = data + new_results.json()['data'] + + return data + + +def record_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def domain_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def account_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def main(): + # define available arguments/parameters a user can pass to the module + fields = { + "account_id": {"required": True, "type": "str"}, + "api_key": {"required": True, "type": "str", "no_log": True}, + "name": {"required": False, "type": "str"}, + "record": {"required": False, "type": "str"}, + "sandbox": {"required": False, "type": "bool", "default": False} + } + + result = { + 'changed': False + } + + module = AnsibleModule( + argument_spec=fields, + supports_check_mode=True + ) + + params = module.params + req = build_url(params['account_id'], + params['api_key'], + params['sandbox']) + + deps.validate(module) + + # At minimum we need account and key + if params['account_id'] and params['api_key']: + # If we have a record return info on that record + if params['name'] and params['record']: + result['dnsimple_record_info'] = record_info(module, req) + module.exit_json(**result) + + # If we have the account only and domain, return records for the domain + elif params['name']: + result['dnsimple_records_info'] = domain_info(module, req) + module.exit_json(**result) + + # If we have the account only, return domains + else: + result['dnsimple_domain_info'] = account_info(module, req) + module.exit_json(**result) + else: + module.fail_json(msg="Need at least account_id and api_key") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py deleted file mode 120000 index 5fc24abd18..0000000000 --- a/plugins/modules/dnsmadeeasy.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/dnsmadeeasy.py \ No newline at end of file diff --git a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py new file mode 100644 index 0000000000..e74e8a547b --- /dev/null +++ 
b/plugins/modules/dnsmadeeasy.py
@@ -0,0 +1,716 @@
+#!/usr/bin/python
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service)
+description:
+  - 'Manages DNS records using the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation
+    of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/).'
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  account_key:
+    description:
+      - Account API Key.
+    required: true
+    type: str
+
+  account_secret:
+    description:
+      - Account Secret Key.
+    required: true
+    type: str
+
+  domain:
+    description:
+      - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNS Made
+        Easy (for example V(839989)) for faster resolution.
+    required: true
+    type: str
+
+  sandbox:
+    description:
+      - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+    type: bool
+    default: false
+
+  record_name:
+    description:
+      - Record name to get/create/delete/update. If O(record_name) is not specified; all records for the domain are returned
+        in "result" regardless of the state argument.
+    type: str
+
+  record_type:
+    description:
+      - Record type.
+    choices: ['A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']
+    type: str
+
+  record_value:
+    description:
+      - 'Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+        SRV: <priority> <weight> <port> <target name>, TXT: <text value>".'
+      - If O(record_value) is not specified; no changes are made and the record is returned in RV(ignore:result) (in other
+        words, this module can be used to fetch a record's current ID, type, and TTL).
+    type: str
+
+  record_ttl:
+    description:
+      - Record's "Time-To-Live". Number of seconds the record remains cached in DNS servers.
+    default: 1800
+    type: int
+
+  state:
+    description:
+      - Whether the record should exist or not.
+    required: true
+    choices: ['present', 'absent']
+    type: str
+
+  validate_certs:
+    description:
+      - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+        certificates.
+    type: bool
+    default: true
+
+  monitor:
+    description:
+      - If V(true), add or change the monitor. This is applicable only for A records.
+    type: bool
+    default: false
+
+  systemDescription:
+    description:
+      - Description used by the monitor.
+    default: ''
+    type: str
+
+  maxEmails:
+    description:
+      - Number of emails sent to the contact list by the monitor.
+    default: 1
+    type: int
+
+  protocol:
+    description:
+      - Protocol used by the monitor.
+    default: 'HTTP'
+    choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+    type: str
+
+  port:
+    description:
+      - Port used by the monitor.
+    default: 80
+    type: int
+
+  sensitivity:
+    description:
+      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
+    default: 'Medium'
+    choices: ['Low', 'Medium', 'High']
+    type: str
+
+  contactList:
+    description:
+      - Name or ID of the contact list that the monitor notifies.
+      - The default V('') means the Account Owner.
+    type: str
+
+  httpFqdn:
+    description:
+      - The fully qualified domain name used by the monitor.
+    type: str
+
+  httpFile:
+    description:
+      - The file at the Fqdn that the monitor queries for HTTP or HTTPS.
+    type: str
+
+  httpQueryString:
+    description:
+      - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+    type: str
+
+  failover:
+    description:
+      - If V(true), add or change the failover. This is applicable only for A records.
+    type: bool
+    default: false
+
+  autoFailover:
+    description:
+      - If true, fallback to the primary IP address is manual after a failover.
+      - If false, fallback to the primary IP address is automatic after a failover.
+    type: bool
+    default: false
+
+  ip1:
+    description:
+      - Primary IP address for the failover.
+      - Required if adding or changing the monitor or failover.
+    type: str
+
+  ip2:
+    description:
+      - Secondary IP address for the failover.
+      - Required if adding or changing the failover.
+    type: str
+
+  ip3:
+    description:
+      - Tertiary IP address for the failover.
+    type: str
+
+  ip4:
+    description:
+      - Quaternary IP address for the failover.
+    type: str
+
+  ip5:
+    description:
+      - Quinary IP address for the failover.
+    type: str
+
+notes:
+  - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure
+    you are within a few seconds of actual time by using NTP.
+  - This module returns record(s) and monitor(s) in the RV(ignore:result) element when O(state=present). These values can
+    be registered and used in your playbooks.
+  - Only A records can have a O(monitor) or O(failover).
+  - To add failover, the O(failover), O(autoFailover), O(port), O(protocol), O(ip1), and O(ip2) options are required.
+  - To add monitor, the O(monitor), O(port), O(protocol), O(maxEmails), O(systemDescription), and O(ip1) options are required.
+  - The options O(monitor) and O(failover) share O(port), O(protocol), and O(ip1) options.
+requirements: [hashlib, hmac] +author: "Brice Burgess (@briceburg)" +""" + +EXAMPLES = r""" +- name: Fetch my.com domain records + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + register: response + +- name: Create a record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + +- name: Update the previously created record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_value: 192.0.2.23 + +- name: Fetch a specific record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + register: response + +- name: Delete a record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + record_type: A + state: absent + record_name: test + +- name: Add a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: true + ip1: 127.0.0.2 + ip2: 127.0.0.3 + +- name: Add a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: true + ip1: 127.0.0.2 + ip2: 127.0.0.3 + ip3: 127.0.0.4 + ip4: 127.0.0.5 + ip5: 127.0.0.6 + +- name: Add a monitor + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: true + ip1: 127.0.0.2 + protocol: HTTP # default + port: 80 # default + maxEmails: 1 + systemDescription: Monitor Test A record + contactList: my contact list + +- name: Add a monitor with http options + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: true + ip1: 127.0.0.2 + protocol: HTTP # default + port: 80 # default + maxEmails: 1 + systemDescription: Monitor Test A record + contactList: 1174 # contact list id + httpFqdn: http://my.com + httpFile: example + httpQueryString: some string + +- name: Add a monitor and a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: true + ip1: 127.0.0.2 + ip2: 127.0.0.3 + monitor: true + protocol: HTTPS + port: 443 + maxEmails: 1 + systemDescription: monitoring my.com status + contactList: emergencycontacts + +- name: Remove a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: false + +- name: Remove a monitor + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: false +""" + +# ============================================ +# DNSMadeEasy module specific support methods. 
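+# Request signing sketch (summarising DME2._headers/_create_hash below; the
+# date value is illustrative): every request carries three auth headers,
+#   x-dnsme-apiKey:      the account key
+#   x-dnsme-requestDate: e.g. 'Sat, 01 Jan 2022 00:00:00 GMT' (RFC 1123, GMT)
+#   x-dnsme-hmac:        SHA-1 HMAC of that date string, keyed with the secret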
+# + +import json +import hashlib +import hmac +import locale +from time import strftime, gmtime +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class DME2(object): + + def __init__(self, apikey, secret, domain, sandbox, module): + self.module = module + + self.api = apikey + self.secret = secret + + if sandbox: + self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' + self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl) + else: + self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' + + self.domain = str(domain) + self.domain_map = None # ["domain_name"] => ID + self.record_map = None # ["record_name"] => ID + self.records = None # ["record_ID"] => + self.all_records = None + self.contactList_map = None # ["contactList_name"] => ID + + # Lookup the domain ID if passed as a domain name vs. ID + if not self.domain.isdigit(): + self.domain = self.getDomainByName(self.domain)['id'] + + self.record_url = 'dns/managed/' + str(self.domain) + '/records' + self.monitor_url = 'monitor' + self.contactList_url = 'contactList' + + def _headers(self): + currTime = self._get_date() + hashstring = self._create_hash(currTime) + headers = {'x-dnsme-apiKey': self.api, + 'x-dnsme-hmac': hashstring, + 'x-dnsme-requestDate': currTime, + 'content-type': 'application/json'} + return headers + + def _get_date(self): + locale.setlocale(locale.LC_TIME, 'C') + return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) + + def _create_hash(self, rightnow): + return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() + + def query(self, resource, method, data=None): + url = self.baseurl + resource + if data and not isinstance(data, str): + data = urlencode(data) + + response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) + + try: + return json.load(response) + except Exception: + return {} + + def getDomain(self, domain_id): + if not self.domain_map: + self._instMap('domain') + + return self.domains.get(domain_id, False) + + def getDomainByName(self, domain_name): + if not self.domain_map: + self._instMap('domain') + + return self.getDomain(self.domain_map.get(domain_name, 0)) + + def getDomains(self): + return self.query('dns/managed', 'GET')['data'] + + def getRecord(self, record_id): + if not self.record_map: + self._instMap('record') + + return self.records.get(record_id, False) + + # Try to find a single record matching this one. + # How we do this depends on the type of record. For instance, there + # can be several MX records for a single record_name while there can + # only be a single CNAME for a particular record_name. Note also that + # there can be several records with different types for a single name. 
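+    # Illustrative examples of the matching rules implemented below (the
+    # names and values are hypothetical):
+    #   ('www', 'CNAME', ...)                   -> matched on name and type only
+    #   ('mail', 'MX', '10 mx.my.com')          -> matched on the target part
+    #                                              of the value ('mx.my.com')
+    #   ('_sip', 'SRV', '0 5 5060 sip.my.com')  -> matched on the target field
+    #   ('txt1', 'TXT', 'hello')                -> compared as '"hello"', since
+    #                                              the API returns TXT quoted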
+ def getMatchingRecord(self, record_name, record_type, record_value): + # Get all the records if not already cached + if not self.all_records: + self.all_records = self.getRecords() + + if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]: + for result in self.all_records: + if result['name'] == record_name and result['type'] == record_type: + return result + return False + elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]: + for result in self.all_records: + if record_type == "MX": + value = record_value.split(" ")[1] + # Note that TXT records are surrounded by quotes in the API response. + elif record_type == "TXT": + value = '"{0}"'.format(record_value) + elif record_type == "SRV": + value = record_value.split(" ")[3] + else: + value = record_value + if result['name'] == record_name and result['type'] == record_type and result['value'] == value: + return result + return False + else: + raise Exception('record_type not yet supported') + + def getRecords(self): + return self.query(self.record_url, 'GET')['data'] + + def _instMap(self, type): + # @TODO cache this call so it is executed only once per ansible execution + map = {} + results = {} + + # iterate over e.g. self.getDomains() || self.getRecords() + for result in getattr(self, 'get' + type.title() + 's')(): + + map[result['name']] = result['id'] + results[result['id']] = result + + # e.g. self.domain_map || self.record_map + setattr(self, type + '_map', map) + setattr(self, type + 's', results) # e.g. self.domains || self.records + + def prepareRecord(self, data): + return json.dumps(data, separators=(',', ':')) + + def createRecord(self, data): + # @TODO update the cache w/ resultant record + id when implemented + return self.query(self.record_url, 'POST', data) + + def updateRecord(self, record_id, data): + # @TODO update the cache w/ resultant record + id when implemented + return self.query(self.record_url + '/' + str(record_id), 'PUT', data) + + def deleteRecord(self, record_id): + # @TODO remove record from the cache when implemented + return self.query(self.record_url + '/' + str(record_id), 'DELETE') + + def getMonitor(self, record_id): + return self.query(self.monitor_url + '/' + str(record_id), 'GET') + + def updateMonitor(self, record_id, data): + return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data) + + def prepareMonitor(self, data): + return json.dumps(data, separators=(',', ':')) + + def getContactList(self, contact_list_id): + if not self.contactList_map: + self._instMap('contactList') + + return self.contactLists.get(contact_list_id, False) + + def getContactlists(self): + return self.query(self.contactList_url, 'GET')['data'] + + def getContactListByName(self, name): + if not self.contactList_map: + self._instMap('contactList') + + return self.getContactList(self.contactList_map.get(name, 0)) + +# =========================================== +# Module execution. 
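+# Flow summary for main() below: without record_name, all records of the
+# domain are returned; with record_name but no record_value, the matching
+# record (and its monitor, if any) is fetched; otherwise the record, and for
+# A records its monitor/failover, is created, updated, or deleted to match
+# the requested 'state'.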
+# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_key=dict(required=True, no_log=True), + account_secret=dict(required=True, no_log=True), + domain=dict(required=True), + sandbox=dict(default=False, type='bool'), + state=dict(required=True, choices=['present', 'absent']), + record_name=dict(), + record_type=dict(choices=[ + 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), + record_value=dict(), + record_ttl=dict(default=1800, type='int'), + monitor=dict(default=False, type='bool'), + systemDescription=dict(default=''), + maxEmails=dict(default=1, type='int'), + protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), + port=dict(default=80, type='int'), + sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), + contactList=dict(), + httpFqdn=dict(), + httpFile=dict(), + httpQueryString=dict(), + failover=dict(default=False, type='bool'), + autoFailover=dict(default=False, type='bool'), + ip1=dict(), + ip2=dict(), + ip3=dict(), + ip4=dict(), + ip5=dict(), + validate_certs=dict(default=True, type='bool'), + ), + required_together=[ + ['record_value', 'record_ttl', 'record_type'] + ], + required_if=[ + ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], + ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] + ] + ) + + protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) + sensitivities = dict(Low=8, Medium=5, High=3) + + DME = DME2(module.params["account_key"], module.params[ + "account_secret"], module.params["domain"], module.params["sandbox"], module) + state = module.params["state"] + record_name = module.params["record_name"] + record_type = module.params["record_type"] + record_value = module.params["record_value"] + + # Follow Keyword Controlled Behavior + if record_name is None: + domain_records = DME.getRecords() + if not domain_records: + module.fail_json( + msg="The requested domain name is not accessible with this api_key; try using its ID if known.") + module.exit_json(changed=False, result=domain_records) + + # Fetch existing record + Build new one + current_record = DME.getMatchingRecord(record_name, record_type, record_value) + new_record = {'name': record_name} + for i in ["record_value", "record_type", "record_ttl"]: + if not module.params[i] is None: + new_record[i[len("record_"):]] = module.params[i] + # Special handling for mx record + if new_record["type"] == "MX": + new_record["mxLevel"] = new_record["value"].split(" ")[0] + new_record["value"] = new_record["value"].split(" ")[1] + + # Special handling for SRV records + if new_record["type"] == "SRV": + new_record["priority"] = new_record["value"].split(" ")[0] + new_record["weight"] = new_record["value"].split(" ")[1] + new_record["port"] = new_record["value"].split(" ")[2] + new_record["value"] = new_record["value"].split(" ")[3] + + # Fetch existing monitor if the A record indicates it should exist and build the new monitor + current_monitor = dict() + new_monitor = dict() + if current_record and current_record['type'] == 'A' and current_record.get('monitor'): + current_monitor = DME.getMonitor(current_record['id']) + + # Build the new monitor + for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', + 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString', + 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: + if module.params[i] is not None: + if i == 'protocol': + # The API requires protocol to be a 
numeric in the range 1-6 + new_monitor['protocolId'] = protocols[module.params[i]] + elif i == 'sensitivity': + # The API requires sensitivity to be a numeric of 8, 5, or 3 + new_monitor[i] = sensitivities[module.params[i]] + elif i == 'contactList': + # The module accepts either the name or the id of the contact list + contact_list_id = module.params[i] + if not contact_list_id.isdigit() and contact_list_id != '': + contact_list = DME.getContactListByName(contact_list_id) + if not contact_list: + module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id)) + contact_list_id = contact_list.get('id', '') + new_monitor['contactListId'] = contact_list_id + else: + # The module option names match the API field names + new_monitor[i] = module.params[i] + + # Compare new record against existing one + record_changed = False + if current_record: + for i in new_record: + # Remove leading and trailing quote character from values because TXT records + # are surrounded by quotes. + if str(current_record[i]).strip('"') != str(new_record[i]): + record_changed = True + new_record['id'] = str(current_record['id']) + + monitor_changed = False + if current_monitor: + for i in new_monitor: + if str(current_monitor.get(i)) != str(new_monitor[i]): + monitor_changed = True + + # Follow Keyword Controlled Behavior + if state == 'present': + # return the record if no value is specified + if "value" not in new_record: + if not current_record: + module.fail_json( + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) + module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) + + # create record and monitor as the record does not exist + if not current_record: + record = DME.createRecord(DME.prepareRecord(new_record)) + if new_monitor.get('monitor') and record_type == "A": + monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) + module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) + else: + module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor)) + + # update the record + updated = False + if record_changed: + DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) + updated = True + if monitor_changed: + DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) + updated = True + if updated: + module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor)) + + # return the record (no changes) + module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) + + elif state == 'absent': + changed = False + # delete the record (and the monitor/failover) if it exists + if current_record: + DME.deleteRecord(current_record['id']) + module.exit_json(changed=True) + + # record does not exist, return w/o change. 
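+        # (A second run with state=absent is a no-op here and reports
+        # changed=False, keeping deletions idempotent.)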
+        module.exit_json(changed=changed)
+
+    else:
+        module.fail_json(
+            msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py
deleted file mode 120000
index 8d13f3d4b8..0000000000
--- a/plugins/modules/dpkg_divert.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/dpkg_divert.py
\ No newline at end of file
diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py
new file mode 100644
index 0000000000..7f37a47de4
--- /dev/null
+++ b/plugins/modules/dpkg_divert.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017-2020, Yann Amar
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: dpkg_divert
+short_description: Override a debian package's version of a file
+version_added: '0.2.0'
+author:
+  - quidame (@quidame)
+description:
+  - A diversion is for C(dpkg) the knowledge that only a given package (or the local administrator) is allowed to install
+    a file at a given location. Other packages shipping their own version of this file are forced to O(divert) it, that is
+    to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing
+    it from being overwritten on package upgrade.
+  - This module manages diversions of debian packages files using the C(dpkg-divert) commandline tool. It can either create
+    or remove a diversion for a given file, but also update an existing diversion to modify its O(holder) and/or its O(divert)
+    location.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+options:
+  path:
+    description:
+      - The original and absolute path of the file to be diverted or undiverted. This path is unique, in other words it is
+        not possible to get two diversions for the same O(path).
+    required: true
+    type: path
+  state:
+    description:
+      - When O(state=absent), remove the diversion of the specified O(path); when O(state=present), create the diversion if
+        it does not exist, or update its package O(holder) or O(divert) location, if it already exists.
+    type: str
+    default: present
+    choices: [absent, present]
+  holder:
+    description:
+      - The name of the package whose copy of file is not diverted, also known as the diversion holder or the package the
+        diversion belongs to.
+      - The actual package does not have to be installed or even to exist for its name to be valid. If not specified, the
+        diversion is held by 'LOCAL', that is reserved by/for dpkg for local diversions.
+      - This parameter is ignored when O(state=absent).
+    type: str
+  divert:
+    description:
+      - The location where the versions of file are diverted.
+      - Default is to add suffix C(.distrib) to the file path.
+      - This parameter is ignored when O(state=absent).
+    type: path
+  rename:
+    description:
+      - Actually move the file aside (when O(state=present)) or back (when O(state=absent)), but only when changing the state
+        of the diversion. This parameter has no effect when attempting to add a diversion that already exists or when removing
+        a nonexistent one.
+      - Unless O(force=true), renaming fails if the destination file already exists (this lock being a dpkg-divert feature,
+        and bypassing it being a module feature).
+ type: bool + default: false + force: + description: + - When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words + the existing contents of the file at this location are lost. + - This parameter is ignored when O(rename=false). + type: bool + default: false +requirements: + - dpkg-divert >= 1.15.0 (Debian family) +""" + +EXAMPLES = r""" +- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place + community.general.dpkg_divert: + path: /usr/bin/busybox + +- name: Divert /usr/bin/busybox by package 'branding' + community.general.dpkg_divert: + path: /usr/bin/busybox + holder: branding + +- name: Divert and rename busybox to busybox.dpkg-divert + community.general.dpkg_divert: + path: /usr/bin/busybox + divert: /usr/bin/busybox.dpkg-divert + rename: true + +- name: Remove the busybox diversion and move the diverted file back + community.general.dpkg_divert: + path: /usr/bin/busybox + state: absent + rename: true + force: true +""" + +RETURN = r""" +commands: + description: The dpkg-divert commands ran internally by the module. + type: list + returned: on_success + elements: str + sample: "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc" +messages: + description: The dpkg-divert relevant messages (stdout or stderr). + type: list + returned: on_success + elements: str + sample: "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'" +diversion: + description: The status of the diversion after task execution. + type: dict + returned: always + contains: + divert: + description: The location of the diverted file. + type: str + holder: + description: The package holding the diversion. + type: str + path: + description: The path of the file to divert/undivert. + type: str + state: + description: The state of the diversion. 
+ type: str + sample: + { + "divert": "/etc/foobarrc.distrib", + "holder": "LOCAL", + "path": "/etc/foobarrc", + "state": "present" + } +""" + + +import re +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +def diversion_state(module, command, path): + diversion = dict(path=path, state='absent', divert=None, holder=None) + rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True) + if out: + diversion['state'] = 'present' + diversion['holder'] = out.rstrip() + rc, out, err = module.run_command([command, '--truename', path], check_rc=True) + diversion['divert'] = out.rstrip() + return diversion + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(required=True, type='path'), + state=dict(type='str', default='present', choices=['absent', 'present']), + holder=dict(type='str'), + divert=dict(type='path'), + rename=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + path = module.params['path'] + state = module.params['state'] + holder = module.params['holder'] + divert = module.params['divert'] + rename = module.params['rename'] + force = module.params['force'] + + diversion_wanted = dict(path=path, state=state) + changed = False + + DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True) + MAINCOMMAND = [DPKG_DIVERT] + + # Option --listpackage is needed and comes with 1.15.0 + rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True) + [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)] + if LooseVersion(current_version) < LooseVersion("1.15.0"): + module.fail_json(msg="Unsupported dpkg version (<1.15.0).") + no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1")) + + b_path = to_bytes(path, errors='surrogate_or_strict') + path_exists = os.path.exists(b_path) + # Used for things not doable with a single dpkg-divert command (as forced + # renaming of files, and diversion's 'holder' or 'divert' updates). + target_exists = False + truename_exists = False + + diversion_before = diversion_state(module, DPKG_DIVERT, path) + if diversion_before['state'] == 'present': + b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict') + truename_exists = os.path.exists(b_divert) + + # Append options as requested in the task parameters, but ignore some of + # them when removing the diversion. + if rename: + MAINCOMMAND.append('--rename') + elif no_rename_is_supported: + MAINCOMMAND.append('--no-rename') + + if state == 'present': + if holder and holder != 'LOCAL': + MAINCOMMAND.extend(['--package', holder]) + diversion_wanted['holder'] = holder + else: + MAINCOMMAND.append('--local') + diversion_wanted['holder'] = 'LOCAL' + + if divert: + MAINCOMMAND.extend(['--divert', divert]) + target = divert + else: + target = '%s.distrib' % path + + MAINCOMMAND.extend(['--add', path]) + diversion_wanted['divert'] = target + b_target = to_bytes(target, errors='surrogate_or_strict') + target_exists = os.path.exists(b_target) + + else: + MAINCOMMAND.extend(['--remove', path]) + diversion_wanted['divert'] = None + diversion_wanted['holder'] = None + + # Start to populate the returned objects. 
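+    # A typical command assembled above looks like (illustrative paths):
+    #   /usr/bin/dpkg-divert --rename --local --divert /etc/foobarrc.local --add /etc/foobarrc
+    # with '--test' inserted after the program name when running in check mode.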
+ diversion = diversion_before.copy() + maincommand = ' '.join(MAINCOMMAND) + commands = [maincommand] + + if module.check_mode or diversion_wanted == diversion_before: + MAINCOMMAND.insert(1, '--test') + diversion_after = diversion_wanted + + # Just try and see + rc, stdout, stderr = module.run_command(MAINCOMMAND) + + if rc == 0: + messages = [stdout.rstrip()] + + # else... cases of failure with dpkg-divert are: + # - The diversion does not belong to the same package (or LOCAL) + # - The divert filename is not the same (e.g. path.distrib != path.divert) + # - The renaming is forbidden by dpkg-divert (i.e. both the file and the + # diverted file exist) + + elif state != diversion_before['state']: + # There should be no case with 'divert' and 'holder' when creating the + # diversion from none, and they're ignored when removing the diversion. + # So this is all about renaming... + if rename and path_exists and ( + (state == 'absent' and truename_exists) or + (state == 'present' and target_exists)): + if not force: + msg = "Set 'force' param to True to force renaming of files." + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + else: + msg = "Unexpected error while changing state of the diversion." + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + + to_remove = path + if state == 'present': + to_remove = target + + if not module.check_mode: + try: + b_remove = to_bytes(to_remove, errors='surrogate_or_strict') + os.unlink(b_remove) + except OSError as e: + msg = 'Failed to remove %s: %s' % (to_remove, to_native(e)) + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) + + messages = [stdout.rstrip()] + + # The situation is that we want to modify the settings (holder or divert) + # of an existing diversion. dpkg-divert does not handle this, and we have + # to remove the existing diversion first, and then set a new one. + else: + RMDIVERSION = [DPKG_DIVERT, '--remove', path] + if no_rename_is_supported: + RMDIVERSION.insert(1, '--no-rename') + rmdiversion = ' '.join(RMDIVERSION) + + if module.check_mode: + RMDIVERSION.insert(1, '--test') + + if rename: + MAINCOMMAND.remove('--rename') + if no_rename_is_supported: + MAINCOMMAND.insert(1, '--no-rename') + maincommand = ' '.join(MAINCOMMAND) + + commands = [rmdiversion, maincommand] + rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True) + + if module.check_mode: + messages = [rmdout.rstrip(), 'Running in check mode'] + else: + rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) + messages = [rmdout.rstrip(), stdout.rstrip()] + + # Avoid if possible to orphan files (i.e. to dereference them in diversion + # database but let them in place), but do not make renaming issues fatal. + # BTW, this module is not about state of files involved in the diversion. 
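+    # Illustrative scenario (hypothetical paths): if the divert location moved
+    # from /etc/foobarrc.distrib to /etc/foobarrc.local and only the old file
+    # exists on disk, the rename below keeps the diverted contents reachable
+    # at the new location.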
+ old = diversion_before['divert'] + new = diversion_wanted['divert'] + if new != old: + b_old = to_bytes(old, errors='surrogate_or_strict') + b_new = to_bytes(new, errors='surrogate_or_strict') + if os.path.exists(b_old) and not os.path.exists(b_new): + try: + os.rename(b_old, b_new) + except OSError as e: + pass + + if not module.check_mode: + diversion_after = diversion_state(module, DPKG_DIVERT, path) + + diversion = diversion_after.copy() + diff = dict() + if module._diff: + diff['before'] = diversion_before + diff['after'] = diversion_after + + if diversion_after != diversion_before: + changed = True + + if diversion_after == diversion_wanted: + module.exit_json(changed=changed, diversion=diversion, + commands=commands, messages=messages, diff=diff) + else: + msg = "Unexpected error: see stdout and stderr for details." + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py deleted file mode 120000 index 96f4420acd..0000000000 --- a/plugins/modules/easy_install.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/easy_install.py \ No newline at end of file diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py new file mode 100644 index 0000000000..d533da899f --- /dev/null +++ b/plugins/modules/easy_install.py @@ -0,0 +1,199 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Matt Wright +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: easy_install +short_description: Installs Python libraries +description: + - Installs Python libraries, optionally in a C(virtualenv). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - A Python library name. + required: true + virtualenv: + type: str + description: + - An optional O(virtualenv) directory path to install into. If the O(virtualenv) does not exist, it is created automatically. + virtualenv_site_packages: + description: + - Whether the virtual environment inherits packages from the global site-packages directory. Note that this setting + has no effect on an already existing virtual environment, so if you want to change it, the environment must be deleted + and newly created. + type: bool + default: false + virtualenv_command: + type: str + description: + - The command to create the virtual environment with. For example V(pyvenv), V(virtualenv), V(virtualenv2). + default: virtualenv + executable: + type: str + description: + - The explicit executable or a pathname to the executable to be used to run easy_install for a specific version of Python + installed in the system. For example V(easy_install-3.3), if there are both Python 2.7 and 3.3 installations in the + system and you want to run easy_install for the Python 3.3 installation. + default: easy_install + state: + type: str + description: + - The desired state of the library. V(latest) ensures that the latest version is installed. + choices: [present, latest] + default: present +notes: + - Please note that the C(easy_install) module can only install Python libraries. Thus this module is not able to remove + libraries. 
It is generally recommended to use the M(ansible.builtin.pip) module which you can first install using M(community.general.easy_install). + - Also note that C(virtualenv) must be installed on the remote host if the O(virtualenv) parameter is specified. +requirements: ["virtualenv"] +author: "Matt Wright (@mattupstate)" +""" + +EXAMPLES = r""" +- name: Install or update pip + community.general.easy_install: + name: pip + state: latest + +- name: Install Bottle into the specified virtualenv + community.general.easy_install: + name: bottle + virtualenv: /webapps/myapp/venv + +- name: Install a python package using pyvenv as the virtualenv tool + community.general.easy_install: + name: package_name + virtualenv: /opt/myenv + virtualenv_command: pyvenv +""" + +import os +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule + + +def install_package(module, name, easy_install, executable_arguments): + cmd = [easy_install] + executable_arguments + [name] + rc, out, err = module.run_command(cmd) + return rc, out, err + + +def _is_package_installed(module, name, easy_install, executable_arguments): + # Copy and add to the arguments + executable_arguments = executable_arguments[:] + executable_arguments.append('--dry-run') + rc, out, err = install_package(module, name, easy_install, executable_arguments) + if rc: + module.fail_json(msg=err) + return 'Downloading' not in out + + +def _get_easy_install(module, env=None, executable=None): + candidate_easy_inst_basenames = ['easy_install'] + easy_install = None + if executable is not None: + if os.path.isabs(executable): + easy_install = executable + else: + candidate_easy_inst_basenames.insert(0, executable) + if easy_install is None: + if env is None: + opt_dirs = [] + else: + # Try easy_install with the virtualenv directory first. + opt_dirs = ['%s/bin' % env] + for basename in candidate_easy_inst_basenames: + easy_install = module.get_bin_path(basename, False, opt_dirs) + if easy_install is not None: + break + # easy_install should have been found by now. The final call to + # get_bin_path will trigger fail_json. 
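+    # Lookup order sketch (hypothetical inputs): with executable
+    # 'easy_install-3.3' and env '/opt/venv', each candidate name is tried in
+    # turn ('easy_install-3.3', then 'easy_install'), looking in
+    # '/opt/venv/bin' ahead of the system PATH for each.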
+ if easy_install is None: + basename = candidate_easy_inst_basenames[0] + easy_install = module.get_bin_path(basename, True, opt_dirs) + return easy_install + + +def main(): + arg_spec = dict( + name=dict(required=True), + state=dict(default='present', + choices=['present', 'latest'], + type='str'), + virtualenv=dict(), + virtualenv_site_packages=dict(default=False, type='bool'), + virtualenv_command=dict(default='virtualenv'), + executable=dict(default='easy_install'), + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params['name'] + env = module.params['virtualenv'] + executable = module.params['executable'] + site_packages = module.params['virtualenv_site_packages'] + virtualenv_command = module.params['virtualenv_command'] + executable_arguments = [] + if module.params['state'] == 'latest': + executable_arguments.append('--upgrade') + + rc = 0 + err = '' + out = '' + + if env: + virtualenv = module.get_bin_path(virtualenv_command, True) + + if not os.path.exists(os.path.join(env, 'bin', 'activate')): + if module.check_mode: + module.exit_json(changed=True) + command = '%s %s' % (virtualenv, env) + if site_packages: + command += ' --system-site-packages' + cwd = tempfile.gettempdir() + rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) + + rc += rc_venv + out += out_venv + err += err_venv + + easy_install = _get_easy_install(module, env, executable) + + cmd = None + changed = False + installed = _is_package_installed(module, name, easy_install, executable_arguments) + + if not installed: + if module.check_mode: + module.exit_json(changed=True) + rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments) + + rc += rc_easy_inst + out += out_easy_inst + err += err_easy_inst + + changed = True + + if rc != 0: + module.fail_json(msg=err, cmd=cmd) + + module.exit_json(changed=changed, binary=easy_install, + name=name, virtualenv=env) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ejabberd_user.py b/plugins/modules/ejabberd_user.py deleted file mode 120000 index 506c2e785b..0000000000 --- a/plugins/modules/ejabberd_user.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/ejabberd_user.py \ No newline at end of file diff --git a/plugins/modules/ejabberd_user.py b/plugins/modules/ejabberd_user.py new file mode 100644 index 0000000000..d60a5d4f4a --- /dev/null +++ b/plugins/modules/ejabberd_user.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# +# Copyright (C) 2013, Peter Sprygada +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ejabberd_user +author: "Peter Sprygada (@privateip)" +short_description: Manages users for ejabberd servers +requirements: + - ejabberd with mod_admin_extra +description: + - This module provides user management for ejabberd servers. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + username: + type: str + description: + - The name of the user to manage. + required: true + host: + type: str + description: + - The ejabberd host associated with this username. + required: true + password: + type: str + description: + - The password to assign to the username. 
+    required: false
+  state:
+    type: str
+    description:
+      - Describe the desired state of the user to be managed.
+    required: false
+    default: 'present'
+    choices: ['present', 'absent']
+notes:
+  - Password parameter is required for O(state=present) only.
+  - Passwords must be stored in clear text for this release.
+  - The ejabberd configuration file must include mod_admin_extra as a module.
+"""
+EXAMPLES = r"""
+# Example playbook entries using the ejabberd_user module to manage user state.
+
+- name: Create a user if it does not exist
+  community.general.ejabberd_user:
+    username: test
+    host: server
+    password: password
+
+- name: Delete a user if it exists
+  community.general.ejabberd_user:
+    username: test
+    host: server
+    state: absent
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+class EjabberdUser(object):
+    """ This object represents a user resource for an ejabberd server. The
+    object manages user creation and deletion using ejabberdctl. The following
+    commands are currently supported:
+        * ejabberdctl register
+        * ejabberdctl unregister
+    """
+
+    def __init__(self, module):
+        self.module = module
+        self.state = module.params.get('state')
+        self.host = module.params.get('host')
+        self.user = module.params.get('username')
+        self.pwd = module.params.get('password')
+        self.runner = CmdRunner(
+            module,
+            command="ejabberdctl",
+            arg_formats=dict(
+                cmd=cmd_runner_fmt.as_list(),
+                host=cmd_runner_fmt.as_list(),
+                user=cmd_runner_fmt.as_list(),
+                pwd=cmd_runner_fmt.as_list(),
+            ),
+            check_rc=False,
+        )
+
+    @property
+    def changed(self):
+        """ This method will check the current user and see if the password has
+        changed. It will return True if the password does not match the
+        supplied credentials and False if it does.
+        """
+        return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc)))
+
+    @property
+    def exists(self):
+        """ This method will check to see if the supplied username exists for
If the user exists True is returned, otherwise False + is returned + """ + return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc))) + + def log(self, entry): + """ This method does nothing """ + pass + + def run_command(self, cmd, options, process=None): + """ This method will run the any command specified and return the + returns using the Ansible common module + """ + def _proc(*a): + return a + + if process is None: + process = _proc + + with self.runner("cmd " + options, output_process=process) as ctx: + res = ctx.run(cmd=cmd, host=self.host, user=self.user, pwd=self.pwd) + self.log('command: %s' % " ".join(ctx.run_info['cmd'])) + return res + + def update(self): + """ The update method will update the credentials for the user provided + """ + return self.run_command('change_password', 'user host pwd') + + def create(self): + """ The create method will create a new user on the host with the + password provided + """ + return self.run_command('register', 'user host pwd') + + def delete(self): + """ The delete method will delete the user from the host + """ + return self.run_command('unregister', 'user host') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True, type='str'), + username=dict(required=True, type='str'), + password=dict(type='str', no_log=True), + state=dict(default='present', choices=['present', 'absent']), + ), + required_if=[ + ('state', 'present', ['password']), + ], + supports_check_mode=True, + ) + + obj = EjabberdUser(module) + + rc = None + result = dict(changed=False) + + if obj.state == 'absent': + if obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.delete() + if rc != 0: + module.fail_json(msg=err, rc=rc) + + elif obj.state == 'present': + if not obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.create() + elif obj.changed: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.update() + if rc is not None and rc != 0: + module.fail_json(msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py deleted file mode 120000 index 32eee4e4b2..0000000000 --- a/plugins/modules/elasticsearch_plugin.py +++ /dev/null @@ -1 +0,0 @@ -./database/misc/elasticsearch_plugin.py \ No newline at end of file diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py new file mode 100644 index 0000000000..7d49ebded1 --- /dev/null +++ b/plugins/modules/elasticsearch_plugin.py @@ -0,0 +1,306 @@ +#!/usr/bin/python +# Copyright (c) 2015, Mathew Davies +# Copyright (c) 2017, Sam Doran +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: elasticsearch_plugin +short_description: Manage Elasticsearch plugins +description: + - Manages Elasticsearch plugins. +author: + - Mathew Davies (@ThePixelDeveloper) + - Sam Doran (@samdoran) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the plugin to install. 
+    required: true
+    type: str
+  state:
+    description:
+      - Desired state of a plugin.
+    choices: ["present", "absent"]
+    default: present
+    type: str
+  src:
+    description:
+      - Optionally set the source location to retrieve the plugin from. This can be a C(file://) URL to install from a local
+        file, or a remote URL. If this is not set, the plugin location is just based on the name.
+      - The O(name) parameter must match the descriptor in the plugin ZIP specified.
+      - Is only used if the state would change, which is solely checked based on the O(name) parameter. If, for example, the
+        plugin is already installed, changing this has no effect.
+      - For ES 1.x use O(url).
+    required: false
+    type: str
+  url:
+    description:
+      - Set exact URL to download the plugin from (only works for ES 1.x).
+      - For ES 2.x and higher, use O(src).
+    required: false
+    type: str
+  timeout:
+    description:
+      - 'Timeout setting: V(30s), V(1m), V(1h)...'
+      - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
+    default: 1m
+    type: str
+  force:
+    description:
+      - Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console
+        detection fails.
+    default: false
+    type: bool
+  plugin_bin:
+    description:
+      - Location of the plugin binary. If this file is not found, the default plugin binaries are used.
+    type: path
+  plugin_dir:
+    description:
+      - Your configured plugin directory specified in Elasticsearch.
+    default: /usr/share/elasticsearch/plugins/
+    type: path
+  proxy_host:
+    description:
+      - Proxy host to use during plugin installation.
+    type: str
+  proxy_port:
+    description:
+      - Proxy port to use during plugin installation.
+    type: str
+  version:
+    description:
+      - Version of the plugin to be installed. If the plugin exists with a previous version, it is NOT updated.
+ type: str +""" + +EXAMPLES = r""" +- name: Install Elasticsearch Head plugin in Elasticsearch 2.x + community.general.elasticsearch_plugin: + name: mobz/elasticsearch-head + state: present + +- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x + community.general.elasticsearch_plugin: + name: mobz/elasticsearch-head + version: 2.0.0 + +- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x + community.general.elasticsearch_plugin: + name: mobz/elasticsearch-head + state: absent + +- name: Install a specific plugin in Elasticsearch >= 5.0 + community.general.elasticsearch_plugin: + name: analysis-icu + state: present + +- name: Install the ingest-geoip plugin with a forced installation + community.general.elasticsearch_plugin: + name: ingest-geoip + state: present + force: true +""" + +import os + +from ansible.module_utils.basic import AnsibleModule + + +PACKAGE_STATE_MAP = dict( + present="install", + absent="remove" +) + +PLUGIN_BIN_PATHS = tuple([ + '/usr/share/elasticsearch/bin/elasticsearch-plugin', + '/usr/share/elasticsearch/bin/plugin' +]) + + +def parse_plugin_repo(string): + elements = string.split("/") + + # We first consider the simplest form: pluginname + repo = elements[0] + + # We consider the form: username/pluginname + if len(elements) > 1: + repo = elements[1] + + # remove elasticsearch- prefix + # remove es- prefix + for string in ("elasticsearch-", "es-"): + if repo.startswith(string): + return repo[len(string):] + + return repo + + +def is_plugin_present(plugin_name, plugin_dir): + return os.path.isdir(os.path.join(plugin_dir, plugin_name)) + + +def parse_error(string): + reason = "ERROR: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): + cmd = [plugin_bin, PACKAGE_STATE_MAP["present"]] + is_old_command = (os.path.basename(plugin_bin) == 'plugin') + + # Timeout and version are only valid for plugin, not elasticsearch-plugin + if is_old_command: + if timeout: + cmd.append("--timeout") + cmd.append(timeout) + + if version: + plugin_name = plugin_name + '/' + version + cmd[2] = plugin_name + + if proxy_host and proxy_port: + java_opts = ["-Dhttp.proxyHost=%s" % proxy_host, + "-Dhttp.proxyPort=%s" % proxy_port, + "-Dhttps.proxyHost=%s" % proxy_host, + "-Dhttps.proxyPort=%s" % proxy_port] + module.run_command_environ_update = dict(CLI_JAVA_OPTS=" ".join(java_opts), # Elasticsearch 8.x + ES_JAVA_OPTS=" ".join(java_opts)) # Older Elasticsearch versions + + # Legacy ES 1.x + if url: + cmd.append("--url") + cmd.append(url) + + if force: + cmd.append("--batch") + if src: + cmd.append(src) + else: + cmd.append(plugin_name) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err) + + return True, cmd, out, err + + +def remove_plugin(module, plugin_bin, plugin_name): + cmd = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err) + + return True, cmd, out, err + + +def get_plugin_bin(module, plugin_bin=None): + # Use the 
plugin_bin that was supplied first before trying other options
+    valid_plugin_bin = None
+    if plugin_bin and os.path.isfile(plugin_bin):
+        valid_plugin_bin = plugin_bin
+
+    else:
+        # Add the plugin_bin passed into the module to the top of the list of paths to test,
+        # testing for that binary name first before falling back to the default paths.
+        bin_paths = list(PLUGIN_BIN_PATHS)
+        if plugin_bin and plugin_bin not in bin_paths:
+            bin_paths.insert(0, plugin_bin)
+
+        # Get separate lists of dirs and binary names from the full paths to the
+        # plugin binaries.
+        plugin_dirs = list(set(os.path.dirname(x) for x in bin_paths))
+        plugin_bins = list(set(os.path.basename(x) for x in bin_paths))
+
+        # Check for the binary names in the default system paths as well as the path
+        # specified in the module arguments.
+        for bin_file in plugin_bins:
+            valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
+            if valid_plugin_bin:
+                break
+
+    if not valid_plugin_bin:
+        module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
+
+    return valid_plugin_bin
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
+            src=dict(),
+            url=dict(),
+            timeout=dict(default="1m"),
+            force=dict(type='bool', default=False),
+            plugin_bin=dict(type="path"),
+            plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+            proxy_host=dict(),
+            proxy_port=dict(),
+            version=dict()
+        ),
+        mutually_exclusive=[("src", "url")],
+        supports_check_mode=True
+    )
+
+    name = module.params["name"]
+    state = module.params["state"]
+    url = module.params["url"]
+    src = module.params["src"]
+    timeout = module.params["timeout"]
+    force = module.params["force"]
+    plugin_bin = module.params["plugin_bin"]
+    plugin_dir = module.params["plugin_dir"]
+    proxy_host = module.params["proxy_host"]
+    proxy_port = module.params["proxy_port"]
+    version = module.params["version"]
+
+    # Search provided path and system paths for valid binary
+    plugin_bin = get_plugin_bin(module, plugin_bin)
+
+    repo = parse_plugin_repo(name)
+    present = is_plugin_present(repo, plugin_dir)
+
+    # skip if the state is correct
+    if (present and state == "present") or (state == "absent" and not present):
+        module.exit_json(changed=False, name=name, state=state)
+
+    if state == "present":
+        changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
+
+    elif state == "absent":
+        changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+    module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+    main()
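
As a quick aside (not part of the patch), the name handling above can be illustrated with the module's own helpers; a minimal sketch, assuming the collection is on the Python path:

    from ansible_collections.community.general.plugins.modules.elasticsearch_plugin import parse_plugin_repo

    # parse_plugin_repo() keeps the part after "author/" and strips the known
    # "elasticsearch-"/"es-" prefixes, so both forms map to one directory name,
    # which is_plugin_present() then looks up under plugin_dir.
    assert parse_plugin_repo("mobz/elasticsearch-head") == "head"
    assert parse_plugin_repo("es-head") == "head"

diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py
deleted file mode 120000
index 3cfe3d54b8..0000000000
--- a/plugins/modules/emc_vnx_sg_member.py
+++ /dev/null
@@ -1 +0,0 @@
-./storage/emc/emc_vnx_sg_member.py
\ No newline at end of file
diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py
new file mode 100644
index 0000000000..fce2c59c32
--- /dev/null
+++ b/plugins/modules/emc_vnx_sg_member.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: 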
GPL-3.0-or-later +# + +from __future__ import annotations + +DOCUMENTATION = r""" +module: emc_vnx_sg_member + +short_description: Manage storage group member on EMC VNX + + +description: + - This module manages the members of an existing storage group. +extends_documentation_fragment: + - community.general.emc.emc_vnx + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + name: + description: + - Name of the Storage group to manage. + required: true + type: str + lunid: + description: + - LUN ID to be added. + required: true + type: int + state: + description: + - Indicates the desired lunid state. + - V(present) ensures specified O(lunid) is present in the Storage Group. + - V(absent) ensures specified O(lunid) is absent from Storage Group. + default: present + choices: ["present", "absent"] + type: str + + +author: + - Luca 'remix_tj' Lorenzetto (@remixtj) +""" + +EXAMPLES = r""" +- name: Add lun to storage group + community.general.emc_vnx_sg_member: + name: sg01 + sp_address: sp1a.fqdn + sp_user: sysadmin + sp_password: sysadmin + lunid: 100 + state: present + +- name: Remove lun from storage group + community.general.emc_vnx_sg_member: + name: sg01 + sp_address: sp1a.fqdn + sp_user: sysadmin + sp_password: sysadmin + lunid: 100 + state: absent +""" + +RETURN = r""" +hluid: + description: LUNID visible to hosts attached to the storage group. + type: int + returned: success +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec + +LIB_IMP_ERR = None +try: + from storops import VNXSystem + from storops.exception import VNXCredentialError, VNXStorageGroupError, \ + VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError + HAS_LIB = True +except Exception: + LIB_IMP_ERR = traceback.format_exc() + HAS_LIB = False + + +def run_module(): + module_args = dict( + name=dict(type='str', required=True), + lunid=dict(type='int', required=True), + state=dict(default='present', choices=['present', 'absent']), + ) + + module_args.update(emc_vnx_argument_spec) + + result = dict( + changed=False, + hluid=None + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + if not HAS_LIB: + module.fail_json(msg=missing_required_lib('storops >= 0.5.10'), + exception=LIB_IMP_ERR) + + sp_user = module.params['sp_user'] + sp_address = module.params['sp_address'] + sp_password = module.params['sp_password'] + alu = module.params['lunid'] + + # if the user is working with this module in only check mode we do not + # want to make any changes to the environment, just return the current + # state with no modifications + if module.check_mode: + return result + + try: + vnx = VNXSystem(sp_address, sp_user, sp_password) + sg = vnx.get_sg(module.params['name']) + if sg.existed: + if module.params['state'] == 'present': + if not sg.has_alu(alu): + try: + result['hluid'] = sg.attach_alu(alu) + result['changed'] = True + except VNXAluAlreadyAttachedError: + result['hluid'] = sg.get_hlu(alu) + except (VNXAttachAluError, VNXStorageGroupError) as e: + module.fail_json(msg='Error attaching {0}: ' + '{1} '.format(alu, to_native(e)), + **result) + else: + result['hluid'] = sg.get_hlu(alu) + if module.params['state'] == 'absent' and sg.has_alu(alu): + try: + sg.detach_alu(alu) + 
result['changed'] = True
+                except VNXDetachAluNotFoundError:
+                    # being not attached when using absent is OK
+                    pass
+                except VNXStorageGroupError as e:
+                    module.fail_json(msg='Error detaching alu {0}: '
+                                         '{1} '.format(alu, to_native(e)),
+                                     **result)
+        else:
+            module.fail_json(msg='No such storage group named '
+                                 '{0}'.format(module.params['name']),
+                             **result)
+    except VNXCredentialError as e:
+        module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
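
For orientation (not part of the patch), the storops calls used in run_module() reduce to a short sequence; a rough sketch using the hypothetical credentials from the EXAMPLES above:

    from storops import VNXSystem  # requires storops >= 0.5.10, as the module does

    vnx = VNXSystem('sp1a.fqdn', 'sysadmin', 'sysadmin')
    sg = vnx.get_sg('sg01')
    if sg.existed and not sg.has_alu(100):
        hluid = sg.attach_alu(100)  # state=present; returns the host-visible LUN ID

diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py
deleted file mode 120000
index 3ee484ea3a..0000000000
--- a/plugins/modules/etcd3.py
+++ /dev/null
@@ -1 +0,0 @@
-./clustering/etcd3.py
\ No newline at end of file
diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py
new file mode 100644
index 0000000000..397bb1d767
--- /dev/null
+++ b/plugins/modules/etcd3.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Jean-Philippe Evrard
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: etcd3
+short_description: Set or delete key value pairs from an etcd3 cluster
+requirements:
+  - etcd3
+description:
+  - Sets or deletes values in etcd3 cluster using its v3 API.
+  - Needs python etcd3 lib to work.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  key:
+    type: str
+    description:
+      - The key where the information is stored in the cluster.
+    required: true
+  value:
+    type: str
+    description:
+      - The information stored.
+    required: true
+  host:
+    type: str
+    description:
+      - The IP address of the cluster.
+    default: 'localhost'
+  port:
+    type: int
+    description:
+      - The port number used to connect to the cluster.
+    default: 2379
+  state:
+    type: str
+    description:
+      - The state of the value for the key.
+      - Can be present or absent.
+    required: true
+    choices: [present, absent]
+  user:
+    type: str
+    description:
+      - The etcd user to authenticate with.
+  password:
+    type: str
+    description:
+      - The password to use for authentication.
+      - Required if O(user) is defined.
+  ca_cert:
+    type: path
+    description:
+      - The Certificate Authority to use to verify the etcd host.
+      - Required if O(client_cert) and O(client_key) are defined.
+  client_cert:
+    type: path
+    description:
+      - PEM formatted certificate chain file to be used for SSL client authentication.
+      - Required if O(client_key) is defined.
+  client_key:
+    type: path
+    description:
+      - PEM formatted file that contains your private key to be used for SSL client authentication.
+      - Required if O(client_cert) is defined.
+  timeout:
+    type: int
+    description:
+      - The socket level timeout in seconds.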
+author: + - Jean-Philippe Evrard (@evrardjp) + - Victor Fauth (@vfauth) +""" + +EXAMPLES = r""" +- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" + community.general.etcd3: + key: "foo" + value: "baz3" + host: "localhost" + port: 2379 + state: "present" + +- name: Authenticate using user/password combination with a timeout of 10 seconds + community.general.etcd3: + key: "foo" + value: "baz3" + state: "present" + user: "someone" + password: "password123" + timeout: 10 + +- name: Authenticate using TLS certificates + community.general.etcd3: + key: "foo" + value: "baz3" + state: "present" + ca_cert: "/etc/ssl/certs/CA_CERT.pem" + client_cert: "/etc/ssl/certs/cert.crt" + client_key: "/etc/ssl/private/key.pem" +""" + +RETURN = r""" +key: + description: The key that was queried. + returned: always + type: str +old_value: + description: The previous value in the cluster. + returned: always + type: str +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +try: + import etcd3 + HAS_ETCD = True + ETCD_IMP_ERR = None +except ImportError: + ETCD_IMP_ERR = traceback.format_exc() + HAS_ETCD = False + + +def run_module(): + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + key=dict(type='str', required=True, no_log=False), + value=dict(type='str', required=True), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=2379), + state=dict(type='str', required=True, choices=['present', 'absent']), + user=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + client_cert=dict(type='path'), + client_key=dict(type='path'), + timeout=dict(type='int'), + ) + + # seed the result dict in the object + # we primarily care about changed and state + # change is if this module effectively modified the target + # state will include any data that you want your module to pass back + # for consumption, for example, in a subsequent task + result = dict( + changed=False, + ) + + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_together=[['client_cert', 'client_key'], ['user', 'password']], + ) + + # It is possible to set `ca_cert` to verify the server identity without + # setting `client_cert` or `client_key` to authenticate the client + # so required_together is enough + # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence + # of either `client_cert` or `client_key` is enough + if module.params['ca_cert'] is None and module.params['client_cert'] is not None: + module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.") + + result['key'] = module.params.get('key') + module.params['cert_cert'] = module.params.pop('client_cert') + module.params['cert_key'] = module.params.pop('client_key') + + if not HAS_ETCD: + module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR) + + allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', + 'timeout', 'user', 'password'] + + client_params = {key: value for key, value in module.params.items() if key in allowed_keys} + try: + 
etcd = etcd3.client(**client_params)
+    except Exception as exp:
+        module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
+                         exception=traceback.format_exc())
+    try:
+        cluster_value = etcd.get(module.params['key'])
+    except Exception as exp:
+        module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
+                         exception=traceback.format_exc())
+
+    # Make the cluster_value[0] a string for string comparisons
+    result['old_value'] = to_native(cluster_value[0])
+
+    if module.params['state'] == 'absent':
+        if cluster_value[0] is not None:
+            if module.check_mode:
+                result['changed'] = True
+            else:
+                try:
+                    etcd.delete(module.params['key'])
+                except Exception as exp:
+                    module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
+                                     exception=traceback.format_exc())
+                else:
+                    result['changed'] = True
+    elif module.params['state'] == 'present':
+        if result['old_value'] != module.params['value']:
+            if module.check_mode:
+                result['changed'] = True
+            else:
+                try:
+                    etcd.put(module.params['key'], module.params['value'])
+                except Exception as exp:
+                    module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
+                                     exception=traceback.format_exc())
+                else:
+                    result['changed'] = True
+    else:
+        module.fail_json(msg="State not recognized")
+
+    # On success, report the outcome (including old_value) back to Ansible.
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
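
As a compact sketch (not part of the patch) of the etcd3 client calls the logic above relies on, with hypothetical key/value data:

    import etcd3

    client = etcd3.client(host='localhost', port=2379)
    value, meta = client.get('foo')   # value is None when the key is absent
    if value != b'bar':
        client.put('foo', 'bar')      # mirrors state=present
    # client.delete('foo')            # would mirror state=absent

diff --git a/plugins/modules/facter.py b/plugins/modules/facter.py
deleted file mode 120000
index 531d0f6a41..0000000000
--- a/plugins/modules/facter.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/facter.py
\ No newline at end of file
diff --git a/plugins/modules/facter_facts.py b/plugins/modules/facter_facts.py
new file mode 100644
index 0000000000..8ef5d7776b
--- /dev/null
+++ b/plugins/modules/facter_facts.py
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+
+# Copyright (c) 2023, Alexei Znamensky
+# Copyright (c) 2012, Michael DeHaan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: facter_facts
+short_description: Runs the discovery program C(facter) on the remote system and returns Ansible facts
+version_added: 8.0.0
+description:
+  - Runs the C(facter) discovery program (U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible
+    facts from the JSON data that can be useful for inventory purposes.
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.facts
+  - community.general.attributes.facts_module
+options:
+  arguments:
+    description:
+      - Specifies arguments for facter.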
+ type: list + elements: str +requirements: + - facter + - ruby-json +author: + - Ansible Core Team + - Michael DeHaan +""" + +EXAMPLES = r""" +- name: Execute facter no arguments + community.general.facter_facts: + +- name: Execute facter with arguments + community.general.facter_facts: + arguments: + - -p + - system_uptime + - timezone + - is_virtual +""" + +RETURN = r""" +ansible_facts: + description: Dictionary with one key C(facter). + returned: always + type: dict + contains: + facter: + description: Dictionary containing facts discovered in the remote system. + returned: always + type: dict +""" + +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + arguments=dict(type='list', elements='str'), + ), + supports_check_mode=True, + ) + + facter_path = module.get_bin_path( + 'facter', + opt_dirs=['/opt/puppetlabs/bin']) + + cmd = [facter_path, "--json"] + if module.params['arguments']: + cmd += module.params['arguments'] + + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(ansible_facts=dict(facter=json.loads(out))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py deleted file mode 100644 index 91dc6e5112..0000000000 --- a/plugins/modules/files/archive.py +++ /dev/null @@ -1,668 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Ben Doherty -# Sponsored by Oomph, Inc. http://www.oomphinc.com -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: archive -short_description: Creates a compressed archive of one or more files or trees -extends_documentation_fragment: files -description: - - Creates or extends an archive. - - The source and archive are on the remote host, and the archive I(is not) copied to the local host. - - Source files can be deleted after archival by specifying I(remove=True). -options: - path: - description: - - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. - type: list - elements: path - required: true - format: - description: - - The type of compression to use. - - Support for xz was added in Ansible 2.5. - type: str - choices: [ bz2, gz, tar, xz, zip ] - default: gz - dest: - description: - - The file name of the destination archive. The parent directory must exists on the remote host. - - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. - - If the destination archive already exists, it will be truncated and overwritten. - type: path - exclude_path: - description: - - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. - - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. - type: list - elements: path - default: [] - exclusion_patterns: - description: - - Glob style patterns to exclude files or directories from the resulting archive. - - This differs from I(exclude_path) which applies only to the source paths from I(path). 
- type: list - elements: path - version_added: 3.2.0 - force_archive: - description: - - Allows you to force the module to treat this as an archive even if only a single file is specified. - - By default when a single file is specified it is compressed only (not archived). - - Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module. - type: bool - default: false - remove: - description: - - Remove any added source files and trees after adding to archive. - type: bool - default: no -notes: - - Requires tarfile, zipfile, gzip and bzip2 packages on target host. - - Requires lzma or backports.lzma if using xz format. - - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives. -seealso: -- module: ansible.builtin.unarchive -author: -- Ben Doherty (@bendoh) -''' - -EXAMPLES = r''' -- name: Compress directory /path/to/foo/ into /path/to/foo.tgz - community.general.archive: - path: /path/to/foo - dest: /path/to/foo.tgz - -- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it - community.general.archive: - path: /path/to/foo - remove: yes - -- name: Create a zip archive of /path/to/foo - community.general.archive: - path: /path/to/foo - format: zip - -- name: Create a bz2 archive of multiple files, rooted at /path - community.general.archive: - path: - - /path/to/foo - - /path/wong/foo - dest: /path/file.tar.bz2 - format: bz2 - -- name: Create a bz2 archive of a globbed path, while excluding specific dirnames - community.general.archive: - path: - - /path/to/foo/* - dest: /path/file.tar.bz2 - exclude_path: - - /path/to/foo/bar - - /path/to/foo/baz - format: bz2 - -- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames - community.general.archive: - path: - - /path/to/foo/* - dest: /path/file.tar.bz2 - exclude_path: - - /path/to/foo/ba* - format: bz2 - -- name: Use gzip to compress a single archive (i.e don't archive it first with tar) - community.general.archive: - path: /path/to/foo/single.file - dest: /path/file.gz - format: gz - -- name: Create a tar.gz archive of a single file. - community.general.archive: - path: /path/to/foo/single.file - dest: /path/file.tar.gz - format: gz - force_archive: true -''' - -RETURN = r''' -state: - description: - The state of the input C(path). - type: str - returned: always -dest_state: - description: - - The state of the I(dest) file. - - C(absent) when the file does not exist. - - C(archive) when the file is an archive. - - C(compress) when the file is compressed, but not an archive. - - C(incomplete) when the file is an archive, but some files under I(path) were not found. - type: str - returned: success - version_added: 3.4.0 -missing: - description: Any files that were missing from the source. - type: list - returned: success -archived: - description: Any files that were compressed or added to the archive. - type: list - returned: success -arcroot: - description: The archive root. - type: str - returned: always -expanded_paths: - description: The list of matching paths from paths argument. - type: list - returned: always -expanded_exclude_paths: - description: The list of matching exclude paths from the exclude_path argument. 
- type: list - returned: always -''' - -import abc -import bz2 -import glob -import gzip -import io -import os -import re -import shutil -import tarfile -import zipfile -from fnmatch import fnmatch -from sys import version_info -from traceback import format_exc -from zlib import crc32 - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes, to_native -from ansible.module_utils import six - - -LZMA_IMP_ERR = None -if six.PY3: - try: - import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False -else: - try: - from backports import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False - -PY27 = version_info[0:2] >= (2, 7) - -STATE_ABSENT = 'absent' -STATE_ARCHIVED = 'archive' -STATE_COMPRESSED = 'compress' -STATE_INCOMPLETE = 'incomplete' - - -def common_path(paths): - empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' - - return os.path.join( - os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty - ) - - -def expand_paths(paths): - expanded_path = [] - is_globby = False - for path in paths: - b_path = _to_bytes(path) - if b'*' in b_path or b'?' in b_path: - e_paths = glob.glob(b_path) - is_globby = True - else: - e_paths = [b_path] - expanded_path.extend(e_paths) - return expanded_path, is_globby - - -def matches_exclusion_patterns(path, exclusion_patterns): - return any(fnmatch(path, p) for p in exclusion_patterns) - - -def is_archive(path): - return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) - - -def strip_prefix(prefix, string): - return string[len(prefix):] if string.startswith(prefix) else string - - -def _to_bytes(s): - return to_bytes(s, errors='surrogate_or_strict') - - -def _to_native(s): - return to_native(s, errors='surrogate_or_strict') - - -def _to_native_ascii(s): - return to_native(s, errors='surrogate_or_strict', encoding='ascii') - - -@six.add_metaclass(abc.ABCMeta) -class Archive(object): - def __init__(self, module): - self.module = module - - self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None - self.exclusion_patterns = module.params['exclusion_patterns'] or [] - self.format = module.params['format'] - self.must_archive = module.params['force_archive'] - self.remove = module.params['remove'] - - self.changed = False - self.destination_state = STATE_ABSENT - self.errors = [] - self.file = None - self.successes = [] - self.targets = [] - self.not_found = [] - - paths = module.params['path'] - self.expanded_paths, has_globs = expand_paths(paths) - self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] - - self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) - - if not self.paths: - module.fail_json( - path=', '.join(paths), - expanded_paths=_to_native(b', '.join(self.expanded_paths)), - expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), - msg='Error, no source paths were found' - ) - - self.root = common_path(self.paths) - - if not self.must_archive: - self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) - - if not self.destination and not self.must_archive: - self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format)) - - if self.must_archive and not self.destination: - module.fail_json( - dest=_to_native(self.destination), - path=', 
'.join(paths), - msg='Error, must specify "dest" when archiving multiple files or trees' - ) - - if self.remove: - self._check_removal_safety() - - self.original_checksums = self.destination_checksums() - self.original_size = self.destination_size() - - def add(self, path, archive_name): - try: - self._add(_to_native_ascii(path), _to_native(archive_name)) - if self.contains(_to_native(archive_name)): - self.successes.append(path) - except Exception as e: - self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) - - def add_single_target(self, path): - if self.format in ('zip', 'tar'): - self.open() - self.add(path, strip_prefix(self.root, path)) - self.close() - self.destination_state = STATE_ARCHIVED - else: - try: - f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') - with open(path, 'rb') as f_in: - shutil.copyfileobj(f_in, f_out) - f_out.close() - self.successes.append(path) - self.destination_state = STATE_COMPRESSED - except (IOError, OSError) as e: - self.module.fail_json( - path=_to_native(path), - dest=_to_native(self.destination), - msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() - ) - - def add_targets(self): - self.open() - try: - for target in self.targets: - if os.path.isdir(target): - for directory_path, directory_names, file_names in os.walk(target, topdown=True): - for directory_name in directory_names: - full_path = os.path.join(directory_path, directory_name) - self.add(full_path, strip_prefix(self.root, full_path)) - - for file_name in file_names: - full_path = os.path.join(directory_path, file_name) - self.add(full_path, strip_prefix(self.root, full_path)) - else: - self.add(target, strip_prefix(self.root, target)) - except Exception as e: - if self.format in ('zip', 'tar'): - archive_format = self.format - else: - archive_format = 'tar.' 
+ self.format - self.module.fail_json( - msg='Error when writing %s archive at %s: %s' % ( - archive_format, _to_native(self.destination), _to_native(e) - ), - exception=format_exc() - ) - self.close() - - if self.errors: - self.module.fail_json( - msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) - ) - - def is_different_from_original(self): - if self.original_checksums is None: - return self.original_size != self.destination_size() - else: - return self.original_checksums != self.destination_checksums() - - def destination_checksums(self): - if self.destination_exists() and self.destination_readable(): - return self._get_checksums(self.destination) - return None - - def destination_exists(self): - return self.destination and os.path.exists(self.destination) - - def destination_readable(self): - return self.destination and os.access(self.destination, os.R_OK) - - def destination_size(self): - return os.path.getsize(self.destination) if self.destination_exists() else 0 - - def find_targets(self): - for path in self.paths: - if not os.path.lexists(path): - self.not_found.append(path) - else: - self.targets.append(path) - - def has_targets(self): - return bool(self.targets) - - def has_unfound_targets(self): - return bool(self.not_found) - - def remove_single_target(self, path): - try: - os.remove(path) - except OSError as e: - self.module.fail_json( - path=_to_native(path), - msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() - ) - - def remove_targets(self): - for path in self.successes: - if os.path.exists(path): - try: - if os.path.isdir(path): - shutil.rmtree(path) - else: - os.remove(path) - except OSError: - self.errors.append(_to_native(path)) - for path in self.paths: - try: - if os.path.isdir(path): - shutil.rmtree(path) - except OSError: - self.errors.append(_to_native(path)) - - if self.errors: - self.module.fail_json( - dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors - ) - - def update_permissions(self): - try: - file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. 
- self.module.params['path'] = self.destination - file_args = self.module.load_file_common_arguments(self.module.params) - - self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) - - @property - def result(self): - return { - 'archived': [_to_native(p) for p in self.successes], - 'dest': _to_native(self.destination), - 'dest_state': self.destination_state, - 'changed': self.changed, - 'arcroot': _to_native(self.root), - 'missing': [_to_native(p) for p in self.not_found], - 'expanded_paths': [_to_native(p) for p in self.expanded_paths], - 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], - } - - def _check_removal_safety(self): - for path in self.paths: - if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): - self.module.fail_json( - path=b', '.join(self.paths), - msg='Error, created archive can not be contained in source paths when remove=true' - ) - - def _open_compressed_file(self, path, mode): - f = None - if self.format == 'gz': - f = gzip.open(path, mode) - elif self.format == 'bz2': - f = bz2.BZ2File(path, mode) - elif self.format == 'xz': - f = lzma.LZMAFile(path, mode) - else: - self.module.fail_json(msg="%s is not a valid format" % self.format) - - return f - - @abc.abstractmethod - def close(self): - pass - - @abc.abstractmethod - def contains(self, name): - pass - - @abc.abstractmethod - def open(self): - pass - - @abc.abstractmethod - def _add(self, path, archive_name): - pass - - @abc.abstractmethod - def _get_checksums(self, path): - pass - - -class ZipArchive(Archive): - def __init__(self, module): - super(ZipArchive, self).__init__(module) - - def close(self): - self.file.close() - - def contains(self, name): - try: - self.file.getinfo(name) - except KeyError: - return False - return True - - def open(self): - self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) - - def _add(self, path, archive_name): - if not matches_exclusion_patterns(path, self.exclusion_patterns): - self.file.write(path, archive_name) - - def _get_checksums(self, path): - try: - archive = zipfile.ZipFile(_to_native_ascii(path), 'r') - checksums = set((info.filename, info.CRC) for info in archive.infolist()) - archive.close() - except zipfile.BadZipfile: - checksums = set() - return checksums - - -class TarArchive(Archive): - def __init__(self, module): - super(TarArchive, self).__init__(module) - self.fileIO = None - - def close(self): - self.file.close() - if self.format == 'xz': - with lzma.open(_to_native(self.destination), 'wb') as f: - f.write(self.fileIO.getvalue()) - self.fileIO.close() - - def contains(self, name): - try: - self.file.getmember(name) - except KeyError: - return False - return True - - def open(self): - if self.format in ('gz', 'bz2'): - self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) - # python3 tarfile module allows xz format but for python2 we have to create the tarfile - # in memory and then compress it with lzma. 
- elif self.format == 'xz': - self.fileIO = io.BytesIO() - self.file = tarfile.open(fileobj=self.fileIO, mode='w') - elif self.format == 'tar': - self.file = tarfile.open(_to_native_ascii(self.destination), 'w') - else: - self.module.fail_json(msg="%s is not a valid archive format" % self.format) - - def _add(self, path, archive_name): - def py27_filter(tarinfo): - return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo - - def py26_filter(path): - return matches_exclusion_patterns(path, self.exclusion_patterns) - - if PY27: - self.file.add(path, archive_name, recursive=False, filter=py27_filter) - else: - self.file.add(path, archive_name, recursive=False, exclude=py26_filter) - - def _get_checksums(self, path): - try: - if self.format == 'xz': - with lzma.open(_to_native_ascii(path), 'r') as f: - archive = tarfile.open(fileobj=f) - checksums = set((info.name, info.chksum) for info in archive.getmembers()) - archive.close() - else: - archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) - checksums = set((info.name, info.chksum) for info in archive.getmembers()) - archive.close() - except (lzma.LZMAError, tarfile.ReadError, tarfile.CompressionError): - try: - # The python implementations of gzip, bz2, and lzma do not support restoring compressed files - # to their original names so only file checksum is returned - f = self._open_compressed_file(_to_native_ascii(path), 'r') - checksums = set([(b'', crc32(f.read()))]) - f.close() - except Exception: - checksums = set() - return checksums - - -def get_archive(module): - if module.params['format'] == 'zip': - return ZipArchive(module) - else: - return TarArchive(module) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='list', elements='path', required=True), - format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), - dest=dict(type='path'), - exclude_path=dict(type='list', elements='path', default=[]), - exclusion_patterns=dict(type='list', elements='path'), - force_archive=dict(type='bool', default=False), - remove=dict(type='bool', default=False), - ), - add_file_common_args=True, - supports_check_mode=True, - ) - - if not HAS_LZMA and module.params['format'] == 'xz': - module.fail_json( - msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR - ) - - check_mode = module.check_mode - - archive = get_archive(module) - archive.find_targets() - - if not archive.has_targets(): - if archive.destination_exists(): - archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED - elif archive.has_targets() and archive.must_archive: - if check_mode: - archive.changed = True - else: - archive.add_targets() - archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED - archive.changed |= archive.is_different_from_original() - if archive.remove: - archive.remove_targets() - else: - if check_mode: - if not archive.destination_exists(): - archive.changed = True - else: - path = archive.paths[0] - archive.add_single_target(path) - archive.changed |= archive.is_different_from_original() - if archive.remove: - archive.remove_single_target(path) - - if archive.destination_exists(): - archive.update_permissions() - - module.exit_json(**archive.result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/filesize.py b/plugins/modules/files/filesize.py deleted file mode 100644 index 81701438ca..0000000000 
--- a/plugins/modules/files/filesize.py +++ /dev/null @@ -1,487 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: filesize - -short_description: Create a file with a given size, or resize it if it exists - -description: - - This module is a simple wrapper around C(dd) to create, extend or truncate - a file, given its size. It can be used to manage swap files (that require - contiguous blocks) or alternatively, huge sparse files. - -author: - - quidame (@quidame) - -version_added: "3.0.0" - -options: - path: - description: - - Path of the regular file to create or resize. - type: path - required: true - size: - description: - - Requested size of the file. - - The value is a number (either C(int) or C(float)) optionally followed - by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or - C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB), - and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of - C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB); - C(G), C(g) or C(GiB) (= 1024MiB); and so on. - - If the multiplicative suffix is not provided, the value is treated as - an integer number of blocks of I(blocksize) bytes each (float values - are rounded to the closest integer). - - When the I(size) value is equal to the current file size, does nothing. - - When the I(size) value is bigger than the current file size, bytes from - I(source) (if I(sparse) is not C(false)) are appended to the file - without truncating it, in other words, without modifying the existing - bytes of the file. - - When the I(size) value is smaller than the current file size, it is - truncated to the requested value without modifying bytes before this - value. - - That means that a file of any arbitrary size can be grown to any other - arbitrary size, and then resized down to its initial size without - modifying its initial content. - type: raw - required: true - blocksize: - description: - - Size of blocks, in bytes if not followed by a multiplicative suffix. - - The numeric value (before the unit) C(MUST) be an integer (or a C(float) - if it equals an integer). - - If not set, the size of blocks is guessed from the OS and commonly - results in C(512) or C(4096) bytes, that is used internally by the - module or when I(size) has no unit. - type: raw - source: - description: - - Device or file that provides input data to provision the file. - - This parameter is ignored when I(sparse=true). - type: path - default: /dev/zero - force: - description: - - Whether or not to overwrite the file if it exists, in other words, to - truncate it from 0. When C(true), the module is not idempotent, that - means it always reports I(changed=true). - - I(force=true) and I(sparse=true) are mutually exclusive. - type: bool - default: false - sparse: - description: - - Whether or not the file to create should be a sparse file. - - This option is effective only on newly created files, or when growing a - file, only for the bytes to append. - - This option is not supported on OpenBSD, Solaris and AIX. - - I(force=true) and I(sparse=true) are mutually exclusive. - type: bool - default: false - unsafe_writes: - description: - - This option is silently ignored. This module always modifies file - size in-place. 
- -notes: - - This module supports C(check_mode) and C(diff). - -requirements: - - dd (Data Duplicator) in PATH - -extends_documentation_fragment: - - ansible.builtin.files - -seealso: - - name: dd(1) manpage for Linux - description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils). - link: https://man7.org/linux/man-pages/man1/dd.1.html - - - name: dd(1) manpage for IBM AIX - description: Manual page of the IBM AIX's dd implementation. - link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html - - - name: dd(1) manpage for Mac OSX - description: Manual page of the Mac OSX's dd implementation. - link: https://www.unix.com/man-page/osx/1/dd/ - - - name: dd(1M) manpage for Solaris - description: Manual page of the Oracle Solaris's dd implementation. - link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html - - - name: dd(1) manpage for FreeBSD - description: Manual page of the FreeBSD's dd implementation. - link: https://www.freebsd.org/cgi/man.cgi?dd(1) - - - name: dd(1) manpage for OpenBSD - description: Manual page of the OpenBSD's dd implementation. - link: https://man.openbsd.org/dd - - - name: dd(1) manpage for NetBSD - description: Manual page of the NetBSD's dd implementation. - link: https://man.netbsd.org/dd.1 -''' - -EXAMPLES = r''' -- name: Create a file of 1G filled with null bytes - community.general.filesize: - path: /var/bigfile - size: 1G - -- name: Extend the file to 2G (2*1024^3) - community.general.filesize: - path: /var/bigfile - size: 2G - -- name: Reduce the file to 2GB (2*1000^3) - community.general.filesize: - path: /var/bigfile - size: 2GB - -- name: Fill a file with random bytes for backing a LUKS device - community.general.filesize: - path: ~/diskimage.luks - size: 512.0 MiB - source: /dev/urandom - -- name: Take a backup of MBR boot code into a file, overwriting it if it exists - community.general.filesize: - path: /media/sdb1/mbr.bin - size: 440B - source: /dev/sda - force: true - -- name: Create/resize a sparse file of/to 8TB - community.general.filesize: - path: /var/local/sparsefile - size: 8TB - sparse: true - -- name: Create a file with specific size and attributes, to be used as swap space - community.general.filesize: - path: /var/swapfile - size: 2G - blocksize: 512B - mode: u=rw,go= - owner: root - group: root -''' - -RETURN = r''' -cmd: - description: Command executed to create or resize the file. - type: str - returned: when changed or failed - sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024 - -filesize: - description: Dictionary of sizes related to the file. - type: dict - returned: always - contains: - blocks: - description: Number of blocks in the file. - type: int - sample: 500 - blocksize: - description: Size of the blocks in bytes. - type: int - sample: 1024 - bytes: - description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize). - type: int - sample: 512000 - iec: - description: Size of the file, in human-readable format, following IEC standard. - type: str - sample: 500.0 KiB - si: - description: Size of the file, in human-readable format, following SI standard. - type: str - sample: 512.0 kB - -size_diff: - description: Difference (positive or negative) between old size and new size, in bytes. - type: int - sample: -1234567890 - returned: always - -path: - description: Realpath of the file if it is a symlink, otherwise the same than module's param. 
- type: str - sample: /var/swap0 - returned: always -''' - - -import re -import os -import math - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -# These are the multiplicative suffixes understood (or returned) by dd and -# others (ls, df, lvresize, lsblk...). -SIZE_UNITS = dict( - B=1, - kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1, - MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2, - GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3, - TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4, - PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5, - EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6, - ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7, - YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8, -) - - -def bytes_to_human(size, iec=False): - """Return human-readable size (with SI or IEC suffix) from bytes. This is - only to populate the returned result of the module, not to handle the - file itself (we only rely on bytes for that). - """ - unit = 'B' - for (u, v) in SIZE_UNITS.items(): - if size < v: - continue - if iec: - if 'i' not in u or size / v >= 1024: - continue - else: - if v % 5 or size / v >= 1000: - continue - unit = u - - hsize = round(size / SIZE_UNITS[unit], 2) - if unit == 'B': - hsize = int(hsize) - - unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit) - if unit == 'KB': - unit = 'kB' - - return '%s %s' % (str(hsize), unit) - - -def smart_blocksize(size, unit, product, bsize): - """Ensure the total size can be written as blocks*blocksize, with blocks - and blocksize being integers. - """ - if not product % bsize: - return bsize - - # Basically, for a file of 8kB (=8000B), system's block size of 4096 bytes - # is not usable. The smallest integer number of kB to work with 512B blocks - # is 64, the nexts are 128, 192, 256, and so on. - - unit_size = SIZE_UNITS[unit] - - if size == int(size): - if unit_size > SIZE_UNITS['MiB']: - if unit_size % 5: - return SIZE_UNITS['MiB'] - return SIZE_UNITS['MB'] - return unit_size - - if unit == 'B': - raise AssertionError("byte is the smallest unit and requires an integer value") - - if 0 < product < bsize: - return product - - for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2): - if not product % bsz: - return bsz - return 1 - - -def split_size_unit(string, isint=False): - """Split a string between the size value (int or float) and the unit. - Support optional space(s) between the numeric value and the unit. - """ - unit = re.sub(r'(\d|\.)', r'', string).strip() - value = float(re.sub(r'%s' % unit, r'', string).strip()) - if isint and unit in ('B', ''): - if int(value) != value: - raise AssertionError("invalid blocksize value: bytes require an integer value") - - if not unit: - unit = None - product = int(round(value)) - else: - if unit not in SIZE_UNITS.keys(): - raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." % - (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get)))) - product = int(round(value * SIZE_UNITS[unit])) - return value, unit, product - - -def size_string(value): - """Convert a raw value to a string, but only if it is an integer, a float - or a string itself. 
- """ - if not isinstance(value, (int, float, str)): - raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value)) - return str(value) - - -def size_spec(args): - """Return a dictionary with size specifications, especially the size in - bytes (after rounding it to an integer number of blocks). - """ - blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2] - if blocksize_in_bytes == 0: - raise AssertionError("block size cannot be equal to zero") - - size_value, size_unit, size_result = split_size_unit(args['size']) - if not size_unit: - blocks = int(math.ceil(size_value)) - else: - blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes) - blocks = int(math.ceil(size_result / blocksize_in_bytes)) - - args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes) - args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes, - iec=bytes_to_human(round_bytes, True), - si=bytes_to_human(round_bytes)) - return args['size_spec'] - - -def current_size(args): - """Return the size of the file at the given location if it exists, or None.""" - path = args['path'] - if os.path.exists(path): - if not os.path.isfile(path): - raise AssertionError("%s exists but is not a regular file" % path) - args['file_size'] = os.stat(path).st_size - else: - args['file_size'] = None - return args['file_size'] - - -def complete_dd_cmdline(args, dd_cmd): - """Compute dd options to grow or truncate a file.""" - if args['file_size'] == args['size_spec']['bytes'] and not args['force']: - # Nothing to do. - return list() - - bs = args['size_spec']['blocksize'] - conv = list() - - # For sparse files (create, truncate, grow): write count=0 block. - if args['sparse']: - seek = args['size_spec']['blocks'] - conv += ['sparse'] - elif args['force'] or not os.path.exists(args['path']): # Create file - seek = 0 - elif args['size_diff'] < 0: # Truncate file - seek = args['size_spec']['blocks'] - elif args['size_diff'] % bs: # Grow file - seek = int(args['file_size'] / bs) + 1 - else: - seek = int(args['file_size'] / bs) - - count = args['size_spec']['blocks'] - seek - dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)] - if conv: - dd_cmd += ['conv=%s' % ','.join(conv)] - - return dd_cmd - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True), - size=dict(type='raw', required=True), - blocksize=dict(type='raw'), - source=dict(type='path', default='/dev/zero'), - sparse=dict(type='bool', default=False), - force=dict(type='bool', default=False), - ), - supports_check_mode=True, - add_file_common_args=True, - ) - args = dict(**module.params) - diff = dict(before=dict(), after=dict()) - - if args['sparse'] and args['force']: - module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true') - if not os.path.exists(os.path.dirname(args['path'])): - module.fail_json(msg='parent directory of the file must exist prior to run this module') - if not args['blocksize']: - args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize) - - try: - args['size'] = size_string(args['size']) - args['blocksize'] = size_string(args['blocksize']) - initial_filesize = current_size(args) - size_descriptors = size_spec(args) - except AssertionError as err: - module.fail_json(msg=to_native(err)) - - expected_filesize = size_descriptors['bytes'] - if initial_filesize: - args['size_diff'] = expected_filesize - initial_filesize - 
diff['after']['size'] = expected_filesize - diff['before']['size'] = initial_filesize - - result = dict( - changed=args['force'], - size_diff=args['size_diff'], - path=args['path'], - filesize=size_descriptors) - - dd_bin = module.get_bin_path('dd', True) - dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']] - - if expected_filesize != initial_filesize or args['force']: - result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd)) - if module.check_mode: - result['changed'] = True - else: - result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd) - - diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args) - if initial_filesize: - result['size_diff'] = result_filesize - initial_filesize - if not args['force']: - result['changed'] = result_filesize != initial_filesize - - if result['rc']: - msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % ( - args['path'], args['size'], args['source']) - module.fail_json(msg=msg, **result) - if result_filesize != expected_filesize: - msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % ( - args['path'], args['size'], args['source'], result_filesize) - module.fail_json(msg=msg, **result) - - # dd follows symlinks, and so does this module, while file module doesn't. - # If we call it, this is to manage file's mode, owner and so on, not the - # symlink's ones. - file_params = dict(**module.params) - if os.path.islink(args['path']): - file_params['path'] = result['path'] = os.path.realpath(args['path']) - - if args['file_size'] is not None: - file_args = module.load_file_common_arguments(file_params) - result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) - result['diff'] = diff - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py deleted file mode 100644 index f25cc063ff..0000000000 --- a/plugins/modules/files/ini_file.py +++ /dev/null @@ -1,482 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Jan-Piet Mens -# Copyright: (c) 2015, Ales Nosek -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ini_file -short_description: Tweak settings in INI files -extends_documentation_fragment: files -description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). - - Adds missing sections if they don't exist. - - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. - - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when - no other modifications need to be applied. -options: - path: - description: - - Path to the INI-style file; this file is created if required. - - Before Ansible 2.3 this option was only usable as I(dest). - type: path - required: true - aliases: [ dest ] - section: - description: - - Section name in INI file. This is added if C(state=present) automatically when - a single value is being set. 
- - If left empty or set to C(null), the I(option) will be placed before the first I(section). - - Using C(null) is also required if the config format does not support sections. - type: str - required: true - option: - description: - - If set (required for changing a I(value)), this is the name of the option. - - May be omitted if adding/removing a whole I(section). - type: str - value: - description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(values). - - I(value=v) is equivalent to I(values=[v]). - type: str - values: - description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(value). - - I(value=v) is equivalent to I(values=[v]). - type: list - elements: str - version_added: 3.6.0 - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - type: bool - default: no - state: - description: - - If set to C(absent) and I(exclusive) set to C(yes) all matching I(option) lines are removed. - - If set to C(absent) and I(exclusive) set to C(no) the specified C(option=value) lines are removed, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(no) the specified C(option=values) lines are added, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(yes) all given C(option=values) lines will be - added and the other I(option)s with the same name are removed. - type: str - choices: [ absent, present ] - default: present - exclusive: - description: - - If set to C(yes) (default), all matching I(option) lines are removed when I(state=absent), - or replaced when I(state=present). - - If set to C(no), only the specified I(value(s)) are added when I(state=present), - or removed when I(state=absent), and existing ones are not modified. - type: bool - default: yes - version_added: 3.6.0 - no_extra_spaces: - description: - - Do not insert spaces before and after '=' symbol. - type: bool - default: no - create: - description: - - If set to C(no), the module will fail if the file does not already exist. - - By default it will create the file if it is missing. - type: bool - default: yes - allow_no_value: - description: - - Allow option without value and without '=' symbol. - type: bool - default: no -notes: - - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. - - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. 
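The absent/exclusive semantics documented above reduce to two list comprehensions over `match_active_opt()` (the helper defined a little further down); a self-contained sketch, with the sample INI lines invented:

```python
import re

def match_active_opt(option, line):
    # Same regex as the module's helper of the same name.
    option = re.escape(option)
    return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)

section = ['beverage = coke\n', 'beverage = fanta\n', 'fav = lemonade\n']

# state=absent with exclusive=true: every active 'beverage' line goes.
print([l for l in section if not match_active_opt('beverage', l)])
# -> ['fav = lemonade\n']

# state=absent with exclusive=false: only the listed values are removed.
print([l for l in section
       if not (match_active_opt('beverage', l)
               and match_active_opt('beverage', l).group(6) in ['fanta'])])
# -> ['beverage = coke\n', 'fav = lemonade\n']
```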
-author: - - Jan-Piet Mens (@jpmens) - - Ales Nosek (@noseka1) -''' - -EXAMPLES = r''' -# Before Ansible 2.3, option 'dest' was used instead of 'path' -- name: Ensure "fav=lemonade is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: fav - value: lemonade - mode: '0600' - backup: yes - -- name: Ensure "temperature=cold is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/anotherconf - section: drinks - option: temperature - value: cold - backup: yes - -- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: beverage - value: lemon juice - mode: '0600' - state: present - exclusive: no - -- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: beverage - values: - - coke - - pepsi - mode: '0600' - state: present -''' - -import io -import os -import re -import tempfile -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -def match_opt(option, line): - option = re.escape(option) - return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) - - -def match_active_opt(option, line): - option = re.escape(option) - return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) - - -def update_section_line(changed, section_lines, index, changed_lines, newline, msg): - option_changed = section_lines[index] != newline - changed = changed or option_changed - if option_changed: - msg = 'option changed' - section_lines[index] = newline - changed_lines[index] = 1 - return (changed, msg) - - -def do_ini(module, filename, section=None, option=None, values=None, - state='present', exclusive=True, backup=False, no_extra_spaces=False, - create=True, allow_no_value=False): - - if section is not None: - section = to_text(section) - if option is not None: - option = to_text(option) - - # deduplicate entries in values - values_unique = [] - [values_unique.append(to_text(value)) for value in values if value not in values_unique and value is not None] - values = values_unique - - diff = dict( - before='', - after='', - before_header='%s (content)' % filename, - after_header='%s (content)' % filename, - ) - - if not os.path.exists(filename): - if not create: - module.fail_json(rc=257, msg='Destination %s does not exist!' 
% filename) - destpath = os.path.dirname(filename) - if not os.path.exists(destpath) and not module.check_mode: - os.makedirs(destpath) - ini_lines = [] - else: - with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: - ini_lines = [to_text(line) for line in ini_file.readlines()] - - if module._diff: - diff['before'] = u''.join(ini_lines) - - changed = False - - # ini file could be empty - if not ini_lines: - ini_lines.append(u'\n') - - # last line of file may not contain a trailing newline - if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n': - ini_lines[-1] += u'\n' - changed = True - - # append fake section lines to simplify the logic - # At top: - # Fake random section to do not match any other in the file - # Using commit hash as fake section name - fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5" - - # Insert it at the beginning - ini_lines.insert(0, u'[%s]' % fake_section_name) - - # At bottom: - ini_lines.append(u'[') - - # If no section is defined, fake section is used - if not section: - section = fake_section_name - - within_section = not section - section_start = section_end = 0 - msg = 'OK' - if no_extra_spaces: - assignment_format = u'%s=%s\n' - else: - assignment_format = u'%s = %s\n' - - option_no_value_present = False - - non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) - - before = after = [] - section_lines = [] - - for index, line in enumerate(ini_lines): - # find start and end of section - if line.startswith(u'[%s]' % section): - within_section = True - section_start = index - elif line.startswith(u'['): - if within_section: - section_end = index - break - - before = ini_lines[0:section_start] - section_lines = ini_lines[section_start:section_end] - after = ini_lines[section_end:len(ini_lines)] - - # Keep track of changed section_lines - changed_lines = [0] * len(section_lines) - - # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex - # - # 1. edit all lines where we have a option=value pair with a matching value in values[] - # 2. edit all the remaing lines where we have a matching option - # 3. delete remaining lines where we have a matching option - # 4. 
insert missing option line(s) at the end of the section - - if state == 'present' and option: - for index, line in enumerate(section_lines): - if match_opt(option, line): - match = match_opt(option, line) - if values and match.group(6) in values: - matched_value = match.group(6) - if not matched_value and allow_no_value: - # replace existing option with no value line(s) - newline = u'%s\n' % option - option_no_value_present = True - else: - # replace existing option=value line(s) - newline = assignment_format % (option, matched_value) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - values.remove(matched_value) - elif not values and allow_no_value: - # replace existing option with no value line(s) - newline = u'%s\n' % option - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - option_no_value_present = True - break - - if state == 'present' and exclusive and not allow_no_value: - # override option with no value to option with value if not allow_no_value - if len(values) > 0: - for index, line in enumerate(section_lines): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): - newline = assignment_format % (option, values.pop(0)) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - if len(values) == 0: - break - # remove all remaining option occurrences from the rest of the section - for index in range(len(section_lines) - 1, 0, -1): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): - del section_lines[index] - del changed_lines[index] - changed = True - msg = 'option changed' - - if state == 'present': - # insert missing option line(s) at the end of the section - for index in range(len(section_lines), 0, -1): - # search backwards for previous non-blank or non-comment line - if not non_blank_non_comment_pattern.match(section_lines[index - 1]): - if option and values: - # insert option line(s) - for element in values[::-1]: - # items are added backwards, so traverse the list backwards to not confuse the user - # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ - if element is not None: - # insert option=value line - section_lines.insert(index, assignment_format % (option, element)) - msg = 'option added' - changed = True - elif element is None and allow_no_value: - # insert option with no value line - section_lines.insert(index, u'%s\n' % option) - msg = 'option added' - changed = True - elif option and not values and allow_no_value and not option_no_value_present: - # insert option with no value line(s) - section_lines.insert(index, u'%s\n' % option) - msg = 'option added' - changed = True - break - - if state == 'absent': - if option: - if exclusive: - # delete all option line(s) with given option and ignore value - new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] - if section_lines != new_section_lines: - changed = True - msg = 'option changed' - section_lines = new_section_lines - elif not exclusive and len(values) > 0: - # delete specified option=value line(s) - new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)] - if section_lines != new_section_lines: - changed = True - msg = 'option changed' - section_lines = new_section_lines - else: - # drop the entire section - section_lines = [] - msg = 'section 
removed' - changed = True - - # reassemble the ini_lines after manipulation - ini_lines = before + section_lines + after - - # remove the fake section line - del ini_lines[0] - del ini_lines[-1:] - - if not within_section and state == 'present': - ini_lines.append(u'[%s]\n' % section) - msg = 'section and option added' - if option and values: - for value in values: - ini_lines.append(assignment_format % (option, value)) - elif option and not values and allow_no_value: - ini_lines.append(u'%s\n' % option) - else: - msg = 'only section added' - changed = True - - if module._diff: - diff['after'] = u''.join(ini_lines) - - backup_file = None - if changed and not module.check_mode: - if backup: - backup_file = module.backup_local(filename) - - encoded_ini_lines = [to_bytes(line) for line in ini_lines] - try: - tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'wb') - f.writelines(encoded_ini_lines) - f.close() - except IOError: - module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) - - try: - module.atomic_move(tmpfile, filename) - except IOError: - module.ansible.fail_json(msg='Unable to move temporary \ - file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc()) - - return (changed, backup_file, diff, msg) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['dest']), - section=dict(type='str', required=True), - option=dict(type='str'), - value=dict(type='str'), - values=dict(type='list', elements='str'), - backup=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - exclusive=dict(type='bool', default=True), - no_extra_spaces=dict(type='bool', default=False), - allow_no_value=dict(type='bool', default=False), - create=dict(type='bool', default=True) - ), - mutually_exclusive=[ - ['value', 'values'] - ], - add_file_common_args=True, - supports_check_mode=True, - ) - - path = module.params['path'] - section = module.params['section'] - option = module.params['option'] - value = module.params['value'] - values = module.params['values'] - state = module.params['state'] - exclusive = module.params['exclusive'] - backup = module.params['backup'] - no_extra_spaces = module.params['no_extra_spaces'] - allow_no_value = module.params['allow_no_value'] - create = module.params['create'] - - if state == 'present' and not allow_no_value and value is None and not values: - module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") - - if value is not None: - values = [value] - elif values is None: - values = [] - - (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value) - - if not module.check_mode and os.path.exists(path): - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - - results = dict( - changed=changed, - diff=diff, - msg=msg, - path=path, - ) - if backup_file is not None: - results['backup_file'] = backup_file - - # Mission complete - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/iso_create.py b/plugins/modules/files/iso_create.py deleted file mode 100644 index 3fa456339e..0000000000 --- a/plugins/modules/files/iso_create.py +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: 
(c) 2020, Ansible Project -# Copyright: (c) 2020, VMware, Inc. All Rights Reserved. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: iso_create -short_description: Generate ISO file with specified files or folders -description: - - This module is used to generate ISO file with specified path of files. -author: - - Diane Wang (@Tomorrow9) -requirements: -- "pycdlib" -- "python >= 2.7" -version_added: '0.2.0' - -options: - src_files: - description: - - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file. - - Will fail if specified file or folder in C(src_files) does not exist on local machine. - - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and - underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path - names are limited to 255 characters.' - type: list - required: yes - elements: path - dest_iso: - description: - - The absolute path with file name of the new generated ISO file on local machine. - - Will create intermediate folders when they does not exist. - type: path - required: yes - interchange_level: - description: - - The ISO9660 interchange level to use, it dictates the rules on the names of files. - - Levels and valid values C(1), C(2), C(3), C(4) are supported. - - The default value is level C(1), which is the most conservative, level C(3) is recommended. - - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension. - type: int - default: 1 - choices: [1, 2, 3, 4] - vol_ident: - description: - - The volume identification string to use on the new generated ISO image. - type: str - rock_ridge: - description: - - Whether to make this ISO have the Rock Ridge extensions or not. - - Valid values are C(1.09), C(1.10) or C(1.12), means adding the specified Rock Ridge version to the ISO. - - If unsure, set C(1.09) to ensure maximum compatibility. - - If not specified, then not add Rock Ridge extension to the ISO. - type: str - choices: ['1.09', '1.10', '1.12'] - joliet: - description: - - Support levels and valid values are C(1), C(2), or C(3). - - Level C(3) is by far the most common. - - If not specified, then no Joliet support is added. - type: int - choices: [1, 2, 3] - udf: - description: - - Whether to add UDF support to this ISO. - - If set to C(True), then version 2.60 of the UDF spec is used. - - If not specified or set to C(False), then no UDF support is added. - type: bool - default: False -''' - -EXAMPLES = r''' -- name: Create an ISO file - community.general.iso_create: - src_files: - - /root/testfile.yml - - /root/testfolder - dest_iso: /tmp/test.iso - interchange_level: 3 - -- name: Create an ISO file with Rock Ridge extension - community.general.iso_create: - src_files: - - /root/testfile.yml - - /root/testfolder - dest_iso: /tmp/test.iso - rock_ridge: 1.09 - -- name: Create an ISO file with Joliet support - community.general.iso_create: - src_files: - - ./windows_config/Autounattend.xml - dest_iso: ./test.iso - interchange_level: 3 - joliet: 3 - vol_ident: WIN_AUTOINSTALL -''' - -RETURN = r''' -source_file: - description: Configured source files or directories list. 
- returned: on success - type: list - elements: path - sample: ["/path/to/file.txt", "/path/to/folder"] -created_iso: - description: Created iso file path. - returned: on success - type: str - sample: "/path/to/test.iso" -interchange_level: - description: Configured interchange level. - returned: on success - type: int - sample: 3 -vol_ident: - description: Configured volume identification string. - returned: on success - type: str - sample: "OEMDRV" -joliet: - description: Configured Joliet support level. - returned: on success - type: int - sample: 3 -rock_ridge: - description: Configured Rock Ridge version. - returned: on success - type: str - sample: "1.09" -udf: - description: Configured UDF support. - returned: on success - type: bool - sample: False -''' - -import os -import traceback - -PYCDLIB_IMP_ERR = None -try: - import pycdlib - HAS_PYCDLIB = True -except ImportError: - PYCDLIB_IMP_ERR = traceback.format_exc() - HAS_PYCDLIB = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None): - rr_name = None - joliet_path = None - udf_path = None - # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot, - # followed by a maximum 3 character extension, followed by a semicolon and a version - file_name = os.path.basename(file_path) - if '.' not in file_name: - file_in_iso_path = file_path.upper() + '.;1' - else: - file_in_iso_path = file_path.upper() + ';1' - if rock_ridge: - rr_name = file_name - if use_joliet: - joliet_path = file_path - if use_udf: - udf_path = file_path - try: - iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) - except Exception as err: - module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err))) - - -def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None): - rr_name = None - joliet_path = None - udf_path = None - iso_dir_path = dir_path.upper() - if rock_ridge: - rr_name = os.path.basename(dir_path) - if use_joliet: - joliet_path = iso_dir_path - if use_udf: - udf_path = iso_dir_path - try: - iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) - except Exception as err: - module.fail_json(msg="Failed to directory %s to ISO file due to %s" % (dir_path, to_native(err))) - - -def main(): - argument_spec = dict( - src_files=dict(type='list', required=True, elements='path'), - dest_iso=dict(type='path', required=True), - interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1), - vol_ident=dict(type='str'), - rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']), - joliet=dict(type='int', choices=[1, 2, 3]), - udf=dict(type='bool', default=False), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - if not HAS_PYCDLIB: - module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR) - - src_file_list = module.params.get('src_files') - if src_file_list and len(src_file_list) == 0: - module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.') - for src_file in src_file_list: - if not os.path.exists(src_file): - module.fail_json(msg="Specified source file/directory path does not exist on local 
machine, %s" % src_file) - - dest_iso = module.params.get('dest_iso') - if dest_iso and len(dest_iso) == 0: - module.fail_json(msg='Please specify the absolute path of the new created ISO file using dest_iso parameter.') - - dest_iso_dir = os.path.dirname(dest_iso) - if dest_iso_dir and not os.path.exists(dest_iso_dir): - # will create intermediate dir for new ISO file - try: - os.makedirs(dest_iso_dir) - except OSError as err: - module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err))) - - volume_id = module.params.get('vol_ident') - if volume_id is None: - volume_id = '' - inter_level = module.params.get('interchange_level') - rock_ridge = module.params.get('rock_ridge') - use_joliet = module.params.get('joliet') - use_udf = None - if module.params['udf']: - use_udf = '2.60' - - result = dict( - changed=False, - source_file=src_file_list, - created_iso=dest_iso, - interchange_level=inter_level, - vol_ident=volume_id, - rock_ridge=rock_ridge, - joliet=use_joliet, - udf=use_udf - ) - if not module.check_mode: - iso_file = pycdlib.PyCdlib() - iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf) - - for src_file in src_file_list: - # if specify a dir then go through the dir to add files and dirs - if os.path.isdir(src_file): - dir_list = [] - file_list = [] - src_file = src_file.rstrip('/') - dir_name = os.path.basename(src_file) - add_directory(module, iso_file=iso_file, dir_path='/' + dir_name, rock_ridge=rock_ridge, - use_joliet=use_joliet, use_udf=use_udf) - - # get dir list and file list - for path, dirs, files in os.walk(src_file): - for filename in files: - file_list.append(os.path.join(path, filename)) - for dir in dirs: - dir_list.append(os.path.join(path, dir)) - for new_dir in dir_list: - add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1], - rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) - for new_file in file_list: - add_file(module, iso_file=iso_file, src_file=new_file, - file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge, - use_joliet=use_joliet, use_udf=use_udf) - # if specify a file then add this file directly to the '/' path in ISO - else: - add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file), - rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) - - iso_file.write(dest_iso) - iso_file.close() - - result['changed'] = True - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/iso_extract.py b/plugins/modules/files/iso_extract.py deleted file mode 100644 index 18c283cbf7..0000000000 --- a/plugins/modules/files/iso_extract.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Jeroen Hoekx -# Copyright: (c) 2016, Matt Robinson -# Copyright: (c) 2017, Dag Wieers -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Jeroen Hoekx (@jhoekx) -- Matt Robinson (@ribbons) -- Dag Wieers (@dagwieers) -module: iso_extract -short_description: Extract files from an ISO image -description: -- This module has two possible ways of operation. 
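Before moving on to iso_extract below, a minimal sketch of the pycdlib calls the removed iso_create module chains together. The paths and volume identifier here are hypothetical; the API calls themselves are the ones the module uses.

```python
import pycdlib

iso = pycdlib.PyCdlib()
iso.new(interchange_level=3, vol_ident='WIN_AUTOINSTALL')

# ISO9660 names are uppercased and carry a ';1' version suffix,
# exactly as add_file() above builds file_in_iso_path.
iso.add_file('/root/testfile.yml', iso_path='/TESTFILE.YML;1')
iso.add_directory(iso_path='/TESTDIR')

iso.write('/tmp/test.iso')
iso.close()
```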
-- If 7zip is installed on the system, this module extracts files from an ISO - into a temporary directory and copies files to a given destination, - if needed. -- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module - mounts the ISO image to a temporary location, and copies files to a given - destination, if needed. -requirements: -- Either 7z (from I(7zip) or I(p7zip) package) -- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) -options: - image: - description: - - The ISO image to extract files from. - type: path - required: yes - aliases: [ path, src ] - dest: - description: - - The destination directory to extract files to. - type: path - required: yes - files: - description: - - A list of files to extract from the image. - - Extracting directories does not work. - type: list - elements: str - required: yes - force: - description: - - If C(yes), which will replace the remote file when contents are different than the source. - - If C(no), the file will only be extracted and copied if the destination does not already exist. - type: bool - default: yes - executable: - description: - - The path to the C(7z) executable to use for extracting files from the ISO. - - If not provided, it will assume the value C(7z). - type: path -notes: -- Only the file checksum (content) is taken into account when extracting files - from the ISO image. If C(force=no), only checks the presence of the file. -- In Ansible 2.3 this module was using C(mount) and C(umount) commands only, - requiring root access. This is no longer needed with the introduction of 7zip - for extraction. -''' - -EXAMPLES = r''' -- name: Extract kernel and ramdisk from a LiveCD - community.general.iso_extract: - image: /tmp/rear-test.iso - dest: /tmp/virt-rear/ - files: - - isolinux/kernel - - isolinux/initrd.cgz -''' - -RETURN = r''' -# -''' - -import os.path -import shutil -import tempfile - -try: # python 3.3+ - from shlex import quote -except ImportError: # older python - from pipes import quote - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - image=dict(type='path', required=True, aliases=['path', 'src']), - dest=dict(type='path', required=True), - files=dict(type='list', elements='str', required=True), - force=dict(type='bool', default=True), - executable=dict(type='path'), # No default on purpose - ), - supports_check_mode=True, - ) - image = module.params['image'] - dest = module.params['dest'] - files = module.params['files'] - force = module.params['force'] - executable = module.params['executable'] - - result = dict( - changed=False, - dest=dest, - image=image, - ) - - # We want to know if the user provided it or not, so we set default here - if executable is None: - executable = '7z' - - binary = module.get_bin_path(executable, None) - - # When executable was provided and binary not found, warn user ! - if module.params['executable'] is not None and not binary: - module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." 
% executable) - - if not os.path.exists(dest): - module.fail_json(msg="Directory '%s' does not exist" % dest) - - if not os.path.exists(os.path.dirname(image)): - module.fail_json(msg="ISO image '%s' does not exist" % image) - - result['files'] = [] - extract_files = list(files) - - if not force: - # Check if we have to process any files based on existence - for f in files: - dest_file = os.path.join(dest, os.path.basename(f)) - if os.path.exists(dest_file): - result['files'].append(dict( - checksum=None, - dest=dest_file, - src=f, - )) - extract_files.remove(f) - - if not extract_files: - module.exit_json(**result) - - tmp_dir = tempfile.mkdtemp() - - # Use 7zip when we have a binary, otherwise try to mount - if binary: - cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files])) - else: - cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir) - - rc, out, err = module.run_command(cmd) - if rc != 0: - result.update(dict( - cmd=cmd, - rc=rc, - stderr=err, - stdout=out, - )) - shutil.rmtree(tmp_dir) - - if binary: - module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result) - else: - module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result) - - try: - for f in extract_files: - tmp_src = os.path.join(tmp_dir, f) - if not os.path.exists(tmp_src): - module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result) - - src_checksum = module.sha1(tmp_src) - - dest_file = os.path.join(dest, os.path.basename(f)) - - if os.path.exists(dest_file): - dest_checksum = module.sha1(dest_file) - else: - dest_checksum = None - - result['files'].append(dict( - checksum=src_checksum, - dest=dest_file, - src=f, - )) - - if src_checksum != dest_checksum: - if not module.check_mode: - shutil.copy(tmp_src, dest_file) - - result['changed'] = True - finally: - if not binary: - module.run_command('umount "%s"' % tmp_dir) - - shutil.rmtree(tmp_dir) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/files/read_csv.py deleted file mode 100644 index 2d5644db2e..0000000000 --- a/plugins/modules/files/read_csv.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: read_csv -short_description: Read a CSV file -description: -- Read a CSV file and return a list or a dictionary, containing one dictionary per row. -author: -- Dag Wieers (@dagwieers) -options: - path: - description: - - The CSV filename to read data from. - type: path - required: yes - aliases: [ filename ] - key: - description: - - The column name used as a key for the resulting dictionary. - - If C(key) is unset, the module returns a list of dictionaries, - where each dictionary is a row in the CSV file. - type: str - dialect: - description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include C(excel), C(excel-tab) or C(unix). - type: str - default: excel - fieldnames: - description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. 
- type: list - elements: str - unique: - description: - - Whether the C(key) used is expected to be unique. - type: bool - default: yes - delimiter: - description: - - A one-character string used to separate fields. - - When using this parameter, you change the default value used by C(dialect). - - The default value depends on the dialect used. - type: str - skipinitialspace: - description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by C(dialect). - - The default value depends on the dialect used. - type: bool - strict: - description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by C(dialect). - - The default value depends on the dialect used. - type: bool -notes: -- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja. -''' - -EXAMPLES = r''' -# Example CSV file with header -# -# name,uid,gid -# dag,500,500 -# jeroen,501,500 - -# Read a CSV file and access user 'dag' -- name: Read users from CSV file and return a dictionary - community.general.read_csv: - path: users.csv - key: name - register: users - delegate_to: localhost - -- ansible.builtin.debug: - msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}' - -# Read a CSV file and access the first item -- name: Read users from CSV file and return a list - community.general.read_csv: - path: users.csv - register: users - delegate_to: localhost - -- ansible.builtin.debug: - msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}' - -# Example CSV file without header and semi-colon delimiter -# -# dag;500;500 -# jeroen;501;500 - -# Read a CSV file without headers -- name: Read users from CSV file and return a list - community.general.read_csv: - path: users.csv - fieldnames: name,uid,gid - delimiter: ';' - register: users - delegate_to: localhost -''' - -RETURN = r''' -dict: - description: The CSV content as a dictionary. - returned: success - type: dict - sample: - dag: - name: dag - uid: 500 - gid: 500 - jeroen: - name: jeroen - uid: 501 - gid: 500 -list: - description: The CSV content as a list. 
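For orientation, a rough stdlib sketch of the list/dict shapes that read_csv returns (sample data invented; the module itself delegates parsing to a shared csv helper in module_utils):

```python
import csv
import io

data = "name,uid,gid\ndag,500,500\njeroen,501,500\n"
rows = list(csv.DictReader(io.StringIO(data), dialect='excel'))

as_list = rows                                # the 'list' return value
as_dict = {row['name']: row for row in rows}  # the 'dict' return value when key=name
print(as_dict['dag']['uid'])                  # 500
```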
- returned: success - type: list - sample: - - name: dag - uid: 500 - gid: 500 - - name: jeroen - uid: 501 - gid: 500 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, - DialectNotAvailableError, - CustomDialectFailureError) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['filename']), - dialect=dict(type='str', default='excel'), - key=dict(type='str', no_log=False), - fieldnames=dict(type='list', elements='str'), - unique=dict(type='bool', default=True), - delimiter=dict(type='str'), - skipinitialspace=dict(type='bool'), - strict=dict(type='bool'), - ), - supports_check_mode=True, - ) - - path = module.params['path'] - dialect = module.params['dialect'] - key = module.params['key'] - fieldnames = module.params['fieldnames'] - unique = module.params['unique'] - - dialect_params = { - "delimiter": module.params['delimiter'], - "skipinitialspace": module.params['skipinitialspace'], - "strict": module.params['strict'], - } - - try: - dialect = initialize_dialect(dialect, **dialect_params) - except (CustomDialectFailureError, DialectNotAvailableError) as e: - module.fail_json(msg=to_native(e)) - - try: - with open(path, 'rb') as f: - data = f.read() - except (IOError, OSError) as e: - module.fail_json(msg="Unable to open file: %s" % to_native(e)) - - reader = read_csv(data, dialect, fieldnames) - - if key and key not in reader.fieldnames: - module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames))) - - data_dict = dict() - data_list = list() - - if key is None: - try: - for row in reader: - data_list.append(row) - except CSVError as e: - module.fail_json(msg="Unable to process file: %s" % to_native(e)) - else: - try: - for row in reader: - if unique and row[key] in data_dict: - module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key])) - data_dict[row[key]] = row - except CSVError as e: - module.fail_json(msg="Unable to process file: %s" % to_native(e)) - - module.exit_json(dict=data_dict, list=data_list) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py deleted file mode 100644 index 8463703c1e..0000000000 --- a/plugins/modules/files/sapcar_extract.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sapcar_extract -short_description: Manages SAP SAPCAR archives -version_added: "3.2.0" -description: - - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling - information back into Ansible. -options: - path: - description: The path to the SAR/CAR file. - type: path - required: true - dest: - description: - - The destination where SAPCAR extracts the SAR file. Missing folders will be created. - If this parameter is not provided it will unpack in the same folder as the SAR file. - type: path - binary_path: - description: - - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR). 
- If this parameter is not provided the module will look in C(PATH). - type: path - signature: - description: - - If C(true) the signature will be extracted. - default: false - type: bool - security_library: - description: - - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrytp.so), for signature operations. - type: path - manifest: - description: - - The name of the manifest. - default: "SIGNATURE.SMF" - type: str - remove: - description: - - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!) - default: false - type: bool -author: - - Rainer Leber (@RainerLeber) -notes: - - Always returns C(changed=true) in C(check_mode). -''' - -EXAMPLES = """ -- name: Extract SAR file - community.general.sapcar_extract: - path: "~/source/hana.sar" - -- name: Extract SAR file with destination - community.general.sapcar_extract: - path: "~/source/hana.sar" - dest: "~/test/" - -- name: Extract SAR file with destination and download from webserver can be a fileshare as well - community.general.sapcar_extract: - path: "~/source/hana.sar" - dest: "~/dest/" - binary_path: "https://myserver/SAPCAR" - -- name: Extract SAR file and delete SAR after extract - community.general.sapcar_extract: - path: "~/source/hana.sar" - remove: true - -- name: Extract SAR file with manifest - community.general.sapcar_extract: - path: "~/source/hana.sar" - signature: true - -- name: Extract SAR file with manifest and rename it - community.general.sapcar_extract: - path: "~/source/hana.sar" - manifest: "MyNewSignature.SMF" - signature: true -""" - -import os -from tempfile import NamedTemporaryFile -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native - - -def get_list_of_files(dir_name): - # create a list of file and directories - # names in the given directory - list_of_file = os.listdir(dir_name) - allFiles = list() - # Iterate over all the entries - for entry in list_of_file: - # Create full path - fullPath = os.path.join(dir_name, entry) - # If entry is a directory then get the list of files in this directory - if os.path.isdir(fullPath): - allFiles = allFiles + [fullPath] - allFiles = allFiles + get_list_of_files(fullPath) - else: - allFiles.append(fullPath) - return allFiles - - -def download_SAPCAR(binary_path, module): - bin_path = None - # download sapcar binary if url is provided otherwise path is returned - if binary_path is not None: - if binary_path.startswith('https://') or binary_path.startswith('http://'): - random_file = NamedTemporaryFile(delete=False) - with open_url(binary_path) as response: - with random_file as out_file: - data = response.read() - out_file.write(data) - os.chmod(out_file.name, 0o700) - bin_path = out_file.name - module.add_cleanup_file(bin_path) - else: - bin_path = binary_path - return bin_path - - -def check_if_present(command, path, dest, signature, manifest, module): - # manipuliating output from SAR file for compare with already extracted files - iter_command = [command, '-tvf', path] - sar_out = module.run_command(iter_command)[1] - sar_raw = sar_out.split("\n")[1:] - if dest[-1] != "/": - dest = dest + "/" - sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x] - # remove any SIGNATURE.SMF from list because it will not unpacked if signature is false - if not signature: - sar_files = [item for item in sar_files if '.SMF' not in item] - # if signature is renamed manipulate files in list of sar file for 
compare. - if manifest != "SIGNATURE.SMF": - sar_files = [item for item in sar_files if '.SMF' not in item] - sar_files = sar_files + [manifest] - # get extracted files if present - files_extracted = get_list_of_files(dest) - # compare extracted files with files in sar file - present = all(elem in files_extracted for elem in sar_files) - return present - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True), - dest=dict(type='path'), - binary_path=dict(type='path'), - signature=dict(type='bool', default=False), - security_library=dict(type='path'), - manifest=dict(type='str', default="SIGNATURE.SMF"), - remove=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - rc, out, err = [0, "", ""] - params = module.params - check_mode = module.check_mode - - path = params['path'] - dest = params['dest'] - signature = params['signature'] - security_library = params['security_library'] - manifest = params['manifest'] - remove = params['remove'] - - bin_path = download_SAPCAR(params['binary_path'], module) - - if dest is None: - dest_head_tail = os.path.split(path) - dest = dest_head_tail[0] + '/' - else: - if not os.path.exists(dest): - os.makedirs(dest, 0o755) - - if bin_path is not None: - command = [module.get_bin_path(bin_path, required=True)] - else: - try: - command = [module.get_bin_path('sapcar', required=True)] - except Exception as e: - module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}' - .format(bin_path, to_native(e))) - - present = check_if_present(command[0], path, dest, signature, manifest, module) - - if not present: - command.extend(['-xvf', path, '-R', dest]) - if security_library: - command.extend(['-L', security_library]) - if signature: - command.extend(['-manifest', manifest]) - if not check_mode: - (rc, out, err) = module.run_command(command, check_rc=True) - changed = True - else: - changed = False - out = "allready unpacked" - - if remove: - os.remove(path) - - module.exit_json(changed=changed, message=rc, stdout=out, - stderr=err, command=' '.join(command)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py deleted file mode 100644 index f862dd720b..0000000000 --- a/plugins/modules/files/xattr.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: xattr -short_description: Manage user defined extended attributes -description: - - Manages filesystem user defined extended attributes. - - Requires that extended attributes are enabled on the target filesystem - and that the setfattr/getfattr utilities are present. -options: - path: - description: - - The full path of the file/object to get the facts of. - - Before 2.3 this option was only usable as I(name). - type: path - required: true - aliases: [ name ] - namespace: - description: - - Namespace of the named name/key. - type: str - default: user - key: - description: - - The name of a specific Extended attribute key to set/retrieve. - type: str - value: - description: - - The value to set the named name/key to, it automatically sets the C(state) to 'set'. - type: str - state: - description: - - defines which state you want to do. 
- C(read) retrieves the current value for a C(key) (default) - C(present) sets C(name) to C(value), default if value is set - C(all) dumps all data - C(keys) retrieves all keys - C(absent) deletes the key - type: str - choices: [ absent, all, keys, present, read ] - default: read - follow: - description: - - If C(yes), dereferences symlinks and sets/gets attributes on symlink target, - otherwise acts on symlink itself. - type: bool - default: yes -notes: - - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. -author: -- Brian Coca (@bcoca) -''' - -EXAMPLES = ''' -- name: Obtain the extended attributes of /etc/foo.conf - community.general.xattr: - path: /etc/foo.conf - -- name: Set the key 'user.foo' to value 'bar' - community.general.xattr: - path: /etc/foo.conf - key: foo - value: bar - -- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914' - community.general.xattr: - path: /mnt/bricks/brick1 - namespace: trusted - key: glusterfs.volume-id - value: "0x817b94343f164f199e5b573b4ea1f914" - -- name: Remove the key 'user.foo' - community.general.xattr: - path: /etc/foo.conf - key: foo - state: absent - -- name: Remove the key 'trusted.glusterfs.volume-id' - community.general.xattr: - path: /mnt/bricks/brick1 - namespace: trusted - key: glusterfs.volume-id - state: absent -''' - -import os - -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def get_xattr_keys(module, path, follow): - cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] - - if not follow: - cmd.append('-h') - cmd.append(path) - - return _run_xattr(module, cmd) - - -def get_xattr(module, path, key, follow): - cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] - - if not follow: - cmd.append('-h') - if key is None: - cmd.append('-d') - else: - cmd.append('-n %s' % key) - cmd.append(path) - - return _run_xattr(module, cmd, False) - - -def set_xattr(module, path, key, value, follow): - - cmd = [module.get_bin_path('setfattr', True)] - if not follow: - cmd.append('-h') - cmd.append('-n %s' % key) - cmd.append('-v %s' % value) - cmd.append(path) - - return _run_xattr(module, cmd) - - -def rm_xattr(module, path, key, follow): - - cmd = [module.get_bin_path('setfattr', True)] - if not follow: - cmd.append('-h') - cmd.append('-x %s' % key) - cmd.append(path) - - return _run_xattr(module, cmd, False) - - -def _run_xattr(module, cmd, check_rc=True): - - try: - (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception as e: - module.fail_json(msg="%s!" 
% to_native(e)) - - # result = {'raw': out} - result = {} - for line in out.splitlines(): - if line.startswith('#') or line == '': - pass - elif '=' in line: - (key, val) = line.split('=') - result[key] = val.strip('"') - else: - result[line] = '' - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['name']), - namespace=dict(type='str', default='user'), - key=dict(type='str', no_log=False), - value=dict(type='str'), - state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), - follow=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - path = module.params.get('path') - namespace = module.params.get('namespace') - key = module.params.get('key') - value = module.params.get('value') - state = module.params.get('state') - follow = module.params.get('follow') - - if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") - - changed = False - msg = "" - res = {} - - if key is None and state in ['absent', 'present']: - module.fail_json(msg="%s needs a key parameter" % state) - - # Prepend the key with the namespace if defined - if ( - key is not None and - namespace is not None and - len(namespace) > 0 and - not (namespace == 'user' and key.startswith('user.'))): - key = '%s.%s' % (namespace, key) - - if (state == 'present' or value is not None): - current = get_xattr(module, path, key, follow) - if current is None or key not in current or value != current[key]: - if not module.check_mode: - res = set_xattr(module, path, key, value, follow) - changed = True - res = current - msg = "%s set to %s" % (key, value) - elif state == 'absent': - current = get_xattr(module, path, key, follow) - if current is not None and key in current: - if not module.check_mode: - res = rm_xattr(module, path, key, follow) - changed = True - res = current - msg = "%s removed" % (key) - elif state == 'keys': - res = get_xattr_keys(module, path, follow) - msg = "returning all keys" - elif state == 'all': - res = get_xattr(module, path, None, follow) - msg = "dumping all" - else: - res = get_xattr(module, path, key, follow) - msg = "returning %s" % key - - module.exit_json(changed=changed, msg=msg, xattr=res) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py deleted file mode 100644 index ffdb65400c..0000000000 --- a/plugins/modules/files/xml.py +++ /dev/null @@ -1,986 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Red Hat, Inc. -# Copyright: (c) 2014, Tim Bielawa -# Copyright: (c) 2014, Magnus Hedemark -# Copyright: (c) 2017, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xml -short_description: Manage bits and pieces of XML files or strings -description: -- A CRUD-like interface to managing bits of XML files. -options: - path: - description: - - Path to the file to operate on. - - This file must exist ahead of time. - - This parameter is required, unless C(xmlstring) is given. - type: path - aliases: [ dest, file ] - xmlstring: - description: - - A string containing XML on which to operate. - - This parameter is required, unless C(path) is given. - type: str - xpath: - description: - - A valid XPath expression describing the item(s) you want to manipulate. 
- - Operates on the document root, C(/), by default. - type: str - namespaces: - description: - - The namespace C(prefix:uri) mapping for the XPath expression. - - Needs to be a C(dict), not a C(list) of items. - type: dict - state: - description: - - Set or remove an xpath selection (node(s), attribute(s)). - type: str - choices: [ absent, present ] - default: present - aliases: [ ensure ] - attribute: - description: - - The attribute to select when using parameter C(value). - - This is a string, not prepended with C(@). - type: raw - value: - description: - - Desired state of the selected attribute. - - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)). - - Elements default to no value (but present). - - Attributes default to an empty string. - type: raw - add_children: - description: - - Add additional child-element(s) to a selected element for a given C(xpath). - - Child elements must be given in a list and each item may be either a string - (eg. C(children=ansible) to add an empty C() child element), - or a hash where the key is an element name and the value is the element value. - - This parameter requires C(xpath) to be set. - type: list - elements: raw - set_children: - description: - - Set the child-element(s) of a selected element for a given C(xpath). - - Removes any existing children. - - Child elements must be specified as in C(add_children). - - This parameter requires C(xpath) to be set. - type: list - elements: raw - count: - description: - - Search for a given C(xpath) and provide the count of any matches. - - This parameter requires C(xpath) to be set. - type: bool - default: no - print_match: - description: - - Search for a given C(xpath) and print out any matches. - - This parameter requires C(xpath) to be set. - type: bool - default: no - pretty_print: - description: - - Pretty print XML output. - type: bool - default: no - content: - description: - - Search for a given C(xpath) and get content. - - This parameter requires C(xpath) to be set. - type: str - choices: [ attribute, text ] - input_type: - description: - - Type of input for C(add_children) and C(set_children). - type: str - choices: [ xml, yaml ] - default: yaml - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - type: bool - default: no - strip_cdata_tags: - description: - - Remove CDATA tags surrounding text values. - - Note that this might break your XML file if text values contain characters that could be interpreted as XML. - type: bool - default: no - insertbefore: - description: - - Add additional child-element(s) before the first selected element for a given C(xpath). - - Child elements must be given in a list and each item may be either a string - (eg. C(children=ansible) to add an empty C() child element), - or a hash where the key is an element name and the value is the element value. - - This parameter requires C(xpath) to be set. - type: bool - default: no - insertafter: - description: - - Add additional child-element(s) after the last selected element for a given C(xpath). - - Child elements must be given in a list and each item may be either a string - (eg. C(children=ansible) to add an empty C() child element), - or a hash where the key is an element name and the value is the element value. - - This parameter requires C(xpath) to be set. 
- type: bool - default: no -requirements: -- lxml >= 2.3.0 -notes: -- Use the C(--check) and C(--diff) options when testing your expressions. -- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure. -- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions. -- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples. -- Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them. -seealso: -- name: Xml module development community wiki - description: More information related to the development of this xml module. - link: https://github.com/ansible/community/wiki/Module:-xml -- name: Introduction to XPath - description: A brief tutorial on XPath (w3schools.com). - link: https://www.w3schools.com/xml/xpath_intro.asp -- name: XPath Reference document - description: The reference documentation on XSLT/XPath (developer.mozilla.org). - link: https://developer.mozilla.org/en-US/docs/Web/XPath -author: -- Tim Bielawa (@tbielawa) -- Magnus Hedemark (@magnus919) -- Dag Wieers (@dagwieers) -''' - -EXAMPLES = r''' -# Consider the following XML file: -# -# -# Tasty Beverage Co. -# -# Rochefort 10 -# St. Bernardus Abbot 12 -# Schlitz -# -# 10 -# -# -#
<address>http://tastybeverageco.com</address>
-#   </website>
-# </business>
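
Before the tasks that follow, a standalone check of how the first XPath below resolves against the sample document (trimmed here to the relevant nodes):

```python
# The first task below targets /business/rating/@subjective; against the
# sample document this selects the attribute value to be removed.
from lxml import etree

doc = etree.fromstring(
    '<business type="bar"><name>Tasty Beverage Co.</name>'
    '<rating subjective="true">10</rating></business>'
)
print(doc.xpath('/business/rating/@subjective'))  # ['true']
```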
- -- name: Remove the 'subjective' attribute of the 'rating' element - community.general.xml: - path: /foo/bar.xml - xpath: /business/rating/@subjective - state: absent - -- name: Set the rating to '11' - community.general.xml: - path: /foo/bar.xml - xpath: /business/rating - value: 11 - -# Retrieve and display the number of nodes -- name: Get count of 'beers' nodes - community.general.xml: - path: /foo/bar.xml - xpath: /business/beers/beer - count: yes - register: hits - -- ansible.builtin.debug: - var: hits.count - -# Example where parent XML nodes are created automatically -- name: Add a 'phonenumber' element to the 'business' element - community.general.xml: - path: /foo/bar.xml - xpath: /business/phonenumber - value: 555-555-1234 - -- name: Add several more beers to the 'beers' element - community.general.xml: - path: /foo/bar.xml - xpath: /business/beers - add_children: - - beer: Old Rasputin - - beer: Old Motor Oil - - beer: Old Curmudgeon - -- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element - community.general.xml: - path: /foo/bar.xml - xpath: '/business/beers/beer[text()="Rochefort 10"]' - insertbefore: yes - add_children: - - beer: Old Rasputin - - beer: Old Motor Oil - - beer: Old Curmudgeon - -# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements -- name: Add a 'validxhtml' element to the 'website' element - community.general.xml: - path: /foo/bar.xml - xpath: /business/website/validxhtml - -- name: Add an empty 'validatedon' attribute to the 'validxhtml' element - community.general.xml: - path: /foo/bar.xml - xpath: /business/website/validxhtml/@validatedon - -- name: Add or modify an attribute, add element if needed - community.general.xml: - path: /foo/bar.xml - xpath: /business/website/validxhtml - attribute: validatedon - value: 1976-08-05 - -# How to read an attribute value and access it in Ansible -- name: Read an element's attribute values - community.general.xml: - path: /foo/bar.xml - xpath: /business/website/validxhtml - content: attribute - register: xmlresp - -- name: Show an attribute value - ansible.builtin.debug: - var: xmlresp.matches[0].validxhtml.validatedon - -- name: Remove all children from the 'website' element (option 1) - community.general.xml: - path: /foo/bar.xml - xpath: /business/website/* - state: absent - -- name: Remove all children from the 'website' element (option 2) - community.general.xml: - path: /foo/bar.xml - xpath: /business/website - children: [] - -# In case of namespaces, like in below XML, they have to be explicitly stated. -# -# -# -# -# -# - -# NOTE: There is the prefix 'x' in front of the 'bar' element, too. 
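As background for the namespaced task right after this sketch: the module passes the C(namespaces) mapping straight through to lxml's C(xpath()), so the prefixes only have to agree between the expression and the dict, not with the prefixes used in the document itself. A standalone illustration (XML snippet illustrative):

```python
# Prefix->URI mapping as lxml consumes it; prefixes in the expression
# resolve through this dict, regardless of the document's own prefixes.
from lxml import etree

doc = etree.fromstring(
    '<x:foo xmlns:x="http://x.test" xmlns:y="http://y.test">'
    '<x:bar><y:baz/></x:bar></x:foo>'
)
nsmap = {'x': 'http://x.test', 'y': 'http://y.test'}
print(doc.xpath('/x:foo/x:bar/y:baz', namespaces=nsmap))  # one element match
```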
-- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false' - community.general.xml: - path: foo.xml - xpath: /x:foo/x:bar/y:baz - namespaces: - x: http://x.test - y: http://y.test - z: http://z.test - attribute: z:my_namespaced_attribute - value: 'false' - -- name: Adding building nodes with floor subnodes from a YAML variable - community.general.xml: - path: /foo/bar.xml - xpath: /business - add_children: - - building: - # Attributes - name: Scumm bar - location: Monkey island - # Subnodes - _: - - floor: Pirate hall - - floor: Grog storage - - construction_date: "1990" # Only strings are valid - - building: Grog factory - -# Consider this XML for following example - -# -# -# -# part to remove -# -# -# part to keep -# -# - -- name: Delete element node based upon attribute - community.general.xml: - path: bar.xml - xpath: /config/element[@name='test1'] - state: absent -''' - -RETURN = r''' -actions: - description: A dictionary with the original xpath, namespaces and state. - type: dict - returned: success - sample: {xpath: xpath, namespaces: [namespace1, namespace2], state=present} -backup_file: - description: The name of the backup file that was created - type: str - returned: when backup=yes - sample: /path/to/file.xml.1942.2017-08-24@14:16:01~ -count: - description: The count of xpath matches. - type: int - returned: when parameter 'count' is set - sample: 2 -matches: - description: The xpath matches found. - type: list - returned: when parameter 'print_match' is set -msg: - description: A message related to the performed action(s). - type: str - returned: always -xmlstring: - description: An XML string of the resulting output. - type: str - returned: when parameter 'xmlstring' is set -''' - -import copy -import json -import os -import re -import traceback - -from distutils.version import LooseVersion -from io import BytesIO - -LXML_IMP_ERR = None -try: - from lxml import etree, objectify - HAS_LXML = True -except ImportError: - LXML_IMP_ERR = traceback.format_exc() - HAS_LXML = False - -from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib -from ansible.module_utils.six import iteritems, string_types -from ansible.module_utils.common.text.converters import to_bytes, to_native -from ansible.module_utils.common._collections_compat import MutableMapping - -_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*" -_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT -# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate -# strings wrapped by the other delimiter' XPath trick, especially as simple XPath. 
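To make the quoting limitation just noted concrete (the _XPSTR definition the comment refers to follows right below): a simple-XPath string literal may use either delimiter, but there is no escaping or concatenation, so a value containing both quote characters cannot be expressed:

```python
# A value may be wrapped in either quote style...
import re

_XPSTR = "('(?:.*)'|\"(?:.*)\")"
print(bool(re.match('^%s$' % _XPSTR, "'plain value'")))            # True
print(bool(re.match('^%s$' % _XPSTR, '"with \'inner\' quotes"')))  # True
# ...but there is no escaping, so a value containing both ' and " cannot
# be written as a single simple-XPath string literal.
```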
-_XPSTR = "('(?:.*)'|\"(?:.*)\")" - -_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$") -_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$") -_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$") -_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$") -_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$") -_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") - - -def has_changed(doc): - orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) - obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) - return (orig_obj != obj) - - -def do_print_match(module, tree, xpath, namespaces): - match = tree.xpath(xpath, namespaces=namespaces) - match_xpaths = [] - for m in match: - match_xpaths.append(tree.getpath(m)) - match_str = json.dumps(match_xpaths) - msg = "selector '%s' match: %s" % (xpath, match_str) - finish(module, tree, xpath, namespaces, changed=False, msg=msg) - - -def count_nodes(module, tree, xpath, namespaces): - """ Return the count of nodes matching the xpath """ - hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces) - msg = "found %d nodes" % hits - finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) - - -def is_node(tree, xpath, namespaces): - """ Test if a given xpath matches anything and if that match is a node. - - For now we just assume you're only searching for one specific thing.""" - if xpath_matches(tree, xpath, namespaces): - # OK, it found something - match = tree.xpath(xpath, namespaces=namespaces) - if isinstance(match[0], etree._Element): - return True - - return False - - -def is_attribute(tree, xpath, namespaces): - """ Test if a given xpath matches and that match is an attribute - - An xpath attribute search will only match one item""" - if xpath_matches(tree, xpath, namespaces): - match = tree.xpath(xpath, namespaces=namespaces) - if isinstance(match[0], etree._ElementStringResult): - return True - elif isinstance(match[0], etree._ElementUnicodeResult): - return True - return False - - -def xpath_matches(tree, xpath, namespaces): - """ Test if a node exists """ - if tree.xpath(xpath, namespaces=namespaces): - return True - return False - - -def delete_xpath_target(module, tree, xpath, namespaces): - """ Delete an attribute or element from a tree """ - changed = False - try: - for result in tree.xpath(xpath, namespaces=namespaces): - changed = True - # Get the xpath for this result - if is_attribute(tree, xpath, namespaces): - # Delete an attribute - parent = result.getparent() - # Pop this attribute match out of the parent - # node's 'attrib' dict by using this match's - # 'attrname' attribute for the key - parent.attrib.pop(result.attrname) - elif is_node(tree, xpath, namespaces): - # Delete an element - result.getparent().remove(result) - else: - raise Exception("Impossible error") - except Exception as e: - module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e)) - else: - finish(module, tree, xpath, namespaces, changed=changed) - - -def replace_children_of(children, match): - for element in list(match): - match.remove(element) - match.extend(children) - - -def set_target_children_inner(module, tree, xpath, namespaces, children, in_type): - matches = tree.xpath(xpath, namespaces=namespaces) - - # Create a list of our new children - children = children_to_nodes(module, children, in_type) - children_as_string = [etree.tostring(c) for c in 
children] - - changed = False - - # xpaths always return matches as a list, so.... - for match in matches: - # Check if elements differ - if len(list(match)) == len(children): - for idx, element in enumerate(list(match)): - if etree.tostring(element) != children_as_string[idx]: - replace_children_of(children, match) - changed = True - break - else: - replace_children_of(children, match) - changed = True - - return changed - - -def set_target_children(module, tree, xpath, namespaces, children, in_type): - changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type) - # Write it out - finish(module, tree, xpath, namespaces, changed=changed) - - -def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter): - if is_node(tree, xpath, namespaces): - new_kids = children_to_nodes(module, children, in_type) - if insertbefore or insertafter: - insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter) - else: - for node in tree.xpath(xpath, namespaces=namespaces): - node.extend(new_kids) - finish(module, tree, xpath, namespaces, changed=True) - else: - finish(module, tree, xpath, namespaces) - - -def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter): - """ - Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the - first xpath hit, with insertafter, it is inserted after the last xpath hit. - """ - insert_target = tree.xpath(xpath, namespaces=namespaces) - loc_index = 0 if insertbefore else -1 - index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index]) - parent = insert_target[0].getparent() - if insertafter: - index_in_parent += 1 - for child in children: - parent.insert(index_in_parent, child) - index_in_parent += 1 - - -def _extract_xpstr(g): - return g[1:-1] - - -def split_xpath_last(xpath): - """split an XPath of the form /foo/bar/baz into /foo/bar and baz""" - xpath = xpath.strip() - m = _RE_SPLITSIMPLELAST.match(xpath) - if m: - # requesting an element to exist - return (m.group(1), [(m.group(2), None)]) - m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath) - if m: - # requesting an element to exist with an inner text - return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) - - m = _RE_SPLITSIMPLEATTRLAST.match(xpath) - if m: - # requesting an attribute to exist - return (m.group(1), [(m.group(2), None)]) - m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath) - if m: - # requesting an attribute to exist with a value - return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) - - m = _RE_SPLITSUBLAST.match(xpath) - if m: - content = [x.strip() for x in m.group(3).split(" and ")] - return (m.group(1), [('/' + m.group(2), content)]) - - m = _RE_SPLITONLYEQVALUE.match(xpath) - if m: - # requesting a change of inner text - return (m.group(1), [("", _extract_xpstr(m.group(2)))]) - return (xpath, []) - - -def nsnameToClark(name, namespaces): - if ":" in name: - (nsname, rawname) = name.split(":") - # return "{{%s}}%s" % (namespaces[nsname], rawname) - return "{{{0}}}{1}".format(namespaces[nsname], rawname) - - # no namespace name here - return name - - -def check_or_make_target(module, tree, xpath, namespaces): - (inner_xpath, changes) = split_xpath_last(xpath) - if (inner_xpath == xpath) or (changes is None): - module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! 
tree is %s" % - (xpath, etree.tostring(tree, pretty_print=True))) - return False - - changed = False - - if not is_node(tree, inner_xpath, namespaces): - changed = check_or_make_target(module, tree, inner_xpath, namespaces) - - # we test again after calling check_or_make_target - if is_node(tree, inner_xpath, namespaces) and changes: - for (eoa, eoa_value) in changes: - if eoa and eoa[0] != '@' and eoa[0] != '/': - # implicitly creating an element - new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") - if eoa_value: - for nk in new_kids: - nk.text = eoa_value - - for node in tree.xpath(inner_xpath, namespaces=namespaces): - node.extend(new_kids) - changed = True - # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) - elif eoa and eoa[0] == '/': - element = eoa[1:] - new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") - for node in tree.xpath(inner_xpath, namespaces=namespaces): - node.extend(new_kids) - for nk in new_kids: - for subexpr in eoa_value: - # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" % - # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True)) - check_or_make_target(module, nk, "./" + subexpr, namespaces) - changed = True - - # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) - elif eoa == "": - for node in tree.xpath(inner_xpath, namespaces=namespaces): - if (node.text != eoa_value): - node.text = eoa_value - changed = True - - elif eoa and eoa[0] == '@': - attribute = nsnameToClark(eoa[1:], namespaces) - - for element in tree.xpath(inner_xpath, namespaces=namespaces): - changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) - - if changing: - changed = changed or changing - if eoa_value is None: - value = "" - else: - value = eoa_value - element.attrib[attribute] = value - - # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" % - # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True))) - - else: - module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True)) - - return changed - - -def ensure_xpath_exists(module, tree, xpath, namespaces): - changed = False - - if not is_node(tree, xpath, namespaces): - changed = check_or_make_target(module, tree, xpath, namespaces) - - finish(module, tree, xpath, namespaces, changed) - - -def set_target_inner(module, tree, xpath, namespaces, attribute, value): - changed = False - - try: - if not is_node(tree, xpath, namespaces): - changed = check_or_make_target(module, tree, xpath, namespaces) - except Exception as e: - missing_namespace = "" - # NOTE: This checks only the namespaces defined in root element! - # TODO: Implement a more robust check to check for child namespaces' existence - if tree.getroot().nsmap and ":" not in xpath: - missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" - module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" % - (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc()) - - if not is_node(tree, xpath, namespaces): - module.fail_json(msg="Xpath %s does not reference a node! 
tree is %s" % - (xpath, etree.tostring(tree, pretty_print=True))) - - for element in tree.xpath(xpath, namespaces=namespaces): - if not attribute: - changed = changed or (element.text != value) - if element.text != value: - element.text = value - else: - changed = changed or (element.get(attribute) != value) - if ":" in attribute: - attr_ns, attr_name = attribute.split(":") - # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name) - attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name) - if element.get(attribute) != value: - element.set(attribute, value) - - return changed - - -def set_target(module, tree, xpath, namespaces, attribute, value): - changed = set_target_inner(module, tree, xpath, namespaces, attribute, value) - finish(module, tree, xpath, namespaces, changed) - - -def get_element_text(module, tree, xpath, namespaces): - if not is_node(tree, xpath, namespaces): - module.fail_json(msg="Xpath %s does not reference a node!" % xpath) - - elements = [] - for element in tree.xpath(xpath, namespaces=namespaces): - elements.append({element.tag: element.text}) - - finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) - - -def get_element_attr(module, tree, xpath, namespaces): - if not is_node(tree, xpath, namespaces): - module.fail_json(msg="Xpath %s does not reference a node!" % xpath) - - elements = [] - for element in tree.xpath(xpath, namespaces=namespaces): - child = {} - for key in element.keys(): - value = element.get(key) - child.update({key: value}) - elements.append({element.tag: child}) - - finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) - - -def child_to_element(module, child, in_type): - if in_type == 'xml': - infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) - - try: - parser = etree.XMLParser() - node = etree.parse(infile, parser) - return node.getroot() - except etree.XMLSyntaxError as e: - module.fail_json(msg="Error while parsing child element: %s" % e) - elif in_type == 'yaml': - if isinstance(child, string_types): - return etree.Element(child) - elif isinstance(child, MutableMapping): - if len(child) > 1: - module.fail_json(msg="Can only create children from hashes with one key") - - (key, value) = next(iteritems(child)) - if isinstance(value, MutableMapping): - children = value.pop('_', None) - - node = etree.Element(key, value) - - if children is not None: - if not isinstance(children, list): - module.fail_json(msg="Invalid children type: %s, must be list." % type(children)) - - subnodes = children_to_nodes(module, children) - node.extend(subnodes) - else: - node = etree.Element(key) - node.text = value - return node - else: - module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child)) - else: - module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." 
% in_type) - - -def children_to_nodes(module=None, children=None, type='yaml'): - """turn a str/hash/list of str&hash into a list of elements""" - children = [] if children is None else children - - return [child_to_element(module, child, type) for child in children] - - -def make_pretty(module, tree): - xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - result = dict( - changed=False, - ) - - if module.params['path']: - xml_file = module.params['path'] - with open(xml_file, 'rb') as xml_content: - if xml_string != xml_content.read(): - result['changed'] = True - if not module.check_mode: - if module.params['backup']: - result['backup_file'] = module.backup_local(module.params['path']) - tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - elif module.params['xmlstring']: - result['xmlstring'] = xml_string - # NOTE: Modifying a string is not considered a change ! - if xml_string != module.params['xmlstring']: - result['changed'] = True - - module.exit_json(**result) - - -def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): - - result = dict( - actions=dict( - xpath=xpath, - namespaces=namespaces, - state=module.params['state'] - ), - changed=has_changed(tree), - ) - - if module.params['count'] or hitcount: - result['count'] = hitcount - - if module.params['print_match'] or matches: - result['matches'] = matches - - if msg: - result['msg'] = msg - - if result['changed']: - if module._diff: - result['diff'] = dict( - before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), - after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), - ) - - if module.params['path'] and not module.check_mode: - if module.params['backup']: - result['backup_file'] = module.backup_local(module.params['path']) - tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - if module.params['xmlstring']: - result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - module.exit_json(**result) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', aliases=['dest', 'file']), - xmlstring=dict(type='str'), - xpath=dict(type='str'), - namespaces=dict(type='dict', default={}), - state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), - value=dict(type='raw'), - attribute=dict(type='raw'), - add_children=dict(type='list', elements='raw'), - set_children=dict(type='list', elements='raw'), - count=dict(type='bool', default=False), - print_match=dict(type='bool', default=False), - pretty_print=dict(type='bool', default=False), - content=dict(type='str', choices=['attribute', 'text']), - input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), - backup=dict(type='bool', default=False), - strip_cdata_tags=dict(type='bool', default=False), - insertbefore=dict(type='bool', default=False), - insertafter=dict(type='bool', default=False), - ), - supports_check_mode=True, - required_by=dict( - add_children=['xpath'], - attribute=['value'], - content=['xpath'], - set_children=['xpath'], - value=['xpath'], - ), - required_if=[ - ['count', True, ['xpath']], - ['print_match', True, ['xpath']], - ['insertbefore', True, ['xpath']], - ['insertafter', True, ['xpath']], - ], - required_one_of=[ - 
['path', 'xmlstring'], - ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], - ], - mutually_exclusive=[ - ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], - ['path', 'xmlstring'], - ['insertbefore', 'insertafter'], - ], - ) - - xml_file = module.params['path'] - xml_string = module.params['xmlstring'] - xpath = module.params['xpath'] - namespaces = module.params['namespaces'] - state = module.params['state'] - value = json_dict_bytes_to_unicode(module.params['value']) - attribute = module.params['attribute'] - set_children = json_dict_bytes_to_unicode(module.params['set_children']) - add_children = json_dict_bytes_to_unicode(module.params['add_children']) - pretty_print = module.params['pretty_print'] - content = module.params['content'] - input_type = module.params['input_type'] - print_match = module.params['print_match'] - count = module.params['count'] - backup = module.params['backup'] - strip_cdata_tags = module.params['strip_cdata_tags'] - insertbefore = module.params['insertbefore'] - insertafter = module.params['insertafter'] - - # Check if we have lxml 2.3.0 or newer installed - if not HAS_LXML: - module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) - elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): - module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') - elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): - module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') - - # Check if the file exists - if xml_string: - infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) - elif os.path.isfile(xml_file): - infile = open(xml_file, 'rb') - else: - module.fail_json(msg="The target XML source '%s' does not exist." % xml_file) - - # Parse and evaluate xpath expression - if xpath is not None: - try: - etree.XPath(xpath) - except etree.XPathSyntaxError as e: - module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e)) - except etree.XPathEvalError as e: - module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e)) - - # Try to parse in the target XML file - try: - parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags) - doc = etree.parse(infile, parser) - except etree.XMLSyntaxError as e: - module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e)) - - # Ensure we have the original copy to compare - global orig_doc - orig_doc = copy.deepcopy(doc) - - if print_match: - do_print_match(module, doc, xpath, namespaces) - - if count: - count_nodes(module, doc, xpath, namespaces) - - if content == 'attribute': - get_element_attr(module, doc, xpath, namespaces) - elif content == 'text': - get_element_text(module, doc, xpath, namespaces) - - # File exists: - if state == 'absent': - # - absent: delete xpath target - delete_xpath_target(module, doc, xpath, namespaces) - - # - present: carry on - - # children && value both set?: should have already aborted by now - # add_children && set_children both set?: should have already aborted by now - - # set_children set? - if set_children: - set_target_children(module, doc, xpath, namespaces, set_children, input_type) - - # add_children set? 
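The remainder of main() below is a chain of handlers in which each helper ends in finish() and therefore exits the module, so at most one branch runs to completion. A compressed sketch of the full dispatch order (the add_children branch the comment above refers to resumes right after this sketch):

```python
# Compressed view of the xml module's dispatch order; every handler exits
# via finish()/exit_json, so evaluation stops at the first applicable branch.
def dispatch(p):
    if p['print_match']:
        return 'do_print_match'        # report matching XPaths
    if p['count']:
        return 'count_nodes'           # report match count
    if p['content'] == 'attribute':
        return 'get_element_attr'
    if p['content'] == 'text':
        return 'get_element_text'
    if p['state'] == 'absent':
        return 'delete_xpath_target'
    if p['set_children']:
        return 'set_target_children'
    if p['add_children']:
        return 'add_target_children'
    if p['value'] is not None:
        return 'set_target'
    if p['xpath'] is not None:
        return 'ensure_xpath_exists'
    if p['pretty_print']:
        return 'make_pretty'
    return "fail: don't know what to do"
```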
- if add_children: - add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter) - - # No?: Carry on - - # Is the xpath target an attribute selector? - if value is not None: - set_target(module, doc, xpath, namespaces, attribute, value) - - # If an xpath was provided, we need to do something with the data - if xpath is not None: - ensure_xpath_exists(module, doc, xpath, namespaces) - - # Otherwise only reformat the xml data? - if pretty_print: - make_pretty(module, doc) - - module.fail_json(msg="Don't know what to do") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/filesize.py b/plugins/modules/filesize.py deleted file mode 120000 index fc4a211c87..0000000000 --- a/plugins/modules/filesize.py +++ /dev/null @@ -1 +0,0 @@ -files/filesize.py \ No newline at end of file diff --git a/plugins/modules/filesize.py b/plugins/modules/filesize.py new file mode 100644 index 0000000000..b0ef189143 --- /dev/null +++ b/plugins/modules/filesize.py @@ -0,0 +1,474 @@ +#!/usr/bin/python + +# Copyright (c) 2021, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: filesize + +short_description: Create a file with a given size, or resize it if it exists + +description: + - This module is a simple wrapper around C(dd) to create, extend or truncate a file, given its size. It can be used to manage + swap files (that require contiguous blocks) or alternatively, huge sparse files. +author: + - quidame (@quidame) + +version_added: "3.0.0" + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + path: + description: + - Path of the regular file to create or resize. + type: path + required: true + size: + description: + - Requested size of the file. + - The value is a number (either C(int) or C(float)) optionally followed by a multiplicative suffix, that can be one + of V(B) (bytes), V(KB) or V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB), and so on for V(T), + V(P), V(E), V(Z) and V(Y); or alternatively one of V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB); + V(G), V(g) or V(GiB) (= 1024MiB); and so on. + - If the multiplicative suffix is not provided, the value is treated as an integer number of blocks of O(blocksize) + bytes each (float values are rounded to the closest integer). + - When the O(size) value is equal to the current file size, does nothing. + - When the O(size) value is bigger than the current file size, bytes from O(source) (if O(sparse) is not V(false)) are + appended to the file without truncating it, in other words, without modifying the existing bytes of the file. + - When the O(size) value is smaller than the current file size, it is truncated to the requested value without modifying + bytes before this value. + - That means that a file of any arbitrary size can be grown to any other arbitrary size, and then resized down to its + initial size without modifying its initial content. + type: raw + required: true + blocksize: + description: + - Size of blocks, in bytes if not followed by a multiplicative suffix. + - The numeric value (before the unit) B(MUST) be an integer (or a C(float) if it equals an integer). 
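The size grammar documented above distinguishes decimal multipliers (V(GB)) from binary ones (V(G), V(GiB)). A deliberately simplified standalone parser illustrating that distinction (unlike the module, a bare number is read here as bytes rather than as a block count, and only a subset of units is included); the remaining blocksize fields continue after the sketch:

```python
# Simplified illustration of the documented size grammar: '2G' is binary
# (2 * 1024**3) while '2GB' is decimal (2 * 1000**3).
import re

UNITS = {'B': 1, 'kB': 1000, 'KB': 1000, 'K': 1024, 'KiB': 1024,
         'MB': 1000**2, 'M': 1024**2, 'MiB': 1024**2,
         'GB': 1000**3, 'G': 1024**3, 'GiB': 1024**3}

def parse_size(spec):
    value, unit = re.match(r'^\s*([\d.]+)\s*([A-Za-z]*)\s*$', spec).groups()
    return int(float(value) * UNITS[unit or 'B'])

assert parse_size('2G') == 2 * 1024**3   # binary multiplier
assert parse_size('2GB') == 2 * 1000**3  # decimal multiplier
assert parse_size('440B') == 440
```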
+ - If not set, the size of blocks is guessed from the OS and commonly results in V(512) or V(4096) bytes, that is used + internally by the module or when O(size) has no unit. + type: raw + source: + description: + - Device or file that provides input data to provision the file. + - This parameter is ignored when O(sparse=true). + type: path + default: /dev/zero + force: + description: + - Whether or not to overwrite the file if it exists, in other words, to truncate it from 0. When V(true), the module + is not idempotent, that means it always reports C(changed=true). + - O(force=true) and O(sparse=true) are mutually exclusive. + type: bool + default: false + sparse: + description: + - Whether or not the file to create should be a sparse file. + - This option is effective only on newly created files, or when growing a file, only for the bytes to append. + - This option is not supported on OSes or filesystems not supporting sparse files. + - O(force=true) and O(sparse=true) are mutually exclusive. + type: bool + default: false + unsafe_writes: + description: + - This option is silently ignored. This module always modifies file size in-place. +requirements: + - dd (Data Duplicator) in PATH + +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes + +seealso: + - name: dd(1) manpage for Linux + description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils). + link: https://man7.org/linux/man-pages/man1/dd.1.html + + - name: dd(1) manpage for IBM AIX + description: Manual page of the IBM AIX's dd implementation. + link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html + + - name: dd(1) manpage for Mac OSX + description: Manual page of the Mac OSX's dd implementation. + link: https://www.unix.com/man-page/osx/1/dd/ + + - name: dd(1M) manpage for Solaris + description: Manual page of the Oracle Solaris's dd implementation. + link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html + + - name: dd(1) manpage for FreeBSD + description: Manual page of the FreeBSD's dd implementation. + link: https://www.freebsd.org/cgi/man.cgi?dd(1) + + - name: dd(1) manpage for OpenBSD + description: Manual page of the OpenBSD's dd implementation. + link: https://man.openbsd.org/dd + + - name: dd(1) manpage for NetBSD + description: Manual page of the NetBSD's dd implementation. + link: https://man.netbsd.org/dd.1 + + - name: busybox(1) manpage for Linux + description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation. 
+ link: https://www.unix.com/man-page/linux/1/busybox +""" + +EXAMPLES = r""" +- name: Create a file of 1G filled with null bytes + community.general.filesize: + path: /var/bigfile + size: 1G + +- name: Extend the file to 2G (2*1024^3) + community.general.filesize: + path: /var/bigfile + size: 2G + +- name: Reduce the file to 2GB (2*1000^3) + community.general.filesize: + path: /var/bigfile + size: 2GB + +- name: Fill a file with random bytes for backing a LUKS device + community.general.filesize: + path: ~/diskimage.luks + size: 512.0 MiB + source: /dev/urandom + +- name: Take a backup of MBR boot code into a file, overwriting it if it exists + community.general.filesize: + path: /media/sdb1/mbr.bin + size: 440B + source: /dev/sda + force: true + +- name: Create/resize a sparse file of/to 8TB + community.general.filesize: + path: /var/local/sparsefile + size: 8TB + sparse: true + +- name: Create a file with specific size and attributes, to be used as swap space + community.general.filesize: + path: /var/swapfile + size: 2G + blocksize: 512B + mode: u=rw,go= + owner: root + group: root +""" + +RETURN = r""" +cmd: + description: Command executed to create or resize the file. + type: str + returned: when changed or failed + sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024 + +filesize: + description: Dictionary of sizes related to the file. + type: dict + returned: always + contains: + blocks: + description: Number of blocks in the file. + type: int + sample: 500 + blocksize: + description: Size of the blocks in bytes. + type: int + sample: 1024 + bytes: + description: Size of the file, in bytes, as the product of RV(filesize.blocks) and RV(filesize.blocksize). + type: int + sample: 512000 + iec: + description: Size of the file, in human-readable format, following IEC standard. + type: str + sample: 500.0 KiB + si: + description: Size of the file, in human-readable format, following SI standard. + type: str + sample: 512.0 kB + +size_diff: + description: Difference (positive or negative) between old size and new size, in bytes. + type: int + sample: -1234567890 + returned: always + +path: + description: Realpath of the file if it is a symlink, otherwise the same than module's param. + type: str + sample: /var/swap0 + returned: always +""" + + +import re +import os +import math + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +# These are the multiplicative suffixes understood (or returned) by dd and +# others (ls, df, lvresize, lsblk...). +SIZE_UNITS = dict( + B=1, + kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1, + MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2, + GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3, + TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4, + PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5, + EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6, + ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7, + YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8, +) + + +def bytes_to_human(size, iec=False): + """Return human-readable size (with SI or IEC suffix) from bytes. This is + only to populate the returned result of the module, not to handle the + file itself (we only rely on bytes for that). 
+ """ + unit = 'B' + for (u, v) in SIZE_UNITS.items(): + if size < v: + continue + if iec: + if 'i' not in u or size / v >= 1024: + continue + else: + if v % 5 or size / v >= 1000: + continue + unit = u + + hsize = round(size / SIZE_UNITS[unit], 2) + if unit == 'B': + hsize = int(hsize) + + unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit) + if unit == 'KB': + unit = 'kB' + + return '%s %s' % (str(hsize), unit) + + +def smart_blocksize(size, unit, product, bsize): + """Ensure the total size can be written as blocks*blocksize, with blocks + and blocksize being integers. + """ + if not product % bsize: + return bsize + + # Basically, for a file of 8kB (=8000B), system's block size of 4096 bytes + # is not usable. The smallest integer number of kB to work with 512B blocks + # is 64, the nexts are 128, 192, 256, and so on. + + unit_size = SIZE_UNITS[unit] + + if size == int(size): + if unit_size > SIZE_UNITS['MiB']: + if unit_size % 5: + return SIZE_UNITS['MiB'] + return SIZE_UNITS['MB'] + return unit_size + + if unit == 'B': + raise AssertionError("byte is the smallest unit and requires an integer value") + + if 0 < product < bsize: + return product + + for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2): + if not product % bsz: + return bsz + return 1 + + +def split_size_unit(string, isint=False): + """Split a string between the size value (int or float) and the unit. + Support optional space(s) between the numeric value and the unit. + """ + unit = re.sub(r'(\d|\.)', r'', string).strip() + value = float(re.sub(unit, r'', string).strip()) + if isint and unit in ('B', ''): + if int(value) != value: + raise AssertionError("invalid blocksize value: bytes require an integer value") + + if not unit: + unit = None + product = int(round(value)) + else: + if unit not in SIZE_UNITS.keys(): + raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." % + (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get)))) + product = int(round(value * SIZE_UNITS[unit])) + return value, unit, product + + +def size_string(value): + """Convert a raw value to a string, but only if it is an integer, a float + or a string itself. + """ + if not isinstance(value, (int, float, str)): + raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value)) + return str(value) + + +def size_spec(args): + """Return a dictionary with size specifications, especially the size in + bytes (after rounding it to an integer number of blocks). 
+ """ + blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2] + if blocksize_in_bytes == 0: + raise AssertionError("block size cannot be equal to zero") + + size_value, size_unit, size_result = split_size_unit(args['size']) + if not size_unit: + blocks = int(math.ceil(size_value)) + else: + blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes) + blocks = int(math.ceil(size_result / blocksize_in_bytes)) + + args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes) + args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes, + iec=bytes_to_human(round_bytes, True), + si=bytes_to_human(round_bytes)) + return args['size_spec'] + + +def current_size(args): + """Return the size of the file at the given location if it exists, or None.""" + path = args['path'] + if os.path.exists(path): + if not os.path.isfile(path): + raise AssertionError("%s exists but is not a regular file" % path) + args['file_size'] = os.stat(path).st_size + else: + args['file_size'] = None + return args['file_size'] + + +def complete_dd_cmdline(args, dd_cmd): + """Compute dd options to grow or truncate a file.""" + if args['file_size'] == args['size_spec']['bytes'] and not args['force']: + # Nothing to do. + return list() + + bs = args['size_spec']['blocksize'] + + # For sparse files (create, truncate, grow): write count=0 block. + if args['sparse']: + seek = args['size_spec']['blocks'] + elif args['force'] or not os.path.exists(args['path']): # Create file + seek = 0 + elif args['size_diff'] < 0: # Truncate file + seek = args['size_spec']['blocks'] + elif args['size_diff'] % bs: # Grow file + seek = int(args['file_size'] / bs) + 1 + else: + seek = int(args['file_size'] / bs) + + count = args['size_spec']['blocks'] - seek + dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)] + + return dd_cmd + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + size=dict(type='raw', required=True), + blocksize=dict(type='raw'), + source=dict(type='path', default='/dev/zero'), + sparse=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + add_file_common_args=True, + ) + args = dict(**module.params) + diff = dict(before=dict(), after=dict()) + + if args['sparse'] and args['force']: + module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true') + if not os.path.exists(os.path.dirname(args['path'])): + module.fail_json(msg='parent directory of the file must exist prior to run this module') + if not args['blocksize']: + args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize) + + try: + args['size'] = size_string(args['size']) + args['blocksize'] = size_string(args['blocksize']) + initial_filesize = current_size(args) + size_descriptors = size_spec(args) + except AssertionError as err: + module.fail_json(msg=to_native(err)) + + expected_filesize = size_descriptors['bytes'] + if initial_filesize: + args['size_diff'] = expected_filesize - initial_filesize + diff['after']['size'] = expected_filesize + diff['before']['size'] = initial_filesize + + result = dict( + changed=args['force'], + size_diff=args['size_diff'], + path=args['path'], + filesize=size_descriptors) + + dd_bin = module.get_bin_path('dd', True) + dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']] + + if expected_filesize != initial_filesize or args['force']: + result['cmd'] = ' 
'.join(complete_dd_cmdline(args, dd_cmd)) + if module.check_mode: + result['changed'] = True + else: + result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd) + + diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args) + if initial_filesize: + result['size_diff'] = result_filesize - initial_filesize + if not args['force']: + result['changed'] = result_filesize != initial_filesize + + if result['rc']: + msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % ( + args['path'], args['size'], args['source']) + module.fail_json(msg=msg, **result) + if result_filesize != expected_filesize: + msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % ( + args['path'], args['size'], args['source'], result_filesize) + module.fail_json(msg=msg, **result) + + # dd follows symlinks, and so does this module, while file module doesn't. + # If we call it, this is to manage file's mode, owner and so on, not the + # symlink's ones. + file_params = dict(**module.params) + if os.path.islink(args['path']): + file_params['path'] = result['path'] = os.path.realpath(args['path']) + + if args['file_size'] is not None: + file_args = module.load_file_common_arguments(file_params) + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + result['diff'] = diff + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py deleted file mode 120000 index f725fed012..0000000000 --- a/plugins/modules/filesystem.py +++ /dev/null @@ -1 +0,0 @@ -./system/filesystem.py \ No newline at end of file diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py new file mode 100644 index 0000000000..1477925de3 --- /dev/null +++ b/plugins/modules/filesystem.py @@ -0,0 +1,727 @@ +#!/usr/bin/python + +# Copyright (c) 2021, quidame +# Copyright (c) 2013, Alexander Bulimov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Alexander Bulimov (@abulimov) + - quidame (@quidame) +module: filesystem +short_description: Makes a filesystem +description: + - This module creates a filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - If O(state=present), the filesystem is created if it does not already exist, that is the default behaviour if O(state) + is omitted. + - If O(state=absent), filesystem signatures on O(dev) are wiped if it contains a filesystem (as known by C(blkid)). + - When O(state=absent), all other options but O(dev) are ignored, and the module does not fail if the device O(dev) + does not actually exist. + type: str + choices: [present, absent] + default: present + version_added: 1.3.0 + fstype: + choices: [bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs] + description: + - Filesystem type to be created. This option is required with O(state=present) (or if O(state) is omitted). + - V(ufs) support has been added in community.general 3.4.0. + - V(bcachefs) support has been added in community.general 8.6.0. 
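Looking back at the filesize module that ends above: when growing a file, complete_dd_cmdline() seeks past the existing blocks so dd writes only the missing ones. A worked example of that arithmetic with illustrative sizes (the remaining fstype fields continue after the sketch):

```python
# Growing a 1 GiB file to 3 GiB with 1 MiB blocks: seek skips the existing
# 1024 blocks, count writes only the missing 2048 (existing bytes untouched).
blocksize = 1024 * 1024        # 1 MiB
file_size = 1 * 1024**3        # current size
wanted_blocks = 3 * 1024       # target: 3 GiB in 1 MiB blocks

seek = file_size // blocksize  # 1024
count = wanted_blocks - seek   # 2048
print('dd if=/dev/zero of=/var/bigfile bs=%d seek=%d count=%d'
      % (blocksize, seek, count))
```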
+ type: str + aliases: [type] + dev: + description: + - Target path to block device (Linux) or character device (FreeBSD) or regular file (both). + - When setting Linux-specific filesystem types on FreeBSD, this module only works when applying to regular files, also known as + disk images. + - Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support a regular file as their target O(dev). + - Support for character devices on FreeBSD has been added in community.general 3.4.0. + type: path + required: true + aliases: [device] + force: + description: + - If V(true), allows to create new filesystem on devices that already has filesystem. + type: bool + default: false + resizefs: + description: + - If V(true), if the block device and filesystem size differ, grow the filesystem into the space. + - >- + Supported when O(fstype) is one of: V(bcachefs), V(btrfs), V(ext2), V(ext3), V(ext4), V(ext4dev), V(f2fs), V(lvm), V(xfs), V(ufs) and V(vfat). + Attempts to resize other filesystem types fail. + - XFS only grows if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, + so resizing of XFS is not supported on FreeBSD systems. + - VFAT is likely to fail if C(fatresize < 1.04). + - Mutually exclusive with O(uuid). + type: bool + default: false + opts: + description: + - List of options to be passed to C(mkfs) command. + type: str + uuid: + description: + - Set filesystem's UUID to the given value. + - The UUID options specified in O(opts) take precedence over this value. + - See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values. + - For O(fstype=lvm) the value is ignored, it resets the PV UUID if set. + - Supported for O(fstype) being one of V(bcachefs), V(ext2), V(ext3), V(ext4), V(ext4dev), V(lvm), or V(xfs). + - This is B(not idempotent). Specifying this option always results in a change. + - Mutually exclusive with O(resizefs). + type: str + version_added: 7.1.0 +requirements: + - Uses specific tools related to the O(fstype) for creating or resizing a filesystem (from packages e2fsprogs, xfsprogs, + dosfstools, and so on). + - Uses generic tools mostly related to the Operating System (Linux or FreeBSD) or available on both, as C(blkid). + - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. +notes: + - Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid) is unable to detect a filesystem (and in + case C(fstyp) on FreeBSD is also unable to detect a filesystem), this filesystem is overwritten even if O(force=false). + - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide a C(blkid) command that is compatible with this + module. However, these packages conflict with each other, and only the C(util-linux) package provides the command required + to not fail when O(state=absent). +seealso: + - module: community.general.filesize + - module: ansible.posix.mount + - name: xfs_admin(8) manpage for Linux + description: Manual page of the GNU/Linux's xfs_admin implementation. + link: https://man7.org/linux/man-pages/man8/xfs_admin.8.html + - name: tune2fs(8) manpage for Linux + description: Manual page of the GNU/Linux's tune2fs implementation. 
+ link: https://man7.org/linux/man-pages/man8/tune2fs.8.html +""" + +EXAMPLES = r""" +- name: Create a ext2 filesystem on /dev/sdb1 + community.general.filesystem: + fstype: ext2 + dev: /dev/sdb1 + +- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks + community.general.filesystem: + fstype: ext4 + dev: /dev/sdb1 + opts: -cc + +- name: Blank filesystem signature on /dev/sdb1 + community.general.filesystem: + dev: /dev/sdb1 + state: absent + +- name: Create a filesystem on top of a regular file + community.general.filesystem: + dev: /path/to/disk.img + fstype: vfat + +- name: Reset an xfs filesystem UUID on /dev/sdb1 + community.general.filesystem: + fstype: xfs + dev: /dev/sdb1 + uuid: generate + +- name: Reset an ext4 filesystem UUID on /dev/sdb1 + community.general.filesystem: + fstype: ext4 + dev: /dev/sdb1 + uuid: random + +- name: Reset an LVM filesystem (PV) UUID on /dev/sdc + community.general.filesystem: + fstype: lvm + dev: /dev/sdc + uuid: random +""" + +import os +import platform +import re +import stat + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class Device(object): + def __init__(self, module, path): + self.module = module + self.path = path + + def size(self): + """ Return size in bytes of device. Returns int """ + statinfo = os.stat(self.path) + if stat.S_ISBLK(statinfo.st_mode): + blockdev_cmd = self.module.get_bin_path("blockdev", required=True) + dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) + devsize_in_bytes = int(out) + elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD': + diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True) + dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True) + devsize_in_bytes = int(out.split()[2]) + elif os.path.isfile(self.path): + devsize_in_bytes = os.path.getsize(self.path) + else: + self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) + + return devsize_in_bytes + + def get_mountpoint(self): + """Return (first) mountpoint of device. Returns None when not mounted.""" + cmd_findmnt = self.module.get_bin_path("findmnt", required=True) + + # find mountpoint + rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output", + "TARGET", "--source", self.path], check_rc=False) + if rc != 0: + mountpoint = None + else: + mountpoint = mountpoint.split('\n')[0] + + return mountpoint + + def __str__(self): + return self.path + + +class Filesystem(object): + + MKFS = None + MKFS_FORCE_FLAGS = [] + MKFS_SET_UUID_OPTIONS = None + MKFS_SET_UUID_EXTRA_OPTIONS = [] + INFO = None + GROW = None + GROW_MAX_SPACE_FLAGS = [] + GROW_MOUNTPOINT_ONLY = False + CHANGE_UUID = None + CHANGE_UUID_OPTION = None + CHANGE_UUID_OPTION_HAS_ARG = True + + LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} + + def __init__(self, module): + self.module = module + + @property + def fstype(self): + return type(self).__name__ + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer). + Should query the info with a per-fstype command that can access the + device whenever it is mounted or not, and parse the command output. + Parser must ensure to return an integer, or raise a ValueError. 
+ """ + raise NotImplementedError() + + def create(self, opts, dev, uuid=None): + if self.module.check_mode: + return + + if uuid and self.MKFS_SET_UUID_OPTIONS: + if not (set(self.MKFS_SET_UUID_OPTIONS) & set(opts)): + opts += [self.MKFS_SET_UUID_OPTIONS[0], uuid] + self.MKFS_SET_UUID_EXTRA_OPTIONS + + mkfs = self.module.get_bin_path(self.MKFS, required=True) + cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] + self.module.run_command(cmd, check_rc=True) + if uuid and self.CHANGE_UUID and self.MKFS_SET_UUID_OPTIONS is None: + self.change_uuid(new_uuid=uuid, dev=dev) + + def wipefs(self, dev): + if self.module.check_mode: + return + + # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above) + # that is ported to FreeBSD. The use of dd as a portable fallback is + # not doable here if it needs get_mountpoint() (to prevent corruption of + # a mounted filesystem), since 'findmnt' is not available on FreeBSD, + # even in util-linux port for this OS. + wipefs = self.module.get_bin_path('wipefs', required=True) + cmd = [wipefs, "--all", str(dev)] + self.module.run_command(cmd, check_rc=True) + + def grow_cmd(self, target): + """Build and return the resizefs commandline as list.""" + cmdline = [self.module.get_bin_path(self.GROW, required=True)] + cmdline += self.GROW_MAX_SPACE_FLAGS + [target] + return cmdline + + def grow(self, dev): + """Get dev and fs size and compare. Returns stdout of used command.""" + devsize_in_bytes = dev.size() + + try: + fssize_in_bytes = self.get_fs_size(dev) + except NotImplementedError: + self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype) + except ValueError as err: + self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err))) + self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev)) + + if not fssize_in_bytes < devsize_in_bytes: + self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) + elif self.module.check_mode: + self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev)) + + if self.GROW_MOUNTPOINT_ONLY: + mountpoint = dev.get_mountpoint() + if not mountpoint: + self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype)) + grow_target = mountpoint + else: + grow_target = str(dev) + + dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) + return out + + def change_uuid_cmd(self, new_uuid, target): + """Build and return the UUID change command line as list.""" + cmdline = [self.module.get_bin_path(self.CHANGE_UUID, required=True)] + if self.CHANGE_UUID_OPTION_HAS_ARG: + cmdline += [self.CHANGE_UUID_OPTION, new_uuid, target] + else: + cmdline += [self.CHANGE_UUID_OPTION, target] + return cmdline + + def change_uuid(self, new_uuid, dev): + """Change filesystem UUID. 
Returns stdout of used command""" + if self.module.check_mode: + self.module.exit_json(change=True, msg='Changing %s filesystem UUID on device %s' % (self.fstype, dev)) + + dummy, out, dummy = self.module.run_command(self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True) + return out + + +class Ext(Filesystem): + MKFS_FORCE_FLAGS = ['-F'] + MKFS_SET_UUID_OPTIONS = ['-U'] + INFO = 'tune2fs' + GROW = 'resize2fs' + CHANGE_UUID = 'tune2fs' + CHANGE_UUID_OPTION = "-U" + + def get_fs_size(self, dev): + """Get Block count and Block size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + block_count = block_size = None + for line in out.splitlines(): + if 'Block count:' in line: + block_count = int(line.split(':')[1].strip()) + elif 'Block size:' in line: + block_size = int(line.split(':')[1].strip()) + if None not in (block_size, block_count): + break + else: + raise ValueError(repr(out)) + + return block_size * block_count + + +class Ext2(Ext): + MKFS = 'mkfs.ext2' + + +class Ext3(Ext): + MKFS = 'mkfs.ext3' + + +class Ext4(Ext): + MKFS = 'mkfs.ext4' + + +class XFS(Filesystem): + MKFS = 'mkfs.xfs' + MKFS_FORCE_FLAGS = ['-f'] + INFO = 'xfs_info' + GROW = 'xfs_growfs' + GROW_MOUNTPOINT_ONLY = True + CHANGE_UUID = "xfs_admin" + CHANGE_UUID_OPTION = "-U" + + def get_fs_size(self, dev): + """Get bsize and blocks and return their product.""" + cmdline = [self.module.get_bin_path(self.INFO, required=True)] + + # Depending on the versions, xfs_info is able to get info from the + # device, whenever it is mounted or not, or only if unmounted, or + # only if mounted, or not at all. For any version until now, it is + # able to query info from the mountpoint. So try it first, and use + # device as the last resort: it may or may not work. 
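For context on the parsing implemented just below: xfs_info prints geometry as C(=)-separated columns, and the code splits each line on C(=) to pull C(bsize) and C(blocks) out of the C(data) row. A sketch against a typical line (values illustrative):

```python
# Typical xfs_info 'data' row and the split performed below:
line = 'data     =                       bsize=4096   blocks=524288, imaxpct=25'
col = line.split('=')
assert col[0].strip() == 'data' and col[1].strip() == 'bsize'
block_size = int(col[2].split()[0])      # '4096   blocks'   -> 4096
block_count = int(col[3].split(',')[0])  # '524288, imaxpct' -> 524288
print(block_size * block_count)          # filesystem size in bytes
```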
+ mountpoint = dev.get_mountpoint() + if mountpoint: + cmdline += [mountpoint] + else: + cmdline += [str(dev)] + dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV) + + block_size = block_count = None + for line in out.splitlines(): + col = line.split('=') + if col[0].strip() == 'data': + if col[1].strip() == 'bsize': + block_size = int(col[2].split()[0]) + if col[2].split()[1] == 'blocks': + block_count = int(col[3].split(',')[0]) + if None not in (block_size, block_count): + break + else: + raise ValueError(repr(out)) + + return block_size * block_count + + +class Reiserfs(Filesystem): + MKFS = 'mkfs.reiserfs' + MKFS_FORCE_FLAGS = ['-q'] + + +class Bcachefs(Filesystem): + MKFS = 'mkfs.bcachefs' + MKFS_FORCE_FLAGS = ['--force'] + MKFS_SET_UUID_OPTIONS = ['-U', '--uuid'] + INFO = 'bcachefs' + GROW = 'bcachefs' + GROW_MAX_SPACE_FLAGS = ['device', 'resize'] + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer).""" + dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), + 'show-super', str(dev)], check_rc=True) + + for line in stdout.splitlines(): + if "Size: " in line: + parts = line.split() + unit = parts[2] + + base = None + exp = None + + units_2 = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"] + units_10 = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"] + + try: + exp = units_2.index(unit) + base = 1024 + except ValueError: + exp = units_10.index(unit) + base = 1000 + + if exp == 0: + value = int(parts[1]) + else: + value = float(parts[1]) + + if base is not None and exp is not None: + return int(value * pow(base, exp)) + + raise ValueError(repr(stdout)) + + +class Btrfs(Filesystem): + MKFS = 'mkfs.btrfs' + INFO = 'btrfs' + GROW = 'btrfs' + GROW_MAX_SPACE_FLAGS = ['filesystem', 'resize', 'max'] + GROW_MOUNTPOINT_ONLY = True + + def __init__(self, module): + super(Btrfs, self).__init__(module) + mkfs = self.module.get_bin_path(self.MKFS, required=True) + dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True) + match = re.search(r" v([0-9.]+)", stdout) + if not match: + # v0.20-rc1 use stderr + match = re.search(r" v([0-9.]+)", stderr) + if match: + # v0.20-rc1 doesn't have --force parameter added in following version v3.12 + if LooseVersion(match.group(1)) >= LooseVersion('3.12'): + self.MKFS_FORCE_FLAGS = ['-f'] + else: + # assume version is greater or equal to 3.12 + self.MKFS_FORCE_FLAGS = ['-f'] + self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer).""" + mountpoint = dev.get_mountpoint() + if not mountpoint: + self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype)) + + dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), + 'filesystem', 'usage', '-b', mountpoint], check_rc=True) + for line in stdout.splitlines(): + if "Device size" in line: + return int(line.split()[-1]) + raise ValueError(repr(stdout)) + + +class Ocfs2(Filesystem): + MKFS = 'mkfs.ocfs2' + MKFS_FORCE_FLAGS = ['-Fx'] + + +class F2fs(Filesystem): + MKFS = 'mkfs.f2fs' + INFO = 'dump.f2fs' + GROW = 'resize.f2fs' + + def __init__(self, module): + super(F2fs, self).__init__(module) + mkfs = self.module.get_bin_path(self.MKFS, required=True) + dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV) + # Looking for " 
F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" + # mkfs.f2fs displays version since v1.2.0 + match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) + if match is not None: + # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem + # before that version -f switch wasn't used + if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): + self.MKFS_FORCE_FLAGS = ['-f'] + + def get_fs_size(self, dev): + """Get sector size and total FS sectors and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + sector_size = sector_count = None + for line in out.splitlines(): + if 'Info: sector size = ' in line: + # expected: 'Info: sector size = 512' + sector_size = int(line.split()[4]) + elif 'Info: total FS sectors = ' in line: + # expected: 'Info: total FS sectors = 102400 (50 MB)' + sector_count = int(line.split()[5]) + if None not in (sector_size, sector_count): + break + else: + raise ValueError(repr(out)) + + return sector_size * sector_count + + +class VFAT(Filesystem): + INFO = 'fatresize' + GROW = 'fatresize' + GROW_MAX_SPACE_FLAGS = ['-s', 'max'] + + def __init__(self, module): + super(VFAT, self).__init__(module) + if platform.system() == 'FreeBSD': + self.MKFS = 'newfs_msdos' + else: + self.MKFS = 'mkfs.vfat' + + def get_fs_size(self, dev): + """Get and return size of filesystem, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + fssize = None + for line in out.splitlines()[1:]: + parts = line.split(':', 1) + if len(parts) < 2: + continue + param, value = parts + if param.strip() in ('Size', 'Cur size'): + fssize = int(value.strip()) + break + else: + raise ValueError(repr(out)) + + return fssize + + +class LVM(Filesystem): + MKFS = 'pvcreate' + MKFS_FORCE_FLAGS = ['-f'] + MKFS_SET_UUID_OPTIONS = ['-u', '--uuid'] + MKFS_SET_UUID_EXTRA_OPTIONS = ['--norestorefile'] + INFO = 'pvs' + GROW = 'pvresize' + CHANGE_UUID = 'pvchange' + CHANGE_UUID_OPTION = '-u' + CHANGE_UUID_OPTION_HAS_ARG = False + + def get_fs_size(self, dev): + """Get and return PV size, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) + pv_size = int(size) + return pv_size + + +class Swap(Filesystem): + MKFS = 'mkswap' + MKFS_FORCE_FLAGS = ['-f'] + + +class UFS(Filesystem): + MKFS = 'newfs' + INFO = 'dumpfs' + GROW = 'growfs' + GROW_MAX_SPACE_FLAGS = ['-y'] + + def get_fs_size(self, dev): + """Get providersize and fragment size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + fragmentsize = providersize = None + for line in out.splitlines(): + if line.startswith('fsize'): + fragmentsize = int(line.split()[1]) + elif 'providersize' in line: + providersize = int(line.split()[-1]) + if None not in (fragmentsize, providersize): + break + else: + raise ValueError(repr(out)) + + return fragmentsize * providersize + + +FILESYSTEMS = { + 'bcachefs': Bcachefs, + 'ext2': Ext2, + 'ext3': Ext3, + 'ext4': Ext4, + 'ext4dev': Ext4, + 'f2fs': F2fs, + 'reiserfs': Reiserfs, + 'xfs': XFS, + 'btrfs': Btrfs, + 'vfat': VFAT, + 'ocfs2': Ocfs2, + 
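+    # Keys are the filesystem type strings reported by 'blkid -o value -s TYPE'
+    # (or 'fstyp' on FreeBSD); 'LVM2_member' is blkid's TYPE for an LVM physical
+    # volume, which main() exposes under the friendly name 'lvm'.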
'LVM2_member': LVM, + 'swap': Swap, + 'ufs': UFS, +} + + +def main(): + friendly_names = { + 'lvm': 'LVM2_member', + } + + fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys()) + + # There is no "single command" to manipulate filesystems, so we map them all out and their options + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + fstype=dict(type='str', aliases=['type'], choices=list(fstypes)), + dev=dict(type='path', required=True, aliases=['device']), + opts=dict(type='str'), + force=dict(type='bool', default=False), + resizefs=dict(type='bool', default=False), + uuid=dict(type='str'), + ), + required_if=[ + ('state', 'present', ['fstype']) + ], + mutually_exclusive=[ + ('resizefs', 'uuid'), + ], + supports_check_mode=True, + ) + + state = module.params['state'] + dev = module.params['dev'] + fstype = module.params['fstype'] + opts = module.params['opts'] + force = module.params['force'] + resizefs = module.params['resizefs'] + uuid = module.params['uuid'] + + mkfs_opts = [] + if opts is not None: + mkfs_opts = opts.split() + + changed = False + + if not os.path.exists(dev): + msg = "Device %s not found." % dev + if state == "present": + module.fail_json(msg=msg) + else: + module.exit_json(msg=msg) + + dev = Device(module, dev) + + # In case blkid/fstyp isn't able to identify an existing filesystem, device + # is considered as empty, then this existing filesystem would be overwritten + # even if force isn't enabled. + cmd = module.get_bin_path('blkid', required=True) + rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) + fs = raw_fs.strip() + if not fs and platform.system() == 'FreeBSD': + cmd = module.get_bin_path('fstyp', required=True) + rc, raw_fs, err = module.run_command([cmd, str(dev)]) + fs = raw_fs.strip() + + if state == "present": + if fstype in friendly_names: + fstype = friendly_names[fstype] + + try: + klass = FILESYSTEMS[fstype] + except KeyError: + module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype) + + filesystem = klass(module) + + if uuid and not (filesystem.CHANGE_UUID or filesystem.MKFS_SET_UUID_OPTIONS): + module.fail_json(changed=False, msg="module does not support UUID option for this filesystem (%s) yet." % fstype) + + same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] + if same_fs and not resizefs and not uuid and not force: + module.exit_json(changed=False) + elif same_fs: + if resizefs: + if not filesystem.GROW: + module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." 
% fstype) + + out = filesystem.grow(dev) + + module.exit_json(changed=True, msg=out) + elif uuid: + + out = filesystem.change_uuid(new_uuid=uuid, dev=dev) + + module.exit_json(changed=True, msg=out) + elif fs and not force: + module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err) + + # create fs + filesystem.create(opts=mkfs_opts, dev=dev, uuid=uuid) + changed = True + + elif fs: + # wipe fs signatures + filesystem = Filesystem(module) + filesystem.wipefs(dev) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py deleted file mode 120000 index 5db5488ee3..0000000000 --- a/plugins/modules/flatpak.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/flatpak.py \ No newline at end of file diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py new file mode 100644 index 0000000000..3fab8f820b --- /dev/null +++ b/plugins/modules/flatpak.py @@ -0,0 +1,415 @@ +#!/usr/bin/python + +# Copyright (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: flatpak +short_description: Manage flatpaks +description: + - Allows users to add or remove flatpaks. + - See the M(community.general.flatpak_remote) module for managing flatpak remotes. +author: + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) +requirements: + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - If O(state=latest), the module always returns RV(ignore:changed=true). + diff_mode: + support: none +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. + type: path + default: flatpak + method: + description: + - The installation method to use. + - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user). + type: str + choices: [system, user] + default: system + name: + description: + - The name of the flatpak to manage. To operate on several packages this can accept a list of packages. + - When used with O(state=present), O(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS + name that identifies a flatpak. + - Both C(https://) and C(http://) URLs are supported. + - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak. + An example for a reverse DNS name is C(org.gnome.gedit). + - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in the reverse DNS format. + - When supplying a URL with O(state=absent) or O(state=latest), the module tries to match the installed flatpak based + on the name of the flatpakref to remove or update it. However, there is no guarantee that the names of the flatpakref + file and the reverse DNS name of the installed flatpak do match. + type: list + elements: str + required: true + no_dependencies: + description: + - If installing runtime dependencies should be omitted or not. 
+ - This parameter is primarily implemented for integration testing this module. There might however be some use cases + where you would want to have this, like when you are packaging your own flatpaks. + type: bool + default: false + version_added: 3.2.0 + remote: + description: + - The flatpak remote (repository) to install the flatpak from. + - By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before you can use this. + - See the M(community.general.flatpak_remote) module for managing flatpak remotes. + type: str + default: flathub + state: + description: + - Indicates the desired package state. + - The value V(latest) is supported since community.general 8.6.0. + choices: [absent, present, latest] + type: str + default: present +""" + +EXAMPLES = r""" +- name: Install the spotify flatpak + community.general.flatpak: + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + state: present + +- name: Install the gedit flatpak package without dependencies (not recommended) + community.general.flatpak: + name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref + state: present + no_dependencies: true + +- name: Install the gedit package from flathub for current user + community.general.flatpak: + name: org.gnome.gedit + state: present + method: user + +- name: Install the Gnome Calendar flatpak from the gnome remote system-wide + community.general.flatpak: + name: org.gnome.Calendar + state: present + remote: gnome + +- name: Install GIMP using custom flatpak binary path + community.general.flatpak: + name: org.gimp.GIMP + state: present + executable: /usr/local/bin/flatpak-dev + +- name: Install multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + +- name: Update the spotify flatpak + community.general.flatpak: + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + state: latest + +- name: Update the gedit flatpak package without dependencies (not recommended) + community.general.flatpak: + name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref + state: latest + no_dependencies: true + +- name: Update the gedit package from flathub for current user + community.general.flatpak: + name: org.gnome.gedit + state: latest + method: user + +- name: Update the Gnome Calendar flatpak from the gnome remote system-wide + community.general.flatpak: + name: org.gnome.Calendar + state: latest + remote: gnome + +- name: Update multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: latest + +- name: Remove the gedit flatpak + community.general.flatpak: + name: org.gnome.gedit + state: absent + +- name: Remove multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: absent +""" + +RETURN = r""" +command: + description: The exact flatpak command that was executed. 
+  returned: When a flatpak command has been executed
+  type: str
+  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
+"""
+
+from urllib.parse import urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
+
+
+def install_flat(module, binary, remote, names, method, no_dependencies):
+    """Add new flatpaks."""
+    global result  # pylint: disable=global-variable-not-assigned
+    uri_names = []
+    id_names = []
+    for name in names:
+        if name.startswith('http://') or name.startswith('https://'):
+            uri_names.append(name)
+        else:
+            id_names.append(name)
+    base_command = [binary, "install", "--{0}".format(method)]
+    flatpak_version = _flatpak_version(module, binary)
+    if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
+        base_command += ["-y"]
+    else:
+        base_command += ["--noninteractive"]
+    if no_dependencies:
+        base_command += ["--no-deps"]
+    if uri_names:
+        command = base_command + uri_names
+        _flatpak_command(module, module.check_mode, command)
+    if id_names:
+        command = base_command + [remote] + id_names
+        _flatpak_command(module, module.check_mode, command)
+    result['changed'] = True
+
+
+def update_flat(module, binary, names, method, no_dependencies):
+    """Update existing flatpaks."""
+    global result  # pylint: disable=global-variable-not-assigned
+    installed_flat_names = [
+        _match_installed_flat_name(module, binary, name, method)
+        for name in names
+    ]
+    command = [binary, "update", "--{0}".format(method)]
+    flatpak_version = _flatpak_version(module, binary)
+    if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
+        command += ["-y"]
+    else:
+        command += ["--noninteractive"]
+    if no_dependencies:
+        command += ["--no-deps"]
+    command += installed_flat_names
+    stdout = _flatpak_command(module, module.check_mode, command)
+    result["changed"] = (
+        True if module.check_mode else stdout.find("Nothing to do.") == -1
+    )
+
+
+def uninstall_flat(module, binary, names, method):
+    """Remove existing flatpaks."""
+    global result  # pylint: disable=global-variable-not-assigned
+    installed_flat_names = [
+        _match_installed_flat_name(module, binary, name, method)
+        for name in names
+    ]
+    command = [binary, "uninstall"]
+    flatpak_version = _flatpak_version(module, binary)
+    if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
+        command += ["-y"]
+    else:
+        command += ["--noninteractive"]
+    command += ["--{0}".format(method)] + installed_flat_names
+    _flatpak_command(module, module.check_mode, command)
+    result['changed'] = True
+
+
+def flatpak_exists(module, binary, names, method):
+    """Check if the flatpaks are installed."""
+    command = [binary, "list", "--{0}".format(method)]
+    output = _flatpak_command(module, False, command)
+    installed = []
+    not_installed = []
+    for name in names:
+        parsed_name = _parse_flatpak_name(name).lower()
+        if parsed_name in output.lower():
+            installed.append(name)
+        else:
+            not_installed.append(name)
+    return installed, not_installed
+
+
+def _match_installed_flat_name(module, binary, name, method):
+    # This is a difficult function, since if the user supplies a flatpakref url,
+    # we have to rely on a naming convention:
+    # The flatpakref file name needs to match the flatpak name
+    global result  # pylint: disable=global-variable-not-assigned
+    parsed_name = _parse_flatpak_name(name)
+    # Try running flatpak list with columns feature
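+    # Note (based on the constant above): flatpak >= 1.2 accepts
+    # '--columns=application', while older releases fail with
+    # "Unknown option --columns=application"; the rc/stderr check below
+    # uses that failure to select the fallback matching strategy.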
command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"] + _flatpak_command(module, False, command, ignore_failure=True) + if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']: + # Probably flatpak before 1.2 + matched_flatpak_name = \ + _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method) + else: + # Probably flatpak >= 1.2 + matched_flatpak_name = \ + _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method) + + if matched_flatpak_name: + return matched_flatpak_name + else: + result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\ + "the name `{0}`. ".format(_parse_flatpak_name(name)) +\ + "If you used a URL, try using the reverse DNS name of the flatpak" + module.fail_json(**result) + + +def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method): + global result # pylint: disable=global-variable-not-assigned + command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"] + output = _flatpak_command(module, False, command) + for row in output.split('\n'): + if parsed_name.lower() == row.lower(): + return row + + +def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method): + global result # pylint: disable=global-variable-not-assigned + command = [binary, "list", "--{0}".format(method), "--app"] + output = _flatpak_command(module, False, command) + for row in output.split('\n'): + if parsed_name.lower() in row.lower(): + return row.split()[0] + + +def _is_flatpak_id(part): + # For guidelines on application IDs, refer to the following resources: + # Flatpak: + # https://docs.flatpak.org/en/latest/conventions.html#application-ids + # Flathub: + # https://docs.flathub.org/docs/for-app-authors/requirements#application-id + if '.' 
not in part: + return False + sections = part.split('.') + if len(sections) < 2: + return False + domain = sections[0] + if not domain.islower(): + return False + for section in sections[1:]: + if not section.isalnum(): + return False + return True + + +def _parse_flatpak_name(name): + if name.startswith('http://') or name.startswith('https://'): + file_name = urlparse(name).path.split('/')[-1] + file_name_without_extension = file_name.split('.')[0:-1] + common_name = ".".join(file_name_without_extension) + else: + parts = name.split('/') + for part in parts: + if _is_flatpak_id(part): + common_name = part + break + else: + common_name = name + return common_name + + +def _flatpak_version(module, binary): + global result # pylint: disable=global-variable-not-assigned + command = [binary, "--version"] + output = _flatpak_command(module, False, command) + version_number = output.split()[1] + return version_number + + +def _flatpak_command(module, noop, command, ignore_failure=False): + global result # pylint: disable=global-variable-not-assigned + result['command'] = ' '.join(command) + if noop: + result['rc'] = 0 + return "" + + result['rc'], result['stdout'], result['stderr'] = module.run_command( + command, check_rc=not ignore_failure + ) + return result['stdout'] + + +def main(): + # This module supports check mode + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True), + remote=dict(type='str', default='flathub'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default='present', + choices=['absent', 'present', 'latest']), + no_dependencies=dict(type='bool', default=False), + executable=dict(type='path', default='flatpak') + ), + supports_check_mode=True, + ) + + name = module.params['name'] + state = module.params['state'] + remote = module.params['remote'] + no_dependencies = module.params['no_dependencies'] + method = module.params['method'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result) + + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + + installed, not_installed = flatpak_exists(module, binary, name, method) + if state == 'absent' and installed: + uninstall_flat(module, binary, installed, method) + else: + if state == 'latest' and installed: + update_flat(module, binary, installed, method, no_dependencies) + if state in ('present', 'latest') and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py deleted file mode 120000 index 0d0394c03c..0000000000 --- a/plugins/modules/flatpak_remote.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/flatpak_remote.py \ No newline at end of file diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py new file mode 100644 index 0000000000..891942143d --- /dev/null +++ b/plugins/modules/flatpak_remote.py @@ -0,0 +1,247 @@ +#!/usr/bin/python + +# Copyright (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: flatpak_remote +short_description: Manage flatpak repository remotes +description: + - Allows users to add or remove flatpak remotes. + - The flatpak remotes concept is comparable to what is called repositories in other packaging formats. + - Currently, remote addition is only supported using C(flatpakrepo) file URLs. + - Existing remotes are not updated. + - See the M(community.general.flatpak) module for managing flatpaks. +author: + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) +requirements: + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. + type: str + default: flatpak + flatpakrepo_url: + description: + - The URL to the C(flatpakrepo) file representing the repository remote to add. + - When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url) is added using the specified + installation O(method). + - When used with O(state=absent), this is not required. + - Required when O(state=present). + type: str + method: + description: + - The installation method to use. + - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user). + type: str + choices: [system, user] + default: system + name: + description: + - The desired name for the flatpak remote to be registered under on the managed host. + - When used with O(state=present), the remote is added to the managed host under the specified O(name). + - When used with O(state=absent) the remote with that name is removed. + type: str + required: true + state: + description: + - Indicates the desired package state. + type: str + choices: [absent, present] + default: present + enabled: + description: + - Indicates whether this remote is enabled. 
+ type: bool + default: true + version_added: 6.4.0 +""" + +EXAMPLES = r""" +- name: Add the Gnome flatpak remote to the system installation + community.general.flatpak_remote: + name: gnome + state: present + flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo + +- name: Add the flathub flatpak repository remote to the user installation + community.general.flatpak_remote: + name: flathub + state: present + flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + method: user + +- name: Remove the Gnome flatpak remote from the user installation + community.general.flatpak_remote: + name: gnome + state: absent + method: user + +- name: Remove the flathub remote from the system installation + community.general.flatpak_remote: + name: flathub + state: absent + +- name: Disable the flathub remote in the system installation + community.general.flatpak_remote: + name: flathub + state: present + enabled: false +""" + +RETURN = r""" +command: + description: The exact flatpak command that was executed. + returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +def add_remote(module, binary, name, flatpakrepo_url, method): + """Add a new remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remove_remote(module, binary, name, method): + """Remove an existing remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-delete", "--{0}".format(method), "--force", name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_exists(module, binary, name, method): + """Check if the remote exists.""" + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return True + return False + + +def enable_remote(module, binary, name, method): + """Enable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--enable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def disable_remote(module, binary, name, method): + """Disable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--disable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_enabled(module, binary, name, method): + """Check if the remote is enabled.""" + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return len(listed_remote) == 1 or 
"disabled" not in listed_remote[1].split(",") + return False + + +def _flatpak_command(module, noop, command): + global result # pylint: disable=global-variable-not-assigned + result['command'] = ' '.join(command) + if noop: + result['rc'] = 0 + return "" + + result['rc'], result['stdout'], result['stderr'] = module.run_command( + command, check_rc=True + ) + return result['stdout'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + flatpakrepo_url=dict(type='str'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default="present", + choices=['absent', 'present']), + enabled=dict(type='bool', default=True), + executable=dict(type='str', default="flatpak") + ), + # This module supports check mode + supports_check_mode=True, + ) + + name = module.params['name'] + flatpakrepo_url = module.params['flatpakrepo_url'] + method = module.params['method'] + state = module.params['state'] + enabled = module.params['enabled'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + if flatpakrepo_url is None: + flatpakrepo_url = '' + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) + + remote_already_exists = remote_exists(module, binary, to_bytes(name), method) + + if state == 'present' and not remote_already_exists: + add_remote(module, binary, name, flatpakrepo_url, method) + elif state == 'absent' and remote_already_exists: + remove_remote(module, binary, name, method) + + if state == 'present': + remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method) + + if enabled and not remote_already_enabled: + enable_remote(module, binary, name, method) + if not enabled and remote_already_enabled: + disable_remote(module, binary, name, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/flowdock.py b/plugins/modules/flowdock.py deleted file mode 120000 index 8e8e0c48aa..0000000000 --- a/plugins/modules/flowdock.py +++ /dev/null @@ -1 +0,0 @@ -./notification/flowdock.py \ No newline at end of file diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py deleted file mode 120000 index 6a8a82fab7..0000000000 --- a/plugins/modules/gandi_livedns.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/gandi_livedns.py \ No newline at end of file diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py new file mode 100644 index 0000000000..0d6f93529d --- /dev/null +++ b/plugins/modules/gandi_livedns.py @@ -0,0 +1,214 @@ +#!/usr/bin/python + +# Copyright (c) 2019 Gregory Thiemonge +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gandi_livedns +author: + - Gregory Thiemonge (@gthiemonge) +version_added: "2.3.0" +short_description: Manage Gandi LiveDNS records +description: + - 'Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + personal_access_token: + description: + - Scoped API token. 
+      - One of O(personal_access_token) and O(api_key) must be specified.
+    type: str
+    version_added: 9.0.0
+  api_key:
+    description:
+      - Account API token.
+      - Note that these types of keys are deprecated and might stop working at some point. Use personal access tokens instead.
+      - One of O(personal_access_token) and O(api_key) must be specified.
+    type: str
+  record:
+    description:
+      - Record to add.
+    type: str
+    required: true
+  state:
+    description:
+      - Whether the record(s) should exist or not.
+    type: str
+    choices: [absent, present]
+    default: present
+  ttl:
+    description:
+      - The TTL to give the new record.
+      - Required when O(state=present).
+    type: int
+  type:
+    description:
+      - The type of DNS record to create.
+    type: str
+    required: true
+  values:
+    description:
+      - The record values.
+      - Required when O(state=present).
+    type: list
+    elements: str
+  domain:
+    description:
+      - The name of the domain to work with (for example, V(example.com)).
+    required: true
+    type: str
+"""
+
+EXAMPLES = r"""
+- name: Create a test A record to point to 127.0.0.1 in the my.com domain
+  community.general.gandi_livedns:
+    domain: my.com
+    record: test
+    type: A
+    values:
+      - 127.0.0.1
+    ttl: 7200
+    personal_access_token: dummytoken
+  register: record
+
+- name: Create a mail CNAME record to www.my.com domain
+  community.general.gandi_livedns:
+    domain: my.com
+    type: CNAME
+    record: mail
+    values:
+      - www
+    ttl: 7200
+    personal_access_token: dummytoken
+    state: present
+
+- name: Change its TTL
+  community.general.gandi_livedns:
+    domain: my.com
+    type: CNAME
+    record: mail
+    values:
+      - www
+    ttl: 10800
+    personal_access_token: dummytoken
+    state: present
+
+- name: Delete the record
+  community.general.gandi_livedns:
+    domain: my.com
+    type: CNAME
+    record: mail
+    personal_access_token: dummytoken
+    state: absent
+
+- name: Use a (deprecated) API Key
+  community.general.gandi_livedns:
+    domain: my.com
+    record: test
+    type: A
+    values:
+      - 127.0.0.1
+    ttl: 7200
+    api_key: dummyapikey
+"""
+
+RETURN = r"""
+record:
+  description: A dictionary containing the record data.
+  returned: success, except on record deletion
+  type: dict
+  contains:
+    values:
+      description: The record content (details depend on record type).
+      returned: success
+      type: list
+      elements: str
+      sample:
+        - 192.0.2.91
+        - 192.0.2.92
+    record:
+      description: The record name.
+      returned: success
+      type: str
+      sample: www
+    ttl:
+      description: The time-to-live for the record.
+      returned: success
+      type: int
+      sample: 300
+    type:
+      description: The record type.
+      returned: success
+      type: str
+      sample: A
+    domain:
+      description: The domain associated with the record.
+ returned: success + type: str + sample: my.com +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(type='str', no_log=True), + personal_access_token=dict(type='str', no_log=True), + record=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ttl=dict(type='int'), + type=dict(type='str', required=True), + values=dict(type='list', elements='str'), + domain=dict(type='str', required=True), + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['values', 'ttl']), + ], + mutually_exclusive=[ + ('api_key', 'personal_access_token'), + ], + required_one_of=[ + ('api_key', 'personal_access_token'), + ], + ) + + gandi_api = GandiLiveDNSAPI(module) + + if module.params['state'] == 'present': + ret, changed = gandi_api.ensure_dns_record(module.params['record'], + module.params['type'], + module.params['ttl'], + module.params['values'], + module.params['domain']) + else: + ret, changed = gandi_api.delete_dns_record(module.params['record'], + module.params['type'], + module.params['values'], + module.params['domain']) + + result = dict( + changed=changed, + ) + if ret: + result['record'] = gandi_api.build_result(ret, + module.params['domain']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py deleted file mode 120000 index 484a178ea8..0000000000 --- a/plugins/modules/gconftool2.py +++ /dev/null @@ -1 +0,0 @@ -./system/gconftool2.py \ No newline at end of file diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py new file mode 100644 index 0000000000..4092a8b7e6 --- /dev/null +++ b/plugins/modules/gconftool2.py @@ -0,0 +1,177 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Kenneth D. Evensen +# Copyright (c) 2017, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gconftool2 +author: + - Kenneth D. Evensen (@kevensen) +short_description: Edit GNOME Configurations +description: + - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man + pages for more details. +seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + type: str + description: + - A GConf preference key is an element in the GConf repository that corresponds to an application preference. + required: true + value: + type: str + description: + - Preference keys typically have simple values such as strings, integers, or lists of strings and integers. This is + ignored unless O(state=present). + value_type: + type: str + description: + - The type of value being set. This is ignored unless O(state=present). + choices: [bool, float, int, string] + state: + type: str + description: + - The action to take upon the key/value. 
+ required: true + choices: [absent, present] + config_source: + type: str + description: + - Specify a configuration source to use rather than the default path. + direct: + description: + - Access the config database directly, bypassing server. If O(direct) is specified then the O(config_source) must be + specified as well. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Change the widget font to "Serif 12" + community.general.gconftool2: + key: "/desktop/gnome/interface/font_name" + value_type: "string" + value: "Serif 12" +""" + +RETURN = r""" +key: + description: The key specified in the module parameters. + returned: success + type: str + sample: /desktop/gnome/interface/font_name +value_type: + description: The type of the value that was changed. + returned: success + type: str + sample: string +value: + description: + - The value of the preference key after executing the module or V(null) if key is removed. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +previous_value: + description: + - The value of the preference key before executing the module. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +version: + description: Version of gconftool-2. + type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner + + +class GConftool(StateModuleHelper): + diff_params = ('value', ) + output_params = ('key', 'value_type') + facts_params = ('key', 'value_type') + facts_name = 'gconftool2' + module = dict( + argument_spec=dict( + key=dict(type='str', required=True, no_log=False), + value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), + value=dict(type='str'), + state=dict(type='str', required=True, choices=['absent', 'present']), + direct=dict(type='bool', default=False), + config_source=dict(type='str'), + ), + required_if=[ + ('state', 'present', ['value', 'value_type']), + ('direct', True, ['config_source']), + ], + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + if not self.vars.direct and self.vars.config_source is not None: + self.do_raise('If the "config_source" is specified then "direct" must be "true"') + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + def _make_process(self, fail_on_err): + def process(rc, out, err): + if err and fail_on_err: + self.do_raise('gconftool-2 failed with error:\n%s' % err.strip()) + out = out.rstrip() + self.vars.value = None if out == "" else out + return self.vars.value + return process + + def _get(self): + return self.runner("state key", output_process=self._make_process(False)).run(state="get") + + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + 
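+            # Rough sketch, assuming the 'state' arg map in
+            # module_utils/gconftool2.py formats absent as --unset:
+            # the effective command is approximately: gconftool-2 --unset <key>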
ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', None, fact=True) + self.vars._value = None + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value + + +def main(): + GConftool.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py new file mode 100644 index 0000000000..f1047bccee --- /dev/null +++ b/plugins/modules/gconftool2_info.py @@ -0,0 +1,86 @@ +#!/usr/bin/python +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gconftool2_info +author: + - "Alexei Znamensky (@russoz)" +short_description: Retrieve GConf configurations +version_added: 5.1.0 +description: + - This module allows retrieving application preferences from the GConf database, with the help of C(gconftool-2). +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + key: + description: + - The key name for an element in the GConf database. + type: str + required: true +seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + - name: gconf repository (archived) + description: Git repository for the project. It is an archived project, so the repository is read-only. + link: https://gitlab.gnome.org/Archive/gconf +""" + +EXAMPLES = r""" +- name: Get value for a certain key in the database. + community.general.gconftool2_info: + key: /desktop/gnome/background/picture_filename + register: result +""" + +RETURN = r""" +value: + description: + - The value of the property. + returned: success + type: str + sample: Monospace 10 +version: + description: Version of gconftool-2. 
+ type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner + + +class GConftoolInfo(ModuleHelper): + output_params = ['key'] + module = dict( + argument_spec=dict( + key=dict(type='str', required=True, no_log=False), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + def __run__(self): + with self.runner.context(args_order=["state", "key"]) as ctx: + rc, out, err = ctx.run(state="get") + self.vars.value = None if err and not out else out.rstrip() + + +def main(): + GConftoolInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py deleted file mode 120000 index 211608a8f7..0000000000 --- a/plugins/modules/gem.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/gem.py \ No newline at end of file diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py new file mode 100644 index 0000000000..535e420e71 --- /dev/null +++ b/plugins/modules/gem.py @@ -0,0 +1,361 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Johan Wiren +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: gem +short_description: Manage Ruby gems +description: + - Manage installation and uninstallation of Ruby gems. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of the gem to be managed. + required: true + state: + type: str + description: + - The desired state of the gem. V(latest) ensures that the latest version is installed. + required: false + choices: [present, absent, latest] + default: present + gem_source: + type: path + description: + - The path to a local gem used as installation source. + required: false + include_dependencies: + description: + - Whether to include dependencies or not. + required: false + type: bool + default: true + repository: + type: str + description: + - The repository from which the gem is installed. + required: false + aliases: [source] + user_install: + description: + - Install gem in user's local gems cache or for all users. + required: false + type: bool + default: true + executable: + type: path + description: + - Override the path to the gem executable. + required: false + install_dir: + type: path + description: + - Install the gems into a specific directory. These gems are independent from the global installed ones. Specifying + this requires user_install to be false. + required: false + bindir: + type: path + description: + - Install executables into a specific directory. + version_added: 3.3.0 + norc: + type: bool + default: true + description: + - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. + - The default changed from V(false) to V(true) in community.general 6.0.0. + version_added: 3.3.0 + env_shebang: + description: + - Rewrite the shebang line on installed scripts to use /usr/bin/env. 
+ required: false + default: false + type: bool + version: + type: str + description: + - Version of the gem to be installed/removed. + required: false + pre_release: + description: + - Allow installation of pre-release versions of the gem. + required: false + default: false + type: bool + include_doc: + description: + - Install with or without docs. + required: false + default: false + type: bool + build_flags: + type: str + description: + - Allow adding build flags for gem compilation. + required: false + force: + description: + - Force gem to (un-)install, bypassing dependency checks. + required: false + default: false + type: bool +author: + - "Ansible Core Team" + - "Johan Wiren (@johanwiren)" +""" + +EXAMPLES = r""" +- name: Install version 1.0 of vagrant + community.general.gem: + name: vagrant + version: 1.0 + state: present + +- name: Install latest available version of rake + community.general.gem: + name: rake + state: latest + +- name: Install rake version 1.0 from a local gem on disk + community.general.gem: + name: rake + gem_source: /path/to/gems/rake-1.0.gem + state: present +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def get_rubygems_path(module): + if module.params['executable']: + result = module.params['executable'].split(' ') + else: + result = [module.get_bin_path('gem', True)] + return result + + +def get_rubygems_version(module): + if hasattr(get_rubygems_version, "ver"): + return get_rubygems_version.ver + + cmd = get_rubygems_path(module) + ['--version'] + (rc, out, err) = module.run_command(cmd, check_rc=True) + + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) + if not match: + return None + + ver = tuple(int(x) for x in match.groups()) + get_rubygems_version.ver = ver + + return ver + + +def get_rubygems_environ(module): + if module.params['install_dir']: + return {'GEM_HOME': module.params['install_dir']} + return None + + +def get_installed_versions(module, remote=False): + + cmd = get_rubygems_path(module) + cmd.append('query') + cmd.extend(common_opts(module)) + if remote: + cmd.append('--remote') + if module.params['repository']: + cmd.extend(['--source', module.params['repository']]) + cmd.append('-n') + cmd.append('^%s$' % module.params['name']) + + environ = get_rubygems_environ(module) + (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True) + installed_versions = [] + for line in out.splitlines(): + match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line) + if match: + versions = match.group(1) + for version in versions.split(', '): + installed_versions.append(version.split()[0]) + return installed_versions + + +def exists(module): + if module.params['state'] == 'latest': + remoteversions = get_installed_versions(module, remote=True) + if remoteversions: + module.params['version'] = remoteversions[0] + installed_versions = get_installed_versions(module) + if module.params['version']: + if module.params['version'] in installed_versions: + return True + else: + if installed_versions: + return True + return False + + +def common_opts(module): + opts = [] + ver = get_rubygems_version(module) + if module.params['norc'] and ver and ver >= (2, 5, 2): + opts.append('--norc') + return opts + + +def uninstall(module): + + if module.check_mode: + return + cmd = get_rubygems_path(module) + environ = get_rubygems_environ(module) + cmd.append('uninstall') + cmd.extend(common_opts(module)) + if module.params['install_dir']: + cmd.extend(['--install-dir', module.params['install_dir']]) + + if 
module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) + + if module.params['version']: + cmd.extend(['--version', module.params['version']]) + else: + cmd.append('--all') + cmd.append('--executable') + if module.params['force']: + cmd.append('--force') + cmd.append(module.params['name']) + return module.run_command(cmd, environ_update=environ, check_rc=True) + + +def install(module): + + if module.check_mode: + return + + ver = get_rubygems_version(module) + + cmd = get_rubygems_path(module) + cmd.append('install') + cmd.extend(common_opts(module)) + if module.params['version']: + cmd.extend(['--version', module.params['version']]) + if module.params['repository']: + cmd.extend(['--source', module.params['repository']]) + if not module.params['include_dependencies']: + cmd.append('--ignore-dependencies') + else: + if ver and ver < (2, 0, 0): + cmd.append('--include-dependencies') + if module.params['user_install']: + cmd.append('--user-install') + else: + cmd.append('--no-user-install') + if module.params['install_dir']: + cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) + if module.params['pre_release']: + cmd.append('--pre') + if not module.params['include_doc']: + if ver and ver < (2, 0, 0): + cmd.append('--no-rdoc') + cmd.append('--no-ri') + else: + cmd.append('--no-document') + if module.params['env_shebang']: + cmd.append('--env-shebang') + cmd.append(module.params['gem_source']) + if module.params['build_flags']: + cmd.extend(['--', module.params['build_flags']]) + if module.params['force']: + cmd.append('--force') + module.run_command(cmd, check_rc=True) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + executable=dict(type='path'), + gem_source=dict(type='path'), + include_dependencies=dict(default=True, type='bool'), + name=dict(required=True, type='str'), + repository=dict(aliases=['source'], type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'), + user_install=dict(default=True, type='bool'), + install_dir=dict(type='path'), + bindir=dict(type='path'), + norc=dict(type='bool', default=True), + pre_release=dict(default=False, type='bool'), + include_doc=dict(default=False, type='bool'), + env_shebang=dict(default=False, type='bool'), + version=dict(type='str'), + build_flags=dict(type='str'), + force=dict(default=False, type='bool'), + ), + supports_check_mode=True, + mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']], + ) + + if module.params['version'] and module.params['state'] == 'latest': + module.fail_json(msg="Cannot specify version when state=latest") + if module.params['gem_source'] and module.params['state'] == 'latest': + module.fail_json(msg="Cannot maintain state=latest when installing from local source") + if module.params['user_install'] and module.params['install_dir']: + module.fail_json(msg="install_dir requires user_install=false") + + if not module.params['gem_source']: + module.params['gem_source'] = module.params['name'] + + changed = False + + if module.params['state'] in ['present', 'latest']: + if not exists(module): + install(module) + changed = True + elif module.params['state'] == 'absent': + if exists(module): + command_output = uninstall(module) + if command_output is not None and exists(module): + rc, out, err = command_output + module.fail_json( + msg=( + "Failed to uninstall gem '%s': it is still present after 'gem uninstall'. 
" + "This usually happens with default or system gems provided by the OS, " + "which cannot be removed with the gem command." + ) % module.params['name'], + rc=rc, + stdout=out, + stderr=err + ) + else: + changed = True + result = {} + result['name'] = module.params['name'] + result['state'] = module.params['state'] + if module.params['version']: + result['version'] = module.params['version'] + result['changed'] = changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py new file mode 100644 index 0000000000..a7fb3c4fcf --- /dev/null +++ b/plugins/modules/gio_mime.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gio_mime +author: + - "Alexei Znamensky (@russoz)" +short_description: Set default handler for MIME type, for applications using Gnome GIO +version_added: 7.5.0 +description: + - This module allows configuring the default handler for a specific MIME type, to be used by applications built with the + Gnome GIO API. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + mime_type: + description: + - MIME type for which a default handler is set. + type: str + required: true + handler: + description: + - Default handler set for the MIME type. + type: str + required: true +notes: + - This module is a thin wrapper around the C(gio mime) command (and subcommand). + - See man gio(1) for more details. +seealso: + - name: C(gio) command manual page + description: Manual page for the command. + link: https://man.archlinux.org/man/gio.1 + - name: GIO Documentation + description: Reference documentation for the GIO API.. + link: https://docs.gtk.org/gio/ +""" + +EXAMPLES = r""" +- name: Set chrome as the default handler for https + community.general.gio_mime: + mime_type: x-scheme-handler/https + handler: google-chrome.desktop + register: result +""" + +RETURN = r""" +handler: + description: + - The handler set as default. + returned: success + type: str + sample: google-chrome.desktop +version: + description: Version of gio. 
+ type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get + + +class GioMime(ModuleHelper): + output_params = ['handler'] + module = dict( + argument_spec=dict( + mime_type=dict(type='str', required=True), + handler=dict(type='str', required=True), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gio_mime_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + + def __run__(self): + check_mode_return = (0, 'Module executed in check mode', '') + if self.vars.has_changed: + with self.runner.context(args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return) as ctx: + rc, out, err = ctx.run() + self.vars.stdout = out + self.vars.stderr = err + self.vars.set("run_info", ctx.run_info, verbosity=4) + + +def main(): + GioMime.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py deleted file mode 120000 index 68041b95ac..0000000000 --- a/plugins/modules/git_config.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/git_config.py \ No newline at end of file diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py new file mode 100644 index 0000000000..30af5b43fd --- /dev/null +++ b/plugins/modules/git_config.py @@ -0,0 +1,263 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Marius Gedminas +# Copyright (c) 2016, Matthew Gamble +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: git_config +author: + - Matthew Gamble (@djmattyg007) + - Marius Gedminas (@mgedmin) +requirements: ['git'] +short_description: Update git configuration +description: + - The M(community.general.git_config) module changes git configuration by invoking C(git config). This is needed if you + do not want to use M(ansible.builtin.template) for the entire git config file (for example because you need to change + just C(user.email) in C(/etc/.git/config)). Solutions involving M(ansible.builtin.command) are cumbersome or do not work + correctly in check mode. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the setting. + type: str + required: true + repo: + description: + - Path to a git repository for reading and writing values from a specific repo. + type: path + file: + description: + - Path to an adhoc git configuration file to be managed using the V(file) scope. + type: path + version_added: 2.0.0 + scope: + description: + - Specify which scope to read/set values from. + - This is required when setting config values. + - If this is set to V(local), you must also specify the O(repo) parameter. + - If this is set to V(file), you must also specify the O(file) parameter. + - It defaults to system. + choices: ["file", "local", "global", "system"] + type: str + state: + description: + - 'Indicates the setting should be set/unset. 
This parameter has higher precedence than the O(value) parameter: when O(state=absent)
+        and O(value) is defined, O(value) is discarded.'
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+  value:
+    description:
+      - When specifying the name of a single setting, supply a value to set that setting to the given value.
+      - From community.general 11.0.0 on, O(value) is required if O(state=present). To read values, use the M(community.general.git_config_info)
+        module instead.
+    type: str
+  add_mode:
+    description:
+      - Specify if a value should replace the existing value(s) or if the new value should be added alongside other values
+        with the same name.
+      - This option is only relevant when adding/replacing values. If O(state=absent) or values are just read out, this option
+        is not considered.
+    choices: ["add", "replace-all"]
+    type: str
+    default: "replace-all"
+    version_added: 8.1.0
+"""
+
+EXAMPLES = r"""
+- name: Add a setting to ~/.gitconfig
+  community.general.git_config:
+    name: alias.ci
+    scope: global
+    value: commit
+
+- name: Add a setting to ~/.gitconfig
+  community.general.git_config:
+    name: alias.st
+    scope: global
+    value: status
+
+- name: Remove a setting from ~/.gitconfig
+  community.general.git_config:
+    name: alias.ci
+    scope: global
+    state: absent
+
+- name: Add a setting to ~/.gitconfig
+  community.general.git_config:
+    name: core.editor
+    scope: global
+    value: vim
+
+- name: Add a setting system-wide
+  community.general.git_config:
+    name: alias.remotev
+    scope: system
+    value: remote -v
+
+- name: Add a setting to a system scope (default)
+  community.general.git_config:
+    name: alias.diffc
+    value: diff --cached
+
+- name: Add a setting to a system scope (default)
+  community.general.git_config:
+    name: color.ui
+    value: auto
+
+- name: Add several options for the same name
+  community.general.git_config:
+    name: push.pushoption
+    value: "{{ item }}"
+    add_mode: add
+  loop:
+    - merge_request.create
+    - merge_request.draft
+
+- name: Prevent etckeeper from complaining when it is invoked by cron
+  community.general.git_config:
+    name: user.email
+    repo: /etc
+    scope: local
+    value: 'root@{{ ansible_fqdn }}'
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            repo=dict(type='path'),
+            file=dict(type='path'),
+            add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']),
+            scope=dict(type='str', choices=['file', 'local', 'global', 'system']),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            value=dict(),
+        ),
+        required_if=[
+            ('scope', 'local', ['repo']),
+            ('scope', 'file', ['file']),
+            ('state', 'present', ['value']),
+        ],
+        supports_check_mode=True,
+    )
+    git_path = module.get_bin_path('git', True)
+
+    params = module.params
+    # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+    # Set the locale to C to ensure consistent messages.
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+    name = params['name'] or ''
+    unset = params['state'] == 'absent'
+    new_value = params['value'] or ''
+    add_mode = params['add_mode']
+
+    if not unset and not new_value:
+        module.fail_json(msg="If state=present, a value must be specified. 
Use the community.general.git_config_info module to read a config value.") + + scope = determine_scope(params) + cwd = determine_cwd(scope, params) + + base_args = [git_path, "config", "--includes"] + + if scope == 'file': + base_args.append('-f') + base_args.append(params['file']) + elif scope: + base_args.append("--" + scope) + + list_args = list(base_args) + + list_args.append("--get-all") + list_args.append(name) + + (rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False) + + if rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. + module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args)) + + old_values = out.rstrip().splitlines() + + if unset and not out: + module.exit_json(changed=False, msg='no setting to unset') + elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset: + module.exit_json(changed=False, msg="") + + # Until this point, the git config was just read and in case no change is needed, the module has already exited. + + set_args = list(base_args) + if unset: + set_args.append("--unset-all") + set_args.append(name) + else: + set_args.append("--" + add_mode) + set_args.append(name) + set_args.append(new_value) + + if not module.check_mode: + (rc, out, err) = module.run_command(set_args, cwd=cwd, ignore_invalid_cwd=False, expand_user_and_vars=False) + if err: + module.fail_json(rc=rc, msg=err, cmd=set_args) + + if unset: + after_values = [] + elif add_mode == "add": + after_values = old_values + [new_value] + else: + after_values = [new_value] + + module.exit_json( + msg='setting changed', + diff=dict( + before_header=' '.join(set_args), + before=build_diff_value(old_values), + after_header=' '.join(set_args), + after=build_diff_value(after_values), + ), + changed=True + ) + + +def determine_scope(params): + if params['scope']: + return params['scope'] + return 'system' + + +def build_diff_value(value): + if not value: + return "\n" + if len(value) == 1: + return value[0] + "\n" + return value + + +def determine_cwd(scope, params): + if scope == 'local': + return params['repo'] + # Run from root directory to avoid accidentally picking up any local config settings + return "/" + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py new file mode 100644 index 0000000000..b5a15fe94f --- /dev/null +++ b/plugins/modules/git_config_info.py @@ -0,0 +1,182 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Guenther Grill +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: git_config_info +author: + - Guenther Grill (@guenhter) +version_added: 8.1.0 +requirements: ['git'] +short_description: Read git configuration +description: + - The M(community.general.git_config_info) module reads the git configuration by invoking C(git config). +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - The name of the setting to read. + - If not provided, all settings are returned as RV(config_values). + type: str + path: + description: + - Path to a git repository or file for reading values from a specific repo. + - If O(scope) is V(local), this must point to a repository to read from. 
+    - If O(scope) is V(file), this must point to a specific git config file to read from.
+    - Otherwise O(path) is ignored if set.
+    type: path
+  scope:
+    description:
+      - Specify which scope to read values from.
+      - If set to V(global), the global git config is used. O(path) is ignored.
+      - If set to V(system), the system git config is used. O(path) is ignored.
+      - If set to V(local), O(path) must be set to the repo to read from.
+      - If set to V(file), O(path) must be set to the config file to read from.
+    choices: ["global", "system", "local", "file"]
+    default: "system"
+    type: str
+"""
+
+EXAMPLES = r"""
+- name: Read a system-wide config
+  community.general.git_config_info:
+    name: core.editor
+  register: result
+
+- name: Show value of core.editor
+  ansible.builtin.debug:
+    msg: "{{ result.config_value | default('(not set)', true) }}"
+
+- name: Read a global config from ~/.gitconfig
+  community.general.git_config_info:
+    name: alias.remotev
+    scope: global
+
+- name: Read a project-specific config
+  community.general.git_config_info:
+    name: color.ui
+    scope: local
+    path: /etc
+
+- name: Read all global values
+  community.general.git_config_info:
+    scope: global
+
+- name: Read all system-wide values
+  community.general.git_config_info:
+
+- name: Read all values of a specific file
+  community.general.git_config_info:
+    scope: file
+    path: /etc/gitconfig
+"""
+
+RETURN = r"""
+config_value:
+  description: >-
+    When O(name) is set, a string containing the value of the setting given in O(name). If O(name) is not set, empty. If
+    a config key such as V(push.pushoption) has more than one entry, just the first one is returned here.
+  returned: success if O(name) is set
+  type: str
+  sample: "vim"
+
+config_values:
+  description:
+    - This is a dictionary mapping a git configuration setting to a list of its values.
+    - When O(name) is not set, all configuration settings are returned here.
+    - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key is
+      still present, and its value is an empty list.
+  returned: success
+  type: dict
+  sample:
+    core.editor: ["vim"]
+    color.ui: ["auto"]
+    push.pushoption: ["merge_request.create", "merge_request.draft"]
+    alias.remotev: ["remote -v"]
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type="str"),
+            path=dict(type="path"),
+            scope=dict(type="str", default="system", choices=["global", "system", "local", "file"]),
+        ),
+        required_if=[
+            ("scope", "local", ["path"]),
+            ("scope", "file", ["path"]),
+        ],
+        required_one_of=[],
+        supports_check_mode=True,
+    )
+
+    # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+    # Set the locale to C to ensure consistent messages.
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+    name = module.params["name"]
+    path = module.params["path"]
+    scope = module.params["scope"]
+
+    run_cwd = path if scope == "local" else "/"
+    args = build_args(module, name, path, scope)
+
+    (rc, out, err) = module.run_command(args, cwd=run_cwd, expand_user_and_vars=False)
+
+    if rc == 128 and "unable to read config file" in err:
+        # This just means nothing has been set at the given scope
+        pass
+    elif rc >= 2:
+        # If the return code is 1, it just means the option hasn't been set yet, which is fine.
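+        # A return code of 2 or higher is a real error (for example an invalid scope
+        # or an unreadable config file), so fail with git's stderr and the command run.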
+ module.fail_json(rc=rc, msg=err, cmd=" ".join(args)) + + output_lines = out.strip("\0").split("\0") if out else [] + + if name: + first_value = output_lines[0] if output_lines else "" + config_values = {name: output_lines} + module.exit_json(changed=False, msg="", config_value=first_value, config_values=config_values) + else: + config_values = text_to_dict(output_lines) + module.exit_json(changed=False, msg="", config_value="", config_values=config_values) + + +def build_args(module, name, path, scope): + git_path = module.get_bin_path("git", True) + args = [git_path, "config", "--includes", "--null", "--" + scope] + + if scope == "file": + args.append(path) + + if name: + args.extend(["--get-all", name]) + else: + args.append("--list") + + return args + + +def text_to_dict(text_lines): + config_values = {} + for value in text_lines: + k, v = value.split("\n", 1) + if k in config_values: + config_values[k].append(v) + else: + config_values[k] = [v] + return config_values + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py deleted file mode 120000 index 55238dbe44..0000000000 --- a/plugins/modules/github_deploy_key.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_deploy_key.py \ No newline at end of file diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py new file mode 100644 index 0000000000..799ee300c5 --- /dev/null +++ b/plugins/modules/github_deploy_key.py @@ -0,0 +1,353 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: github_deploy_key +author: "Ali (@bincyber)" +short_description: Manages deploy keys for GitHub repositories +description: + - Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, username and + password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin rights on the repository + are required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + github_url: + description: + - The base URL of the GitHub API. + required: false + type: str + version_added: '0.2.0' + default: https://api.github.com + owner: + description: + - The name of the individual account or organization that owns the GitHub repository. + required: true + aliases: ['account', 'organization'] + type: str + repo: + description: + - The name of the GitHub repository. + required: true + aliases: ['repository'] + type: str + name: + description: + - The name for the deploy key. + required: true + aliases: ['title', 'label'] + type: str + key: + description: + - The SSH public key to add to the repository as a deploy key. + required: true + type: str + read_only: + description: + - If V(true), the deploy key is only able to read repository contents. Otherwise, the deploy key is able to read and + write. + type: bool + default: true + state: + description: + - The state of the deploy key. + default: "present" + choices: ["present", "absent"] + type: str + force: + description: + - If V(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. 
+ type: bool + default: false + username: + description: + - The username to authenticate with. Should not be set when using personal access token. + type: str + password: + description: + - The password to authenticate with. Alternatively, a personal access token can be used instead of O(username) and O(password) + combination. + type: str + token: + description: + - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with O(password). + type: str + otp: + description: + - The 6 digit One Time Password for 2-Factor Authentication. Required together with O(username) and O(password). + type: int +notes: + - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." +""" + +EXAMPLES = r""" +- name: Add a new read-only deploy key to a GitHub repository using basic authentication + community.general.github_deploy_key: + owner: "johndoe" + repo: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + read_only: true + username: "johndoe" + password: "supersecretpassword" + +- name: Remove an existing deploy key from a GitHub repository + community.general.github_deploy_key: + owner: "johndoe" + repository: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + force: true + username: "johndoe" + password: "supersecretpassword" + state: absent + +- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate + community.general.github_deploy_key: + owner: "johndoe" + repository: "example" + name: "new-deploy-key" + key: "{{ lookup('file', '~/.ssh/github.pub') }}" + force: true + token: "ABAQDAwXxn7kIMNWzcDfo..." + +- name: Re-add a deploy key to a GitHub repository but with a different name + community.general.github_deploy_key: + owner: "johndoe" + repository: "example" + name: "replace-deploy-key" + key: "{{ lookup('file', '~/.ssh/github.pub') }}" + username: "johndoe" + password: "supersecretpassword" + +- name: Add a new deploy key to a GitHub repository using 2FA + community.general.github_deploy_key: + owner: "johndoe" + repo: "example" + name: "new-deploy-key-2" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + username: "johndoe" + password: "supersecretpassword" + otp: 123456 + +- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise + community.general.github_deploy_key: + github_url: "https://api.example.com" + owner: "janedoe" + repo: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + read_only: true + username: "janedoe" + password: "supersecretpassword" +""" + +RETURN = r""" +msg: + description: The status message describing what occurred. + returned: always + type: str + sample: "Deploy key added successfully" + +http_status_code: + description: The HTTP status code returned by the GitHub API. + returned: failed + type: int + sample: 400 + +error: + description: The error message returned by the GitHub API. + returned: failed + type: str + sample: "key is already in use" + +id: + description: The key identifier assigned by GitHub for the deploy key. 
+ returned: changed + type: int + sample: 24381901 +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from re import findall + + +class GithubDeployKey(object): + def __init__(self, module): + self.module = module + + self.github_url = self.module.params['github_url'] + self.name = module.params['name'] + self.key = module.params['key'] + self.state = module.params['state'] + self.read_only = module.params.get('read_only', True) + self.force = module.params.get('force', False) + self.username = module.params.get('username', None) + self.password = module.params.get('password', None) + self.token = module.params.get('token', None) + self.otp = module.params.get('otp', None) + + @property + def url(self): + owner = self.module.params['owner'] + repo = self.module.params['repo'] + return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo) + + @property + def headers(self): + if self.username is not None and self.password is not None: + self.module.params['url_username'] = self.username + self.module.params['url_password'] = self.password + self.module.params['force_basic_auth'] = True + if self.otp is not None: + return {"X-GitHub-OTP": self.otp} + elif self.token is not None: + return {"Authorization": "token {0}".format(self.token)} + else: + return None + + def paginate(self, url): + while url: + resp, info = fetch_url(self.module, url, headers=self.headers, method="GET") + + if info["status"] == 200: + yield self.module.from_json(resp.read()) + + links = {} + for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", '')): + links[y] = x + + url = links.get('next') + else: + self.handle_error(method="GET", info=info) + + def get_existing_key(self): + for keys in self.paginate(self.url): + if keys: + for i in keys: + existing_key_id = str(i["id"]) + if i["key"].split() == self.key.split()[:2]: + return existing_key_id + elif i['title'] == self.name and self.force: + return existing_key_id + else: + return None + + def add_new_key(self): + request_body = {"title": self.name, "key": self.key, "read_only": self.read_only} + + resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30) + + status_code = info["status"] + + if status_code == 201: + response_body = self.module.from_json(resp.read()) + key_id = response_body["id"] + self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) + elif status_code == 422: + # there might be multiple reasons for a 422 + # so we must check if the reason is that the key already exists + if self.get_existing_key(): + self.module.exit_json(changed=False, msg="Deploy key already exists") + else: + self.handle_error(method="POST", info=info) + else: + self.handle_error(method="POST", info=info) + + def remove_existing_key(self, key_id): + resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE") + + status_code = info["status"] + + if status_code == 204: + if self.state == 'absent': + self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id) + else: + self.handle_error(method="DELETE", info=info, key_id=key_id) + + def handle_error(self, method, info, key_id=None): + status_code = info['status'] + body = info.get('body') + if body: + err = self.module.from_json(body)['message'] + else: + err = None + + if status_code == 401: + self.module.fail_json(msg="Failed to connect to {0} due to invalid 
credentials".format(self.github_url), http_status_code=status_code, error=err) + elif status_code == 404: + self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err) + else: + if method == "GET": + self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err) + elif method == "POST": + self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err) + elif method == "DELETE": + self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + github_url=dict(type='str', default="https://api.github.com"), + owner=dict(required=True, type='str', aliases=['account', 'organization']), + repo=dict(required=True, type='str', aliases=['repository']), + name=dict(required=True, type='str', aliases=['title', 'label']), + key=dict(required=True, type='str', no_log=False), + read_only=dict(type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + force=dict(type='bool', default=False), + username=dict(type='str'), + password=dict(type='str', no_log=True), + otp=dict(type='int', no_log=True), + token=dict(type='str', no_log=True) + ), + mutually_exclusive=[ + ['password', 'token'] + ], + required_together=[ + ['username', 'password'], + ['otp', 'username', 'password'] + ], + required_one_of=[ + ['username', 'token'] + ], + supports_check_mode=True, + ) + + deploy_key = GithubDeployKey(module) + + if module.check_mode: + key_id = deploy_key.get_existing_key() + if deploy_key.state == "present" and key_id is None: + module.exit_json(changed=True) + elif deploy_key.state == "present" and key_id is not None: + module.exit_json(changed=False) + + # to forcefully modify an existing key, the existing key must be deleted first + if deploy_key.state == 'absent' or deploy_key.force: + key_id = deploy_key.get_existing_key() + + if key_id is not None: + deploy_key.remove_existing_key(key_id) + elif deploy_key.state == 'absent': + module.exit_json(changed=False, msg="Deploy key does not exist") + + if deploy_key.state == "present": + deploy_key.add_new_key() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/github_issue.py b/plugins/modules/github_issue.py deleted file mode 120000 index 12ebc919eb..0000000000 --- a/plugins/modules/github_issue.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_issue.py \ No newline at end of file diff --git a/plugins/modules/github_issue.py b/plugins/modules/github_issue.py new file mode 100644 index 0000000000..2923917eec --- /dev/null +++ b/plugins/modules/github_issue.py @@ -0,0 +1,122 @@ +#!/usr/bin/python + +# Copyright (c) 2017-18, Abhijeet Kasurde +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: github_issue +short_description: View GitHub issue +description: + - View GitHub issue for a given repository and organization. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repo: + description: + - Name of repository from which issue needs to be retrieved. + required: true + type: str + organization: + description: + - Name of the GitHub organization in which the repository is hosted. 
+ required: true + type: str + issue: + description: + - Issue number for which information is required. + required: true + type: int + action: + description: + - Get various details about issue depending upon action specified. + default: 'get_status' + choices: + - get_status + type: str +author: + - Abhijeet Kasurde (@Akasurde) +""" + +RETURN = r""" +issue_status: + description: State of the GitHub issue. + type: str + returned: success + sample: open, closed +""" + +EXAMPLES = r""" +- name: Check if GitHub issue is closed or not + community.general.github_issue: + organization: ansible + repo: ansible + issue: 23642 + action: get_status + register: r + +- name: Take action depending upon issue status + ansible.builtin.debug: + msg: Do something when issue 23642 is open + when: r.issue_status == 'open' +""" + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def main(): + module = AnsibleModule( + argument_spec=dict( + organization=dict(required=True), + repo=dict(required=True), + issue=dict(type='int', required=True), + action=dict(choices=['get_status'], default='get_status'), + ), + supports_check_mode=True, + ) + + organization = module.params['organization'] + repo = module.params['repo'] + issue = module.params['issue'] + action = module.params['action'] + + result = dict() + + headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/vnd.github.v3+json', + } + + url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue) + + response, info = fetch_url(module, url, headers=headers) + if not (200 <= info['status'] < 400): + if info['status'] == 404: + module.fail_json(msg="Failed to find issue %s" % issue) + module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg'])) + + gh_obj = json.loads(response.read()) + + if action == 'get_status' or action is None: + if module.check_mode: + result.update(changed=True) + else: + result.update(changed=True, issue_status=gh_obj['state']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py deleted file mode 120000 index a5ef4e8b88..0000000000 --- a/plugins/modules/github_key.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_key.py \ No newline at end of file diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py new file mode 100644 index 0000000000..957d130774 --- /dev/null +++ b/plugins/modules/github_key.py @@ -0,0 +1,297 @@ +#!/usr/bin/python + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: github_key +short_description: Manage GitHub access keys +description: + - Creates, removes, or updates GitHub access keys. + - Works with both GitHub.com and GitHub Enterprise Server installations. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + description: + - GitHub Access Token with permission to list and create public keys. + required: true + type: str + name: + description: + - SSH key name. + required: true + type: str + pubkey: + description: + - SSH public key value. Required when O(state=present). 
+ type: str + state: + description: + - Whether to remove a key, ensure that it exists, or update its value. + choices: ['present', 'absent'] + default: 'present' + type: str + force: + description: + - The default is V(true), which replaces the existing remote key if it is different than O(pubkey). If V(false), the + key is only set if no key with the given O(name) exists. + type: bool + default: true + api_url: + description: + - URL to the GitHub API if not using github.com but your own GitHub Enterprise instance. + type: str + default: 'https://api.github.com' + version_added: "11.0.0" + +author: Robert Estelle (@erydo) +""" + +RETURN = r""" +deleted_keys: + description: An array of key objects that were deleted. Only present on state=absent. + type: list + returned: When state=absent + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] +matching_keys: + description: An array of keys matching the specified name. Only present on state=present. + type: list + returned: When state=present + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] +key: + description: Metadata about the key just created. Only present on state=present. + type: dict + returned: success + sample: + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } +""" + +EXAMPLES = r""" +- name: Read SSH public key to authorize + ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub + register: ssh_pub_key + +- name: Authorize key with GitHub + local_action: + module: github_key + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: '{{ ssh_pub_key.stdout }}' + +# Alternatively, a single task can be used reading a key from a file on the controller +- name: Authorize key with GitHub + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" + +# GitHub Enterprise Server usage +- name: Authorize key with GitHub Enterprise + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_enterprise_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" + api_url: 'https://github.company.com/api/v3' +""" + +import datetime +import json +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + + +class GitHubResponse(object): + def __init__(self, response, info): + self.content = response.read() + self.info = info + + def json(self): + return json.loads(self.content) + + def links(self): + links = {} + if 'link' in self.info: + link_header = self.info['link'] + matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header) + for url, rel in matches: + links[rel] = url + return links + + +class GitHubSession(object): + def __init__(self, module, token, api_url): + self.module = module + self.token = token + self.api_url = api_url.rstrip('/') + + def request(self, method, url, data=None): + headers = { + 'Authorization': 'token %s' % self.token, + 'Content-Type': 'application/json', + 'Accept': 'application/vnd.github.v3+json', + } + response, info = 
fetch_url( + self.module, url, method=method, data=data, headers=headers) + if not (200 <= info['status'] < 400): + self.module.fail_json( + msg=(" failed to send request %s to %s: %s" + % (method, url, info['msg']))) + return GitHubResponse(response, info) + + +def get_all_keys(session): + url = session.api_url + '/user/keys' + result = [] + while url: + r = session.request('GET', url) + result.extend(r.json()) + url = r.links().get('next') + return result + + +def create_key(session, name, pubkey, check_mode): + if check_mode: + now_t = now() + return { + 'id': 0, + 'key': pubkey, + 'title': name, + 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', + 'created_at': datetime.datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), + 'read_only': False, + 'verified': False + } + else: + return session.request( + 'POST', + session.api_url + '/user/keys', + data=json.dumps({'title': name, 'key': pubkey})).json() + + +def delete_keys(session, to_delete, check_mode): + if check_mode: + return + + for key in to_delete: + session.request('DELETE', session.api_url + '/user/keys/%s' % key["id"]) + + +def ensure_key_absent(session, name, check_mode): + to_delete = [key for key in get_all_keys(session) if key['title'] == name] + delete_keys(session, to_delete, check_mode=check_mode) + + return {'changed': bool(to_delete), + 'deleted_keys': to_delete} + + +def ensure_key_present(module, session, name, pubkey, force, check_mode): + all_keys = get_all_keys(session) + matching_keys = [k for k in all_keys if k['title'] == name] + deleted_keys = [] + + new_signature = pubkey.split(' ')[1] + for key in all_keys: + existing_signature = key['key'].split(' ')[1] + if new_signature == existing_signature and key['title'] != name: + module.fail_json(msg=( + "another key with the same content is already registered " + "under the name |{0}|").format(key['title'])) + + if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature: + delete_keys(session, matching_keys, check_mode=check_mode) + (deleted_keys, matching_keys) = (matching_keys, []) + + if not matching_keys: + key = create_key(session, name, pubkey, check_mode=check_mode) + else: + key = matching_keys[0] + + return { + 'changed': bool(deleted_keys or not matching_keys), + 'deleted_keys': deleted_keys, + 'matching_keys': matching_keys, + 'key': key + } + + +def main(): + argument_spec = { + 'token': {'required': True, 'no_log': True}, + 'name': {'required': True}, + 'pubkey': {}, + 'state': {'choices': ['present', 'absent'], 'default': 'present'}, + 'force': {'default': True, 'type': 'bool'}, + 'api_url': {'default': 'https://api.github.com', 'type': 'str'}, + } + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + token = module.params['token'] + name = module.params['name'] + state = module.params['state'] + force = module.params['force'] + pubkey = module.params.get('pubkey') + api_url = module.params.get('api_url') + + if pubkey: + pubkey_parts = pubkey.split(' ') + # Keys consist of a protocol, the key data, and an optional comment. 
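+        # For example, "ssh-ed25519 AAAAC3NzaC1... user@host" splits into at least two fields.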
+ if len(pubkey_parts) < 2: + module.fail_json(msg='"pubkey" parameter has an invalid format') + elif state == 'present': + module.fail_json(msg='"pubkey" is required when state=present') + + session = GitHubSession(module, token, api_url) + if state == 'present': + result = ensure_key_present(module, session, name, pubkey, force=force, + check_mode=module.check_mode) + elif state == 'absent': + result = ensure_key_absent(session, name, check_mode=module.check_mode) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py deleted file mode 120000 index ecd144cfe9..0000000000 --- a/plugins/modules/github_release.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_release.py \ No newline at end of file diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py new file mode 100644 index 0000000000..933b9c8bd1 --- /dev/null +++ b/plugins/modules/github_release.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# +# Copyright Ansible Team +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: github_release +short_description: Interact with GitHub Releases +description: + - Fetch metadata about GitHub Releases. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + description: + - GitHub Personal Access Token for authenticating. Mutually exclusive with O(password). + type: str + user: + description: + - The GitHub account that owns the repository. + type: str + required: true + password: + description: + - The GitHub account password for the user. Mutually exclusive with O(token). + type: str + repo: + description: + - Repository name. + type: str + required: true + action: + description: + - Action to perform. + type: str + required: true + choices: ['latest_release', 'create_release'] + tag: + description: + - Tag name when creating a release. Required when using O(action=create_release). + type: str + target: + description: + - Target of release when creating a release. + type: str + name: + description: + - Name of release when creating a release. + type: str + body: + description: + - Description of the release when creating a release. + type: str + draft: + description: + - Sets if the release is a draft or not. (boolean). + type: bool + default: false + prerelease: + description: + - Sets if the release is a prerelease or not. (boolean). 
+    type: bool
+    default: false
+
+author:
+  - "Adrian Moisey (@adrianmoisey)"
+requirements:
+  - "github3.py >= 1.0.0a3"
+"""
+
+EXAMPLES = r"""
+- name: Get latest release of a public repository
+  community.general.github_release:
+    user: ansible
+    repo: ansible
+    action: latest_release
+
+- name: Get latest release of testuser/testrepo
+  community.general.github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: latest_release
+
+- name: Get latest release of test repo using username and password
+  community.general.github_release:
+    user: testuser
+    password: secret123
+    repo: testrepo
+    action: latest_release
+
+- name: Create a new release
+  community.general.github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: create_release
+    tag: test
+    target: master
+    name: My Release
+    body: Some description
+"""
+
+RETURN = r"""
+tag:
+  description: Version of the created/latest release.
+  type: str
+  returned: success
+  sample: 1.1.0
+"""
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+    import github3
+
+    HAS_GITHUB_API = True
+except ImportError:
+    GITHUB_IMP_ERR = traceback.format_exc()
+    HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            repo=dict(required=True),
+            user=dict(required=True),
+            password=dict(no_log=True),
+            token=dict(no_log=True),
+            action=dict(
+                required=True, choices=['latest_release', 'create_release']),
+            tag=dict(type='str'),
+            target=dict(type='str'),
+            name=dict(type='str'),
+            body=dict(type='str'),
+            draft=dict(type='bool', default=False),
+            prerelease=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=(('password', 'token'),),
+        required_if=[('action', 'create_release', ['tag']),
+                     ('action', 'create_release', ['password', 'token'], True)],
+    )
+
+    if not HAS_GITHUB_API:
+        module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+                         exception=GITHUB_IMP_ERR)
+
+    repo = module.params['repo']
+    user = module.params['user']
+    password = module.params['password']
+    login_token = module.params['token']
+    action = module.params['action']
+    tag = module.params.get('tag')
+    target = module.params.get('target')
+    name = module.params.get('name')
+    body = module.params.get('body')
+    draft = module.params.get('draft')
+    prerelease = module.params.get('prerelease')
+
+    # login to github
+    try:
+        if password:
+            gh_obj = github3.login(user, password=password)
+        elif login_token:
+            gh_obj = github3.login(token=login_token)
+        else:
+            gh_obj = github3.GitHub()
+
+        # GitHub's token formats:
+        # - ghp_ - Personal access token (classic)
+        # - github_pat_ - Fine-grained personal access token
+        # - gho_ - OAuth access token
+        # - ghu_ - User access token for a GitHub App
+        # - ghs_ - Installation access token for a GitHub App
+        # - ghr_ - Refresh token for a GitHub App
+        #
+        # References:
+        # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats
+        #
+        # Test if we're actually logged in, but skip this check for some token prefixes
+        SKIPPED_TOKEN_PREFIXES = ['ghs_']
+        if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)):
+            gh_obj.me()
+    except github3.exceptions.AuthenticationFailed as e:
+        module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
details="Please check username and password or token " + "for repository %s" % repo) + except github3.exceptions.GitHubError as e: + module.fail_json(msg='GitHub API error: %s' % to_native(e), + details="Please check username and password or token " + "for repository %s" % repo) + + repository = gh_obj.repository(user, repo) + + if not repository: + module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo)) + + if action == 'latest_release': + release = repository.latest_release() + if release: + module.exit_json(tag=release.tag_name) + else: + module.exit_json(tag=None) + + if action == 'create_release': + release_exists = repository.release_from_tag(tag) + if release_exists: + module.exit_json(changed=False, msg="Release for tag %s already exists." % tag) + + release = repository.create_release( + tag, target, name, body, draft, prerelease) + if release: + module.exit_json(changed=True, tag=release.tag_name) + else: + module.exit_json(changed=False, tag=None) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py deleted file mode 120000 index ef55c25c2f..0000000000 --- a/plugins/modules/github_repo.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_repo.py \ No newline at end of file diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py new file mode 100644 index 0000000000..601bea71fd --- /dev/null +++ b/plugins/modules/github_repo.py @@ -0,0 +1,278 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Álvaro Torres Cogollo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: github_repo +short_description: Manage your repositories on Github +version_added: 2.2.0 +description: + - Manages Github repositories using PyGithub library. + - Authentication can be done with O(access_token) or with O(username) and O(password). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + username: + description: + - Username used for authentication. + - This is only needed when not using O(access_token). + type: str + required: false + password: + description: + - Password used for authentication. + - This is only needed when not using O(access_token). + type: str + required: false + access_token: + description: + - Token parameter for authentication. + - This is only needed when not using O(username) and O(password). + type: str + required: false + name: + description: + - Repository name. + type: str + required: true + description: + description: + - Description for the repository. + - Defaults to empty if O(force_defaults=true), which is the default in this module. + - Defaults to empty if O(force_defaults=false) when creating a new repository. + - This is only used when O(state) is V(present). + type: str + required: false + private: + description: + - Whether the repository should be private or not. + - Defaults to V(false) if O(force_defaults=true), which is the default in this module. + - Defaults to V(false) if O(force_defaults=false) when creating a new repository. + - This is only used when O(state=present). + type: bool + required: false + state: + description: + - Whether the repository should exist or not. 
+    type: str
+    default: present
+    choices: [absent, present]
+    required: false
+  organization:
+    description:
+      - Organization for the repository.
+      - When O(state=present), the repository is created in the current user profile.
+    type: str
+    required: false
+  api_url:
+    description:
+      - URL to the GitHub API if not using github.com but your own instance.
+    type: str
+    default: 'https://api.github.com'
+    version_added: "3.5.0"
+  force_defaults:
+    description:
+      - If V(true), overwrite current O(description) and O(private) attributes with defaults.
+      - V(true) is deprecated for this option and will not be allowed starting in community.general 13.0.0. V(false) will be the default value then.
+    type: bool
+    required: false
+    version_added: 4.1.0
+requirements:
+  - PyGithub>=1.54
+notes:
+  - For Python 3, PyGithub>=1.54 should be used.
+author:
+  - Álvaro Torres Cogollo (@atorrescogollo)
+"""
+
+EXAMPLES = r"""
+- name: Create a GitHub repository
+  community.general.github_repo:
+    access_token: mytoken
+    organization: MyOrganization
+    name: myrepo
+    description: "Just for fun"
+    private: true
+    state: present
+    force_defaults: false
+  register: result
+
+- name: Delete the repository
+  community.general.github_repo:
+    username: octocat
+    password: password
+    organization: MyOrganization
+    name: myrepo
+    state: absent
+  register: result
+"""
+
+RETURN = r"""
+repo:
+  description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).
+  returned: success and O(state=present)
+  type: dict
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+GITHUB_IMP_ERR = None
+try:
+    from github import Github, GithubException, GithubObject
+    from github.GithubException import UnknownObjectException
+    HAS_GITHUB_PACKAGE = True
+except Exception:
+    GITHUB_IMP_ERR = traceback.format_exc()
+    HAS_GITHUB_PACKAGE = False
+
+
+def authenticate(username=None, password=None, access_token=None, api_url=None):
+    if not api_url:
+        return None
+
+    if access_token:
+        return Github(base_url=api_url, login_or_token=access_token)
+    else:
+        return Github(base_url=api_url, login_or_token=username, password=password)
+
+
+def create_repo(gh, name, organization=None, private=None, description=None, check_mode=False):
+    result = dict(
+        changed=False,
+        repo=dict())
+    if organization:
+        target = gh.get_organization(organization)
+    else:
+        target = gh.get_user()
+
+    repo = None
+    try:
+        repo = target.get_repo(name=name)
+        result['repo'] = repo.raw_data
+    except UnknownObjectException:
+        if not check_mode:
+            repo = target.create_repo(
+                name=name,
+                private=GithubObject.NotSet if private is None else private,
+                description=GithubObject.NotSet if description is None else description,
+            )
+            result['repo'] = repo.raw_data
+
+        result['changed'] = True
+
+    changes = {}
+    if private is not None:
+        if repo is None or repo.raw_data['private'] != private:
+            changes['private'] = private
+    if description is not None:
+        if repo is None or repo.raw_data['description'] not in (description, description or None):
+            changes['description'] = description
+
+    if changes:
+        if not check_mode:
+            repo.edit(**changes)
+
+            result['repo'].update({
+                'private': repo._private.value if not check_mode else private,
+                'description': repo._description.value if not check_mode else description,
+            })
+        result['changed'] = True
+
+    return result
+
+
+def delete_repo(gh, name, organization=None, check_mode=False):
+    result = dict(changed=False)
+    if organization:
+        target = 
gh.get_organization(organization) + else: + target = gh.get_user() + try: + repo = target.get_repo(name=name) + if not check_mode: + repo.delete() + result['changed'] = True + except UnknownObjectException: + pass + + return result + + +def run_module(params, check_mode=False): + if params['force_defaults']: + params['description'] = params['description'] or '' + params['private'] = params['private'] or False + + gh = authenticate( + username=params['username'], password=params['password'], access_token=params['access_token'], + api_url=params['api_url']) + if params['state'] == "absent": + return delete_repo( + gh=gh, + name=params['name'], + organization=params['organization'], + check_mode=check_mode + ) + else: + return create_repo( + gh=gh, + name=params['name'], + organization=params['organization'], + private=params['private'], + description=params['description'], + check_mode=check_mode + ) + + +def main(): + module_args = dict( + username=dict(type='str'), + password=dict(type='str', no_log=True), + access_token=dict(type='str', no_log=True), + name=dict(type='str', required=True), + state=dict(type='str', default="present", + choices=["present", "absent"]), + organization=dict(type='str', ), + private=dict(type='bool'), + description=dict(type='str'), + api_url=dict(type='str', default='https://api.github.com'), + force_defaults=dict(type='bool'), + ) + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_together=[('username', 'password')], + required_one_of=[('username', 'access_token')], + mutually_exclusive=[('username', 'access_token')] + ) + + if module.params['force_defaults'] is None: + module.deprecate("'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead", + version="13.0.0", collection_name="community.general") + module.params['force_defaults'] = True + + if not HAS_GITHUB_PACKAGE: + module.fail_json(msg=missing_required_lib( + "PyGithub"), exception=GITHUB_IMP_ERR) + + try: + result = run_module(module.params, module.check_mode) + module.exit_json(**result) + except GithubException as e: + module.fail_json(msg="Github error. {0}".format(repr(e))) + except Exception as e: + module.fail_json(msg="Unexpected error. {0}".format(repr(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py deleted file mode 120000 index f27d450d1c..0000000000 --- a/plugins/modules/github_webhook.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_webhook.py \ No newline at end of file diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py new file mode 100644 index 0000000000..867bfc380e --- /dev/null +++ b/plugins/modules/github_webhook.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: github_webhook +short_description: Manage GitHub webhooks +description: + - Create and delete GitHub webhooks. +requirements: + - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + repository: + description: + - Full name of the repository to configure a hook for. 
+ type: str + required: true + aliases: + - repo + url: + description: + - URL to which payloads are delivered. + type: str + required: true + content_type: + description: + - The media type used to serialize the payloads. + type: str + required: false + choices: [form, json] + default: form + secret: + description: + - The shared secret between GitHub and the payload URL. + type: str + required: false + insecure_ssl: + description: + - Flag to indicate that GitHub should skip SSL verification when calling the hook. + required: false + type: bool + default: false + events: + description: + - A list of GitHub events the hook is triggered for. Events are listed at U(https://developer.github.com/v3/activity/events/types/). + Required unless O(state=absent). + required: false + type: list + elements: str + active: + description: + - Whether or not the hook is active. + required: false + type: bool + default: true + state: + description: + - Whether the hook should be present or absent. + type: str + required: false + choices: [absent, present] + default: present + user: + description: + - User to authenticate to GitHub as. + type: str + required: true + password: + description: + - Password to authenticate to GitHub with. + type: str + required: false + token: + description: + - Token to authenticate to GitHub with. + type: str + required: false + github_url: + description: + - Base URL of the GitHub API. + type: str + required: false + default: https://api.github.com + +author: + - "Chris St. Pierre (@stpierre)" +""" + +EXAMPLES = r""" +- name: Create a new webhook that triggers on push (password auth) + community.general.github_webhook: + repository: ansible/ansible + url: https://www.example.com/hooks/ + events: + - push + user: "{{ github_user }}" + password: "{{ github_password }}" + +- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth) + community.general.github_webhook: + repository: myorg/myrepo + url: https://jenkins.example.com/ghprbhook/ + content_type: json + secret: "{{ github_shared_secret }}" + insecure_ssl: true + events: + - issue_comment + - pull_request + user: "{{ github_user }}" + token: "{{ github_user_api_token }}" + github_url: https://github.example.com + +- name: Delete a webhook (password auth) + community.general.github_webhook: + repository: ansible/ansible + url: https://www.example.com/hooks/ + state: absent + user: "{{ github_user }}" + password: "{{ github_password }}" +""" + +RETURN = r""" +hook_id: + description: The GitHub ID of the hook created/updated. 
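+ # Note (from the matching loop in main() below): hooks are identified by their payload URL, so changing the url option creates a new hook rather than updating the existing one.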
+ returned: when state is 'present' + type: int + sample: 6206 +""" + +import traceback + +GITHUB_IMP_ERR = None +try: + import github + HAS_GITHUB = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def _create_hook_config(module): + hook_config = { + "url": module.params["url"], + "content_type": module.params["content_type"], + "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" + } + + secret = module.params.get("secret") + if secret: + hook_config["secret"] = secret + + return hook_config + + +def create_hook(repo, module): + config = _create_hook_config(module) + try: + hook = repo.create_hook( + name="web", + config=config, + events=module.params["events"], + active=module.params["active"]) + except github.GithubException as err: + module.fail_json(msg="Unable to create hook for repository %s: %s" % ( + repo.full_name, to_native(err))) + + data = {"hook_id": hook.id} + return True, data + + +def update_hook(repo, hook, module): + config = _create_hook_config(module) + try: + hook.update() + hook.edit( + name="web", + config=config, + events=module.params["events"], + active=module.params["active"]) + + changed = hook.update() + except github.GithubException as err: + module.fail_json(msg="Unable to modify hook for repository %s: %s" % ( + repo.full_name, to_native(err))) + + data = {"hook_id": hook.id} + return changed, data + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repository=dict(type='str', required=True, aliases=['repo']), + url=dict(type='str', required=True), + content_type=dict(type='str', choices=('json', 'form'), default='form'), + secret=dict(type='str', no_log=True), + insecure_ssl=dict(type='bool', default=False), + events=dict(type='list', elements='str', ), + active=dict(type='bool', default=True), + state=dict(type='str', choices=('absent', 'present'), default='present'), + user=dict(type='str', required=True), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + github_url=dict(type='str', default="https://api.github.com")), + mutually_exclusive=(('password', 'token'),), + required_one_of=(("password", "token"),), + required_if=(("state", "present", ("events",)),), + ) + + if not HAS_GITHUB: + module.fail_json(msg=missing_required_lib('PyGithub'), + exception=GITHUB_IMP_ERR) + + try: + github_conn = github.Github( + module.params["user"], + module.params.get("password") or module.params.get("token"), + base_url=module.params["github_url"]) + except github.GithubException as err: + module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + + try: + repo = github_conn.get_repo(module.params["repository"]) + except github.BadCredentialsException as err: + module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + except github.UnknownObjectException as err: + module.fail_json( + msg="Could not find repository %s in GitHub at %s: %s" % ( + module.params["repository"], module.params["github_url"], + to_native(err))) + except Exception as err: + module.fail_json( + msg="Could not fetch repository %s from GitHub at %s: %s" % + (module.params["repository"], module.params["github_url"], + to_native(err)), + exception=traceback.format_exc()) + + hook = None + try: + for hook in repo.get_hooks(): + if 
hook.config.get("url") == module.params["url"]: + break + else: + hook = None + except github.GithubException as err: + module.fail_json(msg="Unable to get hooks from repository %s: %s" % ( + module.params["repository"], to_native(err))) + + changed = False + data = {} + if hook is None and module.params["state"] == "present": + changed, data = create_hook(repo, module) + elif hook is not None and module.params["state"] == "absent": + try: + hook.delete() + except github.GithubException as err: + module.fail_json( + msg="Unable to delete hook from repository %s: %s" % ( + repo.full_name, to_native(err))) + else: + changed = True + elif hook is not None and module.params["state"] == "present": + changed, data = update_hook(repo, hook, module) + # else, there is no hook and we want there to be no hook + + module.exit_json(changed=changed, **data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py deleted file mode 120000 index cea3106c6d..0000000000 --- a/plugins/modules/github_webhook_info.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/github/github_webhook_info.py \ No newline at end of file diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py new file mode 100644 index 0000000000..30b3e719f3 --- /dev/null +++ b/plugins/modules/github_webhook_info.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: github_webhook_info +short_description: Query information about GitHub webhooks +description: + - Query information about GitHub webhooks. +requirements: + - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + repository: + description: + - Full name of the repository to configure a hook for. + type: str + required: true + aliases: + - repo + user: + description: + - User to authenticate to GitHub as. + type: str + required: true + password: + description: + - Password to authenticate to GitHub with. + type: str + required: false + token: + description: + - Token to authenticate to GitHub with. + type: str + required: false + github_url: + description: + - Base URL of the GitHub API. + type: str + required: false + default: https://api.github.com + +author: + - "Chris St. Pierre (@stpierre)" +""" + +EXAMPLES = r""" +- name: List hooks for a repository (password auth) + community.general.github_webhook_info: + repository: ansible/ansible + user: "{{ github_user }}" + password: "{{ github_password }}" + register: ansible_webhooks + +- name: List hooks for a repository on GitHub Enterprise (token auth) + community.general.github_webhook_info: + repository: myorg/myrepo + user: "{{ github_user }}" + token: "{{ github_user_api_token }}" + github_url: https://github.example.com/api/v3/ + register: myrepo_webhooks +""" + +RETURN = r""" +hooks: + description: A list of hooks that exist for the repo. 
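+ # The webhook secret itself is never part of this list; _munge_hook() below strips it and only reports its presence through has_shared_secret.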
+ returned: always + type: list + elements: dict + sample: + - has_shared_secret: true + url: https://jenkins.example.com/ghprbhook/ + events: [issue_comment, pull_request] + insecure_ssl: "1" + content_type: json + active: true + id: 6206 + last_response: + status: active + message: OK + code: 200 +""" + +import traceback + +GITHUB_IMP_ERR = None +try: + import github + HAS_GITHUB = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def _munge_hook(hook_obj): + retval = { + "active": hook_obj.active, + "events": hook_obj.events, + "id": hook_obj.id, + "url": hook_obj.url, + } + retval.update(hook_obj.config) + retval["has_shared_secret"] = "secret" in retval + if "secret" in retval: + del retval["secret"] + + retval["last_response"] = hook_obj.last_response.raw_data + return retval + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repository=dict(type='str', required=True, aliases=["repo"]), + user=dict(type='str', required=True), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + github_url=dict( + type='str', default="https://api.github.com")), + mutually_exclusive=(('password', 'token'), ), + required_one_of=(("password", "token"), ), + supports_check_mode=True) + + if not HAS_GITHUB: + module.fail_json(msg=missing_required_lib('PyGithub'), + exception=GITHUB_IMP_ERR) + + try: + github_conn = github.Github( + module.params["user"], + module.params.get("password") or module.params.get("token"), + base_url=module.params["github_url"]) + except github.GithubException as err: + module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + + try: + repo = github_conn.get_repo(module.params["repository"]) + except github.BadCredentialsException as err: + module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + except github.UnknownObjectException as err: + module.fail_json( + msg="Could not find repository %s in GitHub at %s: %s" % ( + module.params["repository"], module.params["github_url"], + to_native(err))) + except Exception as err: + module.fail_json( + msg="Could not fetch repository %s from GitHub at %s: %s" % + (module.params["repository"], module.params["github_url"], + to_native(err)), + exception=traceback.format_exc()) + + try: + hooks = [_munge_hook(h) for h in repo.get_hooks()] + except github.GithubException as err: + module.fail_json( + msg="Unable to get hooks from repository %s: %s" % + (module.params["repository"], to_native(err)), + exception=traceback.format_exc()) + + module.exit_json(changed=False, hooks=hooks) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py new file mode 100644 index 0000000000..514300a924 --- /dev/null +++ b/plugins/modules/gitlab_branch.py @@ -0,0 +1,180 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_branch +short_description: Create or delete a branch +version_added: 4.2.0 +description: + - This module allows to create or delete branches. 
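+# Implementation sketch (comment for maintainers; assumes python-gitlab's documented Branch API):
+# creation maps to project.branches.create({'branch': ..., 'ref': ...}) and deletion to
+# branch.delete(), as wrapped by the GitlabBranch class below.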
+author:
+ - paytroff (@paytroff)
+requirements:
+ - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete branch.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ project:
+ description:
+ - The path or name of the project.
+ required: true
+ type: str
+ branch:
+ description:
+ - The name of the branch that needs to be created.
+ required: true
+ type: str
+ ref_branch:
+ description:
+ - Reference branch to create from.
+ - This must be specified if O(state=present).
+ type: str
+"""
+
+
+EXAMPLES = r"""
+- name: Create branch branch2 from main
+ community.general.gitlab_branch:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ branch: branch2
+ ref_branch: main
+ state: present
+
+- name: Delete branch branch2
+ community.general.gitlab_branch:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ branch: branch2
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab
+)
+
+
+class GitlabBranch(object):
+
+ def __init__(self, module, project, gitlab_instance):
+ self.repo = gitlab_instance
+ self._module = module
+ self.project = self.get_project(project)
+
+ def get_project(self, project):
+ try:
+ return self.repo.projects.get(project)
+ except Exception as e:
+ return False
+
+ def get_branch(self, branch):
+ try:
+ return self.project.branches.get(branch)
+ except Exception as e:
+ return False
+
+ def create_branch(self, branch, ref_branch):
+ return self.project.branches.create({'branch': branch, 'ref': ref_branch})
+
+ def delete_branch(self, branch):
+ return branch.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=True),
+ branch=dict(type='str', required=True),
+ ref_branch=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ required_if=[
+ ['state', 'present', ['ref_branch'], True],
+ ],
+ supports_check_mode=False
+ )
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
+
+ project = module.params['project']
+ branch = module.params['branch']
+ ref_branch = module.params['ref_branch']
+ state = module.params['state']
+
+ gitlab_version = gitlab.__version__
+ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+ module.fail_json(msg="community.general.gitlab_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+ " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
+
+ this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance)
+
+ this_branch = this_gitlab.get_branch(branch)
+
+ if not this_branch and state == "present":
+ r_branch = this_gitlab.get_branch(ref_branch)
+ if not r_branch:
+ module.fail_json(msg="Ref branch {b} does not exist.".format(b=ref_branch))
+ this_gitlab.create_branch(branch, ref_branch)
+ module.exit_json(changed=True, msg="Created the branch {b}.".format(b=branch))
+ elif this_branch and state == "present":
+ module.exit_json(changed=False, msg="Branch {b} already exists.".format(b=branch))
+ elif this_branch and state == "absent":
+ try:
+ this_gitlab.delete_branch(this_branch)
+ module.exit_json(changed=True, msg="Branch {b} deleted.".format(b=branch))
+ except Exception as e:
+ module.fail_json(msg="Error deleting branch.", exception=traceback.format_exc())
+ else:
+ module.exit_json(changed=False, msg="No changes are needed.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py
deleted file mode 120000
index 998ce4b7be..0000000000
--- a/plugins/modules/gitlab_deploy_key.py
+++ /dev/null
@@ -1 +0,0 @@
-./source_control/gitlab/gitlab_deploy_key.py
\ No newline at end of file
diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py
new file mode 100644
index 0000000000..9252341863
--- /dev/null
+++ b/plugins/modules/gitlab_deploy_key.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins
+# Based on code:
+# Copyright (c) 2013, Phillip Gentry
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys
+description:
+ - Adds, updates and removes project deploy keys.
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project:
+ description:
+ - ID or Full path of project in the form of group/name.
+ required: true
+ type: str
+ title:
+ description:
+ - Deploy key's title.
+ required: true
+ type: str
+ key:
+ description:
+ - Deploy key.
+ required: true
+ type: str
+ can_push:
+ description:
+ - Whether this key can push to the project.
+ type: bool
+ default: false
+ state:
+ description:
+ - When V(present) the deploy key is added to the project if it does not exist.
+ - When V(absent) it is removed from the project if it exists.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+"""
+
+EXAMPLES = r"""
+- name: "Adding a project deploy key"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+ +- name: "Update the above deploy key to add push access" + community.general.gitlab_deploy_key: + api_url: https://gitlab.example.com/ + api_token: "{{ api_token }}" + project: "my_group/my_project" + title: "Jenkins CI" + state: present + can_push: true + +- name: "Remove the previous deploy key from the project" + community.general.gitlab_deploy_key: + api_url: https://gitlab.example.com/ + api_token: "{{ api_token }}" + project: "my_group/my_project" + state: absent + key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +result: + description: JSON-parsed response from the server. + returned: always + type: dict + +error: + description: The error message returned by the GitLab API. + returned: failed + type: str + sample: "400: key is already in use" + +deploy_key: + description: API object. + returned: always + type: dict +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, gitlab, list_all_kwargs +) + + +class GitLabDeployKey(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.deploy_key_object = None + + ''' + @param project Project object + @param key_title Title of the key + @param key_key String of the key + @param key_can_push Option of the deploy_key + @param options Deploy key options + ''' + def create_or_update_deploy_key(self, project, key_title, key_key, options): + changed = False + + # note: unfortunately public key cannot be updated directly by + # GitLab REST API, so for that case we need to delete and + # than recreate the key + if self.deploy_key_object and self.deploy_key_object.key != key_key: + if not self._module.check_mode: + self.deploy_key_object.delete() + self.deploy_key_object = None + + # Because we have already call exists_deploy_key in main() + if self.deploy_key_object is None: + deploy_key = self.create_deploy_key(project, { + 'title': key_title, + 'key': key_key, + 'can_push': options['can_push']}) + changed = True + else: + changed, deploy_key = self.update_deploy_key(self.deploy_key_object, { + 'title': key_title, + 'can_push': options['can_push']}) + + self.deploy_key_object = deploy_key + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title) + + try: + deploy_key.save() + except Exception as e: + self._module.fail_json(msg="Failed to update deploy key: %s " % e) + return True + else: + return False + + ''' + @param project Project Object + @param arguments Attributes of the deploy_key + ''' + def create_deploy_key(self, project, arguments): + if self._module.check_mode: + return True + + try: + deploy_key = project.keys.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e)) + + return deploy_key + + ''' + @param deploy_key Deploy Key Object + @param arguments Attributes of the deploy_key + ''' + def update_deploy_key(self, deploy_key, arguments): + changed = False + + for arg_key, arg_value in 
arguments.items():
+ if arg_value is not None:
+ if getattr(deploy_key, arg_key) != arg_value:
+ setattr(deploy_key, arg_key, arg_value)
+ changed = True
+
+ return (changed, deploy_key)
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def find_deploy_key(self, project, key_title):
+ for deploy_key in project.keys.list(**list_all_kwargs):
+ if deploy_key.title == key_title:
+ return deploy_key
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def exists_deploy_key(self, project, key_title):
+ # When the deploy key exists, the object is stored in self.deploy_key_object.
+ deploy_key = self.find_deploy_key(project, key_title)
+ if deploy_key:
+ self.deploy_key_object = deploy_key
+ return True
+ return False
+
+ def delete_deploy_key(self):
+ if self._module.check_mode:
+ return True
+
+ return self.deploy_key_object.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=False),
+ can_push=dict(type='bool', default=False),
+ title=dict(type='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ key_title = module.params['title']
+ key_keyfile = module.params['key']
+ key_can_push = module.params['can_push']
+
+ gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+ project = find_project(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create deploy key: project %s doesn't exist" % project_identifier)
+
+ deploy_key_exists = gitlab_deploy_key.exists_deploy_key(project, key_title)
+
+ if state == 'absent':
+ if deploy_key_exists:
+ gitlab_deploy_key.delete_deploy_key()
+ module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+ else:
+ module.exit_json(changed=False, msg="Deploy key already deleted or does not exist")
+
+ if state == 'present':
+ if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {'can_push': key_can_push}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py
deleted file mode 120000
index d08beec585..0000000000
--- a/plugins/modules/gitlab_group.py
+++ /dev/null
@@ -1 +0,0 @@
-./source_control/gitlab/gitlab_group.py
\ No newline at end of file
diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py
new file mode 100644
index 0000000000..6356ce2e2c
--- 
/dev/null +++ b/plugins/modules/gitlab_group.py @@ -0,0 +1,532 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_group +short_description: Creates/updates/deletes GitLab Groups +description: + - When the group does not exist in GitLab, it is created. + - When the group does exist and O(state=absent), the group is deleted. +author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + auto_devops_enabled: + description: + - Default to Auto DevOps pipeline for all projects within this group. + type: bool + version_added: 3.7.0 + avatar_path: + description: + - Absolute path image to configure avatar. File size should not exceed 200 kb. + - This option is only used on creation, not for updates. + type: path + version_added: 4.2.0 + default_branch: + description: + - All merge requests and commits are made against this branch unless you specify a different one. + type: str + version_added: 9.5.0 + description: + description: + - A description for the group. + type: str + enabled_git_access_protocol: + description: + - V(all) means SSH and HTTP(S) is enabled. + - V(ssh) means only SSH is enabled. + - V(http) means only HTTP(S) is enabled. + - Only available for top level groups. + choices: ["all", "ssh", "http"] + type: str + version_added: 9.5.0 + force_delete: + description: + - Force delete group even if projects in it. + - Used only when O(state=absent). + type: bool + default: false + version_added: 7.5.0 + lfs_enabled: + description: + - Projects in this group can use Git LFS. + type: bool + version_added: 9.5.0 + lock_duo_features_enabled: + description: + - Enforce GitLab Duo features for all subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + membership_lock: + description: + - Users cannot be added to projects in this group. + type: bool + version_added: 9.5.0 + mentions_disabled: + description: + - Group mentions are disabled. + type: bool + version_added: 9.5.0 + name: + description: + - Name of the group you want to create. + required: true + type: str + parent: + description: + - Allow to create subgroups. + - ID or Full path of parent group in the form of group/name. + type: str + path: + description: + - The path of the group you want to create, this is O(api_url)/O(path). + - If not supplied, O(name) is used. + type: str + prevent_forking_outside_group: + description: + - Prevent forking outside of the group. + type: bool + version_added: 9.5.0 + prevent_sharing_groups_outside_hierarchy: + description: + - Members cannot invite groups outside of this group and its subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + project_creation_level: + description: + - Determine if developers can create projects in the group. 
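+ # V(noone) disables project creation in this group entirely.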
+ choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + request_access_enabled: + description: + - Users can request access (if visibility is public or internal). + type: bool + version_added: 9.5.0 + service_access_tokens_expiration_enforced: + description: + - Service account token expiration. + - Changes do not affect existing token expiration dates. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + share_with_group_lock: + description: + - Projects cannot be shared with other groups. + type: bool + version_added: 9.5.0 + require_two_factor_authentication: + description: + - Require all users in this group to setup two-factor authentication. + type: bool + version_added: 3.7.0 + state: + description: + - Create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + subgroup_creation_level: + description: + - Allowed to create subgroups. + choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 + two_factor_grace_period: + description: + - Delay 2FA enforcement (hours). + type: str + version_added: 9.5.0 + visibility: + description: + - Default visibility of the group. + choices: ["private", "internal", "public"] + default: private + type: str + wiki_access_level: + description: + - V(enabled) means everyone can access the wiki. + - V(private) means only members of this group can access the wiki. + - V(disabled) means group-level wiki is disabled. + choices: ["enabled", "private", "disabled"] + type: str + version_added: 9.5.0 +""" + +EXAMPLES = r""" +- name: "Delete GitLab Group" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + name: my_first_group + state: absent + +- name: "Create GitLab Group" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_group + path: my_first_group + state: present + +# The group will by created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group +- name: "Create GitLab SubGroup" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_group + path: my_first_group + state: present + parent: "super_parent/parent" + +# Other group which only allows sub-groups - no projects +- name: "Create GitLab Group for SubGroups only" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_main_group + path: my_main_group + state: present + project_creation_level: noone + auto_devops_enabled: false + subgroup_creation_level: maintainer +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +result: + description: JSON-parsed response from the server. + returned: always + type: dict + +error: + description: The error message returned by the GitLab API. + returned: failed + type: str + sample: "400: path is already in use" + +group: + description: API object. 
+ returned: always + type: dict +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab +) + + +class GitLabGroup(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.group_object = None + + ''' + @param group Group object + ''' + def get_group_id(self, group): + if group is not None: + return group.id + return None + + ''' + @param name Name of the group + @param parent Parent group full path + @param options Group options + ''' + def create_or_update_group(self, name, parent, options): + changed = False + + payload = { + 'auto_devops_enabled': options['auto_devops_enabled'], + 'default_branch': options['default_branch'], + 'description': options['description'], + 'lfs_enabled': options['lfs_enabled'], + 'membership_lock': options['membership_lock'], + 'mentions_disabled': options['mentions_disabled'], + 'name': name, + 'path': options['path'], + 'prevent_forking_outside_group': options['prevent_forking_outside_group'], + 'project_creation_level': options['project_creation_level'], + 'request_access_enabled': options['request_access_enabled'], + 'require_two_factor_authentication': options['require_two_factor_authentication'], + 'share_with_group_lock': options['share_with_group_lock'], + 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], + 'wiki_access_level': options['wiki_access_level'], + } + if options.get('enabled_git_access_protocol') and parent is None: + payload['enabled_git_access_protocol'] = options['enabled_git_access_protocol'] + if options.get('lock_duo_features_enabled') and parent is None: + payload['lock_duo_features_enabled'] = options['lock_duo_features_enabled'] + if options.get('prevent_sharing_groups_outside_hierarchy') and parent is None: + payload['prevent_sharing_groups_outside_hierarchy'] = options['prevent_sharing_groups_outside_hierarchy'] + if options.get('service_access_tokens_expiration_enforced') and parent is None: + payload['service_access_tokens_expiration_enforced'] = options['service_access_tokens_expiration_enforced'] + if options.get('two_factor_grace_period'): + payload['two_factor_grace_period'] = int(options['two_factor_grace_period']) + + # Because we have already call userExists in main() + if self.group_object is None: + payload['parent_id'] = self.get_group_id(parent) + group = self.create_group(payload) + + # add avatar to group + if options['avatar_path']: + try: + group.avatar = open(options['avatar_path'], 'rb') + except IOError as e: + self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) + changed = True + else: + changed, group = self.update_group(self.group_object, payload) + + self.group_object = group + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name) + + try: + group.save() + except Exception as e: + self._module.fail_json(msg="Failed to update group: %s " % e) + return True + else: + return False + + ''' + @param arguments Attributes of the group + ''' + def create_group(self, arguments): + if self._module.check_mode: + return True + + try: + # Filter out None values + filtered = {arg_key: arg_value for arg_key, 
arg_value in arguments.items() if arg_value is not None} + + group = self._gitlab.groups.create(filtered) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create group: %s " % to_native(e)) + + return group + + ''' + @param group Group Object + @param arguments Attributes of the group + ''' + def update_group(self, group, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arg_value is not None: + if getattr(group, arg_key) != arg_value: + setattr(group, arg_key, arg_value) + changed = True + + return (changed, group) + + ''' + @param force To delete even if projects inside + ''' + def delete_group(self, force=False): + group = self.group_object + + if not force and len(group.projects.list(all=False)) >= 1: + self._module.fail_json( + msg=("There are still projects in this group. " + "These needs to be moved or deleted before this group can be removed. " + "Use 'force_delete' to 'true' to force deletion of existing projects.") + ) + else: + if self._module.check_mode: + return True + + try: + group.delete() + except Exception as e: + self._module.fail_json(msg="Failed to delete group: %s " % to_native(e)) + + ''' + @param name Name of the group + @param full_path Complete path of the Group including parent group path. / + ''' + def exists_group(self, project_identifier): + # When group/user exists, object will be stored in self.group_object. + group = find_group(self._gitlab, project_identifier) + if group: + self.group_object = group + return True + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + auto_devops_enabled=dict(type='bool'), + avatar_path=dict(type='path'), + default_branch=dict(type='str'), + description=dict(type='str'), + enabled_git_access_protocol=dict(type='str', choices=['all', 'ssh', 'http']), + force_delete=dict(type='bool', default=False), + lfs_enabled=dict(type='bool'), + lock_duo_features_enabled=dict(type='bool'), + membership_lock=dict(type='bool'), + mentions_disabled=dict(type='bool'), + name=dict(type='str', required=True), + parent=dict(type='str'), + path=dict(type='str'), + prevent_forking_outside_group=dict(type='bool'), + prevent_sharing_groups_outside_hierarchy=dict(type='bool'), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + request_access_enabled=dict(type='bool'), + require_two_factor_authentication=dict(type='bool'), + service_access_tokens_expiration_enforced=dict(type='bool'), + share_with_group_lock=dict(type='bool'), + state=dict(type='str', default="present", choices=["absent", "present"]), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), + two_factor_grace_period=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + wiki_access_level=dict(type='str', choices=['enabled', 'private', 'disabled']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_token', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = 
gitlab_authentication(module) + + auto_devops_enabled = module.params['auto_devops_enabled'] + avatar_path = module.params['avatar_path'] + default_branch = module.params['default_branch'] + description = module.params['description'] + enabled_git_access_protocol = module.params['enabled_git_access_protocol'] + force_delete = module.params['force_delete'] + group_name = module.params['name'] + group_path = module.params['path'] + group_visibility = module.params['visibility'] + lfs_enabled = module.params['lfs_enabled'] + lock_duo_features_enabled = module.params['lock_duo_features_enabled'] + membership_lock = module.params['membership_lock'] + mentions_disabled = module.params['mentions_disabled'] + parent_identifier = module.params['parent'] + prevent_forking_outside_group = module.params['prevent_forking_outside_group'] + prevent_sharing_groups_outside_hierarchy = module.params['prevent_sharing_groups_outside_hierarchy'] + project_creation_level = module.params['project_creation_level'] + request_access_enabled = module.params['request_access_enabled'] + require_two_factor_authentication = module.params['require_two_factor_authentication'] + service_access_tokens_expiration_enforced = module.params['service_access_tokens_expiration_enforced'] + share_with_group_lock = module.params['share_with_group_lock'] + state = module.params['state'] + subgroup_creation_level = module.params['subgroup_creation_level'] + two_factor_grace_period = module.params['two_factor_grace_period'] + wiki_access_level = module.params['wiki_access_level'] + + # Define default group_path based on group_name + if group_path is None: + group_path = group_name.replace(" ", "_") + + gitlab_group = GitLabGroup(module, gitlab_instance) + + parent_group = None + if parent_identifier: + parent_group = find_group(gitlab_instance, parent_identifier) + if not parent_group: + module.fail_json(msg="Failed to create GitLab group: Parent group doesn't exist") + + group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path) + else: + group_exists = gitlab_group.exists_group(group_path) + + if state == 'absent': + if group_exists: + gitlab_group.delete_group(force=force_delete) + module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) + else: + module.exit_json(changed=False, msg="Group deleted or does not exist") + + if state == 'present': + if gitlab_group.create_or_update_group(group_name, parent_group, { + "auto_devops_enabled": auto_devops_enabled, + "avatar_path": avatar_path, + "default_branch": default_branch, + "description": description, + "enabled_git_access_protocol": enabled_git_access_protocol, + "lfs_enabled": lfs_enabled, + "lock_duo_features_enabled": lock_duo_features_enabled, + "membership_lock": membership_lock, + "mentions_disabled": mentions_disabled, + "path": group_path, + "prevent_forking_outside_group": prevent_forking_outside_group, + "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, + "project_creation_level": project_creation_level, + "request_access_enabled": request_access_enabled, + "require_two_factor_authentication": require_two_factor_authentication, + "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, + "share_with_group_lock": share_with_group_lock, + "subgroup_creation_level": subgroup_creation_level, + "two_factor_grace_period": two_factor_grace_period, + "visibility": group_visibility, + "wiki_access_level": wiki_access_level, + }): + module.exit_json(changed=True, 
msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) + else: + module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.group_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py new file mode 100644 index 0000000000..59afc74bea --- /dev/null +++ b/plugins/modules/gitlab_group_access_token.py @@ -0,0 +1,339 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr) +# Based on code: +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_group_access_token +short_description: Manages GitLab group access tokens +version_added: 8.4.0 +description: + - Creates and revokes group access tokens. +author: + - Zoran Krleza (@pixslx) +requirements: + - python-gitlab >= 3.1.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes +notes: + - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens + are recreated or not is controlled by the O(recreate) option, which defaults to V(never). + - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. + - Token matching is done by comparing O(name) option. +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + group: + description: + - ID or full path of group in the form of group/subgroup. + required: true + type: str + name: + description: + - Access token's name. + required: true + type: str + scopes: + description: + - Scope of the access token. + - The values V(read_virtual_registry), V(write_virtual_registry), V(manage_runner), and V(self_rotate) were added in community.general 11.3.0. + required: true + type: list + elements: str + aliases: ["scope"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_virtual_registry + - write_virtual_registry + - read_repository + - write_repository + - create_runner + - manage_runner + - ai_features + - k8s_proxy + - self_rotate + access_level: + description: + - Access level of the access token. + - The value V(planner) was added in community.general 11.3.0. + type: str + default: maintainer + choices: ["guest", "planner", "reporter", "developer", "maintainer", "owner"] + expires_at: + description: + - Expiration date of the access token in C(YYYY-MM-DD) format. + - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date. + type: str + required: true + recreate: + description: + - Whether the access token is recreated if it already exists. + - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. + type: str + choices: ["never", "always", "state_change"] + default: never + state: + description: + - When V(present) the access token is added to the group if it does not exist. 
+ - When V(absent) it is removed from the group if it exists. + default: present + type: str + choices: ["present", "absent"] +""" + +EXAMPLES = r""" +- name: "Creating a group access token" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_subgroup" + name: "group_token" + expires_at: "2024-12-31" + access_level: developer + scopes: + - api + - read_api + - read_repository + - write_repository + state: present + +- name: "Revoking a group access token" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_group" + name: "group_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + state: absent + +- name: "Change (recreate) existing token if its actual state is different than desired state" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_group" + name: "group_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + recreate: state_change + state: present +""" + +RETURN = r""" +access_token: + description: + - API object. + - Only contains the value of the token if the token was created or recreated. + returned: success and O(state=present) + type: dict +""" + +from datetime import datetime + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab +) + +ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) + + +class GitLabGroupAccessToken(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.access_token_object = None + + ''' + @param project Project Object + @param group Group Object + @param arguments Attributes of the access_token + ''' + def create_access_token(self, group, arguments): + changed = False + if self._module.check_mode: + return True + + try: + self.access_token_object = group.access_tokens.create(arguments) + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create access token: %s " % to_native(e)) + + return changed + + ''' + @param project Project object + @param group Group Object + @param name of the access token + ''' + def find_access_token(self, group, name): + access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + for access_token in access_tokens: + if access_token.name == name: + self.access_token_object = access_token + return False + return False + + def revoke_access_token(self): + if self._module.check_mode: + return True + + changed = False + try: + self.access_token_object.delete() + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e)) + + return changed + + def access_tokens_equal(self): + if self.access_token_object.name != self._module.params['name']: + return False + if self.access_token_object.scopes != self._module.params['scopes']: + return False + if self.access_token_object.access_level != 
ACCESS_LEVELS[self._module.params['access_level']]: + return False + if self.access_token_object.expires_at != self._module.params['expires_at']: + return False + return True + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + group=dict(type='str', required=True), + name=dict(type='str', required=True), + scopes=dict(type='list', + required=True, + aliases=['scope'], + elements='str', + choices=['api', + 'read_api', + 'read_registry', + 'write_registry', + 'read_virtual_registry', + 'write_virtual_registry', + 'read_repository', + 'write_repository', + 'create_runner', + 'manage_runner', + 'ai_features', + 'k8s_proxy', + 'self_rotate']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), + expires_at=dict(type='str', required=True), + recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + state = module.params['state'] + group_identifier = module.params['group'] + name = module.params['name'] + scopes = module.params['scopes'] + access_level_str = module.params['access_level'] + expires_at = module.params['expires_at'] + recreate = module.params['recreate'] + + access_level = ACCESS_LEVELS[access_level_str] + + try: + datetime.strptime(expires_at, '%Y-%m-%d') + except ValueError: + module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD") + + gitlab_instance = gitlab_authentication(module) + + gitlab_access_token = GitLabGroupAccessToken(module, gitlab_instance) + + group = find_group(gitlab_instance, group_identifier) + if group is None: + module.fail_json(msg="Failed to create access token: group %s does not exists" % group_identifier) + + gitlab_access_token_exists = False + gitlab_access_token.find_access_token(group, name) + if gitlab_access_token.access_token_object is not None: + gitlab_access_token_exists = True + + if state == 'absent': + if gitlab_access_token_exists: + gitlab_access_token.revoke_access_token() + module.exit_json(changed=True, msg="Successfully deleted access token %s" % name) + else: + module.exit_json(changed=False, msg="Access token does not exists") + + if state == 'present': + if gitlab_access_token_exists: + if gitlab_access_token.access_tokens_equal(): + if recreate == 'always': + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs) + else: + if recreate == 'never': + module.fail_json(msg="Access token already exists and its state is different. 
It can not be updated without recreating.") + else: + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_group_members.py b/plugins/modules/gitlab_group_members.py deleted file mode 120000 index ab80ca08ba..0000000000 --- a/plugins/modules/gitlab_group_members.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/gitlab/gitlab_group_members.py \ No newline at end of file diff --git a/plugins/modules/gitlab_group_members.py b/plugins/modules/gitlab_group_members.py new file mode 100644 index 0000000000..b101cb4e43 --- /dev/null +++ b/plugins/modules/gitlab_group_members.py @@ -0,0 +1,441 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_group_members +short_description: Manage group members on GitLab Server +description: + - This module allows to add and remove members to/from a group, or change a member's access level in a group on GitLab. +version_added: '1.2.0' +author: Zainab Alsaffar (@zanssa) +requirements: + - python-gitlab python module <= 1.15.0 + - administrator rights on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + gitlab_group: + description: + - The C(full_path) of the GitLab group the member is added to/removed from. + - Setting this to C(name) or C(path) has been disallowed since community.general 6.0.0. Use C(full_path) instead. + required: true + type: str + gitlab_user: + description: + - A username or a list of usernames to add to/remove from the GitLab group. + - Mutually exclusive with O(gitlab_users_access). + type: list + elements: str + access_level: + description: + - The access level for the user. + - Required if O(state=present), user state is set to present. + - Mutually exclusive with O(gitlab_users_access). + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with O(gitlab_user) and O(access_level). + - Use together with O(purge_users) to remove all users not specified here from the group. + type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab group. + type: str + required: true + access_level: + description: + - The access level for the user. 
+          - Required if O(state=present).
+        type: str
+        choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+        required: true
+    version_added: 3.6.0
+  state:
+    description:
+      - State of the member in the group.
+      - On V(present), it adds a user to a GitLab group.
+      - On V(absent), it removes a user from a GitLab group.
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+  purge_users:
+    description:
+      - Adds/removes users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted,
+        do not purge orphaned members.
+      - Is only used when O(state=present).
+    type: list
+    elements: str
+    choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+    version_added: 3.6.0
+"""
+
+EXAMPLES = r"""
+- name: Add a user to a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    access_level: developer
+    state: present
+
+- name: Remove a user from a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    state: absent
+
+- name: Add a list of Users to A GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user:
+      - user1
+      - user2
+    access_level: developer
+    state: present
+
+- name: Add a list of Users with Dedicated Access Levels to A GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: present
+
+- name: Add a user, remove all others which might be on this access level
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    access_level: developer
+    purge_users: developer
+    state: present
+
+- name: Remove a list of Users with Dedicated Access Levels from A GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: absent
+"""
+
+RETURN = r""" # """
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs
+)
+
+
+class GitLabGroup(object):
+    def __init__(self, module, gl):
+        self._module = module
+        self._gitlab = gl
+
+    # get user id if the user exists
+    def get_user_id(self, gitlab_user):
+        return next(
+            (u.id for u in self._gitlab.users.list(username=gitlab_user, **list_all_kwargs)),
+            None
+        )
+
+    # get group id if group exists
+    def get_group_id(self, gitlab_group):
+        return next(
+            (
+                g.id for g in self._gitlab.groups.list(search=gitlab_group, **list_all_kwargs)
+                if g.full_path == gitlab_group
+            ),
+            None
+        )
+
+    # get all members in a group
+    def get_members_in_a_group(self, gitlab_group_id):
+        group = self._gitlab.groups.get(gitlab_group_id)
+        return group.members.list(all=True)
+
+    # get single
member in a group by user name + def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id): + member = None + group = self._gitlab.groups.get(gitlab_group_id) + try: + member = group.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + + # check if the user is a member of the group + def is_user_a_member(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return True + return False + + # add user to a group + def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level): + group = self._gitlab.groups.get(gitlab_group_id) + add_member = group.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) + + # remove user from a group + def remove_user_from_group(self, gitlab_user_id, gitlab_group_id): + group = self._gitlab.groups.get(gitlab_group_id) + group.members.delete(gitlab_user_id) + + # get user's access level + def get_user_access_level(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return member.access_level + + # update user's access level in a group + def update_user_access_level(self, members, gitlab_user_id, access_level): + for member in members: + if member.id == gitlab_user_id: + member.access_level = access_level + member.save() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + gitlab_group=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), + ) + ), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['gitlab_user', 'gitlab_users_access'], + ], + required_if=[ + ['state', 'present', ['access_level', 'gitlab_users_access'], True], + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) + + access_level_int = { + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, + } + + gitlab_group = module.params['gitlab_group'] + state = module.params['state'] + access_level = module.params['access_level'] + purge_users = module.params['purge_users'] + + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] + + group = GitLabGroup(module, gl) + + gitlab_group_id = group.get_group_id(gitlab_group) + + # group doesn't exist + 
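# A sketch of how the lookup above behaves (hypothetical paths): a call such as
+    # group.get_group_id('parent/subgroup') returns the numeric group ID because
+    # the full_path matches, while a bare name like 'subgroup' yields None and
+    # fails below.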
+    if not gitlab_group_id:
+        module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+    members = []
+    if module.params['gitlab_user'] is not None:
+        gitlab_users_access = []
+        gitlab_users = module.params['gitlab_user']
+        for gl_user in gitlab_users:
+            gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
+    elif module.params['gitlab_users_access'] is not None:
+        gitlab_users_access = module.params['gitlab_users_access']
+        for user_level in gitlab_users_access:
+            user_level['access_level'] = access_level_int[user_level['access_level']]
+
+    if len(gitlab_users_access) == 1 and not purge_users:
+        # only single user given
+        members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))]
+        if members[0] is None:
+            members = []
+    elif len(gitlab_users_access) > 1 or purge_users:
+        # list of users given
+        members = group.get_members_in_a_group(gitlab_group_id)
+    else:
+        module.exit_json(changed=False, result="Nothing to do, please give at least one user or set purge_users true.",
+                         result_data=[])
+
+    changed = False
+    error = False
+    changed_users = []
+    changed_data = []
+
+    for gitlab_user in gitlab_users_access:
+        gitlab_user_id = group.get_user_id(gitlab_user['name'])
+
+        # user doesn't exist
+        if not gitlab_user_id:
+            if state == 'absent':
+                changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']})
+            else:
+                error = True
+                changed_users.append("user '%s' not found." % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                     'msg': "user '%s' not found." % gitlab_user['name']})
+            continue
+
+        is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+        # check if the user is a member in the group
+        if not is_user_a_member:
+            if state == 'present':
+                # add user to the group
+                try:
+                    if not module.check_mode:
+                        group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level'])
+                    changed = True
+                    changed_users.append("Successfully added user '%s' to group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully added user '%s' to group" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabCreateError) as e:
+                    error = True
+                    changed_users.append("Failed to add user '%s' to the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            # state as absent
+            else:
+                changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']})
+        # in case that a user is a member
+        else:
+            if state == 'present':
+                # compare the access level
+                user_access_level = group.get_user_access_level(members, gitlab_user_id)
+                if user_access_level == gitlab_user['access_level']:
+                    changed_users.append("User, '%s', is already a member in the group.
No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is already a member in the group. No change to report" % gitlab_user['name']}) + else: + # update the access level for the user + try: + if not module.check_mode: + group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabUpdateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + else: + # remove the user from the group + try: + if not module.check_mode: + group.remove_user_from_group(gitlab_user_id, gitlab_group_id) + changed = True + changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) + + # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users + if state == 'present' and purge_users: + uppercase_names_in_gitlab_users_access = [] + for name in gitlab_users_access: + uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + + for member in members: + if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: + try: + if not module.check_mode: + group.remove_user_from_group(member.id, gitlab_group_id) + changed = True + changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username) + changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', + 'msg': "Successfully removed user '%s', from group. 
Was not in given list" % member.username}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) + + if len(gitlab_users_access) == 1 and error: + # if single user given and an error occurred return error for list errors will be per user + module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) + elif error: + module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + + module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py deleted file mode 120000 index 8fdfaea5f5..0000000000 --- a/plugins/modules/gitlab_group_variable.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_group_variable.py \ No newline at end of file diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py new file mode 100644 index 0000000000..c505547d87 --- /dev/null +++ b/plugins/modules/gitlab_group_variable.py @@ -0,0 +1,472 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Florent Madiot (scodeman@scode.io) +# Based on code: +# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_group_variable +short_description: Creates, updates, or deletes GitLab groups variables +version_added: 1.2.0 +description: + - Creates a group variable if it does not exist. + - When a group variable does exist and is not hidden, its value is updated when the values are different. + When a group variable does exist and is hidden, its value is updated. In this case, the module is B(not idempotent). + - Variables which are untouched in the playbook, but are not untouched in the GitLab group, they stay untouched (O(purge=false)) + or are deleted (O(purge=true)). +author: + - Florent Madiot (@scodeman) +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete group variable. + default: present + type: str + choices: ["present", "absent"] + group: + description: + - The path and name of the group. + required: true + type: str + purge: + description: + - When set to V(true), delete all variables which are not untouched in the task. + default: false + type: bool + vars: + description: + - When the list element is a simple key-value pair, C(masked), C(hidden), C(raw), and C(protected) are set to V(false). + - When the list element is a dict with the keys C(value), C(masked), C(hidden), C(raw), and C(protected), the user can have full + control about whether a value should be masked, hidden, raw, protected, or a combination. + - Support for group variables requires GitLab >= 9.5. + - Support for environment_scope requires GitLab Premium >= 13.11. 
+      - Support for protected values requires GitLab >= 9.3.
+      - Support for masked values requires GitLab >= 11.10.
+      - Support for hidden values requires GitLab >= 17.4, and was added in community.general 11.3.0.
+      - Support for raw values requires GitLab >= 15.7.
+      - A C(value) must be a string or a number.
+      - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file).
+      - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation
+        on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+    default: {}
+    type: dict
+  variables:
+    version_added: 4.5.0
+    description:
+      - A list of dictionaries that represent CI/CD variables.
+      - This module works internally with this structure, even if the older O(vars) parameter is used.
+    default: []
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description:
+          - The name of the variable.
+        type: str
+        required: true
+      value:
+        description:
+          - The variable value.
+          - Required when O(state=present).
+        type: str
+      description:
+        description:
+          - A description for the variable.
+          - Support for descriptions requires GitLab >= 16.2.
+        type: str
+        version_added: '11.4.0'
+      masked:
+        description:
+          - Whether variable value is masked or not.
+        type: bool
+        default: false
+      hidden:
+        description:
+          - Whether variable value is hidden or not.
+          - Implies C(masked).
+          - Support for hidden values requires GitLab >= 17.4.
+        type: bool
+        default: false
+        version_added: '11.3.0'
+      protected:
+        description:
+          - Whether variable value is protected or not.
+        type: bool
+        default: false
+      raw:
+        description:
+          - Whether variable value is raw or not.
+          - Support for raw values requires GitLab >= 15.7.
+        type: bool
+        default: false
+        version_added: '7.4.0'
+      variable_type:
+        description:
+          - Whether a variable is an environment variable (V(env_var)) or a file (V(file)).
+        type: str
+        choices: ["env_var", "file"]
+        default: env_var
+      environment_scope:
+        description:
+          - The scope for the variable.
+ type: str + default: '*' +""" + + +EXAMPLES = r""" +- name: Set or update some CI/CD variables + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + variables: + - name: ACCESS_KEY_ID + value: abc123 + - name: SECRET_ACCESS_KEY + value: 3214cbad + masked: true + protected: true + variable_type: env_var + environment_scope: production + +- name: Set or update some CI/CD variables with raw value + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + raw: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with expandable value + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: '$MY_OTHER_VARIABLE' + masked: true + protected: true + raw: false + variable_type: env_var + environment_scope: '*' + +- name: Delete one variable + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + state: absent + vars: + ACCESS_KEY_ID: abc123 +""" + +RETURN = r""" +group_variable: + description: Four lists of the variablenames which were added, updated, removed or exist. + returned: always + type: dict + contains: + added: + description: A list of variables which were created. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + untouched: + description: A list of variables which exist. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + removed: + description: A list of variables which were deleted. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + updated: + description: A list of variables whose values were changed. 
+ returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, + list_all_kwargs +) + + +class GitlabGroupVariables(object): + + def __init__(self, module, gitlab_instance): + self.repo = gitlab_instance + self.group = self.get_group(module.params['group']) + self._module = module + + def get_group(self, group_name): + return self.repo.groups.get(group_name) + + def list_all_group_variables(self): + return list(self.group.variables.list(**list_all_kwargs)) + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "description": var_obj.get('description'), + "masked": var_obj.get('masked'), + "masked_and_hidden": var_obj.get('hidden'), + "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), + "variable_type": var_obj.get('variable_type'), + } + if var_obj.get('environment_scope') is not None: + var["environment_scope"] = var_obj.get('environment_scope') + + self.group.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.group.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_group_variables() + before = [x.attributes for x in gitlab_keys] + + gitlab_keys = this_gitlab.list_all_group_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('raw') is None: + item['raw'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('hidden') is None: + item['hidden'] = False + if item.get('environment_scope') is None: + item['environment_scope'] = '*' + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = this_gitlab.list_all_group_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value, type, and description do not matter on removing variables. 
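+        # For example (hypothetical data), a requested entry reduced to
+        # {'key': 'TOKEN', 'environment_scope': '*'} matches an existing variable
+        # with any value, variable_type, or description, since exactly those keys
+        # are stripped from both lists below.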
+ keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + change = True + + gitlab_keys = this_gitlab.list_all_group_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + group=dict(type='str', required=True), + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), + # please mind whenever changing the variables dict to also change module_utils/gitlab.py's + # KNOWN dict in filter_returned_variables or bad evil will happen + variables=dict(type='list', elements='dict', default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + description=dict(type='str'), + masked=dict(type='bool', default=False), + hidden=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), + environment_scope=dict(type='str', default='*'), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['vars', 'variables'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + purge = module.params['purge'] + var_list = module.params['vars'] + state = module.params['state'] + + if var_list: + variables = vars_to_variables(var_list, module) + else: + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required in state present') + + this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) + + changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item.pop('group_id') + item['name'] = item.pop('key') + for item in before: + item.pop('group_id') + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] + 
removed = [x.get('key') for x in raw_return_value['removed']]
+    untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']]
+    return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+    module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py
deleted file mode 120000
index 34cf58578a..0000000000
--- a/plugins/modules/gitlab_hook.py
+++ /dev/null
@@ -1 +0,0 @@
-./source_control/gitlab/gitlab_hook.py
\ No newline at end of file
diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py
new file mode 100644
index 0000000000..46997c5f62
--- /dev/null
+++ b/plugins/modules/gitlab_hook.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins
+# Based on code:
+# Copyright (c) 2013, Phillip Gentry
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_hook
+short_description: Manages GitLab project hooks
+description:
+  - Adds, updates, and removes project hooks.
+author:
+  - Marcus Watkins (@marwatk)
+  - Guillaume Martinez (@Lunik)
+requirements:
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  project:
+    description:
+      - ID or Full path of the project in the form of group/name.
+    required: true
+    type: str
+  hook_url:
+    description:
+      - The URL that you want GitLab to post to, this is used as the primary key for updates and deletion.
+    required: true
+    type: str
+  state:
+    description:
+      - When V(present) the hook is updated to match the input or created if it does not exist.
+      - When V(absent) the hook is deleted if it exists.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  push_events:
+    description:
+      - Trigger hook on push events.
+    type: bool
+    default: true
+  push_events_branch_filter:
+    description:
+      - Branch name or wildcard to trigger hook on push events.
+    type: str
+    version_added: '0.2.0'
+    default: ''
+  issues_events:
+    description:
+      - Trigger hook on issues events.
+    type: bool
+    default: false
+  merge_requests_events:
+    description:
+      - Trigger hook on merge requests events.
+    type: bool
+    default: false
+  tag_push_events:
+    description:
+      - Trigger hook on tag push events.
+    type: bool
+    default: false
+  note_events:
+    description:
+      - Trigger hook on note events or when someone adds a comment.
+    type: bool
+    default: false
+  job_events:
+    description:
+      - Trigger hook on job events.
+    type: bool
+    default: false
+  pipeline_events:
+    description:
+      - Trigger hook on pipeline events.
+    type: bool
+    default: false
+  wiki_page_events:
+    description:
+      - Trigger hook on wiki events.
+    type: bool
+    default: false
+  releases_events:
+    description:
+      - Trigger hook on release events.
+    type: bool
+    version_added: '8.4.0'
+  hook_validate_certs:
+    description:
+      - Whether GitLab performs SSL verification when triggering the hook.
+    type: bool
+    default: false
+    aliases: [enable_ssl_verification]
+  token:
+    description:
+      - Secret token to validate hook messages at the receiver.
+ - If this is present it always results in a change as it cannot be retrieved from GitLab. + - It shows up in the C(X-GitLab-Token) HTTP request header. + required: false + type: str +""" + +EXAMPLES = r""" +- name: "Adding a project hook" + community.general.gitlab_hook: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + project: "my_group/my_project" + hook_url: "https://my-ci-server.example.com/gitlab-hook" + state: present + push_events: true + tag_push_events: true + token: "my-super-secret-token-that-my-ci-server-will-check" + +- name: "Delete the previous hook" + community.general.gitlab_hook: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + project: "my_group/my_project" + hook_url: "https://my-ci-server.example.com/gitlab-hook" + state: absent + +- name: "Delete a hook by numeric project id" + community.general.gitlab_hook: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + project: 10 + hook_url: "https://my-ci-server.example.com/gitlab-hook" + state: absent +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +result: + description: JSON parsed response from the server. + returned: always + type: dict + +error: + description: The error message returned by the GitLab API. + returned: failed + type: str + sample: "400: path is already in use" + +hook: + description: API object. + returned: always + type: dict +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, list_all_kwargs +) + + +class GitLabHook(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.hook_object = None + + ''' + @param project Project Object + @param hook_url Url to call on event + @param description Description of the group + @param parent Parent group full path + ''' + def create_or_update_hook(self, project, hook_url, options): + changed = False + + # Because we have already call userExists in main() + if self.hook_object is None: + hook = self.create_hook(project, { + 'url': hook_url, + 'push_events': options['push_events'], + 'push_events_branch_filter': options['push_events_branch_filter'], + 'issues_events': options['issues_events'], + 'merge_requests_events': options['merge_requests_events'], + 'tag_push_events': options['tag_push_events'], + 'note_events': options['note_events'], + 'job_events': options['job_events'], + 'pipeline_events': options['pipeline_events'], + 'wiki_page_events': options['wiki_page_events'], + 'releases_events': options['releases_events'], + 'enable_ssl_verification': options['enable_ssl_verification'], + 'token': options['token'], + }) + changed = True + else: + changed, hook = self.update_hook(self.hook_object, { + 'push_events': options['push_events'], + 'push_events_branch_filter': options['push_events_branch_filter'], + 'issues_events': options['issues_events'], + 'merge_requests_events': options['merge_requests_events'], + 'tag_push_events': options['tag_push_events'], + 'note_events': options['note_events'], + 'job_events': options['job_events'], + 'pipeline_events': options['pipeline_events'], + 'wiki_page_events': options['wiki_page_events'], + 'releases_events': options['releases_events'], + 'enable_ssl_verification': 
options['enable_ssl_verification'], + 'token': options['token'], + }) + + self.hook_object = hook + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url) + + try: + hook.save() + except Exception as e: + self._module.fail_json(msg="Failed to update hook: %s " % e) + + return changed + + ''' + @param project Project Object + @param arguments Attributes of the hook + ''' + def create_hook(self, project, arguments): + if self._module.check_mode: + return True + + hook = project.hooks.create(arguments) + + return hook + + ''' + @param hook Hook Object + @param arguments Attributes of the hook + ''' + def update_hook(self, hook, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arg_value is not None: + if getattr(hook, arg_key, None) != arg_value: + setattr(hook, arg_key, arg_value) + changed = True + + return (changed, hook) + + ''' + @param project Project object + @param hook_url Url to call on event + ''' + def find_hook(self, project, hook_url): + for hook in project.hooks.list(**list_all_kwargs): + if hook.url == hook_url: + return hook + + ''' + @param project Project object + @param hook_url Url to call on event + ''' + def exists_hook(self, project, hook_url): + # When project exists, object will be stored in self.project_object. + hook = self.find_hook(project, hook_url) + if hook: + self.hook_object = hook + return True + return False + + def delete_hook(self): + if not self._module.check_mode: + self.hook_object.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type='str', required=True), + hook_url=dict(type='str', required=True), + push_events=dict(type='bool', default=True), + push_events_branch_filter=dict(type='str', default=''), + issues_events=dict(type='bool', default=False), + merge_requests_events=dict(type='bool', default=False), + tag_push_events=dict(type='bool', default=False), + note_events=dict(type='bool', default=False), + job_events=dict(type='bool', default=False), + pipeline_events=dict(type='bool', default=False), + wiki_page_events=dict(type='bool', default=False), + releases_events=dict(type='bool'), + hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), + token=dict(type='str', no_log=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + state = module.params['state'] + project_identifier = module.params['project'] + hook_url = module.params['hook_url'] + push_events = module.params['push_events'] + push_events_branch_filter = module.params['push_events_branch_filter'] + issues_events = module.params['issues_events'] + merge_requests_events = module.params['merge_requests_events'] + tag_push_events = module.params['tag_push_events'] + note_events = module.params['note_events'] + job_events = 
module.params['job_events']
+    pipeline_events = module.params['pipeline_events']
+    wiki_page_events = module.params['wiki_page_events']
+    releases_events = module.params['releases_events']
+    enable_ssl_verification = module.params['hook_validate_certs']
+    hook_token = module.params['token']
+
+    gitlab_hook = GitLabHook(module, gitlab_instance)
+
+    project = find_project(gitlab_instance, project_identifier)
+
+    if project is None:
+        module.fail_json(msg="Failed to create hook: project %s does not exist" % project_identifier)
+
+    hook_exists = gitlab_hook.exists_hook(project, hook_url)
+
+    if state == 'absent':
+        if hook_exists:
+            gitlab_hook.delete_hook()
+            module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+        else:
+            module.exit_json(changed=False, msg="Hook already deleted or does not exist")
+
+    if state == 'present':
+        if gitlab_hook.create_or_update_hook(project, hook_url, {
+            "push_events": push_events,
+            "push_events_branch_filter": push_events_branch_filter,
+            "issues_events": issues_events,
+            "merge_requests_events": merge_requests_events,
+            "tag_push_events": tag_push_events,
+            "note_events": note_events,
+            "job_events": job_events,
+            "pipeline_events": pipeline_events,
+            "wiki_page_events": wiki_page_events,
+            "releases_events": releases_events,
+            "enable_ssl_verification": enable_ssl_verification,
+            "token": hook_token,
+        }):
+
+            module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs)
+        else:
+            module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py
new file mode 100644
index 0000000000..c7075f7454
--- /dev/null
+++ b/plugins/modules/gitlab_instance_variable.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+
+# Copyright (c) 2023, Benedikt Braunger (bebr@adm.ku.dk)
+# Based on code:
+# Copyright (c) 2020, Florent Madiot (scodeman@scode.io)
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_instance_variable
+short_description: Creates, updates, or deletes GitLab instance variables
+version_added: 7.1.0
+description:
+  - Creates an instance variable if it does not exist.
+  - When an instance variable does exist, its value is updated if the values are different.
+  - Support for instance variables requires GitLab >= 13.0.
+  - Variables which are not mentioned in the module's options, but are present on the GitLab instance, either stay (O(purge=false))
+    or are deleted (O(purge=true)).
+author:
+  - Benedikt Braunger (@benibr)
+requirements:
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete instance variable.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  purge:
+    description:
+      - When set to V(true), delete all variables which are not mentioned in the task.
+    default: false
+    type: bool
+  variables:
+    description:
+      - A list of dictionaries that represent CI/CD variables.
+ default: [] + type: list + elements: dict + suboptions: + name: + description: + - The name of the variable. + type: str + required: true + value: + description: + - The variable value. + - Required when O(state=present). + type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.8. + type: str + version_added: '11.4.0' + masked: + description: + - Whether variable value is masked or not. + type: bool + default: false + protected: + description: + - Whether variable value is protected or not. + type: bool + default: false + raw: + description: + - Whether variable value is raw or not. + - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: 10.2.0 + variable_type: + description: + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). + type: str + choices: ["env_var", "file"] + default: env_var +""" + + +EXAMPLES = r""" +- name: Set or update some CI/CD variables + community.general.gitlab_instance_variable: + api_url: https://gitlab.com + api_token: secret_access_token + purge: false + variables: + - name: ACCESS_KEY_ID + value: abc1312cba + - name: SECRET_ACCESS_KEY + value: 1337 + masked: true + protected: true + variable_type: env_var + +- name: Delete one variable + community.general.gitlab_instance_variable: + api_url: https://gitlab.com + api_token: secret_access_token + state: absent + variables: + - name: ACCESS_KEY_ID +""" + +RETURN = r""" +instance_variable: + description: Four lists of the variablenames which were added, updated, removed or exist. + returned: always + type: dict + contains: + added: + description: A list of variables which were created. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + untouched: + description: A list of variables which exist. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + removed: + description: A list of variables which were deleted. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + updated: + description: A list pre-existing variables whose values have been set. 
+ returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, + list_all_kwargs +) + + +class GitlabInstanceVariables(object): + + def __init__(self, module, gitlab_instance): + self.instance = gitlab_instance + self._module = module + + def list_all_instance_variables(self): + return list(self.instance.variables.list(**list_all_kwargs)) + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "description": var_obj.get('description'), + "masked": var_obj.get('masked'), + "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), + "variable_type": var_obj.get('variable_type'), + } + + self.instance.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.instance.variables.delete(var_obj.get('key')) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_instance_variables() + before = [x.attributes for x in gitlab_keys] + + existing_variables = filter_returned_variables(gitlab_keys) + + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('raw') is None: + item['raw'] = False + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = 
this_gitlab.list_all_instance_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value, type, and description do not matter on removing variables. + keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + change = True + + gitlab_keys = this_gitlab.list_all_instance_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + purge=dict(type='bool', default=False), + variables=dict(type='list', elements='dict', default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + description=dict(type='str'), + masked=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + purge = module.params['purge'] + state = module.params['state'] + + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required in state present') + + this_gitlab = GitlabInstanceVariables(module=module, gitlab_instance=gitlab_instance) + + changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item['name'] = item.pop('key') + for item in before: + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] + removed = [x.get('key') for x in raw_return_value['removed']] + untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + 
return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=changed, instance_variable=return_value) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py new file mode 100644 index 0000000000..aab9f2a346 --- /dev/null +++ b/plugins/modules/gitlab_issue.py @@ -0,0 +1,400 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com) +# Based on code: +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_issue +short_description: Create, update, or delete GitLab issues +version_added: '8.1.0' +description: + - Creates an issue if it does not exist. + - When an issue does exist, it is updated if the provided parameters are different. + - When an issue does exist and O(state=absent), the issue is deleted. + - When multiple issues are detected, the task fails. + - Existing issues are matched based on O(title) and O(state_filter) filters. +author: + - zvaraondrej (@zvaraondrej) +requirements: + - python-gitlab >= 2.3.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + assignee_ids: + description: + - A list of assignee usernames omitting V(@) character. + - Set to an empty array to unassign all assignees. + type: list + elements: str + description: + description: + - A description of the issue. + - Gets overridden by a content of file specified at O(description_path), if found. + type: str + description_path: + description: + - A path of file containing issue's description. + - Accepts MarkDown formatted files. + type: path + issue_type: + description: + - Type of the issue. + default: issue + type: str + choices: ["issue", "incident", "test_case"] + labels: + description: + - A list of label names. + - Set to an empty array to remove all labels. + type: list + elements: str + milestone_search: + description: + - The name of the milestone. + - Set to empty string to unassign milestone. + type: str + milestone_group_id: + description: + - The path or numeric ID of the group hosting desired milestone. + type: str + project: + description: + - The path or name of the project. + required: true + type: str + state: + description: + - Create or delete issue. + default: present + type: str + choices: ["present", "absent"] + state_filter: + description: + - Filter specifying state of issues while searching. + type: str + choices: ["opened", "closed"] + default: opened + title: + description: + - A title for the issue. The title is used as a unique identifier to ensure idempotency. 
+ type: str + required: true +""" + + +EXAMPLES = r""" +- name: Create Issue + community.general.gitlab_issue: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + title: "Ansible demo Issue" + description: "Demo Issue description" + labels: + - Ansible + - Demo + assignee_ids: + - testassignee + state_filter: "opened" + state: present + +- name: Delete Issue + community.general.gitlab_issue: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + title: "Ansible demo Issue" + state_filter: "opened" + state: absent +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +issue: + description: API object. + returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.common.text.converters import to_native, to_text + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, find_project, find_group +) + + +class GitlabIssue(object): + + def __init__(self, module, project, gitlab_instance): + self._gitlab = gitlab_instance + self._module = module + self.project = project + + ''' + @param milestone_id Title of the milestone + ''' + def get_milestone(self, milestone_id, group): + milestones = [] + try: + milestones = group.milestones.list(search=milestone_id) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Milestones: %s" % to_native(e)) + + if len(milestones) > 1: + self._module.fail_json(msg="Multiple Milestones matched search criteria.") + if len(milestones) < 1: + self._module.fail_json(msg="No Milestones matched search criteria.") + if len(milestones) == 1: + try: + return group.milestones.get(id=milestones[0].id) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Milestones: %s" % to_native(e)) + + ''' + @param title Title of the Issue + @param state_filter Issue's state to filter on + ''' + def get_issue(self, title, state_filter): + issues = [] + try: + issues = self.project.issues.list(query_parameters={"search": title, "in": "title", "state": state_filter}) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Issues: %s" % to_native(e)) + + if len(issues) > 1: + self._module.fail_json(msg="Multiple Issues matched search criteria.") + if len(issues) == 1: + try: + return self.project.issues.get(id=issues[0].iid) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Issue: %s" % to_native(e)) + + ''' + @param username Name of the user + ''' + def get_user(self, username): + users = [] + try: + users = [user for user in self.project.users.list(username=username, all=True) if user.username == username] + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the users: %s" % to_native(e)) + + if len(users) > 1: + self._module.fail_json(msg="Multiple Users matched search criteria.") + elif len(users) < 1: + self._module.fail_json(msg="No User matched search criteria.") + else: + return users[0] + + ''' + @param users List of usernames + ''' + def get_user_ids(self, users): + return [self.get_user(user).id for user in users] + + ''' + @param options Options of the Issue + ''' + def create_issue(self, options): + if self._module.check_mode: 
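+            # Check mode: report the would-be creation and exit without
+            # calling the GitLab API.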
+ self._module.exit_json(changed=True, msg="Successfully created Issue '%s'." % options["title"]) + + try: + return self.project.issues.create(options) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to create Issue: %s " % to_native(e)) + + ''' + @param issue Issue object to delete + ''' + def delete_issue(self, issue): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully deleted Issue '%s'." % issue["title"]) + + try: + return issue.delete() + except gitlab.exceptions.GitlabDeleteError as e: + self._module.fail_json(msg="Failed to delete Issue: '%s'." % to_native(e)) + + ''' + @param issue Issue object to update + @param options Options of the Issue + ''' + def update_issue(self, issue, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated Issue '%s'." % issue["title"]) + + try: + return self.project.issues.update(issue.iid, options) + except gitlab.exceptions.GitlabUpdateError as e: + self._module.fail_json(msg="Failed to update Issue %s." % to_native(e)) + + ''' + @param issue Issue object to evaluate + @param options New options to update Issue with + ''' + def issue_has_changed(self, issue, options): + for key, value in options.items(): + if value is not None: + + if key == 'milestone_id': + old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else "" + if value != old_milestone: + return True + elif key == 'assignee_ids': + if value != sorted([user["id"] for user in getattr(issue, 'assignees')]): + return True + + elif key == 'labels': + if value != sorted(getattr(issue, key)): + return True + + elif getattr(issue, key) != value: + return True + + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + assignee_ids=dict(type='list', elements='str'), + description=dict(type='str'), + description_path=dict(type='path'), + issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]), + labels=dict(type='list', elements='str'), + milestone_search=dict(type='str'), + milestone_group_id=dict(type='str'), + project=dict(type='str', required=True), + state=dict(type='str', default="present", choices=["absent", "present"]), + state_filter=dict(type='str', default="opened", choices=["opened", "closed"]), + title=dict(type='str', required=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['description', 'description_path'], + ], + required_together=[ + ['api_username', 'api_password'], + ['milestone_search', 'milestone_group_id'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + assignee_ids = module.params['assignee_ids'] + description = module.params['description'] + description_path = module.params['description_path'] + issue_type = module.params['issue_type'] + labels = module.params['labels'] + milestone_id = module.params['milestone_search'] + milestone_group_id = module.params['milestone_group_id'] + project = module.params['project'] + state = module.params['state'] + state_filter = module.params['state_filter'] + title = module.params['title'] + + # check prerequisites and connect to gitlab server + gitlab_instance = 
gitlab_authentication(module, min_version='2.3.0') + + this_project = find_project(gitlab_instance, project) + if this_project is None: + module.fail_json(msg="Failed to get the project: %s" % project) + + this_gitlab = GitlabIssue(module=module, project=this_project, gitlab_instance=gitlab_instance) + + if milestone_id and milestone_group_id: + this_group = find_group(gitlab_instance, milestone_group_id) + if this_group is None: + module.fail_json(msg="Failed to get the group: %s" % milestone_group_id) + + milestone_id = this_gitlab.get_milestone(milestone_id, this_group).id + + this_issue = this_gitlab.get_issue(title, state_filter) + + if state == "present": + if description_path: + try: + with open(description_path, 'rb') as f: + description = to_text(f.read(), errors='surrogate_or_strict') + except IOError as e: + module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e)) + + # sorting necessary in order to properly detect changes, as we don't want to get false positive + # results due to differences in ids ordering; + assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids)) if assignee_ids else assignee_ids + labels = sorted(labels) if labels else labels + + options = { + "title": title, + "description": description, + "labels": labels, + "issue_type": issue_type, + "milestone_id": milestone_id, + "assignee_ids": assignee_ids, + } + + if not this_issue: + issue = this_gitlab.create_issue(options) + module.exit_json( + changed=True, msg="Created Issue '{t}'.".format(t=title), + issue=issue.asdict() + ) + else: + if this_gitlab.issue_has_changed(this_issue, options): + issue = this_gitlab.update_issue(this_issue, options) + module.exit_json( + changed=True, msg="Updated Issue '{t}'.".format(t=title), + issue=issue + ) + else: + module.exit_json( + changed=False, msg="Issue '{t}' already exists".format(t=title), + issue=this_issue.asdict() + ) + elif state == "absent": + if not this_issue: + module.exit_json(changed=False, msg="Issue '{t}' does not exist or has already been deleted.".format(t=title)) + else: + issue = this_gitlab.delete_issue(this_issue) + module.exit_json( + changed=True, msg="Issue '{t}' deleted.".format(t=title), + issue=issue + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py new file mode 100644 index 0000000000..5b6d80e20c --- /dev/null +++ b/plugins/modules/gitlab_label.py @@ -0,0 +1,492 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_label +short_description: Creates/updates/deletes GitLab Labels belonging to project or group +version_added: 8.3.0 +description: + - When a label does not exist, it is created. + - When a label does exist, its value is updated when the values are different. + - Labels can be purged. +author: + - "Gabriele Pongelli (@gpongelli)" +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete project or group label. 
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  purge:
+    description:
+      - When set to V(true), delete all labels which are not mentioned in the task.
+    default: false
+    type: bool
+    required: false
+  project:
+    description:
+      - The path and name of the project. Either this or O(group) is required.
+    required: false
+    type: str
+  group:
+    description:
+      - The path of the group. Either this or O(project) is required.
+    required: false
+    type: str
+  labels:
+    description:
+      - A list of dictionaries that represent the GitLab project's or group's labels.
+    type: list
+    elements: dict
+    required: false
+    default: []
+    suboptions:
+      name:
+        description:
+          - The name of the label.
+        type: str
+        required: true
+      color:
+        description:
+          - The color of the label.
+          - Required when O(state=present).
+        type: str
+      priority:
+        description:
+          - Integer value to give priority to the label.
+        type: int
+        required: false
+        default:
+      description:
+        description:
+          - Label's description.
+        type: str
+        default:
+      new_name:
+        description:
+          - Optional field to change the label's name.
+        type: str
+        default:
+"""
+
+
+EXAMPLES = r"""
+# the same tasks shown here for a project can also be executed for a group
+- name: Create one Label
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+        color: "#123456"
+    state: present
+
+- name: Create many group labels
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    group: "group1"
+    labels:
+      - name: label_one
+        color: "#123456"
+        description: this is a label
+        priority: 20
+      - name: label_two
+        color: "#554422"
+    state: present
+
+- name: Create many project labels
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+        color: "#123456"
+        description: this is a label
+        priority: 20
+      - name: label_two
+        color: "#554422"
+    state: present
+
+- name: Set or update some labels
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+        color: "#224488"
+    state: present
+
+- name: Add label in check mode
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+        color: "#224488"
+  check_mode: true
+
+- name: Delete Label
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+    state: absent
+
+- name: Change Label name
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+        new_name: label_two
+    state: present
+
+- name: Purge all labels
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    purge: true
+
+- name: Delete many labels
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    state: absent
+    labels:
+      - name: label-abc123
+      - name: label-two
+"""
+
+RETURN = r"""
+labels:
+  description: Four lists of the labels which were added, updated, removed or exist.
+  returned: success
+  type: dict
+  contains:
+    added:
+      description: A list of labels which were created.
+      returned: always
+      type: list
+      sample: ["abcd", "label-one"]
+    untouched:
+      description: A list of labels which already existed.
+      returned: always
+      type: list
+      sample: ["defg", "new-label"]
+    removed:
+      description: A list of labels which were deleted.
+      returned: always
+      type: list
+      sample: ["defg", "new-label"]
+    updated:
+      description: A list of pre-existing labels whose values have been set.
+      returned: always
+      type: list
+      sample: ["defg", "new-label"]
+labels_obj:
+  description: API object.
+  returned: success
+  type: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project
+)
+
+
+class GitlabLabels(object):
+
+    def __init__(self, module, gitlab_instance, group_id, project_id):
+        self._gitlab = gitlab_instance
+        self.gitlab_object = group_id if group_id else project_id
+        self.is_group_label = True if group_id else False
+        self._module = module
+
+    def list_all_labels(self):
+        page_nb = 1
+        labels = []
+        vars_page = self.gitlab_object.labels.list(page=page_nb)
+        while len(vars_page) > 0:
+            labels += vars_page
+            page_nb += 1
+            vars_page = self.gitlab_object.labels.list(page=page_nb)
+        return labels
+
+    def create_label(self, var_obj):
+        if self._module.check_mode:
+            return True, True
+
+        var = {
+            "name": var_obj.get('name'),
+            "color": var_obj.get('color'),
+        }
+
+        if var_obj.get('description') is not None:
+            var["description"] = var_obj.get('description')
+
+        if var_obj.get('priority') is not None:
+            var["priority"] = var_obj.get('priority')
+
+        _obj = self.gitlab_object.labels.create(var)
+        return True, _obj.asdict()
+
+    def update_label(self, var_obj):
+        if self._module.check_mode:
+            return True, True
+        _label = self.gitlab_object.labels.get(var_obj.get('name'))
+
+        if var_obj.get('new_name') is not None:
+            _label.new_name = var_obj.get('new_name')
+
+        if var_obj.get('description') is not None:
+            _label.description = var_obj.get('description')
+        if var_obj.get('priority') is not None:
+            _label.priority = var_obj.get('priority')
+        if var_obj.get('color') is not None:
+            _label.color = var_obj.get('color')
+
+        # save() returns None
+        _label.save()
+        return True, _label.asdict()
+
+    def delete_label(self, var_obj):
+        if self._module.check_mode:
+            return True, True
+        _label = self.gitlab_object.labels.get(var_obj.get('name'))
+        # delete() returns None
+        _label.delete()
+        return True, _label.asdict()
+
+
+def compare(requested_labels, existing_labels, state):
+    # This comparison reproduces the behaviour of a previous version of this code;
+    # it is not strictly necessary and may still carry that version's quirks, but
+    # it is required for check mode, where it predicts the outcome of a run.
+    # The logic below represents state 'present' without purge; all other cases
+    # can be derived from it:
+    # untouched => present in both
+    # updated   => the name already exists, but other fields differ
+    # added     => the name does not exist yet
+    untouched = list()
+    updated = list()
+    added = list()
+
+    if state == 'present':
+        _existing_labels = list()
+        for item in existing_labels:
+            _existing_labels.append({'name': item.get('name')})
+
+        for var in requested_labels:
+            if var in existing_labels:
+                untouched.append(var)
+            else:
+                compare_item = {'name': var.get('name')}
+                if compare_item in _existing_labels:
+                    updated.append(var)
+                else:
+                    added.append(var)
+
+    return untouched, updated, added
+
+
+def native_python_main(this_gitlab, purge, requested_labels, state, module):
+    change = False
+    return_value = dict(added=[], updated=[], removed=[], untouched=[])
+    return_obj = dict(added=[], updated=[], removed=[])
+
+    labels_before = [x.asdict() for x in this_gitlab.list_all_labels()]
+
+    # filter out and enrich before comparing
+    for item in requested_labels:
+        # add defaults when not present
+        if item.get('description') is None:
+            item['description'] = ""
+        if item.get('new_name') is None:
+            item['new_name'] = None
+        if item.get('priority') is None:
+            item['priority'] = None
+
+        # group labels do not have a priority; remove it for the comparison
+        if this_gitlab.is_group_label:
+            item.pop('priority')
+
+    for item in labels_before:
+        # remove fields that only exist on the server side
+        item.pop('id')
+        item.pop('description_html')
+        item.pop('text_color')
+        item.pop('subscribed')
+        # field present only when it is a project's label
+        if 'is_project_label' in item:
+            item.pop('is_project_label')
+        item['new_name'] = None
+
+    if state == 'present':
+        add_or_update = [x for x in requested_labels if x not in labels_before]
+        for item in add_or_update:
+            try:
+                _rv, _obj = this_gitlab.create_label(item)
+                if _rv:
+                    return_value['added'].append(item)
+                    return_obj['added'].append(_obj)
+            except Exception:
+                # create() raises an exception when the label already exists,
+                # in which case the label is updated instead
+                _rv, _obj = this_gitlab.update_label(item)
+                if _rv:
+                    return_value['updated'].append(item)
+                    return_obj['updated'].append(_obj)
+
+        if purge:
+            # re-fetch
+            _labels = this_gitlab.list_all_labels()
+
+            for item in labels_before:
+                _rv, _obj = this_gitlab.delete_label(item)
+                if _rv:
+                    return_value['removed'].append(item)
+                    return_obj['removed'].append(_obj)
+
+    elif state == 'absent':
+        if not purge:
+            _label_names_requested = [x['name'] for x in requested_labels]
+            remove_requested = [x for x in labels_before if x['name'] in _label_names_requested]
+            for item in remove_requested:
+                _rv, _obj = this_gitlab.delete_label(item)
+                if _rv:
+                    return_value['removed'].append(item)
+                    return_obj['removed'].append(_obj)
+        else:
+            for item in labels_before:
+                _rv, _obj = this_gitlab.delete_label(item)
+                if _rv:
+                    return_value['removed'].append(item)
+                    return_obj['removed'].append(_obj)
+
+    if module.check_mode:
+        _untouched, _updated, _added = compare(requested_labels, labels_before, state)
+        return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched)
+
+    if any(return_value[x] for x in ['added', 'removed', 'updated']):
+        change = True
+
+    labels_after = [x.asdict() for x in this_gitlab.list_all_labels()]
+
+    return change, return_value, labels_before, labels_after, return_obj
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(auth_argument_spec())
+    argument_spec.update(
+        project=dict(type='str'),
+        group=dict(type='str'),
+        purge=dict(type='bool',
default=False), + labels=dict(type='list', elements='dict', default=list(), + options=dict( + name=dict(type='str', required=True), + color=dict(type='str'), + description=dict(type='str'), + priority=dict(type='int'), + new_name=dict(type='str')) + ), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['project', 'group'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + gitlab_group = module.params['group'] + purge = module.params['purge'] + label_list = module.params['labels'] + state = module.params['state'] + + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + + # find_project can return None, but the other must exist + gitlab_project_id = find_project(gitlab_instance, gitlab_project) + + # find_group can return None, but the other must exist + gitlab_group_id = find_group(gitlab_instance, gitlab_group) + + # if both not found, module must exist + if not gitlab_project_id and not gitlab_group_id: + if gitlab_project and not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + if gitlab_group and not gitlab_group_id: + module.fail_json(msg="group '%s' not found." % gitlab_group) + + this_gitlab = GitlabLabels(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, + project_id=gitlab_project_id) + + if state == 'present': + _existing_labels = [x.asdict()['name'] for x in this_gitlab.list_all_labels()] + + # color is mandatory when creating label, but it is optional when changing name or updating other fields + if any(x['color'] is None and x['new_name'] is None and x['name'] not in _existing_labels for x in label_list): + module.fail_json(msg='color parameter is required for new labels') + + change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, label_list, state, module) + + if not module.check_mode: + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('name') for x in raw_return_value['added']] + updated = [x.get('name') for x in raw_return_value['updated']] + removed = [x.get('name') for x in raw_return_value['removed']] + untouched = [x.get('name') for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=change, labels=return_value, labels_obj=_obj) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py new file mode 100644 index 0000000000..83000a8ac1 --- /dev/null +++ b/plugins/modules/gitlab_merge_request.py @@ -0,0 +1,413 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com) +# Based on code: +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_merge_request
+short_description: Create, update, or delete GitLab merge requests
+version_added: 7.1.0
+description:
+  - Creates a merge request if it does not exist.
+  - When a single merge request does exist, it is updated if the provided parameters are different.
+  - When a single merge request does exist and O(state=absent), the merge request is deleted.
+  - When multiple merge requests are detected, the task fails.
+  - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters.
author:
+  - zvaraondrej (@zvaraondrej)
+requirements:
+  - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete merge request.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  project:
+    description:
+      - The path or name of the project.
+    required: true
+    type: str
+  source_branch:
+    description:
+      - Merge request's source branch.
+      - Ignored while updating an existing merge request.
+    required: true
+    type: str
+  target_branch:
+    description:
+      - Merge request's target branch.
+    required: true
+    type: str
+  title:
+    description:
+      - A title for the merge request.
+    type: str
+    required: true
+  description:
+    description:
+      - A description for the merge request.
+      - Gets overridden by the content of the file specified at O(description_path), if found.
+    type: str
+  description_path:
+    description:
+      - Path of the file containing the merge request's description.
+      - Accepts Markdown-formatted files.
+    type: path
+  labels:
+    description:
+      - Comma-separated list of label names.
+    type: str
+    default: ""
+  remove_source_branch:
+    description:
+      - Flag indicating whether a merge request should remove the source branch when merging.
+    type: bool
+    default: false
+  state_filter:
+    description:
+      - Filter specifying the state of merge requests while searching.
+    type: str
+    choices: ["opened", "closed", "locked", "merged"]
+    default: opened
+  assignee_ids:
+    description:
+      - Comma-separated list of assignee usernames, omitting the V(@) character.
+      - Set to an empty string to unassign all assignees.
+    type: str
+  reviewer_ids:
+    description:
+      - Comma-separated list of reviewer usernames, omitting the V(@) character.
+      - Set to an empty string to unassign all reviewers.
+    type: str
+"""
+
+
+EXAMPLES = r"""
+- name: Create Merge Request from branch1 to branch2
+  community.general.gitlab_merge_request:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    source_branch: branch1
+    target_branch: branch2
+    title: "Ansible demo MR"
+    description: "Demo MR description"
+    labels: "Ansible,Demo"
+    state_filter: "opened"
+    remove_source_branch: true
+    state: present
+
+- name: Delete Merge Request from branch1 to branch2
+  community.general.gitlab_merge_request:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    source_branch: branch1
+    target_branch: branch2
+    title: "Ansible demo MR"
+    state_filter: "opened"
+    state: absent
+"""
+
+RETURN = r"""
+msg:
+  description: Success or failure message.
+  returned: always
+  type: str
+  sample: "Success"
+
+mr:
+  description: API object.
+  returned: success
+  type: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, gitlab, find_project
+)
+
+
+class GitlabMergeRequest(object):
+
+    def __init__(self, module, project, gitlab_instance):
+        self._gitlab = gitlab_instance
+        self._module = module
+        self.project = project
+
+    '''
+    @param branch Name of the branch
+    '''
+    def get_branch(self, branch):
+        try:
+            return self.project.branches.get(branch)
+        except gitlab.exceptions.GitlabGetError as e:
+            self._module.fail_json(msg="Failed to get the branch: %s" % to_native(e))
+
+    '''
+    @param title Title of the Merge Request
+    @param source_branch Merge Request's source branch
+    @param target_branch Merge Request's target branch
+    @param state_filter Merge Request's state to filter on
+    '''
+    def get_mr(self, title, source_branch, target_branch, state_filter):
+        mrs = []
+        try:
+            mrs = self.project.mergerequests.list(search=title, source_branch=source_branch, target_branch=target_branch, state=state_filter)
+        except gitlab.exceptions.GitlabGetError as e:
+            self._module.fail_json(msg="Failed to list the Merge Requests: %s" % to_native(e))
+
+        if len(mrs) > 1:
+            self._module.fail_json(msg="Multiple Merge Requests matched search criteria.")
+        if len(mrs) == 1:
+            try:
+                return self.project.mergerequests.get(id=mrs[0].iid)
+            except gitlab.exceptions.GitlabGetError as e:
+                self._module.fail_json(msg="Failed to get the Merge Request: %s" % to_native(e))
+
+    '''
+    @param username Name of the user
+    '''
+    def get_user(self, username):
+        users = []
+        try:
+            users = [user for user in self.project.users.list(username=username, all=True) if user.username == username]
+        except gitlab.exceptions.GitlabGetError as e:
+            self._module.fail_json(msg="Failed to list the users: %s" % to_native(e))
+
+        if len(users) > 1:
+            self._module.fail_json(msg="Multiple Users matched search criteria.")
+        elif len(users) < 1:
+            self._module.fail_json(msg="No User matched search criteria.")
+        else:
+            return users[0]
+
+    '''
+    @param users List of usernames
+    '''
+    def get_user_ids(self, users):
+        return [self.get_user(user).id for user in users]
+
+    '''
+    @param options Options of the Merge Request
+    '''
+    def create_mr(self, options):
+        if self._module.check_mode:
+            self._module.exit_json(changed=True, msg="Successfully created the Merge Request %s" % options["title"])
+
+        try:
+            return self.project.mergerequests.create(options)
+        except gitlab.exceptions.GitlabCreateError as e:
+            self._module.fail_json(msg="Failed to create Merge Request: %s" % to_native(e))
+
+    '''
+    @param mr Merge Request object to delete
+    '''
+    def delete_mr(self, mr):
+        if self._module.check_mode:
+            self._module.exit_json(changed=True, msg="Successfully deleted the Merge Request %s" % mr.title)
+
+        try:
+            return mr.delete()
+        except gitlab.exceptions.GitlabDeleteError as e:
+            self._module.fail_json(msg="Failed to delete Merge Request: %s" % to_native(e))
+
+    '''
+    @param mr Merge Request object to update
+    '''
+    def update_mr(self, mr, options):
+        if self._module.check_mode:
+            self._module.exit_json(changed=True, msg="Successfully updated the Merge Request %s" % mr.title)
+
+        try:
+            return
self.project.mergerequests.update(mr.iid, options) + except gitlab.exceptions.GitlabUpdateError as e: + self._module.fail_json(msg="Failed to update Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to evaluate + @param options New options to update MR with + ''' + def mr_has_changed(self, mr, options): + for key, value in options.items(): + if value is not None: + # see https://gitlab.com/gitlab-org/gitlab-foss/-/issues/27355 + if key == 'remove_source_branch': + key = 'force_remove_source_branch' + + if key == 'assignee_ids': + if value != sorted([user["id"] for user in getattr(mr, 'assignees')]): + return True + + elif key == 'reviewer_ids': + if value != sorted([user["id"] for user in getattr(mr, 'reviewers')]): + return True + + elif key == 'labels': + if value != sorted(getattr(mr, key)): + return True + + elif getattr(mr, key) != value: + return True + + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str', required=True), + source_branch=dict(type='str', required=True), + target_branch=dict(type='str', required=True), + title=dict(type='str', required=True), + description=dict(type='str'), + labels=dict(type='str', default=""), + description_path=dict(type='path'), + remove_source_branch=dict(type='bool', default=False), + state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]), + assignee_ids=dict(type='str'), + reviewer_ids=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['description', 'description_path'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + required_if=[ + ['state', 'present', ['source_branch', 'target_branch', 'title'], True], + ['state', 'absent', ['source_branch', 'target_branch', 'title'], True], + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + project = module.params['project'] + source_branch = module.params['source_branch'] + target_branch = module.params['target_branch'] + title = module.params['title'] + description = module.params['description'] + labels = module.params['labels'] + description_path = module.params['description_path'] + remove_source_branch = module.params['remove_source_branch'] + state_filter = module.params['state_filter'] + assignee_ids = module.params['assignee_ids'] + reviewer_ids = module.params['reviewer_ids'] + state = module.params['state'] + + gitlab_version = gitlab.__version__ + if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + module.fail_json(msg="community.general.gitlab_merge_request requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + " Please upgrade python-gitlab to version 2.3.0 or above." 
% gitlab_version)
+
+    this_project = find_project(gitlab_instance, project)
+    if this_project is None:
+        module.fail_json(msg="Failed to get the project: %s" % project)
+
+    this_gitlab = GitlabMergeRequest(module=module, project=this_project, gitlab_instance=gitlab_instance)
+
+    r_source_branch = this_gitlab.get_branch(source_branch)
+    if not r_source_branch:
+        module.fail_json(msg="Source branch {b} does not exist.".format(b=source_branch))
+
+    r_target_branch = this_gitlab.get_branch(target_branch)
+    if not r_target_branch:
+        module.fail_json(msg="Destination branch {b} does not exist.".format(b=target_branch))
+
+    this_mr = this_gitlab.get_mr(title, source_branch, target_branch, state_filter)
+
+    if state == "present":
+        if description_path:
+            try:
+                with open(description_path, 'rb') as f:
+                    description = to_text(f.read(), errors='surrogate_or_strict')
+            except IOError as e:
+                module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e))
+
+        # sorting is necessary to properly detect changes, as we don't want false positive
+        # results due to differences in the ordering of IDs; see `mr_has_changed()`
+        assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids.split(","))) if assignee_ids else []
+        reviewer_ids = sorted(this_gitlab.get_user_ids(reviewer_ids.split(","))) if reviewer_ids else []
+        labels = sorted(labels.split(",")) if labels else []
+
+        options = {
+            "target_branch": target_branch,
+            "title": title,
+            "description": description,
+            "labels": labels,
+            "remove_source_branch": remove_source_branch,
+            "reviewer_ids": reviewer_ids,
+            "assignee_ids": assignee_ids,
+        }
+
+        if not this_mr:
+            options["source_branch"] = source_branch
+
+            mr = this_gitlab.create_mr(options)
+            module.exit_json(
+                changed=True, msg="Created the Merge Request {t} from branch {s} to branch {d}.".format(t=title, d=target_branch, s=source_branch),
+                mr=mr.asdict()
+            )
+        else:
+            if this_gitlab.mr_has_changed(this_mr, options):
+                mr = this_gitlab.update_mr(this_mr, options)
+                module.exit_json(
+                    changed=True, msg="Merge Request {t} from branch {s} to branch {d} updated.".format(t=title, d=target_branch, s=source_branch),
+                    mr=mr
+                )
+            else:
+                module.exit_json(
+                    changed=False, msg="Merge Request {t} from branch {s} to branch {d} already exists.".format(t=title, d=target_branch, s=source_branch),
+                    mr=this_mr.asdict()
+                )
+    elif this_mr and state == "absent":
+        mr = this_gitlab.delete_mr(this_mr)
+        module.exit_json(
+            changed=True, msg="Merge Request {t} from branch {s} to branch {d} deleted.".format(t=title, d=target_branch, s=source_branch),
+            mr=mr
+        )
+    else:
+        module.exit_json(changed=False, msg="No changes are needed.", mr=this_mr.asdict())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py
new file mode 100644
index 0000000000..bb4992117c
--- /dev/null
+++ b/plugins/modules/gitlab_milestone.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+
+# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_milestone
+short_description: Creates/updates/deletes GitLab Milestones belonging to project or group
+version_added: 8.3.0
+description:
+  - When a milestone does not exist, it is created.
+  - When a milestone does exist, its value is updated when the values are different.
+  - Milestones can be purged.
+author:
+  - "Gabriele Pongelli (@gpongelli)"
+requirements:
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete milestone.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  purge:
+    description:
+      - When set to V(true), delete all milestones which are not mentioned in the task.
+    default: false
+    type: bool
+    required: false
+  project:
+    description:
+      - The path and name of the project. Either this or O(group) is required.
+    required: false
+    type: str
+  group:
+    description:
+      - The path of the group. Either this or O(project) is required.
+    required: false
+    type: str
+  milestones:
+    description:
+      - A list of dictionaries that represent the GitLab project's or group's milestones.
+    type: list
+    elements: dict
+    required: false
+    default: []
+    suboptions:
+      title:
+        description:
+          - The name of the milestone.
+        type: str
+        required: true
+      due_date:
+        description:
+          - Milestone due date in YYYY-MM-DD format.
+        type: str
+        required: false
+        default: null
+      start_date:
+        description:
+          - Milestone start date in YYYY-MM-DD format.
+        type: str
+        required: false
+        default: null
+      description:
+        description:
+          - Milestone's description.
+        type: str
+        default: null
+"""
+
+
+EXAMPLES = r"""
+# the same tasks shown here for a project can also be executed for a group
+- name: Create one milestone
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    milestones:
+      - title: milestone_one
+        start_date: "2024-01-04"
+    state: present
+
+- name: Create many group milestones
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    group: "group1"
+    milestones:
+      - title: milestone_one
+        start_date: "2024-01-04"
+        description: this is a milestone
+        due_date: "2024-02-04"
+      - title: milestone_two
+    state: present
+
+- name: Create many project milestones
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    milestones:
+      - title: milestone_one
+        start_date: "2024-01-04"
+        description: this is a milestone
+        due_date: "2024-02-04"
+      - title: milestone_two
+    state: present
+
+- name: Set or update some milestones
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    milestones:
+      - title: milestone_one
+        start_date: "2024-05-04"
+    state: present
+
+- name: Add milestone in check mode
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    milestones:
+      - title: milestone_one
+        start_date: "2024-05-04"
+  check_mode: true
+
+- name: Delete milestone
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    milestones:
+      - title: milestone_one
+    state: absent
+
+- name: Purge all milestones
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    purge: true
+
+- name: Delete many milestones
+  community.general.gitlab_milestone:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    state: absent
+    milestones:
+      - title: milestone-abc123
+      - title: milestone-two
+"""
+
+RETURN = r"""
+milestones:
+  description: Four lists of the milestones which were added, updated, removed or exist.
+  returned: success
+  type: dict
+  contains:
+    added:
+      description: A list of milestones which were created.
+      returned: always
+      type: list
+      sample: ["abcd", "milestone-one"]
+    untouched:
+      description: A list of milestones which already existed.
+      returned: always
+      type: list
+      sample: ["defg", "new-milestone"]
+    removed:
+      description: A list of milestones which were deleted.
+      returned: always
+      type: list
+      sample: ["defg", "new-milestone"]
+    updated:
+      description: A list of pre-existing milestones whose values have been set.
+      returned: always
+      type: list
+      sample: ["defg", "new-milestone"]
+milestones_obj:
+  description: API object.
+  returned: success
+  type: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project
+)
+from datetime import datetime
+
+
+class GitlabMilestones(object):
+
+    def __init__(self, module, gitlab_instance, group_id, project_id):
+        self._gitlab = gitlab_instance
+        self.gitlab_object = group_id if group_id else project_id
+        self.is_group_milestone = True if group_id else False
+        self._module = module
+
+    def list_all_milestones(self):
+        page_nb = 1
+        milestones = []
+        vars_page = self.gitlab_object.milestones.list(page=page_nb)
+        while len(vars_page) > 0:
+            milestones += vars_page
+            page_nb += 1
+            vars_page = self.gitlab_object.milestones.list(page=page_nb)
+        return milestones
+
+    def create_milestone(self, var_obj):
+        if self._module.check_mode:
+            return True, True
+
+        var = {
+            "title": var_obj.get('title'),
+        }
+
+        if var_obj.get('description') is not None:
+            var["description"] = var_obj.get('description')
+
+        if var_obj.get('start_date') is not None:
+            var["start_date"] = self.check_date(var_obj.get('start_date'))
+
+        if var_obj.get('due_date') is not None:
+            var["due_date"] = self.check_date(var_obj.get('due_date'))
+
+        _obj = self.gitlab_object.milestones.create(var)
+        return True, _obj.asdict()
+
+    def update_milestone(self, var_obj):
+        if self._module.check_mode:
+            return True, True
+        _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title')))
+
+        if var_obj.get('description') is not None:
+            _milestone.description = var_obj.get('description')
+
+        if var_obj.get('start_date') is not None:
+            _milestone.start_date = var_obj.get('start_date')
+
+        if var_obj.get('due_date') is not None:
+            _milestone.due_date = var_obj.get('due_date')
+
+        # save() returns None
+        _milestone.save()
+        return True, _milestone.asdict()
+
+    def get_milestone_id(self, _title):
+        _milestone_list = self.gitlab_object.milestones.list()
+        _found = [x for x in _milestone_list if x.title == _title]
+        if _found:
+            return _found[0].id
+        else:
+            self._module.fail_json(msg="milestone '%s' not found." % _title)
+
+    def check_date(self, _date):
+        try:
+            datetime.strptime(_date, '%Y-%m-%d')
+        except ValueError:
+            self._module.fail_json(msg="milestone's date '%s' is not in YYYY-MM-DD format."
% _date) + return _date + + def delete_milestone(self, var_obj): + if self._module.check_mode: + return True, True + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + # delete returns None + _milestone.delete() + return True, _milestone.asdict() + + +def compare(requested_milestones, existing_milestones, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might result in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. all other can be derived from that + # untouched => equal in both + # updated => title are equal + # added => title does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + _existing_milestones = list() + for item in existing_milestones: + _existing_milestones.append({'title': item.get('title')}) + + for var in requested_milestones: + if var in existing_milestones: + untouched.append(var) + else: + compare_item = {'title': var.get('title')} + if compare_item in _existing_milestones: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_milestones, state, module): + change = False + return_value = dict(added=[], updated=[], removed=[], untouched=[]) + return_obj = dict(added=[], updated=[], removed=[]) + + milestones_before = [x.asdict() for x in this_gitlab.list_all_milestones()] + + # filter out and enrich before compare + for item in requested_milestones: + # add defaults when not present + if item.get('description') is None: + item['description'] = "" + if item.get('due_date') is None: + item['due_date'] = None + if item.get('start_date') is None: + item['start_date'] = None + + for item in milestones_before: + # remove field only from server + item.pop('id') + item.pop('iid') + item.pop('created_at') + item.pop('expired') + item.pop('state') + item.pop('updated_at') + item.pop('web_url') + # group milestone has group_id, while project has project_id + if 'group_id' in item: + item.pop('group_id') + if 'project_id' in item: + item.pop('project_id') + + if state == 'present': + add_or_update = [x for x in requested_milestones if x not in milestones_before] + for item in add_or_update: + try: + _rv, _obj = this_gitlab.create_milestone(item) + if _rv: + return_value['added'].append(item) + return_obj['added'].append(_obj) + except Exception: + # create raises exception with following error message when milestone already exists + _rv, _obj = this_gitlab.update_milestone(item) + if _rv: + return_value['updated'].append(item) + return_obj['updated'].append(_obj) + + if purge: + # re-fetch + _milestones = this_gitlab.list_all_milestones() + + for item in milestones_before: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + elif state == 'absent': + if not purge: + _milestone_titles_requested = [x['title'] for x in requested_milestones] + remove_requested = [x for x in milestones_before if x['title'] in _milestone_titles_requested] + for item in remove_requested: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + else: + for item in milestones_before: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + 
return_obj['removed'].append(_obj) + + if module.check_mode: + _untouched, _updated, _added = compare(requested_milestones, milestones_before, state) + return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + + if any(return_value[x] for x in ['added', 'removed', 'updated']): + change = True + + milestones_after = [x.asdict() for x in this_gitlab.list_all_milestones()] + + return change, return_value, milestones_before, milestones_after, return_obj + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str'), + group=dict(type='str'), + purge=dict(type='bool', default=False), + milestones=dict(type='list', elements='dict', default=[], + options=dict( + title=dict(type='str', required=True), + description=dict(type='str'), + due_date=dict(type='str'), + start_date=dict(type='str')) + ), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['project', 'group'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + gitlab_group = module.params['group'] + purge = module.params['purge'] + milestone_list = module.params['milestones'] + state = module.params['state'] + + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + + # find_project can return None, but the other must exist + gitlab_project_id = find_project(gitlab_instance, gitlab_project) + + # find_group can return None, but the other must exist + gitlab_group_id = find_group(gitlab_instance, gitlab_group) + + # if both not found, module must exist + if not gitlab_project_id and not gitlab_group_id: + if gitlab_project and not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + if gitlab_group and not gitlab_group_id: + module.fail_json(msg="group '%s' not found." 
% gitlab_group) + + this_gitlab = GitlabMilestones(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, + project_id=gitlab_project_id) + + change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, milestone_list, state, + module) + + if not module.check_mode: + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('title') for x in raw_return_value['added']] + updated = [x.get('title') for x in raw_return_value['updated']] + removed = [x.get('title') for x in raw_return_value['removed']] + untouched = [x.get('title') for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=change, milestones=return_value, milestones_obj=_obj) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py deleted file mode 120000 index deba829039..0000000000 --- a/plugins/modules/gitlab_project.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/gitlab/gitlab_project.py \ No newline at end of file diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py new file mode 100644 index 0000000000..b745fe9424 --- /dev/null +++ b/plugins/modules/gitlab_project.py @@ -0,0 +1,800 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_project +short_description: Creates/updates/deletes GitLab Projects +description: + - When the project does not exist in GitLab, it is created. + - When the project does exist and O(state=absent), the project is deleted. + - When changes are made to the project, the project is updated. +author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + allow_merge_on_skipped_pipeline: + description: + - Allow merge when skipped pipelines exist. + type: bool + version_added: "3.4.0" + avatar_path: + description: + - Absolute path image to configure avatar. File size should not exceed 200 kb. + - This option is only used on creation, not for updates. + type: path + version_added: "4.2.0" + build_timeout: + description: + - Maximum number of seconds a CI job can run. + - If not specified on creation, GitLab imposes a default value. + type: int + version_added: "10.6.0" + builds_access_level: + description: + - V(private) means that repository CI/CD is allowed only to project members. + - V(disabled) means that repository CI/CD is disabled. + - V(enabled) means that repository CI/CD is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + ci_config_path: + description: + - Custom path to the CI configuration file for this project. + type: str + version_added: "3.7.0" + container_expiration_policy: + description: + - Project cleanup policy for its container registry. + type: dict + suboptions: + cadence: + description: + - How often cleanup should be run. 
+ type: str + choices: ["1d", "7d", "14d", "1month", "3month"] + enabled: + description: + - Enable the cleanup policy. + type: bool + keep_n: + description: + - Number of tags kept per image name. + - V(0) clears the field. + type: int + choices: [0, 1, 5, 10, 25, 50, 100] + older_than: + description: + - Destroy tags older than this. + - V(0d) clears the field. + type: str + choices: ["0d", "7d", "14d", "30d", "90d"] + name_regex: + description: + - Destroy tags matching this regular expression. + type: str + name_regex_keep: + description: + - Keep tags matching this regular expression. + type: str + version_added: "9.3.0" + container_registry_access_level: + description: + - V(private) means that container registry is allowed only to project members. + - V(disabled) means that container registry is disabled. + - V(enabled) means that container registry is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + default_branch: + description: + - The default branch name for this project. + - For project creation, this option requires O(initialize_with_readme=true). + - For project update, the branch must exist. + - Supports project's default branch update since community.general 8.0.0. + type: str + version_added: "4.2.0" + description: + description: + - An description for the project. + type: str + environments_access_level: + description: + - V(private) means that deployment to environment is allowed only to project members. + - V(disabled) means that deployment to environment is disabled. + - V(enabled) means that deployment to environment is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + feature_flags_access_level: + description: + - V(private) means that feature rollout is allowed only to project members. + - V(disabled) means that feature rollout is disabled. + - V(enabled) means that feature rollout is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + forking_access_level: + description: + - V(private) means that repository forks is allowed only to project members. + - V(disabled) means that repository forks are disabled. + - V(enabled) means that repository forks are enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + group: + description: + - ID or the full path of the group of which this projects belongs to. + type: str + import_url: + description: + - Git repository which is imported into gitlab. + - GitLab server needs read access to this git repository. + required: false + type: str + infrastructure_access_level: + description: + - V(private) means that configuring infrastructure is allowed only to project members. + - V(disabled) means that configuring infrastructure is disabled. + - V(enabled) means that configuring infrastructure is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + initialize_with_readme: + description: + - Initializes the project with a default C(README.md). + - Is only used when the project is created, and ignored otherwise. + type: bool + default: false + version_added: "4.0.0" + issues_access_level: + description: + - V(private) means that accessing issues tab is allowed only to project members. + - V(disabled) means that accessing issues tab is disabled. + - V(enabled) means that accessing issues tab is enabled. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. 
+ type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.4.0" + issues_enabled: + description: + - Whether you want to create issues or not. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: bool + default: true + lfs_enabled: + description: + - Enable Git large file systems to manages large files such as audio, video, and graphics files. + type: bool + required: false + default: false + version_added: "2.0.0" + merge_method: + description: + - What requirements are placed upon merges. + - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. + type: str + choices: ["ff", "merge", "rebase_merge"] + default: merge + version_added: "1.0.0" + merge_requests_enabled: + description: + - If merge requests can be made or not. + type: bool + default: true + model_registry_access_level: + description: + - V(private) means that accessing model registry tab is allowed only to project members. + - V(disabled) means that accessing model registry tab is disabled. + - V(enabled) means that accessing model registry tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + monitor_access_level: + description: + - V(private) means that monitoring health is allowed only to project members. + - V(disabled) means that monitoring health is disabled. + - V(enabled) means that monitoring health is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + name: + description: + - The name of the project. + required: true + type: str + only_allow_merge_if_all_discussions_are_resolved: + description: + - All discussions on a merge request (MR) have to be resolved. + type: bool + version_added: "3.4.0" + only_allow_merge_if_pipeline_succeeds: + description: + - Only allow merges if pipeline succeeded. + type: bool + version_added: "3.4.0" + packages_enabled: + description: + - Enable GitLab package repository. + type: bool + version_added: "3.4.0" + pages_access_level: + description: + - V(private) means that accessing pages tab is allowed only to project members. + - V(disabled) means that accessing pages tab is disabled. + - V(enabled) means that accessing pages tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + path: + description: + - The path of the project you want to create, this is server_url/O(group)/O(path). + - If not supplied, O(name) is used. + type: str + releases_access_level: + description: + - V(private) means that accessing release is allowed only to project members. + - V(disabled) means that accessing release is disabled. + - V(enabled) means that accessing release is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + remove_source_branch_after_merge: + description: + - Remove the source branch after merge. + type: bool + version_added: "3.4.0" + repository_access_level: + description: + - V(private) means that accessing repository is allowed only to project members. + - V(disabled) means that accessing repository is disabled. + - V(enabled) means that accessing repository is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + security_and_compliance_access_level: + description: + - V(private) means that accessing security and complicance tab is allowed only to project members. + - V(disabled) means that accessing security and complicance tab is disabled. 
+      - V(enabled) means that accessing the security and compliance tab is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  service_desk_enabled:
+    description:
+      - Enable Service Desk.
+    type: bool
+    version_added: "9.3.0"
+  shared_runners_enabled:
+    description:
+      - Enable shared runners for this project.
+    type: bool
+    version_added: "3.7.0"
+  snippets_enabled:
+    description:
+      - Whether creating snippets is available.
+    type: bool
+    default: true
+  squash_option:
+    description:
+      - Squash commits when merging.
+    type: str
+    choices: ["never", "always", "default_off", "default_on"]
+    version_added: "3.4.0"
+  state:
+    description:
+      - Create or delete the project.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  topics:
+    description:
+      - A topic or list of topics to be assigned to a project.
+      - On old GitLab server releases (before version 14), this corresponds to C(tag_list).
+    type: list
+    elements: str
+    version_added: "6.6.0"
+  username:
+    description:
+      - Used to create a personal project under a user's name.
+    type: str
+    version_added: "3.3.0"
+  visibility:
+    description:
+      - V(private) Project access must be granted explicitly for each user.
+      - V(internal) The project can be cloned by any logged-in user.
+      - V(public) The project can be cloned without any authentication.
+    default: private
+    type: str
+    choices: ["private", "internal", "public"]
+    aliases:
+      - visibility_level
+  wiki_enabled:
+    description:
+      - Whether a wiki for this project should be available.
+    type: bool
+    default: true
+"""
+
+EXAMPLES = r"""
+- name: Create GitLab Project
+  community.general.gitlab_project:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ api_token }}"
+    name: my_first_project
+    group: "10481470"
+
+- name: Delete GitLab Project
+  community.general.gitlab_project:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ access_token }}"
+    name: my_first_project
+    state: absent
+  delegate_to: localhost
+
+- name: Create GitLab Project in group Ansible
+  community.general.gitlab_project:
+    api_url: https://gitlab.example.com/
+    validate_certs: true
+    api_username: dj-wasabi
+    api_password: "MySecretPassword"
+    name: my_first_project
+    group: ansible
+    issues_enabled: false
+    merge_method: rebase_merge
+    wiki_enabled: true
+    snippets_enabled: true
+    import_url: http://git.example.com/example/lab.git
+    initialize_with_readme: true
+    state: present
+  delegate_to: localhost
+
+- name: Get the initial root password
+  ansible.builtin.shell: |
+    grep 'Password:' /etc/gitlab/initial_root_password | sed -e 's/Password\: \(.*\)/\1/'
+  register: initial_root_password
+
+- name: Create a GitLab project using username and password authentication
+  community.general.gitlab_project:
+    api_url: https://gitlab.example.com/
+    api_username: root
+    api_password: "{{ initial_root_password.stdout }}"
+    name: my_second_project
+    group: "10481470"
+"""
+
+RETURN = r"""
+msg:
+  description: Success or failure message.
+  returned: always
+  type: str
+  sample: "Success"
+
+result:
+  description: JSON-parsed response from the server.
+  returned: always
+  type: dict
+
+error:
+  description: The error message returned by the GitLab API.
+  returned: failed
+  type: str
+  sample: "400: path is already in use"
+
+project:
+  description: API object.
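+  # (the dict mirrors the project attributes returned by the GitLab projects API)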
+ returned: always + type: dict +""" + + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab +) + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class GitLabProject(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.project_object = None + + ''' + @param project_name Name of the project + @param namespace Namespace Object (User or Group) + @param options Options of the project + ''' + def create_or_update_project(self, module, project_name, namespace, options): + changed = False + project_options = { + 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'builds_access_level': options['builds_access_level'], + 'build_timeout': options['build_timeout'], + 'ci_config_path': options['ci_config_path'], + 'container_expiration_policy': options['container_expiration_policy'], + 'container_registry_access_level': options['container_registry_access_level'], + 'description': options['description'], + 'environments_access_level': options['environments_access_level'], + 'feature_flags_access_level': options['feature_flags_access_level'], + 'forking_access_level': options['forking_access_level'], + 'infrastructure_access_level': options['infrastructure_access_level'], + 'issues_access_level': options['issues_access_level'], + 'issues_enabled': options['issues_enabled'], + 'lfs_enabled': options['lfs_enabled'], + 'merge_method': options['merge_method'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'model_registry_access_level': options['model_registry_access_level'], + 'monitor_access_level': options['monitor_access_level'], + 'name': project_name, + 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], + 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], + 'packages_enabled': options['packages_enabled'], + 'pages_access_level': options['pages_access_level'], + 'releases_access_level': options['releases_access_level'], + 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], + 'repository_access_level': options['repository_access_level'], + 'security_and_compliance_access_level': options['security_and_compliance_access_level'], + 'service_desk_enabled': options['service_desk_enabled'], + 'shared_runners_enabled': options['shared_runners_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'squash_option': options['squash_option'], + 'visibility': options['visibility'], + 'wiki_enabled': options['wiki_enabled'], + } + + # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version + # and check if less than 14. 
If it is, fall back to tag_list instead of topics.
+        if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"):
+            project_options['tag_list'] = options['topics']
+        else:
+            project_options['topics'] = options['topics']
+
+        # The project was already looked up via exists_project() in main(), so a missing
+        # project_object means it has to be created.
+        if self.project_object is None:
+            if options['default_branch'] and not options['initialize_with_readme']:
+                module.fail_json(msg="Parameter default_branch requires initialize_with_readme to be set to true")
+            project_options.update({
+                'path': options['path'],
+                'import_url': options['import_url'],
+            })
+            if options['initialize_with_readme']:
+                project_options['initialize_with_readme'] = options['initialize_with_readme']
+                if options['default_branch']:
+                    project_options['default_branch'] = options['default_branch']
+
+            project_options = self.get_options_with_value(project_options)
+            project = self.create_project(namespace, project_options)
+
+            # add avatar to project
+            if options['avatar_path']:
+                try:
+                    project.avatar = open(options['avatar_path'], 'rb')
+                except IOError as e:
+                    self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e))
+
+            changed = True
+        else:
+            if options['default_branch']:
+                project_options['default_branch'] = options['default_branch']
+            changed, project = self.update_project(self.project_object, project_options)
+
+        self.project_object = project
+        if changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+            try:
+                project.save()
+            except Exception as e:
+                self._module.fail_json(msg="Failed to update project: %s" % e)
+            return True
+        return False
+
+    '''
+    @param namespace Namespace Object (User or Group)
+    @param arguments Attributes of the project
+    '''
+    def create_project(self, namespace, arguments):
+        if self._module.check_mode:
+            return True
+
+        arguments['namespace_id'] = namespace.id
+        if 'container_expiration_policy' in arguments:
+            arguments['container_expiration_policy_attributes'] = arguments['container_expiration_policy']
+        try:
+            project = self._gitlab.projects.create(arguments)
+        except (gitlab.exceptions.GitlabCreateError) as e:
+            self._module.fail_json(msg="Failed to create project: %s" % to_native(e))
+
+        return project
+
+    '''
+    @param arguments Attributes of the project
+    '''
+    def get_options_with_value(self, arguments):
+        # drop all options that were not set, so they are not sent to the API
+        return {k: v for k, v in arguments.items() if v is not None}
+
+    '''
+    @param project Project Object
+    @param arguments Attributes of the project
+    '''
+    def update_project(self, project, arguments):
+        changed = False
+
+        for arg_key, arg_value in arguments.items():
+            if arg_value is not None:
+                if getattr(project, arg_key, None) != arg_value:
+                    if arg_key == 'container_expiration_policy':
+                        old_val = getattr(project, arg_key, {})
+                        final_val = {key: value for key, value in arg_value.items() if value is not None}
+
+                        # keep_n=0 and older_than='0d' are sentinel values which clear the
+                        # corresponding fields on the server side
+                        if final_val.get('older_than') == '0d':
+                            final_val['older_than'] = None
+                        if final_val.get('keep_n') == 0:
+                            final_val['keep_n'] = None
+
+                        if all(old_val.get(key) == value for key, value in final_val.items()):
+                            continue
+                        setattr(project, 'container_expiration_policy_attributes', final_val)
+                    else:
+                        setattr(project, arg_key, arg_value)
+                    changed = True
+
+        return (changed, project)
+
+    def delete_project(self):
+        if self._module.check_mode:
+            return True
+
+        project = self.project_object
+
+        return project.delete()
+
+    '''
+    @param namespace User/Group object
+    @param path Path of the project
+    '''
+    def 
exists_project(self, namespace, path): + # When project exists, object will be stored in self.project_object. + project = find_project(self._gitlab, namespace.full_path + '/' + path) + if project: + self.project_object = project + return True + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + allow_merge_on_skipped_pipeline=dict(type='bool'), + avatar_path=dict(type='path'), + builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + build_timeout=dict(type='int'), + ci_config_path=dict(type='str'), + container_expiration_policy=dict(type='dict', options=dict( + cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), + enabled=dict(type='bool'), + keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]), + older_than=dict(type='str', choices=["0d", "7d", "14d", "30d", "90d"]), + name_regex=dict(type='str'), + name_regex_keep=dict(type='str'), + )), + container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + default_branch=dict(type='str'), + description=dict(type='str'), + environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + group=dict(type='str'), + import_url=dict(type='str'), + infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + initialize_with_readme=dict(type='bool', default=False), + issues_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + issues_enabled=dict(type='bool', default=True), + lfs_enabled=dict(default=False, type='bool'), + merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), + merge_requests_enabled=dict(type='bool', default=True), + model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + name=dict(type='str', required=True), + only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), + only_allow_merge_if_pipeline_succeeds=dict(type='bool'), + packages_enabled=dict(type='bool'), + pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + path=dict(type='str'), + releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + remove_source_branch_after_merge=dict(type='bool'), + repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + service_desk_enabled=dict(type='bool'), + shared_runners_enabled=dict(type='bool'), + snippets_enabled=dict(default=True, type='bool'), + squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), + state=dict(type='str', default="present", choices=["absent", "present"]), + topics=dict(type='list', elements='str'), + username=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), + wiki_enabled=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + 
['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['group', 'username'], + ['issues_access_level', 'issues_enabled'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + avatar_path = module.params['avatar_path'] + builds_access_level = module.params['builds_access_level'] + build_timeout = module.params['build_timeout'] + ci_config_path = module.params['ci_config_path'] + container_expiration_policy = module.params['container_expiration_policy'] + container_registry_access_level = module.params['container_registry_access_level'] + default_branch = module.params['default_branch'] + environments_access_level = module.params['environments_access_level'] + feature_flags_access_level = module.params['feature_flags_access_level'] + forking_access_level = module.params['forking_access_level'] + group_identifier = module.params['group'] + import_url = module.params['import_url'] + infrastructure_access_level = module.params['infrastructure_access_level'] + initialize_with_readme = module.params['initialize_with_readme'] + issues_access_level = module.params['issues_access_level'] + issues_enabled = module.params['issues_enabled'] + lfs_enabled = module.params['lfs_enabled'] + merge_method = module.params['merge_method'] + merge_requests_enabled = module.params['merge_requests_enabled'] + model_registry_access_level = module.params['model_registry_access_level'] + monitor_access_level = module.params['monitor_access_level'] + only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] + only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] + packages_enabled = module.params['packages_enabled'] + pages_access_level = module.params['pages_access_level'] + project_description = module.params['description'] + project_name = module.params['name'] + project_path = module.params['path'] + releases_access_level = module.params['releases_access_level'] + remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] + repository_access_level = module.params['repository_access_level'] + security_and_compliance_access_level = module.params['security_and_compliance_access_level'] + service_desk_enabled = module.params['service_desk_enabled'] + shared_runners_enabled = module.params['shared_runners_enabled'] + snippets_enabled = module.params['snippets_enabled'] + squash_option = module.params['squash_option'] + state = module.params['state'] + topics = module.params['topics'] + username = module.params['username'] + visibility = module.params['visibility'] + wiki_enabled = module.params['wiki_enabled'] + + # Set project_path to project_name if it is empty. 
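+    # For example, a project named "My First Project" with no explicit path gets the
+    # path "My_First_Project" and ends up at <server_url>/<group>/My_First_Project.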
+ if project_path is None: + project_path = project_name.replace(" ", "_") + + gitlab_project = GitLabProject(module, gitlab_instance) + + namespace = None + namespace_id = None + if group_identifier: + group = find_group(gitlab_instance, group_identifier) + if group is None: + module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier) + + namespace_id = group.id + else: + if username: + namespace = gitlab_instance.namespaces.list(search=username, all=False)[0] + else: + namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username, all=False)[0] + namespace_id = namespace.id + + if not namespace_id: + module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace") + + try: + namespace = gitlab_instance.namespaces.get(namespace_id) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e)) + + if not namespace: + module.fail_json(msg="Failed to find the namespace for the project") + project_exists = gitlab_project.exists_project(namespace, project_path) + + if state == 'absent': + if project_exists: + gitlab_project.delete_project() + module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) + module.exit_json(changed=False, msg="Project deleted or does not exist") + + if state == 'present': + + if gitlab_project.create_or_update_project(module, project_name, namespace, { + "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "build_timeout": build_timeout, + "ci_config_path": ci_config_path, + "container_expiration_policy": container_expiration_policy, + "container_registry_access_level": container_registry_access_level, + "default_branch": default_branch, + "description": project_description, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "forking_access_level": forking_access_level, + "import_url": import_url, + "infrastructure_access_level": infrastructure_access_level, + "initialize_with_readme": initialize_with_readme, + "issues_access_level": issues_access_level, + "issues_enabled": issues_enabled, + "lfs_enabled": lfs_enabled, + "merge_method": merge_method, + "merge_requests_enabled": merge_requests_enabled, + "model_registry_access_level": model_registry_access_level, + "monitor_access_level": monitor_access_level, + "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, + "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, + "packages_enabled": packages_enabled, + "pages_access_level": pages_access_level, + "path": project_path, + "releases_access_level": releases_access_level, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "repository_access_level": repository_access_level, + "security_and_compliance_access_level": security_and_compliance_access_level, + "service_desk_enabled": service_desk_enabled, + "shared_runners_enabled": shared_runners_enabled, + "snippets_enabled": snippets_enabled, + "squash_option": squash_option, + "topics": topics, + "visibility": visibility, + "wiki_enabled": wiki_enabled, + }): + + module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) + module.exit_json(changed=False, msg="No need to update the 
project %s" % project_name, project=gitlab_project.project_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py new file mode 100644 index 0000000000..27e3b07129 --- /dev/null +++ b/plugins/modules/gitlab_project_access_token.py @@ -0,0 +1,333 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr) +# Based on code: +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_project_access_token +short_description: Manages GitLab project access tokens +version_added: 8.4.0 +description: + - Creates and revokes project access tokens. +author: + - Zoran Krleza (@pixslx) +requirements: + - python-gitlab >= 3.1.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes +notes: + - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens + are recreated or not is controlled by the O(recreate) option, which defaults to V(never). + - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. + - Token matching is done by comparing O(name) option. +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - ID or full path of project in the form of group/name. + required: true + type: str + name: + description: + - Access token's name. + required: true + type: str + scopes: + description: + - Scope of the access token. + - The values V(manage_runner) and V(self_rotate) were added in community.general 11.3.0. + required: true + type: list + elements: str + aliases: ["scope"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_repository + - write_repository + - create_runner + - manage_runner + - ai_features + - k8s_proxy + - self_rotate + access_level: + description: + - Access level of the access token. + - The value V(planner) was added in community.general 11.3.0. + type: str + default: maintainer + choices: ["guest", "planner", "reporter", "developer", "maintainer", "owner"] + expires_at: + description: + - Expiration date of the access token in C(YYYY-MM-DD) format. + - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date. + type: str + required: true + recreate: + description: + - Whether the access token is recreated if it already exists. + - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. + type: str + choices: ["never", "always", "state_change"] + default: never + state: + description: + - When V(present) the access token is added to the project if it does not exist. + - When V(absent) it is removed from the project if it exists. 
+ default: present + type: str + choices: ["present", "absent"] +""" + +EXAMPLES = r""" +- name: "Creating a project access token" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + access_level: developer + scopes: + - api + - read_api + - read_repository + - write_repository + state: present + +- name: "Revoking a project access token" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + state: absent + +- name: "Change (recreate) existing token if its actual state is different than desired state" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + recreate: state_change + state: present +""" + +RETURN = r""" +access_token: + description: + - API object. + - Only contains the value of the token if the token was created or recreated. + returned: success and O(state=present) + type: dict +""" + +from datetime import datetime + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, gitlab +) + +ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) + + +class GitLabProjectAccessToken(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.access_token_object = None + + ''' + @param project Project Object + @param arguments Attributes of the access_token + ''' + def create_access_token(self, project, arguments): + changed = False + if self._module.check_mode: + return True + + try: + self.access_token_object = project.access_tokens.create(arguments) + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create access token: %s " % to_native(e)) + + return changed + + ''' + @param project Project object + @param name of the access token + ''' + def find_access_token(self, project, name): + access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + for access_token in access_tokens: + if access_token.name == name: + self.access_token_object = access_token + return False + return False + + def revoke_access_token(self): + if self._module.check_mode: + return True + + changed = False + try: + self.access_token_object.delete() + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e)) + + return changed + + def access_tokens_equal(self): + if self.access_token_object.name != self._module.params['name']: + return False + if self.access_token_object.scopes != self._module.params['scopes']: + return False + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + return False + if 
self.access_token_object.expires_at != self._module.params['expires_at']:
+            return False
+        return True
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(auth_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default="present", choices=["absent", "present"]),
+        project=dict(type='str', required=True),
+        name=dict(type='str', required=True),
+        scopes=dict(type='list',
+                    required=True,
+                    aliases=['scope'],
+                    elements='str',
+                    choices=['api',
+                             'read_api',
+                             'read_registry',
+                             'write_registry',
+                             'read_repository',
+                             'write_repository',
+                             'create_runner',
+                             'manage_runner',
+                             'ai_features',
+                             'k8s_proxy',
+                             'self_rotate']),
+        access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']),
+        expires_at=dict(type='str', required=True),
+        recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['api_username', 'api_token'],
+            ['api_username', 'api_oauth_token'],
+            ['api_username', 'api_job_token'],
+            ['api_token', 'api_oauth_token'],
+            ['api_token', 'api_job_token']
+        ],
+        required_together=[
+            ['api_username', 'api_password']
+        ],
+        required_one_of=[
+            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+        ],
+        supports_check_mode=True
+    )
+
+    state = module.params['state']
+    project_identifier = module.params['project']
+    name = module.params['name']
+    scopes = module.params['scopes']
+    access_level_str = module.params['access_level']
+    expires_at = module.params['expires_at']
+    recreate = module.params['recreate']
+
+    access_level = ACCESS_LEVELS[access_level_str]
+
+    try:
+        datetime.strptime(expires_at, '%Y-%m-%d')
+    except ValueError:
+        module.fail_json(msg="Argument expires_at is not in the required format YYYY-MM-DD")
+
+    gitlab_instance = gitlab_authentication(module)
+
+    gitlab_access_token = GitLabProjectAccessToken(module, gitlab_instance)
+
+    project = find_project(gitlab_instance, project_identifier)
+    if project is None:
+        module.fail_json(msg="Failed to create access token: project %s does not exist" % project_identifier)
+
+    gitlab_access_token_exists = False
+    gitlab_access_token.find_access_token(project, name)
+    if gitlab_access_token.access_token_object is not None:
+        gitlab_access_token_exists = True
+
+    if state == 'absent':
+        if gitlab_access_token_exists:
+            gitlab_access_token.revoke_access_token()
+            module.exit_json(changed=True, msg="Successfully deleted access token %s" % name)
+        else:
+            module.exit_json(changed=False, msg="Access token does not exist")
+
+    if state == 'present':
+        if gitlab_access_token_exists:
+            if gitlab_access_token.access_tokens_equal():
+                if recreate == 'always':
+                    gitlab_access_token.revoke_access_token()
+                    gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+                    module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs)
+                else:
+                    module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs)
+            else:
+                if recreate == 'never':
+                    module.fail_json(msg="Access token already exists and its state is different. 
It can not be updated without recreating.") + else: + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project_badge.py b/plugins/modules/gitlab_project_badge.py new file mode 100644 index 0000000000..8d81765f99 --- /dev/null +++ b/plugins/modules/gitlab_project_badge.py @@ -0,0 +1,212 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Guillaume MARTINEZ (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_project_badge +short_description: Manage project badges on GitLab Server +version_added: 6.1.0 +description: + - This module allows to add and remove badges to/from a project. +author: Guillaume MARTINEZ (@Lunik) +requirements: + - C(owner) or C(maintainer) rights to project on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - The name (or full path) of the GitLab project the badge is added to/removed from. + required: true + type: str + + state: + description: + - State of the badge in the project. + - On V(present), it adds a badge to a GitLab project. + - On V(absent), it removes a badge from a GitLab project. + choices: ['present', 'absent'] + default: 'present' + type: str + + link_url: + description: + - The URL associated with the badge. + required: true + type: str + + image_url: + description: + - The image URL of the badge. + - A badge is identified by this URL. + required: true + type: str +""" + +EXAMPLES = r""" +- name: Add a badge to a GitLab Project + community.general.gitlab_project_badge: + api_url: 'https://example.gitlab.com' + api_token: 'Your-Private-Token' + project: projectname + state: present + link_url: 'https://example.gitlab.com/%{project_path}' + image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' + +- name: Remove a badge from a GitLab Project + community.general.gitlab_project_badge: + api_url: 'https://example.gitlab.com' + api_token: 'Your-Private-Token' + project: projectname + state: absent + link_url: 'https://example.gitlab.com/%{project_path}' + image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' +""" + +RETURN = r""" +badge: + description: The badge information. 
+ returned: when O(state=present) + type: dict + sample: + id: 1 + link_url: 'http://example.com/ci_status.svg?project=%{project_path}&ref=%{default_branch}' + image_url: 'https://shields.io/my/badge' + rendered_link_url: 'http://example.com/ci_status.svg?project=example-org/example-project&ref=master' + rendered_image_url: 'https://shields.io/my/badge' + kind: project +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, find_project, list_all_kwargs +) + + +def present_strategy(module, gl, project, wished_badge): + changed = False + + existing_badge = None + for badge in project.badges.list(**list_all_kwargs): + if badge.image_url == wished_badge["image_url"]: + existing_badge = badge + break + + if not existing_badge: + changed = True + if module.check_mode: + return changed, {"status": "A project badge would be created."} + + badge = project.badges.create(wished_badge) + return changed, badge.attributes + + if existing_badge.link_url != wished_badge["link_url"]: + changed = True + existing_badge.link_url = wished_badge["link_url"] + + if changed: + if module.check_mode: + return changed, {"status": "Project badge attributes would be changed."} + + existing_badge.save() + + return changed, existing_badge.attributes + + +def absent_strategy(module, gl, project, wished_badge): + changed = False + + existing_badge = None + for badge in project.badges.list(**list_all_kwargs): + if badge.image_url == wished_badge["image_url"]: + existing_badge = badge + break + + if not existing_badge: + return changed, None + + changed = True + if module.check_mode: + return changed, {"status": "Project badge would be destroyed."} + + existing_badge.delete() + + return changed, None + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) + + gitlab_project = module.params['project'] + state = module.params['state'] + + project = find_project(gl, gitlab_project) + # project doesn't exist + if not project: + module.fail_json(msg="project '%s' not found." 
% gitlab_project) + + wished_badge = { + "link_url": module.params["link_url"], + "image_url": module.params["image_url"], + } + + changed, summary = state_strategy[state](module=module, gl=gl, project=project, wished_badge=wished_badge) + + module.exit_json(changed=changed, badge=summary) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + project=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + link_url=dict(type='str', required=True), + image_url=dict(type='str', required=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ], + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py deleted file mode 120000 index 2e1e69acf9..0000000000 --- a/plugins/modules/gitlab_project_members.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_project_members.py \ No newline at end of file diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py new file mode 100644 index 0000000000..c496d4aae5 --- /dev/null +++ b/plugins/modules/gitlab_project_members.py @@ -0,0 +1,448 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Sergey Mikhaltsov +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_project_members +short_description: Manage project members on GitLab Server +version_added: 2.2.0 +description: + - This module allows to add and remove members to/from a project, or change a member's access level in a project on GitLab. +author: + - Sergey Mikhaltsov (@metanovii) + - Zainab Alsaffar (@zanssa) +requirements: + - python-gitlab python module <= 1.15.0 + - owner or maintainer rights to project on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - The name (or full path) of the GitLab project the member is added to/removed from. + required: true + type: str + gitlab_user: + description: + - A username or a list of usernames to add to/remove from the GitLab project. + - Mutually exclusive with O(gitlab_users_access). + type: list + elements: str + access_level: + description: + - The access level for the user. + - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with O(gitlab_user) and O(access_level). 
+      - Use together with O(purge_users) to remove all users not specified here from the project.
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description: A username to add to/remove from the GitLab project.
+        type: str
+        required: true
+      access_level:
+        description:
+          - The access level for the user.
+          - Required if O(state=present).
+          - V(owner) was added in community.general 10.6.0.
+        type: str
+        choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+        required: true
+    version_added: 3.7.0
+  state:
+    description:
+      - State of the member in the project.
+      - On V(present), it adds a user to a GitLab project.
+      - On V(absent), it removes a user from a GitLab project.
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+  purge_users:
+    description:
+      - Adds/removes users with the given access levels to match the given O(gitlab_user)/O(gitlab_users_access) list. If
+        omitted, orphaned members are not purged.
+      - Is only used when O(state=present).
+      - V(owner) was added in community.general 10.6.0.
+    type: list
+    elements: str
+    choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+    version_added: 3.7.0
+"""
+
+EXAMPLES = r"""
+- name: Add a user to a GitLab Project
+  community.general.gitlab_project_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    validate_certs: true
+    project: projectname
+    gitlab_user: username
+    access_level: developer
+    state: present
+
+- name: Remove a user from a GitLab project
+  community.general.gitlab_project_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    project: projectname
+    gitlab_user: username
+    state: absent
+
+- name: Add a list of users to a GitLab project
+  community.general.gitlab_project_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    project: projectname
+    gitlab_user:
+      - user1
+      - user2
+    access_level: developer
+    state: present
+
+- name: Add a list of users with dedicated access levels to a GitLab project
+  community.general.gitlab_project_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    project: projectname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: present
+
+- name: Add a user, remove all others which might be on this access level
+  community.general.gitlab_project_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    project: projectname
+    gitlab_user: username
+    access_level: developer
+    purge_users: developer
+    state: present
+
+- name: Remove a list of users with dedicated access levels from a GitLab project
+  community.general.gitlab_project_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    project: projectname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: absent
+"""
+
+RETURN = r""" # """
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, gitlab
+)
+
+
+class GitLabProjectMembers(object):
+    def __init__(self, module, gl):
+        self._module = module
+        self._gitlab = gl
+
+    def get_project(self, project_name):
+        try:
+            project_exists = self._gitlab.projects.get(project_name)
+            return project_exists.id
+
except gitlab.exceptions.GitlabGetError as e: + project_exists = self._gitlab.projects.list(search=project_name, all=False) + if project_exists: + return project_exists[0].id + + def get_user_id(self, gitlab_user): + user_exists = self._gitlab.users.list(username=gitlab_user, all=False) + if user_exists: + return user_exists[0].id + + # get all members in a project + def get_members_in_a_project(self, gitlab_project_id): + project = self._gitlab.projects.get(gitlab_project_id) + return project.members.list(all=True) + + # get single member in a project by user name + def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id): + member = None + project = self._gitlab.projects.get(gitlab_project_id) + try: + member = project.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + + # check if the user is a member of the project + def is_user_a_member(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return True + return False + + # add user to a project + def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): + project = self._gitlab.projects.get(gitlab_project_id) + add_member = project.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) + + # remove user from a project + def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): + project = self._gitlab.projects.get(gitlab_project_id) + project.members.delete(gitlab_user_id) + + # get user's access level + def get_user_access_level(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return member.access_level + + # update user's access level in a project + def update_user_access_level(self, members, gitlab_user_id, access_level): + for member in members: + if member.id == gitlab_user_id: + member.access_level = access_level + member.save() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + project=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + purge_users=dict(type='list', elements='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer', 'owner']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), + ) + ), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], + ], + required_together=[ + ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['gitlab_user', 'gitlab_users_access'], + ], + required_if=[ + ['state', 'present', ['access_level', 'gitlab_users_access'], True], + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) + + access_level_int = { + 'guest': 
gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, + } + + gitlab_project = module.params['project'] + state = module.params['state'] + access_level = module.params['access_level'] + purge_users = module.params['purge_users'] + + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] + + project = GitLabProjectMembers(module, gl) + + gitlab_project_id = project.get_project(gitlab_project) + + # project doesn't exist + if not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + + members = [] + if module.params['gitlab_user'] is not None: + gitlab_users_access = [] + gitlab_users = module.params['gitlab_user'] + for gl_user in gitlab_users: + gitlab_users_access.append( + {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) + elif module.params['gitlab_users_access'] is not None: + gitlab_users_access = module.params['gitlab_users_access'] + for user_level in gitlab_users_access: + user_level['access_level'] = access_level_int[user_level['access_level']] + + if len(gitlab_users_access) == 1 and not purge_users: + # only single user given + members = [project.get_member_in_a_project( + gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))] + if members[0] is None: + members = [] + elif len(gitlab_users_access) > 1 or purge_users: + # list of users given + members = project.get_members_in_a_project(gitlab_project_id) + else: + module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", + result_data=[]) + + changed = False + error = False + changed_users = [] + changed_data = [] + + for gitlab_user in gitlab_users_access: + gitlab_user_id = project.get_user_id(gitlab_user['name']) + + # user doesn't exist + if not gitlab_user_id: + if state == 'absent': + changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']}) + else: + error = True + changed_users.append("user '%s' not found." % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "user '%s' not found." 
% gitlab_user['name']})
+            continue
+
+        is_user_a_member = project.is_user_a_member(members, gitlab_user_id)
+
+        # check if the user is a member of the project
+        if not is_user_a_member:
+            if state == 'present':
+                # add user to the project
+                try:
+                    if not module.check_mode:
+                        project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level'])
+                    changed = True
+                    changed_users.append("Successfully added user '%s' to project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully added user '%s' to project" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabCreateError) as e:
+                    error = True
+                    changed_users.append("Failed to add user '%s' to the project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Not allowed to add user %s to the project: %s" % (gitlab_user['name'], e)})
+            # state is absent
+            else:
+                changed_users.append("User '%s' is not a member of the project. No change to report" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "User '%s' is not a member of the project. No change to report" % gitlab_user['name']})
+        # the user is already a member
+        else:
+            if state == 'present':
+                # compare the access level
+                user_access_level = project.get_user_access_level(members, gitlab_user_id)
+                if user_access_level == gitlab_user['access_level']:
+                    changed_users.append("User '%s' is already a member of the project. No change to report" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                         'msg': "User '%s' is already a member of the project. No change to report" % gitlab_user['name']})
+                else:
+                    # update the access level for the user
+                    try:
+                        if not module.check_mode:
+                            project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
+                        changed = True
+                        changed_users.append("Successfully updated the access level for the user '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                             'msg': "Successfully updated the access level for the user '%s'" % gitlab_user['name']})
+                    except (gitlab.exceptions.GitlabUpdateError) as e:
+                        error = True
+                        changed_users.append("Failed to update the access level for the user '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                             'msg': "Not allowed to update the access level for the member %s: %s" % (gitlab_user['name'], e)})
+            else:
+                # remove the user from the project
+                try:
+                    if not module.check_mode:
+                        project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
+                    changed = True
+                    changed_users.append("Successfully removed user '%s' from the project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully removed user '%s' from the project" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user '%s' from the project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Failed to remove user '%s' from the project: %s" % (gitlab_user['name'], e)})
+
+    # If state is present and purge_users is set, remove members that have one of the
+    # given access levels but are not in the requested user list.
+    if state == 'present' and purge_users:
+        uppercase_names_in_gitlab_users_access = []
+        for name in gitlab_users_access:
+            uppercase_names_in_gitlab_users_access.append(name['name'].upper())
+
+        for member in members:
+            if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
+                try:
+                    if not module.check_mode:
+                        project.remove_user_from_project(member.id, gitlab_project_id)
+                    changed = True
+                    changed_users.append("Successfully removed user '%s' from the project. Was not in the given list" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
+                                         'msg': "Successfully removed user '%s' from the project. Was not in the given list" % member.username})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user '%s' from the project" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'FAILED',
+                                         'msg': "Failed to remove user '%s' from the project: %s" % (member.username, e)})
+
+    if len(gitlab_users_access) == 1 and error:
+        # if a single user was given and an error occurred, fail with that error; for
+        # lists, errors are reported per user in result_data
+        module.fail_json(msg="FAILED: %s" % changed_users[0], result_data=changed_data)
+    elif error:
+        module.fail_json(
+            msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
+
+    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py
deleted file mode 120000
index 5f7b254687..0000000000
--- a/plugins/modules/gitlab_project_variable.py
+++ /dev/null
@@ -1 +0,0 @@
-./source_control/gitlab/gitlab_project_variable.py
\ No newline at end of file
diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py
new file mode 100644
index 0000000000..cf8dd47524
--- /dev/null
+++ b/plugins/modules/gitlab_project_variable.py
@@ -0,0 +1,494 @@
+#!/usr/bin/python
+
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab project variables
+description:
+  - When a project variable does not exist, it is created.
+  - When a project variable does exist and is not hidden, its value is updated if the values differ. When a project variable
+    does exist and is hidden, its value is always updated; in this case, the module is B(not idempotent).
+  - Variables which are not mentioned in the playbook, but exist in the GitLab project, either stay untouched (O(purge=false))
+    or are deleted (O(purge=true)).
+author:
+  - "Markus Bergholz (@markuman)"
+requirements:
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete the project variable.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  project:
+    description:
+      - The path and name of the project.
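+      - For example, V(markuman/dotfiles).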
+    required: true
+    type: str
+  purge:
+    description:
+      - When set to V(true), all variables which are not mentioned in the task are deleted.
+    default: false
+    type: bool
+  vars:
+    description:
+      - When an entry is a simple key-value pair, C(masked), C(hidden), C(raw), and C(protected) are set to V(false).
+      - When an entry is a dict with the keys C(value), C(masked), C(hidden), C(raw), and C(protected), the user has full
+        control over whether a value should be masked, hidden, raw, protected, or any combination.
+      - Support for protected values requires GitLab >= 9.3.
+      - Support for masked values requires GitLab >= 11.10.
+      - Support for hidden values requires GitLab >= 17.4, and was added in community.general 11.3.0.
+      - Support for raw values requires GitLab >= 15.7.
+      - Support for environment_scope requires GitLab Premium >= 13.11.
+      - Support for variable_type requires GitLab >= 11.11.
+      - A C(value) must be a string or a number.
+      - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file).
+      - Field C(environment_scope) must be a string that defines the environment scope.
+      - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation
+        on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables).
+    default: {}
+    type: dict
+  variables:
+    version_added: 4.4.0
+    description:
+      - A list of dictionaries that represents CI/CD variables.
+      - Internally, this module works with this structure, even if the older O(vars) parameter is used.
+    default: []
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description:
+          - The name of the variable.
+        type: str
+        required: true
+      value:
+        description:
+          - The variable value.
+          - Required when O(state=present).
+        type: str
+      description:
+        description:
+          - A description for the variable.
+          - Support for descriptions requires GitLab >= 16.2.
+        type: str
+        version_added: '11.4.0'
+      masked:
+        description:
+          - Whether variable value is masked or not.
+          - Support for masked values requires GitLab >= 11.10.
+        type: bool
+        default: false
+      hidden:
+        description:
+          - Whether variable value is hidden or not.
+          - Implies C(masked).
+          - Support for hidden values requires GitLab >= 17.4.
+        type: bool
+        default: false
+        version_added: '11.3.0'
+      protected:
+        description:
+          - Whether variable value is protected or not.
+          - Support for protected values requires GitLab >= 9.3.
+        type: bool
+        default: false
+      raw:
+        description:
+          - Whether variable value is raw or not.
+          - Support for raw values requires GitLab >= 15.7.
+        type: bool
+        default: false
+        version_added: '7.4.0'
+      variable_type:
+        description:
+          - Whether a variable is an environment variable (V(env_var)) or a file (V(file)).
+          - Support for O(variables[].variable_type) requires GitLab >= 11.11.
+        type: str
+        choices: ["env_var", "file"]
+        default: env_var
+      environment_scope:
+        description:
+          - The scope for the variable.
+          - Support for O(variables[].environment_scope) requires GitLab Premium >= 13.11.
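+          - For example, V(production), or the default V(*) which matches all environments.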
+ type: str + default: '*' +""" + + +EXAMPLES = r""" +- name: Set or update some CI/CD variables + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + variables: + - name: ACCESS_KEY_ID + value: abc123 + - name: SECRET_ACCESS_KEY + value: dassgrfaeui8989 + masked: true + protected: true + environment_scope: production + +- name: Set or update some CI/CD variables + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with raw value + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + raw: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with expandable value + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: '$MY_OTHER_VARIABLE' + masked: true + protected: true + raw: false + variable_type: env_var + environment_scope: '*' + +- name: Delete one variable + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + state: absent + vars: + ACCESS_KEY_ID: abc123 +""" + +RETURN = r""" +project_variable: + description: Four lists of the variablenames which were added, updated, removed or exist. + returned: always + type: dict + contains: + added: + description: A list of variables which were created. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + untouched: + description: A list of variables which exist. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + removed: + description: A list of variables which were deleted. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + updated: + description: A list of variables whose values were changed. 
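The four RETURN lists partition the requested variables against the existing ones, keyed on the (name, environment_scope) pair. A self-contained sketch of how such a partition can be computed, with made-up sample data (removal additionally depends on O(purge)):

```python
# Sketch: partition requested variables into added/updated/removed/untouched.
def partition(requested, existing):
    existing_keys = {(v["key"], v["environment_scope"]) for v in existing}
    requested_keys = {(r["key"], r["environment_scope"]) for r in requested}
    added, updated, untouched = [], [], []
    for var in requested:
        if var in existing:                 # exact match, nothing to do
            untouched.append(var["key"])
        elif (var["key"], var["environment_scope"]) in existing_keys:
            updated.append(var["key"])      # same name and scope, new value
        else:
            added.append(var["key"])
    removed = [v["key"] for v in existing
               if (v["key"], v["environment_scope"]) not in requested_keys]
    return added, updated, removed, untouched


existing = [{"key": "A", "value": "1", "environment_scope": "*"}]
requested = [{"key": "A", "value": "2", "environment_scope": "*"},
             {"key": "B", "value": "3", "environment_scope": "*"}]
print(partition(requested, existing))  # (['B'], ['A'], [], [])
```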
+ returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec + + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, + list_all_kwargs +) + + +class GitlabProjectVariables(object): + + def __init__(self, module, gitlab_instance): + self.repo = gitlab_instance + self.project = self.get_project(module.params['project']) + self._module = module + + def get_project(self, project_name): + return self.repo.projects.get(project_name) + + def list_all_project_variables(self): + return list(self.project.variables.list(**list_all_kwargs)) + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "description": var_obj.get('description'), + "masked": var_obj.get('masked'), + "masked_and_hidden": var_obj.get('hidden'), + "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), + "variable_type": var_obj.get('variable_type'), + } + + if var_obj.get('environment_scope') is not None: + var["environment_scope"] = var_obj.get('environment_scope') + + self.project.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.project.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=[], updated=[], removed=[], untouched=[]) + + gitlab_keys = this_gitlab.list_all_project_variables() + before = [x.attributes for x in gitlab_keys] + + gitlab_keys = this_gitlab.list_all_project_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + # filter out and enrich before compare + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('raw') is None: + item['raw'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('hidden') is None: + item['hidden'] = False + if item.get('environment_scope') is None: + item['environment_scope'] = '*' + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = this_gitlab.list_all_project_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value, type, and description do not matter on removing variables. 
+ keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if any(return_value[x] for x in ['added', 'removed', 'updated']): + change = True + + gitlab_keys = this_gitlab.list_all_project_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str', required=True), + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), + # please mind whenever changing the variables dict to also change module_utils/gitlab.py's + # KNOWN dict in filter_returned_variables or bad evil will happen + variables=dict(type='list', elements='dict', default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + description=dict(type='str'), + masked=dict(type='bool', default=False), + hidden=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), + environment_scope=dict(type='str', default='*'), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]), + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['vars', 'variables'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + purge = module.params['purge'] + var_list = module.params['vars'] + state = module.params['state'] + + if var_list: + variables = vars_to_variables(var_list, module) + else: + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required for all variables in state present') + + this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) + + change, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item.pop('project_id') + item['name'] = item.pop('key') + for item in before: + item.pop('project_id') + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] 
+    removed = [x.get('key') for x in raw_return_value['removed']]
+    untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']]
+    return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+    module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py
deleted file mode 120000
index 7af5b500ce..0000000000
--- a/plugins/modules/gitlab_protected_branch.py
+++ /dev/null
@@ -1 +0,0 @@
-source_control/gitlab/gitlab_protected_branch.py
\ No newline at end of file
diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py
new file mode 100644
index 0000000000..c779736cc6
--- /dev/null
+++ b/plugins/modules/gitlab_protected_branch.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_protected_branch
+short_description: Manage protection of existing branches
+version_added: 3.4.0
+description:
+  - Marks or unmarks existing branches for protection.
+author:
+  - "Werner Dijkerman (@dj-wasabi)"
+requirements:
+  - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete protected branch.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  project:
+    description:
+      - The path and name of the project.
+    required: true
+    type: str
+  name:
+    description:
+      - The name of the branch that needs to be protected.
+      - Can make use of a wildcard character, like V(production/*), or can be a plain branch name such as V(main) or V(develop).
+    required: true
+    type: str
+  merge_access_levels:
+    description:
+      - Access levels allowed to merge.
+    default: maintainer
+    type: str
+    choices: ["maintainer", "developer", "nobody"]
+  push_access_level:
+    description:
+      - Access levels allowed to push.
+    default: maintainer
+    type: str
+    choices: ["maintainer", "developer", "nobody"]
+  allow_force_push:
+    description:
+      - Whether or not to allow force pushes to the protected branch.
+    type: bool
+    version_added: '11.3.0'
+  code_owner_approval_required:
+    description:
+      - Whether or not to require code owner approval to push.
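GitLab expects numeric access levels when protecting a branch, so the option names have to be mapped onto constants, as the module does on (re)creation. A hedged standalone python-gitlab sketch of that mapping (instance URL, token, and project path are placeholders, and this is not the module's own code path):

```python
# Sketch: protect a branch with explicit numeric access levels.
import gitlab

gl = gitlab.Gitlab("https://gitlab.com", private_token="secret_access_token")
project = gl.projects.get("dj-wasabi/collection.general")

ACCESS_LEVEL = {
    "nobody": gitlab.const.NO_ACCESS,              # 0
    "developer": gitlab.const.DEVELOPER_ACCESS,    # 30
    "maintainer": gitlab.const.MAINTAINER_ACCESS,  # 40
}

project.protectedbranches.create({
    "name": "main",
    "merge_access_level": ACCESS_LEVEL["maintainer"],
    "push_access_level": ACCESS_LEVEL["nobody"],
    "allow_force_push": False,
})
```

Merge and push levels are immutable on an existing protection in older GitLab releases, which is why the module deletes and recreates the protection when they change.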
+ type: bool + version_added: '11.3.0' +""" + + +EXAMPLES = r""" +- name: Create protected branch on main + community.general.gitlab_protected_branch: + api_url: https://gitlab.com + api_token: secret_access_token + project: "dj-wasabi/collection.general" + name: main + merge_access_levels: maintainer + push_access_level: nobody +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab +) + + +class GitlabProtectedBranch(object): + + def __init__(self, module, project, gitlab_instance): + self.repo = gitlab_instance + self._module = module + self.project = self.get_project(project) + self.ACCESS_LEVEL = { + 'nobody': gitlab.const.NO_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS + } + + def get_project(self, project_name): + return self.repo.projects.get(project_name) + + def protected_branch_exist(self, name): + try: + return self.project.protectedbranches.get(name) + except Exception as e: + return False + + def create_or_update_protected_branch(self, name, options): + protected_branch_options = { + 'name': name, + 'allow_force_push': options['allow_force_push'], + 'code_owner_approval_required': options['code_owner_approval_required'], + } + protected_branch = self.protected_branch_exist(name=name) + changed = False + if protected_branch and self.can_update(protected_branch, options): + for arg_key, arg_value in protected_branch_options.items(): + if arg_value is not None: + if getattr(protected_branch, arg_key) != arg_value: + setattr(protected_branch, arg_key, arg_value) + changed = True + if changed and not self._module.check_mode: + protected_branch.save() + else: + # Set immutable options only on (re)creation + protected_branch_options['merge_access_level'] = options['merge_access_levels'] + protected_branch_options['push_access_level'] = options['push_access_level'] + if protected_branch: + # Exists, but couldn't update. 
So, delete first + self.delete_protected_branch(name) + if not self._module.check_mode: + self.project.protectedbranches.create(protected_branch_options) + changed = True + + return changed + + def can_update(self, protected_branch, options): + # these keys are not set on update the same way they are on creation + configured_merge = options['merge_access_levels'] + configured_push = options['push_access_level'] + current_merge = protected_branch.merge_access_levels[0]['access_level'] + current_push = protected_branch.push_access_levels[0]['access_level'] + return ((configured_merge is None or current_merge == configured_merge) and + (configured_push is None or current_push == configured_push)) + + def delete_protected_branch(self, name): + if self._module.check_mode: + return True + return self.project.protectedbranches.delete(name) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str', required=True), + name=dict(type='str', required=True), + merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + allow_force_push=dict(type='bool'), + code_owner_approval_required=dict(type='bool'), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + project = module.params['project'] + name = module.params['name'] + merge_access_levels = module.params['merge_access_levels'] + push_access_level = module.params['push_access_level'] + state = module.params['state'] + + gitlab_version = gitlab.__version__ + if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + " Please upgrade python-gitlab to version 2.3.0 or above." 
% gitlab_version) + + this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance) + + p_branch = this_gitlab.protected_branch_exist(name=name) + options = { + "merge_access_levels": this_gitlab.ACCESS_LEVEL[merge_access_levels], + "push_access_level": this_gitlab.ACCESS_LEVEL[push_access_level], + "allow_force_push": module.params["allow_force_push"], + "code_owner_approval_required": module.params["code_owner_approval_required"], + } + if state == "present": + changed = this_gitlab.create_or_update_protected_branch(name, options) + module.exit_json(changed=changed, msg="Created or updated the protected branch.") + elif p_branch and state == "absent": + this_gitlab.delete_protected_branch(name=name) + module.exit_json(changed=True, msg="Deleted the protected branch.") + module.exit_json(changed=False, msg="No changes are needed.") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py deleted file mode 120000 index 15150123f3..0000000000 --- a/plugins/modules/gitlab_runner.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/gitlab/gitlab_runner.py \ No newline at end of file diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py new file mode 100644 index 0000000000..889e2471cc --- /dev/null +++ b/plugins/modules/gitlab_runner.py @@ -0,0 +1,530 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Raphaël Droz (raphael.droz@gmail.com) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Samy Coenen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_runner +short_description: Create, modify and delete GitLab Runners +description: + - Register, update and delete runners on GitLab Server side with the GitLab API. + - All operations are performed using the GitLab API v4. + - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html) and + U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user). + - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web + interface at U(https://$GITLAB_URL/profile/personal_access_tokens). + - A valid registration token is required for registering a new runner. To create shared runners, you need to ask your administrator + to give you this token. It can be found at U(https://$GITLAB_URL/admin/runners/). + - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through + its API. Once the module has created the runner, you may use the generated token to run C(gitlab-runner register) command. +notes: + - To create a new runner at least the O(api_token), O(description) and O(api_url) options are required. + - Runners need to have unique descriptions, since this attribute is used as key for idempotency. 
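Since the description doubles as the idempotency key, an existing runner has to be found by listing and matching on it. A standalone python-gitlab sketch of that lookup (URL, token, and description are placeholders; this lists runners available to the authenticated user, as with O(owned=true), while an instance-wide search needs an administrator token):

```python
# Sketch: find a runner by its unique description.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="access_token")


def find_runner(description):
    for runner in gl.runners.list(iterator=True):
        # list endpoints return slim objects; re-fetch for full attributes
        if getattr(runner, "description", None) == description:
            return gl.runners.get(runner.id)
    return None


runner = find_runner("Docker Machine t1")
print("exists" if runner else "absent")
```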
+author:
+  - Samy Coenen (@SamyCoenen)
+  - Guillaume Martinez (@Lunik)
+requirements:
+  - python-gitlab >= 1.5.0 for legacy runner registration workflow (runner registration token -
+    U(https://docs.gitlab.com/runner/register/#register-with-a-runner-registration-token-deprecated))
+  - python-gitlab >= 4.0.0 for new runner registration workflow (runner authentication token -
+    U(https://docs.gitlab.com/runner/register/#register-with-a-runner-authentication-token))
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  group:
+    description:
+      - ID or full path of the group in the form group/subgroup.
+      - Mutually exclusive with O(owned) and O(project).
+      - Must be the group's numeric ID if O(registration_token) is not set and O(state=present).
+    type: str
+    version_added: '6.5.0'
+  project:
+    description:
+      - ID or full path of the project in the form of group/name.
+      - Mutually exclusive with O(owned) since community.general 4.5.0.
+      - Mutually exclusive with O(group).
+      - Must be the project's numeric ID if O(registration_token) is not set and O(state=present).
+    type: str
+    version_added: '3.7.0'
+  description:
+    description:
+      - The unique name of the runner.
+    required: true
+    type: str
+    aliases:
+      - name
+  state:
+    description:
+      - Make sure that the runner with the same name exists with the same configuration, or delete the runner with the same
+        name.
+    required: false
+    default: present
+    choices: ["present", "absent"]
+    type: str
+  registration_token:
+    description:
+      - The registration token is used to register new runners before GitLab 16.0.
+      - Required if O(state=present) for GitLab < 16.0.
+      - If set, the runner is created using the old runner creation workflow.
+      - If not set, the runner is created using the new runner creation workflow, introduced in GitLab 16.0.
+      - If not set, requires python-gitlab >= 4.0.0.
+    type: str
+  owned:
+    description:
+      - When V(true), only search the runners available to the user for an existing runner; when V(false), an administrator
+        token is required.
+      - Mutually exclusive with O(project) since community.general 4.5.0.
+      - Mutually exclusive with O(group).
+    default: false
+    type: bool
+    version_added: 2.0.0
+  active:
+    description:
+      - Define if the runner is immediately active after creation.
+      - Mutually exclusive with O(paused).
+    required: false
+    default: true
+    type: bool
+  paused:
+    description:
+      - Define if the runner is active or paused after creation.
+      - Mutually exclusive with O(active).
+    required: false
+    default: false
+    type: bool
+    version_added: 8.1.0
+  locked:
+    description:
+      - Determines if the runner is locked or not.
+    required: false
+    default: false
+    type: bool
+  access_level:
+    description:
+      - Determines if a runner can pick up jobs only from protected branches.
+      - If O(access_level_on_creation) is not explicitly set to V(true), this option is ignored on registration and is only
+        applied on updates.
+      - If set to V(not_protected), the runner can pick up jobs from both protected and unprotected branches.
+      - If set to V(ref_protected), the runner can pick up jobs only from protected branches.
+      - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general
+        8.0.0. If this option is not specified explicitly, GitLab uses V(not_protected) on creation, and the value set is
+        not changed on any updates.
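The O(registration_token) option selects between the two registration workflows named in the requirements above. A hedged sketch of that branch point (the new path needs python-gitlab >= 4.0.0; URL, token, and description are placeholders):

```python
# Sketch: legacy vs. new runner registration.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="access_token")
gl.auth()  # populates gl.user, which the new workflow needs


def register_runner(description, registration_token=None):
    if registration_token:
        # legacy workflow: registration token, deprecated since GitLab 16.0
        return gl.runners.create({"token": registration_token,
                                  "description": description})
    # new workflow: runner authentication token, linked to the current user
    return gl.user.runners.create({"runner_type": "instance_type",
                                   "description": description})


runner = register_runner("Docker Machine t1")
print(runner.id)
```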
+ required: false + choices: ["not_protected", "ref_protected"] + type: str + access_level_on_creation: + description: + - Whether the runner should be registered with an access level or not. + - If set to V(true), the value of O(access_level) is used for runner registration. + - If set to V(false), GitLab registers the runner with the default access level. + - The default of this option changed to V(true) in community.general 7.0.0. Before, it was V(false). + required: false + default: true + type: bool + version_added: 6.3.0 + maximum_timeout: + description: + - The maximum time that a runner has to complete a specific job. + required: false + default: 3600 + type: int + run_untagged: + description: + - Run untagged jobs or not. + required: false + default: true + type: bool + tag_list: + description: The tags that apply to the runner. + required: false + default: [] + type: list + elements: str +""" + +EXAMPLES = r""" +- name: Create an instance-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a group-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + group: top-level-group/subgroup + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a project-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + project: top-level-group/subgroup/project + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: "Register instance-level runner with registration token (deprecated)" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: "Delete runner" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: absent + +- name: Delete an owned runner as a non-admin + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + owned: true + state: absent + +- name: "Register a project-level runner with registration token (deprecated)" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: MyProject runner + state: present + project: mygroup/mysubgroup/myproject + register: runner # Register module output to run C(gitlab-runner register) command in another task +""" + +RETURN = r""" +msg: + description: Success or failure message. 
+ returned: always + type: str + sample: "Success" + +result: + description: JSON-parsed response from the server. + returned: always + type: dict + +error: + description: The error message returned by the GitLab API. + returned: failed + type: str + sample: "400: path is already in use" + +runner: + description: API object. + returned: always + type: dict +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs +) + + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class GitLabRunner(object): + def __init__(self, module, gitlab_instance, group=None, project=None): + self._module = module + self._gitlab = gitlab_instance + self.runner_object = None + + # Whether to operate on GitLab-instance-wide or project-wide runners + # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 + # for group runner token access + if project: + self._runners_endpoint = project.runners.list + elif group: + self._runners_endpoint = group.runners.list + elif module.params['owned']: + self._runners_endpoint = gitlab_instance.runners.list + else: + self._runners_endpoint = gitlab_instance.runners.all + + def create_or_update_runner(self, description, options): + changed = False + + arguments = { + 'locked': options['locked'], + 'run_untagged': options['run_untagged'], + 'maximum_timeout': options['maximum_timeout'], + 'tag_list': options['tag_list'], + } + + if options.get('paused') is not None: + arguments['paused'] = options['paused'] + else: + arguments['active'] = options['active'] + + if options.get('access_level') is not None: + arguments['access_level'] = options['access_level'] + # Because we have already call userExists in main() + if self.runner_object is None: + arguments['description'] = description + if options.get('registration_token') is not None: + arguments['token'] = options['registration_token'] + elif options.get('group') is not None: + arguments['runner_type'] = 'group_type' + arguments['group_id'] = options['group'] + elif options.get('project') is not None: + arguments['runner_type'] = 'project_type' + arguments['project_id'] = options['project'] + else: + arguments['runner_type'] = 'instance_type' + + access_level_on_creation = self._module.params['access_level_on_creation'] + if not access_level_on_creation: + arguments.pop('access_level', None) + + runner = self.create_runner(arguments) + changed = True + else: + changed, runner = self.update_runner(self.runner_object, arguments) + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated the runner %s" % description) + + try: + runner.save() + except Exception as e: + self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) + + self.runner_object = runner + return changed + + ''' + @param arguments Attributes of the runner + ''' + def create_runner(self, arguments): + if self._module.check_mode: + class MockRunner: + def __init__(self): + self._attrs = {} + return MockRunner() + + try: + if arguments.get('token') is not None: + runner = self._gitlab.runners.create(arguments) + elif LooseVersion(gitlab.__version__) < LooseVersion('4.0.0'): + self._module.fail_json(msg="New runner creation workflow requires python-gitlab 4.0.0 or higher") + 
else: + runner = self._gitlab.user.runners.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) + + return runner + + ''' + @param runner Runner object + @param arguments Attributes of the runner + ''' + def update_runner(self, runner, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arg_value is not None: + if isinstance(arg_value, list): + list1 = getattr(runner, arg_key) + list1.sort() + list2 = arg_value + list2.sort() + if list1 != list2: + setattr(runner, arg_key, arg_value) + changed = True + else: + if getattr(runner, arg_key) != arg_value: + setattr(runner, arg_key, arg_value) + changed = True + + return (changed, runner) + + ''' + @param description Description of the runner + ''' + def find_runner(self, description): + runners = self._runners_endpoint(**list_all_kwargs) + + for runner in runners: + # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner + # object, so we need to handle both + if hasattr(runner, "description"): + if runner.description == description: + return self._gitlab.runners.get(runner.id) + else: + if runner['description'] == description: + return self._gitlab.runners.get(runner['id']) + + ''' + @param description Description of the runner + ''' + def exists_runner(self, description): + # When runner exists, object will be stored in self.runner_object. + runner = self.find_runner(description) + + if runner: + self.runner_object = runner + return True + return False + + def delete_runner(self): + if self._module.check_mode: + return True + + runner = self.runner_object + + return runner.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + description=dict(type='str', required=True, aliases=["name"]), + active=dict(type='bool', default=True), + paused=dict(type='bool', default=False), + owned=dict(type='bool', default=False), + tag_list=dict(type='list', elements='str', default=[]), + run_untagged=dict(type='bool', default=True), + locked=dict(type='bool', default=False), + access_level=dict(type='str', choices=["not_protected", "ref_protected"]), + access_level_on_creation=dict(type='bool', default=True), + maximum_timeout=dict(type='int', default=3600), + registration_token=dict(type='str', no_log=True), + project=dict(type='str'), + group=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'owned'], + ['group', 'owned'], + ['project', 'group'], + ['active', 'paused'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + state = module.params['state'] + runner_description = module.params['description'] + runner_active = module.params['active'] + runner_paused = module.params['paused'] + tag_list = module.params['tag_list'] + run_untagged = module.params['run_untagged'] + runner_locked = module.params['locked'] + access_level = 
module.params['access_level'] + maximum_timeout = module.params['maximum_timeout'] + registration_token = module.params['registration_token'] + project = module.params['project'] + group = module.params['group'] + + gitlab_project = None + gitlab_group = None + + if project: + try: + gitlab_project = gitlab_instance.projects.get(project) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a project %s' % project, exception=to_native(e)) + elif group: + try: + gitlab_group = gitlab_instance.groups.get(group) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a group %s' % group, exception=to_native(e)) + + gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project) + runner_exists = gitlab_runner.exists_runner(runner_description) + + if state == 'absent': + if runner_exists: + gitlab_runner.delete_runner() + module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description) + else: + module.exit_json(changed=False, msg="Runner deleted or does not exists") + + if state == 'present': + runner_values = { + "active": runner_active, + "tag_list": tag_list, + "run_untagged": run_untagged, + "locked": runner_locked, + "access_level": access_level, + "maximum_timeout": maximum_timeout, + "registration_token": registration_token, + "group": group, + "project": project, + } + if LooseVersion(gitlab_runner._gitlab.version()[0]) >= LooseVersion("14.8.0"): + # the paused attribute for runners is available since 14.8 + runner_values["paused"] = runner_paused + if gitlab_runner.create_or_update_runner(runner_description, runner_values): + module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, + msg="Successfully created or updated the runner %s" % runner_description) + else: + module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs, + msg="No need to update the runner %s" % runner_description) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py deleted file mode 120000 index 2224dc977d..0000000000 --- a/plugins/modules/gitlab_user.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/gitlab/gitlab_user.py \ No newline at end of file diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py new file mode 100644 index 0000000000..58bfc126ac --- /dev/null +++ b/plugins/modules/gitlab_user.py @@ -0,0 +1,683 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_user +short_description: Creates/updates/deletes/blocks/unblocks GitLab Users +description: + - When the user does not exist in GitLab, it is created. + - When the user exists and state=absent, the user is deleted. + - When the user exists and state=blocked, the user is blocked. + - When changes are made to user, the user is updated. +notes: + - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user. 
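GitLab's user search is a fuzzy match, so idempotency requires filtering the results for an exact username. A standalone python-gitlab sketch of that lookup (URL, token, and username are placeholders, and this is an illustration rather than the module's own code):

```python
# Sketch: exact-username lookup on top of GitLab's fuzzy user search.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="access_token")


def find_user(username):
    username = username.lower()  # the module lowercases usernames as well
    return next(
        (u for u in gl.users.list(search=username, iterator=True)
         if u.username == username),
        None,
    )


user = find_user("myusername")
print(user.attributes["state"] if user else "absent")  # e.g. 'active', 'blocked'
```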
+author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) + - Lennert Mertens (@LennertMertens) + - Stef Graces (@stgrace) +requirements: + - python-gitlab python module + - administrator rights on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + name: + description: + - Name of the user you want to create. + - Required only if O(state=present). + type: str + username: + description: + - The username of the user. + required: true + type: str + password: + description: + - The password of the user. + - GitLab server enforces minimum password length to 8, set this value with 8 or more characters. + type: str + reset_password: + description: + - Whether the user can change its password or not. + default: false + type: bool + version_added: 3.3.0 + email: + description: + - The email that belongs to the user. + - Required only if O(state=present). + type: str + sshkey_name: + description: + - The name of the SSH public key. + type: str + sshkey_file: + description: + - The SSH public key itself. + type: str + sshkey_expires_at: + description: + - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ). + - This is only used when adding new SSH public keys. + type: str + version_added: 3.1.0 + group: + description: + - ID or Full path of parent group in the form of group/name. + - Add user as a member to this group. + type: str + access_level: + description: + - The access level to the group. + - The value V(master) is an alias for V(maintainer). + default: guest + type: str + choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] + state: + description: + - Create, delete or block a user. + default: present + type: str + choices: ["present", "absent", "blocked", "unblocked"] + confirm: + description: + - Require confirmation. + type: bool + default: true + isadmin: + description: + - Grant admin privileges to the user. + type: bool + default: false + external: + description: + - Define external parameter for this user. + type: bool + default: false + identities: + description: + - List of identities to be added/updated for this user. + - To remove all other identities from this user, set O(overwrite_identities=true). + type: list + elements: dict + suboptions: + provider: + description: + - The name of the external identity provider. + type: str + extern_uid: + description: + - User ID for external identity. + type: str + version_added: 3.3.0 + overwrite_identities: + description: + - Overwrite identities with identities added in this module. + - This means that all identities that the user has and that are not listed in O(identities) are removed from the user. + - This is only done if a list is provided for O(identities). To remove all identities, provide an empty list. 
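The interplay of O(identities) and O(overwrite_identities) is easiest to see in isolation: wanted identities the user lacks are always added, and identities missing from the wanted list are only dropped when overwriting. A pure-Python sketch of that reconciliation rule, with sample data only (the module itself applies this through the GitLab API):

```python
# Sketch: reconcile a user's identities against a wanted list.
def reconcile_identities(current, wanted, overwrite=False):
    to_add = [i for i in wanted if i not in current]
    to_delete = [i for i in current if i not in wanted] if overwrite else []
    kept = [i for i in current if i not in to_delete]
    return kept + to_add, to_delete


current = [{"provider": "ldap", "extern_uid": "uid=jdoe"}]
wanted = [{"provider": "Keycloak",
           "extern_uid": "f278f95c-12c7-4d51-996f-758cc2eb11bc"}]
result, deleted = reconcile_identities(current, wanted, overwrite=True)
print(result)   # only the Keycloak identity remains
print(deleted)  # the ldap identity was removed
```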
+ type: bool + default: false + version_added: 3.3.0 +""" + +EXAMPLES = r""" +- name: "Delete GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + username: myusername + state: absent + +- name: "Create GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + sshkey_name: MySSH + sshkey_file: ssh-rsa AAAAB3NzaC1yc... + state: present + group: super_group/mon_group + access_level: owner + +- name: "Create GitLab User using external identity provider" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: true + api_token: "{{ access_token }}" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + identities: + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + state: present + group: super_group/mon_group + access_level: owner + +- name: "Block GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + username: myusername + state: blocked + +- name: "Unblock GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + username: myusername + state: unblocked +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +result: + description: JSON-parsed response from the server. + returned: always + type: dict + +error: + description: The error message returned by the GitLab API. + returned: failed + type: str + sample: "400: path is already in use" + +user: + description: API object. 
+ returned: always + type: dict +""" + + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab, list_all_kwargs +) + + +class GitLabUser(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.user_object = None + self.ACCESS_LEVEL = { + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'master': gitlab.const.MAINTAINER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, + } + + ''' + @param username Username of the user + @param options User options + ''' + def create_or_update_user(self, username, options): + changed = False + potentionally_changed = False + + # Because we have already call userExists in main() + if self.user_object is None: + user = self.create_user({ + 'name': options['name'], + 'username': username, + 'password': options['password'], + 'reset_password': options['reset_password'], + 'email': options['email'], + 'skip_confirmation': not options['confirm'], + 'admin': options['isadmin'], + 'external': options['external'], + 'identities': options['identities'], + }) + changed = True + else: + changed, user = self.update_user( + self.user_object, { + # add "normal" parameters here, put uncheckable + # params in the dict below + 'name': {'value': options['name']}, + 'email': {'value': options['email']}, + + # note: for some attributes like this one the key + # from reading back from server is unfortunately + # different to the one needed for pushing/writing, + # in that case use the optional setter key + 'is_admin': { + 'value': options['isadmin'], 'setter': 'admin' + }, + 'external': {'value': options['external']}, + 'identities': {'value': options['identities']}, + }, + { + # put "uncheckable" params here, this means params + # which the gitlab does accept for setting but does + # not return any information about it + 'skip_reconfirmation': {'value': not options['confirm']}, + 'password': {'value': options['password']}, + 'reset_password': {'value': options['reset_password']}, + 'overwrite_identities': {'value': options['overwrite_identities']}, + } + ) + + # note: as we unfortunately have some uncheckable parameters + # where it is not possible to determine if the update + # changed something or not, we must assume here that a + # changed happened and that an user object update is needed + potentionally_changed = True + + # Assign ssh keys + if options['sshkey_name'] and options['sshkey_file']: + key_changed = self.add_ssh_key_to_user(user, { + 'name': options['sshkey_name'], + 'file': options['sshkey_file'], + 'expires_at': options['sshkey_expires_at']}) + changed = changed or key_changed + + # Assign group + if options['group_path']: + group_changed = self.assign_user_to_group(user, options['group_path'], options['access_level']) + changed = changed or group_changed + + self.user_object = user + if (changed or potentionally_changed) and not self._module.check_mode: + try: + user.save() + except Exception as e: + self._module.fail_json(msg="Failed to update user: %s " % to_native(e)) + + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % 
username) + return True + else: + return False + + ''' + @param group User object + ''' + def get_user_id(self, user): + if user is not None: + return user.id + return None + + ''' + @param user User object + @param sshkey_name Name of the ssh key + ''' + def ssh_key_exists(self, user, sshkey_name): + return any( + k.title == sshkey_name + for k in user.keys.list(**list_all_kwargs) + ) + + ''' + @param user User object + @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""} + ''' + def add_ssh_key_to_user(self, user, sshkey): + if not self.ssh_key_exists(user, sshkey['name']): + if self._module.check_mode: + return True + + try: + parameter = { + 'title': sshkey['name'], + 'key': sshkey['file'], + } + if sshkey['expires_at'] is not None: + parameter['expires_at'] = sshkey['expires_at'] + user.keys.create(parameter) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e)) + return True + return False + + ''' + @param group Group object + @param user_id Id of the user to find + ''' + def find_member(self, group, user_id): + try: + member = group.members.get(user_id) + except gitlab.exceptions.GitlabGetError: + return None + return member + + ''' + @param group Group object + @param user_id Id of the user to check + ''' + def member_exists(self, group, user_id): + member = self.find_member(group, user_id) + + return member is not None + + ''' + @param group Group object + @param user_id Id of the user to check + @param access_level GitLab access_level to check + ''' + def member_as_good_access_level(self, group, user_id, access_level): + member = self.find_member(group, user_id) + + return member.access_level == access_level + + ''' + @param user User object + @param group_path Complete path of the Group including parent group path. 
/ + @param access_level GitLab access_level to assign + ''' + def assign_user_to_group(self, user, group_identifier, access_level): + group = find_group(self._gitlab, group_identifier) + + if self._module.check_mode: + return True + + if group is None: + return False + + if self.member_exists(group, self.get_user_id(user)): + member = self.find_member(group, self.get_user_id(user)) + if not self.member_as_good_access_level(group, member.id, self.ACCESS_LEVEL[access_level]): + member.access_level = self.ACCESS_LEVEL[access_level] + member.save() + return True + else: + try: + group.members.create({ + 'user_id': self.get_user_id(user), + 'access_level': self.ACCESS_LEVEL[access_level]}) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e)) + return True + return False + + ''' + @param user User object + @param arguments User attributes + ''' + def update_user(self, user, arguments, uncheckable_args): + changed = False + + for arg_key, arg_value in arguments.items(): + av = arg_value['value'] + + if av is not None: + if arg_key == "identities": + changed = self.add_identities(user, av, uncheckable_args['overwrite_identities']['value']) + + elif getattr(user, arg_key) != av: + setattr(user, arg_value.get('setter', arg_key), av) + changed = True + + for arg_key, arg_value in uncheckable_args.items(): + av = arg_value['value'] + + if av is not None: + setattr(user, arg_value.get('setter', arg_key), av) + + return (changed, user) + + ''' + @param arguments User attributes + ''' + def create_user(self, arguments): + if self._module.check_mode: + return True + + identities = None + if 'identities' in arguments: + identities = arguments['identities'] + del arguments['identities'] + + try: + user = self._gitlab.users.create(arguments) + if identities: + self.add_identities(user, identities) + + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) + + return user + + ''' + @param user User object + @param identities List of identities to be added/updated + @param overwrite_identities Overwrite user identities with identities passed to this module + ''' + def add_identities(self, user, identities, overwrite_identities=False): + changed = False + if overwrite_identities: + changed = self.delete_identities(user, identities) + + for identity in identities: + if identity not in user.identities: + setattr(user, 'provider', identity['provider']) + setattr(user, 'extern_uid', identity['extern_uid']) + if not self._module.check_mode: + user.save() + changed = True + return changed + + ''' + @param user User object + @param identities List of identities to be added/updated + ''' + def delete_identities(self, user, identities): + changed = False + for identity in user.identities: + if identity not in identities: + if not self._module.check_mode: + user.identityproviders.delete(identity['provider']) + changed = True + return changed + + ''' + @param username Username of the user + ''' + def find_user(self, username): + return next( + ( + user for user in self._gitlab.users.list(search=username, **list_all_kwargs) + if user.username == username + ), + None + ) + + ''' + @param username Username of the user + ''' + def exists_user(self, username): + # When user exists, object will be stored in self.user_object. 
+ user = self.find_user(username) + if user: + self.user_object = user + return True + return False + + ''' + @param username Username of the user + ''' + def is_active(self, username): + user = self.find_user(username) + return user.attributes['state'] == 'active' + + def delete_user(self): + if self._module.check_mode: + return True + + user = self.user_object + + return user.delete() + + def block_user(self): + if self._module.check_mode: + return True + + user = self.user_object + + return user.block() + + def unblock_user(self): + if self._module.check_mode: + return True + + user = self.user_object + + return user.unblock() + + +def sanitize_arguments(arguments): + for key, value in list(arguments.items()): + if value is None: + del arguments[key] + return arguments + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + name=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), + username=dict(type='str', required=True), + password=dict(type='str', no_log=True), + reset_password=dict(type='bool', default=False, no_log=False), + email=dict(type='str'), + sshkey_name=dict(type='str'), + sshkey_file=dict(type='str', no_log=False), + sshkey_expires_at=dict(type='str', no_log=False), + group=dict(type='str'), + access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), + confirm=dict(type='bool', default=True), + isadmin=dict(type='bool', default=False), + external=dict(type='bool', default=False), + identities=dict(type='list', elements='dict'), + overwrite_identities=dict(type='bool', default=False), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + required_if=( + ('state', 'present', ['name', 'email']), + ) + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + user_name = module.params['name'] + state = module.params['state'] + user_username = module.params['username'].lower() + user_password = module.params['password'] + user_reset_password = module.params['reset_password'] + user_email = module.params['email'] + user_sshkey_name = module.params['sshkey_name'] + user_sshkey_file = module.params['sshkey_file'] + user_sshkey_expires_at = module.params['sshkey_expires_at'] + group_path = module.params['group'] + access_level = module.params['access_level'] + confirm = module.params['confirm'] + user_isadmin = module.params['isadmin'] + user_external = module.params['external'] + user_identities = module.params['identities'] + overwrite_identities = module.params['overwrite_identities'] + + gitlab_user = GitLabUser(module, gitlab_instance) + user_exists = gitlab_user.exists_user(user_username) + if user_exists: + user_is_active = gitlab_user.is_active(user_username) + else: + user_is_active = False + + if state == 'absent': + if user_exists: + gitlab_user.delete_user() + module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username) + else: + module.exit_json(changed=False, msg="User deleted or does 
not exists") + + if state == 'blocked': + if user_exists and user_is_active: + gitlab_user.block_user() + module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username) + else: + module.exit_json(changed=False, msg="User already blocked or does not exists") + + if state == 'unblocked': + if user_exists and not user_is_active: + gitlab_user.unblock_user() + module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username) + else: + module.exit_json(changed=False, msg="User is not blocked or does not exists") + + if state == 'present': + if gitlab_user.create_or_update_user(user_username, { + "name": user_name, + "password": user_password, + "reset_password": user_reset_password, + "email": user_email, + "sshkey_name": user_sshkey_name, + "sshkey_file": user_sshkey_file, + "sshkey_expires_at": user_sshkey_expires_at, + "group_path": group_path, + "access_level": access_level, + "confirm": confirm, + "isadmin": user_isadmin, + "external": user_external, + "identities": user_identities, + "overwrite_identities": overwrite_identities, + }): + module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.user_object._attrs) + else: + module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.user_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py deleted file mode 120000 index d461af74a2..0000000000 --- a/plugins/modules/grove.py +++ /dev/null @@ -1 +0,0 @@ -./notification/grove.py \ No newline at end of file diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py new file mode 100644 index 0000000000..fc71322688 --- /dev/null +++ b/plugins/modules/grove.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: grove +short_description: Sends a notification to a grove.io channel +description: + - The C(grove) module sends a message for a service to a Grove.io channel. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + channel_token: + type: str + description: + - Token of the channel to post to. + required: true + service: + type: str + description: + - Name of the service (displayed as the "user" in the message). + required: false + default: ansible + message_content: + type: str + description: + - Message content. + - The alias O(ignore:message) has been removed in community.general 4.0.0. + required: true + url: + type: str + description: + - Service URL for the web client. + required: false + icon_url: + type: str + description: + - Icon for the service. + required: false + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. 
+    default: true
+    type: bool
+author: "Jonas Pfenniger (@zimbatm)"
+"""

+EXAMPLES = r"""
+- name: Send a notification to a grove.io channel
+  community.general.grove:
+    channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+    service: my-app
+    message_content: 'deployed {{ target }}'
+"""
+
+from urllib.parse import urlencode
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
+
+# ==============================================================
+# do_notify_grove
+
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+    my_url = BASE_URL % (channel_token,)
+
+    my_data = dict(service=service, message=message)
+    if url is not None:
+        my_data['url'] = url
+    if icon_url is not None:
+        my_data['icon_url'] = icon_url
+
+    data = urlencode(my_data)
+    response, info = fetch_url(module, my_url, data=data)
+    if info['status'] != 200:
+        module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            channel_token=dict(type='str', required=True, no_log=True),
+            message_content=dict(type='str', required=True),
+            service=dict(type='str', default='ansible'),
+            url=dict(type='str'),
+            icon_url=dict(type='str'),
+            validate_certs=dict(default=True, type='bool'),
+        )
+    )
+
+    channel_token = module.params['channel_token']
+    service = module.params['service']
+    message = module.params['message_content']
+    url = module.params['url']
+    icon_url = module.params['icon_url']
+
+    do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+    # Mission complete
+    module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py
deleted file mode 120000
index fa50f2a718..0000000000
--- a/plugins/modules/gunicorn.py
+++ /dev/null
@@ -1 +0,0 @@
-./web_infrastructure/gunicorn.py
\ No newline at end of file
diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py
new file mode 100644
index 0000000000..b7033d3471
--- /dev/null
+++ b/plugins/modules/gunicorn.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Alejandro Gomez
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: gunicorn
+short_description: Run gunicorn with various settings
+description:
+  - Starts gunicorn with the parameters specified. Common settings for gunicorn configuration are supported. For additional
+    configuration use a config file. See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more options.
+    It is recommended to always use the O(chdir) option to avoid problems with the location of the app.
+requirements: [gunicorn]
+author:
+  - "Alejandro Gomez (@agmezr)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  app:
+    type: str
+    required: true
+    aliases: ['name']
+    description:
+      - The app module. A name refers to a WSGI callable that should be found in the specified module.
+  venv:
+    type: path
+    aliases: ['virtualenv']
+    description:
+      - Path to the virtualenv directory.
+  config:
+    type: path
+    description:
+      - Path to the gunicorn configuration file.
+ aliases: ['conf'] + chdir: + type: path + description: + - Chdir to specified directory before apps loading. + pid: + type: path + description: + - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file is created to + check a successful run of gunicorn. + worker: + type: str + choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp'] + description: + - The type of workers to use. The default class (sync) should handle most "normal" types of workloads. + user: + type: str + description: + - Switch worker processes to run as this user. +notes: + - If not specified on config file, a temporary error log is created on C(/tmp) directory. Please make sure you have write + access in C(/tmp) directory. Not needed but it is helpful to identify any problem with configuration. +""" + +EXAMPLES = r""" +- name: Simple gunicorn run example + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + +- name: Run gunicorn on a virtualenv + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + venv: '/workspace/example/venv' + +- name: Run gunicorn with a config file + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + conf: '/workspace/example/gunicorn.cfg' + +- name: Run gunicorn as ansible user with specified pid and config file + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + conf: '/workspace/example/gunicorn.cfg' + venv: '/workspace/example/venv' + pid: '/workspace/example/gunicorn.pid' + user: 'ansible' +""" + +RETURN = r""" +gunicorn: + description: Process ID of gunicorn. + returned: changed + type: str + sample: "1234" +""" + +import os +import time + +from ansible.module_utils.basic import AnsibleModule + + +def search_existing_config(config, option): + ''' search in config file for specified option ''' + if config and os.path.isfile(config): + with open(config, 'r') as f: + for line in f: + if option in line: + return line + return None + + +def remove_tmp_file(file_path): + ''' remove temporary files ''' + if os.path.isfile(file_path): + os.remove(file_path) + + +def main(): + + # available gunicorn options on module + gunicorn_options = { + 'config': '-c', + 'chdir': '--chdir', + 'worker': '-k', + 'user': '-u', + } + + module = AnsibleModule( + argument_spec=dict( + app=dict(required=True, type='str', aliases=['name']), + venv=dict(type='path', aliases=['virtualenv']), + config=dict(type='path', aliases=['conf']), + chdir=dict(type='path'), + pid=dict(type='path'), + user=dict(type='str'), + worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']), + ) + ) + + # temporary files in case no option provided + tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log') + tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid') + + # remove temp file if exists + remove_tmp_file(tmp_pid_file) + remove_tmp_file(tmp_error_log) + + # obtain app name and venv + params = module.params + app = params['app'] + venv = params['venv'] + pid = params['pid'] + + # use venv path if exists + if venv: + gunicorn_command = "/".join((venv, 'bin', 'gunicorn')) + else: + gunicorn_command = module.get_bin_path('gunicorn') + + # to daemonize the process + options = ["-D"] + + # fill options + for option in gunicorn_options: + param = params[option] + if param: + options.append(gunicorn_options[option]) + options.append(param) + + error_log = search_existing_config(params['config'], 'errorlog') + if not 
error_log: + # place error log somewhere in case of fail + options.append("--error-logfile") + options.append(tmp_error_log) + + pid_file = search_existing_config(params['config'], 'pid') + if not params['pid'] and not pid_file: + pid = tmp_pid_file + + # add option for pid file if not found on config file + if not pid_file: + options.append('--pid') + options.append(pid) + + # put args together + args = [gunicorn_command] + options + [app] + rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None) + + if not err: + # wait for gunicorn to dump to log + time.sleep(0.5) + if os.path.isfile(pid): + with open(pid, 'r') as f: + result = f.readline().strip() + + if not params['pid']: + os.remove(pid) + + module.exit_json(changed=True, pid=result, debug=" ".join(args)) + else: + # if user defined own error log, check that + if error_log: + error = 'Please check your {0}'.format(error_log.strip()) + else: + if os.path.isfile(tmp_error_log): + with open(tmp_error_log, 'r') as f: + error = f.read() + # delete tmp log + os.remove(tmp_error_log) + else: + error = "Log not found" + + module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err) + + else: + module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hana_query.py b/plugins/modules/hana_query.py deleted file mode 120000 index ea869eb7a4..0000000000 --- a/plugins/modules/hana_query.py +++ /dev/null @@ -1 +0,0 @@ -./database/saphana/hana_query.py \ No newline at end of file diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py deleted file mode 120000 index 1b243c872b..0000000000 --- a/plugins/modules/haproxy.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/haproxy.py \ No newline at end of file diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py new file mode 100644 index 0000000000..5fd927ba4e --- /dev/null +++ b/plugins/modules/haproxy.py @@ -0,0 +1,479 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Ravi Bhure +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: haproxy +short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands +author: + - Ravi Bhure (@ravibhure) +description: + - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. +notes: + - Enable, disable and drain commands are restricted and can only be issued on sockets configured for level C(admin). For + example, you can add the line C(stats socket /var/run/haproxy.sock level admin) to the general section of C(haproxy.cfg). + See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). + - Depends on netcat (C(nc)) being available; you need to install the appropriate package for your operating system before + this module can be used. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + backend: + description: + - Name of the HAProxy backend pool. + - If this parameter is unset, it is auto-detected. + type: str + drain: + description: + - Wait until the server has no active connections or until the timeout determined by O(wait_interval) and O(wait_retries) + is reached. + - Continue only after the status changes to C(MAINT). + - This overrides the shutdown_sessions option. 
+ type: bool + default: false + host: + description: + - Name of the backend host to change. + type: str + required: true + shutdown_sessions: + description: + - When disabling a server, immediately terminate all the sessions attached to the specified server. + - This can be used to terminate long-running sessions after a server is put into maintenance mode. Overridden by the + drain option. + type: bool + default: false + socket: + description: + - Path to the HAProxy socket file. + type: path + default: /var/run/haproxy.sock + state: + description: + - Desired state of the provided backend host. + - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it is ignored. + type: str + required: true + choices: [disabled, drain, enabled] + agent: + description: + - Disable/enable agent checks (depending on O(state) value). + type: bool + default: false + version_added: 1.0.0 + health: + description: + - Disable/enable health checks (depending on O(state) value). + type: bool + default: false + version_added: "1.0.0" + fail_on_not_found: + description: + - Fail whenever trying to enable/disable a backend host that does not exist. + type: bool + default: false + wait: + description: + - Wait until the server reports a status of C(UP) when O(state=enabled), status of C(MAINT) when O(state=disabled) or + status of C(DRAIN) when O(state=drain). + type: bool + default: false + wait_interval: + description: + - Number of seconds to wait between retries. + type: int + default: 5 + wait_retries: + description: + - Number of times to check for status after changing the state. + type: int + default: 25 + weight: + description: + - The value passed in argument. + - If the value ends with the V(%) sign, then the new weight is relative to the initially configured weight. + - Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256. + type: str +""" + +EXAMPLES = r""" +- name: Disable server in 'www' backend pool + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + +- name: Disable server in 'www' backend pool, also stop health/agent checks + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + health: true + agent: true + +- name: Disable server without backend pool name (apply to all available backend pool) + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + +- name: Disable server, provide socket file + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + +- name: Disable server, provide socket file, wait until status reports in maintenance + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: true + +# Place server in drain mode, providing a socket file. Then check the server's +# status every minute to see if it changes to maintenance mode, continuing if it +# does in an hour and failing otherwise. 
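+# The timeout arithmetic behind these example values: wait_interval 60 seconds
+# times wait_retries 60 checks gives a 60 * 60 = 3600 second (one hour) window
+# before the task fails.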
+- community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: true + drain: true + wait_interval: 60 + wait_retries: 60 + +- name: Disable backend server in 'www' backend pool and drop open sessions to it + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + socket: /var/run/haproxy.sock + shutdown_sessions: true + +- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is + not found + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + fail_on_not_found: true + +- name: Enable server in 'www' backend pool + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + +- name: Enable server in 'www' backend pool wait until healthy + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: true + +- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the + health + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: true + wait_retries: 10 + wait_interval: 5 + +- name: Enable server in 'www' backend pool with change server(s) weight + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + weight: 10 + backend: www + +- name: Set the server in 'www' backend pool to drain mode + community.general.haproxy: + state: drain + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www +""" + +import csv +import socket +import time +from string import Template + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" +RECV_SIZE = 1024 +ACTION_CHOICES = ['enabled', 'disabled', 'drain'] +WAIT_RETRIES = 25 +WAIT_INTERVAL = 5 + + +###################################################################### +class TimeoutException(Exception): + pass + + +class HAProxy(object): + """ + Used for communicating with HAProxy through its local UNIX socket interface. + Perform common tasks in Haproxy related to enable server and + disable server. + + The complete set of external commands Haproxy handles is documented + on their website: + + http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands + """ + + def __init__(self, module): + self.module = module + + self.state = self.module.params['state'] + self.host = self.module.params['host'] + self.backend = self.module.params['backend'] + self.weight = self.module.params['weight'] + self.socket = self.module.params['socket'] + self.shutdown_sessions = self.module.params['shutdown_sessions'] + self.fail_on_not_found = self.module.params['fail_on_not_found'] + self.agent = self.module.params['agent'] + self.health = self.module.params['health'] + self.wait = self.module.params['wait'] + self.wait_retries = self.module.params['wait_retries'] + self.wait_interval = self.module.params['wait_interval'] + self._drain = self.module.params['drain'] + self.command_results = {} + + def execute(self, cmd, timeout=200, capture_output=True): + """ + Executes a HAProxy command by sending a message to a HAProxy's local + UNIX socket and waiting up to 'timeout' milliseconds for the response. 
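+
+        For example, within this class, execute('show stat', capture_output=False)
+        returns the raw CSV text that discover_all_backends() and get_state_for()
+        then parse with csv.DictReader.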
+ """ + self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.client.connect(self.socket) + self.client.sendall(to_bytes('%s\n' % cmd)) + + result = b'' + buf = b'' + buf = self.client.recv(RECV_SIZE) + while buf: + result += buf + buf = self.client.recv(RECV_SIZE) + result = to_text(result, errors='surrogate_or_strict') + + if capture_output: + self.capture_command_output(cmd, result.strip()) + self.client.close() + return result + + def capture_command_output(self, cmd, output): + """ + Capture the output for a command + """ + if 'command' not in self.command_results: + self.command_results['command'] = [] + self.command_results['command'].append(cmd) + if 'output' not in self.command_results: + self.command_results['output'] = [] + self.command_results['output'].append(output) + + def discover_all_backends(self): + """ + Discover all entries with svname = 'BACKEND' and return a list of their corresponding + pxnames + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + return tuple(d['pxname'] for d in r if d['svname'] == 'BACKEND') + + def discover_version(self): + """ + Attempt to extract the haproxy version. + Return a tuple containing major and minor version. + """ + data = self.execute('show info', 200, False) + lines = data.splitlines() + line = [x for x in lines if 'Version:' in x] + try: + version_values = line[0].partition(':')[2].strip().split('.', 3) + version = (int(version_values[0]), int(version_values[1])) + except (ValueError, TypeError, IndexError): + version = None + + return version + + def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None): + """ + Run some command on the specified backends. If no backends are provided they will + be discovered automatically (all backends) + """ + # Discover backends if none are given + if pxname is None: + backends = self.discover_all_backends() + else: + backends = [pxname] + + # Run the command for each requested backend + for backend in backends: + # Fail when backends were not found + state = self.get_state_for(backend, svname) + if (self.fail_on_not_found) and state is None: + self.module.fail_json( + msg="The specified backend '%s/%s' was not found!" % (backend, svname)) + + if state is not None: + self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) + if self.wait and not (wait_for_status == "DRAIN" and state == "DOWN"): + self.wait_until_status(backend, svname, wait_for_status) + + def get_state_for(self, pxname, svname): + """ + Find the state of specific services. When pxname is not set, get all backends for a specific host. + Returns a list of dictionaries containing the status and weight for those services. + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + + def unpack_state(d): + return {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']} + + state = tuple(unpack_state(d) for d in r if (pxname is None or d['pxname'] == pxname) and d['svname'] == svname) + return state or None + + def wait_until_status(self, pxname, svname, status): + """ + Wait for a service to reach the specified status. Try RETRIES times + with INTERVAL seconds of sleep in between. If the service has not reached + the expected status in that time, the module will fail. If the service was + not found, the module will fail. 
+ """ + for i in range(1, self.wait_retries): + state = self.get_state_for(pxname, svname) + + # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here + # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching + if status in state[0]['status']: + if not self._drain or state[0]['scur'] == '0': + return True + time.sleep(self.wait_interval) + + self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % + (pxname, svname, status, self.wait_retries)) + + def enabled(self, host, backend, weight): + """ + Enabled action, marks server to UP and checks are re-enabled, + also supports to get current weight for server (default) and + set the weight for haproxy backend server when provides. + """ + cmd = "get weight $pxname/$svname; enable server $pxname/$svname" + if self.agent: + cmd += "; enable agent $pxname/$svname" + if self.health: + cmd += "; enable health $pxname/$svname" + if weight: + cmd += "; set weight $pxname/$svname %s" % weight + self.execute_for_backends(cmd, backend, host, 'UP') + + def disabled(self, host, backend, shutdown_sessions): + """ + Disabled action, marks server to DOWN for maintenance. In this mode, no more checks will be + performed on the server until it leaves maintenance, + also it shutdown sessions while disabling backend host server. + """ + cmd = "get weight $pxname/$svname" + if self.agent: + cmd += "; disable agent $pxname/$svname" + if self.health: + cmd += "; disable health $pxname/$svname" + cmd += "; disable server $pxname/$svname" + if shutdown_sessions: + cmd += "; shutdown sessions server $pxname/$svname" + self.execute_for_backends(cmd, backend, host, 'MAINT') + + def drain(self, host, backend, status='DRAIN'): + """ + Drain action, sets the server to DRAIN mode. + In this mode, the server will not accept any new connections + other than those that are accepted via persistence. + """ + haproxy_version = self.discover_version() + + # check if haproxy version supports DRAIN state (starting with 1.5) + if haproxy_version and (1, 5) <= haproxy_version: + cmd = "set server $pxname/$svname state drain" + self.execute_for_backends(cmd, backend, host, "DRAIN") + if status == "MAINT": + self.disabled(host, backend, self.shutdown_sessions) + + def act(self): + """ + Figure out what you want to do from ansible, and then do it. 
+ """ + # Get the state before the run + self.command_results['state_before'] = self.get_state_for(self.backend, self.host) + + # toggle enable/disable server + if self.state == 'enabled': + self.enabled(self.host, self.backend, self.weight) + elif self.state == 'disabled' and self._drain: + self.drain(self.host, self.backend, status='MAINT') + elif self.state == 'disabled': + self.disabled(self.host, self.backend, self.shutdown_sessions) + elif self.state == 'drain': + self.drain(self.host, self.backend) + else: + self.module.fail_json(msg="unknown state specified: '%s'" % self.state) + + # Get the state after the run + self.command_results['state_after'] = self.get_state_for(self.backend, self.host) + + # Report change status + self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after']) + + self.module.exit_json(**self.command_results) + + +def main(): + + # load ansible module object + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=True, choices=ACTION_CHOICES), + host=dict(type='str', required=True), + backend=dict(type='str'), + weight=dict(type='str'), + socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION), + shutdown_sessions=dict(type='bool', default=False), + fail_on_not_found=dict(type='bool', default=False), + health=dict(type='bool', default=False), + agent=dict(type='bool', default=False), + wait=dict(type='bool', default=False), + wait_retries=dict(type='int', default=WAIT_RETRIES), + wait_interval=dict(type='int', default=WAIT_INTERVAL), + drain=dict(type='bool', default=False), + ), + ) + + if not socket: + module.fail_json(msg="unable to locate haproxy socket") + + ansible_haproxy = HAProxy(module) + ansible_haproxy.act() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/heroku_collaborator.py b/plugins/modules/heroku_collaborator.py deleted file mode 120000 index d1304cb3eb..0000000000 --- a/plugins/modules/heroku_collaborator.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/heroku/heroku_collaborator.py \ No newline at end of file diff --git a/plugins/modules/heroku_collaborator.py b/plugins/modules/heroku_collaborator.py new file mode 100644 index 0000000000..e8094760a6 --- /dev/null +++ b/plugins/modules/heroku_collaborator.py @@ -0,0 +1,135 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: heroku_collaborator +short_description: Add or delete app collaborators on Heroku +description: + - Manages collaborators for Heroku apps. + - If set to V(present) and heroku user is already collaborator, then do nothing. + - If set to V(present) and heroku user is not collaborator, then add user to app. + - If set to V(absent) and heroku user is collaborator, then delete user from app. +author: + - Marcel Arns (@marns93) +requirements: + - heroku3 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + type: str + description: + - Heroku API key. + apps: + type: list + elements: str + description: + - List of Heroku App names. + required: true + suppress_invitation: + description: + - Suppress email invitation when creating collaborator. + type: bool + default: false + user: + type: str + description: + - User ID or e-mail. 
+ required: true + state: + type: str + description: + - Create or remove the heroku collaborator. + choices: ["present", "absent"] + default: "present" +notes: + - E(HEROKU_API_KEY) and E(TF_VAR_HEROKU_API_KEY) environment variables can be used instead setting O(api_key). + - If you use C(check_mode), you can also pass the C(-v) flag to see affected apps in C(msg), for example C(["heroku-example-app"]). +""" + +EXAMPLES = r""" +- name: Create a heroku collaborator + community.general.heroku_collaborator: + api_key: YOUR_API_KEY + user: max.mustermann@example.com + apps: heroku-example-app + state: present + +- name: An example of using the module in loop + community.general.heroku_collaborator: + api_key: YOUR_API_KEY + user: '{{ item.user }}' + apps: '{{ item.apps | default(apps) }}' + suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' + state: '{{ item.state | default("present") }}' + with_items: + - {user: 'a.b@example.com'} + - {state: 'absent', user: 'b.c@example.com', suppress_invitation: false} + - {user: 'x.y@example.com', apps: ["heroku-example-app"]} +""" + +RETURN = """ # """ + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper + + +def add_or_delete_heroku_collaborator(module, client): + user = module.params['user'] + state = module.params['state'] + affected_apps = [] + result_state = False + + for app in module.params['apps']: + if app not in client.apps(): + module.fail_json(msg='App {0} does not exist'.format(app)) + + heroku_app = client.apps()[app] + + heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()] + + if state == 'absent' and user in heroku_collaborator_list: + if not module.check_mode: + heroku_app.remove_collaborator(user) + affected_apps += [app] + result_state = True + elif state == 'present' and user not in heroku_collaborator_list: + if not module.check_mode: + heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation']) + affected_apps += [app] + result_state = True + + return result_state, affected_apps + + +def main(): + argument_spec = HerokuHelper.heroku_argument_spec() + argument_spec.update( + user=dict(required=True, type='str'), + apps=dict(required=True, type='list', elements='str'), + suppress_invitation=dict(default=False, type='bool'), + state=dict(default='present', type='str', choices=['present', 'absent']), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = HerokuHelper(module).get_heroku_client() + + has_changed, msg = add_or_delete_heroku_collaborator(module, client) + module.exit_json(changed=has_changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py deleted file mode 120000 index bb12c3b78c..0000000000 --- a/plugins/modules/hg.py +++ /dev/null @@ -1 +0,0 @@ -./source_control/hg.py \ No newline at end of file diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py new file mode 100644 index 0000000000..afd3e59dd3 --- /dev/null +++ b/plugins/modules/hg.py @@ -0,0 +1,296 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Yeukhon Wong +# Copyright (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: hg +short_description: 
Manages Mercurial (hg) repositories
+description:
+  - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local addresses.
+author: "Yeukhon Wong (@yeukhon)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  repo:
+    description:
+      - The repository address.
+    required: true
+    aliases: [name]
+    type: str
+  dest:
+    description:
+      - Absolute path of where the repository should be cloned to. This parameter is required, unless O(clone) and O(update)
+        are set to V(false).
+    type: path
+  revision:
+    description:
+      - Equivalent to the C(-r) option of the C(hg) command, which can be a changeset, revision number, branch name, or tag.
+    aliases: [version]
+    type: str
+  force:
+    description:
+      - Discards uncommitted changes. Runs C(hg update -C).
+    type: bool
+    default: false
+  purge:
+    description:
+      - Deletes untracked files. Runs C(hg purge).
+    type: bool
+    default: false
+  update:
+    description:
+      - If V(false), do not retrieve new revisions from the origin repository.
+    type: bool
+    default: true
+  clone:
+    description:
+      - If V(false), do not clone the repository if it does not exist locally.
+    type: bool
+    default: true
+  executable:
+    description:
+      - Path to C(hg) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
+    type: str
+notes:
+  - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
+  - 'If the task seems to be hanging, first verify that the remote host is in C(known_hosts). SSH prompts the user to authorize
+    the first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in
+    C(/etc/ssh/ssh_known_hosts) before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).'
+  - As of 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system
+    still uses a Python version below 2.7.9, you are bound to have issues checking out Bitbucket repositories. See
+    U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+"""
+
+EXAMPLES = r"""
+- name: Ensure the current working copy is inside the stable branch and delete untracked files if there are any
+  community.general.hg:
+    repo: https://bitbucket.org/user/repo1
+    dest: /home/user/repo1
+    revision: stable
+    purge: true
+
+- name: Get information about the repository whether or not it has already been cloned locally
+  community.general.hg:
+    repo: git://bitbucket.org/user/repo
+    dest: /srv/checkout
+    clone: false
+    update: false
+"""
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Hg(object):
+    def __init__(self, module, dest, repo, revision, hg_path):
+        self.module = module
+        self.dest = dest
+        self.repo = repo
+        self.revision = revision
+        self.hg_path = hg_path
+
+    def _command(self, args_list):
+        (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
+        return (rc, out, err)
+
+    def _list_untracked(self):
+        args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
+        return self._command(args)
+
+    def get_revision(self):
+        """
+        hg id -b -i -t returns a string in the format:
+           "<changeset>[+] <branch_name> <tags>"
+        This format lists the state of the current working copy,
+        and indicates whether there are uncommitted changes by the
+        plus sign. Otherwise, the sign is omitted.
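+
+        An illustrative example for a working copy with uncommitted changes
+        (hypothetical hash): "0a1b2c3d4e5f+ default tip".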
+ + Read the full description via hg id --help + """ + (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + else: + return to_native(out).strip('\n') + + def get_remote_revision(self): + (rc, out, err) = self._command(['id', self.repo]) + if rc != 0: + self.module.fail_json(msg=err) + else: + return to_native(out).strip('\n') + + def has_local_mods(self): + now = self.get_revision() + if '+' in now: + return True + else: + return False + + def discard(self): + before = self.has_local_mods() + if not before: + return False + + args = ['update', '-C', '-R', self.dest, '-r', '.'] + (rc, out, err) = self._command(args) + if rc != 0: + self.module.fail_json(msg=err) + + after = self.has_local_mods() + if before != after and not after: # no more local modification + return True + + def purge(self): + # before purge, find out if there are any untracked files + (rc1, out1, err1) = self._list_untracked() + if rc1 != 0: + self.module.fail_json(msg=err1) + + # there are some untrackd files + if out1 != '': + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] + (rc2, out2, err2) = self._command(args) + if rc2 != 0: + self.module.fail_json(msg=err2) + return True + else: + return False + + def cleanup(self, force, purge): + discarded = False + purged = False + + if force: + discarded = self.discard() + if purge: + purged = self.purge() + if discarded or purged: + return True + else: + return False + + def pull(self): + return self._command( + ['pull', '-R', self.dest, self.repo]) + + def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) + return self._command(['update', '-R', self.dest]) + + def clone(self): + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) + + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset. 
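+
+        This shortcut only applies when the requested revision looks like a
+        changeset hash (at least 7 hex characters). Branch names and tags fall
+        through to a normal pull, since they cannot be prefix-matched against
+        the full hash printed by 'hg --debug id -i'.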
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it is a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False + + +# =========================================== + +def main(): + module = AnsibleModule( + argument_spec=dict( + repo=dict(type='str', required=True, aliases=['name']), + dest=dict(type='path'), + revision=dict(type='str', aliases=['version']), + force=dict(type='bool', default=False), + purge=dict(type='bool', default=False), + update=dict(type='bool', default=True), + clone=dict(type='bool', default=True), + executable=dict(type='str'), + ), + ) + repo = module.params['repo'] + dest = module.params['dest'] + revision = module.params['revision'] + force = module.params['force'] + purge = module.params['purge'] + update = module.params['update'] + clone = module.params['clone'] + hg_path = module.params['executable'] or module.get_bin_path('hg', True) + if dest is not None: + hgrc = os.path.join(dest, '.hg/hgrc') + + # initial states + before = '' + changed = False + cleaned = False + + if not dest and (clone or update): + module.fail_json(msg="the destination directory must be specified unless clone=false and update=false") + + hg = Hg(module, dest, repo, revision, hg_path) + + # If there is no hgrc file, then assume repo is absent + # and perform clone. Otherwise, perform pull and update. + if not clone and not update: + out = hg.get_remote_revision() + module.exit_json(after=out, changed=False) + if not os.path.exists(hgrc): + if clone: + (rc, out, err) = hg.clone() + if rc != 0: + module.fail_json(msg=err) + else: + module.exit_json(changed=False) + elif not update: + # Just return having found a repo already in the dest path + before = hg.get_revision() + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) + else: + # get the current state before doing pulling + before = hg.get_revision() + + # can perform force and purge + cleaned = hg.cleanup(force, purge) + + (rc, out, err) = hg.pull() + if rc != 0: + module.fail_json(msg=err) + + (rc, out, err) = hg.update() + if rc != 0: + module.fail_json(msg=err) + + after = hg.get_revision() + if before != after or cleaned: + changed = True + + module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py deleted file mode 120000 index 31acef9c23..0000000000 --- a/plugins/modules/hipchat.py +++ /dev/null @@ -1 +0,0 @@ -./notification/hipchat.py \ No newline at end of file diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py deleted file mode 120000 index 444f03a408..0000000000 --- a/plugins/modules/homebrew.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/homebrew.py \ No newline at end of file diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py new file mode 100644 index 0000000000..2b0e4408a2 --- /dev/null +++ b/plugins/modules/homebrew.py @@ -0,0 +1,921 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# +# Based on macports (Jimmy Tang ) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: homebrew +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" +requirements: + - homebrew must already be installed on the target system +short_description: Package manager for Homebrew +description: + - Manages Homebrew packages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - A list of names of packages to install/remove. + aliases: ['formula', 'package', 'pkg'] + type: list + elements: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package. + choices: ['absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded'] + default: present + type: str + update_homebrew: + description: + - Update homebrew itself first. + type: bool + default: false + upgrade_all: + description: + - Upgrade all homebrew packages. + type: bool + default: false + aliases: ['upgrade'] + install_options: + description: + - Options flags to install a package. + aliases: ['options'] + type: list + elements: str + upgrade_options: + description: + - Option flags to upgrade. + type: list + elements: str + version_added: '0.2.0' + force_formula: + description: + - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)). + - To install a cask, use the M(community.general.homebrew_cask) module. + type: bool + default: false + version_added: 9.0.0 +notes: + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. 
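+  - For example, passing a list such as V([git, curl]) directly to O(name) is handled in a single C(brew) invocation, while
+    a C(loop:) over the same items runs C(brew) once per package.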
+""" + +EXAMPLES = r""" +# Install formula foo with 'brew' in default path +- community.general.homebrew: + name: foo + state: present + +# Install formula foo with 'brew' in alternate path (/my/other/location/bin) +- community.general.homebrew: + name: foo + path: /my/other/location/bin + state: present + +# Update homebrew first and install formula foo with 'brew' in default path +- community.general.homebrew: + name: foo + state: present + update_homebrew: true + +# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path +- community.general.homebrew: + name: foo + state: latest + update_homebrew: true + +# Update homebrew and upgrade all packages +- community.general.homebrew: + update_homebrew: true + upgrade_all: true + +# Miscellaneous other examples +- community.general.homebrew: + name: foo + state: head + +- community.general.homebrew: + name: foo + state: linked + +- community.general.homebrew: + name: foo + state: absent + +- community.general.homebrew: + name: foo,bar + state: absent + +- community.general.homebrew: + name: foo + state: present + install_options: with-baz,enable-debug + +- name: Install formula foo with 'brew' from cask + community.general.homebrew: + name: homebrew/cask/foo + state: present + +- name: Use ignore-pinned option while upgrading all + community.general.homebrew: + upgrade_all: true + upgrade_options: ignore-pinned + +- name: Force installing a formula whose name is also a cask name + community.general.homebrew: + name: ambiguous_formula + state: present + force_formula: true +""" + +RETURN = r""" +msg: + description: If the cache was updated or not. + returned: always + type: str + sample: "Changed: 0, Unchanged: 2" +unchanged_pkgs: + description: + - List of package names which are unchanged after module run. + returned: success + type: list + sample: ["awscli", "ag"] + version_added: '0.2.0' +changed_pkgs: + description: + - List of package names which are changed after module run. 
+ returned: success + type: list + sample: ["git", "git-cola"] + version_added: '0.2.0' +""" + +import json +import re + +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate + +from ansible.module_utils.basic import AnsibleModule + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) + + +def _check_package_in_json(json_output, package_type): + return bool(json_output.get(package_type, []) and json_output[package_type][0].get("installed")) +# /utils ------------------------------------------------------------------ }}} + + +class Homebrew(object): + '''A class to manage Homebrew packages.''' + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - None + - installed + - upgraded + - head + - linked + - unlinked + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, str) + and state.lower() in ( + 'installed', + 'upgraded', + 'head', + 'linked', + 'unlinked', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not HomebrewValidate.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewException(self.message) + + else: + if isinstance(path, str): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path, packages=None, state=None, + update_homebrew=False, upgrade_all=False, + install_options=None, upgrade_options=None, + force_formula=False): + if not install_options: + install_options = list() + if not upgrade_options: + upgrade_options = list() + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, 
packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, + install_options=install_options, + upgrade_options=upgrade_options, + force_formula=force_formula) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_pkgs = [] + self.unchanged_pkgs = [] + self.message = '' + + def _setup_instance_vars(self, **kwargs): + self.installed_packages = set() + self.outdated_packages = set() + for key, val in kwargs.items(): + setattr(self, key, val) + + def _prep(self): + self._prep_brew_path() + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewException('Unable to locate homebrew executable.') + + return self.brew_path + + def _validate_packages_names(self): + invalid_packages = [] + for package in self.packages: + if not HomebrewValidate.valid_package(package): + invalid_packages.append(package) + + if invalid_packages: + self.failed = True + self.message = 'Invalid package{0}: {1}'.format( + "s" if len(invalid_packages) > 1 else "", + ", ".join(invalid_packages), + ) + raise HomebrewException(self.message) + + def _save_package_info(self, package_detail, package_name): + if bool(package_detail.get("installed")): + self.installed_packages.add(package_name) + if bool(package_detail.get("outdated")): + self.outdated_packages.add(package_name) + + def _extract_package_name(self, package_detail, is_cask): + # "brew info" can lookup by name, full_name, token, full_token, + # oldnames, old_tokens, or aliases. In addition, any of the + # above names can be prefixed by the tap. Any of these can be + # supplied by the user as the package name. In case of + # ambiguity, where a given name might match multiple packages, + # formulae are preferred over casks. For all other ambiguities, + # the results are an error. Note that in the homebrew/core and + # homebrew/cask taps, there are no "other" ambiguities. + if is_cask: # according to brew info + name = package_detail["token"] + full_name = package_detail["full_token"] + else: + name = package_detail["name"] + full_name = package_detail["full_name"] + + # Issue https://github.com/ansible-collections/community.general/issues/9803: + # name can include the tap as a prefix, in order to disambiguate, + # e.g. casks from identically named formulae. + # + # Issue https://github.com/ansible-collections/community.general/issues/10012: + # package_detail["tap"] is None if package is no longer available. + # + # Issue https://github.com/ansible-collections/community.general/issues/10804 + # name can be an alias, oldnames or old_tokens optionally prefixed by tap + package_names = {name, full_name} + package_names.update(package_detail.get("aliases", [])) + package_names.update(package_detail.get("oldnames", [])) + package_names.update(package_detail.get("old_tokens", [])) + if package_detail['tap']: + # names so far, with tap prefix added to each + tapped_names = {package_detail["tap"] + "/" + x for x in package_names} + package_names.update(tapped_names) + + # Finally, identify which of all those package names was the one supplied by the user. 
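+        # Illustrative example (hypothetical request): if the user asked for
+        # "homebrew/core/git", brew info reports name "git", full_name "git" and
+        # tap "homebrew/core", so the candidate set contains both "git" and
+        # "homebrew/core/git"; the intersection below keeps only the spelling
+        # that the user actually supplied.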
+ package_names = package_names & set(self.packages) + if len(package_names) != 1: + self.failed = True + self.message = "Package names for {name} are missing or ambiguous: {packages}".format( + name=name, + packages=", ".join(str(p) for p in package_names), + ) + raise HomebrewException(self.message) + + # Then make sure the user provided name resurface. + return package_names.pop() + + def _get_packages_info(self): + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + "--json=v2", + ] + cmd.extend(self.packages) + if self.force_formula: + cmd.append("--formula") + + rc, out, err = self.module.run_command(cmd) + if rc != 0: + self.failed = True + self.message = err.strip() or ("Unknown failure with exit code %d" % rc) + raise HomebrewException(self.message) + + data = json.loads(out) + for package_detail in data.get("formulae", []): + package_name = self._extract_package_name(package_detail, is_cask=False) + self._save_package_info(package_detail, package_name) + + for package_detail in data.get("casks", []): + package_name = self._extract_package_name(package_detail, is_cask=True) + self._save_package_info(package_detail, package_name) + + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewException: + pass + + changed_count = len(self.changed_pkgs) + unchanged_count = len(self.unchanged_pkgs) + if not self.failed and (changed_count + unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + changed_count, + unchanged_count, + ) + return (self.failed, self.changed, self.message) + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.update_homebrew: + self._update_homebrew() + + if self.upgrade_all: + self._upgrade_all() + + if self.packages: + self._validate_packages_names() + self._get_packages_info() + if self.state == 'installed': + return self._install_packages() + elif self.state == 'upgraded': + return self._upgrade_packages() + elif self.state == 'head': + return self._install_packages() + elif self.state == 'linked': + return self._link_packages() + elif self.state == 'unlinked': + return self._unlink_packages() + elif self.state == 'absent': + return self._uninstall_packages() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + if self.module.check_mode: + self.changed = True + self.message = 'Homebrew would be updated.' + raise HomebrewException(self.message) + + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, str): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + if self.module.check_mode: + self.changed = True + self.message = 'Homebrew packages would be upgraded.' + raise HomebrewException(self.message) + cmd = [self.brew_path, 'upgrade'] + self.upgrade_options + + rc, out, err = self.module.run_command(cmd) + if rc == 0: + if not out: + self.message = 'Homebrew packages already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew upgraded.' 
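+                # The empty-output check above relies on 'brew upgrade' printing
+                # package output only when something was actually upgraded.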
+ + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_packages(self): + packages_to_install = set(self.packages) - self.installed_packages + + if len(packages_to_install) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already installed: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) + ) + raise HomebrewException(self.message) + + if self.state == 'head': + head = '--HEAD' + else: + head = None + + if self.force_formula: + formula = '--formula' + else: + formula = None + + opts = ( + [self.brew_path, 'install'] + + self.install_options + + list(packages_to_install) + + [head, formula] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_pkgs.extend(packages_to_install) + self.unchanged_pkgs.extend(self.installed_packages) + self.changed = True + self.message = 'Package{0} installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) + ) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_all_packages(self): + opts = ( + [self.brew_path, 'upgrade'] + + self.install_options + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed = True + self.message = 'All packages upgraded.' + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_packages(self): + if not self.packages: + self._upgrade_all_packages() + else: + # There are 3 action possible here depending on installed and outdated states: + # - not installed -> 'install' + # - installed and outdated -> 'upgrade' + # - installed and NOT outdated -> Nothing to do! 
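+            # Worked example (hypothetical names): packages = {a, b, c},
+            # installed = {b, c}, outdated = {c} -> a is installed, c is
+            # upgraded, and b is reported as unchanged.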
+ packages_to_install = set(self.packages) - self.installed_packages + packages_to_upgrade = self.installed_packages & self.outdated_packages + packages_to_install_or_upgrade = packages_to_install | packages_to_upgrade + + if len(packages_to_install_or_upgrade) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already upgraded: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade) + ) + raise HomebrewException(self.message) + + for command, packages in [ + ("install", packages_to_install), + ("upgrade", packages_to_upgrade) + ]: + if not packages: + continue + + opts = ( + [self.brew_path, command] + + self.install_options + + list(packages) + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc != 0: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + self.changed_pkgs.extend(packages_to_install_or_upgrade) + self.unchanged_pkgs.extend(set(self.packages) - packages_to_install_or_upgrade) + self.changed = True + self.message = 'Package{0} upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade), + ) + # /upgraded ------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_packages(self): + packages_to_uninstall = self.installed_packages & set(self.packages) + + if len(packages_to_uninstall) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already uninstalled: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'uninstall', '--force'] + + self.install_options + + list(packages_to_uninstall) + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_pkgs.extend(packages_to_uninstall) + self.unchanged_pkgs.extend(set(self.packages) - self.installed_packages) + self.changed = True + self.message = 'Package{0} uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) + ) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /uninstalled ----------------------------- }}} + + # linked --------------------------------- {{{ + def _link_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: + self.failed = True + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'link'] + + self.install_options + + self.packages + ) + cmd = [opt for opt in opts if opt] + rc, out, err = 
self.module.run_command(cmd) + + if rc == 0: + self.changed_pkgs.extend(self.packages) + self.changed = True + self.message = 'Package{0} linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) + return True + else: + self.failed = True + self.message = 'Package{0} could not be linked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) + raise HomebrewException(self.message) + # /linked -------------------------------- }}} + + # unlinked ------------------------------- {{{ + def _unlink_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: + self.failed = True + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'unlink'] + + self.install_options + + self.packages + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_pkgs.extend(self.packages) + self.changed = True + self.message = 'Package{0} unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) + return True + else: + self.failed = True + self.message = 'Package{0} could not be unlinked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) + raise HomebrewException(self.message) + # /unlinked ------------------------------ }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["pkg", "package", "formula"], + type='list', + elements='str', + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + type='path', + ), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", "head", + "linked", "unlinked", + "absent", "removed", "uninstalled", + ], + ), + update_homebrew=dict( + default=False, + type='bool', + ), + upgrade_all=dict( + default=False, + aliases=["upgrade"], + type='bool', + ), + install_options=dict( + aliases=['options'], + type='list', + elements='str', + ), + upgrade_options=dict( + type='list', + elements='str', + ), + force_formula=dict( + default=False, + type='bool', + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p['name']: + packages = [package_name.lower() for package_name in p['name']] + else: + packages = None + + path = p['path'] + if path: + path = path.split(':') + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('head', ): + state = 'head' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state == 'linked': + state = 'linked' + if state == 'unlinked': + state = 'unlinked' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + force_formula = p['force_formula'] + update_homebrew = p['update_homebrew'] + if not update_homebrew: + module.run_command_environ_update.update( + dict(HOMEBREW_NO_AUTO_UPDATE="True") + ) + upgrade_all = p['upgrade_all'] + p['install_options'] = p['install_options'] or [] + 
install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + + p['upgrade_options'] = p['upgrade_options'] or [] + upgrade_options = ['--{0}'.format(upgrade_option) + for upgrade_option in p['upgrade_options']] + brew = Homebrew(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, install_options=install_options, + upgrade_options=upgrade_options, force_formula=force_formula) + (failed, changed, message) = brew.run() + changed_pkgs = brew.changed_pkgs + unchanged_pkgs = brew.unchanged_pkgs + + if failed: + module.fail_json(msg=message) + module.exit_json( + changed=changed, + msg=message, + unchanged_pkgs=unchanged_pkgs, + changed_pkgs=changed_pkgs + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py deleted file mode 120000 index 3e0bfa16d5..0000000000 --- a/plugins/modules/homebrew_cask.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/homebrew_cask.py \ No newline at end of file diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py new file mode 100644 index 0000000000..ac88e1bafe --- /dev/null +++ b/plugins/modules/homebrew_cask.py @@ -0,0 +1,827 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2016, Indrajit Raychaudhuri +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: homebrew_cask +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Enric Lluelles (@enriclluelles)" +short_description: Install and uninstall homebrew casks +description: + - Manages Homebrew casks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of cask to install or remove. + aliases: ['cask', 'package', 'pkg'] + type: list + elements: str + path: + description: + - "':' separated list of paths to search for 'brew' executable." + default: '/usr/local/bin:/opt/homebrew/bin' + type: path + state: + description: + - State of the cask. + choices: ['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded'] + default: present + type: str + sudo_password: + description: + - The sudo password to be passed to E(SUDO_ASKPASS). + required: false + type: str + update_homebrew: + description: + - Update homebrew itself first. + - Note that C(brew cask update) is a synonym for C(brew update). + type: bool + default: false + install_options: + description: + - Options flags to install a package. + aliases: ['options'] + type: list + elements: str + accept_external_apps: + description: + - Allow external apps. + type: bool + default: false + upgrade_all: + description: + - Upgrade all casks. + - Mutually exclusive with C(upgraded) state. + type: bool + default: false + aliases: ['upgrade'] + greedy: + description: + - Upgrade casks that auto update. + - Passes C(--greedy) to C(brew outdated --cask) when checking if an installed cask has a newer version available, or + to C(brew upgrade --cask) when upgrading all casks. 
+ type: bool + default: false +""" +EXAMPLES = r""" +- name: Install cask + community.general.homebrew_cask: + name: alfred + state: present + +- name: Remove cask + community.general.homebrew_cask: + name: alfred + state: absent + +- name: Install cask with install options + community.general.homebrew_cask: + name: alfred + state: present + install_options: 'appdir=/Applications' + +- name: Install cask with install options + community.general.homebrew_cask: + name: alfred + state: present + install_options: 'debug,appdir=/Applications' + +- name: Install cask with force option + community.general.homebrew_cask: + name: alfred + state: present + install_options: force + +- name: Allow external app + community.general.homebrew_cask: + name: alfred + state: present + accept_external_apps: true + +- name: Remove cask with force option + community.general.homebrew_cask: + name: alfred + state: absent + install_options: force + +- name: Upgrade all casks + community.general.homebrew_cask: + upgrade_all: true + +- name: Upgrade all casks with greedy option + community.general.homebrew_cask: + upgrade_all: true + greedy: true + +- name: Upgrade given cask with force option + community.general.homebrew_cask: + name: alfred + state: upgraded + install_options: force + +- name: Upgrade cask with greedy option + community.general.homebrew_cask: + name: 1password + state: upgraded + greedy: true + +- name: Using sudo password for installing cask + community.general.homebrew_cask: + name: wireshark + state: present + sudo_password: "{{ ansible_become_pass }}" +""" + +import os +import re +import tempfile + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate + +from ansible.module_utils.common.text.converters import to_bytes +from ansible.module_utils.basic import AnsibleModule + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewCaskException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class HomebrewCask(object): + '''A class to manage Homebrew casks.''' + + # class regexes ------------------------------------------------ {{{ + VALID_CASK_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots
+        /                   # slash (for taps)
+        \-                  # dashes
+        @                   # at symbol
+        \+                  # plus symbol
+    '''
+
+    INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
+    # /class regexes ----------------------------------------------- }}}
+
+    # class validations -------------------------------------------- {{{
+    @classmethod
+    def valid_cask(cls, cask):
+        '''A valid cask is either None or a string made of the allowed cask
+        characters (alphanumerics, dots, slashes, dashes, '@' and '+').'''
+
+        if cask is None:
+            return True
+
+        return (
+            isinstance(cask, str)
+            and not cls.INVALID_CASK_REGEX.search(cask)
+        )
+
+    @classmethod
+    def valid_state(cls, state):
+        '''
+        A valid state is one of:
+            - installed
+            - absent
+        '''
+
+        if state is None:
+            return True
+        else:
+            return (
+                isinstance(state, str)
+                and state.lower() in (
+                    'installed',
+                    'absent',
+                )
+            )
+
+    @classmethod
+    def valid_module(cls, module):
+        '''A valid module is an instance of AnsibleModule.'''
+
+        return isinstance(module, AnsibleModule)
+    # /class validations ------------------------------------------- }}}
+
+    # class properties --------------------------------------------- {{{
+    @property
+    def module(self):
+        return self._module
+
+    @module.setter
+    def module(self, module):
+        if not self.valid_module(module):
+            self._module = None
+            self.failed = True
+            self.message = 'Invalid module: {0}.'.format(module)
+            raise HomebrewCaskException(self.message)
+
+        else:
+            self._module = module
+            return module
+
+    @property
+    def path(self):
+        return self._path
+
+    @path.setter
+    def path(self, path):
+        if not HomebrewValidate.valid_path(path):
+            self._path = []
+            self.failed = True
+            self.message = 'Invalid path: {0}.'.format(path)
+            raise HomebrewCaskException(self.message)
+
+        else:
+            if isinstance(path, str):
+                self._path = path.split(':')
+            else:
+                self._path = path
+
+            return path
+
+    @property
+    def brew_path(self):
+        return self._brew_path
+
+    @brew_path.setter
+    def brew_path(self, brew_path):
+        if not HomebrewValidate.valid_brew_path(brew_path):
+            self._brew_path = None
+            self.failed = True
+            self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+            raise HomebrewCaskException(self.message)
+
+        else:
+            self._brew_path = brew_path
+            return brew_path
+
+    @property
+    def params(self):
+        return self._params
+
+    @params.setter
+    def params(self, params):
+        self._params = self.module.params
+        return self._params
+
+    @property
+    def current_cask(self):
+        return self._current_cask
+
+    @current_cask.setter
+    def current_cask(self, cask):
+        if not self.valid_cask(cask):
+            self._current_cask = None
+            self.failed = True
+            self.message = 'Invalid cask: {0}.'.format(cask)
+            raise HomebrewCaskException(self.message)
+
+        else:
+            self._current_cask = cask
+            return cask
+
+    @property
+    def brew_version(self):
+        try:
+            return self._brew_version
+        except AttributeError:
+            return None
+
+    @brew_version.setter
+    def brew_version(self, brew_version):
+        self._brew_version = brew_version
+
+    # /class properties -------------------------------------------- }}}
+
+    def __init__(self, module, path=path, casks=None, state=None,
+                 sudo_password=None, update_homebrew=False,
+                 install_options=None, accept_external_apps=False,
+                 upgrade_all=False, greedy=False):
+        if not install_options:
+            install_options = list()
+        self._setup_status_vars()
+        self._setup_instance_vars(module=module, path=path, casks=casks,
+                                  state=state, sudo_password=sudo_password,
+                                  update_homebrew=update_homebrew,
+                                  install_options=install_options,
+                                  accept_external_apps=accept_external_apps,
+                                  upgrade_all=upgrade_all,
+                                  greedy=greedy, )
+
+        self._prep()
+
+    # 
prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in kwargs.items(): + setattr(self, key, val) + + def _prep(self): + self._prep_brew_path() + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewCaskException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewCaskException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewCaskException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_cask_is_outdated(self): + if not self.valid_cask(self.current_cask): + return False + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, 'outdated', '--cask'] + else: + base_opts = [self.brew_path, 'cask', 'outdated'] + + cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask] + + rc, out, err = self.module.run_command(cask_is_outdated_command) + + return out != "" + + def _current_cask_is_installed(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, "list", "--cask"] + else: + base_opts = [self.brew_path, "cask", "list"] + + cmd = base_opts + [self.current_cask] + rc, out, err = self.module.run_command(cmd) + + return rc == 0 + + def _get_brew_version(self): + if self.brew_version: + return self.brew_version + + cmd = [self.brew_path, '--version'] + + dummy, out, dummy = self.module.run_command(cmd, check_rc=True) + + pattern = r"Homebrew (.*)(\d+\.\d+\.\d+)(-dirty)?" + rematch = re.search(pattern, out) + if not rematch: + self.module.fail_json(msg="Failed to match regex to get brew version", stdout=out) + self.brew_version = rematch.groups()[1] + return self.brew_version + + def _brew_cask_command_is_deprecated(self): + # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/) + return LooseVersion(self._get_brew_version()) >= LooseVersion('2.6.0') + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.upgrade_all: + return self._upgrade_all() + + if self.casks: + if self.state == 'installed': + return self._install_casks() + elif self.state == 'upgraded': + return self._upgrade_casks() + elif self.state == 'absent': + return self._uninstall_casks() + + self.failed = True + self.message = "You must select a cask to install." 
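+        # Reaching this point means neither upgrade_all nor a list of casks
+        # was supplied, so there is nothing actionable to do.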
+ raise HomebrewCaskException(self.message) + + # sudo_password fix ---------------------- {{{ + def _run_command_with_sudo_password(self, cmd): + rc, out, err = '', '', '' + + with tempfile.NamedTemporaryFile() as sudo_askpass_file: + sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password)) + os.chmod(sudo_askpass_file.name, 0o700) + sudo_askpass_file.file.close() + + rc, out, err = self.module.run_command( + cmd, + environ_update={'SUDO_ASKPASS': sudo_askpass_file.name} + ) + + self.module.add_cleanup_file(sudo_askpass_file.name) + + return (rc, out, err) + # /sudo_password fix --------------------- }}} + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, str): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + if self.module.check_mode: + self.changed = True + self.message = 'Casks would be upgraded.' + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + cmd = [self.brew_path, 'upgrade', '--cask'] + else: + cmd = [self.brew_path, 'cask', 'upgrade'] + + if self.greedy: + cmd = cmd + ['--greedy'] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + # 'brew upgrade --cask' does not output anything if no casks are upgraded + if not out.strip(): + self.message = 'Homebrew casks already upgraded.' + + # handle legacy 'brew cask upgrade' + elif re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + self.message = 'Homebrew casks already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew casks upgraded.' 
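+            # Note: change detection above is output-based. 'brew' exits 0
+            # here whether or not anything was upgraded, so only non-empty
+            # (and non-"No Casks") output marks the task as changed.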
+ + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if '--force' not in self.install_options and self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be installed: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, 'install', '--cask'] + else: + base_opts = [self.brew_path, 'cask', 'install'] + + opts = base_opts + [self.current_cask] + self.install_options + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask installed: {0}'.format(self.current_cask) + return True + elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _install_casks(self): + for cask in self.casks: + self.current_cask = cask + self._install_current_cask() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_cask(self): + command = 'upgrade' + + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + command = 'install' + + if self._current_cask_is_installed() and not self._current_cask_is_outdated(): + self.message = 'Cask is already upgraded: {0}'.format( + self.current_cask, + ) + self.unchanged_count += 1 + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be upgraded: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, command, '--cask'] + else: + base_opts = [self.brew_path, 'cask', command] + + opts = base_opts + self.install_options + [self.current_cask] + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed() and not self._current_cask_is_outdated(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask upgraded: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _upgrade_casks(self): + for cask in self.casks: + self.current_cask = cask + self._upgrade_current_cask() + + return True + # /upgraded 
------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already uninstalled: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be uninstalled: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, 'uninstall', '--cask'] + else: + base_opts = [self.brew_path, 'cask', 'uninstall'] + + opts = base_opts + [self.current_cask] + self.install_options + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if not self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask uninstalled: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _uninstall_casks(self): + for cask in self.casks: + self.current_cask = cask + self._uninstall_current_cask() + + return True + # /uninstalled --------------------------- }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["pkg", "package", "cask"], + type='list', + elements='str', + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin", + type='path', + ), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", + "absent", "removed", "uninstalled", + ], + ), + sudo_password=dict( + type="str", + no_log=True, + ), + update_homebrew=dict( + default=False, + type='bool', + ), + install_options=dict( + aliases=['options'], + type='list', + elements='str', + ), + accept_external_apps=dict( + default=False, + type='bool', + ), + upgrade_all=dict( + default=False, + aliases=["upgrade"], + type='bool', + ), + greedy=dict( + default=False, + type='bool', + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p['name']: + casks = p['name'] + else: + casks = None + + path = p['path'] + if path: + path = path.split(':') + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + sudo_password = p['sudo_password'] + + update_homebrew = p['update_homebrew'] + upgrade_all = p['upgrade_all'] + greedy = p['greedy'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + + accept_external_apps = p['accept_external_apps'] + + brew_cask = HomebrewCask(module=module, path=path, casks=casks, + state=state, sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + greedy=greedy, + ) + (failed, changed, message) = brew_cask.run() + if 
failed: + module.fail_json(msg=message) + else: + module.exit_json(changed=changed, msg=message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py new file mode 100644 index 0000000000..5527aae133 --- /dev/null +++ b/plugins/modules/homebrew_services.py @@ -0,0 +1,251 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2024, Kit Ham +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: homebrew_services +author: + - "Kit Ham (@kitizz)" +requirements: + - homebrew must already be installed on the target system +short_description: Services manager for Homebrew +version_added: 9.3.0 +description: + - Manages daemons and services using Homebrew. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An installed homebrew package whose service is to be updated. + aliases: ['formula'] + type: str + required: true + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package's service. + choices: ['present', 'absent', 'restarted'] + default: present + type: str +""" + +EXAMPLES = r""" +- name: Install foo package + community.general.homebrew: + name: foo + state: present + +- name: Start the foo service (equivalent to `brew services start foo`) + community.general.homebrew_services: + name: foo + state: present + +- name: Restart the foo service (equivalent to `brew services restart foo`) + community.general.homebrew_services: + name: foo + state: restarted + +- name: Remove the foo service (equivalent to `brew services stop foo`) + community.general.homebrew_services: + name: foo + state: absent +""" + +RETURN = r""" +pid: + description: + - If the service is now running, this is the PID of the service, otherwise -1. + returned: success + type: int + sample: 1234 +running: + description: + - Whether the service is running after running this command. + returned: success + type: bool + sample: true +""" + +import json +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.homebrew import ( + HomebrewValidate, + parse_brew_path, +) + +if sys.version_info < (3, 5): + from collections import namedtuple + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = namedtuple( + "HomebrewServiceArgs", ["name", "state", "brew_path"] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"]) + +else: + from typing import NamedTuple, Optional + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. 
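+    # For example (illustrative values only, not produced by the module):
+    #   HomebrewServiceArgs(name='mysql', state='present',
+    #                       brew_path='/opt/homebrew/bin/brew')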
+ HomebrewServiceArgs = NamedTuple( + "HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = NamedTuple( + "HomebrewServiceState", [("running", bool), ("pid", Optional[int])] + ) + + +def _brew_service_state(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> HomebrewServiceState + cmd = [args.brew_path, "services", "info", args.name, "--json"] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + + try: + data = json.loads(stdout)[0] + except json.JSONDecodeError: + module.fail_json(msg="Failed to parse JSON output:\n{0}".format(stdout)) + + return HomebrewServiceState(running=data["status"] == "started", pid=data["pid"]) + + +def _exit_with_state(args, module, changed=False, message=None): + # type: (HomebrewServiceArgs, AnsibleModule, bool, Optional[str]) -> None + state = _brew_service_state(args, module) + if message is None: + message = ( + "Running: {state.running}, Changed: {changed}, PID: {state.pid}".format( + state=state, changed=changed + ) + ) + module.exit_json(msg=message, pid=state.pid, running=state.running, changed=changed) + + +def validate_and_load_arguments(module): + # type: (AnsibleModule) -> HomebrewServiceArgs + """Reuse the Homebrew module's validation logic to validate these arguments.""" + package = module.params["name"] # type: ignore + if not HomebrewValidate.valid_package(package): + module.fail_json(msg="Invalid package name: {0}".format(package)) + + state = module.params["state"] # type: ignore + if state not in ["present", "absent", "restarted"]: + module.fail_json(msg="Invalid state: {0}".format(state)) + + brew_path = parse_brew_path(module) + + return HomebrewServiceArgs(name=package, state=state, brew_path=brew_path) + + +def start_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Start the requested brew service if it is not already running.""" + state = _brew_service_state(args, module) + if state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already running") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be started") + + start_cmd = [args.brew_path, "services", "start", args.name] + rc, stdout, stderr = module.run_command(start_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def stop_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Stop the requested brew service if it is running.""" + state = _brew_service_state(args, module) + if not state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already stopped") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be stopped") + + stop_cmd = [args.brew_path, "services", "stop", args.name] + rc, stdout, stderr = module.run_command(stop_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def restart_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Restart the requested brew service. 
This always results in a change.""" + if module.check_mode: + _exit_with_state( + args, module, changed=True, message="Service would be restarted" + ) + + restart_cmd = [args.brew_path, "services", "restart", args.name] + rc, stdout, stderr = module.run_command(restart_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["formula"], + required=True, + type="str", + ), + state=dict( + choices=["present", "absent", "restarted"], + default="present", + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + type="path", + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict( + LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" + ) + + # Pre-validate arguments. + service_args = validate_and_load_arguments(module) + + # Choose logic based on the desired state. + if service_args.state == "present": + start_service(service_args, module) + elif service_args.state == "absent": + stop_service(service_args, module) + elif service_args.state == "restarted": + restart_service(service_args, module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py deleted file mode 120000 index 1604488f93..0000000000 --- a/plugins/modules/homebrew_tap.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/homebrew_tap.py \ No newline at end of file diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py new file mode 100644 index 0000000000..813b89db44 --- /dev/null +++ b/plugins/modules/homebrew_tap.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2016, Indrajit Raychaudhuri +# +# Based on homebrew (Andrew Dunham ) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: homebrew_tap +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" +short_description: Tap a Homebrew repository +description: + - Tap external Homebrew repositories. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The GitHub user/organization repository to tap. + required: true + aliases: ['tap'] + type: list + elements: str + url: + description: + - The optional git URL of the repository to tap. The URL is not assumed to be on GitHub, and the protocol does not have + to be HTTP. Any location and protocol that git can handle is fine. + - O(name) option may not be a list of multiple taps (but a single tap instead) when this option is provided. + required: false + type: str + state: + description: + - State of the repository. + choices: ['present', 'absent'] + required: false + default: 'present' + type: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. 
+ default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + version_added: '2.1.0' +requirements: [homebrew] +""" + +EXAMPLES = r""" +- name: Tap a Homebrew repository, state present + community.general.homebrew_tap: + name: homebrew/dupes + +- name: Tap a Homebrew repository, state absent + community.general.homebrew_tap: + name: homebrew/dupes + state: absent + +- name: Tap a Homebrew repository, state present + community.general.homebrew_tap: + name: homebrew/dupes,homebrew/science + state: present + +- name: Tap a Homebrew repository using url, state present + community.general.homebrew_tap: + name: telemachus/brew + url: 'https://bitbucket.org/telemachus/brew' +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def a_valid_tap(tap): + '''Returns True if the tap is valid.''' + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') + return regex.match(tap) + + +def already_tapped(module, brew_path, tap): + '''Returns True if already tapped.''' + + rc, out, err = module.run_command([ + brew_path, + 'tap', + ]) + + taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] + tap_name = re.sub('homebrew-', '', tap.lower()) + + return tap_name in taps + + +def add_tap(module, brew_path, tap, url=None): + '''Adds a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif not already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'tap', + tap, + url, + ]) + if rc == 0: + changed = True + msg = 'successfully tapped: %s' % tap + else: + failed = True + msg = 'failed to tap: %s due to %s' % (tap, err) + + else: + msg = 'already tapped: %s' % tap + + return (failed, changed, msg) + + +def add_taps(module, brew_path, taps): + '''Adds one or more taps.''' + failed, changed, unchanged, added, msg = False, False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = add_tap(module, brew_path, tap) + if failed: + break + if changed: + added += 1 + else: + unchanged += 1 + + if failed: + msg = 'added: %d, unchanged: %d, error: ' + msg + msg = msg % (added, unchanged) + elif added: + changed = True + msg = 'added: %d, unchanged: %d' % (added, unchanged) + else: + msg = 'added: %d, unchanged: %d' % (added, unchanged) + + return (failed, changed, msg) + + +def remove_tap(module, brew_path, tap): + '''Removes a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'untap', + tap, + ]) + if not already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully untapped: %s' % tap + else: + failed = True + msg = 'failed to untap: %s due to %s' % (tap, err) + + else: + msg = 'already untapped: %s' % tap + + return (failed, changed, msg) + + +def remove_taps(module, brew_path, taps): + '''Removes one or more taps.''' + failed, changed, unchanged, removed, msg = False, False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = remove_tap(module, brew_path, tap) + if failed: + break + if changed: + removed += 1 + else: + unchanged += 1 + + if failed: + msg = 'removed: %d, unchanged: %d, error: ' + msg + msg = msg % (removed, unchanged) + elif removed: + changed = True + msg = 'removed: %d, unchanged: 
%d' % (removed, unchanged)
+    else:
+        msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+    return (failed, changed, msg)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['tap'], type='list', required=True, elements='str'),
+            url=dict(),
+            state=dict(default='present', choices=['present', 'absent']),
+            path=dict(
+                default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
+                type='path',
+            ),
+        ),
+        supports_check_mode=True,
+    )
+
+    path = module.params['path']
+    if path:
+        path = path.split(':')
+
+    brew_path = module.get_bin_path(
+        'brew',
+        required=True,
+        opt_dirs=path,
+    )
+
+    taps = module.params['name']
+    url = module.params['url']
+
+    if module.params['state'] == 'present':
+        if url is None:
+            # No tap URL provided explicitly, continue with bulk addition
+            # of all the taps.
+            failed, changed, msg = add_taps(module, brew_path, taps)
+        else:
+            # When a tap URL is provided explicitly, only a *single* tap may
+            # be added. Validate and proceed to add that single tap.
+            if len(taps) > 1:
+                msg = "List of multiple taps may not be provided with 'url' option."
+                module.fail_json(msg=msg)
+            else:
+                failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+        if failed:
+            module.fail_json(msg=msg)
+        else:
+            module.exit_json(changed=changed, msg=msg)
+
+    elif module.params['state'] == 'absent':
+        failed, changed, msg = remove_taps(module, brew_path, taps)
+
+        if failed:
+            module.fail_json(msg=msg)
+        else:
+            module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py
new file mode 100644
index 0000000000..90e97fc484
--- /dev/null
+++ b/plugins/modules/homectl.py
@@ -0,0 +1,686 @@
+#!/usr/bin/python
+
+# Copyright (c) 2022, James Livulpi
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: homectl
+author:
+  - "James Livulpi (@jameslivulpi)"
+short_description: Manage user accounts with systemd-homed
+version_added: 4.4.0
+description:
+  - Manages a user's home directory managed by systemd-homed.
+notes:
+  - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which
+    was removed from Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/).
+requirements:
+  - legacycrypt (on Python 3.13 or newer)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - The user name to create, remove, or update.
+    required: true
+    aliases: ['user', 'username']
+    type: str
+  password:
+    description:
+      - Set the user's password to this.
+      - Homed requires this value to be in cleartext on user creation and updating a user.
+      - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using
+        crypt.
+      - See U(https://systemd.io/USER_RECORD/).
+      - This is required for O(state=present). When an existing user is updated this is checked against the stored hash in
+        homed.
+    type: str
+  state:
+    description:
+      - The operation to take on the user.
+    choices: ['absent', 'present']
+    default: present
+    type: str
+  storage:
+    description:
+      - Indicates the storage mechanism for the user's home directory.
+      - If the storage type is not specified, C(homed.conf(5\)) defines which default storage to use.
+      - Only used when a user is first created.
+    choices: ['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs']
+    type: str
+  disksize:
+    description:
+      - The intended home directory disk space.
+      - Human readable value such as V(10G), V(10M), or V(10B).
+    type: str
+  resize:
+    description:
+      - When used with O(disksize), this attempts to resize the home directory immediately.
+    default: false
+    type: bool
+  realname:
+    description:
+      - The user's real ('human') name.
+      - This can also be used to add a comment to maintain compatibility with C(useradd).
+    aliases: ['comment']
+    type: str
+  realm:
+    description:
+      - The 'realm' a user is defined in.
+    type: str
+  email:
+    description:
+      - The email address of the user.
+    type: str
+  location:
+    description:
+      - A free-form location string describing the location of the user.
+    type: str
+  iconname:
+    description:
+      - The name of an icon picked by the user, for example for the purpose of an avatar.
+      - Should follow the semantics defined in the Icon Naming Specification.
+      - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics.
+    type: str
+  homedir:
+    description:
+      - Path to use as home directory for the user.
+      - This is the directory the user's home directory is mounted to while the user is logged in.
+      - This is not where the user's data is actually stored, see O(imagepath) for that.
+      - Only used when a user is first created.
+    type: path
+  imagepath:
+    description:
+      - Path to place the user's home directory.
+      - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information.
+      - Only used when a user is first created.
+    type: path
+  uid:
+    description:
+      - Sets the UID of the user.
+      - If using O(gid), homed requires the value to be the same.
+      - Only used when a user is first created.
+    type: int
+  gid:
+    description:
+      - Sets the gid of the user.
+      - If using O(uid), homed requires the value to be the same.
+      - Only used when a user is first created.
+    type: int
+  mountopts:
+    description:
+      - Comma-separated string of mount options for a user's home directory.
+      - Valid options are V(nosuid), V(nodev) or V(noexec).
+      - Homed by default uses V(nodev) and V(nosuid) while V(noexec) is off.
+    type: str
+  umask:
+    description:
+      - Sets the umask for the user's login sessions.
+      - Value from V(0000) to V(0777).
+    type: int
+  memberof:
+    description:
+      - Comma-separated string of UNIX groups this user shall be a member of.
+    aliases: ['groups']
+    type: str
+  skeleton:
+    description:
+      - The absolute path to the skeleton directory to populate a new home directory from.
+      - This is only used when a home directory is first created.
+      - If not specified, homed by default uses V(/etc/skel).
+    aliases: ['skel']
+    type: path
+  shell:
+    description:
+      - Shell binary to use for terminal logins of the given user.
+      - If not specified, homed by default uses V(/bin/bash).
+    type: str
+  environment:
+    description:
+      - Comma-separated string of environment variables and their values to set for the user's login session,
+        in a format compatible with C(putenv(\)).
+      - Any environment variable listed here is automatically set by pam_systemd for all login sessions of the user.
+ aliases: ['setenv'] + type: str + timezone: + description: + - Preferred timezone to use for the user. + - Should be a tzdata compatible location string such as V(America/New_York). + type: str + locked: + description: + - Whether the user account should be locked or not. + type: bool + language: + description: + - The preferred language/locale for the user. + - This should be in a format compatible with the E(LANG) environment variable. + type: str + passwordhint: + description: + - Password hint for the given user. + type: str + sshkeys: + description: + - String separated by comma each listing a SSH public key that is authorized to access the account. + - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. + type: str + notbefore: + description: + - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. + type: int + notafter: + description: + - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. + type: int +""" + +EXAMPLES = r""" +- name: Add the user 'james' + community.general.homectl: + name: johnd + password: myreallysecurepassword1! + state: present + +- name: Add the user 'alice' with a zsh shell, uid of 1000, and gid of 2000 + community.general.homectl: + name: alice + password: myreallysecurepassword1! + state: present + shell: /bin/zsh + uid: 1000 + gid: 1000 + +- name: Modify an existing user 'frank' to have 10G of diskspace and resize usage now + community.general.homectl: + name: frank + password: myreallysecurepassword1! + state: present + disksize: 10G + resize: true + +- name: Remove an existing user 'janet' + community.general.homectl: + name: janet + state: absent +""" + +RETURN = r""" +data: + description: Dictionary returned from C(homectl inspect -j). + returned: success + type: dict + sample: + { + "data": { + "binding": { + "e9ed2a5b0033427286b228e97c1e8343": { + "fileSystemType": "btrfs", + "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", + "gid": 60268, + "imagePath": "/home/james.home", + "luksCipher": "aes", + "luksCipherMode": "xts-plain64", + "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", + "luksVolumeKeySize": 32, + "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", + "storage": "luks", + "uid": 60268 + } + }, + "diskSize": 3221225472, + "disposition": "regular", + "lastChangeUSec": 1641941238208691, + "lastPasswordChangeUSec": 1641941238208691, + "privileged": { + "hashedPassword": [ + "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." 
+ ] + }, + "signature": [ + { + "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", + "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" + } + ], + "status": { + "e9ed2a5b0033427286b228e97c1e8343": { + "diskCeiling": 21845405696, + "diskFloor": 268435456, + "diskSize": 3221225472, + "service": "io.systemd.Home", + "signedLocally": true, + "state": "inactive" + } + }, + "userName": "james" + } + } +""" + +import json +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import jsonify +from ansible.module_utils.common.text.formatters import human_to_bytes + +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = None + + +class Homectl(object): + def __init__(self, module): + self.module = module + self.state = module.params['state'] + self.name = module.params['name'] + self.password = module.params['password'] + self.storage = module.params['storage'] + self.disksize = module.params['disksize'] + self.resize = module.params['resize'] + self.realname = module.params['realname'] + self.realm = module.params['realm'] + self.email = module.params['email'] + self.location = module.params['location'] + self.iconname = module.params['iconname'] + self.homedir = module.params['homedir'] + self.imagepath = module.params['imagepath'] + self.uid = module.params['uid'] + self.gid = module.params['gid'] + self.umask = module.params['umask'] + self.memberof = module.params['memberof'] + self.skeleton = module.params['skeleton'] + self.shell = module.params['shell'] + self.environment = module.params['environment'] + self.timezone = module.params['timezone'] + self.locked = module.params['locked'] + self.passwordhint = module.params['passwordhint'] + self.sshkeys = module.params['sshkeys'] + self.language = module.params['language'] + self.notbefore = module.params['notbefore'] + self.notafter = module.params['notafter'] + self.mountopts = module.params['mountopts'] + + self.result = {} + + # Cannot run homectl commands if service is not active + def homed_service_active(self): + is_active = True + cmd = ['systemctl', 'show', 'systemd-homed.service', '-p', 'ActiveState'] + rc, show_service_stdout, stderr = self.module.run_command(cmd) + if rc == 0: + state = show_service_stdout.rsplit('=')[1] + if state.strip() != 'active': + is_active = False + return is_active + + def user_exists(self): + exists = False + valid_pw = False + # Get user properties if they exist in json + rc, stdout, stderr = self.get_user_metadata() + if rc == 0: + exists = True + # User exists now compare password given with current hashed password stored in the user metadata. 
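+            # crypt.crypt(candidate, stored_hash) reuses the salt and rounds
+            # embedded in stored_hash, so the result equals stored_hash only
+            # when the candidate password matches (see _check_password()).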
+ if self.state != 'absent': # Don't need checking on remove user + stored_pwhash = json.loads(stdout)['privileged']['hashedPassword'][0] + if self._check_password(stored_pwhash): + valid_pw = True + return exists, valid_pw + + def create_user(self): + record = self.create_json_record(create=True) + cmd = [self.module.get_bin_path('homectl', True)] + cmd.append('create') + cmd.append('--identity=-') # Read the user record from standard input. + return self.module.run_command(cmd, data=record) + + def _hash_password(self, password): + method = crypt.METHOD_SHA512 + salt = crypt.mksalt(method, rounds=10000) + pw_hash = crypt.crypt(password, salt) + return pw_hash + + def _check_password(self, pwhash): + hash = crypt.crypt(self.password, pwhash) + return pwhash == hash + + def remove_user(self): + cmd = [self.module.get_bin_path('homectl', True)] + cmd.append('remove') + cmd.append(self.name) + return self.module.run_command(cmd) + + def prepare_modify_user_command(self): + record = self.create_json_record() + cmd = [self.module.get_bin_path('homectl', True)] + cmd.append('update') + cmd.append(self.name) + cmd.append('--identity=-') # Read the user record from standard input. + # Resize disksize now resize = true + # This is not valid in user record (json) and requires it to be passed on command. + if self.disksize and self.resize: + cmd.append('--and-resize') + cmd.append('true') + self.result['changed'] = True + return cmd, record + + def get_user_metadata(self): + cmd = [self.module.get_bin_path('homectl', True)] + cmd.append('inspect') + cmd.append(self.name) + cmd.append('-j') + cmd.append('--no-pager') + rc, stdout, stderr = self.module.run_command(cmd) + return rc, stdout, stderr + + # Build up dictionary to jsonify for homectl commands. + def create_json_record(self, create=False): + record = {} + user_metadata = {} + self.result['changed'] = False + # Get the current user record if not creating a new user record. + if not create: + rc, user_metadata, stderr = self.get_user_metadata() + user_metadata = json.loads(user_metadata) + # Remove elements that are not meant to be updated from record. + # These are always part of the record when a user exists. + user_metadata.pop('signature', None) + user_metadata.pop('binding', None) + user_metadata.pop('status', None) + # Let last change Usec be updated by homed when command runs. + user_metadata.pop('lastChangeUSec', None) + # Now only change fields that are called on leaving what's currently in the record intact. + record = user_metadata + + record['userName'] = self.name + record['secret'] = {'password': [self.password]} + + if create: + password_hash = self._hash_password(self.password) + record['privileged'] = {'hashedPassword': [password_hash]} + self.result['changed'] = True + + if self.uid and self.gid and create: + record['uid'] = self.uid + record['gid'] = self.gid + self.result['changed'] = True + + if self.memberof: + member_list = list(self.memberof.split(',')) + if member_list != record.get('memberOf', [None]): + record['memberOf'] = member_list + self.result['changed'] = True + + if self.realname: + if self.realname != record.get('realName'): + record['realName'] = self.realname + self.result['changed'] = True + + # Cannot update storage unless were creating a new user. + # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/ + if self.storage and create: + record['storage'] = self.storage + self.result['changed'] = True + + # Cannot update homedir unless were creating a new user. 
+ # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/ + if self.homedir and create: + record['homeDirectory'] = self.homedir + self.result['changed'] = True + + # Cannot update imagepath unless were creating a new user. + # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/ + if self.imagepath and create: + record['imagePath'] = self.imagepath + self.result['changed'] = True + + if self.disksize: + # convert human readable to bytes + if self.disksize != record.get('diskSize'): + record['diskSize'] = human_to_bytes(self.disksize) + self.result['changed'] = True + + if self.realm: + if self.realm != record.get('realm'): + record['realm'] = self.realm + self.result['changed'] = True + + if self.email: + if self.email != record.get('emailAddress'): + record['emailAddress'] = self.email + self.result['changed'] = True + + if self.location: + if self.location != record.get('location'): + record['location'] = self.location + self.result['changed'] = True + + if self.iconname: + if self.iconname != record.get('iconName'): + record['iconName'] = self.iconname + self.result['changed'] = True + + if self.skeleton: + if self.skeleton != record.get('skeletonDirectory'): + record['skeletonDirectory'] = self.skeleton + self.result['changed'] = True + + if self.shell: + if self.shell != record.get('shell'): + record['shell'] = self.shell + self.result['changed'] = True + + if self.umask: + if self.umask != record.get('umask'): + record['umask'] = self.umask + self.result['changed'] = True + + if self.environment: + if self.environment != record.get('environment', [None]): + record['environment'] = list(self.environment.split(',')) + self.result['changed'] = True + + if self.timezone: + if self.timezone != record.get('timeZone'): + record['timeZone'] = self.timezone + self.result['changed'] = True + + if self.locked: + if self.locked != record.get('locked'): + record['locked'] = self.locked + self.result['changed'] = True + + if self.passwordhint: + if self.passwordhint != record.get('privileged', {}).get('passwordHint'): + record['privileged']['passwordHint'] = self.passwordhint + self.result['changed'] = True + + if self.sshkeys: + if self.sshkeys != record.get('privileged', {}).get('sshAuthorizedKeys'): + record['privileged']['sshAuthorizedKeys'] = list(self.sshkeys.split(',')) + self.result['changed'] = True + + if self.language: + if self.locked != record.get('preferredLanguage'): + record['preferredLanguage'] = self.language + self.result['changed'] = True + + if self.notbefore: + if self.locked != record.get('notBeforeUSec'): + record['notBeforeUSec'] = self.notbefore + self.result['changed'] = True + + if self.notafter: + if self.locked != record.get('notAfterUSec'): + record['notAfterUSec'] = self.notafter + self.result['changed'] = True + + if self.mountopts: + opts = list(self.mountopts.split(',')) + if 'nosuid' in opts: + if record.get('mountNoSuid') is not True: + record['mountNoSuid'] = True + self.result['changed'] = True + else: + if record.get('mountNoSuid') is not False: + record['mountNoSuid'] = False + self.result['changed'] = True + + if 'nodev' in opts: + if record.get('mountNoDevices') is not True: + record['mountNoDevices'] = True + self.result['changed'] = True + else: + if record.get('mountNoDevices') is not False: + record['mountNoDevices'] = False + self.result['changed'] = True + + if 'noexec' in opts: + if record.get('mountNoExecute') is not True: + record['mountNoExecute'] = True + self.result['changed'] = True + else: + if 
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            name=dict(type='str', required=True, aliases=['user', 'username']),
+            password=dict(type='str', no_log=True),
+            storage=dict(type='str', choices=['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs']),
+            disksize=dict(type='str'),
+            resize=dict(type='bool', default=False),
+            realname=dict(type='str', aliases=['comment']),
+            realm=dict(type='str'),
+            email=dict(type='str'),
+            location=dict(type='str'),
+            iconname=dict(type='str'),
+            homedir=dict(type='path'),
+            imagepath=dict(type='path'),
+            uid=dict(type='int'),
+            gid=dict(type='int'),
+            umask=dict(type='int'),
+            environment=dict(type='str', aliases=['setenv']),
+            timezone=dict(type='str'),
+            memberof=dict(type='str', aliases=['groups']),
+            skeleton=dict(type='path', aliases=['skel']),
+            shell=dict(type='str'),
+            locked=dict(type='bool'),
+            passwordhint=dict(type='str', no_log=True),
+            sshkeys=dict(type='str', no_log=True),
+            language=dict(type='str'),
+            notbefore=dict(type='int'),
+            notafter=dict(type='int'),
+            mountopts=dict(type='str'),
+        ),
+        supports_check_mode=True,
+
+        required_if=[
+            ('state', 'present', ['password']),
+            ('resize', True, ['disksize']),
+        ]
+    )
+
+    if not HAS_CRYPT and not HAS_LEGACYCRYPT:
+        module.fail_json(
+            msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'),
+            exception=CRYPT_IMPORT_ERROR,
+        )
+
+    homectl = Homectl(module)
+    homectl.result['state'] = homectl.state
+
+    # First make sure the homed service is active.
+    if not homectl.homed_service_active():
+        module.fail_json(msg='systemd-homed.service is not active')
+
+    # Handle removing a user.
+    if homectl.state == 'absent':
+        user_exists, valid_pwhash = homectl.user_exists()
+        if user_exists:
+            if module.check_mode:
+                module.exit_json(changed=True)
+            rc, stdout, stderr = homectl.remove_user()
+            if rc != 0:
+                module.fail_json(name=homectl.name, msg=stderr, rc=rc)
+            homectl.result['changed'] = True
+            homectl.result['rc'] = rc
+            homectl.result['msg'] = 'User %s removed!' % homectl.name
+        else:
+            homectl.result['changed'] = False
+            homectl.result['msg'] = 'User does not exist!'
+
+    # Handle adding a user.
+    if homectl.state == 'present':
+        user_exists, valid_pwhash = homectl.user_exists()
+        if not user_exists:
+            if module.check_mode:
+                module.exit_json(changed=True)
+            rc, stdout, stderr = homectl.create_user()
+            if rc != 0:
+                module.fail_json(name=homectl.name, msg=stderr, rc=rc)
+            rc, user_metadata, stderr = homectl.get_user_metadata()
+            homectl.result['data'] = json.loads(user_metadata)
+            homectl.result['rc'] = rc
+            homectl.result['msg'] = 'User %s created!' % homectl.name
+        else:
+            if valid_pwhash:
+                # Run this to determine whether changed would be True or False, which is useful for check mode.
+                cmd, record = homectl.prepare_modify_user_command()
+            else:
+                # The user supplied a wrong password; fail with a message.
+                homectl.result['changed'] = False
+                homectl.result['msg'] = 'User exists but password is incorrect!'
+                module.fail_json(**homectl.result)
+
+            if module.check_mode:
+                module.exit_json(**homectl.result)
+
+            # Now actually modify the user if changed was set to true at any point.
+ if homectl.result['changed']: + rc, stdout, stderr = module.run_command(cmd, data=record) + if rc != 0: + module.fail_json(name=homectl.name, msg=stderr, rc=rc, changed=False) + rc, user_metadata, stderr = homectl.get_user_metadata() + homectl.result['data'] = json.loads(user_metadata) + homectl.result['rc'] = rc + if homectl.result['changed']: + homectl.result['msg'] = 'User %s modified' % homectl.name + + module.exit_json(**homectl.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py deleted file mode 120000 index 994cf8ee58..0000000000 --- a/plugins/modules/honeybadger_deployment.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/honeybadger_deployment.py \ No newline at end of file diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py new file mode 100644 index 0000000000..a5fe8c86f7 --- /dev/null +++ b/plugins/modules/honeybadger_deployment.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# Copyright 2014 Benjamin Curtis +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: honeybadger_deployment +author: "Benjamin Curtis (@stympy)" +short_description: Notify Honeybadger.io about app deployments +description: + - Notify Honeybadger.io about app deployments (see U(http://docs.honeybadger.io/article/188-deployment-tracking)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - API token. + required: true + environment: + type: str + description: + - The environment name, typically V(production), V(staging), and so on. + required: true + user: + type: str + description: + - The username of the person doing the deployment. + repo: + type: str + description: + - URL of the project repository. + revision: + type: str + description: + - A hash, number, tag, or other identifier showing what revision was deployed. + url: + type: str + description: + - Optional URL to submit the notification to. + default: "https://api.honeybadger.io/v1/deploys" + validate_certs: + description: + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: Notify Honeybadger.io about an app deployment + community.general.honeybadger_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: b6826b8 + repo: 'git@github.com:user/repo.git' +""" + +RETURN = """#""" + +import traceback +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +# =========================================== +# Module execution. 
+#
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(required=True, no_log=True),
+            environment=dict(required=True),
+            user=dict(required=False),
+            repo=dict(),
+            revision=dict(),
+            url=dict(default='https://api.honeybadger.io/v1/deploys'),
+            validate_certs=dict(default=True, type='bool'),
+        ),
+        supports_check_mode=True
+    )
+
+    params = {}
+
+    if module.params["environment"]:
+        params["deploy[environment]"] = module.params["environment"]
+
+    if module.params["user"]:
+        params["deploy[local_username]"] = module.params["user"]
+
+    if module.params["repo"]:
+        params["deploy[repository]"] = module.params["repo"]
+
+    if module.params["revision"]:
+        params["deploy[revision]"] = module.params["revision"]
+
+    params["api_key"] = module.params["token"]
+
+    url = module.params.get('url')
+
+    # If we're in check mode, just exit pretending like we succeeded
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    try:
+        data = urlencode(params)
+        response, info = fetch_url(module, url, data=data)
+    except Exception as e:
+        module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
+    else:
+        if info['status'] == 201:
+            module.exit_json(changed=True)
+        else:
+            module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+    main()
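For context on the request this module sends: the deploy fields are flattened into deploy[...] form parameters and POSTed as a classic URL-encoded body, with HTTP 201 indicating success. A minimal standalone sketch using only the standard library (the token and deploy values are placeholders, and this bypasses the module's fetch_url/validate_certs handling):

    from urllib.parse import urlencode
    from urllib.request import urlopen

    params = {
        "api_key": "AAAAAA",                  # placeholder token
        "deploy[environment]": "staging",
        "deploy[local_username]": "ansible",
        "deploy[revision]": "b6826b8",
    }
    data = urlencode(params).encode()         # form-encoded body, as fetch_url sends it
    with urlopen("https://api.honeybadger.io/v1/deploys", data=data) as resp:
        print(resp.status)                    # the module treats 201 as success

diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py
deleted file mode 120000
index 24fb0f82c9..0000000000
--- a/plugins/modules/hpilo_boot.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/hpilo/hpilo_boot.py
\ No newline at end of file
diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py
new file mode 100644
index 0000000000..bf44a4dac4
--- /dev/null
+++ b/plugins/modules/hpilo_boot.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+
+# Copyright 2012 Dag Wieers
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: hpilo_boot
+author: Dag Wieers (@dagwieers)
+short_description: Boot system using specific media through HP iLO interface
+description:
+  - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd), V(network),
+    or V(usb).'
+  - This module requires the hpilo python module.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  host:
+    description:
+      - The HP iLO hostname/address that is linked to the physical system.
+    type: str
+    required: true
+  login:
+    description:
+      - The login name to authenticate to the HP iLO interface.
+    default: Administrator
+    type: str
+  password:
+    description:
+      - The password to authenticate to the HP iLO interface.
+    default: admin
+    type: str
+  media:
+    description:
+      - The boot media to boot the system from.
+    choices: ["cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb"]
+    type: str
+  image:
+    description:
+      - The URL of a cdrom, floppy or usb boot media image in the form V(protocol://username:password@hostname:port/filename).
+      - V(protocol) is either V(http) or V(https).
+      - V(username:password) is optional.
+      - V(port) is optional.
+    type: str
+  state:
+    description:
+      - The state of the boot media.
+      - 'V(no_boot): Do not boot from the device.'
+      - 'V(boot_once): Boot from the device once and then not thereafter.'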
+      - 'V(boot_always): Boot from the device each time the server is rebooted.'
+      - 'V(connect): Connect the virtual media device and set it to boot_always.'
+      - 'V(disconnect): Disconnect the virtual media device and set it to no_boot.'
+      - 'V(poweroff): Power off the server.'
+    default: boot_once
+    type: str
+    choices: ["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"]
+  force:
+    description:
+      - Whether to force a reboot (even when the system is already booted).
+      - As a safeguard, without force, M(community.general.hpilo_boot) refuses to reboot a server that is already running.
+    default: false
+    type: bool
+  ssl_version:
+    description:
+      - Change the ssl_version used.
+    default: TLSv1
+    type: str
+    choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"]
+  idempotent_boot_once:
+    description:
+      - This option makes O(state=boot_once) succeed instead of failing when the server is already powered on.
+    type: bool
+    default: false
+    version_added: 10.6.0
+requirements:
+  - python-hpilo
+notes:
+  - To use a USB key image you need to specify floppy as boot media.
+  - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action)
+    or using C(delegate_to).
+"""

+EXAMPLES = r"""
+- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
+  community.general.hpilo_boot:
+    host: YOUR_ILO_ADDRESS
+    login: YOUR_ILO_LOGIN
+    password: YOUR_ILO_PASSWORD
+    media: cdrom
+    image: http://some-web-server/iso/boot.iso
+  when: cmdb_hwmodel.startswith('HP ')
+  delegate_to: localhost
+
+- name: Power off a server
+  community.general.hpilo_boot:
+    host: YOUR_ILO_HOST
+    login: YOUR_ILO_LOGIN
+    password: YOUR_ILO_PASSWORD
+    state: poweroff
+  delegate_to: localhost
+"""
+
+RETURN = r"""
+# Default return values
+"""
+
+import time
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+    import hpilo
+    HAS_HPILO = True
+except ImportError:
+    HPILO_IMP_ERR = traceback.format_exc()
+    HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(type='str', required=True),
+            login=dict(type='str', default='Administrator'),
+            password=dict(type='str', default='admin', no_log=True),
+            media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
+            image=dict(type='str'),
+            state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
+            force=dict(type='bool', default=False),
+            idempotent_boot_once=dict(type='bool', default=False),
+            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+        )
+    )
+
+    if not HAS_HPILO:
+        module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+    host = module.params['host']
+    login = module.params['login']
+    password = module.params['password']
+    media = module.params['media']
+    image = module.params['image']
+    state = module.params['state']
+    force = module.params['force']
+    idempotent_boot_once = module.params['idempotent_boot_once']
+    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+    changed = False
+    status = {}
+    power_status = 'UNKNOWN'
+
+    if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
+
+        # Workaround for: Error communicating with iLO: Problem manipulating EV
+        try:
+            ilo.set_one_time_boot(media)
+        except hpilo.IloError:
+            time.sleep(60)
+            ilo.set_one_time_boot(media)
+
+        # TODO: Verify if image URL exists/works
+        if image:
+            ilo.insert_virtual_media(media, image)
+            changed = True
+
+        if media == 'cdrom':
+            ilo.set_vm_status('cdrom', state, True)
+            status = ilo.get_vm_status()
+            changed = True
+        elif media in ('floppy', 'usb'):
+            ilo.set_vf_status(state, True)
+            status = ilo.get_vf_status()
+            changed = True
+
+    # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+    if state in ('boot_once', 'boot_always') or force:
+
+        power_status = ilo.get_host_power_status()
+
+        if power_status == 'ON':
+            if not force and not idempotent_boot_once:
+                # module.deprecate(
+                #     'The failure of the module when the server is already powered on is being deprecated.'
+                #     ' Please set the parameter "idempotent_boot_once=true" to start using the new behavior.',
+                #     version='11.0.0',
+                #     collection_name='community.general'
+                # )
+                module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
+            elif not force and idempotent_boot_once:
+                pass
+            elif force:
+                ilo.warm_boot_server()
+                # ilo.cold_boot_server()
+                changed = True
+        else:
+            ilo.press_pwr_btn()
+            # ilo.reset_server()
+            # ilo.set_host_power(host_power=True)
+            changed = True
+
+    elif state == 'poweroff':
+
+        power_status = ilo.get_host_power_status()
+
+        if power_status != 'OFF':
+            ilo.hold_pwr_btn()
+            # ilo.set_host_power(host_power=False)
+            changed = True
+
+    module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+    main()
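Both hpilo modules in this changeset resolve the ssl_version option with the same getattr() trick, mapping the documented choice strings onto the protocol constants that python-hpilo re-exports from the stdlib ssl module. A small sketch of that mapping against ssl directly (assuming, as the modules do, that the attribute names line up):

    import ssl

    def resolve_ssl_version(name):
        # 'TLSv1_2' -> 'PROTOCOL_TLSv1_2'; upper().replace('V', 'v') normalizes
        # user input such as 'tlsv1_2' the same way the modules do.
        return getattr(ssl, 'PROTOCOL_' + name.upper().replace('V', 'v'))

    print(resolve_ssl_version('TLSv1_2') is ssl.PROTOCOL_TLSv1_2)  # True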
diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py
deleted file mode 120000
index bffa6001a5..0000000000
--- a/plugins/modules/hpilo_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/hpilo/hpilo_info.py
\ No newline at end of file
diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py
new file mode 100644
index 0000000000..8f2739180d
--- /dev/null
+++ b/plugins/modules/hpilo_info.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+
+# Copyright 2012 Dag Wieers
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+  - This module gathers information on a specific system using its HP iLO interface. This information includes hardware and
+    network related information useful for provisioning (for example macaddress, uuid).
+  - This module requires the C(hpilo) python module.
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  host:
+    description:
+      - The HP iLO hostname/address that is linked to the physical system.
+    type: str
+    required: true
+  login:
+    description:
+      - The login name to authenticate to the HP iLO interface.
+    type: str
+    default: Administrator
+  password:
+    description:
+      - The password to authenticate to the HP iLO interface.
+    type: str
+    default: admin
+  ssl_version:
+    description:
+      - Change the ssl_version used.
+ default: TLSv1 + type: str + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] +requirements: + - hpilo +notes: + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). +""" + +EXAMPLES = r""" +- name: Gather facts from a HP iLO interface only if the system is an HP server + community.general.hpilo_info: + host: YOUR_ILO_ADDRESS + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + when: cmdb_hwmodel.startswith('HP ') + delegate_to: localhost + register: results + +- ansible.builtin.fail: + msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' + when: cmdb_serialno != results.hw_system_serial +""" + +RETURN = r""" +# Typical output of HP iLO_info for a physical system +hw_bios_date: + description: BIOS date. + returned: always + type: str + sample: 05/05/2011 + +hw_bios_version: + description: BIOS version. + returned: always + type: str + sample: P68 + +hw_ethX: + description: Interface information (for each interface). + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:55 + macaddress_dash: 00-11-22-33-44-55 + +hw_eth_ilo: + description: Interface information (for the iLO network interface). + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:BA + - macaddress_dash: 00-11-22-33-44-BA + +hw_product_name: + description: Product name. + returned: always + type: str + sample: ProLiant DL360 G7 + +hw_product_uuid: + description: Product UUID. + returned: always + type: str + sample: ef50bac8-2845-40ff-81d9-675315501dac + +hw_system_serial: + description: System serial number. + returned: always + type: str + sample: ABC12345D6 + +hw_uuid: + description: Hardware UUID. + returned: always + type: str + sample: 123456ABC78901D2 + +host_power_status: + description: + - Power status of host. + - It is one of V(ON), V(OFF) and V(UNKNOWN). 
+ returned: always + type: str + sample: "ON" + version_added: 3.5.0 +""" + +import re +import traceback +import warnings + +HPILO_IMP_ERR = None +try: + import hpilo + HAS_HPILO = True +except ImportError: + HPILO_IMP_ERR = traceback.format_exc() + HAS_HPILO = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +# Suppress warnings from hpilo +warnings.simplefilter('ignore') + + +def parse_flat_interface(entry, non_numeric='hw_eth_ilo'): + try: + infoname = 'hw_eth' + str(int(entry['Port']) - 1) + except Exception: + infoname = non_numeric + + info = { + 'macaddress': entry['MAC'].replace('-', ':'), + 'macaddress_dash': entry['MAC'] + } + return (infoname, info) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + login=dict(type='str', default='Administrator'), + password=dict(type='str', default='admin', no_log=True), + ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + ), + supports_check_mode=True, + ) + + if not HAS_HPILO: + module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) + + host = module.params['host'] + login = module.params['login'] + password = module.params['password'] + ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) + + ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) + + info = { + 'module_hw': True, + } + + # TODO: Count number of CPUs, DIMMs and total memory + try: + data = ilo.get_host_data() + power_state = ilo.get_host_power_status() + except hpilo.IloCommunicationError as e: + module.fail_json(msg=to_native(e)) + + for entry in data: + if 'type' not in entry: + continue + elif entry['type'] == 0: # BIOS Information + info['hw_bios_version'] = entry['Family'] + info['hw_bios_date'] = entry['Date'] + elif entry['type'] == 1: # System Information + info['hw_uuid'] = entry['UUID'] + info['hw_system_serial'] = entry['Serial Number'].rstrip() + info['hw_product_name'] = entry['Product Name'] + info['hw_product_uuid'] = entry['cUUID'] + elif entry['type'] == 209: # Embedded NIC MAC Assignment + if 'fields' in entry: + for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: + if name.startswith('Port'): + try: + infoname = 'hw_eth' + str(int(value) - 1) + except Exception: + infoname = 'hw_eth_ilo' + elif name.startswith('MAC'): + info[infoname] = { + 'macaddress': value.replace('-', ':'), + 'macaddress_dash': value + } + else: + (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') + info[infoname] = entry_info + elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info + for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: + if name.startswith('Port'): + try: + infoname = 'hw_iscsi' + str(int(value) - 1) + except Exception: + infoname = 'hw_iscsi_ilo' + elif name.startswith('MAC'): + info[infoname] = { + 'macaddress': value.replace('-', ':'), + 'macaddress_dash': value + } + elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format) + (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') + info[infoname] = entry_info + + # Collect health (RAM/CPU data) + health = ilo.get_embedded_health() + info['hw_health'] = health + + memory_details_summary = health.get('memory', {}).get('memory_details_summary') + # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8 + if 
memory_details_summary:
+        info['hw_memory_details_summary'] = memory_details_summary
+        info['hw_memory_total'] = 0
+        for cpu, details in memory_details_summary.items():
+            cpu_total_memory_size = details.get('total_memory_size')
+            if cpu_total_memory_size:
+                ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+                if ram:
+                    if ram.group(2) == 'GB':
+                        info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+        # Reformat the total into a text-friendly format.
+        info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+    # Report host state
+    info['host_power_status'] = power_state or 'UNKNOWN'
+
+    module.exit_json(**info)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py
deleted file mode 120000
index f5c70a6d15..0000000000
--- a/plugins/modules/hponcfg.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/hpilo/hponcfg.py
\ No newline at end of file
diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py
new file mode 100644
index 0000000000..a17a905916
--- /dev/null
+++ b/plugins/modules/hponcfg.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+
+# Copyright (c) 2012, Dag Wieers
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using C(hponcfg)
+description:
+  - This module configures the HP iLO interface using C(hponcfg).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  path:
+    description:
+      - The XML file as accepted by C(hponcfg).
+    required: true
+    aliases: ['src']
+    type: path
+  minfw:
+    description:
+      - The minimum firmware level needed.
+    required: false
+    type: str
+  executable:
+    description:
+      - Path to the hponcfg executable (C(hponcfg) which uses E(PATH)).
+    default: hponcfg
+    type: str
+  verbose:
+    description:
+      - Run C(hponcfg) in verbose mode (-v).
+    default: false
+    type: bool
+requirements:
+  - hponcfg tool
+notes:
+  - You need a working C(hponcfg) on the target system.
+"""
+
+EXAMPLES = r"""
+- name: Example hponcfg configuration XML
+  ansible.builtin.copy:
+    content: |
+      <ribcl VERSION="2.0">
+        <login USER_LOGIN="user" PASSWORD="password">
+          <rib_info MODE="WRITE">
+            <mod_global_settings>
+              <session_timeout value="0"/>
+              <ssh_status value="Y"/>
+              <ssh_port value="22"/>
+              <serial_cli_status value="3"/>
+              <serial_cli_speed value="5"/>
+            </mod_global_settings>
+          </rib_info>
+        </login>
+      </ribcl>
+    dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+  community.general.hponcfg:
+    src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+  community.general.hponcfg:
+    src: /tmp/enable-ssh.xml
+    executable: /opt/hp/tools/hponcfg
+"""
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+class HPOnCfg(ModuleHelper):
+    module = dict(
+        argument_spec=dict(
+            src=dict(type='path', required=True, aliases=['path']),
+            minfw=dict(type='str'),
+            executable=dict(default='hponcfg', type='str'),
+            verbose=dict(default=False, type='bool'),
+        )
+    )
+    command_args_formats = dict(
+        src=cmd_runner_fmt.as_opt_val("-f"),
+        verbose=cmd_runner_fmt.as_bool("-v"),
+        minfw=cmd_runner_fmt.as_opt_val("-m"),
+    )
+
+    def __run__(self):
+        runner = CmdRunner(
+            self.module,
+            self.vars.executable,
+            self.command_args_formats,
+            check_rc=True,
+        )
+        runner(['src', 'verbose', 'minfw']).run()
+
+        # Consider every action a change (not idempotent yet!)
+ self.changed = True + + +def main(): + HPOnCfg.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py deleted file mode 120000 index 4ac73e9fb9..0000000000 --- a/plugins/modules/htpasswd.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/htpasswd.py \ No newline at end of file diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py new file mode 100644 index 0000000000..d0e0941601 --- /dev/null +++ b/plugins/modules/htpasswd.py @@ -0,0 +1,256 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Nimbis Services, Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: htpasswd +short_description: Manage user files for basic authentication +description: + - Add and remove username/password entries in a password file using htpasswd. + - This is used by web servers such as Apache and Nginx for basic authentication. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + type: path + required: true + aliases: [dest, destfile] + description: + - Path to the file that contains the usernames and passwords. + name: + type: str + required: true + aliases: [username] + description: + - User name to add or remove. + password: + type: str + required: false + description: + - Password associated with user. + - Must be specified if user does not exist yet. + hash_scheme: + type: str + required: false + default: "apr_md5_crypt" + description: + - Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib, + such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes. + Only some schemes in addition to the four choices below are compatible with Apache or Nginx, and supported schemes + depend on C(passlib) version and its dependencies. + - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme). + - 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).' + - 'B(WARNING): The module has no mechanism to determine the O(hash_scheme) of an existing entry, therefore, it does + not detect whether the O(hash_scheme) has changed. If you want to change the scheme, you must remove the existing + entry and then create a new one using the new scheme.' + aliases: [crypt_scheme] + state: + type: str + required: false + choices: [present, absent] + default: "present" + description: + - Whether the user entry should be present or not. + create: + required: false + type: bool + default: true + description: + - Used with O(state=present). If V(true), the file is created if it does not exist. Conversely, if set to V(false) and + the file does not exist, it fails. +notes: + - This module depends on the C(passlib) Python library, which needs to be installed on all target systems. + - 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).' + - 'On Debian, Ubuntu: install C(python3-passlib).' + - 'On RHEL or CentOS: Enable EPEL, then install C(python-passlib).' 
+requirements: [passlib>=1.6] +author: "Ansible Core Team" +extends_documentation_fragment: + - files + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Add a user to a password file and ensure permissions are set + community.general.htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: '0640' + +- name: Remove a user from a password file + community.general.htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + +- name: Add a user to a password file suitable for use by libpam-pwdfile + community.general.htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + hash_scheme: md5_crypt +""" + + +import os +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.common.text.converters import to_native + + +with deps.declare("passlib"): + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext + + +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + + +def create_missing_directories(dest): + destpath = os.path.dirname(dest) + if not os.path.exists(destpath): + os.makedirs(destpath) + + +def present(dest, username, password, hash_scheme, create, check_mode): + """ Ensures user is present + + Returns (msg, changed) """ + if hash_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes=[hash_scheme] + apache_hashes) + if not os.path.exists(dest): + if not create: + raise ValueError('Destination %s does not exist' % dest) + if check_mode: + return ("Create %s" % dest, True) + create_missing_directories(dest) + ht = HtpasswdFile(dest, new=True, default_scheme=hash_scheme, context=context) + ht.set_password(username, password) + ht.save() + return ("Created %s and added %s" % (dest, username), True) + else: + ht = HtpasswdFile(dest, new=False, default_scheme=hash_scheme, context=context) + + found = ht.check_password(username, password) + + if found: + return ("%s already present" % username, False) + else: + if not check_mode: + ht.set_password(username, password) + ht.save() + return ("Add/update %s" % username, True) + + +def absent(dest, username, check_mode): + """ Ensures user is absent + + Returns (msg, changed) """ + ht = HtpasswdFile(dest, new=False) + + if username not in ht.users(): + return ("%s not present" % username, False) + else: + if not check_mode: + ht.delete(username) + ht.save() + return ("Remove %s" % username, True) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + + +def main(): + arg_spec = dict( + path=dict(type='path', required=True, aliases=["dest", "destfile"]), + name=dict(type='str', required=True, aliases=["username"]), + password=dict(type='str', no_log=True), + hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]), + state=dict(type='str', default="present", choices=["present", "absent"]), + create=dict(type='bool', default=True), + + ) + module = AnsibleModule(argument_spec=arg_spec, + add_file_common_args=True, + supports_check_mode=True) + + path = module.params['path'] + username = module.params['name'] + password = 
module.params['password'] + hash_scheme = module.params['hash_scheme'] + state = module.params['state'] + create = module.params['create'] + check_mode = module.check_mode + + deps.validate(module) + + # TODO double check if this hack below is still needed. + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. + try: + with open(path, "r") as f: + lines = f.readlines() + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + break + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + with open(path, "w") as f: + f.writelines(line for line in lines if line.strip()) + + except IOError: + # No preexisting file to remove blank lines from + pass + + try: + if state == 'present': + (msg, changed) = present(path, username, password, hash_scheme, create, check_mode) + elif state == 'absent': + if not os.path.exists(path): + module.warn("%s does not exist" % path) + module.exit_json(msg="%s not present" % username, changed=False) + (msg, changed) = absent(path, username, check_mode) + else: + module.fail_json(msg="Invalid state: %s" % state) + return # needed to make pylint happy + + (msg, changed) = check_file_attrs(module, changed, msg) + module.exit_json(msg=msg, changed=changed) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py deleted file mode 120000 index 48a0fb0b40..0000000000 --- a/plugins/modules/hwc_ecs_instance.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_ecs_instance.py \ No newline at end of file diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py new file mode 100644 index 0000000000..610cd8b872 --- /dev/null +++ b/plugins/modules/hwc_ecs_instance.py @@ -0,0 +1,2094 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_ecs_instance +description: + - Instance management. +short_description: Creates a resource of Ecs/Instance in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + required: true + flavor_name: + description: + - Specifies the name of the system flavor. 
+ type: str + required: true + image_id: + description: + - Specifies the ID of the system image. + type: str + required: true + name: + description: + - Specifies the ECS name. Value requirements consists of 1 to 64 characters, including letters, digits, underscores + (V(_)), hyphens (V(-)), periods (V(.)). + type: str + required: true + nics: + description: + - Specifies the NIC information of the ECS. Constraints the network of the NIC must belong to the VPC specified by vpc_id. + A maximum of 12 NICs can be attached to an ECS. + type: list + elements: dict + required: true + suboptions: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + required: true + subnet_id: + description: + - Specifies the ID of subnet. + type: str + required: true + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + required: true + suboptions: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + required: true + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than + or equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. + type: str + required: false + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + required: true + admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements, + consists of 8 to 26 characters. The password must contain at least three of the following character types 'uppercase + letters, lowercase letters, digits, and special characters (V(!@$%^-_=+[{}]:,./?))'. The password cannot contain the + username or the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, + or more than two consecutive characters in the username. + type: str + required: false + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + elements: dict + required: false + suboptions: + volume_id: + description: + - Specifies the disk ID. + type: str + required: true + device: + description: + - Specifies the disk device name. + type: str + required: false + description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as V(<) and V(>). + type: str + required: false + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. 
Only elastic IP addresses in the DOWN state can be + assigned. + type: str + required: false + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + required: false + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + required: false + security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to + the ECS by default. + type: list + elements: str + required: false + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + required: false + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + required: false + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + required: false + user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +# create an ecs instance +- name: Create a vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create a subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a eip + hwc_vpc_eip: + dedicated_bandwidth: + charge_mode: "traffic" + name: "ansible_test_dedicated_bandwidth" + size: 1 + type: "5_bgp" + register: eip +- name: Create a disk + hwc_evs_disk: + availability_zone: "cn-north-1a" + name: "ansible_evs_disk_test" + volume_type: "SATA" + size: 10 + register: disk +- name: Create an instance + community.general.hwc_ecs_instance: + data_volumes: + - volume_id: "{{ disk.id }}" + enable_auto_recovery: false + eip_id: "{{ eip.id }}" + name: "ansible_ecs_instance_test" + availability_zone: "cn-north-1a" + nics: + - subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" + - subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.34" + server_tags: + my_server: "my_server" + image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892" + flavor_name: "s3.small.1" + vpc_id: "{{ vpc.id }}" + root_volume: + volume_type: "SAS" +""" + +RETURN = r""" +availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + returned: success +flavor_name: + description: + - Specifies the name of the system flavor. + type: str + returned: success +image_id: + description: + - Specifies the ID of the system image. + type: str + returned: success +name: + description: + - Specifies the ECS name. Value requirements "Consists of 1 to 64 characters, including letters, digits, underscores (V(_)), + hyphens (V(-)), periods (V(.)).". + type: str + returned: success +nics: + description: + - Specifies the NIC information of the ECS. The network of the NIC must belong to the VPC specified by vpc_id. A maximum + of 12 NICs can be attached to an ECS. 
+ type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + returned: success + subnet_id: + description: + - Specifies the ID of subnet. + type: str + returned: success + port_id: + description: + - Specifies the port ID corresponding to the IP address. + type: str + returned: success +root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + returned: success + contains: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + returned: success + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than or + equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + returned: success + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + returned: success +admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements consists + of 8 to 26 characters. The password must contain at least three of the following character types "uppercase letters, + lowercase letters, digits, and special characters (!@$%^-_=+[{}]:,./?)". The password cannot contain the username or + the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, or more than + two consecutive characters in the username. + type: str + returned: success +data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + returned: success + contains: + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success +description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as < and >. + type: str + returned: success +eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be assigned. + type: str + returned: success +enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. 
+ type: bool + returned: success +enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + returned: success +security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to the + ECS by default. + type: list + returned: success +server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + returned: success +server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + returned: success +ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + returned: success +user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + returned: success +config_drive: + description: + - Specifies the configuration driver. + type: str + returned: success +created: + description: + - Specifies the time when an ECS was created. + type: str + returned: success +disk_config_type: + description: + - Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system + disk is expanded to be as same as the flavor. + type: str + returned: success +host_name: + description: + - Specifies the host name of the ECS. + type: str + returned: success +image_name: + description: + - Specifies the image name of the ECS. + type: str + returned: success +power_state: + description: + - Specifies the power status of the ECS. + type: int + returned: success +server_alias: + description: + - Specifies the ECS alias. + type: str + returned: success +status: + description: + - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, + ERROR, and DELETED. 
+ type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='30m', type='str'), + update=dict(default='30m', type='str'), + delete=dict(default='30m', type='str'), + ), default=dict()), + availability_zone=dict(type='str', required=True), + flavor_name=dict(type='str', required=True), + image_id=dict(type='str', required=True), + name=dict(type='str', required=True), + nics=dict( + type='list', required=True, elements='dict', + options=dict( + ip_address=dict(type='str', required=True), + subnet_id=dict(type='str', required=True) + ), + ), + root_volume=dict(type='dict', required=True, options=dict( + volume_type=dict(type='str', required=True), + size=dict(type='int'), + snapshot_id=dict(type='str') + )), + vpc_id=dict(type='str', required=True), + admin_pass=dict(type='str', no_log=True), + data_volumes=dict(type='list', elements='dict', options=dict( + volume_id=dict(type='str', required=True), + device=dict(type='str') + )), + description=dict(type='str'), + eip_id=dict(type='str'), + enable_auto_recovery=dict(type='bool'), + enterprise_project_id=dict(type='str'), + security_groups=dict(type='list', elements='str'), + server_metadata=dict(type='dict'), + server_tags=dict(type='dict'), + ssh_key_name=dict(type='str'), + user_data=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "ecs") + + try: + _init(config) + is_exist = module.params['id'] + + result = None + changed = False + if module.params['state'] == 'present': + if not is_exist: + if not module.check_mode: + create(config) + changed = True + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + if not module.check_mode: + update(config, inputv, result) + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + raise Exception("Update resource failed, " + "some attributes are not updated") + + changed = True + + result['id'] = module.params.get('id') + else: + result = dict() + if is_exist: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def _init(config): + module = config.module + if module.params['id']: + return + + v = search_resource(config) + n = len(v) + if n > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) + for i in v + ])) + + if n == 1: + module.params['id'] = navigate_value(v[0], ["id"]) + + +def user_input_parameters(module): + return { + "admin_pass": module.params.get("admin_pass"), + "availability_zone": module.params.get("availability_zone"), + "data_volumes": module.params.get("data_volumes"), + "description": module.params.get("description"), + "eip_id": module.params.get("eip_id"), + 
"enable_auto_recovery": module.params.get("enable_auto_recovery"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "flavor_name": module.params.get("flavor_name"), + "image_id": module.params.get("image_id"), + "name": module.params.get("name"), + "nics": module.params.get("nics"), + "root_volume": module.params.get("root_volume"), + "security_groups": module.params.get("security_groups"), + "server_metadata": module.params.get("server_metadata"), + "server_tags": module.params.get("server_tags"), + "ssh_key_name": module.params.get("ssh_key_name"), + "user_data": module.params.get("user_data"), + "vpc_id": module.params.get("vpc_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait(config, r, client, timeout) + + sub_job_identity = { + "job_type": "createSingleServer", + } + for item in navigate_value(obj, ["entities", "sub_jobs"]): + for k, v in sub_job_identity.items(): + if item[k] != v: + break + else: + obj = item + break + else: + raise Exception("Can't find the sub job") + module.params['id'] = navigate_value(obj, ["entities", "server_id"]) + + +def update(config, expect_state, current_state): + module = config.module + expect_state["current_state"] = current_state + current_state["current_state"] = current_state + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + client = config.client(get_region(module), "ecs", "project") + + params = build_delete_nics_parameters(expect_state) + params1 = build_delete_nics_parameters(current_state) + if params and are_different_dicts(params, params1): + r = send_delete_nics_request(module, params, client) + async_wait(config, r, client, timeout) + + params = build_set_auto_recovery_parameters(expect_state) + params1 = build_set_auto_recovery_parameters(current_state) + if params and are_different_dicts(params, params1): + send_set_auto_recovery_request(module, params, client) + + params = build_attach_nics_parameters(expect_state) + params1 = build_attach_nics_parameters(current_state) + if params and are_different_dicts(params, params1): + r = send_attach_nics_request(module, params, client) + async_wait(config, r, client, timeout) + + multi_invoke_delete_volume(config, expect_state, client, timeout) + + multi_invoke_attach_data_disk(config, expect_state, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_delete_parameters(opts) + if params: + r = send_delete_request(module, params, client) + async_wait(config, r, client, timeout) + + +def read_resource(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + + res = {} + + r = send_read_request(module, client) + preprocess_read_response(r) + res["read"] = fill_read_resp_body(r) + + r = send_read_auto_recovery_request(module, client) + res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r) + + return res, None + + +def preprocess_read_response(resp): + v = resp.get("os-extended-volumes:volumes_attached") + if v and isinstance(v, list): + for i 
in range(len(v)): + if v[i].get("bootIndex") == "0": + root_volume = v[i] + + if (i + 1) != len(v): + v[i] = v[-1] + + v.pop() + + resp["root_volume"] = root_volume + break + + v = resp.get("addresses") + if v: + rv = {} + eips = [] + for val in v.values(): + for item in val: + if item["OS-EXT-IPS:type"] == "floating": + eips.append(item) + else: + rv[item["OS-EXT-IPS:port_id"]] = item + + for item in eips: + k = item["OS-EXT-IPS:port_id"] + if k in rv: + rv[k]["eip_address"] = item.get("addr", "") + else: + rv[k] = item + item["eip_address"] = item.get("addr", "") + item["addr"] = "" + + resp["address"] = rv.values() + + +def build_state(opts, response, array_index): + states = flatten_options(response, array_index) + set_unreadable_options(opts, states) + adjust_options(opts, states) + return states + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enterprise_project_id"]) + if v or v in [False, 0]: + query_params.append( + "enterprise_project_id=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["name"]) + if v or v in [False, 0]: + query_params.append( + "name=" + (str(v) if v else str(v).lower())) + + query_link = "?limit=10&offset={offset}" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "cloudservers/detail" + query_link + + result = [] + p = {'offset': 1} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + adjust_list_resp(identity_obj, item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['offset'] += 1 + + return result + + +def build_delete_nics_parameters(opts): + params = dict() + + v = expand_delete_nics_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + return params + + +def expand_delete_nics_nics(d, array_index): + cv = d["current_state"].get("nics") + if not cv: + return None + + val = cv + + ev = d.get("nics") + if ev: + m = [item.get("ip_address") for item in ev] + val = [item for item in cv if item.get("ip_address") not in m] + + r = [] + for item in val: + transformed = dict() + + v = item.get("port_id") + if not is_empty_value(v): + transformed["id"] = v + + if transformed: + r.append(transformed) + + return r + + +def send_delete_nics_request(module, params, client): + url = build_path(module, "cloudservers/{id}/nics/delete") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete_nics), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_set_auto_recovery_parameters(opts): + params = dict() + + v = expand_set_auto_recovery_support_auto_recovery(opts, None) + if v is not None: + params["support_auto_recovery"] = v + + return params + + +def expand_set_auto_recovery_support_auto_recovery(d, array_index): + v = navigate_value(d, ["enable_auto_recovery"], None) + return None if v is None else str(v).lower() + + +def send_set_auto_recovery_request(module, params, client): + url = build_path(module, "cloudservers/{id}/autorecovery") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = 
("module(hwc_ecs_instance): error running " + "api(set_auto_recovery), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["admin_pass"], None) + if not is_empty_value(v): + params["adminPass"] = v + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = expand_create_extendparam(opts, None) + if not is_empty_value(v): + params["extendparam"] = v + + v = navigate_value(opts, ["flavor_name"], None) + if not is_empty_value(v): + params["flavorRef"] = v + + v = navigate_value(opts, ["image_id"], None) + if not is_empty_value(v): + params["imageRef"] = v + + v = navigate_value(opts, ["ssh_key_name"], None) + if not is_empty_value(v): + params["key_name"] = v + + v = navigate_value(opts, ["server_metadata"], None) + if not is_empty_value(v): + params["metadata"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + v = expand_create_publicip(opts, None) + if not is_empty_value(v): + params["publicip"] = v + + v = expand_create_root_volume(opts, None) + if not is_empty_value(v): + params["root_volume"] = v + + v = expand_create_security_groups(opts, None) + if not is_empty_value(v): + params["security_groups"] = v + + v = expand_create_server_tags(opts, None) + if not is_empty_value(v): + params["server_tags"] = v + + v = navigate_value(opts, ["user_data"], None) + if not is_empty_value(v): + params["user_data"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpcid"] = v + + if not params: + return params + + params = {"server": params} + + return params + + +def expand_create_extendparam(d, array_index): + r = dict() + + r["chargingMode"] = 0 + + v = navigate_value(d, ["enterprise_project_id"], array_index) + if not is_empty_value(v): + r["enterprise_project_id"] = v + + v = navigate_value(d, ["enable_auto_recovery"], array_index) + if not is_empty_value(v): + r["support_auto_recovery"] = v + + return r + + +def expand_create_nics(d, array_index): + new_ai = dict() + if array_index: + new_ai.update(array_index) + + req = [] + + v = navigate_value( + d, ["nics"], new_ai) + + if not v: + return req + n = len(v) + for i in range(n): + new_ai["nics"] = i + transformed = dict() + + v = navigate_value(d, ["nics", "ip_address"], new_ai) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["nics", "subnet_id"], new_ai) + if not is_empty_value(v): + transformed["subnet_id"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_publicip(d, array_index): + r = dict() + + v = navigate_value(d, ["eip_id"], array_index) + if not is_empty_value(v): + r["id"] = v + + return r + + +def expand_create_root_volume(d, array_index): + r = dict() + + v = expand_create_root_volume_extendparam(d, array_index) + if not is_empty_value(v): + r["extendparam"] = v + + v = navigate_value(d, ["root_volume", "size"], array_index) + if not is_empty_value(v): + r["size"] = v + + v = navigate_value(d, ["root_volume", "volume_type"], array_index) + if not is_empty_value(v): + r["volumetype"] = v + + return r + + +def expand_create_root_volume_extendparam(d, array_index): + r = dict() + + v = navigate_value(d, 
["root_volume", "snapshot_id"], array_index) + if not is_empty_value(v): + r["snapshotId"] = v + + return r + + +def expand_create_security_groups(d, array_index): + v = d.get("security_groups") + if not v: + return None + + return [{"id": i} for i in v] + + +def expand_create_server_tags(d, array_index): + v = d.get("server_tags") + if not v: + return None + + return [{"key": k, "value": v1} for k, v1 in v.items()] + + +def send_create_request(module, params, client): + url = "cloudservers" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_attach_nics_parameters(opts): + params = dict() + + v = expand_attach_nics_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + return params + + +def expand_attach_nics_nics(d, array_index): + ev = d.get("nics") + if not ev: + return None + + val = ev + + cv = d["current_state"].get("nics") + if cv: + m = [item.get("ip_address") for item in cv] + val = [item for item in ev if item.get("ip_address") not in m] + + r = [] + for item in val: + transformed = dict() + + v = item.get("ip_address") + if not is_empty_value(v): + transformed["ip_address"] = v + + v = item.get("subnet_id") + if not is_empty_value(v): + transformed["subnet_id"] = v + + if transformed: + r.append(transformed) + + return r + + +def send_attach_nics_request(module, params, client): + url = build_path(module, "cloudservers/{id}/nics") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(attach_nics), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_volume_request(module, params, client, info): + path_parameters = { + "volume_id": ["volume_id"], + } + data = {key: navigate_value(info, path) for key, path in path_parameters.items()} + + url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete_volume), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_attach_data_disk_parameters(opts, array_index): + params = dict() + + v = expand_attach_data_disk_volume_attachment(opts, array_index) + if not is_empty_value(v): + params["volumeAttachment"] = v + + return params + + +def expand_attach_data_disk_volume_attachment(d, array_index): + r = dict() + + v = navigate_value(d, ["data_volumes", "device"], array_index) + if not is_empty_value(v): + r["device"] = v + + v = navigate_value(d, ["data_volumes", "volume_id"], array_index) + if not is_empty_value(v): + r["volumeId"] = v + + return r + + +def send_attach_data_disk_request(module, params, client): + url = build_path(module, "cloudservers/{id}/attachvolume") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(attach_data_disk), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_delete_parameters(opts): + params = dict() + + params["delete_publicip"] = False + + params["delete_volume"] = False + + v = expand_delete_servers(opts, None) + if not is_empty_value(v): + params["servers"] = v + + return params + + +def expand_delete_servers(d, array_index): + new_ai = dict() + if array_index: + new_ai.update(array_index) + + req = [] + + n = 1 + for i in 
range(n): + transformed = dict() + + v = expand_delete_servers_id(d, new_ai) + if not is_empty_value(v): + transformed["id"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_delete_servers_id(d, array_index): + return d["ansible_module"].params.get("id") + + +def send_delete_request(module, params, client): + url = "cloudservers/delete" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait(config, result, client, timeout): + module = config.module + + url = build_path(module, "jobs/{job_id}", result) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["SUCCESS"], + ["RUNNING", "INIT"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_ecs_instance): error " + "waiting to be done, error= %s" % str(ex)) + + +def multi_invoke_delete_volume(config, opts, client, timeout): + module = config.module + + opts1 = None + expect = opts["data_volumes"] + current = opts["current_state"]["data_volumes"] + if expect and current: + v = [i["volume_id"] for i in expect] + opts1 = { + "data_volumes": [ + i for i in current if i["volume_id"] not in v + ] + } + + loop_val = navigate_value(opts1, ["data_volumes"]) + if not loop_val: + return + + for i in range(len(loop_val)): + r = send_delete_volume_request(module, None, client, loop_val[i]) + async_wait(config, r, client, timeout) + + +def multi_invoke_attach_data_disk(config, opts, client, timeout): + module = config.module + + opts1 = opts + expect = opts["data_volumes"] + current = opts["current_state"]["data_volumes"] + if expect and current: + v = [i["volume_id"] for i in current] + opts1 = { + "data_volumes": [ + i for i in expect if i["volume_id"] not in v + ] + } + + loop_val = navigate_value(opts1, ["data_volumes"]) + if not loop_val: + return + + for i in range(len(loop_val)): + params = build_attach_data_disk_parameters(opts1, {"data_volumes": i}) + r = send_attach_data_disk_request(module, params, client) + async_wait(config, r, client, timeout) + + +def send_read_request(module, client): + url = build_path(module, "cloudservers/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["server"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") + + result["OS-EXT-AZ:availability_zone"] = body.get( + "OS-EXT-AZ:availability_zone") + + result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") + + result["OS-EXT-SRV-ATTR:instance_name"] = body.get( + "OS-EXT-SRV-ATTR:instance_name") + + result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") + + result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") + + v = fill_read_resp_address(body.get("address")) + result["address"] = v + + result["config_drive"] = body.get("config_drive") + + result["created"] = body.get("created") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + v = 
fill_read_resp_flavor(body.get("flavor")) + result["flavor"] = v + + result["id"] = body.get("id") + + v = fill_read_resp_image(body.get("image")) + result["image"] = v + + result["key_name"] = body.get("key_name") + + v = fill_read_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["name"] = body.get("name") + + v = fill_read_resp_os_extended_volumes_volumes_attached( + body.get("os-extended-volumes:volumes_attached")) + result["os-extended-volumes:volumes_attached"] = v + + v = fill_read_resp_root_volume(body.get("root_volume")) + result["root_volume"] = v + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + return result + + +def fill_read_resp_address(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id") + + val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type") + + val["addr"] = item.get("addr") + + result.append(val) + + return result + + +def fill_read_resp_flavor(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_read_resp_image(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_read_resp_metadata(value): + if not value: + return None + + result = dict() + + result["image_name"] = value.get("image_name") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_read_resp_os_extended_volumes_volumes_attached(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["bootIndex"] = item.get("bootIndex") + + val["device"] = item.get("device") + + val["id"] = item.get("id") + + result.append(val) + + return result + + +def fill_read_resp_root_volume(value): + if not value: + return None + + result = dict() + + result["device"] = value.get("device") + + result["id"] = value.get("id") + + return result + + +def send_read_auto_recovery_request(module, client): + url = build_path(module, "cloudservers/{id}/autorecovery") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(read_auto_recovery), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def fill_read_auto_recovery_resp_body(body): + result = dict() + + result["support_auto_recovery"] = body.get("support_auto_recovery") + + return result + + +def flatten_options(response, array_index): + r = dict() + + v = navigate_value( + response, ["read", "OS-EXT-AZ:availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "config_drive"], array_index) + r["config_drive"] = v + + v = navigate_value(response, ["read", "created"], array_index) + r["created"] = v + + v = flatten_data_volumes(response, array_index) + r["data_volumes"] = v + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index) + r["disk_config_type"] = v + + v = flatten_enable_auto_recovery(response, array_index) + r["enable_auto_recovery"] = v + + v = navigate_value( + response, ["read", "enterprise_project_id"], array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "flavor", "id"], array_index) + r["flavor_name"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) + r["host_name"] = v + + v = 
navigate_value(response, ["read", "image", "id"], array_index) + r["image_id"] = v + + v = navigate_value( + response, ["read", "metadata", "image_name"], array_index) + r["image_name"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = flatten_nics(response, array_index) + r["nics"] = v + + v = navigate_value( + response, ["read", "OS-EXT-STS:power_state"], array_index) + r["power_state"] = v + + v = flatten_root_volume(response, array_index) + r["root_volume"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index) + r["server_alias"] = v + + v = flatten_server_tags(response, array_index) + r["server_tags"] = v + + v = navigate_value(response, ["read", "key_name"], array_index) + r["ssh_key_name"] = v + + v = navigate_value(response, ["read", "status"], array_index) + r["status"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index) + r["user_data"] = v + + v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index) + r["vpc_id"] = v + + return r + + +def flatten_data_volumes(d, array_index): + v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.os-extended-volumes:volumes_attached"] = i + + val = dict() + + v = navigate_value( + d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai) + val["device"] = v + + v = navigate_value( + d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai) + val["volume_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_enable_auto_recovery(d, array_index): + v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"], + array_index) + return v == "true" + + +def flatten_nics(d, array_index): + v = navigate_value(d, ["read", "address"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.address"] = i + + val = dict() + + v = navigate_value(d, ["read", "address", "addr"], new_ai) + val["ip_address"] = v + + v = navigate_value( + d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai) + val["port_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_root_volume(d, array_index): + result = dict() + + v = navigate_value(d, ["read", "root_volume", "device"], array_index) + result["device"] = v + + v = navigate_value(d, ["read", "root_volume", "id"], array_index) + result["volume_id"] = v + + for v in result.values(): + if v is not None: + return result + return None + + +def flatten_server_tags(d, array_index): + v = navigate_value(d, ["read", "tags"], array_index) + if not v: + return None + + r = dict() + for item in v: + v1 = item.split("=") + if v1: + r[v1[0]] = v1[1] + return r + + +def adjust_options(opts, states): + adjust_data_volumes(opts, states) + + adjust_nics(opts, states) + + +def adjust_data_volumes(parent_input, parent_cur): + iv = parent_input.get("data_volumes") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("data_volumes") + if not (cv and isinstance(cv, list)): + return + + lcv = len(cv) + result = [] + q = [] + for iiv in iv: + if len(q) == lcv: + break + + icv = None + for 
j in range(lcv): + if j in q: + continue + + icv = cv[j] + + if iiv["volume_id"] != icv["volume_id"]: + continue + + result.append(icv) + q.append(j) + break + else: + break + + if len(q) != lcv: + for i in range(lcv): + if i not in q: + result.append(cv[i]) + + if len(result) != lcv: + raise Exception("adjust property(data_volumes) failed, " + "the array number is not equal") + + parent_cur["data_volumes"] = result + + +def adjust_nics(parent_input, parent_cur): + iv = parent_input.get("nics") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("nics") + if not (cv and isinstance(cv, list)): + return + + lcv = len(cv) + result = [] + q = [] + for iiv in iv: + if len(q) == lcv: + break + + icv = None + for j in range(lcv): + if j in q: + continue + + icv = cv[j] + + if iiv["ip_address"] != icv["ip_address"]: + continue + + result.append(icv) + q.append(j) + break + else: + break + + if len(q) != lcv: + for i in range(lcv): + if i not in q: + result.append(cv[i]) + + if len(result) != lcv: + raise Exception("adjust property(nics) failed, " + "the array number is not equal") + + parent_cur["nics"] = result + + +def set_unreadable_options(opts, states): + states["admin_pass"] = opts.get("admin_pass") + + states["eip_id"] = opts.get("eip_id") + + set_unread_nics( + opts.get("nics"), states.get("nics")) + + set_unread_root_volume( + opts.get("root_volume"), states.get("root_volume")) + + states["security_groups"] = opts.get("security_groups") + + states["server_metadata"] = opts.get("server_metadata") + + +def set_unread_nics(inputv, curv): + if not (inputv and isinstance(inputv, list)): + return + + if not (curv and isinstance(curv, list)): + return + + lcv = len(curv) + q = [] + for iv in inputv: + if len(q) == lcv: + break + + cv = None + for j in range(lcv): + if j in q: + continue + + cv = curv[j] + + if iv["ip_address"] != cv["ip_address"]: + continue + + q.append(j) + break + else: + continue + + cv["subnet_id"] = iv.get("subnet_id") + + +def set_unread_root_volume(inputv, curv): + if not (inputv and isinstance(inputv, dict)): + return + + if not (curv and isinstance(curv, dict)): + return + + curv["size"] = inputv.get("size") + + curv["snapshot_id"] = inputv.get("snapshot_id") + + curv["volume_type"] = inputv.get("volume_type") + + +def set_readonly_options(opts, states): + opts["config_drive"] = states.get("config_drive") + + opts["created"] = states.get("created") + + opts["disk_config_type"] = states.get("disk_config_type") + + opts["host_name"] = states.get("host_name") + + opts["image_name"] = states.get("image_name") + + set_readonly_nics( + opts.get("nics"), states.get("nics")) + + opts["power_state"] = states.get("power_state") + + set_readonly_root_volume( + opts.get("root_volume"), states.get("root_volume")) + + opts["server_alias"] = states.get("server_alias") + + opts["status"] = states.get("status") + + +def set_readonly_nics(inputv, curv): + if not (curv and isinstance(curv, list)): + return + + if not (inputv and isinstance(inputv, list)): + return + + lcv = len(curv) + q = [] + for iv in inputv: + if len(q) == lcv: + break + + cv = None + for j in range(lcv): + if j in q: + continue + + cv = curv[j] + + if iv["ip_address"] != cv["ip_address"]: + continue + + q.append(j) + break + else: + continue + + iv["port_id"] = cv.get("port_id") + + +def set_readonly_root_volume(inputv, curv): + if not (inputv and isinstance(inputv, dict)): + return + + if not (curv and isinstance(curv, dict)): + return + + inputv["device"] = curv.get("device") + + 
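+    # Like "device" above, "volume_id" is filled in by the service rather than
+    # by the user, so it is copied from the current state into the input to
+    # keep the final input-vs-state comparison from reporting false drift.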
inputv["volume_id"] = curv.get("volume_id") + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["servers"], None) + + +def _build_identity_object(all_opts): + result = dict() + + result["OS-DCF:diskConfig"] = None + + v = navigate_value(all_opts, ["availability_zone"], None) + result["OS-EXT-AZ:availability_zone"] = v + + result["OS-EXT-SRV-ATTR:hostname"] = None + + result["OS-EXT-SRV-ATTR:instance_name"] = None + + v = navigate_value(all_opts, ["user_data"], None) + result["OS-EXT-SRV-ATTR:user_data"] = v + + result["OS-EXT-STS:power_state"] = None + + result["config_drive"] = None + + result["created"] = None + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + v = expand_list_flavor(all_opts, None) + result["flavor"] = v + + result["id"] = None + + v = expand_list_image(all_opts, None) + result["image"] = v + + v = navigate_value(all_opts, ["ssh_key_name"], None) + result["key_name"] = v + + v = expand_list_metadata(all_opts, None) + result["metadata"] = v + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["status"] = None + + v = expand_list_tags(all_opts, None) + result["tags"] = v + + return result + + +def expand_list_flavor(d, array_index): + r = dict() + + v = navigate_value(d, ["flavor_name"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_image(d, array_index): + r = dict() + + v = navigate_value(d, ["image_id"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_tags(d, array_index): + v = d.get("server_tags") + if not v: + return None + + return [k + "=" + v1 for k, v1 in v.items()] + + +def fill_list_resp_body(body): + result = dict() + + result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") + + result["OS-EXT-AZ:availability_zone"] = body.get( + "OS-EXT-AZ:availability_zone") + + result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") + + result["OS-EXT-SRV-ATTR:instance_name"] = body.get( + "OS-EXT-SRV-ATTR:instance_name") + + result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") + + result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") + + result["config_drive"] = body.get("config_drive") + + result["created"] = body.get("created") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + v = fill_list_resp_flavor(body.get("flavor")) + result["flavor"] = v + + result["id"] = body.get("id") + + v = fill_list_resp_image(body.get("image")) + result["image"] = v + + result["key_name"] = body.get("key_name") + + v = fill_list_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["name"] = body.get("name") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + return result + + +def fill_list_resp_flavor(value): + if not value: + return None + + result = dict() + + result["id"] = 
value.get("id") + + return result + + +def fill_list_resp_image(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_list_resp_metadata(value): + if not value: + return None + + result = dict() + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def adjust_list_resp(opts, resp): + adjust_list_api_tags(opts, resp) + + +def adjust_list_api_tags(parent_input, parent_cur): + iv = parent_input.get("tags") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("tags") + if not (cv and isinstance(cv, list)): + return + + result = [] + for iiv in iv: + if iiv not in cv: + break + + result.append(iiv) + + j = cv.index(iiv) + cv[j] = cv[-1] + cv.pop() + + if cv: + result.extend(cv) + parent_cur["tags"] = result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py deleted file mode 120000 index 4693c3c9f0..0000000000 --- a/plugins/modules/hwc_evs_disk.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_evs_disk.py \ No newline at end of file diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py new file mode 100644 index 0000000000..0963736ec2 --- /dev/null +++ b/plugins/modules/hwc_evs_disk.py @@ -0,0 +1,1171 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_evs_disk +description: + - Block storage management. +short_description: Creates a resource of Evs/Disk in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + required: true + name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + required: true + volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. + - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a + snapshot, the volume_type field must be the same as that of the snapshot's source disk. + type: str + required: true + backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. 
+    type: str
+    required: false
+  description:
+    description:
+      - Specifies the disk description. The value can contain a maximum of 255 bytes.
+    type: str
+    required: false
+  enable_full_clone:
+    description:
+      - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True.
+    type: bool
+    required: false
+  enable_scsi:
+    description:
+      - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying
+        storage media. The SCSI reservation command is supported. If this parameter is set to V(false), the disk device type
+        is VBD, which supports only simple SCSI read/write commands.
+      - If the parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI
+        EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified.
+    type: bool
+    required: false
+  enable_share:
+    description:
+      - Specifies whether the disk is shareable. The default value is False.
+    type: bool
+    required: false
+  encryption_id:
+    description:
+      - Specifies the encryption ID. Its length is fixed at 36 bytes.
+    type: str
+    required: false
+  enterprise_project_id:
+    description:
+      - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified,
+        the disk is bound to the default enterprise project.
+    type: str
+    required: false
+  image_id:
+    description:
+      - Specifies the image ID. If this parameter is specified, the disk is created from an image. BMS system disks cannot
+        be created from BMS images.
+    type: str
+    required: false
+  size:
+    description:
+      - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB.
+        This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use
+        an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size.
+        This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size
+        is equal to the backup size.
+    type: int
+    required: false
+  snapshot_id:
+    description:
+      - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot.
+    type: str
+    required: false
+extends_documentation_fragment:
+  - community.general.hwc
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+# test create disk
+- name: Create a disk
+  community.general.hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_test"
+    volume_type: "SATA"
+    size: 10
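+
+# illustrative variant (not part of the original example set): create a
+# shareable SAS data disk with a description
+- name: Create a shared disk
+  community.general.hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_shared"
+    volume_type: "SAS"
+    size: 20
+    enable_share: true
+    description: "shared data disk created from ansible"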
+"""
+
+RETURN = r"""
+availability_zone:
+  description:
+    - Specifies the AZ where you want to create the disk.
+  type: str
+  returned: success
+name:
+  description:
+    - Specifies the disk name. The value can contain a maximum of 255 bytes.
+  type: str
+  returned: success
+volume_type:
+  description:
+    - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA.
+    - SSD specifies the ultra-high I/O disk type.
+    - SAS specifies the high I/O disk type.
+    - SATA specifies the common I/O disk type.
+    - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a snapshot,
+      the volume_type field must be the same as that of the snapshot's source disk.
+  type: str
+  returned: success
+backup_id:
+  description:
+    - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup
+      to create the disk.
+  type: str
+  returned: success
+description:
+  description:
+    - Specifies the disk description. The value can contain a maximum of 255 bytes.
+  type: str
+  returned: success
+enable_full_clone:
+  description:
+    - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True.
+  type: bool
+  returned: success
+enable_scsi:
+  description:
+    - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying
+      storage media. The SCSI reservation command is supported. If this parameter is set to V(false), the disk device type
+      is VBD, which supports only simple SCSI read/write commands.
+    - If the parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI
+      EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified.
+  type: bool
+  returned: success
+enable_share:
+  description:
+    - Specifies whether the disk is shareable. The default value is False.
+  type: bool
+  returned: success
+encryption_id:
+  description:
+    - Specifies the encryption ID. Its length is fixed at 36 bytes.
+  type: str
+  returned: success
+enterprise_project_id:
+  description:
+    - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified,
+      the disk is bound to the default enterprise project.
+  type: str
+  returned: success
+image_id:
+  description:
+    - Specifies the image ID. If this parameter is specified, the disk is created from an image. BMS system disks cannot be
+      created from BMS images.
+  type: str
+  returned: success
+size:
+  description:
+    - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB.
+      This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use
+      an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size.
+      This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size
+      is equal to the backup size.
+  type: int
+  returned: success
+snapshot_id:
+  description:
+    - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot.
+  type: str
+  returned: success
+attachments:
+  description:
+    - Specifies the disk attachment information.
+  type: complex
+  returned: success
+  contains:
+    attached_at:
+      description:
+        - Specifies the time when the disk was attached. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+      type: str
+      returned: success
+    attachment_id:
+      description:
+        - Specifies the ID of the attachment information.
+      type: str
+      returned: success
+    device:
+      description:
+        - Specifies the device name.
+      type: str
+      returned: success
+    server_id:
+      description:
+        - Specifies the ID of the server to which the disk is attached.
+      type: str
+      returned: success
+backup_policy_id:
+  description:
+    - Specifies the backup policy ID.
+  type: str
+  returned: success
+created_at:
+  description:
+    - Specifies the time when the disk was created. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+  type: str
+  returned: success
+is_bootable:
+  description:
+    - Specifies whether the disk is bootable.
+  type: bool
+  returned: success
+is_readonly:
+  description:
+    - Specifies whether the disk is read-only or read/write.
True indicates that the disk is read-only. False indicates that + the disk is read/write. + type: bool + returned: success +source_volume_id: + description: + - Specifies the source disk ID. This parameter has a value if the disk is created from a source disk. + type: str + returned: success +status: + description: + - Specifies the disk status. + type: str + returned: success +tags: + description: + - Specifies the disk tags. + type: dict + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='30m', type='str'), + update=dict(default='30m', type='str'), + delete=dict(default='30m', type='str'), + ), default=dict()), + availability_zone=dict(type='str', required=True), + name=dict(type='str', required=True), + volume_type=dict(type='str', required=True), + backup_id=dict(type='str'), + description=dict(type='str'), + enable_full_clone=dict(type='bool'), + enable_scsi=dict(type='bool'), + enable_share=dict(type='bool'), + encryption_id=dict(type='str'), + enterprise_project_id=dict(type='str'), + image_id=dict(type='str'), + size=dict(type='int'), + snapshot_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "evs") + + try: + _init(config) + is_exist = module.params.get('id') + + result = None + changed = False + if module.params['state'] == 'present': + if not is_exist: + if not module.check_mode: + create(config) + changed = True + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + if not module.check_mode: + update(config, inputv, result) + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + raise Exception("Update resource failed, " + "some attributes are not updated") + + changed = True + + result['id'] = module.params.get('id') + else: + result = dict() + if is_exist: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def _init(config): + module = config.module + if module.params.get('id'): + return + + v = search_resource(config) + n = len(v) + if n > 1: + raise Exception("find more than one resources(%s)" % ", ".join([ + navigate_value(i, ["id"]) + for i in v + ])) + + if n == 1: + module.params['id'] = navigate_value(v[0], ["id"]) + + +def user_input_parameters(module): + return { + "availability_zone": module.params.get("availability_zone"), + "backup_id": module.params.get("backup_id"), + "description": module.params.get("description"), + "enable_full_clone": module.params.get("enable_full_clone"), + "enable_scsi": module.params.get("enable_scsi"), + "enable_share": module.params.get("enable_share"), + "encryption_id": module.params.get("encryption_id"), + "enterprise_project_id": 
module.params.get("enterprise_project_id"), + "image_id": module.params.get("image_id"), + "name": module.params.get("name"), + "size": module.params.get("size"), + "snapshot_id": module.params.get("snapshot_id"), + "volume_type": module.params.get("volume_type"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + + client1 = config.client(get_region(module), "volume", "project") + client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") + obj = async_wait(config, r, client1, timeout) + module.params['id'] = navigate_value(obj, ["entities", "volume_id"]) + + +def update(config, expect_state, current_state): + module = config.module + expect_state["current_state"] = current_state + current_state["current_state"] = current_state + client = config.client(get_region(module), "evs", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + + params = build_update_parameters(expect_state) + params1 = build_update_parameters(current_state) + if params and are_different_dicts(params, params1): + send_update_request(module, params, client) + + params = build_extend_disk_parameters(expect_state) + params1 = build_extend_disk_parameters(current_state) + if params and are_different_dicts(params, params1): + client1 = config.client(get_region(module), "evsv2.1", "project") + r = send_extend_disk_request(module, params, client1) + + client1 = config.client(get_region(module), "volume", "project") + client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") + async_wait(config, r, client1, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "evs", "project") + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + + r = send_delete_request(module, None, client) + + client = config.client(get_region(module), "volume", "project") + client.endpoint = client.endpoint.replace("/v2/", "/v1/") + async_wait(config, r, client, timeout) + + +def read_resource(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return res, None + + +def build_state(opts, response, array_index): + states = flatten_options(response, array_index) + set_unreadable_options(opts, states) + return states + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enable_share"]) + if v or v in [False, 0]: + query_params.append( + "multiattach=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["name"]) + if v or v in [False, 0]: + query_params.append( + "name=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["availability_zone"]) + if v or v in [False, 0]: + query_params.append( + "availability_zone=" + (str(v) if v else str(v).lower())) + + query_link = "?limit=10&offset={start}" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + opts = user_input_parameters(module) + name = module.params.get("name") + query_link = _build_query_link(opts) + link = "os-vendor-volumes/detail" 
+ query_link + + result = [] + p = {'start': 0} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + if name == item.get("name"): + result.append(item) + + if len(result) > 1: + break + + p['start'] += len(r) + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["backup_id"], None) + if not is_empty_value(v): + params["backup_id"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = navigate_value(opts, ["image_id"], None) + if not is_empty_value(v): + params["imageRef"] = v + + v = expand_create_metadata(opts, None) + if not is_empty_value(v): + params["metadata"] = v + + v = navigate_value(opts, ["enable_share"], None) + if not is_empty_value(v): + params["multiattach"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["size"], None) + if not is_empty_value(v): + params["size"] = v + + v = navigate_value(opts, ["snapshot_id"], None) + if not is_empty_value(v): + params["snapshot_id"] = v + + v = navigate_value(opts, ["volume_type"], None) + if not is_empty_value(v): + params["volume_type"] = v + + if not params: + return params + + params = {"volume": params} + + return params + + +def expand_create_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["encryption_id"], array_index) + if not is_empty_value(v): + r["__system__cmkid"] = v + + v = expand_create_metadata_system_encrypted(d, array_index) + if not is_empty_value(v): + r["__system__encrypted"] = v + + v = expand_create_metadata_full_clone(d, array_index) + if not is_empty_value(v): + r["full_clone"] = v + + v = expand_create_metadata_hw_passthrough(d, array_index) + if not is_empty_value(v): + r["hw:passthrough"] = v + + return r + + +def expand_create_metadata_system_encrypted(d, array_index): + v = navigate_value(d, ["encryption_id"], array_index) + return "1" if v else "" + + +def expand_create_metadata_full_clone(d, array_index): + v = navigate_value(d, ["enable_full_clone"], array_index) + return "0" if v else "" + + +def expand_create_metadata_hw_passthrough(d, array_index): + v = navigate_value(d, ["enable_scsi"], array_index) + if v is None: + return v + return "true" if v else "false" + + +def send_create_request(module, params, client): + url = "cloudvolumes" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if v is not None: + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"volume": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def 
send_delete_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_extend_disk_parameters(opts): + params = dict() + + v = expand_extend_disk_os_extend(opts, None) + if not is_empty_value(v): + params["os-extend"] = v + + return params + + +def expand_extend_disk_os_extend(d, array_index): + r = dict() + + v = navigate_value(d, ["size"], array_index) + if not is_empty_value(v): + r["new_size"] = v + + return r + + +def send_extend_disk_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}/action") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(extend_disk), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait(config, result, client, timeout): + module = config.module + + path_parameters = { + "job_id": ["job_id"], + } + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} + + url = build_path(module, "jobs/{job_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["SUCCESS"], + ["RUNNING", "INIT"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_evs_disk): error " + "waiting to be done, error= %s" % str(ex)) + + +def send_read_request(module, client): + url = build_path(module, "os-vendor-volumes/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["volume"], None) + + +def fill_read_resp_body(body): + result = dict() + + v = fill_read_resp_attachments(body.get("attachments")) + result["attachments"] = v + + result["availability_zone"] = body.get("availability_zone") + + result["bootable"] = body.get("bootable") + + result["created_at"] = body.get("created_at") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + v = fill_read_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["multiattach"] = body.get("multiattach") + + result["name"] = body.get("name") + + result["size"] = body.get("size") + + result["snapshot_id"] = body.get("snapshot_id") + + result["source_volid"] = body.get("source_volid") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata")) + result["volume_image_metadata"] = v + + result["volume_type"] = body.get("volume_type") + + return result + + +def fill_read_resp_attachments(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["attached_at"] = item.get("attached_at") + + val["attachment_id"] = item.get("attachment_id") + + val["device"] = item.get("device") + + val["server_id"] = item.get("server_id") + + result.append(val) + + return result + + +def fill_read_resp_metadata(value): + if not value: + return None + + result = dict() + + result["__system__cmkid"] = 
value.get("__system__cmkid") + + result["attached_mode"] = value.get("attached_mode") + + result["full_clone"] = value.get("full_clone") + + result["hw:passthrough"] = value.get("hw:passthrough") + + result["policy"] = value.get("policy") + + result["readonly"] = value.get("readonly") + + return result + + +def fill_read_resp_volume_image_metadata(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def flatten_options(response, array_index): + r = dict() + + v = flatten_attachments(response, array_index) + r["attachments"] = v + + v = navigate_value(response, ["read", "availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "metadata", "policy"], array_index) + r["backup_policy_id"] = v + + v = navigate_value(response, ["read", "created_at"], array_index) + r["created_at"] = v + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = flatten_enable_full_clone(response, array_index) + r["enable_full_clone"] = v + + v = flatten_enable_scsi(response, array_index) + r["enable_scsi"] = v + + v = navigate_value(response, ["read", "multiattach"], array_index) + r["enable_share"] = v + + v = navigate_value( + response, ["read", "metadata", "__system__cmkid"], array_index) + r["encryption_id"] = v + + v = navigate_value( + response, ["read", "enterprise_project_id"], array_index) + r["enterprise_project_id"] = v + + v = navigate_value( + response, ["read", "volume_image_metadata", "id"], array_index) + r["image_id"] = v + + v = flatten_is_bootable(response, array_index) + r["is_bootable"] = v + + v = flatten_is_readonly(response, array_index) + r["is_readonly"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "size"], array_index) + r["size"] = v + + v = navigate_value(response, ["read", "snapshot_id"], array_index) + r["snapshot_id"] = v + + v = navigate_value(response, ["read", "source_volid"], array_index) + r["source_volume_id"] = v + + v = navigate_value(response, ["read", "status"], array_index) + r["status"] = v + + v = navigate_value(response, ["read", "tags"], array_index) + r["tags"] = v + + v = navigate_value(response, ["read", "volume_type"], array_index) + r["volume_type"] = v + + return r + + +def flatten_attachments(d, array_index): + v = navigate_value(d, ["read", "attachments"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.attachments"] = i + + val = dict() + + v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai) + val["attached_at"] = v + + v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai) + val["attachment_id"] = v + + v = navigate_value(d, ["read", "attachments", "device"], new_ai) + val["device"] = v + + v = navigate_value(d, ["read", "attachments", "server_id"], new_ai) + val["server_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_enable_full_clone(d, array_index): + v = navigate_value(d, ["read", "metadata", "full_clone"], + array_index) + if v is None: + return v + return True if v == "0" else False + + +def flatten_enable_scsi(d, array_index): + v = navigate_value(d, ["read", "metadata", "hw:passthrough"], + array_index) + if v is None: + return v + return True if v in ["true", "True"] else 
False + + +def flatten_is_bootable(d, array_index): + v = navigate_value(d, ["read", "bootable"], array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def flatten_is_readonly(d, array_index): + v = navigate_value(d, ["read", "metadata", "readonly"], + array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def set_unreadable_options(opts, states): + states["backup_id"] = opts.get("backup_id") + + +def set_readonly_options(opts, states): + opts["attachments"] = states.get("attachments") + + opts["backup_policy_id"] = states.get("backup_policy_id") + + opts["created_at"] = states.get("created_at") + + opts["is_bootable"] = states.get("is_bootable") + + opts["is_readonly"] = states.get("is_readonly") + + opts["source_volume_id"] = states.get("source_volume_id") + + opts["status"] = states.get("status") + + opts["tags"] = states.get("tags") + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["volumes"], None) + + +def expand_list_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["encryption_id"], array_index) + r["__system__cmkid"] = v + + r["attached_mode"] = None + + v = navigate_value(d, ["enable_full_clone"], array_index) + r["full_clone"] = v + + v = navigate_value(d, ["enable_scsi"], array_index) + r["hw:passthrough"] = v + + r["policy"] = None + + r["readonly"] = None + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_volume_image_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["image_id"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def fill_list_resp_body(body): + result = dict() + + v = fill_list_resp_attachments(body.get("attachments")) + result["attachments"] = v + + result["availability_zone"] = body.get("availability_zone") + + result["bootable"] = body.get("bootable") + + result["created_at"] = body.get("created_at") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + v = fill_list_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["multiattach"] = body.get("multiattach") + + result["name"] = body.get("name") + + result["size"] = body.get("size") + + result["snapshot_id"] = body.get("snapshot_id") + + result["source_volid"] = body.get("source_volid") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata")) + result["volume_image_metadata"] = v + + result["volume_type"] = body.get("volume_type") + + return result + + +def fill_list_resp_attachments(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["attached_at"] = item.get("attached_at") + + val["attachment_id"] = item.get("attachment_id") + + val["device"] = item.get("device") + + val["server_id"] = item.get("server_id") + + result.append(val) + + return result + + +def fill_list_resp_metadata(value): + if not value: + return None + + result = dict() + + result["__system__cmkid"] = value.get("__system__cmkid") + + result["attached_mode"] = value.get("attached_mode") + + result["full_clone"] = value.get("full_clone") + + 
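+    # These metadata keys are string-encoded module options: "full_clone" is
+    # "0" when enable_full_clone was requested and "hw:passthrough" is
+    # "true"/"false" for enable_scsi (see the expand_create_metadata_* and
+    # flatten_enable_* helpers above).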
result["hw:passthrough"] = value.get("hw:passthrough") + + result["policy"] = value.get("policy") + + result["readonly"] = value.get("readonly") + + return result + + +def fill_list_resp_volume_image_metadata(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py deleted file mode 120000 index 4e7943ff7e..0000000000 --- a/plugins/modules/hwc_network_vpc.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_network_vpc.py \ No newline at end of file diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py new file mode 100644 index 0000000000..b974831c87 --- /dev/null +++ b/plugins/modules/hwc_network_vpc.py @@ -0,0 +1,495 @@ +#!/usr/bin/python +# +# Copyright (C) 2018 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_network_vpc +description: + - Represents an vpc resource. +short_description: Creates a Huawei Cloud VPC +author: Huawei Inc. (@huaweicloud) +requirements: + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in VPC. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeout for create operation. + type: str + default: '15m' + update: + description: + - The timeout for update operation. + type: str + default: '15m' + delete: + description: + - The timeout for delete operation. + type: str + default: '15m' + name: + description: + - The name of vpc. + type: str + required: true + cidr: + description: + - The range of available subnets in the VPC. + type: str + required: true +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Create a vpc + community.general.hwc_network_vpc: + identity_endpoint: "{{ identity_endpoint }}" + user: "{{ user }}" + password: "{{ password }}" + domain: "{{ domain }}" + project: "{{ project }}" + region: "{{ region }}" + name: "vpc_1" + cidr: "192.168.100.0/24" + state: present +""" + +RETURN = r""" +id: + description: + - The ID of VPC. + type: str + returned: success +name: + description: + - The name of VPC. + type: str + returned: success +cidr: + description: + - The range of available subnets in the VPC. + type: str + returned: success +status: + description: + - The status of VPC. + type: str + returned: success +routes: + description: + - The route information. + type: complex + returned: success + contains: + destination: + description: + - The destination network segment of a route. + type: str + returned: success + next_hop: + description: + - The next hop of a route. If the route type is peering, it provides VPC peering connection ID. + type: str + returned: success +enable_shared_snat: + description: + - Show whether the shared SNAT is enabled. 
+ type: bool + returned: success +""" + +############################################################################### +# Imports +############################################################################### + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, + HwcClientException404, HwcModule, + are_different_dicts, is_empty_value, + wait_to_finish, get_region, + build_path, navigate_value) +import re + +############################################################################### +# Main +############################################################################### + + +def main(): + """Main function""" + + module = HwcModule( + argument_spec=dict( + state=dict( + default='present', choices=['present', 'absent'], type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + update=dict(default='15m', type='str'), + delete=dict(default='15m', type='str'), + ), default=dict()), + name=dict(required=True, type='str'), + cidr=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + config = Config(module, 'vpc') + + state = module.params['state'] + + if (not module.params.get("id")) and module.params.get("name"): + module.params['id'] = get_id_by_name(config) + + fetch = None + link = self_link(module) + # the link will include Nones if required format parameters are missed + if not re.search('/None/|/None$', link): + client = config.client(get_region(module), "vpc", "project") + fetch = fetch_resource(module, client, link) + if fetch: + fetch = fetch.get('vpc') + changed = False + + if fetch: + if state == 'present': + expect = _get_editable_properties(module) + current_state = response_to_hash(module, fetch) + current = {"cidr": current_state["cidr"]} + if are_different_dicts(expect, current): + if not module.check_mode: + fetch = update(config, self_link(module)) + fetch = response_to_hash(module, fetch.get('vpc')) + changed = True + else: + fetch = current_state + else: + if not module.check_mode: + delete(config, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + if not module.check_mode: + fetch = create(config, "vpcs") + fetch = response_to_hash(module, fetch.get('vpc')) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + r = None + try: + r = client.post(link, resource_to_create(module)) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error creating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_done = wait_for_operation(config, 'create', r) + v = "" + try: + v = navigate_value(wait_done, ['vpc', 'id']) + except Exception as ex: + module.fail_json(msg=str(ex)) + + url = build_path(module, 'vpcs/{op_id}', {'op_id': v}) + return fetch_resource(module, client, url) + + +def update(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + r = None + try: + r = client.put(link, resource_to_update(module)) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error updating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_for_operation(config, 'update', r) + + return fetch_resource(module, client, link) + + +def delete(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + 
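+    # Fire the DELETE call, then wait_for_delete() below polls the resource
+    # URL until the API answers 404, so success is only reported once the
+    # VPC is really gone.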
try: + client.delete(link) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error deleting " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_for_delete(module, client, link) + + +def fetch_resource(module, client, link): + try: + return client.get(link) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error fetching " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def get_id_by_name(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + name = module.params.get("name") + link = "vpcs" + query_link = "?marker={marker}&limit=10" + link += query_link + not_format_keys = re.findall("={marker}", link) + none_values = re.findall("=None", link) + + if not (not_format_keys or none_values): + r = None + try: + r = client.get(link) + except Exception: + pass + if r is None: + return None + r = r.get('vpcs', []) + ids = [ + i.get('id') for i in r if i.get('name', '') == name + ] + if not ids: + return None + elif len(ids) == 1: + return ids[0] + else: + module.fail_json( + msg="Multiple resources with same name are found.") + elif none_values: + module.fail_json( + msg="Can not find id by name because url includes None.") + else: + p = {'marker': ''} + ids = set() + while True: + r = None + try: + r = client.get(link.format(**p)) + except Exception: + pass + if r is None: + break + r = r.get('vpcs', []) + if r == []: + break + for i in r: + if i.get('name') == name: + ids.add(i.get('id')) + if len(ids) >= 2: + module.fail_json( + msg="Multiple resources with same name are found.") + + p['marker'] = r[-1].get('id') + + return ids.pop() if ids else None + + +def self_link(module): + return build_path(module, "vpcs/{id}") + + +def resource_to_create(module): + params = dict() + + v = module.params.get('cidr') + if not is_empty_value(v): + params["cidr"] = v + + v = module.params.get('name') + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"vpc": params} + + return params + + +def resource_to_update(module): + params = dict() + + v = module.params.get('cidr') + if not is_empty_value(v): + params["cidr"] = v + + if not params: + return params + + params = {"vpc": params} + + return params + + +def _get_editable_properties(module): + return { + "cidr": module.params.get("cidr"), + } + + +def response_to_hash(module, response): + """ Remove unnecessary properties from the response. + This is for doing comparisons with Ansible's current parameters. 
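+
+    For example, an API body such as
+        {"id": "...", "name": "vpc_1", "cidr": "192.168.100.0/24",
+         "status": "OK", "routes": [], "enable_shared_snat": False}
+    (sample values taken from the EXAMPLES block) is reduced to exactly
+    the keys built below.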
+ """ + return { + 'id': response.get('id'), + 'name': response.get('name'), + 'cidr': response.get('cidr'), + 'status': response.get('status'), + 'routes': VpcRoutesArray( + response.get('routes', []), module).from_response(), + 'enable_shared_snat': response.get('enable_shared_snat') + } + + +def wait_for_operation(config, op_type, op_result): + module = config.module + op_id = "" + try: + op_id = navigate_value(op_result, ['vpc', 'id']) + except Exception as ex: + module.fail_json(msg=str(ex)) + + url = build_path(module, "vpcs/{op_id}", {'op_id': op_id}) + timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m')) + states = { + 'create': { + 'allowed': ['CREATING', 'DONW', 'OK'], + 'complete': ['OK'], + }, + 'update': { + 'allowed': ['PENDING_UPDATE', 'DONW', 'OK'], + 'complete': ['OK'], + } + } + + return wait_for_completion(url, timeout, states[op_type]['allowed'], + states[op_type]['complete'], config) + + +def wait_for_completion(op_uri, timeout, allowed_states, + complete_states, config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + def _refresh_status(): + r = None + try: + r = fetch_resource(module, client, op_uri) + except Exception: + return None, "" + + status = "" + try: + status = navigate_value(r, ['vpc', 'status']) + except Exception: + return None, "" + + return r, status + + try: + return wait_to_finish(complete_states, allowed_states, + _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def wait_for_delete(module, client, link): + + def _refresh_status(): + try: + client.get(link) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + try: + return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +class VpcRoutesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return { + 'destination': item.get('destination'), + 'nexthop': item.get('next_hop') + } + + def _response_from_item(self, item): + return { + 'destination': item.get('destination'), + 'next_hop': item.get('nexthop') + } + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_smn_topic.py b/plugins/modules/hwc_smn_topic.py deleted file mode 120000 index 8471292894..0000000000 --- a/plugins/modules/hwc_smn_topic.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_smn_topic.py \ No newline at end of file diff --git a/plugins/modules/hwc_smn_topic.py b/plugins/modules/hwc_smn_topic.py new file mode 100644 index 0000000000..6fb9a3814d --- /dev/null +++ b/plugins/modules/hwc_smn_topic.py @@ -0,0 +1,332 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation 
+############################################################################### + +DOCUMENTATION = r""" +module: hwc_smn_topic +description: + - Represents a SMN notification topic resource. +short_description: Creates a resource of SMNTopic in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + display_name: + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name + contains a maximum of 192 bytes. + type: str + required: false + name: + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + type: str + required: true +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Create a smn topic + community.general.hwc_smn_topic: + identity_endpoint: "{{ identity_endpoint }}" + user_name: "{{ user_name }}" + password: "{{ password }}" + domain_name: "{{ domain_name }}" + project_name: "{{ project_name }}" + region: "{{ region }}" + name: "ansible_smn_topic_test" + state: present +""" + +RETURN = r""" +create_time: + description: + - Time when the topic was created. + returned: success + type: str +display_name: + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name contains + a maximum of 192 bytes. + returned: success + type: str +name: + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + returned: success + type: str +push_policy: + description: + - Message pushing policy. V(0) indicates that the message sending fails and the message is cached in the queue. V(1) indicates + that the failed message is discarded. + returned: success + type: int +topic_urn: + description: + - Resource identifier of a topic, which is unique. + returned: success + type: str +update_time: + description: + - Time when the topic was updated. 
+ returned: success + type: str +""" + +############################################################################### +# Imports +############################################################################### + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, + HwcModule, navigate_value, + are_different_dicts, is_empty_value, + build_path, get_region) +import re + +############################################################################### +# Main +############################################################################### + + +def main(): + """Main function""" + + module = HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + display_name=dict(type='str'), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + config = Config(module, "smn") + + state = module.params['state'] + + if not module.params.get("id"): + module.params['id'] = get_resource_id(config) + + fetch = None + link = self_link(module) + # the link will include Nones if required format parameters are missed + if not re.search('/None/|/None$', link): + client = config.client(get_region(module), "smn", "project") + fetch = fetch_resource(module, client, link) + changed = False + + if fetch: + if state == 'present': + expect = _get_resource_editable_properties(module) + current_state = response_to_hash(module, fetch) + current = {'display_name': current_state['display_name']} + if are_different_dicts(expect, current): + if not module.check_mode: + fetch = update(config) + fetch = response_to_hash(module, fetch) + changed = True + else: + fetch = current_state + else: + if not module.check_mode: + delete(config) + fetch = {} + changed = True + else: + if state == 'present': + if not module.check_mode: + fetch = create(config) + fetch = response_to_hash(module, fetch) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = "notifications/topics" + r = None + try: + r = client.post(link, create_resource_opts(module)) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error creating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + return get_resource(config, r) + + +def update(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = self_link(module) + try: + client.put(link, update_resource_opts(module)) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error updating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + return fetch_resource(module, client, link) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = self_link(module) + try: + client.delete(link) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error deleting " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def fetch_resource(module, client, link): + try: + return client.get(link) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error fetching " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def get_resource(config, result): + module = config.module + client = config.client(get_region(module), "smn", "project") + + v = "" + try: + v = navigate_value(result, ['topic_urn']) 
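+        # The create response only carries the topic URN; the full resource
+        # is re-read below so that every documented return field is populated.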
+ except Exception as ex: + module.fail_json(msg=str(ex)) + + d = {'topic_urn': v} + url = build_path(module, 'notifications/topics/{topic_urn}', d) + + return fetch_resource(module, client, url) + + +def get_resource_id(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = "notifications/topics" + query_link = "?offset={offset}&limit=10" + link += query_link + + p = {'offset': 0} + v = module.params.get('name') + ids = set() + while True: + r = None + try: + r = client.get(link.format(**p)) + except Exception: + pass + if r is None: + break + r = r.get('topics', []) + if r == []: + break + for i in r: + if i.get('name') == v: + ids.add(i.get('topic_urn')) + if len(ids) >= 2: + module.fail_json(msg="Multiple resources are found") + + p['offset'] += 1 + + return ids.pop() if ids else None + + +def self_link(module): + return build_path(module, "notifications/topics/{id}") + + +def create_resource_opts(module): + params = dict() + + v = module.params.get('display_name') + if not is_empty_value(v): + params["display_name"] = v + + v = module.params.get('name') + if not is_empty_value(v): + params["name"] = v + + return params + + +def update_resource_opts(module): + params = dict() + + v = module.params.get('display_name') + if not is_empty_value(v): + params["display_name"] = v + + return params + + +def _get_resource_editable_properties(module): + return { + "display_name": module.params.get("display_name"), + } + + +def response_to_hash(module, response): + """Remove unnecessary properties from the response. + This is for doing comparisons with Ansible's current parameters. + """ + return { + 'create_time': response.get('create_time'), + 'display_name': response.get('display_name'), + 'name': response.get('name'), + 'push_policy': _push_policy_convert_from_response(response.get('push_policy')), + 'topic_urn': response.get('topic_urn'), + 'update_time': response.get('update_time') + } + + +def _push_policy_convert_from_response(value): + return { + 0: "the message sending fails and is cached in the queue", + 1: "the failed message is discarded", + }.get(int(value)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py deleted file mode 120000 index 7e8a4934de..0000000000 --- a/plugins/modules/hwc_vpc_eip.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_vpc_eip.py \ No newline at end of file diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py new file mode 100644 index 0000000000..9a23b7b3f9 --- /dev/null +++ b/plugins/modules/hwc_vpc_eip.py @@ -0,0 +1,849 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_vpc_eip +description: + - Elastic IP management. +short_description: Creates a resource of VPC/EIP in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. 
+ type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '5m' + update: + description: + - The timeouts for update operation. + type: str + default: '5m' + type: + description: + - Specifies the EIP type. + type: str + required: true + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + required: false + suboptions: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. + type: str + required: true + name: + description: + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + size: + description: + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows. + - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. + type: int + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + required: false + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned. + type: int + required: false + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + required: false + port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. + type: str + required: false + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +# create an eip and bind it to a port +- name: Create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a port + hwc_vpc_port: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" + register: port +- name: Create an eip and bind it to a port + community.general.hwc_vpc_eip: + type: "5_bgp" + dedicated_bandwidth: + charge_mode: "traffic" + name: "ansible_test_dedicated_bandwidth" + size: 1 + port_id: "{{ port.id }}" +""" + +RETURN = r""" +type: + description: + - Specifies the EIP type. 
+  type: str
+  returned: success
+dedicated_bandwidth:
+  description:
+    - Specifies the dedicated bandwidth object.
+  type: dict
+  returned: success
+  contains:
+    charge_mode:
+      description:
+        - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic.
+          If this parameter is left blank or is a null character string, the default value bandwidth is used. For IPv6 addresses,
+          the default parameter value is bandwidth outside China and is traffic in China.
+      type: str
+      returned: success
+    name:
+      description:
+        - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores
+          (V(_)), hyphens (V(-)), and periods (V(.)).
+      type: str
+      returned: success
+    size:
+      description:
+        - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may
+          vary depending on the configuration in each region. You can see the bandwidth range of each region on the management
+          console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as
+          follows.
+        - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included).
+        - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s included).
+        - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s.
+      type: int
+      returned: success
+    id:
+      description:
+        - Specifies the ID of the dedicated bandwidth.
+      type: str
+      returned: success
+enterprise_project_id:
+  description:
+    - Specifies the enterprise project ID.
+  type: str
+  returned: success
+ip_version:
+  description:
+    - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
+  type: int
+  returned: success
+ipv4_address:
+  description:
+    - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it.
+  type: str
+  returned: success
+port_id:
+  description:
+    - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP.
+  type: str
+  returned: success
+shared_bandwidth_id:
+  description:
+    - Specifies the ID of the shared bandwidth.
+  type: str
+  returned: success
+create_time:
+  description:
+    - Specifies the time (UTC time) when the EIP was assigned.
+  type: str
+  returned: success
+ipv6_address:
+  description:
+    - Specifies the obtained IPv6 EIP.
+  type: str
+  returned: success
+private_ip_address:
+  description:
+    - Specifies the private IP address bound with the EIP. This parameter is returned only when a private IP address is bound
+      with the EIP.
+ type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='5m', type='str'), + update=dict(default='5m', type='str'), + ), default=dict()), + type=dict(type='str', required=True), + dedicated_bandwidth=dict(type='dict', options=dict( + charge_mode=dict(type='str', required=True), + name=dict(type='str', required=True), + size=dict(type='int', required=True) + )), + enterprise_project_id=dict(type='str'), + ip_version=dict(type='int'), + ipv4_address=dict(type='str'), + port_id=dict(type='str'), + shared_bandwidth_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "dedicated_bandwidth": module.params.get("dedicated_bandwidth"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "ip_version": module.params.get("ip_version"), + "ipv4_address": module.params.get("ipv4_address"), + "port_id": module.params.get("port_id"), + "shared_bandwidth_id": module.params.get("shared_bandwidth_id"), + "type": module.params.get("type"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["publicip", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + r = send_update_request(module, params, client) + async_wait_update(config, r, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + if module.params["port_id"]: + module.params["port_id"] = "" + update(config) + + 
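+    # The port binding is removed first (the update() call above clears
+    # port_id), then the DELETE is sent; the resource URL is polled below
+    # until the API answers 404, meaning the EIP is actually released.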
send_delete_request(module, None, client) + + url = build_path(module, "publicips/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["ip_version"]) + if v: + query_params.append("ip_version=" + str(v)) + + v = navigate_value(opts, ["enterprise_project_id"]) + if v: + query_params.append("enterprise_project_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "publicips" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = expand_create_bandwidth(opts, None) + if not is_empty_value(v): + params["bandwidth"] = v + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = expand_create_publicip(opts, None) + if not is_empty_value(v): + params["publicip"] = v + + return params + + +def expand_create_bandwidth(d, array_index): + v = navigate_value(d, ["dedicated_bandwidth"], array_index) + sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) + if v and sbwid: + raise Exception("don't input shared_bandwidth_id and " + "dedicated_bandwidth at same time") + + if not (v or sbwid): + raise Exception("must input shared_bandwidth_id or " + "dedicated_bandwidth") + + if sbwid: + return { + "id": sbwid, + "share_type": "WHOLE"} + + return { + "charge_mode": v["charge_mode"], + "name": v["name"], + "share_type": "PER", + "size": v["size"]} + + +def expand_create_publicip(d, array_index): + r = dict() + + v = navigate_value(d, ["ipv4_address"], array_index) + if not is_empty_value(v): + r["ip_address"] = v + + v = navigate_value(d, ["ip_version"], array_index) + if not is_empty_value(v): + r["ip_version"] = v + + v = navigate_value(d, ["type"], array_index) + if not is_empty_value(v): + r["type"] = v + + return r + + +def send_create_request(module, params, client): + url = "publicips" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, 
result, client, timeout): + module = config.module + + path_parameters = { + "publicip_id": ["publicip", "id"], + } + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} + + url = build_path(module, "publicips/{publicip_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["publicip", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + None, + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["ip_version"], None) + if not is_empty_value(v): + params["ip_version"] = v + + v = navigate_value(opts, ["port_id"], None) + if v is not None: + params["port_id"] = v + + if not params: + return params + + params = {"publicip": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "publicips/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_update(config, result, client, timeout): + module = config.module + + url = build_path(module, "publicips/{id}") + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["publicip", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + None, + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(update) to " + "be done, error= %s" % str(ex)) + + +def send_delete_request(module, params, client): + url = build_path(module, "publicips/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "publicips/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["publicip"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["bandwidth_id"] = body.get("bandwidth_id") + + result["bandwidth_name"] = body.get("bandwidth_name") + + result["bandwidth_share_type"] = body.get("bandwidth_share_type") + + result["bandwidth_size"] = body.get("bandwidth_size") + + result["create_time"] = body.get("create_time") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["ip_version"] = body.get("ip_version") + + result["port_id"] = body.get("port_id") + + result["private_ip_address"] = body.get("private_ip_address") + + result["public_ip_address"] = body.get("public_ip_address") + + result["public_ipv6_address"] = body.get("public_ipv6_address") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + result["type"] = body.get("type") + + return result + + +def update_properties(module, response, 
array_index, exclude_output=False): + r = user_input_parameters(module) + + if not exclude_output: + v = navigate_value(response, ["read", "create_time"], array_index) + r["create_time"] = v + + v = r.get("dedicated_bandwidth") + v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output) + r["dedicated_bandwidth"] = v + + v = navigate_value(response, ["read", "enterprise_project_id"], + array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "ip_version"], array_index) + r["ip_version"] = v + + v = navigate_value(response, ["read", "public_ip_address"], array_index) + r["ipv4_address"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "public_ipv6_address"], + array_index) + r["ipv6_address"] = v + + v = navigate_value(response, ["read", "port_id"], array_index) + r["port_id"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "private_ip_address"], + array_index) + r["private_ip_address"] = v + + v = r.get("shared_bandwidth_id") + v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output) + r["shared_bandwidth_id"] = v + + v = navigate_value(response, ["read", "type"], array_index) + r["type"] = v + + return r + + +def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output): + v = navigate_value(d, ["read", "bandwidth_share_type"], array_index) + if not (v and v == "PER"): + return current_value + + result = current_value + if not result: + result = dict() + + if not exclude_output: + v = navigate_value(d, ["read", "bandwidth_id"], array_index) + if v is not None: + result["id"] = v + + v = navigate_value(d, ["read", "bandwidth_name"], array_index) + if v is not None: + result["name"] = v + + v = navigate_value(d, ["read", "bandwidth_size"], array_index) + if v is not None: + result["size"] = v + + return result if result else current_value + + +def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output): + v = navigate_value(d, ["read", "bandwidth_id"], array_index) + + v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index) + + return v if (v1 and v1 == "WHOLE") else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["publicips"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = expand_list_bandwidth_id(all_opts, None) + result["bandwidth_id"] = v + + v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None) + result["bandwidth_name"] = v + + result["bandwidth_share_type"] = None + + v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None) + result["bandwidth_size"] = v + + result["create_time"] = None + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["ip_version"], None) + result["ip_version"] = v + + v = navigate_value(all_opts, ["port_id"], None) + result["port_id"] = v + + result["private_ip_address"] = None + + v = navigate_value(all_opts, ["ipv4_address"], None) + result["public_ip_address"] = v + + result["public_ipv6_address"] = None + + result["status"] = None + + result["tenant_id"] = None + + v = navigate_value(all_opts, ["type"], None) + result["type"] = v + + return result + + +def expand_list_bandwidth_id(d, array_index): + v = 
navigate_value(d, ["dedicated_bandwidth"], array_index) + sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) + if v and sbwid: + raise Exception("don't input shared_bandwidth_id and " + "dedicated_bandwidth at same time") + + return sbwid + + +def fill_list_resp_body(body): + result = dict() + + result["bandwidth_id"] = body.get("bandwidth_id") + + result["bandwidth_name"] = body.get("bandwidth_name") + + result["bandwidth_share_type"] = body.get("bandwidth_share_type") + + result["bandwidth_size"] = body.get("bandwidth_size") + + result["create_time"] = body.get("create_time") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["ip_version"] = body.get("ip_version") + + result["port_id"] = body.get("port_id") + + result["private_ip_address"] = body.get("private_ip_address") + + result["public_ip_address"] = body.get("public_ip_address") + + result["public_ipv6_address"] = body.get("public_ipv6_address") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + result["type"] = body.get("type") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py deleted file mode 120000 index ffe33a4fe6..0000000000 --- a/plugins/modules/hwc_vpc_peering_connect.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_vpc_peering_connect.py \ No newline at end of file diff --git a/plugins/modules/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py new file mode 100644 index 0000000000..e5d410c327 --- /dev/null +++ b/plugins/modules/hwc_vpc_peering_connect.py @@ -0,0 +1,689 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or SPDX-License-Identifier: GPL-3.0-or-later +# https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_vpc_peering_connect +description: + - VPC peering management. +short_description: Creates a resource of VPC/PeeringConnect in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '15m' + local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + required: true + name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + required: true + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + required: true + suboptions: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + required: true + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. + type: str + required: false + description: + description: + - The description of vpc peering connection. 
+ type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +# create a peering connect +- name: Create a local vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_local" + register: vpc1 +- name: Create a peering vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_peering" + register: vpc2 +- name: Create a peering connect + community.general.hwc_vpc_peering_connect: + local_vpc_id: "{{ vpc1.id }}" + name: "ansible_network_peering_test" + peering_vpc: + vpc_id: "{{ vpc2.id }}" +""" + +RETURN = r""" +local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + returned: success +name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + returned: success +peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + returned: success + contains: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + returned: success + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. + type: str + returned: success +description: + description: + - The description of vpc peering connection. + type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + ), default=dict()), + local_vpc_id=dict(type='str', required=True), + name=dict(type='str', required=True), + peering_vpc=dict(type='dict', required=True, options=dict( + vpc_id=dict(type='str', required=True), + project_id=dict(type='str') + )), + description=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "description": module.params.get("description"), + "local_vpc_id": module.params.get("local_vpc_id"), + "name": module.params.get("name"), + "peering_vpc": module.params.get("peering_vpc"), + } + + +def create(config): + module = config.module + client = 
config.client(get_region(module), "network", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["peering", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + send_update_request(module, params, client) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "network", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "v2.0/vpc/peerings/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_peering_connect): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "network", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["local_vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + v = navigate_value(opts, ["name"]) + if v: + query_params.append("name=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "v2.0/vpc/peerings" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = expand_create_accept_vpc_info(opts, None) + if not is_empty_value(v): + params["accept_vpc_info"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_request_vpc_info(opts, None) + if not is_empty_value(v): + params["request_vpc_info"] = v + + if not params: + return params + + params = {"peering": params} + + return params + + +def expand_create_accept_vpc_info(d, array_index): + r = dict() + + v = navigate_value(d, ["peering_vpc", "project_id"], array_index) + if not is_empty_value(v): + r["tenant_id"] = v + + v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) + if not is_empty_value(v): + r["vpc_id"] = v + + return r + + +def 
expand_create_request_vpc_info(d, array_index): + r = dict() + + r["tenant_id"] = "" + + v = navigate_value(d, ["local_vpc_id"], array_index) + if not is_empty_value(v): + r["vpc_id"] = v + + return r + + +def send_create_request(module, params, client): + url = "v2.0/vpc/peerings" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "peering_id": ["peering", "id"], + } + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} + + url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["peering", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["PENDING_ACCEPTANCE"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_peering_connect): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"peering": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["peering"], None) + + +def fill_read_resp_body(body): + result = dict() + + v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info")) + result["accept_vpc_info"] = v + + result["description"] = body.get("description") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_read_resp_request_vpc_info(body.get("request_vpc_info")) + result["request_vpc_info"] = v + + result["status"] = body.get("status") + + return result + + +def fill_read_resp_accept_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_read_resp_request_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, 
exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], + array_index) + r["local_vpc_id"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = r.get("peering_vpc") + v = flatten_peering_vpc(response, array_index, v, exclude_output) + r["peering_vpc"] = v + + return r + + +def flatten_peering_vpc(d, array_index, current_value, exclude_output): + result = current_value + has_init_value = True + if not result: + result = dict() + has_init_value = False + + v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], + array_index) + result["project_id"] = v + + v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index) + result["vpc_id"] = v + + if has_init_value: + return result + + for v in result.values(): + if v is not None: + return result + return current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["peerings"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = expand_list_accept_vpc_info(all_opts, None) + result["accept_vpc_info"] = v + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + v = expand_list_request_vpc_info(all_opts, None) + result["request_vpc_info"] = v + + result["status"] = None + + return result + + +def expand_list_accept_vpc_info(d, array_index): + r = dict() + + v = navigate_value(d, ["peering_vpc", "project_id"], array_index) + r["tenant_id"] = v + + v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_request_vpc_info(d, array_index): + r = dict() + + r["tenant_id"] = None + + v = navigate_value(d, ["local_vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def fill_list_resp_body(body): + result = dict() + + v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info")) + result["accept_vpc_info"] = v + + result["description"] = body.get("description") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_list_resp_request_vpc_info(body.get("request_vpc_info")) + result["request_vpc_info"] = v + + result["status"] = body.get("status") + + return result + + +def fill_list_resp_accept_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_list_resp_request_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py deleted file mode 120000 index e127fe59b4..0000000000 --- a/plugins/modules/hwc_vpc_port.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_vpc_port.py \ No newline at end of file diff --git a/plugins/modules/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py new file 
mode 100644
index 0000000000..54bea0f249
--- /dev/null
+++ b/plugins/modules/hwc_vpc_port.py
@@ -0,0 +1,1156 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = r"""
+module: hwc_vpc_port
+description:
+  - VPC port management.
+short_description: Creates a resource of VPC/Port in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+  - keystoneauth1 >= 3.6.0
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Whether the given object should exist in Huawei Cloud.
+    type: str
+    choices: ['present', 'absent']
+    default: 'present'
+  timeouts:
+    description:
+      - The timeouts for each operation.
+    type: dict
+    default: {}
+    suboptions:
+      create:
+        description:
+          - The timeout for the create operation.
+        type: str
+        default: '15m'
+  subnet_id:
+    description:
+      - Specifies the ID of the subnet to which the port belongs.
+    type: str
+    required: true
+  admin_state_up:
+    description:
+      - Specifies the administrative state of the port.
+    type: bool
+    required: false
+  allowed_address_pairs:
+    description:
+      - Specifies a set of zero or more allowed address pairs.
+    required: false
+    type: list
+    elements: dict
+    suboptions:
+      ip_address:
+        description:
+          - Specifies the IP address. It cannot be set to 0.0.0.0. Configure an independent security group for the port if
+            a large CIDR block (subnet mask less than 24) is configured for the parameter allowed_address_pairs.
+        type: str
+        required: false
+      mac_address:
+        description:
+          - Specifies the MAC address.
+        type: str
+        required: false
+  extra_dhcp_opts:
+    description:
+      - Specifies the extended option of DHCP.
+    type: list
+    elements: dict
+    required: false
+    suboptions:
+      name:
+        description:
+          - Specifies the option name.
+        type: str
+        required: false
+      value:
+        description:
+          - Specifies the option value.
+        type: str
+        required: false
+  ip_address:
+    description:
+      - Specifies the port IP address.
+    type: str
+    required: false
+  name:
+    description:
+      - Specifies the port name. The value can contain no more than 255 characters.
+    type: str
+    required: false
+  security_groups:
+    description:
+      - Specifies the IDs of the security groups.
+    type: list
+    elements: str
+    required: false
+extends_documentation_fragment:
+  - community.general.hwc
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+# create a port
+- name: Create vpc
+  hwc_network_vpc:
+    cidr: "192.168.100.0/24"
+    name: "ansible_network_vpc_test"
+  register: vpc
+- name: Create subnet
+  hwc_vpc_subnet:
+    gateway_ip: "192.168.100.32"
+    name: "ansible_network_subnet_test"
+    dhcp_enable: true
+    vpc_id: "{{ vpc.id }}"
+    cidr: "192.168.100.0/26"
+  register: subnet
+- name: Create a port
+  community.general.hwc_vpc_port:
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.33"
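+
+# A minimal additional sketch (not part of the original examples): the port
+# created above can be removed again through the documented state option; the
+# module looks the port up by the same option values.
+- name: Delete the port
+  community.general.hwc_vpc_port:
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.33"
+    state: absent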
+"""
+
+RETURN = r"""
+subnet_id:
+  description:
+    - Specifies the ID of the subnet to which the port belongs.
+  type: str
+  returned: success
+admin_state_up:
+  description:
+    - Specifies the administrative state of the port.
+  type: bool
+  returned: success
+allowed_address_pairs:
+  description:
+    - Specifies a set of zero or more allowed address pairs.
+  type: list
+  returned: success
+  contains:
+    ip_address:
+      description:
+        - Specifies the IP address. It cannot be set to 0.0.0.0. Configure an independent security group for the port if a
+          large CIDR block (subnet mask less than 24) is configured for the parameter allowed_address_pairs.
+      type: str
+      returned: success
+    mac_address:
+      description:
+        - Specifies the MAC address.
+      type: str
+      returned: success
+extra_dhcp_opts:
+  description:
+    - Specifies the extended option of DHCP.
+  type: list
+  returned: success
+  contains:
+    name:
+      description:
+        - Specifies the option name.
+      type: str
+      returned: success
+    value:
+      description:
+        - Specifies the option value.
+      type: str
+      returned: success
+ip_address:
+  description:
+    - Specifies the port IP address.
+  type: str
+  returned: success
+name:
+  description:
+    - Specifies the port name. The value can contain no more than 255 characters.
+  type: str
+  returned: success
+security_groups:
+  description:
+    - Specifies the IDs of the security groups.
+  type: list
+  returned: success
+mac_address:
+  description:
+    - Specifies the port MAC address.
+  type: str
+  returned: success
+"""
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+    Config, HwcClientException, HwcClientException404, HwcModule,
+    are_different_dicts, build_path, get_region, is_empty_value,
+    navigate_value, wait_to_finish)
+
+
+def build_module():
+    return HwcModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'],
+                       type='str'),
+            timeouts=dict(type='dict', options=dict(
+                create=dict(default='15m', type='str'),
+            ), default=dict()),
+            subnet_id=dict(type='str', required=True),
+            admin_state_up=dict(type='bool'),
+            allowed_address_pairs=dict(
+                type='list', elements='dict',
+                options=dict(
+                    ip_address=dict(type='str'),
+                    mac_address=dict(type='str')
+                ),
+            ),
+            extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
+                name=dict(type='str'),
+                value=dict(type='str')
+            )),
+            ip_address=dict(type='str'),
+            name=dict(type='str'),
+            security_groups=dict(type='list', elements='str')
+        ),
+        supports_check_mode=True,
+    )
+
+
+def main():
+    """Main function"""
+
+    module = build_module()
+    config = Config(module, "vpc")
+
+    try:
+        resource = None
+        if module.params['id']:
+            resource = True
+        else:
+            v = search_resource(config)
+            if len(v) > 1:
+                raise Exception("Found more than one resource(%s)" % ", ".join([
+                    navigate_value(i, ["id"]) for i in v]))
+
+            if len(v) == 1:
+                resource = v[0]
+                module.params['id'] = navigate_value(resource, ["id"])
+
+        result = {}
+        changed = False
+        if module.params['state'] == 'present':
+            if resource is None:
+                if not module.check_mode:
+                    create(config)
+                changed = True
+
+            current = read_resource(config, exclude_output=True)
+            expect = user_input_parameters(module)
+            if are_different_dicts(expect, current):
+                if not module.check_mode:
+                    update(config)
+                changed = True
+
+            result = read_resource(config)
+            result['id'] = module.params.get('id')
+        else:
+            if resource:
+                if not module.check_mode:
+                    delete(config)
+                changed = True
+
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+    else:
+        result['changed'] = changed
+        module.exit_json(**result)
+
+
+def user_input_parameters(module):
+    return {
+        "admin_state_up": module.params.get("admin_state_up"),
+        "allowed_address_pairs": module.params.get("allowed_address_pairs"),
+
"extra_dhcp_opts": module.params.get("extra_dhcp_opts"), + "ip_address": module.params.get("ip_address"), + "name": module.params.get("name"), + "security_groups": module.params.get("security_groups"), + "subnet_id": module.params.get("subnet_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["port", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + send_update_request(module, params, client) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "ports/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_port): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + array_index = { + "read.fixed_ips": 0, + } + + return update_properties(module, res, array_index, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["subnet_id"]) + if v: + query_params.append("network_id=" + str(v)) + + v = navigate_value(opts, ["name"]) + if v: + query_params.append("name=" + str(v)) + + v = navigate_value(opts, ["admin_state_up"]) + if v: + query_params.append("admin_state_up=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "ports" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["admin_state_up"], None) + if not is_empty_value(v): + params["admin_state_up"] = v + + v = expand_create_allowed_address_pairs(opts, None) + if not is_empty_value(v): + params["allowed_address_pairs"] = v + + v = expand_create_extra_dhcp_opts(opts, None) + if not is_empty_value(v): + params["extra_dhcp_opts"] = v + + v = expand_create_fixed_ips(opts, None) + if not is_empty_value(v): + 
params["fixed_ips"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["subnet_id"], None) + if not is_empty_value(v): + params["network_id"] = v + + v = navigate_value(opts, ["security_groups"], None) + if not is_empty_value(v): + params["security_groups"] = v + + if not params: + return params + + params = {"port": params} + + return params + + +def expand_create_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + if not is_empty_value(v): + transformed["mac_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + if not is_empty_value(v): + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + if not is_empty_value(v): + transformed["opt_value"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_fixed_ips(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = navigate_value(d, ["ip_address"], new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def send_create_request(module, params, client): + url = "ports" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "port_id": ["port", "id"], + } + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} + + url = build_path(module, "ports/{port_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["port", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + ["BUILD"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_port): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = expand_update_allowed_address_pairs(opts, None) + if v is not None: + params["allowed_address_pairs"] = v + + v = expand_update_extra_dhcp_opts(opts, None) + if v is not None: + params["extra_dhcp_opts"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): 
+ params["name"] = v + + v = navigate_value(opts, ["security_groups"], None) + if not is_empty_value(v): + params["security_groups"] = v + + if not params: + return params + + params = {"port": params} + + return params + + +def expand_update_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + if not is_empty_value(v): + transformed["mac_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_update_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + if not is_empty_value(v): + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + if not is_empty_value(v): + transformed["opt_value"] = v + + if transformed: + req.append(transformed) + + return req + + +def send_update_request(module, params, client): + url = build_path(module, "ports/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "ports/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "ports/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["port"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["admin_state_up"] = body.get("admin_state_up") + + v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs")) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = body.get("binding_host_id") + + result["binding_vnic_type"] = body.get("binding_vnic_type") + + result["device_id"] = body.get("device_id") + + result["device_owner"] = body.get("device_owner") + + result["dns_name"] = body.get("dns_name") + + v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) + result["extra_dhcp_opts"] = v + + v = fill_read_resp_fixed_ips(body.get("fixed_ips")) + result["fixed_ips"] = v + + result["id"] = body.get("id") + + result["mac_address"] = body.get("mac_address") + + result["name"] = body.get("name") + + result["network_id"] = body.get("network_id") + + result["security_groups"] = body.get("security_groups") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + return result + + +def 
fill_read_resp_allowed_address_pairs(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + val["mac_address"] = item.get("mac_address") + + result.append(val) + + return result + + +def fill_read_resp_extra_dhcp_opts(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["opt_name"] = item.get("opt_name") + + val["opt_value"] = item.get("opt_value") + + result.append(val) + + return result + + +def fill_read_resp_fixed_ips(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + result.append(val) + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "admin_state_up"], array_index) + r["admin_state_up"] = v + + v = r.get("allowed_address_pairs") + v = flatten_allowed_address_pairs(response, array_index, v, exclude_output) + r["allowed_address_pairs"] = v + + v = r.get("extra_dhcp_opts") + v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output) + r["extra_dhcp_opts"] = v + + v = navigate_value(response, ["read", "fixed_ips", "ip_address"], + array_index) + r["ip_address"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "mac_address"], array_index) + r["mac_address"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "security_groups"], array_index) + r["security_groups"] = v + + v = navigate_value(response, ["read", "network_id"], array_index) + r["subnet_id"] = v + + return r + + +def flatten_allowed_address_pairs(d, array_index, + current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "allowed_address_pairs"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.allowed_address_pairs"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], + new_array_index) + val["ip_address"] = v + + v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], + new_array_index) + val["mac_address"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "extra_dhcp_opts"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.extra_dhcp_opts"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], + new_array_index) + val["name"] = v + + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], + new_array_index) + val["value"] = v + + if 
len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["ports"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["admin_state_up"], None) + result["admin_state_up"] = v + + v = expand_list_allowed_address_pairs(all_opts, None) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = None + + result["binding_vnic_type"] = None + + result["device_id"] = None + + result["device_owner"] = None + + result["dns_name"] = None + + v = expand_list_extra_dhcp_opts(all_opts, None) + result["extra_dhcp_opts"] = v + + v = expand_list_fixed_ips(all_opts, None) + result["fixed_ips"] = v + + result["id"] = None + + result["mac_address"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + v = navigate_value(all_opts, ["subnet_id"], None) + result["network_id"] = v + + v = navigate_value(all_opts, ["security_groups"], None) + result["security_groups"] = v + + result["status"] = None + + result["tenant_id"] = None + + return result + + +def expand_list_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + + n = len(v) if v else 1 + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + transformed["mac_address"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def expand_list_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + + n = len(v) if v else 1 + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + transformed["opt_value"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def expand_list_fixed_ips(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = navigate_value(d, ["ip_address"], new_array_index) + transformed["ip_address"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def fill_list_resp_body(body): + result = dict() + + result["admin_state_up"] = body.get("admin_state_up") + + v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs")) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = body.get("binding_host_id") + + result["binding_vnic_type"] = body.get("binding_vnic_type") + + 
result["device_id"] = body.get("device_id") + + result["device_owner"] = body.get("device_owner") + + result["dns_name"] = body.get("dns_name") + + v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) + result["extra_dhcp_opts"] = v + + v = fill_list_resp_fixed_ips(body.get("fixed_ips")) + result["fixed_ips"] = v + + result["id"] = body.get("id") + + result["mac_address"] = body.get("mac_address") + + result["name"] = body.get("name") + + result["network_id"] = body.get("network_id") + + result["security_groups"] = body.get("security_groups") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + return result + + +def fill_list_resp_allowed_address_pairs(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + val["mac_address"] = item.get("mac_address") + + result.append(val) + + return result + + +def fill_list_resp_extra_dhcp_opts(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["opt_name"] = item.get("opt_name") + + val["opt_value"] = item.get("opt_value") + + result.append(val) + + return result + + +def fill_list_resp_fixed_ips(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + result.append(val) + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py deleted file mode 120000 index 383b482483..0000000000 --- a/plugins/modules/hwc_vpc_private_ip.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_vpc_private_ip.py \ No newline at end of file diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py new file mode 100644 index 0000000000..664b4c84e4 --- /dev/null +++ b/plugins/modules/hwc_vpc_private_ip.py @@ -0,0 +1,353 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_vpc_private_ip +description: + - VPC private IP management. +short_description: Creates a resource of VPC/PrivateIP in Huawei Cloud +notes: + - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection. + - O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with this options exists, execution + is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. Cannot be changed after creating the private + IP. + type: str + required: true + ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. 
If it is not specified, the + system automatically assigns an IP address. Cannot be changed after creating the private IP. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +# create a private IP +- name: Create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a private IP + community.general.hwc_vpc_private_ip: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" +""" + +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. + type: str + returned: success +ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. + type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + subnet_id=dict(type='str', required=True), + ip_address=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + raise Exception( + "Cannot change option from (%s) to (%s)of an" + " existing resource.(%s)" % (current, expect, module.params.get('id'))) + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "ip_address": module.params.get("ip_address"), + "subnet_id": module.params.get("subnet_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["privateips", "id"], + {"privateips": 0}) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r 
= send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = build_path(module, "subnets/{subnet_id}/privateips") + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["ip_address"], None) + if not is_empty_value(v): + params["ip_address"] = v + + v = navigate_value(opts, ["subnet_id"], None) + if not is_empty_value(v): + params["subnet_id"] = v + + if not params: + return params + + params = {"privateips": [params]} + + return params + + +def send_create_request(module, params, client): + url = "privateips" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "privateips/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "privateips/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["privateip"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["id"] = body.get("id") + + result["ip_address"] = body.get("ip_address") + + result["subnet_id"] = body.get("subnet_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "ip_address"], array_index) + r["ip_address"] = v + + v = navigate_value(response, ["read", "subnet_id"], array_index) + r["subnet_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["privateips"], None) + + +def _build_identity_object(all_opts): + result = dict() + + result["id"] = None + + v = navigate_value(all_opts, ["ip_address"], None) + result["ip_address"] = v + + v = navigate_value(all_opts, ["subnet_id"], None) + result["subnet_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["id"] = body.get("id") + + result["ip_address"] = body.get("ip_address") + + result["subnet_id"] = body.get("subnet_id") + + return result + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py
deleted file mode 120000
index c89090fe5d..0000000000
--- a/plugins/modules/hwc_vpc_route.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/huawei/hwc_vpc_route.py
\ No newline at end of file
diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py
new file mode 100644
index 0000000000..dfb1aea61b
--- /dev/null
+++ b/plugins/modules/hwc_vpc_route.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = r"""
+module: hwc_vpc_route
+description:
+  - VPC route management.
+short_description: Creates a resource of VPC/Route in Huawei Cloud
+notes:
+  - If the O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection.
+  - O(destination), O(vpc_id), O(type), and O(next_hop) are used for route selection. If more than one route with these options
+    exists, execution is aborted.
+  - No parameter supports updating. If one of the options is changed, the module creates a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+  - keystoneauth1 >= 3.6.0
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Whether the given object should exist in Huawei Cloud.
+    type: str
+    choices: ['present', 'absent']
+    default: 'present'
+  destination:
+    description:
+      - Specifies the destination IP address or CIDR block.
+    type: str
+    required: true
+  next_hop:
+    description:
+      - Specifies the next hop. The value is a VPC peering connection ID.
+    type: str
+    required: true
+  vpc_id:
+    description:
+      - Specifies the VPC ID to which the route is added.
+    type: str
+    required: true
+  type:
+    description:
+      - Specifies the type of route.
+    type: str
+    required: false
+    default: 'peering'
+extends_documentation_fragment:
+  - community.general.hwc
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+# create a peering connect
+- name: Create a local vpc
+  hwc_network_vpc:
+    cidr: "192.168.0.0/16"
+    name: "ansible_network_vpc_test_local"
+  register: vpc1
+- name: Create a peering vpc
+  hwc_network_vpc:
+    cidr: "192.168.0.0/16"
+    name: "ansible_network_vpc_test_peering"
+  register: vpc2
+- name: Create a peering connect
+  hwc_vpc_peering_connect:
+    local_vpc_id: "{{ vpc1.id }}"
+    name: "ansible_network_peering_test"
+    filters:
+      - "name"
+    peering_vpc:
+      vpc_id: "{{ vpc2.id }}"
+  register: connect
+- name: Create a route
+  community.general.hwc_vpc_route:
+    vpc_id: "{{ vpc1.id }}"
+    destination: "192.168.0.0/16"
+    next_hop: "{{ connect.id }}"
+"""
+
+RETURN = r"""
+id:
+  description:
+    - UUID of the route.
+  type: str
+  returned: success
+destination:
+  description:
+    - Specifies the destination IP address or CIDR block.
+  type: str
+  returned: success
+next_hop:
+  description:
+    - Specifies the next hop. The value is a VPC peering connection ID.
+  type: str
+  returned: success
+vpc_id:
+  description:
+    - Specifies the VPC ID to which the route is added.
+  type: str
+  returned: success
+type:
+  description:
+    - Specifies the type of route.
+ type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + destination=dict(type='str', required=True), + next_hop=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + type=dict(type='str', default='peering'), + id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get("id"): + resource = get_resource_by_id(config) + if module.params['state'] == 'present': + opts = user_input_parameters(module) + if are_different_dicts(resource, opts): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing route.(%s)" % (resource, opts, + config.module.params.get( + 'id'))) + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = update_properties(module, {"read": v[0]}, None) + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + resource = create(config) + changed = True + + result = resource + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "destination": module.params.get("destination"), + "next_hop": module.params.get("next_hop"), + "type": module.params.get("type"), + "vpc_id": module.params.get("vpc_id"), + "id": module.params.get("id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["route", "id"]) + + result = update_properties(module, {"read": fill_resp_body(r)}, None) + return result + + +def delete(config): + module = config.module + client = config.client(get_region(module), "network", "project") + + send_delete_request(module, None, client) + + +def get_resource_by_id(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "network", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_resp_body(r) + + result = update_properties(module, res, None, exclude_output) + return result + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["type"]) + if v: + query_params.append("type=" + str(v)) + + v = navigate_value(opts, ["destination"]) + if v: + query_params.append("destination=" + str(v)) + + v = navigate_value(opts, ["vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = 
user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "v2.0/vpc/routes" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["destination"], None) + if not is_empty_value(v): + params["destination"] = v + + v = navigate_value(opts, ["next_hop"], None) + if not is_empty_value(v): + params["nexthop"] = v + + v = navigate_value(opts, ["type"], None) + if not is_empty_value(v): + params["type"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"route": params} + + return params + + +def send_create_request(module, params, client): + url = "v2.0/vpc/routes" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "v2.0/vpc/routes/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "v2.0/vpc/routes/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["route"], None) + + +def fill_resp_body(body): + result = dict() + + result["destination"] = body.get("destination") + + result["id"] = body.get("id") + + result["nexthop"] = body.get("nexthop") + + result["type"] = body.get("type") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "destination"], array_index) + r["destination"] = v + + v = navigate_value(response, ["read", "nexthop"], array_index) + r["next_hop"] = v + + v = navigate_value(response, ["read", "type"], array_index) + r["type"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + v = navigate_value(response, ["read", "id"], array_index) + r["id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["routes"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["destination"], None) + result["destination"] = v + + v = navigate_value(all_opts, ["id"], None) + result["id"] = v + + v = navigate_value(all_opts, ["next_hop"], None) + result["nexthop"] = v + + v = navigate_value(all_opts, ["type"], None) + result["type"] = v + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def 
fill_list_resp_body(body):
+    result = dict()
+
+    result["destination"] = body.get("destination")
+
+    result["id"] = body.get("id")
+
+    result["nexthop"] = body.get("nexthop")
+
+    result["type"] = body.get("type")
+
+    result["vpc_id"] = body.get("vpc_id")
+
+    return result
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py
deleted file mode 120000
index 2ca08c79ea..0000000000
--- a/plugins/modules/hwc_vpc_security_group.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/huawei/hwc_vpc_security_group.py
\ No newline at end of file
diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py
new file mode 100644
index 0000000000..d73318666c
--- /dev/null
+++ b/plugins/modules/hwc_vpc_security_group.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = r"""
+module: hwc_vpc_security_group
+description:
+  - VPC security group management.
+short_description: Creates a resource of VPC/SecurityGroup in Huawei Cloud
+notes:
+  - If the O(id) option is provided, it takes precedence over O(name), O(enterprise_project_id), and O(vpc_id) for security group
+    selection.
+  - O(name), O(enterprise_project_id), and O(vpc_id) are used for security group selection. If more than one security group
+    with these options exists, execution is aborted.
+  - No parameter supports updating. If one of the options is changed, the module creates a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+  - keystoneauth1 >= 3.6.0
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Whether the given object should exist in Huawei Cloud.
+    type: str
+    choices: ['present', 'absent']
+    default: 'present'
+  name:
+    description:
+      - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores
+        (V(_)), hyphens (V(-)), and periods (V(.)).
+    type: str
+    required: true
+  enterprise_project_id:
+    description:
+      - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the
+        security group.
+    type: str
+    required: false
+  vpc_id:
+    description:
+      - Specifies the resource ID of the VPC to which the security group belongs.
+    type: str
+    required: false
+extends_documentation_fragment:
+  - community.general.hwc
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+# create a security group
+- name: Create a security group
+  community.general.hwc_vpc_security_group:
+    name: "ansible_network_security_group_test"
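+
+# A minimal additional sketch (not part of the original examples): the same
+# group can be removed again through the documented state option.
+- name: Delete the security group
+  community.general.hwc_vpc_security_group:
+    name: "ansible_network_security_group_test"
+    state: absent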
+"""
+
+RETURN = r"""
+name:
+  description:
+    - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores
+      (V(_)), hyphens (V(-)), and periods (V(.)).
+  type: str
+  returned: success
+enterprise_project_id:
+  description:
+    - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the security
+      group.
+  type: str
+  returned: success
+vpc_id:
+  description:
+    - Specifies the resource ID of the VPC to which the security group belongs.
+  type: str
+  returned: success
+rules:
+  description:
+    - Specifies the security group rule, which ensures that resources in the security group can communicate with one another.
+  type: complex
+  returned: success
+  contains:
+    description:
+      description:
+        - Provides supplementary information about the security group rule.
+      type: str
+      returned: success
+    direction:
+      description:
+        - Specifies the direction of access control. The value can be egress or ingress.
+      type: str
+      returned: success
+    ethertype:
+      description:
+        - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+      type: str
+      returned: success
+    id:
+      description:
+        - Specifies the security group rule ID.
+      type: str
+      returned: success
+    port_range_max:
+      description:
+        - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be
+          smaller than the port_range_min value. An empty value indicates all ports.
+      type: int
+      returned: success
+    port_range_min:
+      description:
+        - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max
+          value. An empty value indicates all ports.
+      type: int
+      returned: success
+    protocol:
+      description:
+        - Specifies the protocol type. The value can be icmp, tcp, udp, or others. If the parameter is left blank, the security
+          group supports all protocols.
+      type: str
+      returned: success
+    remote_address_group_id:
+      description:
+        - Specifies the ID of the remote IP address group.
+      type: str
+      returned: success
+    remote_group_id:
+      description:
+        - Specifies the ID of the peer security group.
+      type: str
+      returned: success
+    remote_ip_prefix:
+      description:
+        - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source
+          IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address.
+ type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + name=dict(type='str', required=True), + enterprise_project_id=dict(type='str'), + vpc_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get("id"): + resource = read_resource(config) + if module.params['state'] == 'present': + check_resource_option(resource, module) + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = update_properties(module, {"read": v[0]}, None) + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + resource = create(config) + changed = True + + result = resource + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "enterprise_project_id": module.params.get("enterprise_project_id"), + "name": module.params.get("name"), + "vpc_id": module.params.get("vpc_id"), + "id": module.params.get("id"), + } + + +def check_resource_option(resource, module): + opts = user_input_parameters(module) + + resource = { + "enterprise_project_id": resource.get("enterprise_project_id"), + "name": resource.get("name"), + "vpc_id": resource.get("vpc_id"), + "id": resource.get("id"), + } + + if are_different_dicts(resource, opts): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing security group(%s)." 
% (resource, opts, + module.params.get('id'))) + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["security_group", "id"]) + + result = update_properties(module, {"read": fill_read_resp_body(r)}, None) + return result + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enterprise_project_id"]) + if v: + query_params.append("enterprise_project_id=" + str(v)) + + v = navigate_value(opts, ["vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "security-groups" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"security_group": params} + + return params + + +def send_create_request(module, params, client): + url = "security-groups" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "security-groups/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "security-groups/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + 
result["name"] = body.get("name") + + v = fill_read_resp_security_group_rules(body.get("security_group_rules")) + result["security_group_rules"] = v + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def fill_read_resp_security_group_rules(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["description"] = item.get("description") + + val["direction"] = item.get("direction") + + val["ethertype"] = item.get("ethertype") + + val["id"] = item.get("id") + + val["port_range_max"] = item.get("port_range_max") + + val["port_range_min"] = item.get("port_range_min") + + val["protocol"] = item.get("protocol") + + val["remote_address_group_id"] = item.get("remote_address_group_id") + + val["remote_group_id"] = item.get("remote_group_id") + + val["remote_ip_prefix"] = item.get("remote_ip_prefix") + + val["security_group_id"] = item.get("security_group_id") + + result.append(val) + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "enterprise_project_id"], + array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + if not exclude_output: + v = r.get("rules") + v = flatten_rules(response, array_index, v, exclude_output) + r["rules"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + v = navigate_value(response, ["read", "id"], array_index) + r["id"] = v + + return r + + +def flatten_rules(d, array_index, current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "security_group_rules"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.security_group_rules"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "description"], + new_array_index) + val["description"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "direction"], + new_array_index) + val["direction"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "ethertype"], + new_array_index) + val["ethertype"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "id"], + new_array_index) + val["id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "port_range_max"], + new_array_index) + val["port_range_max"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "port_range_min"], + new_array_index) + val["port_range_min"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "protocol"], + new_array_index) + val["protocol"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"], + new_array_index) + val["remote_address_group_id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"], + new_array_index) + val["remote_group_id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], + 
new_array_index) + val["remote_ip_prefix"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_groups"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["security_group_rules"] = None + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_list_resp_security_group_rules(body.get("security_group_rules")) + result["security_group_rules"] = v + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def fill_list_resp_security_group_rules(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["description"] = item.get("description") + + val["direction"] = item.get("direction") + + val["ethertype"] = item.get("ethertype") + + val["id"] = item.get("id") + + val["port_range_max"] = item.get("port_range_max") + + val["port_range_min"] = item.get("port_range_min") + + val["protocol"] = item.get("protocol") + + val["remote_address_group_id"] = item.get("remote_address_group_id") + + val["remote_group_id"] = item.get("remote_group_id") + + val["remote_ip_prefix"] = item.get("remote_ip_prefix") + + val["security_group_id"] = item.get("security_group_id") + + result.append(val) + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py deleted file mode 120000 index 0fa1ed50f7..0000000000 --- a/plugins/modules/hwc_vpc_security_group_rule.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_vpc_security_group_rule.py \ No newline at end of file diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py new file mode 100644 index 0000000000..153950fb2d --- /dev/null +++ b/plugins/modules/hwc_vpc_security_group_rule.py @@ -0,0 +1,549 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_vpc_security_group_rule +description: + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroupRule in Huawei Cloud +notes: + - If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection. + - O(security_group_id) is used for security group rule selection. 
If more than one security group rule with this options + exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + required: true + security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + required: true + description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + required: false + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. If you do not set this parameter, IPv4 is used by + default. + type: str + required: false + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + required: false + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + required: false + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + required: false + remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + required: false + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. + The value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +# create a security group rule +- name: Create a security group + hwc_vpc_security_group: + name: "ansible_network_security_group_test" + register: sg +- name: Create a security group rule + community.general.hwc_vpc_security_group_rule: + direction: "ingress" + protocol: "tcp" + ethertype: "IPv4" + port_range_max: 22 + security_group_id: "{{ sg.id }}" + port_range_min: 22 + remote_ip_prefix: "0.0.0.0/0" +""" + +RETURN = r""" +direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success +security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + returned: success +description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. 
+ type: str + returned: success +ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. If you do not set this parameter, IPv4 is used by + default. + type: str + returned: success +port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be smaller + than the port_range_min value. An empty value indicates all ports. + type: int + returned: success +port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success +protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + returned: success +remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + returned: success +remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. The + value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. + type: str + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + direction=dict(type='str', required=True), + security_group_id=dict(type='str', required=True), + description=dict(type='str'), + ethertype=dict(type='str'), + port_range_max=dict(type='int'), + port_range_min=dict(type='int'), + protocol=dict(type='str'), + remote_group_id=dict(type='str'), + remote_ip_prefix=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing security group(%s)." 
% (current, expect, module.params.get('id'))) + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "description": module.params.get("description"), + "direction": module.params.get("direction"), + "ethertype": module.params.get("ethertype"), + "port_range_max": module.params.get("port_range_max"), + "port_range_min": module.params.get("port_range_min"), + "protocol": module.params.get("protocol"), + "remote_group_id": module.params.get("remote_group_id"), + "remote_ip_prefix": module.params.get("remote_ip_prefix"), + "security_group_id": module.params.get("security_group_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["security_group_rule", "id"]) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + v = navigate_value(opts, ["security_group_id"]) + if v: + query_link += "&security_group_id=" + str(v) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "security-group-rules" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["direction"], None) + if not is_empty_value(v): + params["direction"] = v + + v = navigate_value(opts, ["ethertype"], None) + if not is_empty_value(v): + params["ethertype"] = v + + v = navigate_value(opts, ["port_range_max"], None) + if not is_empty_value(v): + params["port_range_max"] = v + + v = navigate_value(opts, ["port_range_min"], None) + if not is_empty_value(v): + params["port_range_min"] = v + + v = navigate_value(opts, ["protocol"], None) + if not is_empty_value(v): + params["protocol"] = v + + v = navigate_value(opts, ["remote_group_id"], None) + if not is_empty_value(v): + params["remote_group_id"] = v + + v = navigate_value(opts, ["remote_ip_prefix"], None) + if not is_empty_value(v): + params["remote_ip_prefix"] = v + + v = navigate_value(opts, ["security_group_id"], None) + if not is_empty_value(v): + params["security_group_id"] = 
v + + if not params: + return params + + params = {"security_group_rule": params} + + return params + + +def send_create_request(module, params, client): + url = "security-group-rules" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "security-group-rules/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "security-group-rules/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group_rule"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["description"] = body.get("description") + + result["direction"] = body.get("direction") + + result["ethertype"] = body.get("ethertype") + + result["id"] = body.get("id") + + result["port_range_max"] = body.get("port_range_max") + + result["port_range_min"] = body.get("port_range_min") + + result["protocol"] = body.get("protocol") + + result["remote_address_group_id"] = body.get("remote_address_group_id") + + result["remote_group_id"] = body.get("remote_group_id") + + result["remote_ip_prefix"] = body.get("remote_ip_prefix") + + result["security_group_id"] = body.get("security_group_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "direction"], array_index) + r["direction"] = v + + v = navigate_value(response, ["read", "ethertype"], array_index) + r["ethertype"] = v + + v = navigate_value(response, ["read", "port_range_max"], array_index) + r["port_range_max"] = v + + v = navigate_value(response, ["read", "port_range_min"], array_index) + r["port_range_min"] = v + + v = navigate_value(response, ["read", "protocol"], array_index) + r["protocol"] = v + + v = navigate_value(response, ["read", "remote_group_id"], array_index) + r["remote_group_id"] = v + + v = navigate_value(response, ["read", "remote_ip_prefix"], array_index) + r["remote_ip_prefix"] = v + + v = navigate_value(response, ["read", "security_group_id"], array_index) + r["security_group_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group_rules"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + v = navigate_value(all_opts, ["direction"], None) + result["direction"] = v + + v = navigate_value(all_opts, ["ethertype"], None) + result["ethertype"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["port_range_max"], None) + result["port_range_max"] = v + + v = navigate_value(all_opts, 
["port_range_min"], None) + result["port_range_min"] = v + + v = navigate_value(all_opts, ["protocol"], None) + result["protocol"] = v + + result["remote_address_group_id"] = None + + v = navigate_value(all_opts, ["remote_group_id"], None) + result["remote_group_id"] = v + + v = navigate_value(all_opts, ["remote_ip_prefix"], None) + result["remote_ip_prefix"] = v + + v = navigate_value(all_opts, ["security_group_id"], None) + result["security_group_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["description"] = body.get("description") + + result["direction"] = body.get("direction") + + result["ethertype"] = body.get("ethertype") + + result["id"] = body.get("id") + + result["port_range_max"] = body.get("port_range_max") + + result["port_range_min"] = body.get("port_range_min") + + result["protocol"] = body.get("protocol") + + result["remote_address_group_id"] = body.get("remote_address_group_id") + + result["remote_group_id"] = body.get("remote_group_id") + + result["remote_ip_prefix"] = body.get("remote_ip_prefix") + + result["security_group_id"] = body.get("security_group_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py deleted file mode 120000 index ff98e8f654..0000000000 --- a/plugins/modules/hwc_vpc_subnet.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/huawei/hwc_vpc_subnet.py \ No newline at end of file diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py new file mode 100644 index 0000000000..316ed39c1f --- /dev/null +++ b/plugins/modules/hwc_vpc_subnet.py @@ -0,0 +1,722 @@ +#!/usr/bin/python +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = r""" +module: hwc_vpc_subnet +description: + - Subnet management. +short_description: Creates a resource of VPC/Subnet in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '15m' + update: + description: + - The timeouts for update operation. + type: str + default: '15m' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. Cannot be changed after creating the subnet. + type: str + required: true + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. Cannot be changed after creating + the subnet. + type: str + required: true + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). 
+ type: str + required: true + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: true + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: false + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false(disabled), and default + value is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and + passwords cannot be injected using Cloud-init. + type: bool + required: false + dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head is used first. + type: list + elements: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes +""" + +EXAMPLES = r""" +# create subnet +- name: Create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create subnet + community.general.hwc_vpc_subnet: + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true +""" + +RETURN = r""" +cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. + type: str + returned: success +gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. + type: str + returned: success +name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. + type: str + returned: success +availability_zone: + description: + - Specifies the AZ to which the subnet belongs. + type: str + returned: success +dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false(disabled), and default value + is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and passwords + cannot be injected using Cloud-init. + type: bool + returned: success +dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head is used first. 
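+  # the first entry becomes primary_dns and the second secondary_dns in the
+  # create/update payloads (see expand_create_primary_dns below)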
+ type: list + returned: success +""" + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + update=dict(default='15m', type='str'), + ), default=dict()), + cidr=dict(type='str', required=True), + gateway_ip=dict(type='str', required=True), + name=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + availability_zone=dict(type='str'), + dhcp_enable=dict(type='bool'), + dns_address=dict(type='list', elements='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get('id'): + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "availability_zone": module.params.get("availability_zone"), + "cidr": module.params.get("cidr"), + "dhcp_enable": module.params.get("dhcp_enable"), + "dns_address": module.params.get("dns_address"), + "gateway_ip": module.params.get("gateway_ip"), + "name": module.params.get("name"), + "vpc_id": module.params.get("vpc_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["subnet", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + r = send_update_request(module, params, client) + async_wait_update(config, r, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "subnets/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, 
"Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + v = navigate_value(opts, ["vpc_id"]) + if v: + query_link += "&vpc_id=" + str(v) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "subnets" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["cidr"], None) + if not is_empty_value(v): + params["cidr"] = v + + v = navigate_value(opts, ["dhcp_enable"], None) + if v is not None: + params["dhcp_enable"] = v + + v = expand_create_dns_list(opts, None) + if not is_empty_value(v): + params["dnsList"] = v + + v = navigate_value(opts, ["gateway_ip"], None) + if not is_empty_value(v): + params["gateway_ip"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_primary_dns(opts, None) + if not is_empty_value(v): + params["primary_dns"] = v + + v = expand_create_secondary_dns(opts, None) + if not is_empty_value(v): + params["secondary_dns"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"subnet": params} + + return params + + +def expand_create_dns_list(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v if (v and len(v) > 2) else [] + + +def expand_create_primary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[0] if v else "" + + +def expand_create_secondary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[1] if (v and len(v) > 1) else "" + + +def send_create_request(module, params, client): + url = "subnets" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "subnet_id": ["subnet", "id"], + } + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} + + url = build_path(module, "subnets/{subnet_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + 
except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["subnet", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["UNKNOWN"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["dhcp_enable"], None) + if v is not None: + params["dhcp_enable"] = v + + v = expand_update_dns_list(opts, None) + if v is not None: + params["dnsList"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_update_primary_dns(opts, None) + if v is not None: + params["primary_dns"] = v + + v = expand_update_secondary_dns(opts, None) + if v is not None: + params["secondary_dns"] = v + + if not params: + return params + + params = {"subnet": params} + + return params + + +def expand_update_dns_list(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + if v: + if len(v) > 2: + return v + return None + return [] + + +def expand_update_primary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[0] if v else "" + + +def expand_update_secondary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[1] if (v and len(v) > 1) else "" + + +def send_update_request(module, params, client): + url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_update(config, result, client, timeout): + module = config.module + + path_parameters = { + "subnet_id": ["subnet", "id"], + } + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} + + url = build_path(module, "subnets/{subnet_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["subnet", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["UNKNOWN"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(update) to " + "be done, error= %s" % str(ex)) + + +def send_delete_request(module, params, client): + url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "subnets/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["subnet"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["availability_zone"] = body.get("availability_zone") + + result["cidr"] = body.get("cidr") + + result["dhcp_enable"] = body.get("dhcp_enable") + + result["dnsList"] = body.get("dnsList") + + result["gateway_ip"] = body.get("gateway_ip") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + 
result["neutron_network_id"] = body.get("neutron_network_id") + + result["neutron_subnet_id"] = body.get("neutron_subnet_id") + + result["primary_dns"] = body.get("primary_dns") + + result["secondary_dns"] = body.get("secondary_dns") + + result["status"] = body.get("status") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "cidr"], array_index) + r["cidr"] = v + + v = navigate_value(response, ["read", "dhcp_enable"], array_index) + r["dhcp_enable"] = v + + v = navigate_value(response, ["read", "dnsList"], array_index) + r["dns_address"] = v + + v = navigate_value(response, ["read", "gateway_ip"], array_index) + r["gateway_ip"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["subnets"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["availability_zone"], None) + result["availability_zone"] = v + + v = navigate_value(all_opts, ["cidr"], None) + result["cidr"] = v + + v = navigate_value(all_opts, ["dhcp_enable"], None) + result["dhcp_enable"] = v + + v = navigate_value(all_opts, ["dns_address"], None) + result["dnsList"] = v + + v = navigate_value(all_opts, ["gateway_ip"], None) + result["gateway_ip"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["neutron_network_id"] = None + + result["neutron_subnet_id"] = None + + result["primary_dns"] = None + + result["secondary_dns"] = None + + result["status"] = None + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["availability_zone"] = body.get("availability_zone") + + result["cidr"] = body.get("cidr") + + result["dhcp_enable"] = body.get("dhcp_enable") + + result["dnsList"] = body.get("dnsList") + + result["gateway_ip"] = body.get("gateway_ip") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + result["neutron_network_id"] = body.get("neutron_network_id") + + result["neutron_subnet_id"] = body.get("neutron_subnet_id") + + result["primary_dns"] = body.get("primary_dns") + + result["secondary_dns"] = body.get("secondary_dns") + + result["status"] = body.get("status") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py deleted file mode 120000 index f9c994e942..0000000000 --- a/plugins/modules/ibm_sa_domain.py +++ /dev/null @@ -1 +0,0 @@ -./storage/ibm/ibm_sa_domain.py \ No newline at end of file diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py new file mode 100644 index 0000000000..f377bce761 --- /dev/null +++ b/plugins/modules/ibm_sa_domain.py @@ -0,0 +1,170 @@ +#!/usr/bin/python + +# Copyright (c) 2018, IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License 
v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ibm_sa_domain +short_description: Manages domains on IBM Spectrum Accelerate Family storage systems + +description: + - This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + domain: + description: + - Name of the domain to be managed. + required: true + type: str + state: + description: + - The desired state of the domain. + default: "present" + choices: ["present", "absent"] + type: str + ldap_id: + description: + - LDAP ID to add to the domain. + required: false + type: str + size: + description: + - Size of the domain. + required: false + type: str + hard_capacity: + description: + - Hard capacity of the domain. + required: false + type: str + soft_capacity: + description: + - Soft capacity of the domain. + required: false + type: str + max_cgs: + description: + - Number of max cgs. + required: false + type: str + max_dms: + description: + - Number of max dms. + required: false + type: str + max_mirrors: + description: + - Number of max_mirrors. + required: false + type: str + max_pools: + description: + - Number of max_pools. + required: false + type: str + max_volumes: + description: + - Number of max_volumes. + required: false + type: str + perf_class: + description: + - Add the domain to a performance class. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +""" + +EXAMPLES = r""" +- name: Define new domain. + community.general.ibm_sa_domain: + domain: domain_name + size: domain_size + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete domain. + community.general.ibm_sa_domain: + domain: domain_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +""" +RETURN = r""" +msg: + description: Module return status. + returned: as needed + type: str + sample: "domain 'domain_name' created successfully." +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + domain=dict(required=True), + size=dict(), + max_dms=dict(), + max_cgs=dict(), + ldap_id=dict(), + max_mirrors=dict(), + max_pools=dict(), + max_volumes=dict(), + perf_class=dict(), + hard_capacity=dict(), + soft_capacity=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + domain = xcli_client.cmd.domain_list( + domain=module.params['domain']).as_single_element + state = module.params['state'] + + state_changed = False + msg = 'Domain \'{0}\''.format(module.params['domain']) + if state == 'present' and not domain: + state_changed = execute_pyxcli_command( + module, 'domain_create', xcli_client) + msg += " created successfully." + elif state == 'absent' and domain: + state_changed = execute_pyxcli_command( + module, 'domain_delete', xcli_client) + msg += " deleted successfully." 
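+    # Reaching this branch means the domain is already in the requested state
+    # (present and found, or absent and missing), so it is reported as unchanged.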
+ else: + msg += " state unchanged." + + module.exit_json(changed=state_changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py deleted file mode 120000 index 6876a44fe7..0000000000 --- a/plugins/modules/ibm_sa_host.py +++ /dev/null @@ -1 +0,0 @@ -./storage/ibm/ibm_sa_host.py \ No newline at end of file diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py new file mode 100644 index 0000000000..17615390f0 --- /dev/null +++ b/plugins/modules/ibm_sa_host.py @@ -0,0 +1,124 @@ +#!/usr/bin/python + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ibm_sa_host +short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems + +description: + - This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host state. + default: "present" + choices: ["present", "absent"] + type: str + cluster: + description: + - The name of the cluster to include the host. + required: false + type: str + domain: + description: + - The domains the cluster is attached to. To include more than one domain, separate domain names with commas. To include + all existing domains, use an asterisk (V(*)). + required: false + type: str + iscsi_chap_name: + description: + - The host's CHAP name identifier. + required: false + type: str + iscsi_chap_secret: + description: + - The password of the initiator used to authenticate to the system when CHAP is enable. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +""" + +EXAMPLES = r""" +- name: Define new host. + community.general.ibm_sa_host: + host: host_name + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete host. 
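+  # only 'host' and the connection options are needed for removal; cluster,
+  # domain and CHAP settings are only meaningful when defining a host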
+ community.general.ibm_sa_host: + host: host_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +""" +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + host=dict(required=True), + cluster=dict(), + domain=dict(), + iscsi_chap_name=dict(), + iscsi_chap_secret=dict(no_log=True), + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + host = xcli_client.cmd.host_list( + host=module.params['host']).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not host: + state_changed = execute_pyxcli_command( + module, 'host_define', xcli_client) + elif state == 'absent' and host: + state_changed = execute_pyxcli_command( + module, 'host_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ibm_sa_host_ports.py b/plugins/modules/ibm_sa_host_ports.py deleted file mode 120000 index 42edcbde96..0000000000 --- a/plugins/modules/ibm_sa_host_ports.py +++ /dev/null @@ -1 +0,0 @@ -./storage/ibm/ibm_sa_host_ports.py \ No newline at end of file diff --git a/plugins/modules/ibm_sa_host_ports.py b/plugins/modules/ibm_sa_host_ports.py new file mode 100644 index 0000000000..4c5b2b2d04 --- /dev/null +++ b/plugins/modules/ibm_sa_host_ports.py @@ -0,0 +1,133 @@ +#!/usr/bin/python + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ibm_sa_host_ports +short_description: Add host ports on IBM Spectrum Accelerate Family storage systems + +description: + - This module adds ports to or removes them from the hosts on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host ports state. + default: "present" + choices: ["present", "absent"] + type: str + iscsi_name: + description: + - The iSCSI initiator name. + required: false + type: str + fcaddress: + description: + - Fiber channel address. + required: false + type: str + num_of_visible_targets: + description: + - Number of visible targets. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +""" + +EXAMPLES = r""" +- name: Add ports for host. + community.general.ibm_sa_host_ports: + host: test_host + iscsi_name: iqn.1994-05.com*** + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Remove ports for host. 
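+  # the module decides idempotency by matching the host's existing port names
+  # against iscsi_name/fcaddress, so pass the same identifier used to add the port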
+ community.general.ibm_sa_host_ports: + host: test_host + iscsi_name: iqn.1994-05.com*** + username: admin + password: secret + endpoints: hostdev-system + state: absent +""" +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, + spectrum_accelerate_spec, is_pyxcli_installed) + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + host=dict(required=True), + iscsi_name=dict(), + fcaddress=dict(), + num_of_visible_targets=dict() + ) + ) + + module = AnsibleModule(argument_spec) + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + ports = [] + try: + ports = xcli_client.cmd.host_list_ports( + host=module.params.get('host')).as_list + except Exception: + pass + state = module.params['state'] + port_exists = False + ports = [port.get('port_name') for port in ports] + + fc_ports = (module.params.get('fcaddress') + if module.params.get('fcaddress') else []) + iscsi_ports = (module.params.get('iscsi_name') + if module.params.get('iscsi_name') else []) + for port in ports: + if port in iscsi_ports or port in fc_ports: + port_exists = True + break + state_changed = False + if state == 'present' and not port_exists: + state_changed = execute_pyxcli_command( + module, 'host_add_port', xcli_client) + if state == 'absent' and port_exists: + state_changed = execute_pyxcli_command( + module, 'host_remove_port', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ibm_sa_pool.py b/plugins/modules/ibm_sa_pool.py deleted file mode 120000 index 3a73256b36..0000000000 --- a/plugins/modules/ibm_sa_pool.py +++ /dev/null @@ -1 +0,0 @@ -./storage/ibm/ibm_sa_pool.py \ No newline at end of file diff --git a/plugins/modules/ibm_sa_pool.py b/plugins/modules/ibm_sa_pool.py new file mode 100644 index 0000000000..bb7102fa71 --- /dev/null +++ b/plugins/modules/ibm_sa_pool.py @@ -0,0 +1,124 @@ +#!/usr/bin/python + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ibm_sa_pool +short_description: Handles pools on IBM Spectrum Accelerate Family storage systems + +description: + - This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + pool: + description: + - Pool name. + required: true + type: str + state: + description: + - Pool state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Pool size in GB. + required: false + type: str + snapshot_size: + description: + - Pool snapshot size in GB. + required: false + type: str + domain: + description: + - Adds the pool to the specified domain. + required: false + type: str + perf_class: + description: + - Assigns a perf_class to the pool. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +""" + +EXAMPLES = r""" +- name: Create new pool. 
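+  # note: the module option is 'pool', not 'name' (see argument_spec below)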
+ community.general.ibm_sa_pool: + name: pool_name + size: 300 + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete pool. + community.general.ibm_sa_pool: + name: pool_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +""" +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + pool=dict(required=True), + size=dict(), + snapshot_size=dict(), + domain=dict(), + perf_class=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + pool = xcli_client.cmd.pool_list( + pool=module.params['pool']).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not pool: + state_changed = execute_pyxcli_command( + module, 'pool_create', xcli_client) + if state == 'absent' and pool: + state_changed = execute_pyxcli_command( + module, 'pool_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ibm_sa_vol.py b/plugins/modules/ibm_sa_vol.py deleted file mode 120000 index 54a7aa3c12..0000000000 --- a/plugins/modules/ibm_sa_vol.py +++ /dev/null @@ -1 +0,0 @@ -./storage/ibm/ibm_sa_vol.py \ No newline at end of file diff --git a/plugins/modules/ibm_sa_vol.py b/plugins/modules/ibm_sa_vol.py new file mode 100644 index 0000000000..48450084e2 --- /dev/null +++ b/plugins/modules/ibm_sa_vol.py @@ -0,0 +1,114 @@ +#!/usr/bin/python + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ibm_sa_vol +short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems + +description: + - This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + vol: + description: + - Volume name. + required: true + type: str + pool: + description: + - Volume pool. + required: false + type: str + state: + description: + - Volume state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Volume size. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +""" + +EXAMPLES = r""" +- name: Create a new volume. + community.general.ibm_sa_vol: + vol: volume_name + pool: pool_name + size: 17 + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete an existing volume. 
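+  # 'pool' and 'size' are only used when creating; deletion is keyed on 'vol' alone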
+ community.general.ibm_sa_vol: + vol: volume_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +""" +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + vol=dict(required=True), + pool=dict(), + size=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + volume = xcli_client.cmd.vol_list( + vol=module.params.get('vol')).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not volume: + state_changed = execute_pyxcli_command( + module, 'vol_create', xcli_client) + elif state == 'absent' and volume: + state_changed = execute_pyxcli_command( + module, 'vol_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ibm_sa_vol_map.py b/plugins/modules/ibm_sa_vol_map.py deleted file mode 120000 index 14526261f2..0000000000 --- a/plugins/modules/ibm_sa_vol_map.py +++ /dev/null @@ -1 +0,0 @@ -./storage/ibm/ibm_sa_vol_map.py \ No newline at end of file diff --git a/plugins/modules/ibm_sa_vol_map.py b/plugins/modules/ibm_sa_vol_map.py new file mode 100644 index 0000000000..03c87ca37b --- /dev/null +++ b/plugins/modules/ibm_sa_vol_map.py @@ -0,0 +1,142 @@ +#!/usr/bin/python + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ibm_sa_vol_map +short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems + +description: + - This module maps volumes to or unmaps them from the hosts on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + vol: + description: + - Volume name. + required: true + type: str + state: + default: "present" + choices: ["present", "absent"] + description: + - When the state is present the volume is mapped. When the state is absent, the volume is meant to be unmapped. + type: str + + cluster: + description: + - Maps the volume to a cluster. + required: false + type: str + host: + description: + - Maps the volume to a host. + required: false + type: str + lun: + description: + - The LUN identifier. + required: false + type: str + override: + description: + - Overrides the existing volume mapping. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +""" + +EXAMPLES = r""" +- name: Map volume to host. + community.general.ibm_sa_vol_map: + vol: volume_name + lun: 1 + host: host_name + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Map volume to cluster. + community.general.ibm_sa_vol_map: + vol: volume_name + lun: 1 + cluster: cluster_name + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Unmap volume. 
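+  # 'vol' is declared required=True in the argument_spec, so this unmap
+  # example likely needs a 'vol:' entry as well to run as written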
+ community.general.ibm_sa_vol_map: + host: host_name + username: admin + password: secret + endpoints: hostdev-system + state: absent +""" +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed) + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + vol=dict(required=True), + lun=dict(), + cluster=dict(), + host=dict(), + override=dict() + ) + ) + + module = AnsibleModule(argument_spec) + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + mapping = False + try: + mapped_hosts = xcli_client.cmd.vol_mapping_list( + vol=module.params.get('vol')).as_list + for host in mapped_hosts: + if host['host'] == module.params.get("host", ""): + mapping = True + except Exception: + pass + state = module.params['state'] + + state_changed = False + if state == 'present' and not mapping: + state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client) + if state == 'absent' and mapping: + state_changed = execute_pyxcli_command( + module, 'unmap_vol', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/icinga2_feature.py b/plugins/modules/icinga2_feature.py deleted file mode 120000 index 3c9a728868..0000000000 --- a/plugins/modules/icinga2_feature.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/icinga2_feature.py \ No newline at end of file diff --git a/plugins/modules/icinga2_feature.py b/plugins/modules/icinga2_feature.py new file mode 100644 index 0000000000..6899fe2e23 --- /dev/null +++ b/plugins/modules/icinga2_feature.py @@ -0,0 +1,131 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Loic Blot +# Copyright (c) 2018, Ansible Project +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: icinga2_feature + +short_description: Manage Icinga2 feature +description: + - This module can be used to enable or disable an Icinga2 feature. +author: "Loic Blot (@nerzhul)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - This is the feature name to enable or disable. + required: true + state: + type: str + description: + - If set to V(present) and feature is disabled, then feature is enabled. + - If set to V(present) and feature is already enabled, then nothing is changed. + - If set to V(absent) and feature is enabled, then feature is disabled. + - If set to V(absent) and feature is already disabled, then nothing is changed. 
+ choices: ["present", "absent"] + default: present +""" + +EXAMPLES = r""" +- name: Enable ido-pgsql feature + community.general.icinga2_feature: + name: ido-pgsql + state: present + +- name: Disable api feature + community.general.icinga2_feature: + name: api + state: absent +""" + +RETURN = r""" +# +""" + +import re +from ansible.module_utils.basic import AnsibleModule + + +class Icinga2FeatureHelper: + def __init__(self, module): + self.module = module + self._icinga2 = module.get_bin_path('icinga2', True) + self.feature_name = self.module.params['name'] + self.state = self.module.params['state'] + + def _exec(self, args): + cmd = [self._icinga2, 'feature'] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return rc, out + + def manage(self): + rc, out = self._exec(["list"]) + if rc != 0: + self.module.fail_json(msg="Unable to list icinga2 features. " + "Ensure icinga2 is installed and present in binary path.") + + # If feature is already in good state, just exit + if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \ + (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"): + self.module.exit_json(changed=False) + + if self.module.check_mode: + self.module.exit_json(changed=True) + + feature_enable_str = "enable" if self.state == "present" else "disable" + + rc, out = self._exec([feature_enable_str, self.feature_name]) + + change_applied = False + if self.state == "present": + if rc != 0: + self.module.fail_json(msg="Failed to %s feature %s." + " icinga2 command returned %s" % (feature_enable_str, + self.feature_name, + out)) + + if re.search("already enabled", out) is None: + change_applied = True + else: + if rc == 0: + change_applied = True + # RC is not 0 for this already disabled feature, handle it as no change applied + elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out): + change_applied = False + else: + self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out) + + self.module.exit_json(changed=change_applied) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=["present", "absent"], default="present") + ), + supports_check_mode=True + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + Icinga2FeatureHelper(module).manage() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py deleted file mode 120000 index b9c3bec331..0000000000 --- a/plugins/modules/icinga2_host.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/icinga2_host.py \ No newline at end of file diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py new file mode 100644 index 0000000000..39a7b48a6d --- /dev/null +++ b/plugins/modules/icinga2_host.py @@ -0,0 +1,328 @@ +#!/usr/bin/python + +# This module is proudly sponsored by CGI (www.cgi.com) and +# KPN (www.kpn.com). +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: icinga2_host +short_description: Manage a host in Icinga2 +description: + - Add or remove a host to Icinga2 through the API. 
+ - See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/). +author: "Jurgen Brand (@t794104)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + url: + type: str + description: + - HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path). + use_proxy: + description: + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. + type: bool + default: true + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true + url_username: + type: str + description: + - The username for use in HTTP basic authentication. + - This parameter can be used without O(url_password) for sites that allow empty passwords. + url_password: + type: str + description: + - The password for use in HTTP basic authentication. + - If the O(url_username) parameter is not specified, the O(url_password) parameter is not used. + force_basic_auth: + description: + - C(httplib2), the library used by Ansible's HTTP request code, only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + may fail. This option forces the sending of the Basic authentication header upon initial request. + type: bool + default: false + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client authentication. This file can also include the key, + and if the key is included, O(client_key) is not required. + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. If O(client_cert) contains + both the certificate and key, this option is not required. + state: + type: str + description: + - Apply host state. + choices: ["present", "absent"] + default: present + name: + type: str + description: + - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique. + required: true + aliases: [host] + zone: + type: str + description: + - The zone from which this host should be polled. + template: + type: str + description: + - The template used to define the host. + - Template cannot be modified after object creation. + check_command: + type: str + description: + - The command used to check if the host is alive. + default: "hostalive" + display_name: + type: str + description: + - The name used to display the host. + - If not specified, it defaults to the value of the O(name) parameter. + ip: + type: str + description: + - The IP address of the host. + - This is no longer required since community.general 8.0.0. + variables: + type: dict + description: + - Dictionary of variables. +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Add host to icinga + community.general.icinga2_host: + url: "https://icinga2.example.com" + url_username: "ansible" + url_password: "a_secret" + state: present + name: "{{ ansible_fqdn }}" + ip: "{{ ansible_default_ipv4.address }}" + variables: + foo: "bar" + delegate_to: 127.0.0.1 +""" + +RETURN = r""" +name: + description: The name used to create, modify or delete the host.
+ type: str + returned: always +data: + description: The data structure used for create, modify or delete of the host. + type: dict + returned: always +""" + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, url_argument_spec + + +# =========================================== +# Icinga2 API class +# +class icinga2_api: + module = None + + def __init__(self, module): + self.module = module + + def call_url(self, path, data='', method='GET'): + headers = { + 'Accept': 'application/json', + 'X-HTTP-Method-Override': method, + } + url = self.module.params.get("url") + "/" + path + rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy']) + body = '' + if rsp: + body = json.loads(rsp.read()) + if info['status'] >= 400: + body = info['body'] + return {'code': info['status'], 'data': body} + + def check_connection(self): + ret = self.call_url('v1/status') + if ret['code'] == 200: + return True + return False + + def exists(self, hostname): + data = { + "filter": "match(\"" + hostname + "\", host.name)", + } + ret = self.call_url( + path="v1/objects/hosts", + data=self.module.jsonify(data) + ) + if ret['code'] == 200: + if len(ret['data']['results']) == 1: + return True + return False + + def create(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="PUT" + ) + return ret + + def delete(self, hostname): + data = {"cascade": 1} + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="DELETE" + ) + return ret + + def modify(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="POST" + ) + return ret + + def diff(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + method="GET" + ) + changed = False + ic_data = ret['data']['results'][0] + for key in data['attrs']: + if key not in ic_data['attrs'].keys(): + changed = True + elif data['attrs'][key] != ic_data['attrs'][key]: + changed = True + return changed + + +# =========================================== +# Module execution. +# +def main(): + # use the predefined argument spec for url + argument_spec = url_argument_spec() + # add our own arguments + argument_spec.update( + state=dict(default="present", choices=["absent", "present"]), + name=dict(required=True, aliases=['host']), + zone=dict(), + template=dict(), + check_command=dict(default="hostalive"), + display_name=dict(), + ip=dict(), + variables=dict(type='dict'), + ) + + # Define the main module + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params["state"] + name = module.params["name"] + zone = module.params["zone"] + template = [] + if module.params["template"]: + template = [module.params["template"]] + check_command = module.params["check_command"] + ip = module.params["ip"] + display_name = module.params["display_name"] + if not display_name: + display_name = name + variables = module.params["variables"] + + try: + icinga = icinga2_api(module=module) + icinga.check_connection() + except Exception as e: + module.fail_json(msg="unable to connect to Icinga. 
Exception message: %s" % (e)) + + data = { + 'templates': template, + 'attrs': { + 'address': ip, + 'display_name': display_name, + 'check_command': check_command, + 'zone': zone, + 'vars.made_by': "ansible" + } + } + data['attrs'].update({'vars.' + key: value for key, value in variables.items()}) + + changed = False + if icinga.exists(name): + if state == "absent": + if module.check_mode: + module.exit_json(changed=True, name=name, data=data) + else: + try: + ret = icinga.delete(name) + if ret['code'] == 200: + changed = True + else: + module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data'])) + except Exception as e: + module.fail_json(msg="exception deleting host: " + str(e)) + + elif icinga.diff(name, data): + if module.check_mode: + module.exit_json(changed=False, name=name, data=data) + + # Template attribute is not allowed in modification + del data['templates'] + + ret = icinga.modify(name, data) + + if ret['code'] == 200: + changed = True + else: + module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data'])) + + else: + if state == "present": + if module.check_mode: + changed = True + else: + try: + ret = icinga.create(name, data) + if ret['code'] == 200: + changed = True + else: + module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data'])) + except Exception as e: + module.fail_json(msg="exception creating host: " + str(e)) + + module.exit_json(changed=changed, name=name, data=data) + + +# import module snippets +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py deleted file mode 100644 index 2b41dfb098..0000000000 --- a/plugins/modules/identity/ipa/ipa_config.py +++ /dev/null @@ -1,362 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Fran Fitzpatrick -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_config -author: Fran Fitzpatrick (@fxfitz) -short_description: Manage Global FreeIPA Configuration Settings -description: -- Modify global configuration settings of a FreeIPA Server. -options: - ipaconfigstring: - description: Extra hashes to generate in password plug-in. - aliases: ["configstring"] - type: list - elements: str - choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"] - version_added: '2.5.0' - ipadefaultloginshell: - description: Default shell for new users. - aliases: ["loginshell"] - type: str - ipadefaultemaildomain: - description: Default e-mail domain for new users. - aliases: ["emaildomain"] - type: str - ipadefaultprimarygroup: - description: Default group for new users. - aliases: ["primarygroup"] - type: str - version_added: '2.5.0' - ipagroupsearchfields: - description: A list of fields to search in when searching for groups. - aliases: ["groupsearchfields"] - type: list - elements: str - version_added: '2.5.0' - ipahomesrootdir: - description: Default location of home directories. - aliases: ["homesrootdir"] - type: str - version_added: '2.5.0' - ipakrbauthzdata: - description: Default types of PAC supported for services. - aliases: ["krbauthzdata"] - type: list - elements: str - choices: ["MS-PAC", "PAD", "nfs:NONE"] - version_added: '2.5.0' - ipamaxusernamelength: - description: Maximum length of usernames. 
- aliases: ["maxusernamelength"] - type: int - version_added: '2.5.0' - ipapwdexpadvnotify: - description: Notice of impending password expiration, in days. - aliases: ["pwdexpadvnotify"] - type: int - version_added: '2.5.0' - ipasearchrecordslimit: - description: Maximum number of records to search (-1 or 0 is unlimited). - aliases: ["searchrecordslimit"] - type: int - version_added: '2.5.0' - ipasearchtimelimit: - description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited). - aliases: ["searchtimelimit"] - type: int - version_added: '2.5.0' - ipaselinuxusermaporder: - description: The SELinux user map order (order in increasing priority of SELinux users). - aliases: ["selinuxusermaporder"] - type: list - elements: str - version_added: '3.7.0' - ipauserauthtype: - description: The authentication type to use by default. - aliases: ["userauthtype"] - choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"] - type: list - elements: str - version_added: '2.5.0' - ipausersearchfields: - description: A list of fields to search in when searching for users. - aliases: ["usersearchfields"] - type: list - elements: str - version_added: '2.5.0' -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled - community.general.ipa_config: - ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default login shell is bash - community.general.ipa_config: - ipadefaultloginshell: /bin/bash - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default e-mail domain is ansible.com - community.general.ipa_config: - ipadefaultemaildomain: ansible.com - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default primary group is set to ipausers - community.general.ipa_config: - ipadefaultprimarygroup: ipausers - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the group search fields are set to 'cn,description' - community.general.ipa_config: - ipagroupsearchfields: ['cn', 'description'] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the home directory location is set to /home - community.general.ipa_config: - ipahomesrootdir: /home - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD - community.general.ipa_config: - ipakrbauthzdata: ["MS-PAC", "PAD"] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the maximum user name length is set to 32 - community.general.ipa_config: - ipamaxusernamelength: 32 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the password expiration notice is set to 4 days - community.general.ipa_config: - ipapwdexpadvnotify: 4 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the search record limit is set to 100 - community.general.ipa_config: - ipasearchrecordslimit: 100 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the search time limit is set to 2 seconds - community.general.ipa_config: - ipasearchtimelimit: 2 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default user auth type is password - 
community.general.ipa_config: - ipauserauthtype: ['password'] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the user search fields are set to 'uid,givenname,sn,ou,title' - community.general.ipa_config: - ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title'] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the SELinux user map order is set - community.general.ipa_config: - ipaselinuxusermaporder: - - "guest_u:s0" - - "xguest_u:s0" - - "user_u:s0" - - "staff_u:s0-s0:c0.c1023" - - "unconfined_u:s0-s0:c0.c1023" - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret -''' - -RETURN = r''' -config: - description: Configuration as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class ConfigIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(ConfigIPAClient, self).__init__(module, host, port, protocol) - - def config_show(self): - return self._post_json(method='config_show', name=None) - - def config_mod(self, name, item): - return self._post_json(method='config_mod', name=name, item=item) - - -def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, - ipadefaultemaildomain=None, ipadefaultprimarygroup=None, - ipagroupsearchfields=None, ipahomesrootdir=None, - ipakrbauthzdata=None, ipamaxusernamelength=None, - ipapwdexpadvnotify=None, ipasearchrecordslimit=None, - ipasearchtimelimit=None, ipaselinuxusermaporder=None, - ipauserauthtype=None, ipausersearchfields=None): - config = {} - if ipaconfigstring is not None: - config['ipaconfigstring'] = ipaconfigstring - if ipadefaultloginshell is not None: - config['ipadefaultloginshell'] = ipadefaultloginshell - if ipadefaultemaildomain is not None: - config['ipadefaultemaildomain'] = ipadefaultemaildomain - if ipadefaultprimarygroup is not None: - config['ipadefaultprimarygroup'] = ipadefaultprimarygroup - if ipagroupsearchfields is not None: - config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) - if ipahomesrootdir is not None: - config['ipahomesrootdir'] = ipahomesrootdir - if ipakrbauthzdata is not None: - config['ipakrbauthzdata'] = ipakrbauthzdata - if ipamaxusernamelength is not None: - config['ipamaxusernamelength'] = str(ipamaxusernamelength) - if ipapwdexpadvnotify is not None: - config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify) - if ipasearchrecordslimit is not None: - config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) - if ipasearchtimelimit is not None: - config['ipasearchtimelimit'] = str(ipasearchtimelimit) - if ipaselinuxusermaporder is not None: - config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) - if ipauserauthtype is not None: - config['ipauserauthtype'] = ipauserauthtype - if ipausersearchfields is not None: - config['ipausersearchfields'] = ','.join(ipausersearchfields) - - return config - - -def get_config_diff(client, ipa_config, module_config): - return client.get_diff(ipa_data=ipa_config, module_data=module_config) - - -def ensure(module, client): - module_config = get_config_dict( - ipaconfigstring=module.params.get('ipaconfigstring'), - ipadefaultloginshell=module.params.get('ipadefaultloginshell'), - ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), -
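# get_config_dict above normalizes values into the wire format the IPA API
# expects: ipagroupsearchfields and ipausersearchfields are joined with ',',
# ipaselinuxusermaporder with '$', and integer options are passed as str.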
ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), - ipagroupsearchfields=module.params.get('ipagroupsearchfields'), - ipahomesrootdir=module.params.get('ipahomesrootdir'), - ipakrbauthzdata=module.params.get('ipakrbauthzdata'), - ipamaxusernamelength=module.params.get('ipamaxusernamelength'), - ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), - ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), - ipasearchtimelimit=module.params.get('ipasearchtimelimit'), - ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), - ipauserauthtype=module.params.get('ipauserauthtype'), - ipausersearchfields=module.params.get('ipausersearchfields'), - ) - ipa_config = client.config_show() - diff = get_config_diff(client, ipa_config, module_config) - - changed = False - new_config = {} - for module_key in diff: - if module_config.get(module_key) != ipa_config.get(module_key, None): - changed = True - new_config.update({module_key: module_config.get(module_key)}) - - if changed and not module.check_mode: - client.config_mod(name=None, item=new_config) - - return changed, client.config_show() - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update( - ipaconfigstring=dict(type='list', elements='str', - choices=['AllowNThash', - 'KDC:Disable Last Success', - 'KDC:Disable Lockout', - 'KDC:Disable Default Preauth for SPNs'], - aliases=['configstring']), - ipadefaultloginshell=dict(type='str', aliases=['loginshell']), - ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), - ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), - ipagroupsearchfields=dict(type='list', elements='str', - aliases=['groupsearchfields']), - ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), - ipakrbauthzdata=dict(type='list', elements='str', - choices=['MS-PAC', 'PAD', 'nfs:NONE'], - aliases=['krbauthzdata']), - ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']), - ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), - ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), - ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), - ipaselinuxusermaporder=dict(type='list', elements='str', - aliases=['selinuxusermaporder']), - ipauserauthtype=dict(type='list', elements='str', - aliases=['userauthtype'], - choices=["password", "radius", "otp", "pkinit", - "hardened", "disabled"]), - ipausersearchfields=dict(type='list', elements='str', - aliases=['usersearchfields']), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = ConfigIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, user = ensure(module, client) - module.exit_json(changed=changed, user=user) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_dnsrecord.py b/plugins/modules/identity/ipa/ipa_dnsrecord.py deleted file mode 100644 index 73b6695698..0000000000 --- a/plugins/modules/identity/ipa/ipa_dnsrecord.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
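# All of the ipa_* modules in this diff share the ensure() idempotency
# pattern shown in ipa_config above. A minimal sketch of that pattern, with
# hypothetical show()/mod() helpers standing in for the real IPAClient calls:
#
#     def ensure_settings(module, client, desired):
#         current = client.show()  # state on the server
#         changed_keys = [k for k in desired if desired[k] != current.get(k)]
#         if changed_keys and not module.check_mode:
#             client.mod({k: desired[k] for k in changed_keys})  # apply only the diff
#         return bool(changed_keys), client.show()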
- -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_dnsrecord -author: Abhijeet Kasurde (@Akasurde) -short_description: Manage FreeIPA DNS records -description: -- Add, modify, and delete an IPA DNS record using the IPA API. -options: - zone_name: - description: - - The name of the DNS zone in which the record is to be managed. - required: true - type: str - record_name: - description: - - The DNS record name to manage. - required: true - aliases: ["name"] - type: str - record_type: - description: - - The type of the DNS record. - - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported. - - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." - - "'SRV' and 'MX' are added in version 2.8." - required: false - default: 'A' - choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT'] - type: str - record_value: - description: - - Manage DNS record name with this value. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be the text value. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. - required: true - type: str - record_ttl: - description: - - Set the TTL for the record. - - Applies only when adding a new record or changing the value of record_value. - required: false - type: int - state: - description: State to ensure - required: false - default: present - choices: ["absent", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure dns record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: vm-001 - record_type: 'AAAA' - record_value: '::1' - -- name: Ensure that dns record exists with a TTL - community.general.ipa_dnsrecord: - name: host02 - zone_name: example.com - record_type: 'AAAA' - record_value: '::1' - record_ttl: 300 - ipa_host: ipa.example.com - ipa_pass: topsecret - state: present - -- name: Ensure a PTR record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: 2.168.192.in-addr.arpa - record_name: 5 - record_type: 'PTR' - record_value: 'internal.ipa.example.com' - -- name: Ensure a TXT record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: _kerberos - record_type: 'TXT' - record_value: 'EXAMPLE.COM' - -- name: Ensure an SRV record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: _kerberos._udp.example.com - record_type: 'SRV' - record_value: '10 50 88 ipa.example.com' - -- name: Ensure an MX record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd!
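- # An MX value takes the form '<preference> <exchanger>', as in
- # '1 mailserver.example.com' below; compare the SRV value format
- # '<priority> <weight> <port> <target>' used above.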
- state: present - zone_name: example.com - record_name: '@' - record_type: 'MX' - record_value: '1 mailserver.example.com' - -- name: Ensure that dns record is removed - community.general.ipa_dnsrecord: - name: host01 - zone_name: example.com - record_type: 'AAAA' - record_value: '::1' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - state: absent -''' - -RETURN = r''' -dnsrecord: - description: DNS record as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class DNSRecordIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(DNSRecordIPAClient, self).__init__(module, host, port, protocol) - - def dnsrecord_find(self, zone_name, record_name): - if record_name == '@': - return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True}) - else: - return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True}) - - def dnsrecord_add(self, zone_name=None, record_name=None, details=None): - item = dict(idnsname=record_name) - if details['record_type'] == 'A': - item.update(a_part_ip_address=details['record_value']) - elif details['record_type'] == 'AAAA': - item.update(aaaa_part_ip_address=details['record_value']) - elif details['record_type'] == 'A6': - item.update(a6_part_data=details['record_value']) - elif details['record_type'] == 'CNAME': - item.update(cname_part_hostname=details['record_value']) - elif details['record_type'] == 'DNAME': - item.update(dname_part_target=details['record_value']) - elif details['record_type'] == 'PTR': - item.update(ptr_part_hostname=details['record_value']) - elif details['record_type'] == 'TXT': - item.update(txtrecord=details['record_value']) - elif details['record_type'] == 'SRV': - item.update(srvrecord=details['record_value']) - elif details['record_type'] == 'MX': - item.update(mxrecord=details['record_value']) - - if details.get('record_ttl'): - item.update(dnsttl=details['record_ttl']) - - return self._post_json(method='dnsrecord_add', name=zone_name, item=item) - - def dnsrecord_mod(self, zone_name=None, record_name=None, details=None): - item = get_dnsrecord_dict(details) - item.update(idnsname=record_name) - if details.get('record_ttl'): - item.update(dnsttl=details['record_ttl']) - return self._post_json(method='dnsrecord_mod', name=zone_name, item=item) - - def dnsrecord_del(self, zone_name=None, record_name=None, details=None): - item = get_dnsrecord_dict(details) - item.update(idnsname=record_name) - return self._post_json(method='dnsrecord_del', name=zone_name, item=item) - - -def get_dnsrecord_dict(details=None): - module_dnsrecord = dict() - if details['record_type'] == 'A' and details['record_value']: - module_dnsrecord.update(arecord=details['record_value']) - elif details['record_type'] == 'AAAA' and details['record_value']: - module_dnsrecord.update(aaaarecord=details['record_value']) - elif details['record_type'] == 'A6' and details['record_value']: - module_dnsrecord.update(a6record=details['record_value']) - elif details['record_type'] == 'CNAME' and details['record_value']: - module_dnsrecord.update(cnamerecord=details['record_value']) - elif details['record_type'] == 'DNAME' and details['record_value']: - 
module_dnsrecord.update(dnamerecord=details['record_value']) - elif details['record_type'] == 'PTR' and details['record_value']: - module_dnsrecord.update(ptrrecord=details['record_value']) - elif details['record_type'] == 'TXT' and details['record_value']: - module_dnsrecord.update(txtrecord=details['record_value']) - elif details['record_type'] == 'SRV' and details['record_value']: - module_dnsrecord.update(srvrecord=details['record_value']) - elif details['record_type'] == 'MX' and details['record_value']: - module_dnsrecord.update(mxrecord=details['record_value']) - - if details.get('record_ttl'): - module_dnsrecord.update(dnsttl=details['record_ttl']) - - return module_dnsrecord - - -def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord): - details = get_dnsrecord_dict(module_dnsrecord) - return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details) - - -def ensure(module, client): - zone_name = module.params['zone_name'] - record_name = module.params['record_name'] - record_ttl = module.params.get('record_ttl') - state = module.params['state'] - - ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name) - - module_dnsrecord = dict( - record_type=module.params['record_type'], - record_value=module.params['record_value'], - record_ttl=to_native(record_ttl, nonstring='passthru'), - ) - - # ttl is not required to change records - if module_dnsrecord['record_ttl'] is None: - module_dnsrecord.pop('record_ttl') - - changed = False - if state == 'present': - if not ipa_dnsrecord: - changed = True - if not module.check_mode: - client.dnsrecord_add(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) - else: - diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord) - if len(diff) > 0: - changed = True - if not module.check_mode: - client.dnsrecord_mod(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) - else: - if ipa_dnsrecord: - changed = True - if not module.check_mode: - client.dnsrecord_del(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) - - return changed, client.dnsrecord_find(zone_name, record_name) - - -def main(): - record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX'] - argument_spec = ipa_argument_spec() - argument_spec.update( - zone_name=dict(type='str', required=True), - record_name=dict(type='str', aliases=['name'], required=True), - record_type=dict(type='str', default='A', choices=record_types), - record_value=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - record_ttl=dict(type='int', required=False), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = DNSRecordIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, record = ensure(module, client) - module.exit_json(changed=changed, record=record) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_dnszone.py b/plugins/modules/identity/ipa/ipa_dnszone.py deleted file mode 100644 index 3dabad8db8..0000000000 --- a/plugins/modules/identity/ipa/ipa_dnszone.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Fran 
Fitzpatrick (francis.x.fitzpatrick@gmail.com) -# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_dnszone -author: Fran Fitzpatrick (@fxfitz) -short_description: Manage FreeIPA DNS Zones -description: -- Add and delete IPA DNS zones using the IPA API -options: - zone_name: - description: - - The DNS zone name that needs to be managed. - required: true - type: str - state: - description: State to ensure - required: false - default: present - choices: ["absent", "present"] - type: str - dynamicupdate: - description: Apply dynamic update to the zone - required: false - default: "false" - choices: ["false", "true"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure dns zone is present - community.general.ipa_dnszone: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - -- name: Ensure dns zone is present and allows dynamic updates - community.general.ipa_dnszone: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - dynamicupdate: "true" - -- name: Ensure that dns zone is removed - community.general.ipa_dnszone: - zone_name: example.com - ipa_host: localhost - ipa_user: admin - ipa_pass: topsecret - state: absent -''' - -RETURN = r''' -zone: - description: DNS zone as returned by IPA API. - returned: always - type: dict -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class DNSZoneIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(DNSZoneIPAClient, self).__init__(module, host, port, protocol) - - def dnszone_find(self, zone_name, details=None): - items = {'idnsname': zone_name} - if details is not None: - items.update(details) - - return self._post_json( - method='dnszone_find', - name=zone_name, - item=items - ) - - def dnszone_add(self, zone_name=None, details=None): - items = {} - if details is not None: - items.update(details) - - return self._post_json( - method='dnszone_add', - name=zone_name, - item=items - ) - - def dnszone_del(self, zone_name=None, record_name=None, details=None): - return self._post_json( - method='dnszone_del', name=zone_name, item={}) - - -def ensure(module, client): - zone_name = module.params['zone_name'] - state = module.params['state'] - dynamicupdate = module.params['dynamicupdate'] - - ipa_dnszone = client.dnszone_find(zone_name) - - changed = False - if state == 'present': - if not ipa_dnszone: - changed = True - if not module.check_mode: - client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate}) - else: - changed = False - else: - if ipa_dnszone: - changed = True - if not module.check_mode: - client.dnszone_del(zone_name=zone_name) - - return changed, client.dnszone_find(zone_name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(zone_name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']), - ) - - module =
AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - ) - - client = DNSZoneIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, zone = ensure(module, client) - module.exit_json(changed=changed, zone=zone) - except Exception as e: - module.fail_json(msg=to_native(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_group.py b/plugins/modules/identity/ipa/ipa_group.py deleted file mode 100644 index d6af57ba1f..0000000000 --- a/plugins/modules/identity/ipa/ipa_group.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_group -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA group -description: -- Add, modify and delete group within IPA server -options: - append: - description: - - If C(yes), add the listed I(user) and I(group) to the group members. - - If C(no), only the listed I(user) and I(group) will be group members, removing any other members. - default: no - type: bool - version_added: 4.0.0 - cn: - description: - - Canonical name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ['name'] - type: str - description: - description: - - Description of the group. - type: str - external: - description: - - Allow adding external non-IPA members from trusted domains. - type: bool - gidnumber: - description: - - GID (use this option to set it manually). - aliases: ['gid'] - type: str - group: - description: - - List of group names assigned to this group. - - If I(append=no) and an empty list is passed all groups will be removed from this group. - - Groups that are already assigned but not passed will be removed. - - If I(append=yes) the listed groups will be assigned without removing other groups. - - If option is omitted assigned groups will not be checked or changed. - type: list - elements: str - nonposix: - description: - - Create as a non-POSIX group. - type: bool - user: - description: - - List of user names assigned to this group. - - If I(append=no) and an empty list is passed all users will be removed from this group. - - Users that are already assigned but not passed will be removed. - - If I(append=yes) the listed users will be assigned without removing other users. - - If option is omitted assigned users will not be checked or changed. 
- type: list - elements: str - state: - description: - - State to ensure - default: "present" - choices: ["absent", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure group is present - community.general.ipa_group: - name: oinstall - gidnumber: '54321' - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that groups sysops and appops are assigned to ops but no other group - community.general.ipa_group: - name: ops - group: - - sysops - - appops - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that users linus and larry are assigned to the group, but no other user - community.general.ipa_group: - name: sysops - user: - - linus - - larry - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that the new starter named john is a member of the group, without removing other members - community.general.ipa_group: - name: developers - user: - - john - append: yes - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure group is absent - community.general.ipa_group: - name: sysops - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -group: - description: Group as returned by IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class GroupIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(GroupIPAClient, self).__init__(module, host, port, protocol) - - def group_find(self, name): - return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name}) - - def group_add(self, name, item): - return self._post_json(method='group_add', name=name, item=item) - - def group_mod(self, name, item): - return self._post_json(method='group_mod', name=name, item=item) - - def group_del(self, name): - return self._post_json(method='group_del', name=name) - - def group_add_member(self, name, item): - return self._post_json(method='group_add_member', name=name, item=item) - - def group_add_member_group(self, name, item): - return self.group_add_member(name=name, item={'group': item}) - - def group_add_member_user(self, name, item): - return self.group_add_member(name=name, item={'user': item}) - - def group_remove_member(self, name, item): - return self._post_json(method='group_remove_member', name=name, item=item) - - def group_remove_member_group(self, name, item): - return self.group_remove_member(name=name, item={'group': item}) - - def group_remove_member_user(self, name, item): - return self.group_remove_member(name=name, item={'user': item}) - - -def get_group_dict(description=None, external=None, gid=None, nonposix=None): - group = {} - if description is not None: - group['description'] = description - if external is not None: - group['external'] = external - if gid is not None: - group['gidnumber'] = gid - if nonposix is not None: - group['nonposix'] = nonposix - return group - - -def get_group_diff(client, ipa_group, module_group): - # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed.
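- # group_mod accepts 'posix' (convert a non-POSIX group to POSIX) but not
- # 'nonposix', so nonposix=False is rewritten as posix=True before diffing;
- # 'external' is likewise dropped when the group already carries the
- # ipaexternalgroup objectclass.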
- if 'nonposix' in module_group: - # Only non-posix groups can be changed to posix - if not module_group['nonposix'] and ipa_group.get('nonposix'): - module_group['posix'] = True - del module_group['nonposix'] - - if 'external' in module_group: - if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'): - del module_group['external'] - - return client.get_diff(ipa_data=ipa_group, module_data=module_group) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - group = module.params['group'] - user = module.params['user'] - append = module.params['append'] - - module_group = get_group_dict(description=module.params['description'], external=module.params['external'], - gid=module.params['gidnumber'], nonposix=module.params['nonposix']) - ipa_group = client.group_find(name=name) - - changed = False - if state == 'present': - if not ipa_group: - changed = True - if not module.check_mode: - ipa_group = client.group_add(name, item=module_group) - else: - diff = get_group_diff(client, ipa_group, module_group) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_group.get(key) - client.group_mod(name=name, item=data) - - if group is not None: - changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group, - client.group_add_member_group, - client.group_remove_member_group, - append=append) or changed - - if user is not None: - changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user, - client.group_add_member_user, - client.group_remove_member_user, - append=append) or changed - - else: - if ipa_group: - changed = True - if not module.check_mode: - client.group_del(name) - - return changed, client.group_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - external=dict(type='bool'), - gidnumber=dict(type='str', aliases=['gid']), - group=dict(type='list', elements='str'), - nonposix=dict(type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent']), - user=dict(type='list', elements='str'), - append=dict(type='bool', default=False)) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - ) - - client = GroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, group = ensure(module, client) - module.exit_json(changed=changed, group=group) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_hbacrule.py b/plugins/modules/identity/ipa/ipa_hbacrule.py deleted file mode 100644 index 5f0704d58b..0000000000 --- a/plugins/modules/identity/ipa/ipa_hbacrule.py +++ /dev/null @@ -1,355 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_hbacrule -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA HBAC rule -description: -- Add, modify or delete an IPA HBAC rule using IPA API. 
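-# state does double duty: present/enabled and disabled toggle the rule's
-# ipaenabledflag ('TRUE'/'FALSE' in ensure() below), while absent deletes
-# the rule entirely.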
-options: - cn: - description: - - Canonical name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - description: - description: Description - type: str - host: - description: - - List of host names to assign. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. - required: false - type: list - elements: str - hostcategory: - description: Host category - choices: ['all'] - type: str - hostgroup: - description: - - List of hostgroup names to assign. - - If an empty list is passed all hostgroups will be removed from the rule. - - If option is omitted hostgroups will not be checked or changed. - type: list - elements: str - service: - description: - - List of service names to assign. - - If an empty list is passed all services will be removed from the rule. - - If option is omitted services will not be checked or changed. - type: list - elements: str - servicecategory: - description: Service category - choices: ['all'] - type: str - servicegroup: - description: - - List of service group names to assign. - - If an empty list is passed all assigned service groups will be removed from the rule. - - If option is omitted service groups will not be checked or changed. - type: list - elements: str - sourcehost: - description: - - List of source host names to assign. - - If an empty list is passed all assigned source hosts will be removed from the rule. - - If option is omitted source hosts will not be checked or changed. - type: list - elements: str - sourcehostcategory: - description: Source host category - choices: ['all'] - type: str - sourcehostgroup: - description: - - List of source host group names to assign. - - If an empty list is passed all assigned source host groups will be removed from the rule. - - If option is omitted source host groups will not be checked or changed. - type: list - elements: str - state: - description: State to ensure - default: "present" - choices: ["absent", "disabled", "enabled", "present"] - type: str - user: - description: - - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the rule. - - If option is omitted users will not be checked or changed. - type: list - elements: str - usercategory: - description: User category - choices: ['all'] - type: str - usergroup: - description: - - List of user group names to assign. - - If an empty list is passed all assigned user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed.
- type: list - elements: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure rule to allow all users to access any host from any host - community.general.ipa_hbacrule: - name: allow_all - description: Allow all users to access any host from any host - hostcategory: all - servicecategory: all - usercategory: all - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure rule with certain limitations - community.general.ipa_hbacrule: - name: allow_all_developers_access_to_db - description: Allow all developers to access any database from any host - hostgroup: - - db-server - usergroup: - - developers - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure rule is absent - community.general.ipa_hbacrule: - name: rule_to_be_deleted - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -hbacrule: - description: HBAC rule as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class HBACRuleIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(HBACRuleIPAClient, self).__init__(module, host, port, protocol) - - def hbacrule_find(self, name): - return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name}) - - def hbacrule_add(self, name, item): - return self._post_json(method='hbacrule_add', name=name, item=item) - - def hbacrule_mod(self, name, item): - return self._post_json(method='hbacrule_mod', name=name, item=item) - - def hbacrule_del(self, name): - return self._post_json(method='hbacrule_del', name=name) - - def hbacrule_add_host(self, name, item): - return self._post_json(method='hbacrule_add_host', name=name, item=item) - - def hbacrule_remove_host(self, name, item): - return self._post_json(method='hbacrule_remove_host', name=name, item=item) - - def hbacrule_add_service(self, name, item): - return self._post_json(method='hbacrule_add_service', name=name, item=item) - - def hbacrule_remove_service(self, name, item): - return self._post_json(method='hbacrule_remove_service', name=name, item=item) - - def hbacrule_add_user(self, name, item): - return self._post_json(method='hbacrule_add_user', name=name, item=item) - - def hbacrule_remove_user(self, name, item): - return self._post_json(method='hbacrule_remove_user', name=name, item=item) - - def hbacrule_add_sourcehost(self, name, item): - return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item) - - def hbacrule_remove_sourcehost(self, name, item): - return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item) - - -def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None, - sourcehostcategory=None, - usercategory=None): - data = {} - if description is not None: - data['description'] = description - if hostcategory is not None: - data['hostcategory'] = hostcategory - if ipaenabledflag is not None: - data['ipaenabledflag'] = ipaenabledflag - if servicecategory is not None: - data['servicecategory'] = servicecategory - if sourcehostcategory is not None: - data['sourcehostcategory'] = sourcehostcategory - if usercategory is not None: - 
data['usercategory'] = usercategory
-    return data
-
-
-def get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule):
-    return client.get_diff(ipa_data=ipa_hbacrule, module_data=module_hbacrule)
-
-
-def ensure(module, client):
-    name = module.params['cn']
-    state = module.params['state']
-
-    if state in ['present', 'enabled']:
-        ipaenabledflag = 'TRUE'
-    else:
-        ipaenabledflag = 'FALSE'
-
-    host = module.params['host']
-    hostcategory = module.params['hostcategory']
-    hostgroup = module.params['hostgroup']
-    service = module.params['service']
-    servicecategory = module.params['servicecategory']
-    servicegroup = module.params['servicegroup']
-    sourcehost = module.params['sourcehost']
-    sourcehostcategory = module.params['sourcehostcategory']
-    sourcehostgroup = module.params['sourcehostgroup']
-    user = module.params['user']
-    usercategory = module.params['usercategory']
-    usergroup = module.params['usergroup']
-
-    module_hbacrule = get_hbacrule_dict(description=module.params['description'],
-                                        hostcategory=hostcategory,
-                                        ipaenabledflag=ipaenabledflag,
-                                        servicecategory=servicecategory,
-                                        sourcehostcategory=sourcehostcategory,
-                                        usercategory=usercategory)
-    ipa_hbacrule = client.hbacrule_find(name=name)
-
-    changed = False
-    if state in ['present', 'enabled', 'disabled']:
-        if not ipa_hbacrule:
-            changed = True
-            if not module.check_mode:
-                ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
-        else:
-            diff = get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule)
-            if len(diff) > 0:
-                changed = True
-                if not module.check_mode:
-                    data = {}
-                    for key in diff:
-                        data[key] = module_hbacrule.get(key)
-                    client.hbacrule_mod(name=name, item=data)
-
-        if host is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
-                                            client.hbacrule_add_host,
-                                            client.hbacrule_remove_host, 'host') or changed
-
-        if hostgroup is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
-                                            client.hbacrule_add_host,
-                                            client.hbacrule_remove_host, 'hostgroup') or changed
-
-        if service is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
-                                            client.hbacrule_add_service,
-                                            client.hbacrule_remove_service, 'hbacsvc') or changed
-
-        if servicegroup is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
-                                            servicegroup,
-                                            client.hbacrule_add_service,
-                                            client.hbacrule_remove_service, 'hbacsvcgroup') or changed
-
-        if sourcehost is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
-                                            client.hbacrule_add_sourcehost,
-                                            client.hbacrule_remove_sourcehost, 'host') or changed
-
-        if sourcehostgroup is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
-                                            client.hbacrule_add_sourcehost,
-                                            client.hbacrule_remove_sourcehost, 'hostgroup') or changed
-
-        if user is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
-                                            client.hbacrule_add_user,
-                                            client.hbacrule_remove_user, 'user') or changed
-
-        if usergroup is not None:
-            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
-                                            client.hbacrule_add_user,
-                                            client.hbacrule_remove_user, 'group') or changed
-    else:
-        if ipa_hbacrule:
-            changed = True
-            if not module.check_mode:
-                client.hbacrule_del(name=name)
-
-    return changed, client.hbacrule_find(name=name)
-
-
-def main():
-    argument_spec = ipa_argument_spec()
-    argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
-                         description=dict(type='str'),
-                         host=dict(type='list', elements='str'),
-                         hostcategory=dict(type='str', choices=['all']),
-                         hostgroup=dict(type='list', elements='str'),
-                         service=dict(type='list', elements='str'),
-                         servicecategory=dict(type='str', choices=['all']),
-                         servicegroup=dict(type='list', elements='str'),
-                         sourcehost=dict(type='list', elements='str'),
-                         sourcehostcategory=dict(type='str', choices=['all']),
-                         sourcehostgroup=dict(type='list', elements='str'),
-                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
-                         user=dict(type='list', elements='str'),
-                         usercategory=dict(type='str', choices=['all']),
-                         usergroup=dict(type='list', elements='str'))
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True
-                           )
-
-    client = HBACRuleIPAClient(module=module,
-                               host=module.params['ipa_host'],
-                               port=module.params['ipa_port'],
-                               protocol=module.params['ipa_prot'])
-
-    try:
-        client.login(username=module.params['ipa_user'],
-                     password=module.params['ipa_pass'])
-        changed, hbacrule = ensure(module, client)
-        module.exit_json(changed=changed, hbacrule=hbacrule)
-    except Exception as e:
-        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/identity/ipa/ipa_host.py b/plugins/modules/identity/ipa/ipa_host.py
deleted file mode 100644
index 25c65f0b34..0000000000
--- a/plugins/modules/identity/ipa/ipa_host.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: ipa_host
-author: Thomas Krahn (@Nosmoht)
-short_description: Manage FreeIPA host
-description:
-- Add, modify and delete an IPA host using IPA API.
-options:
-  fqdn:
-    description:
-    - Fully qualified domain name.
-    - Can not be changed as it is the unique identifier.
-    required: true
-    aliases: ["name"]
-    type: str
-  description:
-    description:
-    - A description of this host.
-    type: str
-  force:
-    description:
-    - Force host name even if not in DNS.
-    required: false
-    type: bool
-  ip_address:
-    description:
-    - Add the host to DNS with this IP address.
-    type: str
-  mac_address:
-    description:
-    - List of hardware MAC address(es) of this host.
-    - If option is omitted MAC addresses will not be checked or changed.
-    - If an empty list is passed all assigned MAC addresses will be removed.
-    - MAC addresses that are already assigned but not passed will be removed.
-    aliases: ["macaddress"]
-    type: list
-    elements: str
-  ns_host_location:
-    description:
-    - Host location (e.g. "Lab 2").
-    aliases: ["nshostlocation"]
-    type: str
-  ns_hardware_platform:
-    description:
-    - Host hardware platform (e.g. "Lenovo T61").
-    aliases: ["nshardwareplatform"]
-    type: str
-  ns_os_version:
-    description:
-    - Host operating system and version (e.g. "Fedora 9").
-    aliases: ["nsosversion"]
-    type: str
-  user_certificate:
-    description:
-    - List of Base-64 encoded server certificates.
-    - If option is omitted certificates will not be checked or changed.
-    - If an empty list is passed all assigned certificates will be removed.
-    - Certificates already assigned but not passed will be removed.
- aliases: ["usercertificate"] - type: list - elements: str - state: - description: State to ensure. - default: present - choices: ["absent", "disabled", "enabled", "present"] - type: str - update_dns: - description: - - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS. - - This option has no effect for states other than "absent". - type: bool - random_password: - description: Generate a random password to be used in bulk enrollment. - type: bool -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure host is present - community.general.ipa_host: - name: host01.example.com - description: Example host - ip_address: 192.168.0.123 - ns_host_location: Lab - ns_os_version: CentOS 7 - ns_hardware_platform: Lenovo T61 - mac_address: - - "08:00:27:E3:B1:2D" - - "52:54:00:BD:97:1E" - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Generate a random password for bulk enrolment - community.general.ipa_host: - name: host01.example.com - description: Example host - ip_address: 192.168.0.123 - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - validate_certs: False - random_password: True - -- name: Ensure host is disabled - community.general.ipa_host: - name: host01.example.com - state: disabled - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that all user certificates are removed - community.general.ipa_host: - name: host01.example.com - user_certificate: [] - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure host is absent - community.general.ipa_host: - name: host01.example.com - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure host and its DNS record is absent - community.general.ipa_host: - name: host01.example.com - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - update_dns: True -''' - -RETURN = r''' -host: - description: Host as returned by IPA API. 
- returned: always - type: dict -host_diff: - description: List of options that differ and would be changed - returned: if check mode and a difference is found - type: list -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class HostIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(HostIPAClient, self).__init__(module, host, port, protocol) - - def host_show(self, name): - return self._post_json(method='host_show', name=name) - - def host_find(self, name): - return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) - - def host_add(self, name, host): - return self._post_json(method='host_add', name=name, item=host) - - def host_mod(self, name, host): - return self._post_json(method='host_mod', name=name, item=host) - - def host_del(self, name, update_dns): - return self._post_json(method='host_del', name=name, item={'updatedns': update_dns}) - - def host_disable(self, name): - return self._post_json(method='host_disable', name=name) - - -def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, - ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): - data = {} - if description is not None: - data['description'] = description - if force is not None: - data['force'] = force - if ip_address is not None: - data['ip_address'] = ip_address - if ns_host_location is not None: - data['nshostlocation'] = ns_host_location - if ns_hardware_platform is not None: - data['nshardwareplatform'] = ns_hardware_platform - if ns_os_version is not None: - data['nsosversion'] = ns_os_version - if user_certificate is not None: - data['usercertificate'] = [{"__base64__": item} for item in user_certificate] - if mac_address is not None: - data['macaddress'] = mac_address - if random_password is not None: - data['random'] = random_password - return data - - -def get_host_diff(client, ipa_host, module_host): - non_updateable_keys = ['force', 'ip_address'] - if not module_host.get('random'): - non_updateable_keys.append('random') - for key in non_updateable_keys: - if key in module_host: - del module_host[key] - - return client.get_diff(ipa_data=ipa_host, module_data=module_host) - - -def ensure(module, client): - name = module.params['fqdn'] - state = module.params['state'] - - ipa_host = client.host_find(name=name) - module_host = get_host_dict(description=module.params['description'], - force=module.params['force'], ip_address=module.params['ip_address'], - ns_host_location=module.params['ns_host_location'], - ns_hardware_platform=module.params['ns_hardware_platform'], - ns_os_version=module.params['ns_os_version'], - user_certificate=module.params['user_certificate'], - mac_address=module.params['mac_address'], - random_password=module.params.get('random_password'), - ) - changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_host: - changed = True - if not module.check_mode: - # OTP password generated by FreeIPA is visible only for host_add command - # so, return directly from here. 
-                return changed, client.host_add(name=name, host=module_host)
-        else:
-            diff = get_host_diff(client, ipa_host, module_host)
-            if len(diff) > 0:
-                changed = True
-                if not module.check_mode:
-                    data = {}
-                    for key in diff:
-                        data[key] = module_host.get(key)
-                    ipa_host_show = client.host_show(name=name)
-                    if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
-                        client.host_disable(name=name)
-                    return changed, client.host_mod(name=name, host=data)
-
-    else:
-        if ipa_host:
-            changed = True
-            update_dns = module.params.get('update_dns', False)
-            if not module.check_mode:
-                client.host_del(name=name, update_dns=update_dns)
-
-    return changed, client.host_find(name=name)
-
-
-def main():
-    argument_spec = ipa_argument_spec()
-    argument_spec.update(description=dict(type='str'),
-                         fqdn=dict(type='str', required=True, aliases=['name']),
-                         force=dict(type='bool'),
-                         ip_address=dict(type='str'),
-                         ns_host_location=dict(type='str', aliases=['nshostlocation']),
-                         ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
-                         ns_os_version=dict(type='str', aliases=['nsosversion']),
-                         user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
-                         mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
-                         update_dns=dict(type='bool'),
-                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
-                         random_password=dict(type='bool', no_log=False),)
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True)
-
-    client = HostIPAClient(module=module,
-                           host=module.params['ipa_host'],
-                           port=module.params['ipa_port'],
-                           protocol=module.params['ipa_prot'])
-
-    try:
-        client.login(username=module.params['ipa_user'],
-                     password=module.params['ipa_pass'])
-        changed, host = ensure(module, client)
-        module.exit_json(changed=changed, host=host)
-    except Exception as e:
-        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/identity/ipa/ipa_hostgroup.py b/plugins/modules/identity/ipa/ipa_hostgroup.py
deleted file mode 100644
index 9d5c6f99c7..0000000000
--- a/plugins/modules/identity/ipa/ipa_hostgroup.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: ipa_hostgroup
-author: Thomas Krahn (@Nosmoht)
-short_description: Manage FreeIPA host-group
-description:
-- Add, modify and delete an IPA host-group using IPA API.
-options:
-  cn:
-    description:
-    - Name of host-group.
-    - Can not be changed as it is the unique identifier.
-    required: true
-    aliases: ["name"]
-    type: str
-  description:
-    description:
-    - Description.
-    type: str
-  host:
-    description:
-    - List of hosts that belong to the host-group.
-    - If an empty list is passed all hosts will be removed from the group.
-    - If option is omitted hosts will not be checked or changed.
-    - If option is passed all assigned hosts that are not passed will be unassigned from the group.
-    type: list
-    elements: str
-  hostgroup:
-    description:
-    - List of host-groups that belong to that host-group.
-    - If an empty list is passed all host-groups will be removed from the group.
-    - If option is omitted host-groups will not be checked or changed.
- - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. - type: list - elements: str - state: - description: - - State to ensure. - default: "present" - choices: ["absent", "disabled", "enabled", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure host-group databases is present - community.general.ipa_hostgroup: - name: databases - state: present - host: - - db.example.com - hostgroup: - - mysql-server - - oracle-server - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure host-group databases is absent - community.general.ipa_hostgroup: - name: databases - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -hostgroup: - description: Hostgroup as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class HostGroupIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(HostGroupIPAClient, self).__init__(module, host, port, protocol) - - def hostgroup_find(self, name): - return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) - - def hostgroup_add(self, name, item): - return self._post_json(method='hostgroup_add', name=name, item=item) - - def hostgroup_mod(self, name, item): - return self._post_json(method='hostgroup_mod', name=name, item=item) - - def hostgroup_del(self, name): - return self._post_json(method='hostgroup_del', name=name) - - def hostgroup_add_member(self, name, item): - return self._post_json(method='hostgroup_add_member', name=name, item=item) - - def hostgroup_add_host(self, name, item): - return self.hostgroup_add_member(name=name, item={'host': item}) - - def hostgroup_add_hostgroup(self, name, item): - return self.hostgroup_add_member(name=name, item={'hostgroup': item}) - - def hostgroup_remove_member(self, name, item): - return self._post_json(method='hostgroup_remove_member', name=name, item=item) - - def hostgroup_remove_host(self, name, item): - return self.hostgroup_remove_member(name=name, item={'host': item}) - - def hostgroup_remove_hostgroup(self, name, item): - return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) - - -def get_hostgroup_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup): - return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup) - - -def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] - host = module.params['host'] - hostgroup = module.params['hostgroup'] - - ipa_hostgroup = client.hostgroup_find(name=name) - module_hostgroup = get_hostgroup_dict(description=module.params['description']) - - changed = False - if state == 'present': - if not ipa_hostgroup: - changed = True - if not module.check_mode: - ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup) - else: - diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_hostgroup.get(key) - 
client.hostgroup_mod(name=name, item=data) - - if host is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host], - client.hostgroup_add_host, client.hostgroup_remove_host) or changed - - if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []), - [item.lower() for item in hostgroup], - client.hostgroup_add_hostgroup, - client.hostgroup_remove_hostgroup) or changed - - else: - if ipa_hostgroup: - changed = True - if not module.check_mode: - client.hostgroup_del(name=name) - - return changed, client.hostgroup_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostgroup=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = HostGroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, hostgroup = ensure(module, client) - module.exit_json(changed=changed, hostgroup=hostgroup) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_otpconfig.py b/plugins/modules/identity/ipa/ipa_otpconfig.py deleted file mode 100644 index 9a10baec0b..0000000000 --- a/plugins/modules/identity/ipa/ipa_otpconfig.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Ansible Project -# Heavily influenced from Fran Fitzpatrick ipa_config module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_otpconfig -author: justchris1 (@justchris1) -short_description: Manage FreeIPA OTP Configuration Settings -version_added: 2.5.0 -description: -- Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). -options: - ipatokentotpauthwindow: - description: TOTP authentication window in seconds. - aliases: ["totpauthwindow"] - type: int - ipatokentotpsyncwindow: - description: TOTP synchronization window in seconds. - aliases: ["totpsyncwindow"] - type: int - ipatokenhotpauthwindow: - description: HOTP authentication window in number of hops. - aliases: ["hotpauthwindow"] - type: int - ipatokenhotpsyncwindow: - description: HOTP synchronization window in hops. 
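As a conceptual aside on the window options listed here (illustrative only, not FreeIPA's implementation): an authentication window of W seconds means the server will accept a code belonging to any TOTP step within W seconds of the current time.

```python
import time

# Illustration only: which 30-second TOTP steps a server with an
# authentication window of `window` seconds would accept around "now".
def acceptable_totp_steps(window, interval=30, now=None):
    now = time.time() if now is None else now
    first = int((now - window) // interval)
    last = int((now + window) // interval)
    return range(first, last + 1)

# e.g. acceptable_totp_steps(300) spans roughly 21 steps around the current time
```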
- aliases: ["hotpsyncwindow"] - type: int -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure the TOTP authentication window is set to 300 seconds - community.general.ipa_otpconfig: - ipatokentotpauthwindow: '300' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the TOTP syncronization window is set to 86400 seconds - community.general.ipa_otpconfig: - ipatokentotpsyncwindow: '86400' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the HOTP authentication window is set to 10 hops - community.general.ipa_otpconfig: - ipatokenhotpauthwindow: '10' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the HOTP syncronization window is set to 100 hops - community.general.ipa_otpconfig: - ipatokenhotpsyncwindow: '100' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret -''' - -RETURN = r''' -otpconfig: - description: OTP configuration as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class OTPConfigIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(OTPConfigIPAClient, self).__init__(module, host, port, protocol) - - def otpconfig_show(self): - return self._post_json(method='otpconfig_show', name=None) - - def otpconfig_mod(self, name, item): - return self._post_json(method='otpconfig_mod', name=name, item=item) - - -def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, - ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None): - - config = {} - if ipatokentotpauthwindow is not None: - config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow) - if ipatokentotpsyncwindow is not None: - config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow) - if ipatokenhotpauthwindow is not None: - config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow) - if ipatokenhotpsyncwindow is not None: - config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow) - - return config - - -def get_otpconfig_diff(client, ipa_config, module_config): - return client.get_diff(ipa_data=ipa_config, module_data=module_config) - - -def ensure(module, client): - module_otpconfig = get_otpconfig_dict( - ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'), - ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'), - ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'), - ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'), - ) - ipa_otpconfig = client.otpconfig_show() - diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig) - - changed = False - new_otpconfig = {} - for module_key in diff: - if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None): - changed = True - new_otpconfig.update({module_key: module_otpconfig.get(module_key)}) - - if changed and not module.check_mode: - client.otpconfig_mod(name=None, item=new_otpconfig) - - return changed, client.otpconfig_show() - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update( - ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False), - ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False), - 
ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False), - ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = OTPConfigIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, otpconfig = ensure(module, client) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, otpconfig=otpconfig) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_otptoken.py b/plugins/modules/identity/ipa/ipa_otptoken.py deleted file mode 100644 index 4027a1c459..0000000000 --- a/plugins/modules/identity/ipa/ipa_otptoken.py +++ /dev/null @@ -1,527 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_otptoken -author: justchris1 (@justchris1) -short_description: Manage FreeIPA OTPs -version_added: 2.5.0 -description: -- Add, modify, and delete One Time Passwords in IPA. -options: - uniqueid: - description: Unique ID of the token in IPA. - required: true - aliases: ["name"] - type: str - newuniqueid: - description: If specified, the unique id specified will be changed to this. - type: str - otptype: - description: - - Type of OTP. - - "B(Note:) Cannot be modified after OTP is created." - type: str - choices: [ totp, hotp ] - secretkey: - description: - - Token secret (Base64). - - If OTP is created and this is not specified, a random secret will be generated by IPA. - - "B(Note:) Cannot be modified after OTP is created." - type: str - description: - description: Description of the token (informational only). - type: str - owner: - description: Assigned user of the token. - type: str - enabled: - description: Mark the token as enabled (default C(true)). - default: true - type: bool - notbefore: - description: - - First date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. - type: str - notafter: - description: - - Last date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. - type: str - vendor: - description: Token vendor name (informational only). - type: str - model: - description: Token model (informational only). - type: str - serial: - description: Token serial (informational only). - type: str - state: - description: State to ensure. - choices: ['present', 'absent'] - default: 'present' - type: str - algorithm: - description: - - Token hash algorithm. - - "B(Note:) Cannot be modified after OTP is created." - choices: ['sha1', 'sha256', 'sha384', 'sha512'] - type: str - digits: - description: - - Number of digits each token code will have. - - "B(Note:) Cannot be modified after OTP is created." - choices: [ 6, 8 ] - type: int - offset: - description: - - TOTP token / IPA server time difference. - - "B(Note:) Cannot be modified after OTP is created." 
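A small illustration of how the I(offset) and I(interval) attributes interact in TOTP counter derivation (standard RFC 6238 arithmetic; illustrative, not code from this module):

```python
import time

# Illustrative RFC 6238 arithmetic: the clock offset shifts the timestamp
# before it is divided into interval-sized steps.
def totp_counter(offset=0, interval=30, now=None):
    now = time.time() if now is None else now
    return int((now + offset) // interval)

# A token whose clock runs 45 seconds fast would use offset=45 to stay in sync.
```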
-    type: int
-  interval:
-    description:
-    - Length of TOTP token code validity in seconds.
-    - "B(Note:) Cannot be modified after OTP is created."
-    type: int
-  counter:
-    description:
-    - Initial counter for the HOTP token.
-    - "B(Note:) Cannot be modified after OTP is created."
-    type: int
-extends_documentation_fragment:
-- community.general.ipa.documentation
-'''
-
-EXAMPLES = r'''
-- name: Create a totp for pinky, allowing the IPA server to generate using defaults
  community.general.ipa_otptoken:
    uniqueid: Token123
    otptype: totp
    owner: pinky
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret

- name: Create an 8-digit hotp for pinky with sha256, with specified validity times
  community.general.ipa_otptoken:
    uniqueid: Token123
    enabled: true
    otptype: hotp
    digits: 8
    secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9
    algorithm: sha256
    notbefore: 20180121182123
    notafter: 20220121182123
    owner: pinky
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret

- name: Update Token123 to indicate a vendor, model, serial number (info only), and description
  community.general.ipa_otptoken:
    uniqueid: Token123
    vendor: Acme
    model: acme101
    serial: SerialNumber1
    description: Acme OTP device
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret

- name: Disable Token123
  community.general.ipa_otptoken:
    uniqueid: Token123
    enabled: false
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret

- name: Rename Token123 to TokenABC and enable it
  community.general.ipa_otptoken:
    uniqueid: Token123
    newuniqueid: TokenABC
    enabled: true
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret
-'''
-
-RETURN = r'''
-otptoken:
-  description: OTP token as returned by IPA API.
-  returned: always
-  type: dict
-'''
-
-import base64
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, sanitize_keys
-from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
-from ansible.module_utils.common.text.converters import to_native
-
-
-class OTPTokenIPAClient(IPAClient):
-    def __init__(self, module, host, port, protocol):
-        super(OTPTokenIPAClient, self).__init__(module, host, port, protocol)
-
-    def otptoken_find(self, name):
-        return self._post_json(method='otptoken_find', name=None, item={'all': True,
-                                                                        'ipatokenuniqueid': name,
-                                                                        'timelimit': '0',
-                                                                        'sizelimit': '0'})
-
-    def otptoken_add(self, name, item):
-        return self._post_json(method='otptoken_add', name=name, item=item)
-
-    def otptoken_mod(self, name, item):
-        return self._post_json(method='otptoken_mod', name=name, item=item)
-
-    def otptoken_del(self, name):
-        return self._post_json(method='otptoken_del', name=name)
-
-
-def base64_to_base32(base64_string):
-    """Converts base64 string to base32 string"""
-    b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii')
-    return b32_string
-
-
-def base32_to_base64(base32_string):
-    """Converts base32 string to base64 string"""
-    b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii')
-    return b64_string
-
-
-def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None,
-                      enabled=None, notbefore=None, notafter=None, vendor=None,
-                      model=None, serial=None, algorithm=None, digits=None, offset=None,
-                      interval=None, counter=None):
-    """Create the dictionary of settings passed in"""
-
-    otptoken = {}
-    if
uniqueid is not None: - otptoken[ansible_to_ipa['uniqueid']] = uniqueid - if newuniqueid is not None: - otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid - if otptype is not None: - otptoken[ansible_to_ipa['otptype']] = otptype.upper() - if secretkey is not None: - # For some unknown reason, while IPA returns the secret in base64, - # it wants the secret passed in as base32. This makes it more difficult - # for comparison (does 'current' equal to 'new'). Moreover, this may - # cause some subtle issue in a playbook as the output is encoded - # in a different way than if it was passed in as a parameter. For - # these reasons, have the module standardize on base64 input (as parameter) - # and output (from IPA). - otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey) - if description is not None: - otptoken[ansible_to_ipa['description']] = description - if owner is not None: - otptoken[ansible_to_ipa['owner']] = owner - if enabled is not None: - otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE' - if notbefore is not None: - otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z' - if notafter is not None: - otptoken[ansible_to_ipa['notafter']] = notafter + 'Z' - if vendor is not None: - otptoken[ansible_to_ipa['vendor']] = vendor - if model is not None: - otptoken[ansible_to_ipa['model']] = model - if serial is not None: - otptoken[ansible_to_ipa['serial']] = serial - if algorithm is not None: - otptoken[ansible_to_ipa['algorithm']] = algorithm - if digits is not None: - otptoken[ansible_to_ipa['digits']] = str(digits) - if offset is not None: - otptoken[ansible_to_ipa['offset']] = str(offset) - if interval is not None: - otptoken[ansible_to_ipa['interval']] = str(interval) - if counter is not None: - otptoken[ansible_to_ipa['counter']] = str(counter) - - return otptoken - - -def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): - """Transform the output received by IPA to a format more friendly - before it is returned to the user. IPA returns even simple - strings as a list of strings. It also returns bools and - int as string. This function cleans that up before return. - """ - updated_otptoken = ipa_otptoken - - # Used to hold values that will be sanitized from output as no_log. - # For the case where secretkey is not specified at the module, but - # is passed back from IPA. - sanitize_strings = set() - - # Rename the IPA parameters to the more friendly ansible module names for them - for ipa_parameter in ipa_to_ansible: - if ipa_parameter in ipa_otptoken: - updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter] - updated_otptoken.pop(ipa_parameter) - - # Change the type from IPA's list of string to the appropriate return value type - # based on field. By default, assume they should be strings. 
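The loop that follows implements the unwrapping just described. A condensed, standalone equivalent (with a hypothetical helper name, not the module's own code) looks like this:

```python
# Hypothetical condensed form of the unwrapping below: IPA returns most
# attributes as a list of one string, e.g. {'ipatokenotpdigits': ['6']}.
INT_FIELDS = ('digits', 'offset', 'interval', 'counter')

def unwrap(key, value):
    if isinstance(value, list) and len(value) == 1:
        value = value[0]
        if key in INT_FIELDS:
            value = int(value)
        elif key == 'enabled':
            value = bool(value)
    return value

# e.g. unwrap('digits', ['6']) -> 6, unwrap('owner', ['pinky']) -> 'pinky'
```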
-    for ansible_parameter in ansible_to_ipa:
-        if ansible_parameter in updated_otptoken:
-            if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1:
-                if ansible_parameter in ['digits', 'offset', 'interval', 'counter']:
-                    updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0])
-                elif ansible_parameter == 'enabled':
-                    updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0])
-                else:
-                    updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0]
-
-    if 'secretkey' in updated_otptoken:
-        if isinstance(updated_otptoken['secretkey'], dict):
-            if '__base64__' in updated_otptoken['secretkey']:
-                sanitize_strings.add(updated_otptoken['secretkey']['__base64__'])
-                b64key = updated_otptoken['secretkey']['__base64__']
-                updated_otptoken.pop('secretkey')
-                updated_otptoken['secretkey'] = b64key
-                sanitize_strings.add(b64key)
-            elif '__base32__' in updated_otptoken['secretkey']:
-                sanitize_strings.add(updated_otptoken['secretkey']['__base32__'])
-                b32key = updated_otptoken['secretkey']['__base32__']
-                b64key = base32_to_base64(b32key)
-                updated_otptoken.pop('secretkey')
-                updated_otptoken['secretkey'] = b64key
-                sanitize_strings.add(b32key)
-                sanitize_strings.add(b64key)
-
-    return updated_otptoken, sanitize_strings
-
-
-def validate_modifications(ansible_to_ipa, module, ipa_otptoken,
-                           module_otptoken, unmodifiable_after_creation):
-    """Checks to see if the requested modifications are valid. Some elements
-       cannot be modified after initial creation. However, we still want to
-       validate arguments that are specified, but are not different than what
-       is currently set on the server.
-    """
-
-    modifications_valid = True
-
-    for parameter in unmodifiable_after_creation:
-        if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken:
-            mod_value = module_otptoken[ansible_to_ipa[parameter]]
-
-            # For some unknown reason, the returns from IPA put almost all
-            # values in a list, even though passing them in a list (even of
-            # length 1) will be rejected. The module values for all elements
-            # other than type (totp or hotp) have this happen.
-            if parameter == 'otptype':
-                ipa_value = ipa_otptoken[ansible_to_ipa[parameter]]
-            else:
-                if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1:
-                    module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " +
-                                          "of length 1. Please open a bug report for the module."))
-                if parameter == 'secretkey':
-                    # We stored the secret key in base32 since we had assumed that would need to
-                    # be the format if we were contacting IPA to create it. However, we are
-                    # now comparing it against what is already set in the IPA server, so convert
-                    # back to base64 for comparison.
-                    mod_value = base32_to_base64(mod_value)
-
-                    # For the secret key, it is even more specific in that the key is returned
-                    # in a dict, in the list, as the __base64__ entry for the IPA response.
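Since the base32/base64 mismatch described above is easy to get wrong, here is a quick standalone round-trip check of the conversions the module relies on (values are illustrative):

```python
import base64

# Round-trip check: IPA returns the secret in base64 but expects base32 on
# input, so the module converts in both directions. Values are illustrative.
secret_b64 = base64.b64encode(b'not-a-real-secret').decode('ascii')
secret_b32 = base64.b32encode(base64.b64decode(secret_b64)).decode('ascii')
assert base64.b64encode(base64.b32decode(secret_b32)).decode('ascii') == secret_b64
```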
-                    if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
-                        ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__']
-                    elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
-                        b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__']
-                        b64key = base32_to_base64(b32key)
-                        ipa_value = b64key
-                    else:
-                        ipa_value = None
-                else:
-                    ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]
-
-            if mod_value != ipa_value:
-                modifications_valid = False
-                fail_message = ("Parameter '" + parameter + "' cannot be changed once " +
-                                "the OTP is created and the requested value specified here (" +
-                                str(mod_value) +
-                                ") differs from what is set in the IPA server ("
-                                + str(ipa_value) + ")")
-                module.fail_json(msg=fail_message)
-
-    return modifications_valid
-
-
-def ensure(module, client):
-    # dict to map from ansible parameter names to attribute names
-    # used by IPA (which are not so friendly).
-    ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid',
-                      'newuniqueid': 'rename',
-                      'otptype': 'type',
-                      'secretkey': 'ipatokenotpkey',
-                      'description': 'description',
-                      'owner': 'ipatokenowner',
-                      'enabled': 'ipatokendisabled',
-                      'notbefore': 'ipatokennotbefore',
-                      'notafter': 'ipatokennotafter',
-                      'vendor': 'ipatokenvendor',
-                      'model': 'ipatokenmodel',
-                      'serial': 'ipatokenserial',
-                      'algorithm': 'ipatokenotpalgorithm',
-                      'digits': 'ipatokenotpdigits',
-                      'offset': 'ipatokentotpclockoffset',
-                      'interval': 'ipatokentotptimestep',
-                      'counter': 'ipatokenhotpcounter'}
-
-    # Create inverse dictionary for mapping return values
-    ipa_to_ansible = {}
-    for (k, v) in ansible_to_ipa.items():
-        ipa_to_ansible[v] = k
-
-    unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm',
-                                   'digits', 'offset', 'interval', 'counter']
-    state = module.params['state']
-    uniqueid = module.params['uniqueid']
-
-    module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa,
-                                        uniqueid=module.params.get('uniqueid'),
-                                        newuniqueid=module.params.get('newuniqueid'),
-                                        otptype=module.params.get('otptype'),
-                                        secretkey=module.params.get('secretkey'),
-                                        description=module.params.get('description'),
-                                        owner=module.params.get('owner'),
-                                        enabled=module.params.get('enabled'),
-                                        notbefore=module.params.get('notbefore'),
-                                        notafter=module.params.get('notafter'),
-                                        vendor=module.params.get('vendor'),
-                                        model=module.params.get('model'),
-                                        serial=module.params.get('serial'),
-                                        algorithm=module.params.get('algorithm'),
-                                        digits=module.params.get('digits'),
-                                        offset=module.params.get('offset'),
-                                        interval=module.params.get('interval'),
-                                        counter=module.params.get('counter'))
-
-    ipa_otptoken = client.otptoken_find(name=uniqueid)
-
-    if ansible_to_ipa['newuniqueid'] in module_otptoken:
-        # Check to see if the new unique id is already in use
-        ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']])
-        if ipa_otptoken_new:
-            module.fail_json(msg=("Requested rename through newuniqueid to " +
-                                  module_otptoken[ansible_to_ipa['newuniqueid']] +
-                                  " failed because the new unique id is already in use"))
-
-    changed = False
-    if state == 'present':
-        if not ipa_otptoken:
-            changed = True
-            if not module.check_mode:
-                # It would not make sense to have a rename after creation, so if the user
-                # specified a newuniqueid, just replace the uniqueid with the updated one
-                # before creation
-                if ansible_to_ipa['newuniqueid'] in module_otptoken:
-                    module_otptoken[ansible_to_ipa['uniqueid']] = module_otptoken[ansible_to_ipa['newuniqueid']]
-                    uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']]
-                    module_otptoken.pop(ansible_to_ipa['newuniqueid'])
-
-                # IPA wants the unique id in the first position and not as a key/value pair.
-                # Get rid of it from the otptoken dict and just specify it in the name field
-                # for otptoken_add.
-                if ansible_to_ipa['uniqueid'] in module_otptoken:
-                    module_otptoken.pop(ansible_to_ipa['uniqueid'])
-
-                module_otptoken['all'] = True
-                ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken)
-        else:
-            if not (validate_modifications(ansible_to_ipa, module, ipa_otptoken,
-                                           module_otptoken, unmodifiable_after_creation)):
-                module.fail_json(msg="Modifications requested in module are not valid")
-
-            # IPA will reject 'modifications' that do not actually modify anything
-            # if any of the unmodifiable elements are specified. Explicitly
-            # get rid of them here. They were not different, or else we would
-            # have failed out in validate_modifications.
-            for x in unmodifiable_after_creation:
-                if ansible_to_ipa[x] in module_otptoken:
-                    module_otptoken.pop(ansible_to_ipa[x])
-
-            diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken)
-            if len(diff) > 0:
-                changed = True
-                if not module.check_mode:
-
-                    # IPA wants the unique id in the first position and not as a key/value pair.
-                    # Get rid of it from the otptoken dict and just specify it in the name field
-                    # for otptoken_mod.
-                    if ansible_to_ipa['uniqueid'] in module_otptoken:
-                        module_otptoken.pop(ansible_to_ipa['uniqueid'])
-
-                    module_otptoken['all'] = True
-                    ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken)
-    else:
-        if ipa_otptoken:
-            changed = True
-            if not module.check_mode:
-                client.otptoken_del(name=uniqueid)
-
-    # Transform the output to use ansible keywords (not the IPA keywords) and
-    # sanitize any key values in the output.
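For context on the sanitization step mentioned here: any string registered in a module's C(no_log_values) is scrubbed before results are returned. A minimal demonstration of the value-side scrubbing (a sketch, not module code; C(sanitize_keys), used below, covers key names, while C(exit_json) applies C(remove_values) to returned values):

```python
# Minimal demonstration of Ansible's no_log scrubbing of returned values.
from ansible.module_utils.basic import remove_values

result = remove_values({'secretkey': 'hunter2'}, {'hunter2'})
# result == {'secretkey': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'}
```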
- ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible) - module.no_log_values = module.no_log_values.union(sanitize_strings) - sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values) - return changed, sanitized_otptoken - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True), - newuniqueid=dict(type='str'), - otptype=dict(type='str', choices=['totp', 'hotp']), - secretkey=dict(type='str', no_log=True), - description=dict(type='str'), - owner=dict(type='str'), - enabled=dict(type='bool', default=True), - notbefore=dict(type='str'), - notafter=dict(type='str'), - vendor=dict(type='str'), - model=dict(type='str'), - serial=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']), - digits=dict(type='int', choices=[6, 8]), - offset=dict(type='int'), - interval=dict(type='int'), - counter=dict(type='int')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = OTPTokenIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, otptoken = ensure(module, client) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, otptoken=otptoken) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_pwpolicy.py b/plugins/modules/identity/ipa/ipa_pwpolicy.py deleted file mode 100644 index 0f9b141b4c..0000000000 --- a/plugins/modules/identity/ipa/ipa_pwpolicy.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_pwpolicy -author: Adralioh (@adralioh) -short_description: Manage FreeIPA password policies -description: -- Add, modify, or delete a password policy using the IPA API. -version_added: 2.0.0 -options: - group: - description: - - Name of the group that the policy applies to. - - If omitted, the global policy is used. - aliases: ["name"] - type: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - maxpwdlife: - description: Maximum password lifetime (in days). - type: str - minpwdlife: - description: Minimum password lifetime (in hours). - type: str - historylength: - description: - - Number of previous passwords that are remembered. - - Users cannot reuse remembered passwords. - type: str - minclasses: - description: Minimum number of character classes. - type: str - minlength: - description: Minimum password length. - type: str - priority: - description: - - Priority of the policy. - - High number means lower priority. - - Required when C(cn) is not the global policy. - type: str - maxfailcount: - description: Maximum number of consecutive failures before lockout. - type: str - failinterval: - description: Period (in seconds) after which the number of failed login attempts is reset. 
- type: str - lockouttime: - description: Period (in seconds) for which users are locked out. - type: str -extends_documentation_fragment: -- community.general.ipa.documentation -notes: -- Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Modify the global password policy - community.general.ipa_pwpolicy: - maxpwdlife: '90' - minpwdlife: '1' - historylength: '8' - minclasses: '3' - minlength: '16' - maxfailcount: '6' - failinterval: '60' - lockouttime: '600' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure the password policy for the group admins is present - community.general.ipa_pwpolicy: - group: admins - state: present - maxpwdlife: '60' - minpwdlife: '24' - historylength: '16' - minclasses: '4' - priority: '10' - maxfailcount: '4' - failinterval: '600' - lockouttime: '1200' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that the group sysops does not have a unique password policy - community.general.ipa_pwpolicy: - group: sysops - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -pwpolicy: - description: Password policy as returned by IPA API. - returned: always - type: dict - sample: - cn: ['admins'] - cospriority: ['10'] - dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' - krbmaxpwdlife: ['60'] - krbminpwdlife: ['24'] - krbpwdfailurecountinterval: ['600'] - krbpwdhistorylength: ['16'] - krbpwdlockoutduration: ['1200'] - krbpwdmaxfailure: ['4'] - krbpwdmindiffchars: ['4'] - objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class PwPolicyIPAClient(IPAClient): - '''The global policy will be selected when `name` is `None`''' - def __init__(self, module, host, port, protocol): - super(PwPolicyIPAClient, self).__init__(module, host, port, protocol) - - def pwpolicy_find(self, name): - if name is None: - # Manually set the cn to the global policy because pwpolicy_find will return a random - # different policy if cn is `None` - name = 'global_policy' - return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) - - def pwpolicy_add(self, name, item): - return self._post_json(method='pwpolicy_add', name=name, item=item) - - def pwpolicy_mod(self, name, item): - return self._post_json(method='pwpolicy_mod', name=name, item=item) - - def pwpolicy_del(self, name): - return self._post_json(method='pwpolicy_del', name=name) - - -def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, - minlength=None, priority=None, maxfailcount=None, failinterval=None, - lockouttime=None): - pwpolicy = {} - if maxpwdlife is not None: - pwpolicy['krbmaxpwdlife'] = maxpwdlife - if minpwdlife is not None: - pwpolicy['krbminpwdlife'] = minpwdlife - if historylength is not None: - pwpolicy['krbpwdhistorylength'] = historylength - if minclasses is not None: - pwpolicy['krbpwdmindiffchars'] = minclasses - if minlength is not None: - pwpolicy['krbpwdminlength'] = minlength - if priority is not None: - pwpolicy['cospriority'] = priority - if maxfailcount is not None: - pwpolicy['krbpwdmaxfailure'] = maxfailcount - if failinterval is not None: - pwpolicy['krbpwdfailurecountinterval'] = failinterval - if lockouttime is not None: - 
pwpolicy['krbpwdlockoutduration'] = lockouttime
-
-    return pwpolicy
-
-
-def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy):
-    return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy)
-
-
-def ensure(module, client):
-    state = module.params['state']
-    name = module.params['group']
-
-    module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'),
-                                        minpwdlife=module.params.get('minpwdlife'),
-                                        historylength=module.params.get('historylength'),
-                                        minclasses=module.params.get('minclasses'),
-                                        minlength=module.params.get('minlength'),
-                                        priority=module.params.get('priority'),
-                                        maxfailcount=module.params.get('maxfailcount'),
-                                        failinterval=module.params.get('failinterval'),
-                                        lockouttime=module.params.get('lockouttime'))
-
-    ipa_pwpolicy = client.pwpolicy_find(name=name)
-
-    changed = False
-    if state == 'present':
-        if not ipa_pwpolicy:
-            changed = True
-            if not module.check_mode:
-                ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy)
-        else:
-            diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy)
-            if len(diff) > 0:
-                changed = True
-                if not module.check_mode:
-                    ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy)
-    else:
-        if ipa_pwpolicy:
-            changed = True
-            if not module.check_mode:
-                client.pwpolicy_del(name=name)
-
-    return changed, ipa_pwpolicy
-
-
-def main():
-    argument_spec = ipa_argument_spec()
-    argument_spec.update(group=dict(type='str', aliases=['name']),
-                         state=dict(type='str', default='present', choices=['present', 'absent']),
-                         maxpwdlife=dict(type='str'),
-                         minpwdlife=dict(type='str'),
-                         historylength=dict(type='str'),
-                         minclasses=dict(type='str'),
-                         minlength=dict(type='str'),
-                         priority=dict(type='str'),
-                         maxfailcount=dict(type='str'),
-                         failinterval=dict(type='str'),
-                         lockouttime=dict(type='str'))
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True)
-
-    client = PwPolicyIPAClient(module=module,
-                               host=module.params['ipa_host'],
-                               port=module.params['ipa_port'],
-                               protocol=module.params['ipa_prot'])
-
-    try:
-        client.login(username=module.params['ipa_user'],
-                     password=module.params['ipa_pass'])
-        changed, pwpolicy = ensure(module, client)
-    except Exception as e:
-        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-    module.exit_json(changed=changed, pwpolicy=pwpolicy)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/identity/ipa/ipa_role.py b/plugins/modules/identity/ipa/ipa_role.py
deleted file mode 100644
index c602614ef9..0000000000
--- a/plugins/modules/identity/ipa/ipa_role.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: ipa_role
-author: Thomas Krahn (@Nosmoht)
-short_description: Manage FreeIPA role
-description:
-- Add, modify and delete a role within FreeIPA server using FreeIPA API.
-options:
-  cn:
-    description:
-    - Role name.
-    - Can not be changed as it is the unique identifier.
-    required: true
-    aliases: ['name']
-    type: str
-  description:
-    description:
-    - A description of this role-group.
-    type: str
-  group:
-    description:
-    - List of group names to assign to this role.
-    - If an empty list is passed all assigned groups will be unassigned from the role.
- - If option is omitted groups will not be checked or changed. - - If option is passed all assigned groups that are not passed will be unassigned from the role. - type: list - elements: str - host: - description: - - List of host names to assign. - - If an empty list is passed all assigned hosts will be unassigned from the role. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the role. - type: list - elements: str - hostgroup: - description: - - List of host group names to assign. - - If an empty list is passed all assigned host groups will be removed from the role. - - If option is omitted host groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the role. - type: list - elements: str - privilege: - description: - - List of privileges granted to the role. - - If an empty list is passed all assigned privileges will be removed. - - If option is omitted privileges will not be checked or changed. - - If option is passed all assigned privileges that are not passed will be removed. - type: list - elements: str - service: - description: - - List of service names to assign. - - If an empty list is passed all assigned services will be removed from the role. - - If option is omitted services will not be checked or changed. - - If option is passed all assigned services that are not passed will be removed from the role. - type: list - elements: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - user: - description: - - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the role. - - If option is omitted users will not be checked or changed. - type: list - elements: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure role is present - community.general.ipa_role: - name: dba - description: Database Administrators - state: present - user: - - pinky - - brain - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure role with certain details - community.general.ipa_role: - name: another-role - description: Just another role - group: - - editors - host: - - host01.example.com - hostgroup: - - hostgroup01 - privilege: - - Group Administrators - - User Administrators - service: - - service01 - -- name: Ensure role is absent - community.general.ipa_role: - name: dba - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -role: - description: Role as returned by IPA API. 
- returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class RoleIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(RoleIPAClient, self).__init__(module, host, port, protocol) - - def role_find(self, name): - return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) - - def role_add(self, name, item): - return self._post_json(method='role_add', name=name, item=item) - - def role_mod(self, name, item): - return self._post_json(method='role_mod', name=name, item=item) - - def role_del(self, name): - return self._post_json(method='role_del', name=name) - - def role_add_member(self, name, item): - return self._post_json(method='role_add_member', name=name, item=item) - - def role_add_group(self, name, item): - return self.role_add_member(name=name, item={'group': item}) - - def role_add_host(self, name, item): - return self.role_add_member(name=name, item={'host': item}) - - def role_add_hostgroup(self, name, item): - return self.role_add_member(name=name, item={'hostgroup': item}) - - def role_add_service(self, name, item): - return self.role_add_member(name=name, item={'service': item}) - - def role_add_user(self, name, item): - return self.role_add_member(name=name, item={'user': item}) - - def role_remove_member(self, name, item): - return self._post_json(method='role_remove_member', name=name, item=item) - - def role_remove_group(self, name, item): - return self.role_remove_member(name=name, item={'group': item}) - - def role_remove_host(self, name, item): - return self.role_remove_member(name=name, item={'host': item}) - - def role_remove_hostgroup(self, name, item): - return self.role_remove_member(name=name, item={'hostgroup': item}) - - def role_remove_service(self, name, item): - return self.role_remove_member(name=name, item={'service': item}) - - def role_remove_user(self, name, item): - return self.role_remove_member(name=name, item={'user': item}) - - def role_add_privilege(self, name, item): - return self._post_json(method='role_add_privilege', name=name, item={'privilege': item}) - - def role_remove_privilege(self, name, item): - return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item}) - - -def get_role_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_role_diff(client, ipa_role, module_role): - return client.get_diff(ipa_data=ipa_role, module_data=module_role) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - group = module.params['group'] - host = module.params['host'] - hostgroup = module.params['hostgroup'] - privilege = module.params['privilege'] - service = module.params['service'] - user = module.params['user'] - - module_role = get_role_dict(description=module.params['description']) - ipa_role = client.role_find(name=name) - - changed = False - if state == 'present': - if not ipa_role: - changed = True - if not module.check_mode: - ipa_role = client.role_add(name=name, item=module_role) - else: - diff = get_role_diff(client, ipa_role, module_role) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_role.get(key) - client.role_mod(name=name, item=data) - - if 
group is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group, - client.role_add_group, - client.role_remove_group) or changed - if host is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host, - client.role_add_host, - client.role_remove_host) or changed - - if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup, - client.role_add_hostgroup, - client.role_remove_hostgroup) or changed - - if privilege is not None: - changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege, - client.role_add_privilege, - client.role_remove_privilege) or changed - if service is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service, - client.role_add_service, - client.role_remove_service) or changed - if user is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user, - client.role_add_user, - client.role_remove_user) or changed - - else: - if ipa_role: - changed = True - if not module.check_mode: - client.role_del(name) - - return changed, client.role_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - group=dict(type='list', elements='str'), - host=dict(type='list', elements='str'), - hostgroup=dict(type='list', elements='str'), - privilege=dict(type='list', elements='str'), - service=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - user=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = RoleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, role = ensure(module, client) - module.exit_json(changed=changed, role=role) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_service.py b/plugins/modules/identity/ipa/ipa_service.py deleted file mode 100644 index f85b80d44e..0000000000 --- a/plugins/modules/identity/ipa/ipa_service.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_service -author: Cédric Parent (@cprh) -short_description: Manage FreeIPA service -description: -- Add and delete an IPA service using IPA API. -options: - krbcanonicalname: - description: - - Principal of the service. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - hosts: - description: - - Defines the list of 'ManagedBy' hosts. - required: false - type: list - elements: str - force: - description: - - Force principal name even if host is not in DNS. - required: false - type: bool - state: - description: State to ensure. 
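All of these IPA modules follow the same idempotent ensure() flow: find the object, diff it against the desired attributes, then add, modify or delete while honouring check mode. A minimal self-contained sketch of that flow, assuming an in-memory stand-in for the real IPAClient (FakeClient and its method names are invented for illustration, not part of the collection):

```python
# Minimal sketch of the find -> diff -> apply pattern shared by the ensure()
# functions in these modules. FakeClient is a hypothetical in-memory stub.
class FakeClient:
    def __init__(self):
        self.store = {}  # name -> dict of attributes

    def find(self, name):
        return self.store.get(name, {})

    def add(self, name, item):
        self.store[name] = dict(item)

    def mod(self, name, item):
        self.store[name].update(item)

    def delete(self, name):
        self.store.pop(name, None)


def ensure(client, name, desired, state='present', check_mode=False):
    existing = client.find(name)
    changed = False
    if state == 'present':
        if not existing:
            changed = True
            if not check_mode:
                client.add(name, desired)
        else:
            # Only the keys whose values differ are pushed back.
            diff = [k for k, v in desired.items() if existing.get(k) != v]
            if diff:
                changed = True
                if not check_mode:
                    client.mod(name, {k: desired[k] for k in diff})
    elif existing:
        changed = True
        if not check_mode:
            client.delete(name)
    return changed, client.find(name)


client = FakeClient()
print(ensure(client, 'dba', {'description': 'Database Administrators'}))  # (True, ...)
print(ensure(client, 'dba', {'description': 'Database Administrators'}))  # (False, ...) -- idempotent
```

Running ensure() twice with the same desired attributes reports changed=True only the first time, which is what makes the modules safe to re-run.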
- required: false - default: present - choices: ["absent", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure service is present - community.general.ipa_service: - name: http/host01.example.com - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure service is absent - community.general.ipa_service: - name: http/host01.example.com - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Changing Managing hosts list - community.general.ipa_service: - name: http/host01.example.com - hosts: - - host01.example.com - - host02.example.com - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -service: - description: Service as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class ServiceIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(ServiceIPAClient, self).__init__(module, host, port, protocol) - - def service_find(self, name): - return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name}) - - def service_add(self, name, service): - return self._post_json(method='service_add', name=name, item=service) - - def service_mod(self, name, service): - return self._post_json(method='service_mod', name=name, item=service) - - def service_del(self, name): - return self._post_json(method='service_del', name=name) - - def service_disable(self, name): - return self._post_json(method='service_disable', name=name) - - def service_add_host(self, name, item): - return self._post_json(method='service_add_host', name=name, item={'host': item}) - - def service_remove_host(self, name, item): - return self._post_json(method='service_remove_host', name=name, item={'host': item}) - - -def get_service_dict(force=None, krbcanonicalname=None): - data = {} - if force is not None: - data['force'] = force - if krbcanonicalname is not None: - data['krbcanonicalname'] = krbcanonicalname - return data - - -def get_service_diff(client, ipa_host, module_service): - non_updateable_keys = ['force', 'krbcanonicalname'] - for key in non_updateable_keys: - if key in module_service: - del module_service[key] - - return client.get_diff(ipa_data=ipa_host, module_data=module_service) - - -def ensure(module, client): - name = module.params['krbcanonicalname'] - state = module.params['state'] - hosts = module.params['hosts'] - - ipa_service = client.service_find(name=name) - module_service = get_service_dict(force=module.params['force']) - changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_service: - changed = True - if not module.check_mode: - client.service_add(name=name, service=module_service) - else: - diff = get_service_diff(client, ipa_service, module_service) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_service.get(key) - client.service_mod(name=name, service=data) - if hosts is not None: - if 'managedby_host' in ipa_service: - for host in ipa_service['managedby_host']: - if host not in hosts: - if not module.check_mode: - client.service_remove_host(name=name, item=host) - changed = True - for host 
in hosts: - if host not in ipa_service['managedby_host']: - if not module.check_mode: - client.service_add_host(name=name, item=host) - changed = True - else: - for host in hosts: - if not module.check_mode: - client.service_add_host(name=name, item=host) - changed = True - - else: - if ipa_service: - changed = True - if not module.check_mode: - client.service_del(name=name) - - return changed, client.service_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update( - krbcanonicalname=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool', required=False), - hosts=dict(type='list', required=False, elements='str'), - state=dict(type='str', required=False, default='present', - choices=['present', 'absent'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = ServiceIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, host = ensure(module, client) - module.exit_json(changed=changed, host=host) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_subca.py b/plugins/modules/identity/ipa/ipa_subca.py deleted file mode 100644 index 3b0d3e8707..0000000000 --- a/plugins/modules/identity/ipa/ipa_subca.py +++ /dev/null @@ -1,211 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_subca -author: Abhijeet Kasurde (@Akasurde) -short_description: Manage FreeIPA Lightweight Sub Certificate Authorities. -description: -- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API. -options: - subca_name: - description: - - The Sub Certificate Authority name which needs to be managed. - required: true - aliases: ["name"] - type: str - subca_subject: - description: - - The Sub Certificate Authority's Subject. e.g., 'CN=SampleSubCA1,O=testrelm.test'. - required: true - type: str - subca_desc: - description: - - The Sub Certificate Authority's description. - type: str - state: - description: - - State to ensure. - - State 'disable' and 'enable' is available for FreeIPA 4.4.2 version and onwards. - required: false - default: present - choices: ["absent", "disabled", "enabled", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = ''' -- name: Ensure IPA Sub CA is present - community.general.ipa_subca: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - subca_name: AnsibleSubCA1 - subca_subject: 'CN=AnsibleSubCA1,O=example.com' - subca_desc: Ansible Sub CA - -- name: Ensure that IPA Sub CA is removed - community.general.ipa_subca: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: absent - subca_name: AnsibleSubCA1 - -- name: Ensure that IPA Sub CA is disabled - community.general.ipa_subca: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: disable - subca_name: AnsibleSubCA1 -''' - -RETURN = r''' -subca: - description: IPA Sub CA record as returned by IPA API. 
- returned: always - type: dict -''' - -from distutils.version import LooseVersion -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SubCAIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SubCAIPAClient, self).__init__(module, host, port, protocol) - - def subca_find(self, subca_name): - return self._post_json(method='ca_find', name=subca_name, item=None) - - def subca_add(self, subca_name=None, subject_dn=None, details=None): - item = dict(ipacasubjectdn=subject_dn) - subca_desc = details.get('description', None) - if subca_desc is not None: - item.update(description=subca_desc) - return self._post_json(method='ca_add', name=subca_name, item=item) - - def subca_mod(self, subca_name=None, diff=None, details=None): - item = get_subca_dict(details) - for change in diff: - update_detail = dict() - if item[change] is not None: - update_detail.update(setattr="{0}={1}".format(change, item[change])) - self._post_json(method='ca_mod', name=subca_name, item=update_detail) - - def subca_del(self, subca_name=None): - return self._post_json(method='ca_del', name=subca_name) - - def subca_disable(self, subca_name=None): - return self._post_json(method='ca_disable', name=subca_name) - - def subca_enable(self, subca_name=None): - return self._post_json(method='ca_enable', name=subca_name) - - -def get_subca_dict(details=None): - module_subca = dict() - if details['description'] is not None: - module_subca['description'] = details['description'] - if details['subca_subject'] is not None: - module_subca['ipacasubjectdn'] = details['subca_subject'] - return module_subca - - -def get_subca_diff(client, ipa_subca, module_subca): - details = get_subca_dict(module_subca) - return client.get_diff(ipa_data=ipa_subca, module_data=details) - - -def ensure(module, client): - subca_name = module.params['subca_name'] - subca_subject_dn = module.params['subca_subject'] - subca_desc = module.params['subca_desc'] - - state = module.params['state'] - - ipa_subca = client.subca_find(subca_name) - module_subca = dict(description=subca_desc, - subca_subject=subca_subject_dn) - - changed = False - if state == 'present': - if not ipa_subca: - changed = True - if not module.check_mode: - client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca) - else: - diff = get_subca_diff(client, ipa_subca, module_subca) - # IPA does not allow to modify Sub CA's subject DN - # So skip it for now. - if 'ipacasubjectdn' in diff: - diff.remove('ipacasubjectdn') - del module_subca['subca_subject'] - - if len(diff) > 0: - changed = True - if not module.check_mode: - client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca) - elif state == 'absent': - if ipa_subca: - changed = True - if not module.check_mode: - client.subca_del(subca_name=subca_name) - elif state == 'disable': - ipa_version = client.get_ipa_version() - if LooseVersion(ipa_version) < LooseVersion('4.4.2'): - module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. 
Please upgrade to " - "version greater than 4.4.2") - if ipa_subca: - changed = True - if not module.check_mode: - client.subca_disable(subca_name=subca_name) - elif state == 'enable': - ipa_version = client.get_ipa_version() - if LooseVersion(ipa_version) < LooseVersion('4.4.2'): - module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to " - "version greater than 4.4.2") - if ipa_subca: - changed = True - if not module.check_mode: - client.subca_enable(subca_name=subca_name) - - return changed, client.subca_find(subca_name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']), - subca_subject=dict(type='str', required=True), - subca_desc=dict(type='str'), - state=dict(type='str', default='present', - choices=['present', 'absent', 'enabled', 'disabled']),) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True,) - - client = SubCAIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, record = ensure(module, client) - module.exit_json(changed=changed, record=record) - except Exception as exc: - module.fail_json(msg=to_native(exc)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_sudocmd.py b/plugins/modules/identity/ipa/ipa_sudocmd.py deleted file mode 100644 index d75aff44ce..0000000000 --- a/plugins/modules/identity/ipa/ipa_sudocmd.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_sudocmd -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA sudo command -description: -- Add, modify or delete sudo command within FreeIPA server using FreeIPA API. -options: - sudocmd: - description: - - Sudo command. - aliases: ['name'] - required: true - type: str - description: - description: - - A description of this command. - type: str - state: - description: State to ensure. 
- default: present - choices: ['absent', 'disabled', 'enabled', 'present'] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure sudo command exists - community.general.ipa_sudocmd: - name: su - description: Allow to run su via sudo - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure sudo command does not exist - community.general.ipa_sudocmd: - name: su - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -sudocmd: - description: Sudo command as return from IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SudoCmdIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SudoCmdIPAClient, self).__init__(module, host, port, protocol) - - def sudocmd_find(self, name): - return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name}) - - def sudocmd_add(self, name, item): - return self._post_json(method='sudocmd_add', name=name, item=item) - - def sudocmd_mod(self, name, item): - return self._post_json(method='sudocmd_mod', name=name, item=item) - - def sudocmd_del(self, name): - return self._post_json(method='sudocmd_del', name=name) - - -def get_sudocmd_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd): - return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd) - - -def ensure(module, client): - name = module.params['sudocmd'] - state = module.params['state'] - - module_sudocmd = get_sudocmd_dict(description=module.params['description']) - ipa_sudocmd = client.sudocmd_find(name=name) - - changed = False - if state == 'present': - if not ipa_sudocmd: - changed = True - if not module.check_mode: - client.sudocmd_add(name=name, item=module_sudocmd) - else: - diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_sudocmd.get(key) - client.sudocmd_mod(name=name, item=data) - else: - if ipa_sudocmd: - changed = True - if not module.check_mode: - client.sudocmd_del(name=name) - - return changed, client.sudocmd_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(description=dict(type='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - sudocmd=dict(type='str', required=True, aliases=['name'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = SudoCmdIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, sudocmd = ensure(module, client) - module.exit_json(changed=changed, sudocmd=sudocmd) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py deleted file mode 
100644 index 65fdd4f75f..0000000000 --- a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_sudocmdgroup -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA sudo command group -description: -- Add, modify or delete sudo command group within IPA server using IPA API. -options: - cn: - description: - - Sudo Command Group. - aliases: ['name'] - required: true - type: str - description: - description: - - Group description. - type: str - state: - description: State to ensure. - default: present - choices: ['absent', 'disabled', 'enabled', 'present'] - type: str - sudocmd: - description: - - List of sudo commands to assign to the group. - - If an empty list is passed all assigned commands will be removed from the group. - - If option is omitted sudo commands will not be checked or changed. - type: list - elements: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure sudo command group exists - community.general.ipa_sudocmdgroup: - name: group01 - description: Group of important commands - sudocmd: - - su - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure sudo command group does not exist - community.general.ipa_sudocmdgroup: - name: group01 - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -sudocmdgroup: - description: Sudo command group as returned by IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SudoCmdGroupIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol) - - def sudocmdgroup_find(self, name): - return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) - - def sudocmdgroup_add(self, name, item): - return self._post_json(method='sudocmdgroup_add', name=name, item=item) - - def sudocmdgroup_mod(self, name, item): - return self._post_json(method='sudocmdgroup_mod', name=name, item=item) - - def sudocmdgroup_del(self, name): - return self._post_json(method='sudocmdgroup_del', name=name) - - def sudocmdgroup_add_member(self, name, item): - return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) - - def sudocmdgroup_add_member_sudocmd(self, name, item): - return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) - - def sudocmdgroup_remove_member(self, name, item): - return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) - - def sudocmdgroup_remove_member_sudocmd(self, name, item): - return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) - - -def get_sudocmdgroup_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup): - return client.get_diff(ipa_data=ipa_sudocmdgroup, 
module_data=module_sudocmdgroup)
-
-
-def ensure(module, client):
-    name = module.params['cn']
-    state = module.params['state']
-    sudocmd = module.params['sudocmd']
-
-    module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
-    ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
-
-    changed = False
-    if state == 'present':
-        if not ipa_sudocmdgroup:
-            changed = True
-            if not module.check_mode:
-                ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
-        else:
-            diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup)
-            if len(diff) > 0:
-                changed = True
-                if not module.check_mode:
-                    data = {}
-                    for key in diff:
-                        data[key] = module_sudocmdgroup.get(key)
-                    client.sudocmdgroup_mod(name=name, item=data)
-
-        if sudocmd is not None:
-            changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
-                                            client.sudocmdgroup_add_member_sudocmd,
-                                            client.sudocmdgroup_remove_member_sudocmd) or changed
-    else:
-        if ipa_sudocmdgroup:
-            changed = True
-            if not module.check_mode:
-                client.sudocmdgroup_del(name=name)
-
-    return changed, client.sudocmdgroup_find(name=name)
-
-
-def main():
-    argument_spec = ipa_argument_spec()
-    argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
-                         description=dict(type='str'),
-                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
-                         sudocmd=dict(type='list', elements='str'))
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True)
-
-    client = SudoCmdGroupIPAClient(module=module,
-                                   host=module.params['ipa_host'],
-                                   port=module.params['ipa_port'],
-                                   protocol=module.params['ipa_prot'])
-    try:
-        client.login(username=module.params['ipa_user'],
-                     password=module.params['ipa_pass'])
-        changed, sudocmdgroup = ensure(module, client)
-        module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
-    except Exception as e:
-        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py
deleted file mode 100644
index 2054599f9d..0000000000
--- a/plugins/modules/identity/ipa/ipa_sudorule.py
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: ipa_sudorule
-author: Thomas Krahn (@Nosmoht)
-short_description: Manage FreeIPA sudo rule
-description:
-- Add, modify or delete sudo rule within IPA server using IPA API.
-options:
-  cn:
-    description:
-    - Canonical name.
-    - Can not be changed as it is the unique identifier.
-    required: true
-    aliases: ['name']
-    type: str
-  cmdcategory:
-    description:
-    - Command category the rule applies to.
-    choices: ['all']
-    type: str
-  cmd:
-    description:
-    - List of commands assigned to the rule.
-    - If an empty list is passed all commands will be removed from the rule.
-    - If option is omitted commands will not be checked or changed.
-    type: list
-    elements: str
-  cmdgroup:
-    description:
-    - List of command groups assigned to the rule.
-    - If an empty list is passed all command groups will be removed from the rule.
-    - If option is omitted command groups will not be checked or changed.
-    type: list
-    elements: str
-    version_added: 2.0.0
-  description:
-    description:
-    - Description of the sudo rule.
-    type: str
-  host:
-    description:
-    - List of hosts assigned to the rule.
-    - If an empty list is passed all hosts will be removed from the rule.
-    - If option is omitted hosts will not be checked or changed.
-    - Option C(hostcategory) must be omitted to assign hosts.
-    type: list
-    elements: str
-  hostcategory:
-    description:
-    - Host category the rule applies to.
-    - If 'all' is passed one must omit C(host) and C(hostgroup).
-    - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
-    choices: ['all']
-    type: str
-  hostgroup:
-    description:
-    - List of host groups assigned to the rule.
-    - If an empty list is passed all host groups will be removed from the rule.
-    - If option is omitted host groups will not be checked or changed.
-    - Option C(hostcategory) must be omitted to assign host groups.
-    type: list
-    elements: str
-  runasextusers:
-    description:
-    - List of external RunAs users.
-    type: list
-    elements: str
-    version_added: 2.3.0
-  runasusercategory:
-    description:
-    - RunAs User category the rule applies to.
-    choices: ['all']
-    type: str
-  runasgroupcategory:
-    description:
-    - RunAs Group category the rule applies to.
-    choices: ['all']
-    type: str
-  sudoopt:
-    description:
-    - List of options to add to the sudo rule.
-    type: list
-    elements: str
-  user:
-    description:
-    - List of users assigned to the rule.
-    - If an empty list is passed all users will be removed from the rule.
-    - If option is omitted users will not be checked or changed.
-    type: list
-    elements: str
-  usercategory:
-    description:
-    - User category the rule applies to.
-    choices: ['all']
-    type: str
-  usergroup:
-    description:
-    - List of user groups assigned to the rule.
-    - If an empty list is passed all user groups will be removed from the rule.
-    - If option is omitted user groups will not be checked or changed.
-    type: list
-    elements: str
-  state:
-    description: State to ensure.
-    default: present
-    choices: ['absent', 'disabled', 'enabled', 'present']
-    type: str
-extends_documentation_fragment:
-- community.general.ipa.documentation
-
-'''
-
-EXAMPLES = r'''
-- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
-  community.general.ipa_sudorule:
-    name: sudo_all_nopasswd
-    cmdcategory: all
-    description: Allow running every command with sudo without a password
-    hostcategory: all
-    sudoopt:
-    - '!authenticate'
-    usercategory: all
-    ipa_host: ipa.example.com
-    ipa_user: admin
-    ipa_pass: topsecret
-
-- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
-  community.general.ipa_sudorule:
-    name: sudo_dev_dbserver
-    description: Allow developers to run every command with sudo on all database servers
-    cmdcategory: all
-    host:
-    - db01.example.com
-    hostgroup:
-    - db-server
-    sudoopt:
-    - '!authenticate'
-    usergroup:
-    - developers
-    ipa_host: ipa.example.com
-    ipa_user: admin
-    ipa_pass: topsecret
-
-- name: Ensure user group operations can run any command that is part of operations-cmdgroup on any host as user root.
-  community.general.ipa_sudorule:
-    name: sudo_operations_all
-    description: Allow operators to run any command that is part of operations-cmdgroup on any host as user root.
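The list options documented above all share one convention: an omitted option leaves membership untouched, an empty list clears all members, and any other list becomes the exact member set. A hedged sketch of that reconciliation logic (reconcile_members and its callback parameters are illustrative names; the modules implement this via IPAClient.modify_if_diff):

```python
# Sketch of the list-reconciliation semantics described in the option docs:
# None = leave untouched, [] = remove everything, otherwise converge on the
# given set. add/remove are callbacks taking a list of member names.
def reconcile_members(current, wanted, add, remove):
    changed = False
    if wanted is None:  # option omitted: do not touch members
        return changed
    to_remove = sorted(set(current) - set(wanted))
    to_add = sorted(set(wanted) - set(current))
    if to_remove:
        remove(to_remove)
        changed = True
    if to_add:
        add(to_add)
        changed = True
    return changed


members = ['pinky']
print(reconcile_members(members, ['pinky', 'brain'], print, print))  # adds ['brain'] -> True
print(reconcile_members(members, None, print, print))                # omitted -> False
print(reconcile_members(members, [], print, print))                  # empty list clears -> True
```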
- cmdgroup: - - operations-cmdgroup - hostcategory: all - runasextusers: - - root - sudoopt: - - '!authenticate' - usergroup: - - operators - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -sudorule: - description: Sudorule as returned by IPA - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SudoRuleIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) - - def sudorule_find(self, name): - return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) - - def sudorule_add(self, name, item): - return self._post_json(method='sudorule_add', name=name, item=item) - - def sudorule_add_runasuser(self, name, item): - return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item}) - - def sudorule_remove_runasuser(self, name, item): - return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item}) - - def sudorule_mod(self, name, item): - return self._post_json(method='sudorule_mod', name=name, item=item) - - def sudorule_del(self, name): - return self._post_json(method='sudorule_del', name=name) - - def sudorule_add_option(self, name, item): - return self._post_json(method='sudorule_add_option', name=name, item=item) - - def sudorule_add_option_ipasudoopt(self, name, item): - return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) - - def sudorule_remove_option(self, name, item): - return self._post_json(method='sudorule_remove_option', name=name, item=item) - - def sudorule_remove_option_ipasudoopt(self, name, item): - return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) - - def sudorule_add_host(self, name, item): - return self._post_json(method='sudorule_add_host', name=name, item=item) - - def sudorule_add_host_host(self, name, item): - return self.sudorule_add_host(name=name, item={'host': item}) - - def sudorule_add_host_hostgroup(self, name, item): - return self.sudorule_add_host(name=name, item={'hostgroup': item}) - - def sudorule_remove_host(self, name, item): - return self._post_json(method='sudorule_remove_host', name=name, item=item) - - def sudorule_remove_host_host(self, name, item): - return self.sudorule_remove_host(name=name, item={'host': item}) - - def sudorule_remove_host_hostgroup(self, name, item): - return self.sudorule_remove_host(name=name, item={'hostgroup': item}) - - def sudorule_add_allow_command(self, name, item): - return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) - - def sudorule_add_allow_command_group(self, name, item): - return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) - - def sudorule_remove_allow_command(self, name, item): - return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) - - def sudorule_add_user(self, name, item): - return self._post_json(method='sudorule_add_user', name=name, item=item) - - def sudorule_add_user_user(self, name, item): - return self.sudorule_add_user(name=name, item={'user': item}) - - def sudorule_add_user_group(self, name, item): - return self.sudorule_add_user(name=name, item={'group': item}) - - def 
sudorule_remove_user(self, name, item): - return self._post_json(method='sudorule_remove_user', name=name, item=item) - - def sudorule_remove_user_user(self, name, item): - return self.sudorule_remove_user(name=name, item={'user': item}) - - def sudorule_remove_user_group(self, name, item): - return self.sudorule_remove_user(name=name, item={'group': item}) - - -def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, - runasgroupcategory=None, runasusercategory=None): - data = {} - if cmdcategory is not None: - data['cmdcategory'] = cmdcategory - if description is not None: - data['description'] = description - if hostcategory is not None: - data['hostcategory'] = hostcategory - if ipaenabledflag is not None: - data['ipaenabledflag'] = ipaenabledflag - if usercategory is not None: - data['usercategory'] = usercategory - if runasusercategory is not None: - data['ipasudorunasusercategory'] = runasusercategory - if runasgroupcategory is not None: - data['ipasudorunasgroupcategory'] = runasgroupcategory - return data - - -def category_changed(module, client, category_name, ipa_sudorule): - if ipa_sudorule.get(category_name, None) == ['all']: - if not module.check_mode: - # cn is returned as list even with only a single value. - client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) - return True - return False - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - cmd = module.params['cmd'] - cmdgroup = module.params['cmdgroup'] - cmdcategory = module.params['cmdcategory'] - host = module.params['host'] - hostcategory = module.params['hostcategory'] - hostgroup = module.params['hostgroup'] - runasusercategory = module.params['runasusercategory'] - runasgroupcategory = module.params['runasgroupcategory'] - runasextusers = module.params['runasextusers'] - - if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' - else: - ipaenabledflag = 'FALSE' - - sudoopt = module.params['sudoopt'] - user = module.params['user'] - usercategory = module.params['usercategory'] - usergroup = module.params['usergroup'] - - module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, - description=module.params['description'], - hostcategory=hostcategory, - ipaenabledflag=ipaenabledflag, - usercategory=usercategory, - runasusercategory=runasusercategory, - runasgroupcategory=runasgroupcategory) - ipa_sudorule = client.sudorule_find(name=name) - - changed = False - if state in ['present', 'disabled', 'enabled']: - if not ipa_sudorule: - changed = True - if not module.check_mode: - ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) - else: - diff = client.get_diff(ipa_sudorule, module_sudorule) - if len(diff) > 0: - changed = True - if not module.check_mode: - if 'hostcategory' in diff: - if ipa_sudorule.get('memberhost_host', None) is not None: - client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) - if ipa_sudorule.get('memberhost_hostgroup', None) is not None: - client.sudorule_remove_host_hostgroup(name=name, - item=ipa_sudorule.get('memberhost_hostgroup')) - - client.sudorule_mod(name=name, item=module_sudorule) - - if cmd is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed - if not module.check_mode: - client.sudorule_add_allow_command(name=name, item=cmd) - - if cmdgroup is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed - if not 
module.check_mode: - client.sudorule_add_allow_command_group(name=name, item=cmdgroup) - - if runasusercategory is not None: - changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed - - if runasgroupcategory is not None: - changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed - - if host is not None: - changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, - client.sudorule_add_host_host, - client.sudorule_remove_host_host) or changed - - if hostgroup is not None: - changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, - client.sudorule_add_host_hostgroup, - client.sudorule_remove_host_hostgroup) or changed - if sudoopt is not None: - # client.modify_if_diff does not work as each option must be removed/added by its own - ipa_list = ipa_sudorule.get('ipasudoopt', []) - module_list = sudoopt - diff = list(set(ipa_list) - set(module_list)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_remove_option_ipasudoopt(name, item) - diff = list(set(module_list) - set(ipa_list)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_add_option_ipasudoopt(name, item) - - if runasextusers is not None: - ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', []) - diff = list(set(ipa_sudorule_run_as_user) - set(runasextusers)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_remove_runasuser(name=name, item=item) - diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_add_runasuser(name=name, item=item) - - if user is not None: - changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, - client.sudorule_add_user_user, - client.sudorule_remove_user_user) or changed - if usergroup is not None: - changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, - client.sudorule_add_user_group, - client.sudorule_remove_user_group) or changed - else: - if ipa_sudorule: - changed = True - if not module.check_mode: - client.sudorule_del(name) - - return changed, client.sudorule_find(name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cmd=dict(type='list', elements='str'), - cmdgroup=dict(type='list', elements='str'), - cmdcategory=dict(type='str', choices=['all']), - cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostcategory=dict(type='str', choices=['all']), - hostgroup=dict(type='list', elements='str'), - runasusercategory=dict(type='str', choices=['all']), - runasgroupcategory=dict(type='str', choices=['all']), - sudoopt=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - user=dict(type='list', elements='str'), - usercategory=dict(type='str', choices=['all']), - usergroup=dict(type='list', 
elements='str'), - runasextusers=dict(type='list', elements='str')) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[['cmdcategory', 'cmd'], - ['cmdcategory', 'cmdgroup'], - ['hostcategory', 'host'], - ['hostcategory', 'hostgroup'], - ['usercategory', 'user'], - ['usercategory', 'usergroup']], - supports_check_mode=True) - - client = SudoRuleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, sudorule = ensure(module, client) - module.exit_json(changed=changed, sudorule=sudorule) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/identity/ipa/ipa_user.py deleted file mode 100644 index 8a7b3abea2..0000000000 --- a/plugins/modules/identity/ipa/ipa_user.py +++ /dev/null @@ -1,397 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_user -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA users -description: -- Add, modify and delete user within IPA server. -options: - displayname: - description: Display name. - type: str - update_password: - description: - - Set password for a user. - type: str - default: 'always' - choices: [ always, on_create ] - givenname: - description: First name. - type: str - krbpasswordexpiration: - description: - - Date at which the user password will expire. - - In the format YYYYMMddHHmmss. - - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22. - type: str - loginshell: - description: Login shell. - type: str - mail: - description: - - List of mail addresses assigned to the user. - - If an empty list is passed all assigned email addresses will be deleted. - - If None is passed email addresses will not be checked or changed. - type: list - elements: str - password: - description: - - Password for a user. - - Will not be set for an existing user unless I(update_password=always), which is the default. - type: str - sn: - description: Surname. - type: str - sshpubkey: - description: - - List of public SSH key. - - If an empty list is passed all assigned public keys will be deleted. - - If None is passed SSH public keys will not be checked or changed. - type: list - elements: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "disabled", "enabled", "present"] - type: str - telephonenumber: - description: - - List of telephone numbers assigned to the user. - - If an empty list is passed all assigned telephone numbers will be deleted. - - If None is passed telephone numbers will not be checked or changed. - type: list - elements: str - title: - description: Title. - type: str - uid: - description: uid of the user. - required: true - aliases: ["name"] - type: str - uidnumber: - description: - - Account Settings UID/Posix User ID number. - type: str - gidnumber: - description: - - Posix Group ID. - type: str - homedirectory: - description: - - Default home directory of the user. 
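The krbpasswordexpiration option documented above takes a plain YYYYMMddHHmmss timestamp; the module itself appends the trailing "Z" before sending it to the API. One way to build such a value (the 90-day offset is only an example, not something the module prescribes):

```python
# Build a YYYYMMddHHmmss expiry timestamp 90 days from now.
from datetime import datetime, timedelta

expiry = (datetime.utcnow() + timedelta(days=90)).strftime('%Y%m%d%H%M%S')
print(expiry)  # e.g. 20180121182022
```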
- type: str - version_added: '0.2.0' - userauthtype: - description: - - The authentication type to use for the user. - choices: ["password", "radius", "otp", "pkinit", "hardened"] - type: list - elements: str - version_added: '1.2.0' -extends_documentation_fragment: -- community.general.ipa.documentation - -requirements: -- base64 -- hashlib -''' - -EXAMPLES = r''' -- name: Ensure pinky is present and always reset password - community.general.ipa_user: - name: pinky - state: present - krbpasswordexpiration: 20200119235959 - givenname: Pinky - sn: Acme - mail: - - pinky@acme.com - telephonenumber: - - '+555123456' - sshpubkey: - - ssh-rsa .... - - ssh-dsa .... - uidnumber: '1001' - gidnumber: '100' - homedirectory: /home/pinky - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure brain is absent - community.general.ipa_user: - name: brain - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure pinky is present but don't reset password if already exists - community.general.ipa_user: - name: pinky - state: present - givenname: Pinky - sn: Acme - password: zounds - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - update_password: on_create - -- name: Ensure pinky is present and using one time password and RADIUS authentication - community.general.ipa_user: - name: pinky - state: present - userauthtype: - - otp - - radius - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -user: - description: User as returned by IPA API - returned: always - type: dict -''' - -import base64 -import hashlib -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class UserIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(UserIPAClient, self).__init__(module, host, port, protocol) - - def user_find(self, name): - return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) - - def user_add(self, name, item): - return self._post_json(method='user_add', name=name, item=item) - - def user_mod(self, name, item): - return self._post_json(method='user_mod', name=name, item=item) - - def user_del(self, name): - return self._post_json(method='user_del', name=name) - - def user_disable(self, name): - return self._post_json(method='user_disable', name=name) - - def user_enable(self, name): - return self._post_json(method='user_enable', name=name) - - -def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None, - mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None, - title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None, - userauthtype=None): - user = {} - if displayname is not None: - user['displayname'] = displayname - if krbpasswordexpiration is not None: - user['krbpasswordexpiration'] = krbpasswordexpiration + "Z" - if givenname is not None: - user['givenname'] = givenname - if loginshell is not None: - user['loginshell'] = loginshell - if mail is not None: - user['mail'] = mail - user['nsaccountlock'] = nsaccountlock - if sn is not None: - user['sn'] = sn - if sshpubkey is not None: - user['ipasshpubkey'] = sshpubkey - if telephonenumber is not None: - user['telephonenumber'] = telephonenumber - if title is not None: - user['title'] = title - if 
userpassword is not None:
-        user['userpassword'] = userpassword
-    if gidnumber is not None:
-        user['gidnumber'] = gidnumber
-    if uidnumber is not None:
-        user['uidnumber'] = uidnumber
-    if homedirectory is not None:
-        user['homedirectory'] = homedirectory
-    if userauthtype is not None:
-        user['ipauserauthtype'] = userauthtype
-
-    return user
-
-
-def get_user_diff(client, ipa_user, module_user):
-    """
-    Return the keys whose values differ between the two dicts. Unfortunately the IPA
-    API returns everything as a list even if only a single value is possible.
-    Therefore some more complexity is needed.
-    The method checks whether the value of module_user.attr is a list and, if it is not,
-    wraps it in a list when the same attribute in ipa_user is a list. That way the method
-    should not need to be changed if the returned API dict changes.
-    :param ipa_user:
-    :param module_user:
-    :return:
-    """
-    # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys themselves
-    # but instead the fingerprints. These are used for comparison.
-    sshpubkey = None
-    if 'ipasshpubkey' in module_user:
-        hash_algo = 'md5'
-        if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
-            hash_algo = 'sha256'
-        module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
-        # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on
-        sshpubkey = module_user['ipasshpubkey']
-        del module_user['ipasshpubkey']
-
-    result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
-
-    # If there are public keys, remove the fingerprints and add them back to the dict
-    if sshpubkey is not None:
-        del module_user['sshpubkeyfp']
-        module_user['ipasshpubkey'] = sshpubkey
-    return result
-
-
-def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
-    """
-    Return the public key fingerprint of a given public SSH key
-    in format "[fp] [comment] (ssh-rsa)" where fp is of the format:
-    FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
-    for md5 or
-    SHA256:[base64]
-    for sha256
-    Comments are assumed to be all characters past the second
-    whitespace character in the sshpubkey string.
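The docstring above describes the two fingerprint spellings IPA reports: colon-separated uppercase hex pairs for md5, and "SHA256:" followed by unpadded base64. A standalone demo of both formats (the key blob below is a dummy value, not a real key):

```python
# Demo of the two fingerprint formats described above, applied to a
# made-up key blob; real blobs come from the second field of an
# "ssh-rsa AAAA... comment" public key line.
import base64
import hashlib

key = base64.b64decode(b'AAAAB3NzaC1yc2EAAAADAQABAAABgQC7')  # dummy blob

md5_hex = hashlib.md5(key).hexdigest()
md5_fp = ':'.join(a + b for a, b in zip(md5_hex[::2], md5_hex[1::2])).upper()

sha256_fp = 'SHA256:' + base64.b64encode(
    hashlib.sha256(key).digest()).decode('ascii').rstrip('=')

print(md5_fp)     # FB:0C:AC:...-style pairs, as IPA reports md5 fingerprints
print(sha256_fp)  # SHA256:<base64 without padding>
```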
- :param ssh_key: - :param hash_algo: - :return: - """ - parts = ssh_key.strip().split(None, 2) - if len(parts) == 0: - return None - key_type = parts[0] - key = base64.b64decode(parts[1].encode('ascii')) - - if hash_algo == 'md5': - fp_plain = hashlib.md5(key).hexdigest() - key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() - elif hash_algo == 'sha256': - fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=') - key_fp = 'SHA256:{fp}'.format(fp=fp_plain) - if len(parts) < 3: - return "%s (%s)" % (key_fp, key_type) - else: - comment = parts[2] - return "%s %s (%s)" % (key_fp, comment, key_type) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['uid'] - nsaccountlock = state == 'disabled' - - module_user = get_user_dict(displayname=module.params.get('displayname'), - krbpasswordexpiration=module.params.get('krbpasswordexpiration'), - givenname=module.params.get('givenname'), - loginshell=module.params['loginshell'], - mail=module.params['mail'], sn=module.params['sn'], - sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, - telephonenumber=module.params['telephonenumber'], title=module.params['title'], - userpassword=module.params['password'], - gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'), - homedirectory=module.params.get('homedirectory'), - userauthtype=module.params.get('userauthtype')) - - update_password = module.params.get('update_password') - ipa_user = client.user_find(name=name) - - changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_user: - changed = True - if not module.check_mode: - ipa_user = client.user_add(name=name, item=module_user) - else: - if update_password == 'on_create': - module_user.pop('userpassword', None) - diff = get_user_diff(client, ipa_user, module_user) - if len(diff) > 0: - changed = True - if not module.check_mode: - ipa_user = client.user_mod(name=name, item=module_user) - else: - if ipa_user: - changed = True - if not module.check_mode: - client.user_del(name) - - return changed, ipa_user - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(displayname=dict(type='str'), - givenname=dict(type='str'), - update_password=dict(type='str', default="always", - choices=['always', 'on_create'], - no_log=False), - krbpasswordexpiration=dict(type='str', no_log=False), - loginshell=dict(type='str'), - mail=dict(type='list', elements='str'), - sn=dict(type='str'), - uid=dict(type='str', required=True, aliases=['name']), - gidnumber=dict(type='str'), - uidnumber=dict(type='str'), - password=dict(type='str', no_log=True), - sshpubkey=dict(type='list', elements='str'), - state=dict(type='str', default='present', - choices=['present', 'absent', 'enabled', 'disabled']), - telephonenumber=dict(type='list', elements='str'), - title=dict(type='str'), - homedirectory=dict(type='str'), - userauthtype=dict(type='list', elements='str', - choices=['password', 'radius', 'otp', 'pkinit', 'hardened'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = UserIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - # If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). - # Therefore a small check here to replace list(None) by None. 
Otherwise get_user_diff() would return sshpubkey - # as different which should be avoided. - if module.params['sshpubkey'] is not None: - if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": - module.params['sshpubkey'] = None - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, user = ensure(module, client) - module.exit_json(changed=changed, user=user) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/ipa/ipa_vault.py b/plugins/modules/identity/ipa/ipa_vault.py deleted file mode 100644 index 7a6a601fa9..0000000000 --- a/plugins/modules/identity/ipa/ipa_vault.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Juan Manuel Parrilla -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_vault -author: Juan Manuel Parrilla (@jparrill) -short_description: Manage FreeIPA vaults -description: -- Add, modify and delete vaults and secret vaults. -- KRA service should be enabled to use this module. -options: - cn: - description: - - Vault name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - description: - description: - - Description. - type: str - ipavaulttype: - description: - - Vault types are based on security level. - default: "symmetric" - choices: ["asymmetric", "standard", "symmetric"] - aliases: ["vault_type"] - type: str - ipavaultpublickey: - description: - - Public key. - aliases: ["vault_public_key"] - type: str - ipavaultsalt: - description: - - Vault Salt. - aliases: ["vault_salt"] - type: str - username: - description: - - Any user can own one or more user vaults. - - Mutually exclusive with service. - aliases: ["user"] - type: list - elements: str - service: - description: - - Any service can own one or more service vaults. - - Mutually exclusive with user. - type: str - state: - description: - - State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - replace: - description: - - Force replace the existant vault on IPA server. - type: bool - default: False - choices: ["True", "False"] - validate_certs: - description: - - Validate IPA server certificates. 
- type: bool - default: true -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure vault is present - community.general.ipa_vault: - name: vault01 - vault_type: standard - user: user01 - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - validate_certs: false - -- name: Ensure vault is present for Admin user - community.general.ipa_vault: - name: vault01 - vault_type: standard - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure vault is absent - community.general.ipa_vault: - name: vault01 - vault_type: standard - user: user01 - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Modify vault if already exists - community.general.ipa_vault: - name: vault01 - vault_type: standard - description: "Vault for test" - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - replace: True - -- name: Get vault info if already exists - community.general.ipa_vault: - name: vault01 - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -vault: - description: Vault as returned by IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class VaultIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(VaultIPAClient, self).__init__(module, host, port, protocol) - - def vault_find(self, name): - return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name}) - - def vault_add_internal(self, name, item): - return self._post_json(method='vault_add_internal', name=name, item=item) - - def vault_mod_internal(self, name, item): - return self._post_json(method='vault_mod_internal', name=name, item=item) - - def vault_del(self, name): - return self._post_json(method='vault_del', name=name) - - -def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None): - vault = {} - - if description is not None: - vault['description'] = description - if vault_type is not None: - vault['ipavaulttype'] = vault_type - if vault_salt is not None: - vault['ipavaultsalt'] = vault_salt - if vault_public_key is not None: - vault['ipavaultpublickey'] = vault_public_key - if service is not None: - vault['service'] = service - return vault - - -def get_vault_diff(client, ipa_vault, module_vault, module): - return client.get_diff(ipa_data=ipa_vault, module_data=module_vault) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - user = module.params['username'] - replace = module.params['replace'] - - module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'], - vault_salt=module.params['ipavaultsalt'], - vault_public_key=module.params['ipavaultpublickey'], - service=module.params['service']) - ipa_vault = client.vault_find(name=name) - - changed = False - if state == 'present': - if not ipa_vault: - # New vault - changed = True - if not module.check_mode: - ipa_vault = client.vault_add_internal(name, item=module_vault) - else: - # Already exists - if replace: - diff = get_vault_diff(client, ipa_vault, module_vault, module) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in 
diff: - data[key] = module_vault.get(key) - client.vault_mod_internal(name=name, item=data) - - else: - if ipa_vault: - changed = True - if not module.check_mode: - client.vault_del(name) - - return changed, client.vault_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - ipavaulttype=dict(type='str', default='symmetric', - choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']), - ipavaultsalt=dict(type='str', aliases=['vault_salt']), - ipavaultpublickey=dict(type='str', aliases=['vault_public_key']), - service=dict(type='str'), - replace=dict(type='bool', default=False, choices=[True, False]), - state=dict(type='str', default='present', choices=['present', 'absent']), - username=dict(type='list', elements='str', aliases=['user'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['username', 'service']]) - - client = VaultIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, vault = ensure(module, client) - module.exit_json(changed=changed, vault=vault) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py deleted file mode 100644 index c7bf5bc01f..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_authentication.py +++ /dev/null @@ -1,502 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, INSPQ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_authentication - -short_description: Configure authentication in Keycloak - -description: - - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. - - It can also delete the flow. - -version_added: "3.3.0" - -options: - realm: - description: - - The name of the realm in which is the authentication. - required: true - type: str - alias: - description: - - Alias for the authentication flow. - required: true - type: str - description: - description: - - Description of the flow. - type: str - providerId: - description: - - C(providerId) for the new flow when not copied from an existing flow. - type: str - copyFrom: - description: - - C(flowAlias) of the authentication flow to use for the copy. - type: str - authenticationExecutions: - description: - - Configuration structure for the executions. - type: list - elements: dict - suboptions: - providerId: - description: - - C(providerID) for the new flow when not copied from an existing flow. - type: str - displayName: - description: - - Name of the execution or subflow to create or update. - type: str - requirement: - description: - - Control status of the subflow or execution. - choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ] - type: str - flowAlias: - description: - - Alias of parent flow. - type: str - authenticationConfig: - description: - - Describe the config of the authentication. 
-                type: dict
-            index:
-                description:
-                    - Priority order of the execution.
-                type: int
-    state:
-        description:
-            - Control whether the authentication flow must exist or not.
-        choices: [ "present", "absent" ]
-        default: present
-        type: str
-    force:
-        type: bool
-        default: false
-        description:
-            - If C(true), allows removing the authentication flow and recreating it.
-
-extends_documentation_fragment:
-- community.general.keycloak
-
-author:
-    - Philippe Gauthier (@elfelip)
-    - Gaëtan Daubresse (@Gaetan2907)
-'''
-
-EXAMPLES = '''
-    - name: Create an authentication flow from first broker login and add an execution to it.
-      community.general.keycloak_authentication:
-        auth_keycloak_url: http://localhost:8080/auth
-        auth_realm: master
-        auth_username: admin
-        auth_password: password
-        realm: master
-        alias: "Copy of first broker login"
-        copyFrom: "first broker login"
-        authenticationExecutions:
-            - providerId: "test-execution1"
-              requirement: "REQUIRED"
-              authenticationConfig:
-                  alias: "test.execution1.property"
-                  config:
-                      test1.property: "value"
-            - providerId: "test-execution2"
-              requirement: "REQUIRED"
-              authenticationConfig:
-                  alias: "test.execution2.property"
-                  config:
-                      test2.property: "value"
-        state: present
-
-    - name: Re-create the authentication flow
-      community.general.keycloak_authentication:
-        auth_keycloak_url: http://localhost:8080/auth
-        auth_realm: master
-        auth_username: admin
-        auth_password: password
-        realm: master
-        alias: "Copy of first broker login"
-        copyFrom: "first broker login"
-        authenticationExecutions:
-            - providerId: "test-provisioning"
-              requirement: "REQUIRED"
-              authenticationConfig:
-                  alias: "test.provisioning.property"
-                  config:
-                      test.provisioning.property: "value"
-        state: present
-        force: true
-
-    - name: Create an authentication flow with subflow containing an execution.
-      community.general.keycloak_authentication:
-        auth_keycloak_url: http://localhost:8080/auth
-        auth_realm: master
-        auth_username: admin
-        auth_password: password
-        realm: master
-        alias: "Copy of first broker login"
-        copyFrom: "first broker login"
-        authenticationExecutions:
-            - providerId: "test-execution1"
-              requirement: "REQUIRED"
-            - displayName: "New Subflow"
-              requirement: "REQUIRED"
-            - providerId: "auth-cookie"
-              requirement: "REQUIRED"
-              flowAlias: "New Subflow"
-        state: present
-
-    - name: Remove authentication.
-      community.general.keycloak_authentication:
-        auth_keycloak_url: http://localhost:8080/auth
-        auth_realm: master
-        auth_username: admin
-        auth_password: password
-        realm: master
-        alias: "Copy of first broker login"
-        state: absent
-'''
-
-RETURN = '''
-msg:
-    description: Message as to what action was taken.
-    returned: always
-    type: str
-
-flow:
-    description:
-        - JSON representation for the authentication.
-        - Deprecated return value; it will be removed in community.general 6.0.0. Please use the return value I(end_state) instead.
-    returned: on success
-    type: dict
-    sample: {
-        "alias": "Copy of first broker login",
-        "authenticationExecutions": [
-            {
-                "alias": "review profile config",
-                "authenticationConfig": {
-                    "alias": "review profile config",
-                    "config": { "update.profile.on.first.login": "missing" },
-                    "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
-                },
-                "configurable": true,
-                "displayName": "Review Profile",
-                "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c",
-                "index": 0,
-                "level": 0,
-                "providerId": "idp-review-profile",
-                "requirement": "REQUIRED",
-                "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ]
-            }
-        ],
-        "builtIn": false,
-        "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
-        "id": "bc228863-5887-4297-b898-4d988f8eaa5c",
-        "providerId": "basic-flow",
-        "topLevel": true
-    }
-
-end_state:
-    description: Representation of the authentication after module execution.
-    returned: on success
-    type: dict
-    sample: {
-        "alias": "Copy of first broker login",
-        "authenticationExecutions": [
-            {
-                "alias": "review profile config",
-                "authenticationConfig": {
-                    "alias": "review profile config",
-                    "config": { "update.profile.on.first.login": "missing" },
-                    "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
-                },
-                "configurable": true,
-                "displayName": "Review Profile",
-                "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c",
-                "index": 0,
-                "level": 0,
-                "providerId": "idp-review-profile",
-                "requirement": "REQUIRED",
-                "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ]
-            }
-        ],
-        "builtIn": false,
-        "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
-        "id": "bc228863-5887-4297-b898-4d988f8eaa5c",
-        "providerId": "basic-flow",
-        "topLevel": true
-    }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \
-    import KeycloakAPI, camel, keycloak_argument_spec, get_token, KeycloakError, is_struct_included
-from ansible.module_utils.basic import AnsibleModule
-
-
-def find_exec_in_executions(searched_exec, executions):
-    """
-    Search whether the given execution is contained in the list of executions.
-    :param searched_exec: Execution to search for.
-    :param executions: List of executions.
-    :return: Index of the execution, or -1 if not found.
-    """
-    for i, existing_exec in enumerate(executions, start=0):
-        if ("providerId" in existing_exec and "providerId" in searched_exec and
-                existing_exec["providerId"] == searched_exec["providerId"] or
-                "displayName" in existing_exec and "displayName" in searched_exec and
-                existing_exec["displayName"] == searched_exec["displayName"]):
-            return i
-    return -1
-
-
-def create_or_update_executions(kc, config, realm='master'):
-    """
-    Create or update executions for an authentication flow.
-    :param kc: Keycloak API access.
-    :param config: Representation of the authentication flow including its executions.
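An aside on the C(config) parameter just described: it is essentially the module's parameter dict for the flow. A hedged sketch of the shape this helper consumes, with purely illustrative values (C(kc) is assumed to be an already-initialised KeycloakAPI instance):

    config = {
        'alias': 'Copy of first broker login',
        'authenticationExecutions': [
            # Matched against existing executions by providerId (or displayName for subflows);
            # unspecified suboptions arrive as None from the argument-spec checker.
            {'providerId': 'test-execution1',
             'displayName': None,
             'requirement': 'REQUIRED',
             'flowAlias': None,          # falls back to config['alias'] when None
             'authenticationConfig': None,
             'index': None},
        ],
    }
    changed, diff = create_or_update_executions(kc=kc, config=config, realm='master')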
- :param realm: Realm - :return: tuple (changed, dict(before, after) - WHERE - bool changed indicates if changes have been made - dict(str, str) shows state before and after creation/update - """ - try: - changed = False - after = "" - before = "" - if "authenticationExecutions" in config: - # Get existing executions on the Keycloak server for this alias - existing_executions = kc.get_executions_representation(config, realm=realm) - for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0): - if new_exec["index"] is not None: - new_exec_index = new_exec["index"] - exec_found = False - # Get flowalias parent if given - if new_exec["flowAlias"] is not None: - flow_alias_parent = new_exec["flowAlias"] - else: - flow_alias_parent = config["alias"] - # Check if same providerId or displayName name between existing and new execution - exec_index = find_exec_in_executions(new_exec, existing_executions) - if exec_index != -1: - # Remove key that doesn't need to be compared with existing_exec - exclude_key = ["flowAlias"] - for index_key, key in enumerate(new_exec, start=0): - if new_exec[key] is None: - exclude_key.append(key) - # Compare the executions to see if it need changes - if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: - exec_found = True - before += str(existing_executions[exec_index]) + '\n' - id_to_update = existing_executions[exec_index]["id"] - # Remove exec from list in case 2 exec with same name - existing_executions[exec_index].clear() - elif new_exec["providerId"] is not None: - kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) - exec_found = True - exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] - after += str(new_exec) + '\n' - elif new_exec["displayName"] is not None: - kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) - exec_found = True - exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] - after += str(new_exec) + '\n' - if exec_found: - changed = True - if exec_index != -1: - # Update the existing execution - updated_exec = { - "id": id_to_update - } - # add the execution configuration - if new_exec["authenticationConfig"] is not None: - kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) - for key in new_exec: - # remove unwanted key for the next API call - if key != "flowAlias" and key != "authenticationConfig": - updated_exec[key] = new_exec[key] - if new_exec["requirement"] is not None: - kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) - diff = exec_index - new_exec_index - kc.change_execution_priority(updated_exec["id"], diff, realm=realm) - after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n' - return changed, dict(before=before, after=after) - except Exception as e: - kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - meta_args = dict( - realm=dict(type='str', required=True), - alias=dict(type='str', required=True), - providerId=dict(type='str'), - description=dict(type='str'), - copyFrom=dict(type='str'), - authenticationExecutions=dict(type='list', elements='dict', - 
options=dict( - providerId=dict(type='str'), - displayName=dict(type='str'), - requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), - flowAlias=dict(type='str'), - authenticationConfig=dict(type='dict'), - index=dict(type='int'), - )), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default=False), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']]) - ) - - result = dict(changed=False, msg='', flow={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - force = module.params.get('force') - - new_auth_repr = { - "alias": module.params.get("alias"), - "copyFrom": module.params.get("copyFrom"), - "providerId": module.params.get("providerId"), - "authenticationExecutions": module.params.get("authenticationExecutions"), - "description": module.params.get("description"), - "builtIn": module.params.get("builtIn"), - "subflow": module.params.get("subflow"), - } - - auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm) - - # Cater for when it doesn't exist (an empty dict) - if not auth_repr: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['flow'] = result['end_state'] - result['msg'] = new_auth_repr["alias"] + ' absent' - module.exit_json(**result) - - elif state == 'present': - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=new_auth_repr) - - if module.check_mode: - module.exit_json(**result) - - # If copyFrom is defined, create authentication flow from a copy - if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: - auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) - else: # Create an empty authentication flow - auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) - - # If the authentication still not exist on the server, raise an exception. 
- if auth_repr is None: - result['msg'] = "Authentication just created not found: " + str(new_auth_repr) - module.fail_json(**result) - - # Configure the executions for the flow - create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) - - # Get executions created - exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) - if exec_repr is not None: - auth_repr["authenticationExecutions"] = exec_repr - result['end_state'] = auth_repr - result['flow'] = result['end_state'] - - else: - if state == 'present': - # Process an update - - if force: # If force option is true - # Delete the actual authentication flow - result['changed'] = True - if module._diff: - result['diff'] = dict(before=auth_repr, after=new_auth_repr) - if module.check_mode: - module.exit_json(**result) - kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) - # If copyFrom is defined, create authentication flow from a copy - if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: - auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) - else: # Create an empty authentication flow - auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) - # If the authentication still not exist on the server, raise an exception. - if auth_repr is None: - result['msg'] = "Authentication just created not found: " + str(new_auth_repr) - module.fail_json(**result) - # Configure the executions for the flow - - if module.check_mode: - module.exit_json(**result) - changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) - result['changed'] |= changed - - if module._diff: - result['diff'] = diff - - # Get executions created - exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) - if exec_repr is not None: - auth_repr["authenticationExecutions"] = exec_repr - result['end_state'] = auth_repr - result['flow'] = result['end_state'] - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=auth_repr, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) - - result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'], - id=auth_repr["id"]) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py deleted file mode 100644 index 82cdab8b6c..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ /dev/null @@ -1,944 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_client - -short_description: Allows administration of Keycloak clients via Keycloak API - - -description: - - This module allows the administration of Keycloak clients via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. 
In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - - The Keycloak API does not always sanity check inputs e.g. you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the client - - On C(present), the client will be created (or updated if it exists already). - - On C(absent), the client will be removed if it exists - choices: ['present', 'absent'] - default: 'present' - type: str - - realm: - description: - - The realm to create the client in. - type: str - default: master - - client_id: - description: - - Client id of client to be worked on. This is usually an alphanumeric name chosen by - you. Either this or I(id) is required. If you specify both, I(id) takes precedence. - This is 'clientId' in the Keycloak REST API. - aliases: - - clientId - type: str - - id: - description: - - Id of client to be worked on. This is usually an UUID. Either this or I(client_id) - is required. If you specify both, this takes precedence. - type: str - - name: - description: - - Name of the client (this is not the same as I(client_id)). - type: str - - description: - description: - - Description of the client in Keycloak. - type: str - - root_url: - description: - - Root URL appended to relative URLs for this client. - This is 'rootUrl' in the Keycloak REST API. - aliases: - - rootUrl - type: str - - admin_url: - description: - - URL to the admin interface of the client. - This is 'adminUrl' in the Keycloak REST API. - aliases: - - adminUrl - type: str - - base_url: - description: - - Default URL to use when the auth server needs to redirect or link back to the client - This is 'baseUrl' in the Keycloak REST API. - aliases: - - baseUrl - type: str - - enabled: - description: - - Is this client enabled or not? - type: bool - - client_authenticator_type: - description: - - How do clients authenticate with the auth server? Either C(client-secret) or - C(client-jwt) can be chosen. When using C(client-secret), the module parameter - I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url), - C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter - to configure its behavior. - This is 'clientAuthenticatorType' in the Keycloak REST API. - choices: ['client-secret', 'client-jwt'] - aliases: - - clientAuthenticatorType - type: str - - secret: - description: - - When using I(client_authenticator_type) C(client-secret) (the default), you can - specify a secret here (otherwise one will be generated if it does not exit). If - changing this secret, the module will not register a change currently (but the - changed secret will be saved). - type: str - - registration_access_token: - description: - - The registration access token provides access for clients to the client registration - service. - This is 'registrationAccessToken' in the Keycloak REST API. - aliases: - - registrationAccessToken - type: str - - default_roles: - description: - - list of default roles for this client. 
If the client roles referenced do not exist - yet, they will be created. - This is 'defaultRoles' in the Keycloak REST API. - aliases: - - defaultRoles - type: list - elements: str - - redirect_uris: - description: - - Acceptable redirect URIs for this client. - This is 'redirectUris' in the Keycloak REST API. - aliases: - - redirectUris - type: list - elements: str - - web_origins: - description: - - List of allowed CORS origins. - This is 'webOrigins' in the Keycloak REST API. - aliases: - - webOrigins - type: list - elements: str - - not_before: - description: - - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). - This is 'notBefore' in the Keycloak REST API. - type: int - aliases: - - notBefore - - bearer_only: - description: - - The access type of this client is bearer-only. - This is 'bearerOnly' in the Keycloak REST API. - aliases: - - bearerOnly - type: bool - - consent_required: - description: - - If enabled, users have to consent to client access. - This is 'consentRequired' in the Keycloak REST API. - aliases: - - consentRequired - type: bool - - standard_flow_enabled: - description: - - Enable standard flow for this client or not (OpenID connect). - This is 'standardFlowEnabled' in the Keycloak REST API. - aliases: - - standardFlowEnabled - type: bool - - implicit_flow_enabled: - description: - - Enable implicit flow for this client or not (OpenID connect). - This is 'implicitFlowEnabled' in the Keycloak REST API. - aliases: - - implicitFlowEnabled - type: bool - - direct_access_grants_enabled: - description: - - Are direct access grants enabled for this client or not (OpenID connect). - This is 'directAccessGrantsEnabled' in the Keycloak REST API. - aliases: - - directAccessGrantsEnabled - type: bool - - service_accounts_enabled: - description: - - Are service accounts enabled for this client or not (OpenID connect). - This is 'serviceAccountsEnabled' in the Keycloak REST API. - aliases: - - serviceAccountsEnabled - type: bool - - authorization_services_enabled: - description: - - Are authorization services enabled for this client or not (OpenID connect). - This is 'authorizationServicesEnabled' in the Keycloak REST API. - aliases: - - authorizationServicesEnabled - type: bool - - public_client: - description: - - Is the access type for this client public or not. - This is 'publicClient' in the Keycloak REST API. - aliases: - - publicClient - type: bool - - frontchannel_logout: - description: - - Is frontchannel logout enabled for this client or not. - This is 'frontchannelLogout' in the Keycloak REST API. - aliases: - - frontchannelLogout - type: bool - - protocol: - description: - - Type of client (either C(openid-connect) or C(saml). - type: str - choices: ['openid-connect', 'saml'] - - full_scope_allowed: - description: - - Is the "Full Scope Allowed" feature set for this client or not. - This is 'fullScopeAllowed' in the Keycloak REST API. - aliases: - - fullScopeAllowed - type: bool - - node_re_registration_timeout: - description: - - Cluster node re-registration timeout for this client. - This is 'nodeReRegistrationTimeout' in the Keycloak REST API. - type: int - aliases: - - nodeReRegistrationTimeout - - registered_nodes: - description: - - dict of registered cluster nodes (with C(nodename) as the key and last registration - time as the value). - This is 'registeredNodes' in the Keycloak REST API. - type: dict - aliases: - - registeredNodes - - client_template: - description: - - Client template to use for this client. 
If it does not exist this field will silently - be dropped. - This is 'clientTemplate' in the Keycloak REST API. - type: str - aliases: - - clientTemplate - - use_template_config: - description: - - Whether or not to use configuration from the I(client_template). - This is 'useTemplateConfig' in the Keycloak REST API. - aliases: - - useTemplateConfig - type: bool - - use_template_scope: - description: - - Whether or not to use scope configuration from the I(client_template). - This is 'useTemplateScope' in the Keycloak REST API. - aliases: - - useTemplateScope - type: bool - - use_template_mappers: - description: - - Whether or not to use mapper configuration from the I(client_template). - This is 'useTemplateMappers' in the Keycloak REST API. - aliases: - - useTemplateMappers - type: bool - - surrogate_auth_required: - description: - - Whether or not surrogate auth is required. - This is 'surrogateAuthRequired' in the Keycloak REST API. - aliases: - - surrogateAuthRequired - type: bool - - authorization_settings: - description: - - a data structure defining the authorization settings for this client. For reference, - please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). - This is 'authorizationSettings' in the Keycloak REST API. - type: dict - aliases: - - authorizationSettings - - authentication_flow_binding_overrides: - description: - - Override realm authentication flow bindings. - type: dict - aliases: - - authenticationFlowBindingOverrides - version_added: 3.4.0 - - protocol_mappers: - description: - - a list of dicts defining protocol mappers for this client. - This is 'protocolMappers' in the Keycloak REST API. - aliases: - - protocolMappers - type: list - elements: dict - suboptions: - consentRequired: - description: - - Specifies whether a user needs to provide consent to a client for this mapper to be active. - type: bool - - consentText: - description: - - The human-readable name of the consent the user is presented to accept. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. - type: str - - name: - description: - - The name of this protocol mapper. - type: str - - protocol: - description: - - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper. - is active. - choices: ['openid-connect', 'saml'] - type: str - - protocolMapper: - description: - - The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the I(existing) field. - type: dict - - attributes: - description: - - A dict of further attributes for this client. This can contain various configuration - settings; an example is given in the examples section. While an exhaustive list of - permissible options is not available; possible options as of Keycloak 3.4 are listed below. The Keycloak - API does not validate whether a given option is appropriate for the protocol used; if specified - anyway, Keycloak will simply not use it. - type: dict - suboptions: - saml.authnstatement: - description: - - For SAML clients, boolean specifying whether or not a statement containing method and timestamp - should be included in the login response. - - saml.client.signature: - description: - - For SAML clients, boolean specifying whether a client signature is required and validated. - - saml.encrypt: - description: - - Boolean specifying whether SAML assertions should be encrypted with the client's public key. - - saml.force.post.binding: - description: - - For SAML clients, boolean specifying whether always to use POST binding for responses. - - saml.onetimeuse.condition: - description: - - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. - - saml.server.signature: - description: - - Boolean specifying whether SAML documents should be signed by the realm. - - saml.server.signature.keyinfo.ext: - description: - - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion - of the signing key id in the SAML Extensions element. - - saml.signature.algorithm: - description: - - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1). - - saml.signing.certificate: - description: - - SAML signing key certificate, base64-encoded. - - saml.signing.private.key: - description: - - SAML signing key private key, base64-encoded. 
- - saml_assertion_consumer_url_post: - description: - - SAML POST Binding URL for the client's assertion consumer service (login responses). - - saml_assertion_consumer_url_redirect: - description: - - SAML Redirect Binding URL for the client's assertion consumer service (login responses). - - - saml_force_name_id_format: - description: - - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead. - - saml_name_id_format: - description: - - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)) - - saml_signature_canonicalization_method: - description: - - SAML signature canonicalization method. This is one of four values, namely - C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE, - C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, - C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and - C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS. - - saml_single_logout_service_url_post: - description: - - SAML POST binding url for the client's single logout service. - - saml_single_logout_service_url_redirect: - description: - - SAML redirect binding url for the client's single logout service. - - user.info.response.signature.alg: - description: - - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned). - - request.object.signature.alg: - description: - - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending - OIDC request object. One of C(any), C(none), C(RS256). - - use.jwks.url: - description: - - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client - public keys. - - jwks.url: - description: - - For OpenID-Connect clients, URL where client keys in JWK are stored. - - jwt.credential.certificate: - description: - - For OpenID-Connect clients, client certificate for validating JWT issued by - client and signed by its key, base64-encoded. 
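Before the examples, one note on naming: as stated at the top of this documentation, the module's option names are snake_cased forms of the Keycloak API's camelCase names, converted internally by the camel() helper imported from the collection's module_utils. A minimal reimplementation of that naming rule, for illustration only (the collection's actual helper may differ in detail):

    def snake_to_camel(name):
        # 'client_authenticator_type' -> 'clientAuthenticatorType'
        parts = name.split('_')
        return parts[0] + ''.join(part.capitalize() for part in parts[1:])

    assert snake_to_camel('node_re_registration_timeout') == 'nodeReRegistrationTimeout'
    assert snake_to_camel('client_id') == 'clientId'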
- -extends_documentation_fragment: -- community.general.keycloak - -author: - - Eike Frost (@eikef) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak client (minimal example), authentication with credentials - community.general.keycloak_client: - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - client_id: test - state: present - delegate_to: localhost - - -- name: Create or update Keycloak client (minimal example), authentication with token - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - token: TOKEN - client_id: test - state: present - delegate_to: localhost - - -- name: Delete a Keycloak client - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - client_id: test - state: absent - delegate_to: localhost - - -- name: Create or update a Keycloak client (with all the bells and whistles) - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - state: present - realm: master - client_id: test - id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95 - name: this_is_a_test - description: Description of this wonderful client - root_url: https://www.example.com/ - admin_url: https://www.example.com/admin_url - base_url: basepath - enabled: True - client_authenticator_type: client-secret - secret: REALLYWELLKEPTSECRET - redirect_uris: - - https://www.example.com/* - - http://localhost:8888/ - web_origins: - - https://www.example.com/* - not_before: 1507825725 - bearer_only: False - consent_required: False - standard_flow_enabled: True - implicit_flow_enabled: False - direct_access_grants_enabled: False - service_accounts_enabled: False - authorization_services_enabled: False - public_client: False - frontchannel_logout: False - protocol: openid-connect - full_scope_allowed: false - node_re_registration_timeout: -1 - client_template: test - use_template_config: False - use_template_scope: false - use_template_mappers: no - registered_nodes: - node01.example.com: 1507828202 - registration_access_token: eyJWT_TOKEN - surrogate_auth_required: false - default_roles: - - test01 - - test02 - authentication_flow_binding_overrides: - browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb - protocol_mappers: - - config: - access.token.claim: True - claim.name: "family_name" - id.token.claim: True - jsonType.label: String - user.attribute: lastName - userinfo.token.claim: True - consentRequired: True - consentText: "${familyName}" - name: family name - protocol: openid-connect - protocolMapper: oidc-usermodel-property-mapper - - config: - attribute.name: Role - attribute.nameformat: Basic - single: false - consentRequired: false - name: role list - protocol: saml - protocolMapper: saml-role-list-mapper - attributes: - saml.authnstatement: True - saml.client.signature: True - saml.force.post.binding: True - saml.server.signature: True - saml.signature.algorithm: RSA_SHA256 - saml.signing.certificate: CERTIFICATEHERE - saml.signing.private.key: PRIVATEKEYHERE - saml_force_name_id_format: False - saml_name_id_format: username - saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#" - user.info.response.signature.alg: RS256 - request.object.signature.alg: RS256 - 
use.jwks.url: true
-        jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
-        jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
-  delegate_to: localhost
-'''
-
-RETURN = '''
-msg:
-    description: Message as to what action was taken.
-    returned: always
-    type: str
-    sample: "Client testclient has been updated"
-
-proposed:
-    description: Representation of proposed client.
-    returned: always
-    type: dict
-    sample: {
-        clientId: "test"
-    }
-
-existing:
-    description: Representation of existing client (sample is truncated).
-    returned: always
-    type: dict
-    sample: {
-        "adminUrl": "http://www.example.com/admin_url",
-        "attributes": {
-            "request.object.signature.alg": "RS256",
-        }
-    }
-
-end_state:
-    description: Representation of client after module execution (sample is truncated).
-    returned: on success
-    type: dict
-    sample: {
-        "adminUrl": "http://www.example.com/admin_url",
-        "attributes": {
-            "request.object.signature.alg": "RS256",
-        }
-    }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-
-
-def normalise_cr(clientrep, remove_ids=False):
-    """ Re-sorts any properties where the order is irrelevant so that diffs are minimised, and adds default values
-    where appropriate so that the change detection is more effective.
-
-    :param clientrep: the clientrep dict to be sanitized
-    :param remove_ids: If set to true, the unique IDs of objects are removed to keep the diff and the changed
-        detection from alerting on object IDs, which are usually not known in advance (e.g. for protocol_mappers)
-    :return: normalised clientrep dict
-    """
-    # Avoid modifying the dict that was passed in
-    clientrep = clientrep.copy()
-
-    if 'attributes' in clientrep:
-        clientrep['attributes'] = list(sorted(clientrep['attributes']))
-
-    if 'redirectUris' in clientrep:
-        clientrep['redirectUris'] = list(sorted(clientrep['redirectUris']))
-
-    if 'protocolMappers' in clientrep:
-        clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
-        for mapper in clientrep['protocolMappers']:
-            if remove_ids:
-                mapper.pop('id', None)
-
-            # Set to a default value.
-            mapper['consentRequired'] = mapper.get('consentRequired', False)
-
-    return clientrep
-
-
-def sanitize_cr(clientrep):
-    """ Removes probably sensitive details from a client representation.
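With normalise_cr() defined in full above, a short usage sketch (all values hypothetical) shows what actually gets normalised:

    rep = {
        'redirectUris': ['https://b.example.com/*', 'https://a.example.com/*'],
        'protocolMappers': [
            {'id': 'xyz', 'name': 'family name', 'protocol': 'openid-connect',
             'protocolMapper': 'oidc-usermodel-property-mapper'},
        ],
    }
    norm = normalise_cr(rep, remove_ids=True)
    # norm['redirectUris'] == ['https://a.example.com/*', 'https://b.example.com/*']
    # The mapper loses its 'id' and gains the default 'consentRequired': False.
    # Note that clientrep.copy() is shallow, so the mapper dicts are modified in place.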
- - :param clientrep: the clientrep dict to be sanitized - :return: sanitized clientrep dict - """ - result = clientrep.copy() - if 'secret' in result: - result['secret'] = 'no_log' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' - return normalise_cr(result) - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - protmapper_spec = dict( - consentRequired=dict(type='bool'), - consentText=dict(type='str'), - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - protocolMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - - id=dict(type='str'), - client_id=dict(type='str', aliases=['clientId']), - name=dict(type='str'), - description=dict(type='str'), - root_url=dict(type='str', aliases=['rootUrl']), - admin_url=dict(type='str', aliases=['adminUrl']), - base_url=dict(type='str', aliases=['baseUrl']), - surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), - enabled=dict(type='bool'), - client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), - secret=dict(type='str', no_log=True), - registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), - default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), - redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), - web_origins=dict(type='list', elements='str', aliases=['webOrigins']), - not_before=dict(type='int', aliases=['notBefore']), - bearer_only=dict(type='bool', aliases=['bearerOnly']), - consent_required=dict(type='bool', aliases=['consentRequired']), - standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']), - implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']), - direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']), - service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']), - authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), - public_client=dict(type='bool', aliases=['publicClient']), - frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - attributes=dict(type='dict'), - full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']), - node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), - registered_nodes=dict(type='dict', aliases=['registeredNodes']), - client_template=dict(type='str', aliases=['clientTemplate']), - use_template_config=dict(type='bool', aliases=['useTemplateConfig']), - use_template_scope=dict(type='bool', aliases=['useTemplateScope']), - use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), - authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), - authorization_settings=dict(type='dict', aliases=['authorizationSettings']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['client_id', 'id'], - 
['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - cid = module.params.get('id') - state = module.params.get('state') - - # Filter and map the parameters names that apply to the client - client_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm) - if before_client is not None: - cid = before_client['id'] - else: - before_client = kc.get_client_by_id(cid, realm=realm) - - if before_client is None: - before_client = {} - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for client_param in client_params: - new_param_value = module.params.get(client_param) - - # some lists in the Keycloak API are sorted, some are not. - if isinstance(new_param_value, list): - if client_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass - # Unfortunately, the ansible argument spec checker introduces variables with null values when - # they are not specified - if client_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] - - changeset[camel(client_param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_client = before_client.copy() - desired_client.update(changeset) - - result['proposed'] = sanitize_cr(changeset) - result['existing'] = sanitize_cr(before_client) - - # Cater for when it doesn't exist (an empty dict) - if not before_client: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Client does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if 'clientId' not in desired_client: - module.fail_json(msg='client_id needs to be specified when creating a new client') - - if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_client)) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_client(desired_client, realm=realm) - after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm) - - result['end_state'] = sanitize_cr(after_client) - - result['msg'] = 'Client %s has been created.' 
% desired_client['clientId'] - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - result['changed'] = True - - if module.check_mode: - # We can only compare the current client with the proposed updates we have - before_norm = normalise_cr(before_client, remove_ids=True) - desired_norm = normalise_cr(desired_client, remove_ids=True) - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_norm), - after=sanitize_cr(desired_norm)) - result['changed'] = (before_norm != desired_norm) - - module.exit_json(**result) - - # do the update - kc.update_client(cid, desired_client, realm=realm) - - after_client = kc.get_client_by_id(cid, realm=realm) - if before_client == after_client: - result['changed'] = False - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), - after=sanitize_cr(after_client)) - - result['end_state'] = sanitize_cr(after_client) - - result['msg'] = 'Client %s has been updated.' % desired_client['clientId'] - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_client(cid, realm=realm) - result['proposed'] = {} - - result['end_state'] = {} - - result['msg'] = 'Client %s has been deleted.' % before_client['clientId'] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py b/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py deleted file mode 100644 index b7cd70c122..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py +++ /dev/null @@ -1,350 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_client_rolemapping - -short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API - -version_added: 3.5.0 - -description: - - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup - to the API to translate the name into the role ID. - - -options: - state: - description: - - State of the client_rolemapping. - - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. 
-            - On C(absent), the client_rolemapping will be removed if it exists.
-        default: 'present'
-        type: str
-        choices:
-            - present
-            - absent
-
-    realm:
-        type: str
-        description:
-            - The Keycloak realm under which this role_representation resides.
-        default: 'master'
-
-    group_name:
-        type: str
-        description:
-            - Name of the group to be mapped.
-            - This parameter is required (it can be replaced by gid to reduce the number of API calls).
-
-    gid:
-        type: str
-        description:
-            - Id of the group to be mapped.
-            - This parameter is not required for updating or deleting the rolemapping but
-              providing it will reduce the number of API calls required.
-
-    client_id:
-        type: str
-        description:
-            - Name of the client to be mapped (different from I(cid)).
-            - This parameter is required (it can be replaced by cid to reduce the number of API calls).
-
-    cid:
-        type: str
-        description:
-            - Id of the client to be mapped.
-            - This parameter is not required for updating or deleting the rolemapping but
-              providing it will reduce the number of API calls required.
-
-    roles:
-        description:
-            - Roles to be mapped to the group.
-        type: list
-        elements: dict
-        suboptions:
-            name:
-                type: str
-                description:
-                    - Name of the role_representation.
-                    - This parameter is required only when creating or updating the role_representation.
-            id:
-                type: str
-                description:
-                    - The unique identifier for this role_representation.
-                    - This parameter is not required for updating or deleting a role_representation but
-                      providing it will reduce the number of API calls required.
-
-extends_documentation_fragment:
-- community.general.keycloak
-
-
-author:
-    - Gaëtan Daubresse (@Gaetan2907)
-'''
-
-EXAMPLES = '''
-- name: Map a client role to a group, authentication with credentials
-  community.general.keycloak_client_rolemapping:
-    realm: MyCustomRealm
-    auth_client_id: admin-cli
-    auth_keycloak_url: https://auth.example.com/auth
-    auth_realm: master
-    auth_username: USERNAME
-    auth_password: PASSWORD
-    state: present
-    client_id: client1
-    group_name: group1
-    roles:
-      - name: role_name1
-        id: role_id1
-      - name: role_name2
-        id: role_id2
-  delegate_to: localhost
-
-- name: Map a client role to a group, authentication with token
-  community.general.keycloak_client_rolemapping:
-    realm: MyCustomRealm
-    auth_client_id: admin-cli
-    auth_keycloak_url: https://auth.example.com/auth
-    token: TOKEN
-    state: present
-    client_id: client1
-    group_name: group1
-    roles:
-      - name: role_name1
-        id: role_id1
-      - name: role_name2
-        id: role_id2
-  delegate_to: localhost
-
-- name: Unmap client role from a group
-  community.general.keycloak_client_rolemapping:
-    realm: MyCustomRealm
-    auth_client_id: admin-cli
-    auth_keycloak_url: https://auth.example.com/auth
-    auth_realm: master
-    auth_username: USERNAME
-    auth_password: PASSWORD
-    state: absent
-    client_id: client1
-    group_name: group1
-    roles:
-      - name: role_name1
-        id: role_id1
-      - name: role_name2
-        id: role_id2
-  delegate_to: localhost
-
-'''
-
-RETURN = '''
-msg:
-    description: Message as to what action was taken.
-    returned: always
-    type: str
-    sample: "Role role1 assigned to group group1."
-
-proposed:
-    description: Representation of proposed client role mapping.
-    returned: always
-    type: dict
-    sample: {
-        clientId: "test"
-    }
-
-existing:
-    description:
-        - Representation of existing client role mapping.
-        - The sample is truncated.
- returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: - - Representation of client role mapping after module execution. - - The sample is truncated. - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - roles_spec = dict( - name=dict(type='str'), - id=dict(type='str'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - gid=dict(type='str'), - group_name=dict(type='str'), - cid=dict(type='str'), - client_id=dict(type='str'), - roles=dict(type='list', elements='dict', options=roles_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('cid') - client_id = module.params.get('client_id') - gid = module.params.get('gid') - group_name = module.params.get('group_name') - roles = module.params.get('roles') - - # Check the parameters - if cid is None and client_id is None: - module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') - if gid is None and group_name is None: - module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') - - # Get the potential missing parameters - if gid is None: - group_rep = kc.get_group_by_name(group_name, realm=realm) - if group_rep is not None: - gid = group_rep['id'] - else: - module.fail_json(msg='Could not fetch group %s.' % group_name) - if cid is None: - cid = kc.get_client_id(client_id, realm=realm) - if cid is None: - module.fail_json(msg='Could not fetch client %s.' % client_id) - if roles is None: - module.exit_json(msg="Nothing to do (no roles specified).") - else: - for role in roles: - if role['name'] is None and role['id'] is None: - module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') - # Fetch missing role_id - if role['id'] is None: - role_id = kc.get_client_role_by_name(gid, cid, role['name'], realm=realm) - if role_id is not None: - role['id'] = role_id - else: - module.fail_json(msg='Could not fetch role %s.' % (role['name'])) - # Fetch missing role_name; check for None before subscripting the result - else: - fetched_role = kc.get_client_rolemapping_by_id(gid, cid, role['id'], realm=realm) - if fetched_role is None: - module.fail_json(msg='Could not fetch role %s.' % (role['id'])) - role['name'] = fetched_role['name'] - - # Get effective client-level role mappings - available_roles_before = kc.get_client_available_rolemappings(gid,
cid, realm=realm) - assigned_roles_before = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - - result['existing'] = assigned_roles_before - result['proposed'] = roles - - update_roles = [] - for role in roles: - # Fetch roles to assign if state present - if state == 'present': - for available_role in available_roles_before: - if role['name'] == available_role['name']: - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - # Fetch roles to remove if state absent - else: - for assigned_role in assigned_roles_before: - if role['name'] == assigned_role['name']: - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - - if update_roles: - if state == 'present': - # Assign roles - result['changed'] = True - if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) - if module.check_mode: - module.exit_json(**result) - kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) - result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) - assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - result['end_state'] = assigned_roles_after - module.exit_json(**result) - else: - # Remove mapping of role - result['changed'] = True - if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) - if module.check_mode: - module.exit_json(**result) - kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) - result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) - assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - result['end_state'] = assigned_roles_after - module.exit_json(**result) - # Do nothing - else: - result['changed'] = False - result['msg'] = 'Nothing to do; roles %s are already correctly mapped to group %s.' % (roles, group_name) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_clientscope.py b/plugins/modules/identity/keycloak/keycloak_clientscope.py deleted file mode 100644 index 2deab5547d..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_clientscope.py +++ /dev/null @@ -1,499 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_clientscope - -short_description: Allows administration of Keycloak client_scopes via Keycloak API - -version_added: 3.4.0 - -description: - - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module.
You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup - to the API to translate the name into the client_scope ID. - - -options: - state: - description: - - State of the client_scope. - - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the client_scope will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - name: - type: str - description: - - Name of the client_scope. - - This parameter is required only when creating or updating the client_scope. - - realm: - type: str - description: - - The Keycloak realm under which this client_scope resides. - default: 'master' - - id: - type: str - description: - - The unique identifier for this client_scope. - - This parameter is not required for updating or deleting a client_scope but - providing it will reduce the number of API calls required. - - description: - type: str - description: - - Description for this client_scope. - - This parameter is not required for updating or deleting a client_scope. - - protocol: - description: - - Type of client scope. - choices: ['openid-connect', 'saml', 'wsfed'] - type: str - - protocol_mappers: - description: - - A list of dicts defining protocol mappers for this client scope. - - This is 'protocolMappers' in the Keycloak REST API. - aliases: - - protocolMappers - type: list - elements: dict - suboptions: - protocol: - description: - - This specifies for which protocol this protocol mapper is active. - choices: ['openid-connect', 'saml', 'wsfed'] - type: str - - protocolMapper: - description: - - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least:" - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - name: - description: - - The name of this protocol mapper. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below.
It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the C(existing) return value. - type: dict - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the client_scope. - - Values may be single values (for example a string) or a list of strings. - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Gaëtan Daubresse (@Gaetan2907) -''' - -EXAMPLES = ''' -- name: Create a Keycloak client_scope, authentication with credentials - community.general.keycloak_clientscope: - name: my-new-kc-clientscope - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak client_scope, authentication with token - community.general.keycloak_clientscope: - name: my-new-kc-clientscope - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - token: TOKEN - delegate_to: localhost - -- name: Delete a Keycloak client_scope - community.general.keycloak_clientscope: - id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' - state: absent - realm: MyCustomRealm - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Delete a Keycloak client_scope based on name - community.general.keycloak_clientscope: - name: my-clientscope-for-deletion - state: absent - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Update the name of a Keycloak client_scope - community.general.keycloak_clientscope: - id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' - name: an-updated-kc-clientscope-name - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak client_scope with some custom attributes - community.general.keycloak_clientscope: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - name: my-new_clientscope - description: description-of-clientscope - protocol: openid-connect - protocol_mappers: - - config: - access.token.claim: True - claim.name: "family_name" - id.token.claim: True - jsonType.label: String - user.attribute: lastName - userinfo.token.claim: True - name: family name - protocol: openid-connect - protocolMapper: oidc-usermodel-property-mapper - - config: - attribute.name: Role - attribute.nameformat: Basic - single: false - name: role list - protocol: saml - protocolMapper: saml-role-list-mapper - attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Client_scope testclientscope has been updated" - -proposed: - description: Representation of proposed client scope.
- returned: always - type: dict - sample: { - clientId: "test" - } - -existing: - description: Representation of existing client scope (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: Representation of client scope after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included -from ansible.module_utils.basic import AnsibleModule - - -def sanitize_cr(clientscoperep): - """ Removes probably sensitive details from a clientscoperep representation. - - :param clientscoperep: the clientscoperep dict to be sanitized - :return: sanitized clientscoperep dict - """ - result = clientscoperep.copy() - if 'secret' in result: - result['secret'] = 'no_log' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' - return result - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - protmapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), - protocolMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - id=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), - attributes=dict(type='dict'), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('id') - name = module.params.get('name') - protocol_mappers = module.params.get('protocol_mappers') - - # Filter and map the parameter names that apply to the client scope - clientscope_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - before_clientscope = kc.get_clientscope_by_name(name, realm=realm) - else: - before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm) - - if before_clientscope is None: - before_clientscope = {} - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for clientscope_param in clientscope_params: - new_param_value =
module.params.get(clientscope_param) - - # some lists in the Keycloak API are sorted, some are not. - if isinstance(new_param_value, list): - if clientscope_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass - # Unfortunately, the Ansible argument spec checker introduces variables with null values when - # they are not specified - if clientscope_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] - changeset[camel(clientscope_param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) - desired_clientscope = before_clientscope.copy() - desired_clientscope.update(changeset) - - # Cater for when it doesn't exist (an empty dict) - if not before_clientscope: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Clientscope does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if name is None: - module.fail_json(msg='name must be specified when creating a new clientscope') - - if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_clientscope)) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_clientscope(desired_clientscope, realm=realm) - after_clientscope = kc.get_clientscope_by_name(name, realm) - - result['end_state'] = sanitize_cr(after_clientscope) - - result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'], - id=after_clientscope['id']) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_clientscope == before_clientscope: - result['changed'] = False - result['end_state'] = sanitize_cr(desired_clientscope) - result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - kc.update_clientscope(desired_clientscope, realm=realm) - - # do the protocolmappers update - if protocol_mappers is not None: - for protocol_mapper in protocol_mappers: - # update if protocolmapper exists - current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(desired_clientscope['id'], protocol_mapper['name'], realm=realm) - if current_protocolmapper is not None: - protocol_mapper['id'] = current_protocolmapper['id'] - kc.update_clientscope_protocolmappers(desired_clientscope['id'], protocol_mapper, realm=realm) - # create otherwise - else: - kc.create_clientscope_protocolmapper(desired_clientscope['id'], protocol_mapper, realm=realm) - - after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope['id'], realm=realm) - - result['end_state'] = after_clientscope - - result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id']) - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_clientscope), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it -
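# the scope can only be removed by its internal ID, so reuse the one captured - # in before_clientscope above; the representation is gone once deletion succeeds -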
cid = before_clientscope['id'] - kc.delete_clientscope(cid=cid, realm=realm) - - result['end_state'] = {} - - result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/plugins/modules/identity/keycloak/keycloak_clienttemplate.py deleted file mode 100644 index cec7c93d8d..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py +++ /dev/null @@ -1,449 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_clienttemplate - -short_description: Allows administration of Keycloak client templates via Keycloak API - - -description: - - This module allows the administration of Keycloak client templates via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - The Keycloak API does not always enforce that only sensible settings are used -- you can, for instance, set - SAML-specific settings on an OpenID Connect client and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the client template. - - On C(present), the client template will be created (or updated if it exists already). - - On C(absent), the client template will be removed if it exists. - choices: ['present', 'absent'] - default: 'present' - type: str - - id: - description: - - Id of client template to be worked on. This is usually a UUID. - type: str - - realm: - description: - - Realm this client template is found in. - type: str - default: master - - name: - description: - - Name of the client template. - type: str - - description: - description: - - Description of the client template in Keycloak. - type: str - - protocol: - description: - - Type of client template (either C(openid-connect) or C(saml)). - choices: ['openid-connect', 'saml'] - type: str - - full_scope_allowed: - description: - - Is the "Full Scope Allowed" feature set for this client template or not. - This is 'fullScopeAllowed' in the Keycloak REST API. - type: bool - - protocol_mappers: - description: - - A list of dicts defining protocol mappers for this client template. - This is 'protocolMappers' in the Keycloak REST API. - type: list - elements: dict - suboptions: - consentRequired: - description: - - Specifies whether a user needs to provide consent to a client for this mapper to be active. - type: bool - - consentText: - description: - - The human-readable name of the consent the user is presented to accept. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. - type: str - - name: - description: - - The name of this protocol mapper.
- type: str - - protocol: - description: - - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper - is active. - choices: ['openid-connect', 'saml'] - type: str - - protocolMapper: - description: - - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least:" - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the I(existing) field. - type: dict - - attributes: - description: - - A dict of further attributes for this client template. This can contain various - configuration settings, though in the default installation of Keycloak as of 3.4, none - are documented or known, so this is usually empty. - type: dict - -notes: -- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled), - I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and - I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on - Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such, - they are not available through this module.
- -extends_documentation_fragment: -- community.general.keycloak - -author: - - Eike Frost (@eikef) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak client template (minimal), authentication with credentials - community.general.keycloak_clienttemplate: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - realm: master - name: this_is_a_test - delegate_to: localhost - -- name: Create or update Keycloak client template (minimal), authentication with token - community.general.keycloak_clienttemplate: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - token: TOKEN - realm: master - name: this_is_a_test - delegate_to: localhost - -- name: Delete Keycloak client template - community.general.keycloak_clienttemplate: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - realm: master - state: absent - name: test01 - delegate_to: localhost - -- name: Create or update Keycloak client template (with a protocol mapper) - community.general.keycloak_clienttemplate: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - realm: master - name: this_is_a_test - protocol_mappers: - - config: - access.token.claim: True - claim.name: "family_name" - id.token.claim: True - jsonType.label: String - user.attribute: lastName - userinfo.token.claim: True - consentRequired: True - consentText: "${familyName}" - name: family name - protocol: openid-connect - protocolMapper: oidc-usermodel-property-mapper - full_scope_allowed: false - id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Client template testclient has been updated" - -proposed: - description: Representation of proposed client template. - returned: always - type: dict - sample: { - name: "test01" - } - -existing: - description: Representation of existing client template (sample is truncated). - returned: always - type: dict - sample: { - "description": "test01", - "fullScopeAllowed": false, - "id": "9c3712ab-decd-481e-954f-76da7b006e5f", - "name": "test01", - "protocol": "saml" - } - -end_state: - description: Representation of client template after module execution (sample is truncated).
- returned: on success - type: dict - sample: { - "description": "test01", - "fullScopeAllowed": false, - "id": "9c3712ab-decd-481e-954f-76da7b006e5f", - "name": "test01", - "protocol": "saml" - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - protmapper_spec = dict( - consentRequired=dict(type='bool'), - consentText=dict(type='str'), - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - protocolMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - realm=dict(type='str', default='master'), - state=dict(default='present', choices=['present', 'absent']), - - id=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - attributes=dict(type='dict'), - full_scope_allowed=dict(type='bool'), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('id') - - # Filter and map the parameter names that apply to the client template - clientt_params = [x for x in module.params - if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm', - 'auth_client_secret', 'auth_username', 'auth_password', - 'validate_certs', 'realm'] and module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm) - if before_clientt is not None: - cid = before_clientt['id'] - else: - before_clientt = kc.get_client_template_by_id(cid, realm=realm) - - if before_clientt is None: - before_clientt = {} - - result['existing'] = before_clientt - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for clientt_param in clientt_params: - # lists in the Keycloak API are sorted - new_param_value = module.params.get(clientt_param) - if isinstance(new_param_value, list): - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass - changeset[camel(clientt_param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) - desired_clientt = before_clientt.copy() - desired_clientt.update(changeset) - - result['proposed'] = changeset - - # Cater for when it doesn't exist (an empty dict) - if not before_clientt: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Client template
does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if 'name' not in desired_clientt: - module.fail_json(msg='name needs to be specified when creating a new client template') - - if module._diff: - result['diff'] = dict(before='', after=desired_clientt) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_client_template(desired_clientt, realm=realm) - after_clientt = kc.get_client_template_by_name(desired_clientt['name'], realm=realm) - - result['end_state'] = after_clientt - - result['msg'] = 'Client template %s has been created.' % desired_clientt['name'] - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - result['changed'] = True - if module.check_mode: - # We can only compare the current client template with the proposed updates we have - if module._diff: - result['diff'] = dict(before=before_clientt, - after=desired_clientt) - - module.exit_json(**result) - - # do the update - kc.update_client_template(cid, desired_clientt, realm=realm) - - after_clientt = kc.get_client_template_by_id(cid, realm=realm) - if before_clientt == after_clientt: - result['changed'] = False - - result['end_state'] = after_clientt - - if module._diff: - result['diff'] = dict(before=before_clientt, after=after_clientt) - - result['msg'] = 'Client template %s has been updated.' % desired_clientt['name'] - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_clientt, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_client_template(cid, realm=realm) - result['proposed'] = {} - - result['end_state'] = {} - - result['msg'] = 'Client template %s has been deleted.' % before_clientt['name'] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_group.py b/plugins/modules/identity/keycloak/keycloak_group.py deleted file mode 100644 index 3455f57818..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_group.py +++ /dev/null @@ -1,440 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019, Adam Goossens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_group - -short_description: Allows administration of Keycloak groups via Keycloak API - -description: - - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API.
- - - When updating a group, where possible provide the group ID to the module. This removes a lookup - to the API to translate the name into the group ID. - - -options: - state: - description: - - State of the group. - - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the group will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - name: - type: str - description: - - Name of the group. - - This parameter is required only when creating or updating the group. - - realm: - type: str - description: - - The Keycloak realm under which this group resides. - default: 'master' - - id: - type: str - description: - - The unique identifier for this group. - - This parameter is not required for updating or deleting a group but - providing it will reduce the number of API calls required. - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the group. - - Values may be single values (e.g. a string) or a list of strings. - -notes: - - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API - are read-only for groups. This limitation will be removed in a later version of this module. - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Adam Goossens (@adamgoossens) -''' - -EXAMPLES = ''' -- name: Create a Keycloak group, authentication with credentials - community.general.keycloak_group: - name: my-new-kc-group - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak group, authentication with token - community.general.keycloak_group: - name: my-new-kc-group - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - token: TOKEN - delegate_to: localhost - -- name: Delete a Keycloak group - community.general.keycloak_group: - id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' - state: absent - realm: MyCustomRealm - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Delete a Keycloak group based on name - community.general.keycloak_group: - name: my-group-for-deletion - state: absent - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Update the name of a Keycloak group - community.general.keycloak_group: - id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' - name: an-updated-kc-group-name - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak group with some custom attributes - community.general.keycloak_group: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - name: my-new_group - attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was
taken. - returned: always - type: str - -end_state: - description: Representation of the group after module execution (sample is truncated). - returned: on success - type: complex - contains: - id: - description: GUID that identifies the group. - type: str - returned: always - sample: 23f38145-3195-462c-97e7-97041ccea73e - name: - description: Name of the group. - type: str - returned: always - sample: grp-test-123 - attributes: - description: Attributes applied to this group. - type: dict - returned: always - sample: - attr1: ["val1", "val2", "val3"] - path: - description: URI path to the group. - type: str - returned: always - sample: /grp-test-123 - realmRoles: - description: An array of the realm-level roles granted to this group. - type: list - returned: always - sample: [] - subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as - documented here. - type: list - returned: always - clientRoles: - description: A list of client-level roles granted to this group. - type: list - returned: always - sample: [] - access: - description: A dict describing the accesses you have to this group based on the credentials used. - type: dict - returned: always - sample: - manage: true - manageMembership: true - view: true - -group: - description: - - Representation of the group after module execution. - - Deprecated return value, it will be removed in community.general 6.0.0. Please use the return value I(end_state) instead. - returned: always - type: complex - contains: - id: - description: GUID that identifies the group. - type: str - returned: always - sample: 23f38145-3195-462c-97e7-97041ccea73e - name: - description: Name of the group. - type: str - returned: always - sample: grp-test-123 - attributes: - description: Attributes applied to this group. - type: dict - returned: always - sample: - attr1: ["val1", "val2", "val3"] - path: - description: URI path to the group. - type: str - returned: always - sample: /grp-test-123 - realmRoles: - description: An array of the realm-level roles granted to this group. - type: list - returned: always - sample: [] - subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as - documented here. - type: list - returned: always - clientRoles: - description: A list of client-level roles granted to this group. - type: list - returned: always - sample: [] - access: - description: A dict describing the accesses you have to this group based on the credentials used. 
- type: dict - returned: always - sample: - manage: true - manageMembership: true - view: true - -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - id=dict(type='str'), - name=dict(type='str'), - attributes=dict(type='dict'), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, group='') - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - gid = module.params.get('id') - name = module.params.get('name') - attributes = module.params.get('attributes') - - # attributes in Keycloak have their values returned as lists - # via the API. attributes is a dict, so we'll transparently convert - # the values to lists. - if attributes is not None: - for key, val in module.params['attributes'].items(): - module.params['attributes'][key] = [val] if not isinstance(val, list) else val - - # Filter and map the parameter names that apply to the group - group_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if gid is None: - before_group = kc.get_group_by_name(name, realm=realm) - else: - before_group = kc.get_group_by_groupid(gid, realm=realm) - - if before_group is None: - before_group = {} - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for param in group_params: - new_param_value = module.params.get(param) - old_value = before_group[param] if param in before_group else None - if new_param_value != old_value: - changeset[camel(param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) - desired_group = before_group.copy() - desired_group.update(changeset) - - # Cater for when it doesn't exist (an empty dict) - if not before_group: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['group'] = result['end_state'] - result['msg'] = 'Group does not exist; doing nothing.'
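- # state=absent on a non-existent group is a no-op: report the unchanged result and stop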
- module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if name is None: - module.fail_json(msg='name must be specified when creating a new group') - - if module._diff: - result['diff'] = dict(before='', after=desired_group) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_group(desired_group, realm=realm) - after_group = kc.get_group_by_name(name, realm) - - result['end_state'] = after_group - result['group'] = result['end_state'] - - result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'], - id=after_group['id']) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_group == before_group: - result['changed'] = False - result['end_state'] = desired_group - result['group'] = result['end_state'] - result['msg'] = "No changes required to group {name}.".format(name=before_group['name']) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_group, after=desired_group) - - if module.check_mode: - module.exit_json(**result) - - # do the update - kc.update_group(desired_group, realm=realm) - - after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm) - - result['end_state'] = after_group - result['group'] = result['end_state'] - - result['msg'] = "Group {id} has been updated".format(id=after_group['id']) - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_group, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - gid = before_group['id'] - kc.delete_group(groupid=gid, realm=realm) - - result['end_state'] = {} - result['group'] = result['end_state'] - - result['msg'] = "Group {name} has been deleted".format(name=before_group['name']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_identity_provider.py b/plugins/modules/identity/keycloak/keycloak_identity_provider.py deleted file mode 100644 index a4adddd951..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_identity_provider.py +++ /dev/null @@ -1,646 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_identity_provider - -short_description: Allows administration of Keycloak identity providers via Keycloak API - -version_added: 3.6.0 - -description: - - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). - - -options: - state: - description: - - State of the identity provider. 
- - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the identity provider will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - realm: - description: - - The Keycloak realm under which this identity provider resides. - default: 'master' - type: str - - alias: - description: - - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. - required: true - type: str - - display_name: - description: - - Friendly name for identity provider. - aliases: - - displayName - type: str - - enabled: - description: - - Enable/disable this identity provider. - type: bool - - store_token: - description: - - Enable/disable whether tokens must be stored after authenticating users. - aliases: - - storeToken - type: bool - - add_read_token_role_on_create: - description: - - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. - aliases: - - addReadTokenRoleOnCreate - type: bool - - trust_email: - description: - - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. - aliases: - - trustEmail - type: bool - - link_only: - description: - - If true, users cannot log in through this provider. They can only link to this provider. - This is useful if you don't want to allow login from the provider, but want to integrate with a provider. - aliases: - - linkOnly - type: bool - - first_broker_login_flow_alias: - description: - - Alias of authentication flow, which is triggered after first login with this identity provider. - aliases: - - firstBrokerLoginFlowAlias - type: str - - post_broker_login_flow_alias: - description: - - Alias of authentication flow, which is triggered after each login with this identity provider. - aliases: - - postBrokerLoginFlowAlias - type: str - - authenticate_by_default: - description: - - Specifies if this identity provider should be used by default for authentication even before displaying login screen. - aliases: - - authenticateByDefault - type: bool - - provider_id: - description: - - Protocol used by this provider (supported values are C(oidc) or C(saml)). - aliases: - - providerId - type: str - - config: - description: - - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId). - Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing - identity provider configuration through check-mode in the I(existing) field. - type: dict - suboptions: - hide_on_login_page: - description: - - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. - aliases: - - hideOnLoginPage - type: bool - - gui_order: - description: - - Number defining order of the provider in GUI (for example, on Login page). - aliases: - - guiOrder - type: int - - sync_mode: - description: - - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. - aliases: - - syncMode - type: str - - issuer: - description: - - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. - type: str - - authorizationUrl: - description: - - The Authorization URL. - type: str - - tokenUrl: - description: - - The Token URL. 
- type: str - - logoutUrl: - description: - - End session endpoint to use to log the user out of the external IDP. - type: str - - userInfoUrl: - description: - - The User Info URL. - type: str - - clientAuthMethod: - description: - - The client authentication method. - type: str - - clientId: - description: - - The client identifier registered within the identity provider. - type: str - - clientSecret: - description: - - The client secret registered within the identity provider. - type: str - - defaultScope: - description: - - The scopes to be sent when asking for authorization. - type: str - - validateSignature: - description: - - Enable/disable signature validation of external IDP signatures. - type: bool - - useJwksUrl: - description: - - If the switch is on, identity provider public keys will be downloaded from the given JWKS URL. - type: bool - - jwksUrl: - description: - - URL where identity provider keys in JWK format are stored. See JWK specification for more details. - type: str - - entityId: - description: - - The Entity ID that will be used to uniquely identify this SAML Service Provider. - type: str - - singleSignOnServiceUrl: - description: - - The URL that must be used to send authentication requests (SAML AuthnRequest). - type: str - - singleLogoutServiceUrl: - description: - - The URL that must be used to send logout requests. - type: str - - backchannelSupported: - description: - - Does the external IDP support backchannel logout? - type: str - - nameIDPolicyFormat: - description: - - Specifies the URI reference corresponding to a name identifier format. - type: str - - principalType: - description: - - Way to identify and track external users from the assertion. - type: str - - mappers: - description: - - A list of dicts defining mappers associated with this Identity Provider. - type: list - elements: dict - suboptions: - id: - description: - - Unique ID of this mapper. - type: str - - name: - description: - - Name of the mapper. - type: str - - identityProviderAlias: - description: - - Alias of the identity provider for this mapper. - type: str - - identityProviderMapper: - description: - - Type of mapper. - type: str - - config: - description: - - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper).
- type: dict - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' -- name: Create OIDC identity provider, authentication with credentials - community.general.keycloak_identity_provider: - state: present - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: admin - auth_password: admin - realm: myrealm - alias: oidc-idp - display_name: OpenID Connect IdP - enabled: true - provider_id: oidc - config: - issuer: https://idp.example.com - authorizationUrl: https://idp.example.com/auth - tokenUrl: https://idp.example.com/token - userInfoUrl: https://idp.example.com/userinfo - clientAuthMethod: client_secret_post - clientId: my-client - clientSecret: secret - syncMode: FORCE - mappers: - - name: first_name - identityProviderMapper: oidc-user-attribute-idp-mapper - config: - claim: first_name - user.attribute: first_name - syncMode: INHERIT - - name: last_name - identityProviderMapper: oidc-user-attribute-idp-mapper - config: - claim: last_name - user.attribute: last_name - syncMode: INHERIT - -- name: Create SAML identity provider, authentication with credentials - community.general.keycloak_identity_provider: - state: present - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: admin - auth_password: admin - realm: myrealm - alias: saml-idp - display_name: SAML IdP - enabled: true - provider_id: saml - config: - entityId: https://auth.example.com/auth/realms/myrealm - singleSignOnServiceUrl: https://idp.example.com/login - wantAuthnRequestsSigned: true - wantAssertionsSigned: true - mappers: - - name: roles - identityProviderMapper: saml-user-attribute-idp-mapper - config: - user.attribute: roles - attribute.friendly.name: User Roles - attribute.name: roles - syncMode: INHERIT -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Identity provider my-idp has been created" - -proposed: - description: Representation of proposed identity provider. - returned: always - type: dict - sample: { - "config": { - "authorizationUrl": "https://idp.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "secret", - "issuer": "https://idp.example.com", - "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "providerId": "oidc" - } - -existing: - description: Representation of existing identity provider. - returned: always - type: dict - sample: { - "addReadTokenRoleOnCreate": false, - "alias": "my-idp", - "authenticateByDefault": false, - "config": { - "authorizationUrl": "https://old.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "**********", - "issuer": "https://old.example.com", - "syncMode": "FORCE", - "tokenUrl": "https://old.example.com/token", - "userInfoUrl": "https://old.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "enabled": true, - "firstBrokerLoginFlowAlias": "first broker login", - "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", - "linkOnly": false, - "providerId": "oidc", - "storeToken": false, - "trustEmail": false, - } - -end_state: - description: Representation of identity provider after module execution. 
-  returned: on success
-  type: dict
-  sample: {
-        "addReadTokenRoleOnCreate": false,
-        "alias": "my-idp",
-        "authenticateByDefault": false,
-        "config": {
-            "authorizationUrl": "https://idp.example.com/auth",
-            "clientAuthMethod": "client_secret_post",
-            "clientId": "my-client",
-            "clientSecret": "**********",
-            "issuer": "https://idp.example.com",
-            "tokenUrl": "https://idp.example.com/token",
-            "userInfoUrl": "https://idp.example.com/userinfo"
-        },
-        "displayName": "OpenID Connect IdP",
-        "enabled": true,
-        "firstBrokerLoginFlowAlias": "first broker login",
-        "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
-        "linkOnly": false,
-        "providerId": "oidc",
-        "storeToken": false,
-        "trustEmail": false,
-    }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-from copy import deepcopy
-
-
-def sanitize(idp):
-    # Return a deep copy of the representation with the client secret masked,
-    # so that it can safely appear in logs and module results.
-    idpcopy = deepcopy(idp)
-    if 'config' in idpcopy:
-        if 'clientSecret' in idpcopy['config']:
-            idpcopy['config']['clientSecret'] = '**********'
-    return idpcopy
-
-
-def get_identity_provider_with_mappers(kc, alias, realm):
-    idp = kc.get_identity_provider(alias, realm)
-    if idp is None:
-        return {}
-    idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name'))
-    return idp
-
-
-def main():
-    """
-    Module execution
-
-    :return:
-    """
-    argument_spec = keycloak_argument_spec()
-
-    mapper_spec = dict(
-        id=dict(type='str'),
-        name=dict(type='str'),
-        identityProviderAlias=dict(type='str'),
-        identityProviderMapper=dict(type='str'),
-        config=dict(type='dict'),
-    )
-
-    meta_args = dict(
-        state=dict(type='str', default='present', choices=['present', 'absent']),
-        realm=dict(type='str', default='master'),
-        alias=dict(type='str', required=True),
-        add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']),
-        authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']),
-        config=dict(type='dict'),
-        display_name=dict(type='str', aliases=['displayName']),
-        enabled=dict(type='bool'),
-        first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']),
-        link_only=dict(type='bool', aliases=['linkOnly']),
-        post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']),
-        provider_id=dict(type='str', aliases=['providerId']),
-        store_token=dict(type='bool', aliases=['storeToken']),
-        trust_email=dict(type='bool', aliases=['trustEmail']),
-        mappers=dict(type='list', elements='dict', options=mapper_spec),
-    )
-
-    argument_spec.update(meta_args)
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
-    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
-
-    # Obtain access token, initialize API
-    try:
-        connection_header = get_token(module.params)
-    except KeycloakError as e:
-        module.fail_json(msg=str(e))
-
-    kc = KeycloakAPI(module, connection_header)
-
-    realm = module.params.get('realm')
-    alias = module.params.get('alias')
-    state = module.params.get('state')
-
-    # Filter and map the parameter names that apply to the identity provider.
-    idp_params = [x for x in module.params
-                  if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
-                  module.params.get(x) is not None]
-
-    # See if it already exists in Keycloak
-    before_idp = get_identity_provider_with_mappers(kc, alias, realm)
-
-    # Build a proposed changeset from parameters given to this module
-    changeset = {}
-
-    for param in idp_params:
-        new_param_value = module.params.get(param)
-        old_value = before_idp[camel(param)] if camel(param) in before_idp else None
-        if new_param_value != old_value:
-            changeset[camel(param)] = new_param_value
-
-    # special handling of the mappers list to allow change detection
-    if module.params.get('mappers') is not None:
-        for change in module.params['mappers']:
-            change = dict((k, v) for k, v in change.items() if change[k] is not None)
-            if change.get('id') is None and change.get('name') is None:
-                module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
-            if before_idp == dict():
-                old_mapper = dict()
-            elif change.get('id') is not None:
-                old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
-                if old_mapper is None:
-                    old_mapper = dict()
-            else:
-                found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
-                if len(found) == 1:
-                    old_mapper = found[0]
-                else:
-                    old_mapper = dict()
-            new_mapper = old_mapper.copy()
-            new_mapper.update(change)
-            if new_mapper != old_mapper:
-                if changeset.get('mappers') is None:
-                    changeset['mappers'] = list()
-                changeset['mappers'].append(new_mapper)
-
-    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
-    desired_idp = before_idp.copy()
-    desired_idp.update(changeset)
-
-    result['proposed'] = sanitize(changeset)
-    result['existing'] = sanitize(before_idp)
-
-    # Cater for when it doesn't exist (an empty dict)
-    if not before_idp:
-        if state == 'absent':
-            # Do nothing and exit
-            if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Identity provider does not exist; doing nothing.'
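The changeset loop above records only values that differ from the existing representation, keyed by their camelCase API names, so an unchanged option never shows up in the proposed diff. A minimal standalone sketch of that idea (the `camel` function here is a simplified stand-in for the collection's helper of the same name, not the real implementation):

```python
def camel(words):
    # Simplified stand-in for the collection's camel() helper: it turns
    # snake_case option names into the camelCase keys the Keycloak API uses.
    parts = words.split('_')
    return parts[0] + ''.join(part.capitalize() for part in parts[1:])


def build_changeset(params, existing, skip=('state', 'realm', 'mappers')):
    """Collect only the camelCase keys whose values differ from `existing`."""
    changeset = {}
    for name, value in params.items():
        if name in skip or value is None:
            continue
        key = camel(name)
        if existing.get(key) != value:
            changeset[key] = value
    return changeset


existing = {'alias': 'oidc-idp', 'displayName': 'Old name', 'enabled': True}
params = {'alias': 'oidc-idp', 'display_name': 'OpenID Connect IdP', 'state': 'present'}
print(build_changeset(params, existing))  # {'displayName': 'OpenID Connect IdP'}
```

An empty result from this comparison is what later lets the module report that no changes are required without calling the update endpoint.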
- module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_idp)) - - if module.check_mode: - module.exit_json(**result) - - # create it - desired_idp = desired_idp.copy() - mappers = desired_idp.pop('mappers', []) - kc.create_identity_provider(desired_idp, realm) - for mapper in mappers: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias - kc.create_identity_provider_mapper(mapper, alias, realm) - after_idp = get_identity_provider_with_mappers(kc, alias, realm) - - result['end_state'] = sanitize(after_idp) - - result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_idp == before_idp: - result['changed'] = False - result['end_state'] = sanitize(desired_idp) - result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - desired_idp = desired_idp.copy() - updated_mappers = desired_idp.pop('mappers', []) - kc.update_identity_provider(desired_idp, realm) - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_identity_provider_mapper(mapper, alias, realm) - else: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias - kc.create_identity_provider_mapper(mapper, alias, realm) - for mapper in [x for x in before_idp['mappers'] - if [y for y in updated_mappers if y["name"] == x['name']] == []]: - kc.delete_identity_provider_mapper(mapper['id'], alias, realm) - - after_idp = get_identity_provider_with_mappers(kc, alias, realm) - - result['end_state'] = sanitize(after_idp) - - result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) - module.exit_json(**result) - - elif state == 'absent': - # Process a deletion - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_identity_provider(alias, realm) - - result['end_state'] = {} - - result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py deleted file mode 100644 index 289c13504a..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ /dev/null @@ -1,819 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# Copyright (c) 2021, Christophe Gilles -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_realm - -short_description: Allows administration of Keycloak realm via Keycloak API - -version_added: 3.0.0 - - -description: - - This module allows the administration of Keycloak realm via the Keycloak REST API. 
It - requires access to the REST API via OpenID Connect; the user connecting and the realm being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate realm definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - - The Keycloak API does not always sanity check inputs e.g. you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the realm. - - On C(present), the realm will be created (or updated if it exists already). - - On C(absent), the realm will be removed if it exists. - choices: ['present', 'absent'] - default: 'present' - type: str - - id: - description: - - The realm to create. - type: str - realm: - description: - - The realm name. - type: str - access_code_lifespan: - description: - - The realm access code lifespan. - aliases: - - accessCodeLifespan - type: int - access_code_lifespan_login: - description: - - The realm access code lifespan login. - aliases: - - accessCodeLifespanLogin - type: int - access_code_lifespan_user_action: - description: - - The realm access code lifespan user action. - aliases: - - accessCodeLifespanUserAction - type: int - access_token_lifespan: - description: - - The realm access token lifespan. - aliases: - - accessTokenLifespan - type: int - access_token_lifespan_for_implicit_flow: - description: - - The realm access token lifespan for implicit flow. - aliases: - - accessTokenLifespanForImplicitFlow - type: int - account_theme: - description: - - The realm account theme. - aliases: - - accountTheme - type: str - action_token_generated_by_admin_lifespan: - description: - - The realm action token generated by admin lifespan. - aliases: - - actionTokenGeneratedByAdminLifespan - type: int - action_token_generated_by_user_lifespan: - description: - - The realm action token generated by user lifespan. - aliases: - - actionTokenGeneratedByUserLifespan - type: int - admin_events_details_enabled: - description: - - The realm admin events details enabled. - aliases: - - adminEventsDetailsEnabled - type: bool - admin_events_enabled: - description: - - The realm admin events enabled. - aliases: - - adminEventsEnabled - type: bool - admin_theme: - description: - - The realm admin theme. - aliases: - - adminTheme - type: str - attributes: - description: - - The realm attributes. - type: dict - browser_flow: - description: - - The realm browser flow. - aliases: - - browserFlow - type: str - browser_security_headers: - description: - - The realm browser security headers. - aliases: - - browserSecurityHeaders - type: dict - brute_force_protected: - description: - - The realm brute force protected. - aliases: - - bruteForceProtected - type: bool - client_authentication_flow: - description: - - The realm client authentication flow. - aliases: - - clientAuthenticationFlow - type: str - client_scope_mappings: - description: - - The realm client scope mappings. - aliases: - - clientScopeMappings - type: dict - default_default_client_scopes: - description: - - The realm default default client scopes. 
- aliases: - - defaultDefaultClientScopes - type: list - elements: dict - default_groups: - description: - - The realm default groups. - aliases: - - defaultGroups - type: list - elements: dict - default_locale: - description: - - The realm default locale. - aliases: - - defaultLocale - type: str - default_optional_client_scopes: - description: - - The realm default optional client scopes. - aliases: - - defaultOptionalClientScopes - type: list - elements: dict - default_roles: - description: - - The realm default roles. - aliases: - - defaultRoles - type: list - elements: dict - default_signature_algorithm: - description: - - The realm default signature algorithm. - aliases: - - defaultSignatureAlgorithm - type: str - direct_grant_flow: - description: - - The realm direct grant flow. - aliases: - - directGrantFlow - type: str - display_name: - description: - - The realm display name. - aliases: - - displayName - type: str - display_name_html: - description: - - The realm display name HTML. - aliases: - - displayNameHtml - type: str - docker_authentication_flow: - description: - - The realm docker authentication flow. - aliases: - - dockerAuthenticationFlow - type: str - duplicate_emails_allowed: - description: - - The realm duplicate emails allowed option. - aliases: - - duplicateEmailsAllowed - type: bool - edit_username_allowed: - description: - - The realm edit username allowed option. - aliases: - - editUsernameAllowed - type: bool - email_theme: - description: - - The realm email theme. - aliases: - - emailTheme - type: str - enabled: - description: - - The realm enabled option. - type: bool - enabled_event_types: - description: - - The realm enabled event types. - aliases: - - enabledEventTypes - type: list - elements: str - events_enabled: - description: - - Enables or disables login events for this realm. - aliases: - - eventsEnabled - type: bool - version_added: 3.6.0 - events_expiration: - description: - - The realm events expiration. - aliases: - - eventsExpiration - type: int - events_listeners: - description: - - The realm events listeners. - aliases: - - eventsListeners - type: list - elements: str - failure_factor: - description: - - The realm failure factor. - aliases: - - failureFactor - type: int - internationalization_enabled: - description: - - The realm internationalization enabled option. - aliases: - - internationalizationEnabled - type: bool - login_theme: - description: - - The realm login theme. - aliases: - - loginTheme - type: str - login_with_email_allowed: - description: - - The realm login with email allowed option. - aliases: - - loginWithEmailAllowed - type: bool - max_delta_time_seconds: - description: - - The realm max delta time in seconds. - aliases: - - maxDeltaTimeSeconds - type: int - max_failure_wait_seconds: - description: - - The realm max failure wait in seconds. - aliases: - - maxFailureWaitSeconds - type: int - minimum_quick_login_wait_seconds: - description: - - The realm minimum quick login wait in seconds. - aliases: - - minimumQuickLoginWaitSeconds - type: int - not_before: - description: - - The realm not before. - aliases: - - notBefore - type: int - offline_session_idle_timeout: - description: - - The realm offline session idle timeout. - aliases: - - offlineSessionIdleTimeout - type: int - offline_session_max_lifespan: - description: - - The realm offline session max lifespan. 
- aliases: - - offlineSessionMaxLifespan - type: int - offline_session_max_lifespan_enabled: - description: - - The realm offline session max lifespan enabled option. - aliases: - - offlineSessionMaxLifespanEnabled - type: bool - otp_policy_algorithm: - description: - - The realm otp policy algorithm. - aliases: - - otpPolicyAlgorithm - type: str - otp_policy_digits: - description: - - The realm otp policy digits. - aliases: - - otpPolicyDigits - type: int - otp_policy_initial_counter: - description: - - The realm otp policy initial counter. - aliases: - - otpPolicyInitialCounter - type: int - otp_policy_look_ahead_window: - description: - - The realm otp policy look ahead window. - aliases: - - otpPolicyLookAheadWindow - type: int - otp_policy_period: - description: - - The realm otp policy period. - aliases: - - otpPolicyPeriod - type: int - otp_policy_type: - description: - - The realm otp policy type. - aliases: - - otpPolicyType - type: str - otp_supported_applications: - description: - - The realm otp supported applications. - aliases: - - otpSupportedApplications - type: list - elements: str - password_policy: - description: - - The realm password policy. - aliases: - - passwordPolicy - type: str - permanent_lockout: - description: - - The realm permanent lockout. - aliases: - - permanentLockout - type: bool - quick_login_check_milli_seconds: - description: - - The realm quick login check in milliseconds. - aliases: - - quickLoginCheckMilliSeconds - type: int - refresh_token_max_reuse: - description: - - The realm refresh token max reuse. - aliases: - - refreshTokenMaxReuse - type: int - registration_allowed: - description: - - The realm registration allowed option. - aliases: - - registrationAllowed - type: bool - registration_email_as_username: - description: - - The realm registration email as username option. - aliases: - - registrationEmailAsUsername - type: bool - registration_flow: - description: - - The realm registration flow. - aliases: - - registrationFlow - type: str - remember_me: - description: - - The realm remember me option. - aliases: - - rememberMe - type: bool - reset_credentials_flow: - description: - - The realm reset credentials flow. - aliases: - - resetCredentialsFlow - type: str - reset_password_allowed: - description: - - The realm reset password allowed option. - aliases: - - resetPasswordAllowed - type: bool - revoke_refresh_token: - description: - - The realm revoke refresh token option. - aliases: - - revokeRefreshToken - type: bool - smtp_server: - description: - - The realm smtp server. - aliases: - - smtpServer - type: dict - ssl_required: - description: - - The realm ssl required option. - choices: ['all', 'external', 'none'] - aliases: - - sslRequired - type: str - sso_session_idle_timeout: - description: - - The realm sso session idle timeout. - aliases: - - ssoSessionIdleTimeout - type: int - sso_session_idle_timeout_remember_me: - description: - - The realm sso session idle timeout remember me. - aliases: - - ssoSessionIdleTimeoutRememberMe - type: int - sso_session_max_lifespan: - description: - - The realm sso session max lifespan. - aliases: - - ssoSessionMaxLifespan - type: int - sso_session_max_lifespan_remember_me: - description: - - The realm sso session max lifespan remember me. - aliases: - - ssoSessionMaxLifespanRememberMe - type: int - supported_locales: - description: - - The realm supported locales. 
- aliases: - - supportedLocales - type: list - elements: str - user_managed_access_allowed: - description: - - The realm user managed access allowed option. - aliases: - - userManagedAccessAllowed - type: bool - verify_email: - description: - - The realm verify email option. - aliases: - - verifyEmail - type: bool - wait_increment_seconds: - description: - - The realm wait increment in seconds. - aliases: - - waitIncrementSeconds - type: int - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Christophe Gilles (@kris2kris) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak realm (minimal example) - community.general.keycloak_realm: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - id: realm - state: present - -- name: Delete a Keycloak realm - community.general.keycloak_realm: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - id: test - state: absent - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Realm testrealm has been updated" - -proposed: - description: Representation of proposed realm. - returned: always - type: dict - sample: { - id: "test" - } - -existing: - description: Representation of existing realm (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: Representation of realm after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule - - -def sanitize_cr(realmrep): - """ Removes probably sensitive details from a realm representation. 
- - :param realmrep: the realmrep dict to be sanitized - :return: sanitized realmrep dict - """ - result = realmrep.copy() - if 'secret' in result: - result['secret'] = '********' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes'] = result['attributes'].copy() - result['attributes']['saml.signing.private.key'] = '********' - return result - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - - id=dict(type='str'), - realm=dict(type='str'), - access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), - access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), - access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), - access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), - access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), - account_theme=dict(type='str', aliases=['accountTheme']), - action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), - action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), - admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), - admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), - admin_theme=dict(type='str', aliases=['adminTheme']), - attributes=dict(type='dict'), - browser_flow=dict(type='str', aliases=['browserFlow']), - browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), - brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), - client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), - client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), - default_default_client_scopes=dict(type='list', elements='dict', aliases=['defaultDefaultClientScopes']), - default_groups=dict(type='list', elements='dict', aliases=['defaultGroups']), - default_locale=dict(type='str', aliases=['defaultLocale']), - default_optional_client_scopes=dict(type='list', elements='dict', aliases=['defaultOptionalClientScopes']), - default_roles=dict(type='list', elements='dict', aliases=['defaultRoles']), - default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), - direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), - display_name=dict(type='str', aliases=['displayName']), - display_name_html=dict(type='str', aliases=['displayNameHtml']), - docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), - duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), - edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), - email_theme=dict(type='str', aliases=['emailTheme']), - enabled=dict(type='bool'), - enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), - events_enabled=dict(type='bool', aliases=['eventsEnabled']), - events_expiration=dict(type='int', aliases=['eventsExpiration']), - events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), - failure_factor=dict(type='int', aliases=['failureFactor']), - internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), - 
login_theme=dict(type='str', aliases=['loginTheme']), - login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), - max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), - max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), - minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), - not_before=dict(type='int', aliases=['notBefore']), - offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), - offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), - offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), - otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), - otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), - otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), - otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), - otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), - otp_policy_type=dict(type='str', aliases=['otpPolicyType']), - otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), - password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), - permanent_lockout=dict(type='bool', aliases=['permanentLockout']), - quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), - refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), - registration_allowed=dict(type='bool', aliases=['registrationAllowed']), - registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), - registration_flow=dict(type='str', aliases=['registrationFlow']), - remember_me=dict(type='bool', aliases=['rememberMe']), - reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), - reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), - revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), - smtp_server=dict(type='dict', aliases=['smtpServer']), - ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), - sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), - sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), - sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), - sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), - supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), - user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), - verify_email=dict(type='bool', aliases=['verifyEmail']), - wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'realm', 'enabled'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - 
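As in the other Keycloak modules in this diff, `required_one_of` accepts either a ready-made API token or a full set of admin credentials (and, for this module, at least one of I(id), I(realm) or I(enabled)). A hedged sketch of how that constraint behaves; `check_required_one_of` below is a hypothetical helper mimicking AnsibleModule's built-in validation, not its actual code:

```python
def check_required_one_of(params, groups):
    """Mimic AnsibleModule's required_one_of: for every group, at least
    one of the named parameters must be set (i.e. not None)."""
    for group in groups:
        if not any(params.get(name) is not None for name in group):
            raise ValueError('one of the following is required: %s' % ', '.join(group))


# Credentials satisfy the auth group even though no token is given.
check_required_one_of(
    {'realm': 'myrealm', 'token': None, 'auth_realm': 'master',
     'auth_username': 'admin', 'auth_password': 'secret'},
    [['id', 'realm', 'enabled'],
     ['token', 'auth_realm', 'auth_username', 'auth_password']],
)
```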
realm = module.params.get('realm')
-    state = module.params.get('state')
-
-    # convert module parameters to realm representation parameters (if they belong in there)
-    params_to_ignore = list(keycloak_argument_spec().keys()) + ['state']
-
-    # Filter and map the parameter names that apply to the realm
-    realm_params = [x for x in module.params
-                    if x not in params_to_ignore and
-                    module.params.get(x) is not None]
-
-    # See whether the realm already exists in Keycloak
-    before_realm = kc.get_realm_by_id(realm=realm)
-
-    if before_realm is None:
-        before_realm = {}
-
-    # Build a proposed changeset from parameters given to this module
-    changeset = {}
-
-    for realm_param in realm_params:
-        new_param_value = module.params.get(realm_param)
-        changeset[camel(realm_param)] = new_param_value
-
-    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
-    desired_realm = before_realm.copy()
-    desired_realm.update(changeset)
-
-    result['proposed'] = sanitize_cr(changeset)
-    before_realm_sanitized = sanitize_cr(before_realm)
-    result['existing'] = before_realm_sanitized
-
-    # Cater for when it doesn't exist (an empty dict)
-    if not before_realm:
-        if state == 'absent':
-            # Do nothing and exit
-            if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Realm does not exist, doing nothing.'
-            module.exit_json(**result)
-
-        # Process a creation
-        result['changed'] = True
-
-        if 'id' not in desired_realm:
-            module.fail_json(msg='id needs to be specified when creating a new realm')
-
-        if module._diff:
-            result['diff'] = dict(before='', after=sanitize_cr(desired_realm))
-
-        if module.check_mode:
-            module.exit_json(**result)
-
-        # create it
-        kc.create_realm(desired_realm)
-        after_realm = kc.get_realm_by_id(desired_realm['id'])
-
-        result['end_state'] = sanitize_cr(after_realm)
-
-        result['msg'] = 'Realm %s has been created.' % desired_realm['id']
-        module.exit_json(**result)
-
-    else:
-        if state == 'present':
-            # Process an update
-            result['changed'] = True
-
-            if module.check_mode:
-                # We can only compare the current realm with the proposed updates we have
-                if module._diff:
-                    result['diff'] = dict(before=before_realm_sanitized,
-                                          after=sanitize_cr(desired_realm))
-                result['changed'] = (before_realm != desired_realm)
-
-                module.exit_json(**result)
-
-            # do the update
-            kc.update_realm(desired_realm, realm=realm)
-
-            after_realm = kc.get_realm_by_id(realm=realm)
-
-            if before_realm == after_realm:
-                result['changed'] = False
-
-            result['end_state'] = sanitize_cr(after_realm)
-
-            if module._diff:
-                result['diff'] = dict(before=before_realm_sanitized,
-                                      after=sanitize_cr(after_realm))
-
-            result['msg'] = 'Realm %s has been updated.' % desired_realm['id']
-            module.exit_json(**result)
-
-        else:
-            # Process a deletion (because state was not 'present')
-            result['changed'] = True
-
-            if module._diff:
-                result['diff'] = dict(before=before_realm_sanitized, after='')
-
-            if module.check_mode:
-                module.exit_json(**result)
-
-            # delete it
-            kc.delete_realm(realm=realm)
-
-            result['proposed'] = {}
-            result['end_state'] = {}
-
-            result['msg'] = 'Realm %s has been deleted.'
% before_realm['id'] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_role.py b/plugins/modules/identity/keycloak/keycloak_role.py deleted file mode 100644 index 2dd2438e42..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_role.py +++ /dev/null @@ -1,368 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019, Adam Goossens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_role - -short_description: Allows administration of Keycloak roles via Keycloak API - -version_added: 3.4.0 - -description: - - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - -options: - state: - description: - - State of the role. - - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the role will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - name: - type: str - required: true - description: - - Name of the role. - - This parameter is required. - - description: - type: str - description: - - The role description. - - realm: - type: str - description: - - The Keycloak realm under which this role resides. - default: 'master' - - client_id: - type: str - description: - - If the role is a client role, the client id under which it resides. - - If this parameter is absent, the role is considered a realm role. - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the role. - - Values may be single values (e.g. a string) or a list of strings. 
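Since the Keycloak API treats every role attribute as multi-valued, the module wraps scalar values in single-element lists before talking to the API; the conversion loop appears in `main()` further down. A standalone sketch of that normalization, with an illustrative helper name that is not part of the module:

```python
def normalize_attributes(attributes):
    """Wrap scalar attribute values in lists, as the Keycloak API expects."""
    return {
        key: value if isinstance(value, list) else [value]
        for key, value in attributes.items()
    }


print(normalize_attributes({'attrib1': 'value1', 'attrib3': ['with', 'items']}))
# {'attrib1': ['value1'], 'attrib3': ['with', 'items']}
```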
- -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' -- name: Create a Keycloak realm role, authentication with credentials - community.general.keycloak_role: - name: my-new-kc-role - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak realm role, authentication with token - community.general.keycloak_role: - name: my-new-kc-role - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - token: TOKEN - delegate_to: localhost - -- name: Create a Keycloak client role - community.general.keycloak_role: - name: my-new-kc-role - realm: MyCustomRealm - client_id: MyClient - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Delete a Keycloak role - community.general.keycloak_role: - name: my-role-for-deletion - state: absent - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a keycloak role with some custom attributes - community.general.keycloak_role: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - name: my-new-role - attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Role myrole has been updated" - -proposed: - description: Representation of proposed role. - returned: always - type: dict - sample: { - "description": "My updated test description" - } - -existing: - description: Representation of existing role. - returned: always - type: dict - sample: { - "attributes": {}, - "clientRole": true, - "composite": false, - "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", - "description": "My client test role", - "id": "561703dd-0f38-45ff-9a5a-0c978f794547", - "name": "myrole" - } - -end_state: - description: Representation of role after module execution (sample is truncated). 
-  returned: on success
-  type: dict
-  sample: {
-        "attributes": {},
-        "clientRole": true,
-        "composite": false,
-        "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
-        "description": "My updated client test role",
-        "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
-        "name": "myrole"
-    }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-
-
-def main():
-    """
-    Module execution
-
-    :return:
-    """
-    argument_spec = keycloak_argument_spec()
-
-    meta_args = dict(
-        state=dict(type='str', default='present', choices=['present', 'absent']),
-        name=dict(type='str', required=True),
-        description=dict(type='str'),
-        realm=dict(type='str', default='master'),
-        client_id=dict(type='str'),
-        attributes=dict(type='dict'),
-    )
-
-    argument_spec.update(meta_args)
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
-    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
-
-    # Obtain access token, initialize API
-    try:
-        connection_header = get_token(module.params)
-    except KeycloakError as e:
-        module.fail_json(msg=str(e))
-
-    kc = KeycloakAPI(module, connection_header)
-
-    realm = module.params.get('realm')
-    clientid = module.params.get('client_id')
-    name = module.params.get('name')
-    state = module.params.get('state')
-
-    # attributes in Keycloak have their values returned as lists
-    # via the API. attributes is a dict, so we'll transparently convert
-    # the values to lists.
-    if module.params.get('attributes') is not None:
-        for key, val in module.params['attributes'].items():
-            module.params['attributes'][key] = [val] if not isinstance(val, list) else val
-
-    # Filter and map the parameter names that apply to the role
-    role_params = [x for x in module.params
-                   if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and
-                   module.params.get(x) is not None]
-
-    # See if it already exists in Keycloak
-    if clientid is None:
-        before_role = kc.get_realm_role(name, realm)
-    else:
-        before_role = kc.get_client_role(name, clientid, realm)
-
-    if before_role is None:
-        before_role = {}
-
-    # Build a proposed changeset from parameters given to this module
-    changeset = {}
-
-    for param in role_params:
-        new_param_value = module.params.get(param)
-        old_value = before_role[param] if param in before_role else None
-        if new_param_value != old_value:
-            changeset[camel(param)] = new_param_value
-
-    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
-    desired_role = before_role.copy()
-    desired_role.update(changeset)
-
-    result['proposed'] = changeset
-    result['existing'] = before_role
-
-    # Cater for when it doesn't exist (an empty dict)
-    if not before_role:
-        if state == 'absent':
-            # Do nothing and exit
-            if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Role does not exist, doing nothing.'
- module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if name is None: - module.fail_json(msg='name must be specified when creating a new role') - - if module._diff: - result['diff'] = dict(before='', after=desired_role) - - if module.check_mode: - module.exit_json(**result) - - # create it - if clientid is None: - kc.create_realm_role(desired_role, realm) - after_role = kc.get_realm_role(name, realm) - else: - kc.create_client_role(desired_role, clientid, realm) - after_role = kc.get_client_role(name, clientid, realm) - - result['end_state'] = after_role - - result['msg'] = 'Role {name} has been created'.format(name=name) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_role == before_role: - result['changed'] = False - result['end_state'] = desired_role - result['msg'] = "No changes required to role {name}.".format(name=name) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_role, after=desired_role) - - if module.check_mode: - module.exit_json(**result) - - # do the update - if clientid is None: - kc.update_realm_role(desired_role, realm) - after_role = kc.get_realm_role(name, realm) - else: - kc.update_client_role(desired_role, clientid, realm) - after_role = kc.get_client_role(name, clientid, realm) - - result['end_state'] = after_role - - result['msg'] = "Role {name} has been updated".format(name=name) - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_role, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - if clientid is None: - kc.delete_realm_role(name, realm) - else: - kc.delete_client_role(name, clientid, realm) - - result['end_state'] = {} - - result['msg'] = "Role {name} has been deleted".format(name=name) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_user_federation.py b/plugins/modules/identity/keycloak/keycloak_user_federation.py deleted file mode 100644 index 7c4b0e41c9..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_user_federation.py +++ /dev/null @@ -1,980 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_user_federation - -short_description: Allows administration of Keycloak user federations via Keycloak API - -version_added: 3.7.0 - -description: - - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). - - -options: - state: - description: - - State of the user federation. 
- - On C(present), the user federation will be created if it does not yet exist, or updated with - the parameters you provide. - - On C(absent), the user federation will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - realm: - description: - - The Keycloak realm under which this user federation resides. - default: 'master' - type: str - - id: - description: - - The unique ID for this user federation. If left empty, the user federation will be searched - by its I(name). - type: str - - name: - description: - - Display name of provider when linked in admin console. - type: str - - provider_id: - description: - - Provider for this user federation. - aliases: - - providerId - type: str - choices: - - ldap - - kerberos - - provider_type: - description: - - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)). - aliases: - - providerType - default: org.keycloak.storage.UserStorageProvider - type: str - - parent_id: - description: - - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank. - aliases: - - parentId - type: str - - config: - description: - - Dict specifying the configuration options for the provider; the contents differ depending on - the value of I(provider_id). Examples are given below for C(ldap) and C(kerberos). It is easiest - to obtain valid config values by dumping an already-existing user federation configuration - through check-mode in the I(existing) field. - type: dict - suboptions: - enabled: - description: - - Enable/disable this user federation. - default: true - type: bool - - priority: - description: - - Priority of provider when doing a user lookup. Lowest first. - default: 0 - type: int - - importEnabled: - description: - - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured - sync policies. - default: true - type: bool - - editMode: - description: - - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP - on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP. - type: str - choices: - - READ_ONLY - - WRITABLE - - UNSYNCED - - syncRegistrations: - description: - - Should newly created users be created within LDAP store? Priority effects which - provider is chosen to sync the new user. - default: false - type: bool - - vendor: - description: - - LDAP vendor (provider). - type: str - - usernameLDAPAttribute: - description: - - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server - vendors it can be C(uid). For Active directory it can be C(sAMAccountName) or C(cn). - The attribute should be filled for all LDAP user records you want to import from - LDAP to Keycloak. - type: str - - rdnLDAPAttribute: - description: - - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. - Usually it's the same as Username LDAP attribute, however it is not required. For - example for Active directory, it is common to use C(cn) as RDN attribute when - username attribute might be C(sAMAccountName). - type: str - - uuidLDAPAttribute: - description: - - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects - in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different. - For example for Active directory it should be C(objectGUID). 
If your LDAP server does - not support the notion of UUID, you can use any other attribute that is supposed to - be unique among LDAP users in tree. - type: str - - userObjectClasses: - description: - - All values of LDAP objectClass attribute for users in LDAP divided by comma. - For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users - will be written to LDAP with all those object classes and existing LDAP user records - are found just if they contain all those object classes. - type: str - - connectionUrl: - description: - - Connection URL to your LDAP server. - type: str - - usersDn: - description: - - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users. - type: str - - customUserSearchFilter: - description: - - Additional LDAP Filter for filtering searched users. Leave this empty if you don't - need additional filter. - type: str - - searchScope: - description: - - For one level, the search applies only for users in the DNs specified by User DNs. - For subtree, the search applies to the whole subtree. See LDAP documentation for - more details. - default: '1' - type: str - choices: - - '1' - - '2' - - authType: - description: - - Type of the Authentication method used during LDAP Bind operation. It is used in - most of the requests sent to the LDAP server. - default: 'none' - type: str - choices: - - none - - simple - - bindDn: - description: - - DN of LDAP user which will be used by Keycloak to access LDAP server. - type: str - - bindCredential: - description: - - Password of LDAP admin. - type: str - - startTls: - description: - - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling. - default: false - type: bool - - usePasswordModifyExtendedOp: - description: - - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify - extended operation usually requires that LDAP user already has password in the LDAP - server. So when this is used with 'Sync Registrations', it can be good to add also - 'Hardcoded LDAP attribute mapper' with randomly generated initial password. - default: false - type: bool - - validatePasswordPolicy: - description: - - Determines if Keycloak should validate the password with the realm password policy - before updating it. - default: false - type: bool - - trustEmail: - description: - - If enabled, email provided by this provider is not verified even if verification is - enabled for the realm. - default: false - type: bool - - useTruststoreSpi: - description: - - Specifies whether LDAP connection will use the truststore SPI with the truststore - configured in standalone.xml/domain.xml. C(Always) means that it will always use it. - C(Never) means that it will not use it. C(Only for ldaps) means that it will use if - your connection URL use ldaps. Note even if standalone.xml/domain.xml is not - configured, the default Java cacerts or certificate specified by - C(javax.net.ssl.trustStore) property will be used. - default: ldapsOnly - type: str - choices: - - always - - ldapsOnly - - never - - connectionTimeout: - description: - - LDAP Connection Timeout in milliseconds. - type: int - - readTimeout: - description: - - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations. - type: int - - pagination: - description: - - Does the LDAP server support pagination. - default: true - type: bool - - connectionPooling: - description: - - Determines if Keycloak should use connection pooling for accessing LDAP server. 
- default: true - type: bool - - connectionPoolingAuthentication: - description: - - A list of space-separated authentication types of connections that may be pooled. - type: str - choices: - - none - - simple - - DIGEST-MD5 - - connectionPoolingDebug: - description: - - A string that indicates the level of debug output to produce. Example valid values are - C(fine) (trace connection creation and removal) and C(all) (all debugging information). - type: str - - connectionPoolingInitSize: - description: - - The number of connections per connection identity to create when initially creating a - connection for the identity. - type: int - - connectionPoolingMaxSize: - description: - - The maximum number of connections per connection identity that can be maintained - concurrently. - type: int - - connectionPoolingPrefSize: - description: - - The preferred number of connections per connection identity that should be maintained - concurrently. - type: int - - connectionPoolingProtocol: - description: - - A list of space-separated protocol types of connections that may be pooled. - Valid types are C(plain) and C(ssl). - type: str - - connectionPoolingTimeout: - description: - - The number of milliseconds that an idle connection may remain in the pool without - being closed and removed from the pool. - type: int - - allowKerberosAuthentication: - description: - - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data - about authenticated users will be provisioned from this LDAP server. - default: false - type: bool - - kerberosRealm: - description: - - Name of kerberos realm. - type: str - - serverPrincipal: - description: - - Full name of server principal for HTTP service including server and domain name. For - example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the - KeyTab file. - type: str - - keyTab: - description: - - Location of Kerberos KeyTab file containing the credentials of server principal. For - example C(/etc/krb5.keytab). - type: str - - debug: - description: - - Enable/disable debug logging to standard output for Krb5LoginModule. - type: bool - - useKerberosForPasswordAuthentication: - description: - - Use Kerberos login module for authenticate username/password against Kerberos server - instead of authenticating against LDAP server with Directory Service API. - default: false - type: bool - - allowPasswordAuthentication: - description: - - Enable/disable possibility of username/password authentication against Kerberos database. - type: bool - - batchSizeForSync: - description: - - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction. - default: 1000 - type: int - - fullSyncPeriod: - description: - - Period for full synchronization in seconds. - default: -1 - type: int - - changedSyncPeriod: - description: - - Period for synchronization of changed or newly created LDAP users in seconds. - default: -1 - type: int - - updateProfileFirstLogin: - description: - - Update profile on first login. - type: bool - - cachePolicy: - description: - - Cache Policy for this storage provider. - type: str - default: 'DEFAULT' - choices: - - DEFAULT - - EVICT_DAILY - - EVICT_WEEKLY - - MAX_LIFESPAN - - NO_CACHE - - evictionDay: - description: - - Day of the week the entry will become invalid on. - type: str - - evictionHour: - description: - - Hour of day the entry will become invalid on. - type: str - - evictionMinute: - description: - - Minute of day the entry will become invalid on. 
- type: str - - maxLifespan: - description: - - Max lifespan of cache entry in milliseconds. - type: int - - mappers: - description: - - A list of dicts defining mappers associated with this Identity Provider. - type: list - elements: dict - suboptions: - id: - description: - - Unique ID of this mapper. - type: str - - name: - description: - - Name of the mapper. If no ID is given, the mapper will be searched by name. - type: str - - parentId: - description: - - Unique ID for the parent of this mapper. ID of the user federation will automatically - be used if left blank. - type: str - - providerId: - description: - - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)). - type: str - - providerType: - description: - - Component type for this mapper (only supported value is C(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)). - type: str - - config: - description: - - Dict specifying the configuration options for the mapper; the contents differ - depending on the value of I(identityProviderMapper). - type: dict - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' - - name: Create LDAP user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-ldap - state: present - provider_id: ldap - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - batchSizeForSync: 1000 - editMode: READ_ONLY - importEnabled: true - syncRegistrations: false - vendor: other - usernameLDAPAttribute: uid - rdnLDAPAttribute: uid - uuidLDAPAttribute: entryUUID - userObjectClasses: inetOrgPerson, organizationalPerson - connectionUrl: ldaps://ldap.example.com:636 - usersDn: ou=Users,dc=example,dc=com - authType: simple - bindDn: cn=directory reader - bindCredential: password - searchScope: 1 - validatePasswordPolicy: false - trustEmail: false - useTruststoreSpi: ldapsOnly - connectionPooling: true - pagination: true - allowKerberosAuthentication: false - debug: false - useKerberosForPasswordAuthentication: false - mappers: - - name: "full name" - providerId: "full-name-ldap-mapper" - providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - config: - ldap.full.name.attribute: cn - read.only: true - write.only: false - - - name: Create Kerberos user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-kerberos - state: present - provider_id: kerberos - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - kerberosRealm: EXAMPLE.COM - serverPrincipal: HTTP/host.example.com@EXAMPLE.COM - keyTab: keytab - allowPasswordAuthentication: false - updateProfileFirstLogin: false - - - name: Delete user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-federation - state: absent - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." 
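Keycloak returns component config values as single-element lists; the module's `sanitize()` helper further down unwraps them and masks the bind credential, which is why the samples below show plain strings and C(**********). A standalone sketch of that step, under the same assumptions about the representation (the helper name here is illustrative):

```python
from copy import deepcopy


def sanitize_component(comp):
    """Unwrap single-element config lists and mask the bind credential,
    mirroring the module's sanitize() helper."""
    out = deepcopy(comp)
    if 'config' in out:
        out['config'] = dict((k, v[0]) for k, v in out['config'].items())
        if 'bindCredential' in out['config']:
            out['config']['bindCredential'] = '**********'
    return out


raw = {'name': 'my-ldap', 'config': {'enabled': ['true'], 'bindCredential': ['secret']}}
print(sanitize_component(raw))
# {'name': 'my-ldap', 'config': {'enabled': 'true', 'bindCredential': '**********'}}
```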
- -proposed: - description: Representation of proposed user federation. - returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "name": "ldap", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" - } - -existing: - description: Representation of existing user federation. - returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "changedSyncPeriod": "-1", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "fullSyncPeriod": "-1", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "mappers": [ - { - "config": { - "always.read.value.from.ldap": "false", - "is.mandatory.in.ldap": "false", - "ldap.attribute": "mail", - "read.only": "true", - "user.model.attribute": "email" - }, - "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", - "name": "email", - "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "providerId": "user-attribute-ldap-mapper", - "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - } - ], - "name": "myfed", - "parentId": "myrealm", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" - } - -end_state: - description: Representation of user federation after module execution. 
- returned: on success - type: dict - sample: { - "config": { - "allowPasswordAuthentication": "false", - "cachePolicy": "DEFAULT", - "enabled": "true", - "kerberosRealm": "EXAMPLE.COM", - "keyTab": "/etc/krb5.keytab", - "priority": "0", - "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", - "updateProfileFirstLogin": "false" - }, - "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", - "mappers": [], - "name": "kerberos", - "parentId": "myrealm", - "providerId": "kerberos", - "providerType": "org.keycloak.storage.UserStorageProvider" - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from copy import deepcopy - - -def sanitize(comp): - compcopy = deepcopy(comp) - if 'config' in compcopy: - compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) - if 'bindCredential' in compcopy['config']: - compcopy['config']['bindCredential'] = '**********' - if 'mappers' in compcopy: - for mapper in compcopy['mappers']: - if 'config' in mapper: - mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items()) - return compcopy - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - config_spec = dict( - allowKerberosAuthentication=dict(type='bool', default=False), - allowPasswordAuthentication=dict(type='bool'), - authType=dict(type='str', choices=['none', 'simple'], default='none'), - batchSizeForSync=dict(type='int', default=1000), - bindCredential=dict(type='str', no_log=True), - bindDn=dict(type='str'), - cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), - changedSyncPeriod=dict(type='int', default=-1), - connectionPooling=dict(type='bool', default=True), - connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), - connectionPoolingDebug=dict(type='str'), - connectionPoolingInitSize=dict(type='int'), - connectionPoolingMaxSize=dict(type='int'), - connectionPoolingPrefSize=dict(type='int'), - connectionPoolingProtocol=dict(type='str'), - connectionPoolingTimeout=dict(type='int'), - connectionTimeout=dict(type='int'), - connectionUrl=dict(type='str'), - customUserSearchFilter=dict(type='str'), - debug=dict(type='bool'), - editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), - enabled=dict(type='bool', default=True), - evictionDay=dict(type='str'), - evictionHour=dict(type='str'), - evictionMinute=dict(type='str'), - fullSyncPeriod=dict(type='int', default=-1), - importEnabled=dict(type='bool', default=True), - kerberosRealm=dict(type='str'), - keyTab=dict(type='str', no_log=False), - maxLifespan=dict(type='int'), - pagination=dict(type='bool', default=True), - priority=dict(type='int', default=0), - rdnLDAPAttribute=dict(type='str'), - readTimeout=dict(type='int'), - searchScope=dict(type='str', choices=['1', '2'], default='1'), - serverPrincipal=dict(type='str'), - startTls=dict(type='bool', default=False), - syncRegistrations=dict(type='bool', default=False), - trustEmail=dict(type='bool', default=False), - updateProfileFirstLogin=dict(type='bool'), - useKerberosForPasswordAuthentication=dict(type='bool', default=False), - usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), - useTruststoreSpi=dict(type='str', 
choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), - userObjectClasses=dict(type='str'), - usernameLDAPAttribute=dict(type='str'), - usersDn=dict(type='str'), - uuidLDAPAttribute=dict(type='str'), - validatePasswordPolicy=dict(type='bool', default=False), - vendor=dict(type='str'), - ) - - mapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - parentId=dict(type='str'), - providerId=dict(type='str'), - providerType=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - config=dict(type='dict', options=config_spec), - state=dict(type='str', default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - id=dict(type='str'), - name=dict(type='str'), - provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos']), - provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), - parent_id=dict(type='str', aliases=['parentId']), - mappers=dict(type='list', elements='dict', options=mapper_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - config = module.params.get('config') - mappers = module.params.get('mappers') - cid = module.params.get('id') - name = module.params.get('name') - - # Keycloak API expects config parameters to be arrays containing a single string element - if config is not None: - module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in config.items() if config[k] is not None) - - if mappers is not None: - for mapper in mappers: - if mapper.get('config') is not None: - mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in mapper['config'].items() if mapper['config'][k] is not None) - - # Filter and map the parameters names that apply - comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', parent=realm, name=name)), realm) - if len(found) > 1: - module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. 
Cannot continue.'.format(name=name))
-        before_comp = next(iter(found), None)
-        if before_comp is not None:
-            cid = before_comp['id']
-    else:
-        before_comp = kc.get_component(cid, realm)
-
-    if before_comp is None:
-        before_comp = {}
-
-    # if user federation exists, get associated mappers
-    if cid is not None:
-        before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name'))
-
-    # Build a proposed changeset from parameters given to this module
-    changeset = {}
-
-    for param in comp_params:
-        new_param_value = module.params.get(param)
-        old_value = before_comp[camel(param)] if camel(param) in before_comp else None
-        if param == 'mappers':
-            new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
-        if new_param_value != old_value:
-            changeset[camel(param)] = new_param_value
-
-    # special handling of mappers list to allow change detection
-    if module.params.get('mappers') is not None:
-        if module.params['provider_id'] == 'kerberos':
-            module.fail_json(msg='Cannot configure mappers for Kerberos federations.')
-        for change in module.params['mappers']:
-            change = dict((k, v) for k, v in change.items() if change[k] is not None)
-            if change.get('id') is None and change.get('name') is None:
-                module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
-            if cid is None:
-                old_mapper = {}
-            elif change.get('id') is not None:
-                old_mapper = kc.get_component(change['id'], realm)
-                if old_mapper is None:
-                    old_mapper = {}
-            else:
-                found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm)
-                if len(found) > 1:
-                    module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name']))
-                if len(found) == 1:
-                    old_mapper = found[0]
-                else:
-                    old_mapper = {}
-            new_mapper = old_mapper.copy()
-            new_mapper.update(change)
-            if new_mapper != old_mapper:
-                if changeset.get('mappers') is None:
-                    changeset['mappers'] = list()
-                changeset['mappers'].append(new_mapper)
-
-    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
-    desired_comp = before_comp.copy()
-    desired_comp.update(changeset)
-
-    result['proposed'] = sanitize(changeset)
-    result['existing'] = sanitize(before_comp)
-
-    # Cater for when it doesn't exist (an empty dict)
-    if not before_comp:
-        if state == 'absent':
-            # Do nothing and exit
-            if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'User federation does not exist; doing nothing.'
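The changeset logic above hinges on one easily-missed convention: Keycloak's component API expects every `config` value to be a list containing a single string. A minimal standalone sketch of that transformation (the helper name is hypothetical; it mirrors the dict comprehension used earlier in this module):

```python
# Sketch: normalize plain module parameters into Keycloak's component-config
# shape, where every value is a single-element list of strings.
def to_keycloak_config(config):
    normalized = {}
    for key, value in config.items():
        if value is None:
            continue  # unset parameters are omitted entirely
        # non-strings are stringified and lower-cased, so True -> "true"
        normalized[key] = [value if isinstance(value, str) else str(value).lower()]
    return normalized

print(to_keycloak_config({'enabled': True, 'priority': 0, 'vendor': 'other'}))
# {'enabled': ['true'], 'priority': ['0'], 'vendor': ['other']}
```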
- module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_comp)) - - if module.check_mode: - module.exit_json(**result) - - # create it - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) - after_comp = kc.create_component(desired_comp, realm) - - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_component(mapper, realm) - else: - if mapper.get('parentId') is None: - mapper['parentId'] = after_comp['id'] - mapper = kc.create_component(mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been created".format(id=after_comp['id']) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_comp == before_comp: - result['changed'] = False - result['end_state'] = sanitize(desired_comp) - result['msg'] = "No changes required to user federation {id}.".format(id=cid) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) - kc.update_component(desired_comp, realm) - after_comp = kc.get_component(cid, realm) - - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_component(mapper, realm) - else: - if mapper.get('parentId') is None: - mapper['parentId'] = desired_comp['id'] - mapper = kc.create_component(mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been updated".format(id=cid) - module.exit_json(**result) - - elif state == 'absent': - # Process a deletion - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_component(cid, realm) - - result['end_state'] = {} - - result['msg'] = "User federation {id} has been deleted".format(id=cid) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/onepassword_info.py b/plugins/modules/identity/onepassword_info.py deleted file mode 100644 index 95ef7c12b7..0000000000 --- a/plugins/modules/identity/onepassword_info.py +++ /dev/null @@ -1,384 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Ryan Conway (@rylon) -# (c) 2018, Scott Buchanan (onepassword.py used as starting point) -# (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' -module: onepassword_info -author: - - Ryan Conway (@Rylon) -requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) -notes: - - Tested with C(op) version 0.5.5 - - "Based on the C(onepassword) lookup plugin by Scott Buchanan ." -short_description: Gather items from 1Password -description: - - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items. 
-    - A fatal error occurs if any of the items being searched for cannot be found.
-    - Using the C(no_log) option is recommended, to avoid logging the values of the secrets being retrieved.
-    - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
-      Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
-      You must now use the C(register) option to use the facts in other tasks.
-options:
-    search_terms:
-        type: list
-        elements: dict
-        description:
-            - A list of one or more search terms.
-            - Each search term can either be a simple string or it can be a dictionary for more control.
-            - When passing a simple string, I(field) is assumed to be C(password).
-            - When passing a dictionary, the following fields are available.
-        suboptions:
-            name:
-                type: str
-                description:
-                    - The name of the 1Password item to search for (required).
-            field:
-                type: str
-                description:
-                    - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
-            section:
-                type: str
-                description:
-                    - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
-            vault:
-                type: str
-                description:
-                    - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
-        required: True
-    auto_login:
-        type: dict
-        description:
-            - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
-              will attempt to sign in to 1Password automatically.
-            - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
-            - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
-              the Ansible Vault is equal to or greater in strength than the 1Password master password.
-        suboptions:
-            subdomain:
-                type: str
-                description:
-                    - 1Password subdomain name (.1password.com).
-                    - If this is not specified, the most recent subdomain will be used.
-            username:
-                type: str
-                description:
-                    - 1Password username.
-                    - Only required for initial sign in.
-            master_password:
-                type: str
-                description:
-                    - The master password for your subdomain.
-                    - This is always required when specifying C(auto_login).
-                required: True
-            secret_key:
-                type: str
-                description:
-                    - The secret key for your subdomain.
-                    - Only required for initial sign in.
-        default: {}
-        required: False
-    cli_path:
-        type: path
-        description: Used to specify the exact path to the C(op) command line interface.
-        required: False
-        default: 'op'
-'''
-
-EXAMPLES = '''
-# Gather secrets from 1Password, assuming there is a 'password' field:
-- name: Get a password
-  community.general.onepassword_info:
-    search_terms: My 1Password item
-  delegate_to: localhost
-  register: my_1password_item
-  no_log: true   # Don't want to log the secrets to the console!
-
-# Gather secrets from 1Password, with more advanced search terms:
-- name: Get a password
-  community.general.onepassword_info:
-    search_terms:
-      - name: My 1Password item
-        field: Custom field name       # optional, defaults to 'password'
-        section: Custom section name   # optional, defaults to 'None'
-        vault: Name of the vault       # optional, only necessary if there is more than 1 Vault available
-  delegate_to: localhost
-  register: my_1password_item
-  no_log: True   # Don't want to log the secrets to the console!
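The mix of bare strings and dictionaries accepted by I(search_terms) is normalized inside the module before any C(op) calls are made; a rough standalone sketch of that normalization, assuming the defaults described above (the helper name is hypothetical):

```python
# Sketch: each search term becomes a dict; bare strings are treated as the
# item name, and 'field' falls back to 'password' when not given.
def normalize_terms(terms):
    normalized = []
    for term in terms:
        if not isinstance(term, dict):
            term = {'name': term}
        term.setdefault('field', 'password')
        term.setdefault('section', None)
        term.setdefault('vault', None)
        normalized.append(term)
    return normalized

print(normalize_terms(['My 1Password item', {'name': 'Other item', 'field': 'username'}]))
```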
-
-# Gather secrets combining simple and advanced search terms to retrieve two items, from one of which we fetch two
-# fields. In the first, 'password' is fetched, as a field name is not specified (default behaviour), and in the
-# second, 'Custom field name' is fetched, as that is specified explicitly.
-- name: Get a password
-  community.general.onepassword_info:
-    search_terms:
-      - My 1Password item                 # 'name' is optional when passing a simple string...
-      - name: My Other 1Password item     # ...but it can also be set for consistency
-      - name: My 1Password item
-        field: Custom field name          # optional, defaults to 'password'
-        section: Custom section name      # optional, defaults to 'None'
-        vault: Name of the vault          # optional, only necessary if there is more than 1 Vault available
-      - name: A 1Password item with document attachment
-  delegate_to: localhost
-  register: my_1password_item
-  no_log: true   # Don't want to log the secrets to the console!
-
-- name: Debug a password (for example)
-  ansible.builtin.debug:
-    msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
-'''
-
-RETURN = '''
----
-# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
-# This shows the response you would expect to receive from the third example documented above.
-onepassword:
-    description: Dictionary of each 1Password item matching the given search terms, showing what would be returned from the third example above.
-    returned: success
-    type: dict
-    sample:
-        "My 1Password item":
-            password: the value of this field
-            Custom field name: the value of this field
-        "My Other 1Password item":
-            password: the value of this field
-        "A 1Password item with document attachment":
-            document: the contents of the document attached to this item
-'''
-
-
-import errno
-import json
-import os
-import re
-
-from subprocess import Popen, PIPE
-
-from ansible.module_utils.common.text.converters import to_bytes, to_native
-from ansible.module_utils.basic import AnsibleModule
-
-
-class AnsibleModuleError(Exception):
-    def __init__(self, results):
-        self.results = results
-
-    def __repr__(self):
-        return self.results
-
-
-class OnePasswordInfo(object):
-
-    def __init__(self):
-        self.cli_path = module.params.get('cli_path')
-        self.config_file_path = '~/.op/config'
-        self.auto_login = module.params.get('auto_login')
-        self.logged_in = False
-        self.token = None
-
-        terms = module.params.get('search_terms')
-        self.terms = self.parse_search_terms(terms)
-
-    def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
-        if self.token:
-            # Adds the session token to all commands if we're logged in.
-            args += [to_bytes('--session=') + self.token]
-
-        command = [self.cli_path] + args
-        p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
-        out, err = p.communicate(input=command_input)
-        rc = p.wait()
-        if not ignore_errors and rc != expected_rc:
-            raise AnsibleModuleError(to_native(err))
-        return rc, out, err
-
-    def _parse_field(self, data_json, item_id, field_name, section_title=None):
-        data = json.loads(data_json)
-
-        if ('documentAttributes' in data['details']):
-            # This is actually a document, let's fetch the document data instead!
-            document = self._run(["get", "document", data['overview']['title']])
-            return {'document': document[1].strip()}
-
-        else:
-            # This is not a document, let's try to find the requested field
-
-            # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
-            # not inside it, so we need to check there first.
-            if (field_name in data['details']):
-                return {field_name: data['details'][field_name]}
-
-            # Otherwise we continue looking inside the 'fields' attribute for the specified field.
-            else:
-                if section_title is None:
-                    for field_data in data['details'].get('fields', []):
-                        if field_data.get('name', '').lower() == field_name.lower():
-                            return {field_name: field_data.get('value', '')}
-
-                # We have not found it yet, so now let's see if there are any sections defined
-                # and search through those for the field. If a section was given, we skip
-                # any non-matching sections, otherwise we search them all until we find the field.
-                for section_data in data['details'].get('sections', []):
-                    if section_title is not None and section_title.lower() != section_data['title'].lower():
-                        continue
-                    for field_data in section_data.get('fields', []):
-                        if field_data.get('t', '').lower() == field_name.lower():
-                            return {field_name: field_data.get('v', '')}
-
-        # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
-        optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
-        module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
-
-    def parse_search_terms(self, terms):
-        processed_terms = []
-
-        for term in terms:
-            if not isinstance(term, dict):
-                term = {'name': term}
-
-            if 'name' not in term:
-                module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
-
-            term['field'] = term.get('field', 'password')
-            term['section'] = term.get('section', None)
-            term['vault'] = term.get('vault', None)
-
-            processed_terms.append(term)
-
-        return processed_terms
-
-    def get_raw(self, item_id, vault=None):
-        try:
-            args = ["get", "item", item_id]
-            if vault is not None:
-                args += ['--vault={0}'.format(vault)]
-            rc, output, dummy = self._run(args)
-            return output
-
-        except Exception as e:
-            if re.search(".*not found.*", to_native(e)):
-                module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
-            else:
-                module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
-
-    def get_field(self, item_id, field, section=None, vault=None):
-        output = self.get_raw(item_id, vault)
-        return self._parse_field(output, item_id, field, section) if output != '' else ''
-
-    def full_login(self):
-        if self.auto_login is not None:
-            if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
-                        self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
-                module.fail_json(msg='Unable to perform initial sign in to 1Password. '
-                                     'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
-
-            args = [
-                'signin',
-                '{0}.1password.com'.format(self.auto_login['subdomain']),
-                to_bytes(self.auto_login['username']),
-                to_bytes(self.auto_login['secret_key']),
-                '--output=raw',
-            ]
-
-            try:
-                rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
-                self.token = out.strip()
-            except AnsibleModuleError as e:
-                module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
-        else:
-            module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
-                                 "or define credentials in 'auto_login'. See the module documentation for details."
-                                 % self.cli_path)
-
-    def get_token(self):
-        # If the config file exists, assume an initial signin has taken place and try basic sign in
-        if os.path.isfile(self.config_file_path):
-
-            if self.auto_login is not None:
-
-                # Since we are not currently signed in, master_password is required at a minimum
-                if not self.auto_login.get('master_password'):
-                    module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
-
-                # Try signing in using the master_password and a subdomain if one is provided
-                try:
-                    args = ['signin', '--output=raw']
-
-                    if self.auto_login.get('subdomain'):
-                        args = ['signin', self.auto_login['subdomain'], '--output=raw']
-
-                    rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
-                    self.token = out.strip()
-
-                except AnsibleModuleError:
-                    self.full_login()
-
-            else:
-                self.full_login()
-
-        else:
-            # Attempt a full sign in since there appears to be no existing sign in
-            self.full_login()
-
-    def assert_logged_in(self):
-        try:
-            rc, out, err = self._run(['get', 'account'], ignore_errors=True)
-            if rc == 0:
-                self.logged_in = True
-            if not self.logged_in:
-                self.get_token()
-        except OSError as e:
-            if e.errno == errno.ENOENT:
-                module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
-            raise e
-
-    def run(self):
-        result = {}
-
-        self.assert_logged_in()
-
-        for term in self.terms:
-            value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
-
-            if term['name'] in result:
-                # If we already have a result for this key, we have to append this result dictionary
-                # to the existing one. This is only applicable when there is a single item
-                # in 1Password which has two different fields, and we want to retrieve both of them.
-                result[term['name']].update(value)
-            else:
-                # If this is the first result for this key, simply set it.
-                result[term['name']] = value
-
-        return result
-
-
-def main():
-    global module
-    module = AnsibleModule(
-        argument_spec=dict(
-            cli_path=dict(type='path', default='op'),
-            auto_login=dict(type='dict', options=dict(
-                subdomain=dict(type='str'),
-                username=dict(type='str'),
-                master_password=dict(required=True, type='str', no_log=True),
-                secret_key=dict(type='str', no_log=True),
-            ), default=None),
-            search_terms=dict(required=True, type='list', elements='dict'),
-        ),
-        supports_check_mode=True
-    )
-
-    results = {'onepassword': OnePasswordInfo().run()}
-
-    module.exit_json(changed=False, **results)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/identity/opendj/opendj_backendprop.py b/plugins/modules/identity/opendj/opendj_backendprop.py
deleted file mode 100644
index be118a505d..0000000000
--- a/plugins/modules/identity/opendj/opendj_backendprop.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: opendj_backendprop
-short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
-description:
-   - This module will update settings for OpenDJ with the command set-backend-prop.
-   - It will check first via the get-backend-prop command if the configuration needs to be applied.
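The check-then-set pattern described above is what keeps the module idempotent: the current value is read with C(get-backend-prop), and C(set-backend-prop) only runs on a mismatch. A rough standalone sketch of that flow (binary path, connection details, and property names are illustrative placeholders):

```python
# Sketch of the idempotent check-then-set flow around dsconfig
# (binary path, credentials and property values are placeholders).
import subprocess

DSCONFIG = '/opt/opendj/bin/dsconfig'  # assumed install location
CONN = ['-h', 'localhost', '--port', '4444',
        '--bindDN', 'cn=Directory Manager', '-w', 'password', '-n', '-X']

def current_value(backend, prop):
    out = subprocess.run([DSCONFIG, 'get-backend-prop', '--backend-name', backend, '-s'] + CONN,
                         capture_output=True, text=True, check=True).stdout
    # Output is one "name value" pair per line, mirroring validate_data() below
    for line in out.splitlines():
        parts = line.split()
        if parts and parts[0] == prop:
            return parts[1]
    return None

def ensure(backend, prop, value):
    if current_value(backend, prop) == str(value):
        return False  # already as desired: no change, stays idempotent
    subprocess.run([DSCONFIG, 'set-backend-prop', '--backend-name', backend,
                    '--set', '%s:%s' % (prop, value)] + CONN, check=True)
    return True
```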
-author: - - Werner Dijkerman (@dj-wasabi) -options: - opendj_bindir: - description: - - The path to the bin directory of OpenDJ. - required: false - default: /opt/opendj/bin - type: path - hostname: - description: - - The hostname of the OpenDJ server. - required: true - type: str - port: - description: - - The Admin port on which the OpenDJ instance is available. - required: true - type: str - username: - description: - - The username to connect to. - required: false - default: cn=Directory Manager - type: str - password: - description: - - The password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: str - passwordfile: - description: - - Location to the password file which holds the password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: path - backend: - description: - - The name of the backend on which the property needs to be updated. - required: true - type: str - name: - description: - - The configuration setting to update. - required: true - type: str - value: - description: - - The value for the configuration item. - required: true - type: str - state: - description: - - If configuration needs to be added/updated - required: false - default: "present" - type: str -''' - -EXAMPLES = ''' - - name: Add or update OpenDJ backend properties - action: opendj_backendprop - hostname=localhost - port=4444 - username="cn=Directory Manager" - password=password - backend=userRoot - name=index-entry-limit - value=5000 -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule - - -class BackendProp(object): - - def __init__(self, module): - self._module = module - - def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): - my_command = [ - opendj_bindir + '/dsconfig', - 'get-backend-prop', - '-h', hostname, - '--port', str(port), - '--bindDN', username, - '--backend-name', backend_name, - '-n', '-X', '-s' - ] + password_method - rc, stdout, stderr = self._module.run_command(my_command) - if rc == 0: - return stdout - else: - self._module.fail_json(msg="Error message: " + str(stderr)) - - def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): - my_command = [ - opendj_bindir + '/dsconfig', - 'set-backend-prop', - '-h', hostname, - '--port', str(port), - '--bindDN', username, - '--backend-name', backend_name, - '--set', name + ":" + value, - '-n', '-X' - ] + password_method - rc, stdout, stderr = self._module.run_command(my_command) - if rc == 0: - return True - else: - self._module.fail_json(msg="Error message: " + stderr) - - def validate_data(self, data=None, name=None, value=None): - for config_line in data.split('\n'): - if config_line: - split_line = config_line.split() - if split_line[0] == name: - if split_line[1] == value: - return True - return False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - opendj_bindir=dict(default="/opt/opendj/bin", type="path"), - hostname=dict(required=True), - port=dict(required=True), - username=dict(default="cn=Directory Manager", required=False), - password=dict(required=False, no_log=True), - passwordfile=dict(required=False, type="path"), - backend=dict(required=True), - name=dict(required=True), - value=dict(required=True), - state=dict(default="present"), - ), - supports_check_mode=True, - mutually_exclusive=[['password', 'passwordfile']], - required_one_of=[['password', 'passwordfile']] - ) - - 
opendj_bindir = module.params['opendj_bindir'] - hostname = module.params['hostname'] - port = module.params['port'] - username = module.params['username'] - password = module.params['password'] - passwordfile = module.params['passwordfile'] - backend_name = module.params['backend'] - name = module.params['name'] - value = module.params['value'] - state = module.params['state'] - - if module.params["password"] is not None: - password_method = ['-w', password] - elif module.params["passwordfile"] is not None: - password_method = ['-j', passwordfile] - - opendj = BackendProp(module) - validate = opendj.get_property(opendj_bindir=opendj_bindir, - hostname=hostname, - port=port, - username=username, - password_method=password_method, - backend_name=backend_name) - - if validate: - if not opendj.validate_data(data=validate, name=name, value=value): - if module.check_mode: - module.exit_json(changed=True) - if opendj.set_property(opendj_bindir=opendj_bindir, - hostname=hostname, - port=port, - username=username, - password_method=password_method, - backend_name=backend_name, - name=name, - value=value): - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.exit_json(changed=False) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py deleted file mode 120000 index c2b5ec87aa..0000000000 --- a/plugins/modules/idrac_redfish_command.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/redfish/idrac_redfish_command.py \ No newline at end of file diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py new file mode 100644 index 0000000000..b60126764a --- /dev/null +++ b/plugins/modules/idrac_redfish_command.py @@ -0,0 +1,249 @@ +#!/usr/bin/python + +# Copyright (c) 2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: idrac_redfish_command +short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. + - For use with Dell iDRAC operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on iDRAC. + type: str + command: + required: true + description: + - List of commands to execute on iDRAC. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iDRAC. + type: str + username: + description: + - Username for authenticating to iDRAC. + type: str + password: + description: + - Password for authenticating to iDRAC. + type: str + auth_token: + description: + - Security token for authenticating to iDRAC. + type: str + version_added: 2.3.0 + timeout: + description: + - Timeout in seconds for HTTP requests to iDRAC. + default: 10 + type: int + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. 
+ type: str + version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 + +author: "Jose Delarosa (@jose-delarosa)" +""" + +EXAMPLES = r""" +- name: Create BIOS configuration job (schedule BIOS setting update) + community.general.idrac_redfish_command: + category: Systems + command: CreateBiosConfigJob + resource_id: System.Embedded.1 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. + returned: on success + type: dict + version_added: 6.6.0 + sample: {"job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"} +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def create_bios_config_job(self): + result = {} + key = "Bios" + jobs = "Jobs" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.systems_uris[0]) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + bios_uri = data[key]["@odata.id"] + + # Extract proper URI + response = self.get_request(self.root_uri + bios_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][ + "@odata.id"] + + payload = {"TargetSettingsURI": set_bios_attr_uri} + response = self.post_request( + self.root_uri + self.manager_uri + "/" + jobs, payload) + if response['ret'] is False: + return response + + response_output = response['resp'].__dict__ + job_id_full = response_output["headers"]["Location"] + job_id = re.search("JID_.+", job_id_full).group() + return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full} + + +CATEGORY_COMMANDS_ALL = { + "Systems": ["CreateBiosConfigJob"], + "Accounts": [], + "Manager": [] +} + + +def main(): + result = {} + return_values = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + 
rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + + if category == "Systems": + # execute only if we find a System resource + # NOTE: Currently overriding the usage of 'data_modification' due to + # how 'resource_id' is processed. In the case of CreateBiosConfigJob, + # we interact with BOTH systems and managers, so you currently cannot + # specify a single 'resource_id' to make both '_find_systems_resource' + # and '_find_managers_resource' return success. Since + # CreateBiosConfigJob doesn't use the matched 'resource_id' for a + # system regardless of what's specified, disabling the 'resource_id' + # inspection for the next call allows a specific manager to be + # specified with 'resource_id'. If we ever need to expand the input + # to inspect a specific system and manager in parallel, this will need + # updates. + rf_utils.data_modification = False + result = rf_utils._find_systems_resource() + rf_utils.data_modification = True + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "CreateBiosConfigJob": + # execute only if we find a Managers resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + result = rf_utils.create_bios_config_job() + if 'job_id' in result: + return_values['job_id'] = result['job_id'] + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + module.exit_json(changed=True, msg='Action was successful', return_values=return_values) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py deleted file mode 120000 index 2ca1308083..0000000000 --- a/plugins/modules/idrac_redfish_config.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/redfish/idrac_redfish_config.py \ No newline at end of file diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py new file mode 100644 index 0000000000..e7d6250624 --- /dev/null +++ b/plugins/modules/idrac_redfish_config.py @@ -0,0 +1,343 @@ +#!/usr/bin/python + +# Copyright (c) 2019 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: idrac_redfish_config +short_description: Manages servers through iDRAC using Dell Redfish APIs +description: + - For use with Dell iDRAC operations that require Redfish OEM extensions. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to set or update a configuration attribute. 
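Conceptually, each of these commands boils down to a single Redfish C(PATCH) against the manager's attribute registry; a minimal hedged sketch of that request using the C(requests) library (endpoint, credentials, and attribute name are illustrative, not fixed API paths):

```python
# Illustrative only: the PATCH this module effectively performs against
# <manager_uri>/Attributes. URI, credentials and attribute are examples.
import requests

resp = requests.patch(
    "https://idrac.example.com/redfish/v1/Managers/iDRAC.Embedded.1/Attributes",
    json={"Attributes": {"NTPConfigGroup.1.NTPEnable": "Enabled"}},
    auth=("root", "calvin"),
    verify=False,  # lab-only; keep certificate validation on in production
)
resp.raise_for_status()
```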
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.redfish
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  category:
+    required: true
+    type: str
+    description:
+      - Category to execute on iDRAC.
+  command:
+    required: true
+    description:
+      - List of commands to execute on iDRAC.
+      - V(SetManagerAttributes), V(SetLifecycleControllerAttributes) and V(SetSystemAttributes) are mutually exclusive commands
+        when O(category) is V(Manager).
+    type: list
+    elements: str
+  baseuri:
+    required: true
+    description:
+      - Base URI of iDRAC.
+    type: str
+  username:
+    description:
+      - Username for authenticating to iDRAC.
+    type: str
+  password:
+    description:
+      - Password for authenticating to iDRAC.
+    type: str
+  auth_token:
+    description:
+      - Security token for authenticating to iDRAC.
+    type: str
+    version_added: 2.3.0
+  manager_attributes:
+    required: false
+    description:
+      - Dictionary of iDRAC attribute name and value pairs to update.
+    default: {}
+    type: 'dict'
+    version_added: '0.2.0'
+  timeout:
+    description:
+      - Timeout in seconds for HTTP requests to iDRAC.
+    default: 10
+    type: int
+  resource_id:
+    required: false
+    description:
+      - ID of the System, Manager or Chassis to modify.
+    type: str
+    version_added: '0.2.0'
+  validate_certs:
+    version_added: 10.6.0
+  ca_path:
+    version_added: 10.6.0
+  ciphers:
+    version_added: 10.6.0
+
+author: "Jose Delarosa (@jose-delarosa)"
+"""
+
+EXAMPLES = r"""
+- name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+  community.general.idrac_redfish_config:
+    category: Manager
+    command: SetManagerAttributes
+    resource_id: iDRAC.Embedded.1
+    manager_attributes:
+      NTPConfigGroup.1.NTPEnable: "Enabled"
+      NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+      Time.1.Timezone: "{{ timezone }}"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Enable Syslog and set Syslog servers in iDRAC
+  community.general.idrac_redfish_config:
+    category: Manager
+    command: SetManagerAttributes
+    resource_id: iDRAC.Embedded.1
+    manager_attributes:
+      SysLog.1.SysLogEnable: "Enabled"
+      SysLog.1.Server1: "{{ syslog_server1 }}"
+      SysLog.1.Server2: "{{ syslog_server2 }}"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Configure SNMP community string, port, protocol and trap format
+  community.general.idrac_redfish_config:
+    category: Manager
+    command: SetManagerAttributes
+    resource_id: iDRAC.Embedded.1
+    manager_attributes:
+      SNMP.1.AgentEnable: "Enabled"
+      SNMP.1.AgentCommunity: "public_community_string"
+      SNMP.1.TrapFormat: "SNMPv1"
+      SNMP.1.SNMPProtocol: "All"
+      SNMP.1.DiscoveryPort: 161
+      SNMP.1.AlertPort: 162
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Enable CSIOR
+  community.general.idrac_redfish_config:
+    category: Manager
+    command: SetLifecycleControllerAttributes
+    resource_id: iDRAC.Embedded.1
+    manager_attributes:
+      LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+  community.general.idrac_redfish_config:
+    category: Manager
+    command: SetSystemAttributes
+    resource_id: iDRAC.Embedded.1
+    manager_attributes:
+      ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+"""
+
+RETURN = r"""
+msg:
+  description: Message with
action result or error description. + returned: always + type: str + sample: "Action was successful" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.validation import ( + check_mutually_exclusive, + check_required_arguments +) +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def set_manager_attributes(self, command): + + result = {} + required_arg_spec = {'manager_attributes': {'required': True}} + + try: + check_required_arguments(required_arg_spec, self.module.params) + + except TypeError as e: + msg = to_native(e) + self.module.fail_json(msg=msg) + + key = "Attributes" + command_manager_attributes_uri_map = { + "SetManagerAttributes": self.manager_uri, + "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1", + "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1" + } + manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri) + + attributes = self.module.params['manager_attributes'] + + attrs_to_patch = {} + attrs_skipped = {} + attrs_bad = {} # Store attrs which were not found in the system + + # Search for key entry and extract URI from it + response = self.get_request(self.root_uri + manager_uri + "/" + key) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, + 'msg': "%s: Key %s not found" % (command, key), + 'warning': ""} + + for attr_name, attr_value in attributes.items(): + # Check if attribute exists + if attr_name not in data['Attributes']: + # Skip and proceed to next attribute if this isn't valid + attrs_bad.update({attr_name: attr_value}) + continue + + # Find out if value is already set to what we want. If yes, exclude + # those attributes + if data['Attributes'][attr_name] == attr_value: + attrs_skipped.update({attr_name: attr_value}) + else: + attrs_to_patch.update({attr_name: attr_value}) + + warning = "" + if attrs_bad: + warning = "Incorrect attributes %s" % (attrs_bad) + + if not attrs_to_patch: + return {'ret': True, 'changed': False, + 'msg': "No changes made. 
Manager attributes already set.", + 'warning': warning} + + payload = {"Attributes": attrs_to_patch} + response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch), + 'warning': warning} + + +CATEGORY_COMMANDS_ALL = { + "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", + "SetSystemAttributes"] +} + + +# list of mutually exclusive commands for a category +CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { + "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", + "SetSystemAttributes"]] +} + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + manager_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # check for mutually exclusive commands + try: + # check_mutually_exclusive accepts a single list or list of lists that + # are groups of terms that should be mutually exclusive with one another + # and checks that against a dictionary + check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], + dict.fromkeys(command_list, True)) + + except TypeError as e: + module.fail_json(msg=to_native(e)) + + # Organize by Categories / Commands + + if category == "Manager": + # execute only if we find a Manager resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: + result = rf_utils.set_manager_attributes(command) + + # Return data back or fail with proper message + if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py deleted file mode 120000 index 09928c5e9e..0000000000 --- a/plugins/modules/idrac_redfish_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/redfish/idrac_redfish_info.py \ No newline at end of file diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py new file mode 100644 index 0000000000..309cefc15f --- /dev/null +++ b/plugins/modules/idrac_redfish_info.py @@ -0,0 +1,255 @@ +#!/usr/bin/python + +# Copyright (c) 2019 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: idrac_redfish_info +short_description: Gather PowerEdge server information through iDRAC using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to get information back. + - For use with Dell EMC iDRAC operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.redfish +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + category: + required: true + description: + - Category to execute on iDRAC. + type: str + command: + required: true + description: + - List of commands to execute on iDRAC. + - V(GetManagerAttributes) returns the list of dicts containing iDRAC, LifecycleController and System attributes. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iDRAC. + type: str + username: + description: + - Username for authenticating to iDRAC. + type: str + password: + description: + - Password for authenticating to iDRAC. + type: str + auth_token: + description: + - Security token for authenticating to iDRAC. + type: str + version_added: 2.3.0 + timeout: + description: + - Timeout in seconds for HTTP requests to iDRAC. 
+ default: 10 + type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 + +author: "Jose Delarosa (@jose-delarosa)" +""" + +EXAMPLES = r""" +- name: Get Manager attributes with a default of 20 seconds + community.general.idrac_redfish_info: + category: Manager + command: GetManagerAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + +# Examples to display the value of all or a single iDRAC attribute +- name: Store iDRAC attributes as a fact variable + ansible.builtin.set_fact: + idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') + | list | first }}" + +- name: Display all iDRAC attributes + ansible.builtin.debug: + var: idrac_attributes + +- name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute + ansible.builtin.debug: + var: idrac_attributes['Syslog.1.SysLogEnable'] + +# Examples to display the value of all or a single LifecycleController attribute +- name: Store LifecycleController attributes as a fact variable + ansible.builtin.set_fact: + lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') + | list | first }}" + +- name: Display LifecycleController attributes + ansible.builtin.debug: + var: lc_attributes + +- name: Display the value of 'CollectSystemInventoryOnRestart' attribute + ansible.builtin.debug: + var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] + +# Examples to display the value of all or a single System attribute +- name: Store System attributes as a fact variable + ansible.builtin.set_fact: + system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') + | list | first }}" + +- name: Display System attributes + ansible.builtin.debug: + var: system_attributes + +- name: Display the value of 'PSRedPolicy' + ansible.builtin.debug: + var: system_attributes['ServerPwr.1.PSRedPolicy'] +""" + +RETURN = r""" +msg: + description: Different results depending on task. 
+ returned: always + type: dict + sample: List of Manager attributes +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def get_manager_attributes(self): + result = {} + manager_attributes = [] + properties = ['Attributes', 'Id'] + + response = self.get_request(self.root_uri + self.manager_uri) + + if response['ret'] is False: + return response + data = response['data'] + + # Manager attributes are supported as part of iDRAC OEM extension + # Attributes are supported only on iDRAC9 + try: + for members in data['Links']['Oem']['Dell']['DellAttributes']: + attributes_uri = members['@odata.id'] + + response = self.get_request(self.root_uri + attributes_uri) + if response['ret'] is False: + return response + data = response['data'] + + attributes = {} + for prop in properties: + if prop in data: + attributes[prop] = data.get(prop) + + if attributes: + manager_attributes.append(attributes) + + result['ret'] = True + + except (AttributeError, KeyError) as e: + result['ret'] = False + result['msg'] = "Failed to find attribute/key: " + str(e) + + result["entries"] = manager_attributes + return result + + +CATEGORY_COMMANDS_ALL = { + "Manager": ["GetManagerAttributes"] +} + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True, + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + + if category == "Manager": + # execute only if we find a Manager resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "GetManagerAttributes": + result = rf_utils.get_manager_attributes() + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + module.exit_json(redfish_facts=result) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ilo_redfish_command.py b/plugins/modules/ilo_redfish_command.py new file mode 100644 index 0000000000..7f20a45631 --- /dev/null +++ b/plugins/modules/ilo_redfish_command.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: ilo_redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +version_added: 6.6.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. +attributes: + check_mode: + support: none + diff_mode: + support: none +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + choices: ['Systems'] + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + required: false + description: + - Username for authenticating to iLO. + type: str + password: + required: false + description: + - Password for authenticating to iLO. + type: str + auth_token: + required: false + description: + - Security token for authenticating to iLO. + type: str + timeout: + required: false + description: + - Timeout in seconds for HTTP requests to iLO. + default: 60 + type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 +author: + - Varni H P (@varini-hp) +""" + +EXAMPLES = r""" +- name: Wait for iLO Reboot Completion + community.general.ilo_redfish_command: + category: Systems + command: WaitforiLORebootCompletion + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +ilo_redfish_command: + description: Returns the status of the operation performed on the iLO. + type: dict + contains: + WaitforiLORebootCompletion: + description: Returns the output msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Return V(true)/V(false) based on whether the operation was performed successfully. + type: bool + msg: + description: Status of the operation performed on the iLO. 
+ type: str + returned: always +""" + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["WaitforiLORebootCompletion"] +} + +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + timeout=dict(type="int", default=60), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native( + "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json( + msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + if category == "Systems": + # execute only if we find a System resource + + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "WaitforiLORebootCompletion": + result[command] = rf_utils.wait_for_ilo_reboot_completion() + + # Return data back or fail with proper message + if not result[command]['ret']: + module.fail_json(msg=result) + + changed = result[command].get('changed', False) + module.exit_json(ilo_redfish_command=result, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py new file mode 100644 index 0000000000..5cd441827f --- /dev/null +++ b/plugins/modules/ilo_redfish_config.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: ilo_redfish_config +short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions +version_added: 4.2.0 +description: + - Builds Redfish URIs locally and sends them to iLO to set or update a configuration attribute. 
+ - For use with HPE iLO operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + type: str + description: + - Command category to execute on iLO. + choices: ['Manager'] + command: + required: true + description: + - List of commands to execute on iLO. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iLO. + type: str + username: + description: + - Username for authenticating to iLO. + type: str + password: + description: + - Password for authenticating to iLO. + type: str + auth_token: + description: + - Security token for authenticating to iLO. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to iLO. + default: 10 + type: int + attribute_name: + required: true + description: + - Name of the attribute to be configured. + type: str + attribute_value: + required: false + description: + - Value of the attribute to be configured. + type: str + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 +author: + - "Bhavya B (@bhavya06)" +""" + +EXAMPLES = r""" +- name: Disable WINS Registration + community.general.ilo_redfish_config: + category: Manager + command: SetWINSReg + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: WINSRegistration + +- name: Set Time Zone + community.general.ilo_redfish_config: + category: Manager + command: SetTimeZone + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: TimeZone + attribute_value: Chennai + +- name: Set NTP Servers + community.general.ilo_redfish_config: + category: Manager + command: SetNTPServers + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: StaticNTPServers + attribute_value: X.X.X.X +""" + +RETURN = r""" +msg: + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" +""" + +CATEGORY_COMMANDS_ALL = { + "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] +} + +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True, choices=list( + CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + attribute_name=dict(required=True), + attribute_value=dict(type='str'), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + creds = {"user": module.params['username'], + "pswd": module.params['password'], + "token": module.params['auth_token']} + + timeout = module.params['timeout'] + + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + mgr_attributes = {'mgr_attr_name': module.params['attribute_name'], + 'mgr_attr_value': module.params['attribute_value']} + changed = False + + offending = [ + cmd for cmd in command_list if cmd not in CATEGORY_COMMANDS_ALL[category]] + + if offending: + module.fail_json(msg=to_native("Invalid Command(s): '%s'. Allowed Commands = %s" % ( + offending, CATEGORY_COMMANDS_ALL[category]))) + + if category == "Manager": + resource = rf_utils._find_managers_resource() + if not resource['ret']: + module.fail_json(msg=to_native(resource['msg'])) + + dispatch = dict( + SetTimeZone=rf_utils.set_time_zone, + SetDNSserver=rf_utils.set_dns_server, + SetDomainName=rf_utils.set_domain_name, + SetNTPServers=rf_utils.set_ntp_server, + SetWINSReg=rf_utils.set_wins_registration + ) + + for command in command_list: + result[command] = dispatch[command](mgr_attributes) + if 'changed' in result[command]: + changed |= result[command]['changed'] + + module.exit_json(ilo_redfish_config=result, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py new file mode 100644 index 0000000000..6eb7d7b3f4 --- /dev/null +++ b/plugins/modules/ilo_redfish_info.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: ilo_redfish_info +short_description: Gathers server information through iLO using Redfish APIs +version_added: 4.2.0 +description: + - Builds Redfish URIs locally and sends them to iLO to get information back. + - For use with HPE iLO operations that require Redfish OEM extensions. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.redfish +options: + category: + required: true + description: + - List of categories to execute on iLO. + type: list + elements: str + command: + required: true + description: + - List of commands to execute on iLO. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iLO. + type: str + username: + description: + - Username for authenticating to iLO. + type: str + password: + description: + - Password for authenticating to iLO. + type: str + auth_token: + description: + - Security token for authenticating to iLO. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to iLO. + default: 10 + type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 +author: + - "Bhavya B (@bhavya06)" +""" + +EXAMPLES = r""" +- name: Get iLO Sessions + community.general.ilo_redfish_info: + category: Sessions + command: GetiLOSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result_sessions +""" + +RETURN = r""" +ilo_redfish_info: + description: Returns iLO sessions. + type: dict + contains: + GetiLOSessions: + description: Returns the iLO session msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Check variable to see if the information was successfully retrieved. + type: bool + msg: + description: Information of all active iLO sessions. + type: list + elements: dict + contains: + Description: + description: Provides a description of the resource. + type: str + Id: + description: The sessionId. + type: str + Name: + description: The name of the resource. + type: str + UserName: + description: Name to use to log in to the management processor. 
+ type: str + returned: always +""" + +CATEGORY_COMMANDS_ALL = { + "Sessions": ["GetiLOSessions"] +} + +CATEGORY_COMMANDS_DEFAULT = { + "Sessions": "GetiLOSessions" +} + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC + + +def main(): + result = {} + category_list = [] + argument_spec = dict( + category=dict(required=True, type='list', elements='str'), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True + ) + + creds = {"user": module.params['username'], + "pswd": module.params['password'], + "token": module.params['auth_token']} + + timeout = module.params['timeout'] + + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + + # Build Category list + if "all" in module.params['category']: + for entry in CATEGORY_COMMANDS_ALL: + category_list.append(entry) + else: + # one or more categories specified + category_list = module.params['category'] + + for category in category_list: + command_list = [] + # Build Command list for each Category + if category in CATEGORY_COMMANDS_ALL: + if not module.params['command']: + # True if we don't specify a command --> use default + command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) + elif "all" in module.params['command']: + for entry in CATEGORY_COMMANDS_ALL[category]: + command_list.append(entry) + # one or more commands + else: + command_list = module.params['command'] + # Verify that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg="Invalid Command: %s" % cmd) + else: + # Fail if even one category given is invalid + module.fail_json(msg="Invalid Category: %s" % category) + + # Organize by Categories / Commands + if category == "Sessions": + for command in command_list: + if command == "GetiLOSessions": + result[command] = rf_utils.get_ilo_sessions() + + module.exit_json(ilo_redfish_info=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py deleted file mode 120000 index 4e9de42e1e..0000000000 --- a/plugins/modules/imc_rest.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/imc/imc_rest.py \ No newline at end of file diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py new file mode 100644 index 0000000000..ef543c62e0 --- /dev/null +++ b/plugins/modules/imc_rest.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# Copyright (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: imc_rest +short_description: Manage Cisco IMC hardware through its REST API +description: + - Provides direct access to the Cisco IMC REST API. 
+ - Perform any configuration changes and actions that the Cisco IMC supports. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). +author: + - Dag Wieers (@dagwieers) +requirements: + - lxml + - xmljson >= 0.1.8 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + hostname: + description: + - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. + required: true + aliases: [host, ip] + type: str + username: + description: + - Username used to login to the switch. + default: admin + aliases: [user] + type: str + password: + description: + - The password to use for authentication. + default: password + type: str + path: + description: + - Name of the absolute path of the filename that includes the body of the http request being sent to the Cisco IMC REST + API. + - Parameter O(path) is mutual exclusive with parameter O(content). + aliases: ['src', 'config_file'] + type: path + content: + description: + - When used instead of O(path), sets the content of the API requests directly. + - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. + - You can collate multiple IMC XML fragments and they are processed sequentially in a single stream, the Cisco IMC output + is subsequently merged. + - Parameter O(content) is mutual exclusive with parameter O(path). + type: str + protocol: + description: + - Connection protocol to use. + default: https + choices: [http, https] + type: str + timeout: + description: + - The socket level timeout in seconds. + - This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module + fails with a C(Connection failure) indicating that C(The read operation timed out). + default: 60 + type: int + validate_certs: + description: + - If V(false), SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. + type: bool + default: true +notes: + - The XML fragments do not need an authentication cookie, this is injected by the module automatically. + - The Cisco IMC XML output is being translated to JSON using the Cobra convention. + - Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous + configuration. As a result, this module always reports a change on subsequent runs. In case this behaviour is fixed in + a future update to Cisco IMC, this module is meant to automatically adapt. + - If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some + XML fragments can take longer than the default timeout. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). +""" + +EXAMPLES = r""" +- name: Power down server + community.general.imc_rest: + hostname: '{{ imc_hostname }}' + username: '{{ imc_username }}' + password: '{{ imc_password }}' + validate_certs: false # only do this when you trust the network! 
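+    # `content` carries the raw IMC XML request body; it is POSTed as-is to the
+    # controller's /nuova endpoint, and the login cookie is injected automatically.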
+ content: | + + + + delegate_to: localhost + +- name: Configure IMC using multiple XML fragments + community.general.imc_rest: + hostname: '{{ imc_hostname }}' + username: '{{ imc_username }}' + password: '{{ imc_password }}' + validate_certs: false # only do this when you trust the network! + timeout: 120 + content: | + + + + + + + + + + delegate_to: localhost + +- name: Enable PXE boot and power-cycle server + community.general.imc_rest: + hostname: '{{ imc_hostname }}' + username: '{{ imc_username }}' + password: '{{ imc_password }}' + validate_certs: false # only do this when you trust the network! + content: | + + + + + + + + + + delegate_to: localhost + +- name: Reconfigure IMC to boot from storage + community.general.imc_rest: + hostname: '{{ imc_host }}' + username: '{{ imc_username }}' + password: '{{ imc_password }}' + validate_certs: false # only do this when you trust the network! + content: | + + + + delegate_to: localhost + +- name: Add customer description to server + community.general.imc_rest: + hostname: '{{ imc_host }}' + username: '{{ imc_username }}' + password: '{{ imc_password }}' + validate_certs: false # only do this when you trust the network! + content: | + + + + delegate_to: localhost + +- name: Disable HTTP and increase session timeout to max value 10800 secs + community.general.imc_rest: + hostname: '{{ imc_host }}' + username: '{{ imc_username }}' + password: '{{ imc_password }}' + validate_certs: false # only do this when you trust the network! + timeout: 120 + content: | + + + + + + + + delegate_to: localhost +""" + +RETURN = r""" +aaLogin: + description: Cisco IMC XML output for the login, translated to JSON using Cobra convention. + returned: success + type: dict + sample: | + "attributes": { + "cookie": "", + "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a", + "outPriv": "admin", + "outRefreshPeriod": "600", + "outSessionId": "114", + "outVersion": "2.0(13e)", + "response": "yes" + } +configConfMo: + description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention. + returned: success + type: dict + sample: | +elapsed: + description: Elapsed time in seconds. + returned: always + type: int + sample: 31 +response: + description: HTTP response message, including content length. + returned: always + type: str + sample: OK (729 bytes) +status: + description: The HTTP response status code. + returned: always + type: dict + sample: 200 +error: + description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention. + returned: failed + type: dict + sample: | + "attributes": { + "cookie": "", + "errorCode": "ERR-xml-parse-error", + "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ", + "invocationResult": "594", + "response": "yes" + } +error_code: + description: Cisco IMC error code. + returned: failed + type: str + sample: ERR-xml-parse-error +error_text: + description: Cisco IMC error message. + returned: failed + type: str + sample: | + XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. +input: + description: RAW XML input sent to the Cisco IMC, causing the error. + returned: failed + type: str + sample: | + +output: + description: RAW XML output received from the Cisco IMC, with error details. 
+ returned: failed + type: str + sample: > + +""" + +import os +import traceback +from itertools import zip_longest + +LXML_ETREE_IMP_ERR = None +try: + import lxml.etree + HAS_LXML_ETREE = True +except ImportError: + LXML_ETREE_IMP_ERR = traceback.format_exc() + HAS_LXML_ETREE = False + +XMLJSON_COBRA_IMP_ERR = None +try: + from xmljson import cobra + HAS_XMLJSON_COBRA = True +except ImportError: + XMLJSON_COBRA_IMP_ERR = traceback.format_exc() + HAS_XMLJSON_COBRA = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + + +def imc_response(module, rawoutput, rawinput=''): + ''' Handle IMC returned data ''' + xmloutput = lxml.etree.fromstring(rawoutput) + result = cobra.data(xmloutput) + + # Handle errors + if xmloutput.get('errorCode') and xmloutput.get('errorDescr'): + if rawinput: + result['input'] = rawinput + result['output'] = rawoutput + result['error_code'] = xmloutput.get('errorCode') + result['error_text'] = xmloutput.get('errorDescr') + module.fail_json(msg='Request failed: %(error_text)s' % result, **result) + + return result + + +def logout(module, url, cookie, timeout): + ''' Perform a logout, if needed ''' + data = '' % (cookie, cookie) + resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout) + + +def merge(one, two): + ''' Merge two complex nested datastructures into one''' + if isinstance(one, dict) and isinstance(two, dict): + copy = dict(one) + copy.update({key: merge(one.get(key, None), two[key]) for key in two}) + return copy + + elif isinstance(one, list) and isinstance(two, list): + return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)] + + return one if two is None else two + + +def main(): + module = AnsibleModule( + argument_spec=dict( + hostname=dict(type='str', required=True, aliases=['host', 'ip']), + username=dict(type='str', default='admin', aliases=['user']), + password=dict(type='str', default='password', no_log=True), + content=dict(type='str'), + path=dict(type='path', aliases=['config_file', 'src']), + protocol=dict(type='str', default='https', choices=['http', 'https']), + timeout=dict(type='int', default=60), + validate_certs=dict(type='bool', default=True), + ), + supports_check_mode=True, + mutually_exclusive=[['content', 'path']], + ) + + if not HAS_LXML_ETREE: + module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) + + if not HAS_XMLJSON_COBRA: + module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR) + + hostname = module.params['hostname'] + username = module.params['username'] + password = module.params['password'] + + content = module.params['content'] + path = module.params['path'] + + protocol = module.params['protocol'] + timeout = module.params['timeout'] + + result = dict( + failed=False, + changed=False, + ) + + # Report missing file + file_exists = False + if path: + if os.path.isfile(path): + file_exists = True + else: + module.fail_json(msg='Cannot find/access path:\n%s' % path) + + start = now() + + # Perform login first + url = '%s://%s/nuova' % (protocol, hostname) + data = '' % (username, password) + resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) + if resp is None or auth['status'] != 200: + result['elapsed'] = (now() - start).seconds + module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result) + 
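+    # imc_response() converts the aaaLogin XML reply to a dict using the Cobra
+    # convention; the session cookie needed for every follow-up request ends up
+    # under result['aaaLogin']['attributes']['outCookie'].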
result.update(imc_response(module, resp.read())) + + # Store cookie for future requests + cookie = '' + try: + cookie = result['aaaLogin']['attributes']['outCookie'] + except Exception: + module.fail_json(msg='Could not find cookie in output', **result) + + try: + # Prepare request data + if content: + rawdata = content + elif file_exists: + with open(path, 'r') as config_object: + rawdata = config_object.read() + + # Wrap the XML documents in a element + xmldata = lxml.etree.fromstring('%s' % rawdata.replace('\n', '')) + + # Handle each XML document separately in the same session + for xmldoc in list(xmldata): + if xmldoc.tag is lxml.etree.Comment: + continue + # Add cookie to XML + xmldoc.set('cookie', cookie) + data = lxml.etree.tostring(xmldoc) + + # Perform actual request + resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) + if resp is None or info['status'] != 200: + result['elapsed'] = (now() - start).seconds + module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) + + # Merge results with previous results + rawoutput = resp.read() + result = merge(result, imc_response(module, rawoutput, rawinput=data)) + result['response'] = info['msg'] + result['status'] = info['status'] + + # Check for any changes + # NOTE: Unfortunately IMC API always report status as 'modified' + xmloutput = lxml.etree.fromstring(rawoutput) + results = xmloutput.xpath('/configConfMo/outConfig/*/@status') + result['changed'] = ('modified' in results) + + # Report success + result['elapsed'] = (now() - start).seconds + module.exit_json(**result) + finally: + logout(module, url, cookie, timeout) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py deleted file mode 120000 index 487dbbbf58..0000000000 --- a/plugins/modules/imgadm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/smartos/imgadm.py \ No newline at end of file diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py new file mode 100644 index 0000000000..da016f8597 --- /dev/null +++ b/plugins/modules/imgadm.py @@ -0,0 +1,310 @@ +#!/usr/bin/python + +# Copyright (c) 2016, 2017 Jasper Lievisse Adriaanse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: imgadm +short_description: Manage SmartOS images +description: + - Manage SmartOS virtual machine images through imgadm(1M). +author: Jasper Lievisse Adriaanse (@jasperla) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + force: + required: false + type: bool + description: + - Force a given operation (where supported by imgadm(1M)). + pool: + required: false + default: zones + description: + - The zpool to import to or delete images from. + type: str + source: + required: false + description: + - URI for the image source. + type: str + state: + required: true + choices: [present, absent, deleted, imported, updated, vacuumed] + description: + - State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent). + When set to V(vacuumed) and O(uuid=*), it removes all unused images. + type: str + + type: + required: false + choices: [imgapi, docker, dsapi] + default: imgapi + description: + - Type for image sources. 
+ type: str + + uuid: + required: false + description: + - Image UUID. Can either be a full UUID or V(*) for all images. + type: str +""" + +EXAMPLES = r""" +- name: Import an image + community.general.imgadm: + uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' + state: imported + +- name: Delete an image + community.general.imgadm: + uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' + state: deleted + +- name: Update all images + community.general.imgadm: + uuid: '*' + state: updated + +- name: Update a single image + community.general.imgadm: + uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' + state: updated + +- name: Add a source + community.general.imgadm: + source: 'https://datasets.project-fifo.net' + state: present + +- name: Add a Docker source + community.general.imgadm: + source: 'https://docker.io' + type: docker + state: present + +- name: Remove a source + community.general.imgadm: + source: 'https://docker.io' + state: absent +""" + +RETURN = r""" +source: + description: Source that is managed. + returned: When not managing an image. + type: str + sample: https://datasets.project-fifo.net +uuid: + description: UUID for an image operated on. + returned: When not managing an image source. + type: str + sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 +state: + description: State of the target, after execution. + returned: success + type: str + sample: 'present' +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + +# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a +# -E option to return any errors in JSON, the generated JSON does not play well +# with the JSON parsers of Python. The returned message contains '\n' as part of +# the stacktrace, which breaks the parsers. + + +class Imgadm(object): + def __init__(self, module): + self.module = module + self.params = module.params + self.cmd = module.get_bin_path('imgadm', required=True) + self.changed = False + self.uuid = module.params['uuid'] + + # Since there are a number of (natural) aliases, prevent having to look + # them up every time we operate on `state`. + if self.params['state'] in ['present', 'imported', 'updated']: + self.present = True + else: + self.present = False + + # Perform basic UUID validation upfront. + if self.uuid and self.uuid != '*': + if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE): + module.fail_json(msg='Provided value for uuid option is not a valid UUID.') + + # Helper method to massage stderr + def errmsg(self, stderr): + match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr) + if match: + return match.groups()[0] + else: + return 'Unexpected failure' + + def update_images(self): + if self.uuid == '*': + cmd = [self.cmd, 'update'] + else: + cmd = [self.cmd, 'update', self.uuid] + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr))) + + # There is no feedback from imgadm(1M) to determine if anything + # was actually changed. So treat this as an 'always-changes' operation. + # Note that 'imgadm -v' produces unparsable JSON... 
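+        # With no machine-readable change report available from the CLI, the
+        # conservative choice is to always report a change.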
+ self.changed = True + + def manage_sources(self): + force = self.params['force'] + source = self.params['source'] + imgtype = self.params['type'] + + cmd = [self.cmd, 'sources'] + + if force: + cmd = cmd + ['-f'] + + if self.present: + cmd = cmd + ['-a', source, '-t', imgtype] + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr))) + + # Check the various responses. + # Note that trying to add a source with the wrong type is handled + # above as it results in a non-zero status. + + regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source) + if re.match(regex, stdout): + self.changed = False + + regex = 'Added "%s" image source "%s"' % (imgtype, source) + if re.match(regex, stdout): + self.changed = True + else: + # Type is ignored by imgadm(1M) here + cmd += ' -d %s' % source + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr))) + + regex = 'Do not have image source "%s", no change' % source + if re.match(regex, stdout): + self.changed = False + + regex = 'Deleted ".*" image source "%s"' % source + if re.match(regex, stdout): + self.changed = True + + def manage_images(self): + pool = self.params['pool'] + state = self.params['state'] + + if state == 'vacuumed': + # Unconditionally pass '--force', otherwise we're prompted with 'y/N' + cmd = [self.cmd, 'vacuum', '-f'] + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr))) + else: + if stdout == '': + self.changed = False + else: + self.changed = True + if self.present: + cmd = [self.cmd, 'import', '-P', pool, '-q'] + ([self.uuid] if self.uuid else []) + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr))) + + regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid) + if re.match(regex, stdout): + self.changed = False + + regex = '.*ActiveImageNotFound.*' + if re.match(regex, stderr): + self.changed = False + + regex = 'Imported image {0}.*'.format(self.uuid) + if re.match(regex, stdout.splitlines()[-1]): + self.changed = True + else: + cmd = [self.cmd, 'delete', '-P', pool] + ([self.uuid] if self.uuid else []) + (rc, stdout, stderr) = self.module.run_command(cmd) + + regex = '.*ImageNotInstalled.*' + if re.match(regex, stderr): + # Even if the 'rc' was non-zero (3), we handled the situation + # in order to determine if there was a change. + self.changed = False + + regex = 'Deleted image {0}'.format(self.uuid) + if re.match(regex, stdout): + self.changed = True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + force=dict(type='bool'), + pool=dict(default='zones'), + source=dict(), + state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']), + type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']), + uuid=dict() + ), + # This module relies largely on imgadm(1M) to enforce idempotency, which does not + # provide a "noop" (or equivalent) mode to do a dry-run. + supports_check_mode=False, + ) + + imgadm = Imgadm(module) + + uuid = module.params['uuid'] + source = module.params['source'] + state = module.params['state'] + + result = {'state': state} + + # Either manage sources or images. 
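+    # Tasks that pass `source` manage image sources; everything else falls
+    # through to image management keyed on `uuid` and `state`.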
+ if source: + result['source'] = source + imgadm.manage_sources() + else: + result['uuid'] = uuid + + if state == 'updated': + imgadm.update_images() + else: + # Make sure operate on a single image for the following actions + if (uuid == '*') and (state != 'vacuumed'): + module.fail_json(msg='Can only specify uuid as "*" when updating image(s)') + imgadm.manage_images() + + result['changed'] = imgadm.changed + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py deleted file mode 120000 index dbc7da6c47..0000000000 --- a/plugins/modules/infinity.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/infinity/infinity.py \ No newline at end of file diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py new file mode 100644 index 0000000000..7f568faa0d --- /dev/null +++ b/plugins/modules/infinity.py @@ -0,0 +1,585 @@ +#!/usr/bin/python + +# Copyright (c) 2017, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: infinity +short_description: Manage Infinity IPAM using Rest API +description: + - Manage Infinity IPAM using REST API. +author: + - Meirong Liu (@MeganLiu) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + server_ip: + description: + - Infinity server_ip with IP address. + type: str + required: true + username: + description: + - Username to access Infinity. + - The user must have REST API privileges. + type: str + required: true + password: + description: + - Infinity password. + type: str + required: true + action: + description: + - Action to perform. + type: str + required: true + choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip] + network_id: + description: + - Network ID. + type: str + ip_address: + description: + - IP Address for a reservation or a release. + type: str + network_address: + description: + - Network address with CIDR format (for example V(192.168.310.0)). + type: str + network_size: + description: + - Network bitmask (for example V(255.255.255.220) or CIDR format V(/26)). + type: str + network_name: + description: + - The name of a network. + type: str + network_location: + description: + - The parent network ID for a given network. + type: int + default: -1 + network_type: + description: + - Network type defined by Infinity. + type: str + choices: [lan, shared_lan, supernet] + default: lan + network_family: + description: + - Network family defined by Infinity, for example V(IPv4), V(IPv6) and V(Dual stack). + type: str + choices: ['4', '6', dual] + default: '4' +""" + +EXAMPLES = r""" +- hosts: localhost + connection: local + strategy: debug + tasks: + - name: Reserve network into Infinity IPAM + community.general.infinity: + server_ip: 80.75.107.12 + username: username + password: password + action: reserve_network + network_name: reserve_new_ansible_network + network_family: 4 + network_type: lan + network_id: 1201 + network_size: /28 + register: infinity +""" + +RETURN = r""" +network_id: + description: ID for a given network. + returned: success + type: str + sample: '1501' +ip_info: + description: + - When reserve next available IP address from a network, the IP address info is returned. 
+ - Please note that the value is a B(string) containing JSON data. + returned: success + type: str + sample: >- + { + "address": "192.168.10.3", + "hostname": "", + "FQDN": "", + "domainname": "", + "id": 3229 + } +network_info: + description: + - When reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved + network is returned. + - Please note that the value is a B(string) containing JSON data. + returned: success + type: str + sample: >- + { + "network_address": "192.168.10.32/28", + "network_family": "4", + "network_id": 3102, + "network_size": null, + "description": null, + "network_location": "3085", + "ranges": {"id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null}, + "network_type": "lan", + "network_name": "'reserve_new_ansible_network'" + } +""" + + +from ansible.module_utils.basic import AnsibleModule, json +from ansible.module_utils.urls import open_url + + +class Infinity(object): + """ + Class for manage REST API calls with the Infinity. + """ + + def __init__(self, module, server_ip, username, password): + self.module = module + self.auth_user = username + self.auth_pass = password + self.base_url = "https://%s/rest/v1/" % (str(server_ip)) + + def _get_api_call_ansible_handler( + self, + method='get', + resource_url='', + stat_codes=None, + params=None, + payload_data=None): + """ + Perform the HTTPS request by using ansible get/delete method + """ + stat_codes = [200] if stat_codes is None else stat_codes + request_url = str(self.base_url) + str(resource_url) + response = None + headers = {'Content-Type': 'application/json'} + if not request_url: + self.module.exit_json( + msg="When sending Rest api call , the resource URL is empty, please check.") + if payload_data and not isinstance(payload_data, str): + payload_data = json.dumps(payload_data) + response_raw = open_url( + str(request_url), + method=method, + timeout=20, + headers=headers, + url_username=self.auth_user, + url_password=self.auth_pass, + validate_certs=False, + force_basic_auth=True, + data=payload_data) + + response = response_raw.read() + payload = '' + if response_raw.code not in stat_codes: + self.module.exit_json( + changed=False, + meta=" openurl response_raw.code show error and error code is %r" % + (response_raw.code)) + else: + if isinstance(response, str) and len(response) > 0: + payload = response + elif method.lower() == 'delete' and response_raw.code == 204: + payload = 'Delete is done.' 
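+        # `payload` is a plain string (the raw body or the message above) by the
+        # time it reaches the guard below, so the dict/"text" branch is a
+        # defensive leftover and does not trigger in practice.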
+ if isinstance(payload, dict) and "text" in payload: + self.module.exit_json( + changed=False, + meta="when calling rest api, returned data is not json ") + raise Exception(payload["text"]) + return payload + + # --------------------------------------------------------------------------- + # get_network() + # --------------------------------------------------------------------------- + def get_network(self, network_id, network_name, limit=-1): + """ + Search network_name inside Infinity by using rest api + Network id or network_name needs to be provided + return the details of a given with given network_id or name + """ + if network_name is None and network_id is None: + self.module.exit_json( + msg="You must specify one of the options 'network_name' or 'network_id'.") + method = "get" + resource_url = '' + params = {} + response = None + if network_id: + resource_url = "networks/" + str(network_id) + response = self._get_api_call_ansible_handler(method, resource_url) + if network_id is None and network_name: + method = "get" + resource_url = "search" + params = {"query": json.dumps( + {"name": network_name, "type": "network"})} + response = self._get_api_call_ansible_handler( + method, resource_url, payload_data=json.dumps(params)) + if response and isinstance(response, str): + response = json.loads(response) + if response and isinstance(response, list) and len( + response) > 1 and limit == 1: + response = response[0] + response = json.dumps(response) + return response + + # --------------------------------------------------------------------------- + # get_network_id() + # --------------------------------------------------------------------------- + def get_network_id(self, network_name="", network_type='lan'): + """ + query network_id from Infinity via rest api based on given network_name + """ + method = 'get' + resource_url = 'search' + response = None + if network_name is None: + self.module.exit_json( + msg="You must specify the option 'network_name'") + params = {"query": json.dumps( + {"name": network_name, "type": "network"})} + response = self._get_api_call_ansible_handler( + method, resource_url, payload_data=json.dumps(params)) + network_id = "" + if response and isinstance(response, str): + response = json.loads(response) + if response and isinstance(response, list): + response = response[0] + network_id = response['id'] + return network_id + + # --------------------------------------------------------------------------- + # reserve_next_available_ip() + # --------------------------------------------------------------------------- + def reserve_next_available_ip(self, network_id=""): + """ + Reserve ip address via Infinity by using rest api + network_id: the id of the network that users would like to reserve network from + return the next available ip address from that given network + """ + method = "post" + resource_url = '' + response = None + ip_info = '' + if not network_id: + self.module.exit_json( + msg="You must specify the option 'network_id'.") + if network_id: + resource_url = "networks/" + str(network_id) + "/reserve_ip" + response = self._get_api_call_ansible_handler(method, resource_url) + if response and response.find( + "[") >= 0 and response.find("]") >= 0: + start_pos = response.find("{") + end_pos = response.find("}") + ip_info = response[start_pos: (end_pos + 1)] + return ip_info + + # ------------------------- + # release_ip() + # ------------------------- + def release_ip(self, network_id="", ip_address=""): + """ + Reserve ip address via Infinity by 
using rest api + """ + method = "get" + resource_url = '' + response = None + if ip_address is None or network_id is None: + self.module.exit_json( + msg="You must specify those two options: 'network_id' and 'ip_address'.") + + resource_url = "networks/" + str(network_id) + "/children" + response = self._get_api_call_ansible_handler(method, resource_url) + if not response: + self.module.exit_json( + msg="There is an error in release ip %s from network %s." % + (ip_address, network_id)) + + ip_list = json.loads(response) + ip_idlist = [] + for ip_item in ip_list: + ip_id = ip_item['id'] + ip_idlist.append(ip_id) + deleted_ip_id = '' + for ip_id in ip_idlist: + ip_response = '' + resource_url = "ip_addresses/" + str(ip_id) + ip_response = self._get_api_call_ansible_handler( + method, + resource_url, + stat_codes=[200]) + if ip_response and json.loads( + ip_response)['address'] == str(ip_address): + deleted_ip_id = ip_id + break + if deleted_ip_id: + method = 'delete' + resource_url = "ip_addresses/" + str(deleted_ip_id) + response = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[204]) + else: + self.module.exit_json( + msg=" When release ip, could not find the ip address %r from the given network %r' ." % + (ip_address, network_id)) + + return response + + # ------------------- + # delete_network() + # ------------------- + def delete_network(self, network_id="", network_name=""): + """ + delete network from Infinity by using rest api + """ + method = 'delete' + resource_url = '' + response = None + if network_id is None and network_name is None: + self.module.exit_json( + msg="You must specify one of those options: 'network_id','network_name' .") + if network_id is None and network_name: + network_id = self.get_network_id(network_name=network_name) + if network_id: + resource_url = "networks/" + str(network_id) + response = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[204]) + return response + + # reserve_network() + # --------------------------------------------------------------------------- + def reserve_network(self, network_id="", + reserved_network_name="", reserved_network_description="", + reserved_network_size="", reserved_network_family='4', + reserved_network_type='lan', reserved_network_address="",): + """ + Reserves the first available network of specified size from a given supernet +
+        network_name (required)
+            Name of the network
+
+        description (optional)
+            Free description
+
+        network_family (required)
+            Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual'
+
+        network_address (optional)
+            Address of the new network. If not given, the first network available will be created.
+
+        network_size (required)
+            Size of the new network in /<prefix> notation.
+
+        network_type (required)
+            Type of network. One of 'supernet', 'lan', 'shared_lan'
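+
+        Example call (sketch only; the id and size are made-up values):
+            reserve_network(network_id='1201',
+                            reserved_network_name='new_lan',
+                            reserved_network_size='/28')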
+ + """ + method = 'post' + resource_url = '' + network_info = None + if network_id is None or reserved_network_name is None or reserved_network_size is None: + self.module.exit_json( + msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'") + if network_id: + resource_url = "networks/" + str(network_id) + "/reserve_network" + if not reserved_network_family: + reserved_network_family = '4' + if not reserved_network_type: + reserved_network_type = 'lan' + payload_data = { + "network_name": reserved_network_name, + 'description': reserved_network_description, + 'network_size': reserved_network_size, + 'network_family': reserved_network_family, + 'network_type': reserved_network_type, + 'network_location': int(network_id)} + if reserved_network_address: + payload_data.update({'network_address': reserved_network_address}) + + network_info = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[200, 201], payload_data=payload_data) + + return network_info + + # --------------------------------------------------------------------------- + # release_network() + # --------------------------------------------------------------------------- + def release_network( + self, + network_id="", + released_network_name="", + released_network_type='lan'): + """ + Release the network with name 'released_network_name' from the given supernet network_id + """ + method = 'get' + response = None + if network_id is None or released_network_name is None: + self.module.exit_json( + msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'") + matched_network_id = "" + resource_url = "networks/" + str(network_id) + "/children" + response = self._get_api_call_ansible_handler(method, resource_url) + if not response: + self.module.exit_json( + msg=" there is an error in releasing network %r from network %s." 
% + (network_id, released_network_name)) + if response: + response = json.loads(response) + for child_net in response: + if child_net['network'] and child_net['network']['network_name'] == released_network_name: + matched_network_id = child_net['network']['network_id'] + break + response = None + if matched_network_id: + method = 'delete' + resource_url = "networks/" + str(matched_network_id) + response = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[204]) + else: + self.module.exit_json( + msg=" When release network , could not find the network %r from the given superent %r' " % + (released_network_name, network_id)) + + return response + + # --------------------------------------------------------------------------- + # add_network() + # --------------------------------------------------------------------------- + def add_network( + self, network_name="", network_address="", + network_size="", network_family='4', + network_type='lan', network_location=-1): + """ + add a new LAN network into a given supernet Fusionlayer Infinity via rest api or default supernet + required fields=['network_name', 'network_family', 'network_type', 'network_address','network_size' ] + """ + method = 'post' + resource_url = 'networks' + response = None + if network_name is None or network_address is None or network_size is None: + self.module.exit_json( + msg="You must specify those options 'network_name', 'network_address' and 'network_size'") + + if not network_family: + network_family = '4' + if not network_type: + network_type = 'lan' + if not network_location: + network_location = -1 + payload_data = { + "network_name": network_name, + 'network_address': network_address, + 'network_size': network_size, + 'network_family': network_family, + 'network_type': network_type, + 'network_location': network_location} + response = self._get_api_call_ansible_handler( + method='post', resource_url=resource_url, + stat_codes=[200], payload_data=payload_data) + return response + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_ip=dict(type='str', required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + network_id=dict(type='str'), + ip_address=dict(type='str'), + network_name=dict(type='str'), + network_location=dict(type='int', default=-1), + network_family=dict(type='str', default='4', choices=['4', '6', 'dual']), + network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']), + network_address=dict(type='str'), + network_size=dict(type='str'), + action=dict(type='str', required=True, choices=[ + 'add_network', + 'delete_network', + 'get_network', + 'get_network_id', + 'release_ip', + 'release_network', + 'reserve_network', + 'reserve_next_available_ip', + ],), + ), + required_together=( + ['username', 'password'], + ), + ) + server_ip = module.params["server_ip"] + username = module.params["username"] + password = module.params["password"] + action = module.params["action"] + network_id = module.params["network_id"] + released_ip = module.params["ip_address"] + network_name = module.params["network_name"] + network_family = module.params["network_family"] + network_type = module.params["network_type"] + network_address = module.params["network_address"] + network_size = module.params["network_size"] + network_location = module.params["network_location"] + my_infinity = Infinity(module, server_ip, username, password) + result = '' + if action == "reserve_next_available_ip": + if 
network_id: + result = my_infinity.reserve_next_available_ip(network_id) + if not result: + result = 'There is an error in calling method of reserve_next_available_ip' + module.exit_json(changed=False, meta=result) + module.exit_json(changed=True, meta=result) + elif action == "release_ip": + if network_id and released_ip: + result = my_infinity.release_ip( + network_id=network_id, ip_address=released_ip) + module.exit_json(changed=True, meta=result) + elif action == "delete_network": + result = my_infinity.delete_network( + network_id=network_id, network_name=network_name) + module.exit_json(changed=True, meta=result) + + elif action == "get_network_id": + result = my_infinity.get_network_id( + network_name=network_name, network_type=network_type) + module.exit_json(changed=True, meta=result) + elif action == "get_network": + result = my_infinity.get_network( + network_id=network_id, network_name=network_name) + module.exit_json(changed=True, meta=result) + elif action == "reserve_network": + result = my_infinity.reserve_network( + network_id=network_id, + reserved_network_name=network_name, + reserved_network_size=network_size, + reserved_network_family=network_family, + reserved_network_type=network_type, + reserved_network_address=network_address) + module.exit_json(changed=True, meta=result) + elif action == "release_network": + result = my_infinity.release_network( + network_id=network_id, + released_network_name=network_name, + released_network_type=network_type) + module.exit_json(changed=True, meta=result) + + elif action == "add_network": + result = my_infinity.add_network( + network_name=network_name, + network_location=network_location, + network_address=network_address, + network_size=network_size, + network_family=network_family, + network_type=network_type) + + module.exit_json(changed=True, meta=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/influxdb_database.py b/plugins/modules/influxdb_database.py deleted file mode 120000 index 4e77d360d7..0000000000 --- a/plugins/modules/influxdb_database.py +++ /dev/null @@ -1 +0,0 @@ -./database/influxdb/influxdb_database.py \ No newline at end of file diff --git a/plugins/modules/influxdb_database.py b/plugins/modules/influxdb_database.py new file mode 100644 index 0000000000..600599ab0c --- /dev/null +++ b/plugins/modules/influxdb_database.py @@ -0,0 +1,144 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: influxdb_database +short_description: Manage InfluxDB databases +description: + - Manage InfluxDB databases. +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + database_name: + description: + - Name of the database. + required: true + type: str + state: + description: + - Determines if the database should be created or destroyed. 
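+ - V(present) creates the database if it does not exist; V(absent) drops the database if it exists.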
+ choices: [absent, present] + default: present + type: str +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes +""" + +EXAMPLES = r""" +# Example influxdb_database command from Ansible Playbooks +- name: Create database + community.general.influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + +- name: Destroy database + community.general.influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent + +- name: Create database using custom credentials + community.general.influxdb_database: + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + ssl: true + validate_certs: true +""" + +RETURN = r""" +# only defaults +""" + +try: + import requests.exceptions + from influxdb import exceptions +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +def find_database(module, client, database_name): + database = None + + try: + databases = client.get_list_database() + for db in databases: + if db['name'] == database_name: + database = db + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + return database + + +def create_database(module, client, database_name): + if not module.check_mode: + try: + client.create_database(database_name) + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=True) + + +def drop_database(module, client, database_name): + if not module.check_mode: + try: + client.drop_database(database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + module.exit_json(changed=True) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + database_name=dict(required=True, type='str'), + state=dict(default='present', type='str', choices=['present', 'absent']) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params['state'] + + influxdb = InfluxDb(module) + client = influxdb.connect_to_influxdb() + database_name = influxdb.database_name + database = find_database(module, client, database_name) + + if state == 'present': + if database: + module.exit_json(changed=False) + else: + create_database(module, client, database_name) + + if state == 'absent': + if database: + drop_database(module, client, database_name) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/influxdb_query.py b/plugins/modules/influxdb_query.py deleted file mode 120000 index 5802f578a2..0000000000 --- a/plugins/modules/influxdb_query.py +++ /dev/null @@ -1 +0,0 @@ -./database/influxdb/influxdb_query.py \ No newline at end of file diff --git a/plugins/modules/influxdb_query.py b/plugins/modules/influxdb_query.py new file mode 100644 index 0000000000..1707d401f2 --- /dev/null +++ b/plugins/modules/influxdb_query.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: influxdb_query +short_description: Query data 
points from InfluxDB +description: + - Query data points from InfluxDB. +author: "René Moser (@resmo)" +requirements: + - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + query: + description: + - Query to be executed. + required: true + type: str + database_name: + description: + - Name of the database. + required: true + type: str +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Query connections + community.general.influxdb_query: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + query: "select mean(value) from connections" + register: connection + +- name: Query connections with tags filters + community.general.influxdb_query: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + query: "select mean(value) from connections where region='zue01' and host='server01'" + register: connection + +- name: Print results from the query + ansible.builtin.debug: + var: connection.query_results +""" + +RETURN = r""" +query_results: + description: Result from the query. + returned: success + type: list + sample: + - mean: 1245.5333333333333 + time: "1970-01-01T00:00:00Z" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +class AnsibleInfluxDBRead(InfluxDb): + + def read_by_query(self, query): + client = self.connect_to_influxdb() + try: + rs = client.query(query) + if rs: + return list(rs.get_points()) + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + query=dict(type='str', required=True), + database_name=dict(required=True, type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + influx = AnsibleInfluxDBRead(module) + query = module.params.get('query') + results = influx.read_by_query(query) + module.exit_json(changed=True, query_results=results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py deleted file mode 120000 index 90b6326bf3..0000000000 --- a/plugins/modules/influxdb_retention_policy.py +++ /dev/null @@ -1 +0,0 @@ -./database/influxdb/influxdb_retention_policy.py \ No newline at end of file diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py new file mode 100644 index 0000000000..c1848a4694 --- /dev/null +++ b/plugins/modules/influxdb_retention_policy.py @@ -0,0 +1,341 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: influxdb_retention_policy +short_description: Manage InfluxDB retention policies +description: + - Manage InfluxDB retention policies. +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + database_name: + description: + - Name of the database. + required: true + type: str + policy_name: + description: + - Name of the retention policy. 
+ required: true + type: str + state: + description: + - State of the retention policy. + choices: [absent, present] + default: present + type: str + version_added: 3.1.0 + duration: + description: + - Determines how long InfluxDB should keep the data. If specified, it should be V(INF) or at least one hour. If not + specified, V(INF) is assumed. Supports complex duration expressions with multiple units. + - Required only if O(state) is set to V(present). + type: str + replication: + description: + - Determines how many independent copies of each point are stored in the cluster. + - Required only if O(state) is set to V(present). + type: int + default: + description: + - Sets the retention policy as default retention policy. + type: bool + default: false + shard_group_duration: + description: + - Determines the time range covered by a shard group. If specified it must be at least one hour. If not provided, it + is determined by InfluxDB by the rentention policy's duration. Supports complex duration expressions with multiple + units. + type: str + version_added: '2.0.0' +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes +""" + +EXAMPLES = r""" +# Example influxdb_retention_policy command from Ansible Playbooks +- name: Create 1 hour retention policy + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1h + replication: 1 + ssl: true + validate_certs: true + state: present + +- name: Create 1 day retention policy with 1 hour shard group duration + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1d + replication: 1 + shard_group_duration: 1h + state: present + +- name: Create 1 week retention policy with 1 day shard group duration + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1w + replication: 1 + shard_group_duration: 1d + state: present + +- name: Create infinite retention policy with 1 week of shard group duration + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: INF + replication: 1 + ssl: false + shard_group_duration: 1w + state: present + +- name: Create retention policy with complex durations + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 5d1h30m + replication: 1 + ssl: false + shard_group_duration: 1d10h30m + state: present + +- name: Drop retention policy + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + state: absent +""" + +RETURN = r""" +# only defaults +""" + +import re + +try: + import requests.exceptions + from influxdb import exceptions +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb +from ansible.module_utils.common.text.converters import to_native + + +VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') + +DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') +EXTENDED_DURATION_REGEX = 
re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') + +DURATION_UNIT_NANOSECS = { + 'ns': 1, + 'u': 1000, + 'µ': 1000, + 'ms': 1000 * 1000, + 's': 1000 * 1000 * 1000, + 'm': 1000 * 1000 * 1000 * 60, + 'h': 1000 * 1000 * 1000 * 60 * 60, + 'd': 1000 * 1000 * 1000 * 60 * 60 * 24, + 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, +} + +MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] +MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] + + +def check_duration_literal(value): + return VALID_DURATION_REGEX.search(value) is not None + + +def parse_duration_literal(value, extended=False): + duration = 0.0 + + if value == "INF": + return duration + + lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) + + for duration_literal in lookup: + filtered_literal = [_f for _f in duration_literal if _f] + duration_val = float(filtered_literal[0]) + duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] + + return duration + + +def find_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + hostname = module.params['hostname'] + retention_policy = None + + try: + retention_policies = client.get_list_retention_policies(database=database_name) + for policy in retention_policies: + if policy['name'] == policy_name: + retention_policy = policy + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e))) + + if retention_policy is not None: + retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True) + retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True) + + return retention_policy + + +def create_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + duration = module.params['duration'] + replication = module.params['replication'] + default = module.params['default'] + shard_group_duration = module.params['shard_group_duration'] + + if not check_duration_literal(duration): + module.fail_json(msg="Failed to parse value of duration") + + influxdb_duration_format = parse_duration_literal(duration) + if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: + module.fail_json(msg="duration value must be at least 1h") + + if shard_group_duration is not None: + if not check_duration_literal(shard_group_duration): + module.fail_json(msg="Failed to parse value of shard_group_duration") + + influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) + if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: + module.fail_json(msg="shard_group_duration value must be finite and at least 1h") + + if not module.check_mode: + try: + if shard_group_duration: + client.create_retention_policy(policy_name, duration, replication, database_name, default, + shard_group_duration) + else: + client.create_retention_policy(policy_name, duration, replication, database_name, default) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + module.exit_json(changed=True) + + +def alter_retention_policy(module, client, retention_policy): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + duration = module.params['duration'] + replication = 
module.params['replication'] + default = module.params['default'] + shard_group_duration = module.params['shard_group_duration'] + + changed = False + + if not check_duration_literal(duration): + module.fail_json(msg="Failed to parse value of duration") + + influxdb_duration_format = parse_duration_literal(duration) + if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: + module.fail_json(msg="duration value must be at least 1h") + + if shard_group_duration is None: + influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"] + else: + if not check_duration_literal(shard_group_duration): + module.fail_json(msg="Failed to parse value of shard_group_duration") + + influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) + if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: + module.fail_json(msg="shard_group_duration value must be finite and at least 1h") + + if (retention_policy['duration'] != influxdb_duration_format or + retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or + retention_policy['replicaN'] != int(replication) or + retention_policy['default'] != default): + if not module.check_mode: + try: + client.alter_retention_policy(policy_name, database_name, duration, replication, default, + shard_group_duration) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + changed = True + module.exit_json(changed=changed) + + +def drop_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + + if not module.check_mode: + try: + client.drop_retention_policy(policy_name, database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + module.exit_json(changed=True) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + state=dict(default='present', type='str', choices=['present', 'absent']), + database_name=dict(required=True, type='str'), + policy_name=dict(required=True, type='str'), + duration=dict(type='str'), + replication=dict(type='int'), + default=dict(default=False, type='bool'), + shard_group_duration=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=( + ('state', 'present', ['duration', 'replication']), + ), + ) + + state = module.params['state'] + + influxdb = InfluxDb(module) + client = influxdb.connect_to_influxdb() + + retention_policy = find_retention_policy(module, client) + + if state == 'present': + if retention_policy: + alter_retention_policy(module, client, retention_policy) + else: + create_retention_policy(module, client) + + if state == 'absent': + if retention_policy: + drop_retention_policy(module, client) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py deleted file mode 120000 index 569dbbc502..0000000000 --- a/plugins/modules/influxdb_user.py +++ /dev/null @@ -1 +0,0 @@ -./database/influxdb/influxdb_user.py \ No newline at end of file diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py new file mode 100644 index 0000000000..b6351a0c27 --- /dev/null +++ b/plugins/modules/influxdb_user.py @@ -0,0 +1,291 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Vitaliy Zhhuta +# insipred by Kamil Szczygiel influxdb_database module +# GNU General Public License 
v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: influxdb_user +short_description: Manage InfluxDB users +description: + - Manage InfluxDB users. +author: "Vitaliy Zhhuta (@zhhuta)" +requirements: + - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + user_name: + description: + - Name of the user. + required: true + type: str + user_password: + description: + - Password to be set for the user. + required: false + type: str + admin: + description: + - Whether the user should be in the admin role or not. + - Since version 2.8, the role is also updated. + default: false + type: bool + state: + description: + - State of the user. + choices: [absent, present] + default: present + type: str + grants: + description: + - Privileges to grant to this user. + - Takes a list of dicts containing the "database" and "privilege" keys. + - If this argument is not provided, the current grants are left alone. + - If an empty list is provided, all grants for the user are removed. + type: list + elements: dict +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Create a user on localhost using default login credentials + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + +- name: Create a user on localhost using custom login credentials + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + +- name: Create an admin user on a remote host using custom login credentials + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + admin: true + hostname: "{{ influxdb_hostname }}" + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + +- name: Create a user on localhost with privileges + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + grants: + - database: 'collectd' + privilege: 'WRITE' + - database: 'graphite' + privilege: 'READ' + +- name: Destroy a user using custom login credentials + community.general.influxdb_user: + user_name: john + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + state: absent +""" + +RETURN = r"""#""" + +import json + +from ansible.module_utils.urls import ConnectionError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +import ansible_collections.community.general.plugins.module_utils.influxdb as influx + + +def find_user(module, client, user_name): + user_result = None + + try: + users = client.get_list_users() + for user in users: + if user['user'] == user_name: + user_result = user + break + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + return user_result + + +def check_user_password(module, client, user_name, user_password): + try: + client.switch_user(user_name, user_password) + client.get_list_users() + except influx.exceptions.InfluxDBClientError as e: + if e.code == 401: + return False + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + finally: + # restore previous user + client.switch_user(module.params['username'], module.params['password']) + 
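# Getting here means the credential probe above did not raise a 401, so the supplied password is considered valid.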
return True + + +def set_user_password(module, client, user_name, user_password): + if not module.check_mode: + try: + client.set_user_password(user_name, user_password) + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + + +def create_user(module, client, user_name, user_password, admin): + if not module.check_mode: + try: + client.create_user(user_name, user_password, admin) + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + + +def drop_user(module, client, user_name): + if not module.check_mode: + try: + client.drop_user(user_name) + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + module.exit_json(changed=True) + + +def set_user_grants(module, client, user_name, grants): + changed = False + + current_grants = [] + try: + current_grants = client.get_list_privileges(user_name) + except influx.exceptions.InfluxDBClientError as e: + if not module.check_mode or 'user not found' not in e.content: + module.fail_json(msg=e.content) + + try: + parsed_grants = [] + # Normalize the privilege wording returned by InfluxDB + for v in current_grants: + if v['privilege'] != 'NO PRIVILEGES': + if v['privilege'] == 'ALL PRIVILEGES': + v['privilege'] = 'ALL' + parsed_grants.append(v) + + # check if the current grants are included in the desired ones + for current_grant in parsed_grants: + if current_grant not in grants: + if not module.check_mode: + client.revoke_privilege(current_grant['privilege'], + current_grant['database'], + user_name) + changed = True + + # check if the desired grants are included in the current ones + for grant in grants: + if grant not in parsed_grants: + if not module.check_mode: + client.grant_privilege(grant['privilege'], + grant['database'], + user_name) + changed = True + + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + return changed + + +INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication" + + +def main(): + argument_spec = influx.InfluxDb.influxdb_argument_spec() + argument_spec.update( + state=dict(default='present', type='str', choices=['present', 'absent']), + user_name=dict(required=True, type='str'), + user_password=dict(type='str', no_log=True), + admin=dict(default=False, type='bool'), + grants=dict(type='list', elements='dict'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params['state'] + user_name = module.params['user_name'] + user_password = module.params['user_password'] + admin = module.params['admin'] + grants = module.params['grants'] + influxdb = influx.InfluxDb(module) + client = influxdb.connect_to_influxdb() + + user = None + try: + user = find_user(module, client, user_name) + except influx.exceptions.InfluxDBClientError as e: + if e.code == 403: + reason = None + try: + msg = json.loads(e.content) + reason = msg["error"] + except (KeyError, ValueError): + module.fail_json(msg=to_native(e)) + + if reason != INFLUX_AUTH_FIRST_USER_REQUIRED: + module.fail_json(msg=to_native(e)) + else: + module.fail_json(msg=to_native(e)) + + changed = False + + if state == 'present': + if user: + if user_password is not None and not check_user_password(module, client, user_name, user_password): + set_user_password(module, client, user_name, user_password) + changed = True + + try: + if admin and not user['admin']: + if not module.check_mode: + client.grant_admin_privileges(user_name) + changed = True + elif not admin and user['admin']: + if
not module.check_mode: + client.revoke_admin_privileges(user_name) + changed = True + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=to_native(e)) + + else: + user_password = user_password or '' + create_user(module, client, user_name, user_password, admin) + changed = True + + if grants is not None: + if set_user_grants(module, client, user_name, grants): + changed = True + + module.exit_json(changed=changed) + + if state == 'absent': + if user: + drop_user(module, client, user_name) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/influxdb_write.py b/plugins/modules/influxdb_write.py deleted file mode 120000 index c292396cf2..0000000000 --- a/plugins/modules/influxdb_write.py +++ /dev/null @@ -1 +0,0 @@ -./database/influxdb/influxdb_write.py \ No newline at end of file diff --git a/plugins/modules/influxdb_write.py b/plugins/modules/influxdb_write.py new file mode 100644 index 0000000000..d0348aca01 --- /dev/null +++ b/plugins/modules/influxdb_write.py @@ -0,0 +1,98 @@ +#!/usr/bin/python +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: influxdb_write +short_description: Write data points into InfluxDB +description: + - Write data points into InfluxDB. +author: "René Moser (@resmo)" +requirements: + - "influxdb >= 0.9" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + data_points: + description: + - Data points as dict to write into the database. + required: true + type: list + elements: dict + database_name: + description: + - Name of the database. 
+ required: true + type: str +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Write points into database + community.general.influxdb_write: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + data_points: + - measurement: connections + tags: + host: server01 + region: us-west + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 2000 + - measurement: connections + tags: + host: server02 + region: us-east + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 3000 +""" + +RETURN = r""" +# only defaults +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +class AnsibleInfluxDBWrite(InfluxDb): + + def write_data_point(self, data_points): + client = self.connect_to_influxdb() + + try: + client.write_points(data_points) + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + data_points=dict(required=True, type='list', elements='dict'), + database_name=dict(required=True, type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + ) + + influx = AnsibleInfluxDBWrite(module) + data_points = module.params.get('data_points') + influx.write_data_point(data_points) + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py deleted file mode 120000 index 27ef491444..0000000000 --- a/plugins/modules/ini_file.py +++ /dev/null @@ -1 +0,0 @@ -./files/ini_file.py \ No newline at end of file diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py new file mode 100644 index 0000000000..27b55c3bf4 --- /dev/null +++ b/plugins/modules/ini_file.py @@ -0,0 +1,660 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2015, Ales Nosek +# Copyright (c) 2017, Ansible Project +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ini_file +short_description: Tweak settings in INI files +extends_documentation_fragment: + - files + - community.general.attributes +description: + - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, + say, M(ansible.builtin.template) or M(ansible.builtin.assemble). + - Adds missing sections if they do not exist. + - This module adds missing ending newlines to files to keep in line with the POSIX standard, even when no other modifications + need to be applied. +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + path: + description: + - Path to the INI-style file; this file is created if required. + type: path + required: true + aliases: [dest] + section: + description: + - Section name in INI file. This is added if O(state=present) automatically when a single value is being set. + - If being omitted, the O(option) is placed before the first O(section). + - Omitting O(section) is also required if the config format does not support sections. 
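+ - The section name is matched literally (special characters are not interpreted), and whitespace around the name inside the brackets is ignored.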
+ type: str + section_has_values: + type: list + elements: dict + required: false + suboptions: + option: + type: str + description: Matching O(section) must contain this option. + required: true + value: + type: str + description: Matching O(section_has_values[].option) must have this specific value. + values: + description: + - The string value to be associated with an O(section_has_values[].option). + - Mutually exclusive with O(section_has_values[].value). + - O(section_has_values[].value=v) is equivalent to O(section_has_values[].values=[v]). + type: list + elements: str + description: + - Among possibly multiple sections of the same name, select the first one that contains matching options and values. + - With O(state=present), if a suitable section is not found, a new section is added, including the required options. + - With O(state=absent), at most one O(section) is removed if it contains the values. + version_added: 8.6.0 + option: + description: + - If set (required for changing a O(value)), this is the name of the option. + - May be omitted if adding/removing a whole O(section). + type: str + value: + description: + - The string value to be associated with an O(option). + - May be omitted when removing an O(option). + - Mutually exclusive with O(values). + - O(value=v) is equivalent to O(values=[v]). + type: str + values: + description: + - The string value to be associated with an O(option). + - May be omitted when removing an O(option). + - Mutually exclusive with O(value). + - O(value=v) is equivalent to O(values=[v]). + type: list + elements: str + version_added: 3.6.0 + backup: + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. + type: bool + default: false + state: + description: + - If set to V(absent) and O(exclusive) set to V(true) all matching O(option) lines are removed. + - If set to V(absent) and O(exclusive) set to V(false) the specified O(option=value) lines are removed, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines are added and the other O(option)s + with the same name are removed. + type: str + choices: [absent, present] + default: present + exclusive: + description: + - If set to V(true) (default), all matching O(option) lines are removed when O(state=absent), or replaced when O(state=present). + - If set to V(false), only the specified O(value)/O(values) are added when O(state=present), or removed when O(state=absent), + and existing ones are not modified. + type: bool + default: true + version_added: 3.6.0 + no_extra_spaces: + description: + - Do not insert spaces before and after '=' symbol. + type: bool + default: false + ignore_spaces: + description: + - Do not change a line if doing so would only add or remove spaces before or after the V(=) symbol. + type: bool + default: false + version_added: 7.5.0 + create: + description: + - If set to V(false), the module fails if the file does not already exist. + - By default it creates the file if it is missing. + type: bool + default: true + allow_no_value: + description: + - Allow option without value and without '=' symbol. 
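+ - If set to V(true) and O(value)/O(values) is omitted, the bare option name is written on a line of its own.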
+ type: bool + default: false + modify_inactive_option: + description: + - By default the module replaces a commented line that matches the given option. + - Set this option to V(false) to avoid this. This is useful when you want to keep commented example C(key=value) pairs + for documentation purposes. + type: bool + default: true + version_added: 8.0.0 + follow: + description: + - This flag indicates that filesystem links, if they exist, should be followed. + - O(follow=true) can modify O(path) when combined with parameters such as O(mode). + type: bool + default: false + version_added: 7.1.0 +notes: + - While it is possible to add an O(option) without specifying a O(value), this makes no sense. + - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. +author: + - Jan-Piet Mens (@jpmens) + - Ales Nosek (@noseka1) +""" + +EXAMPLES = r""" +- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: fav + value: lemonade + mode: '0600' + backup: true + +- name: Ensure "temperature=cold" is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/anotherconf + section: drinks + option: temperature + value: cold + backup: true + +- name: Add "beverage=lemon juice" to section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + value: lemon juice + mode: '0600' + state: present + exclusive: false + +- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + values: + - coke + - pepsi + mode: '0600' + state: present + +- name: Add "beverage=lemon juice" outside a section in specified file + community.general.ini_file: + path: /etc/conf + option: beverage + value: lemon juice + state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.11/32 + mode: '0600' + state: absent + +- name: Update the public key for peer 10.128.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present + +- name: Remove the peer configuration for 10.4.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.11/32 + mode: '0600' + state: absent + +- name: Update the public key for peer 10.4.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present +""" + +import io +import os +import re +import tempfile +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +def match_opt(option, line): + option = re.escape(option) + return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + + +def
match_active_opt(option, line): + option = re.escape(option) + return re.match('()()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + + +def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg): + option_changed = None + if ignore_spaces: + old_match = match_opt(option, section_lines[index]) + if not old_match.group(2): + new_match = match_opt(option, newline) + option_changed = old_match.group(8) != new_match.group(8) + if option_changed is None: + option_changed = section_lines[index] != newline + if option_changed: + section_lines[index] = newline + changed = changed or option_changed + if option_changed: + msg = 'option changed' + changed_lines[index] = 1 + return (changed, msg) + + +def check_section_has_values(section_has_values, section_lines): + if section_has_values is not None: + for condition in section_has_values: + for line in section_lines: + match = match_opt(condition["option"], line) + if match and (len(condition["values"]) == 0 or match.group(8) in condition["values"]): + break + else: + return False + return True + + +def do_ini(module, filename, section=None, section_has_values=None, option=None, values=None, + state='present', exclusive=True, backup=False, no_extra_spaces=False, + ignore_spaces=False, create=True, allow_no_value=False, modify_inactive_option=True, follow=False): + + if section is not None: + section = to_text(section) + if option is not None: + option = to_text(option) + + # deduplicate entries in values + values_unique = [] + [values_unique.append(to_text(value)) for value in values if value not in values_unique and value is not None] + values = values_unique + + diff = dict( + before='', + after='', + before_header='%s (content)' % filename, + after_header='%s (content)' % filename, + ) + + if follow and os.path.islink(filename): + target_filename = os.path.realpath(filename) + else: + target_filename = filename + + if not os.path.exists(target_filename): + if not create: + module.fail_json(rc=257, msg='Destination %s does not exist!' 
% target_filename) + destpath = os.path.dirname(target_filename) + if not os.path.exists(destpath) and not module.check_mode: + os.makedirs(destpath) + ini_lines = [] + else: + with io.open(target_filename, 'r', encoding="utf-8-sig") as ini_file: + ini_lines = [to_text(line) for line in ini_file.readlines()] + + if module._diff: + diff['before'] = ''.join(ini_lines) + + changed = False + + # ini file could be empty + if not ini_lines: + ini_lines.append('\n') + + # last line of file may not contain a trailing newline + if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': + ini_lines[-1] += '\n' + changed = True + + # append fake section lines to simplify the logic + # At top: + # Fake random section to do not match any other in the file + # Using commit hash as fake section name + fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5" + + # Insert it at the beginning + ini_lines.insert(0, '[%s]' % fake_section_name) + + # At bottom: + ini_lines.append('[') + + # If no section is defined, fake section is used + if not section: + section = fake_section_name + + within_section = not section + section_start = section_end = 0 + msg = 'OK' + if no_extra_spaces: + assignment_format = '%s=%s\n' + else: + assignment_format = '%s = %s\n' + + option_no_value_present = False + + non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) + + before = after = [] + section_lines = [] + + section_pattern = re.compile(to_text(r'^\[\s*%s\s*]' % re.escape(section.strip()))) + + for index, line in enumerate(ini_lines): + # end of section: + if within_section and line.startswith('['): + if check_section_has_values( + section_has_values, ini_lines[section_start:index] + ): + section_end = index + break + else: + # look for another section + within_section = False + section_start = section_end = 0 + + # find start and end of section + if section_pattern.match(line): + within_section = True + section_start = index + + before = ini_lines[0:section_start] + section_lines = ini_lines[section_start:section_end] + after = ini_lines[section_end:len(ini_lines)] + + # Keep track of changed section_lines + changed_lines = [0] * len(section_lines) + + # Determine whether to consider using commented out/inactive options or only active ones + if modify_inactive_option: + match_function = match_opt + else: + match_function = match_active_opt + + # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex + # + # 1. edit all lines where we have a option=value pair with a matching value in values[] + # 2. edit all the remaining lines where we have a matching option + # 3. delete remaining lines where we have a matching option + # 4. 
insert missing option line(s) at the end of the section + + if state == 'present' and option: + for index, line in enumerate(section_lines): + if match_function(option, line): + match = match_function(option, line) + if values and match.group(8) in values: + matched_value = match.group(8) + if not matched_value and allow_no_value: + # replace existing option with no value line(s) + newline = '%s\n' % option + option_no_value_present = True + else: + # replace existing option=value line(s) + newline = assignment_format % (option, matched_value) + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + values.remove(matched_value) + elif not values and allow_no_value: + # replace existing option with no value line(s) + newline = '%s\n' % option + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + option_no_value_present = True + break + + if state == 'present' and exclusive and not allow_no_value: + # override option with no value to option with value if not allow_no_value + if len(values) > 0: + for index, line in enumerate(section_lines): + if not changed_lines[index] and match_function(option, line): + newline = assignment_format % (option, values.pop(0)) + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + if len(values) == 0: + break + # remove all remaining option occurrences from the rest of the section + for index in range(len(section_lines) - 1, 0, -1): + if not changed_lines[index] and match_function(option, section_lines[index]): + del section_lines[index] + del changed_lines[index] + changed = True + msg = 'option changed' + + if state == 'present': + # insert missing option line(s) at the end of the section + for index in range(len(section_lines), 0, -1): + # search backwards for previous non-blank or non-comment line + if not non_blank_non_comment_pattern.match(section_lines[index - 1]): + if option and values: + # insert option line(s) + for element in values[::-1]: + # items are added backwards, so traverse the list backwards to not confuse the user + # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ + if element is not None: + # insert option=value line + section_lines.insert(index, assignment_format % (option, element)) + msg = 'option added' + changed = True + elif element is None and allow_no_value: + # insert option with no value line + section_lines.insert(index, '%s\n' % option) + msg = 'option added' + changed = True + elif option and not values and allow_no_value and not option_no_value_present: + # insert option with no value line(s) + section_lines.insert(index, '%s\n' % option) + msg = 'option added' + changed = True + break + + if state == 'absent': + if option: + if exclusive: + # delete all option line(s) with given option and ignore value + new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines + elif not exclusive and len(values) > 0: + # delete specified option=value line(s) + new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines + else: + # drop the 
entire section + if section_lines: + section_lines = [] + msg = 'section removed' + changed = True + + # reassemble the ini_lines after manipulation + ini_lines = before + section_lines + after + + # remove the fake section lines added at the top and bottom + del ini_lines[0] + del ini_lines[-1:] + + if not within_section and state == 'present': + ini_lines.append('[%s]\n' % section) + msg = 'section and option added' + if section_has_values: + for condition in section_has_values: + if condition['option'] != option: + if len(condition['values']) > 0: + for value in condition['values']: + ini_lines.append(assignment_format % (condition['option'], value)) + elif allow_no_value: + ini_lines.append('%s\n' % condition['option']) + elif not exclusive: + for value in condition['values']: + if value not in values: + values.append(value) + if option and values: + for value in values: + ini_lines.append(assignment_format % (option, value)) + elif option and not values and allow_no_value: + ini_lines.append('%s\n' % option) + else: + msg = 'only section added' + changed = True + + if module._diff: + diff['after'] = ''.join(ini_lines) + + backup_file = None + if changed and not module.check_mode: + if backup: + backup_file = module.backup_local(target_filename) + + encoded_ini_lines = [to_bytes(line) for line in ini_lines] + try: + tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) + f = os.fdopen(tmpfd, 'wb') + f.writelines(encoded_ini_lines) + f.close() + except IOError: + module.fail_json(msg="Unable to create temporary file", traceback=traceback.format_exc()) + + try: + module.atomic_move(tmpfile, os.path.abspath(target_filename)) + except IOError: + module.fail_json(msg='Unable to move temporary file %s to %s due to IOError' % (tmpfile, target_filename), traceback=traceback.format_exc()) + + return (changed, backup_file, diff, msg) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True, aliases=['dest']), + section=dict(type='str'), + section_has_values=dict(type='list', elements='dict', options=dict( + option=dict(type='str', required=True), + value=dict(type='str'), + values=dict(type='list', elements='str') + ), mutually_exclusive=[['value', 'values']]), + option=dict(type='str'), + value=dict(type='str'), + values=dict(type='list', elements='str'), + backup=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + exclusive=dict(type='bool', default=True), + no_extra_spaces=dict(type='bool', default=False), + ignore_spaces=dict(type='bool', default=False), + allow_no_value=dict(type='bool', default=False), + modify_inactive_option=dict(type='bool', default=True), + create=dict(type='bool', default=True), + follow=dict(type='bool', default=False) + ), + mutually_exclusive=[ + ['value', 'values'] + ], + add_file_common_args=True, + supports_check_mode=True, + ) + + path = module.params['path'] + section = module.params['section'] + section_has_values = module.params['section_has_values'] + option = module.params['option'] + value = module.params['value'] + values = module.params['values'] + state = module.params['state'] + exclusive = module.params['exclusive'] + backup = module.params['backup'] + no_extra_spaces = module.params['no_extra_spaces'] + ignore_spaces = module.params['ignore_spaces'] + allow_no_value = module.params['allow_no_value'] + modify_inactive_option = module.params['modify_inactive_option'] + create = module.params['create'] + follow = module.params['follow'] + + if state == 'present' and
not allow_no_value and value is None and not values: + module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") + + if value is not None: + values = [value] + elif values is None: + values = [] + + if section_has_values: + for condition in section_has_values: + if condition['value'] is not None: + condition['values'] = [condition['value']] + elif condition['values'] is None: + condition['values'] = [] +# raise Exception("section_has_values: {}".format(section_has_values)) + + (changed, backup_file, diff, msg) = do_ini( + module, path, section, section_has_values, option, values, state, exclusive, backup, + no_extra_spaces, ignore_spaces, create, allow_no_value, modify_inactive_option, follow) + + if not module.check_mode and os.path.exists(path): + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) + + results = dict( + changed=changed, + diff=diff, + msg=msg, + path=path, + ) + if backup_file is not None: + results['backup_file'] = backup_file + + # Mission complete + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py deleted file mode 120000 index 1da290f5ee..0000000000 --- a/plugins/modules/installp.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/installp.py \ No newline at end of file diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py new file mode 100644 index 0000000000..57f70db687 --- /dev/null +++ b/plugins/modules/installp.py @@ -0,0 +1,298 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: installp +author: + - Kairo Araujo (@kairoaraujo) +short_description: Manage packages on AIX +description: + - Manage packages using 'installp' on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + accept_license: + description: + - Whether to accept the license for the package(s). + type: bool + default: false + name: + description: + - One or more packages to install or remove. + - Use V(all) to install all packages available on informed O(repository_path). + type: list + elements: str + required: true + aliases: [pkg] + repository_path: + description: + - Path with AIX packages (required to install). + type: path + state: + description: + - Whether the package needs to be present on or absent from the system. + type: str + choices: [absent, present] + default: present +notes: + - If the package is already installed, even the package/fileset is new, the module does not install it. 
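+ - The module relies on the C(installp) and C(lslpp) commands, which must be available on the target AIX system.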
+""" + +EXAMPLES = r""" +- name: Install package foo + community.general.installp: + name: foo + repository_path: /repository/AIX71/installp/base + accept_license: true + state: present + +- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot + community.general.installp: + name: bos.sysmgt + repository_path: /repository/AIX71/installp/base + accept_license: true + state: present + +- name: Install bos.sysmgt.nim.master only + community.general.installp: + name: bos.sysmgt.nim.master + repository_path: /repository/AIX71/installp/base + accept_license: true + state: present + +- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot + community.general.installp: + name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot + repository_path: /repository/AIX71/installp/base + accept_license: true + state: present + +- name: Remove packages bos.sysmgt.nim.master + community.general.installp: + name: bos.sysmgt.nim.master + state: absent +""" + +RETURN = r""" # """ + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +def _check_new_pkg(module, package, repository_path): + """ + Check if the package of fileset is correct name and repository path. + + :param module: Ansible module arguments spec. + :param package: Package/fileset name. + :param repository_path: Repository package path. + :return: Bool, package information. + """ + + if os.path.isdir(repository_path): + installp_cmd = module.get_bin_path('installp', True) + rc, package_result, err = module.run_command([installp_cmd, "-l", "-MR", "-d", repository_path]) + if rc != 0: + module.fail_json(msg="Failed to run installp.", rc=rc, err=err) + + if package == 'all': + pkg_info = "All packages on dir" + return True, pkg_info + + else: + pkg_info = {} + for line in package_result.splitlines(): + if re.findall(package, line): + pkg_name = line.split()[0].strip() + pkg_version = line.split()[1].strip() + pkg_info[pkg_name] = pkg_version + + return True, pkg_info + + return False, None + + else: + module.fail_json(msg="Repository path %s is not valid." % repository_path) + + +def _check_installed_pkg(module, package, repository_path): + """ + Check the package on AIX. + It verifies if the package is installed and information + + :param module: Ansible module parameters spec. + :param package: Package/fileset name. + :param repository_path: Repository package path. + :return: Bool, package data. 
+ """ + + lslpp_cmd = module.get_bin_path('lslpp', True) + rc, lslpp_result, err = module.run_command([lslpp_cmd, "-lcq", "%s*" % (package, )]) + + if rc == 1: + package_state = ' '.join(err.split()[-2:]) + if package_state == 'not installed.': + return False, None + else: + module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) + + if rc != 0: + module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) + + pkg_data = {} + full_pkg_data = lslpp_result.splitlines() + for line in full_pkg_data: + pkg_name, fileset, level = line.split(':')[0:3] + pkg_data[pkg_name] = fileset, level + + return True, pkg_data + + +def remove(module, installp_cmd, packages): + repository_path = None + remove_count = 0 + removed_pkgs = [] + not_found_pkg = [] + for package in packages: + pkg_check, dummy = _check_installed_pkg(module, package, repository_path) + + if pkg_check: + if not module.check_mode: + rc, remove_out, err = module.run_command([installp_cmd, "-u", package]) + if rc != 0: + module.fail_json(msg="Failed to run installp.", rc=rc, err=err) + remove_count += 1 + removed_pkgs.append(package) + + else: + not_found_pkg.append(package) + + if remove_count > 0: + if len(not_found_pkg) > 1: + not_found_pkg.insert(0, "Package(s) not found: ") + + changed = True + msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg)) + + else: + changed = False + msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg)) + + return changed, msg + + +def install(module, installp_cmd, packages, repository_path, accept_license): + installed_pkgs = [] + not_found_pkgs = [] + already_installed_pkgs = {} + + accept_license_param = { + True: ['-Y'], + False: [], + } + + # Validate if package exists on repository path. + for package in packages: + pkg_check, pkg_data = _check_new_pkg(module, package, repository_path) + + # If package exists on repository path, check if package is installed. + if pkg_check: + pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path) + + # If package is already installed. + if pkg_check_current: + # Check if package is a package and not a fileset, get version + # and add the package into already installed list + if package in pkg_info.keys(): + already_installed_pkgs[package] = pkg_info[package][1] + + else: + # If the package is not a package but a fileset, confirm + # and add the fileset/package into already installed list + for key in pkg_info.keys(): + if package in pkg_info[key]: + already_installed_pkgs[package] = pkg_info[key][1] + + else: + if not module.check_mode: + rc, out, err = module.run_command( + [installp_cmd, "-a"] + accept_license_param[accept_license] + ["-X", "-d", repository_path, package]) + if rc != 0: + module.fail_json(msg="Failed to run installp", rc=rc, err=err) + installed_pkgs.append(package) + + else: + not_found_pkgs.append(package) + + if len(installed_pkgs) > 0: + installed_msg = (" Installed: %s." % ' '.join(installed_pkgs)) + else: + installed_msg = '' + + if len(not_found_pkgs) > 0: + not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs)) + else: + not_found_msg = '' + + if len(already_installed_pkgs) > 0: + already_installed_msg = (" Already installed: %s." 
% already_installed_pkgs)
+ else:
+ already_installed_msg = ''
+
+ if len(installed_pkgs) > 0:
+ changed = True
+ msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+ else:
+ changed = False
+ msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ repository_path=dict(type='path'),
+ accept_license=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ repository_path = module.params['repository_path']
+ accept_license = module.params['accept_license']
+ state = module.params['state']
+
+ installp_cmd = module.get_bin_path('installp', True)
+
+ if state == 'present':
+ if repository_path is None:
+ module.fail_json(msg="repository_path is required to install package")
+
+ changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+ elif state == 'absent':
+ changed, msg = remove(module, installp_cmd, name)
+
+ else:
+ module.fail_json(changed=False, msg="Unexpected state.")
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py
deleted file mode 120000
index 256cfddbe6..0000000000
--- a/plugins/modules/interfaces_file.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/interfaces_file.py
\ No newline at end of file
diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py
new file mode 100644
index 0000000000..c7038d1008
--- /dev/null
+++ b/plugins/modules/interfaces_file.py
@@ -0,0 +1,418 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016, Roman Belyakovsky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: interfaces_file
+short_description: Tweak settings in C(/etc/network/interfaces) files
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+description:
+ - Manage (add, remove, change) individual interface options in an interfaces-style file without having to manage the file
+ as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be present in the file.
+ - Read information about interfaces from interfaces-styled files.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ dest:
+ type: path
+ description:
+ - Path to the interfaces file.
+ default: /etc/network/interfaces
+ iface:
+ type: str
+ description:
+ - Name of the interface, required for value changes or option removal.
+ address_family:
+ type: str
+ description:
+ - Address family of the interface, useful if same interface name is used for both V(inet) and V(inet6).
+ option:
+ type: str
+ description:
+ - Name of the option, required for value changes or option removal.
+ value:
+ type: str
+ description:
+ - If O(option) is not present for the O(iface) and O(state) is V(present), then O(option) is added. If O(option) already
+ exists and is not V(pre-up), V(up), V(post-up) or V(down), its value is updated.
V(pre-up), V(up), V(post-up) and
+ V(down) options cannot be updated; only adding new options, removing existing ones or cleaning the whole option set
+ are supported.
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered
+ it incorrectly.
+ type: bool
+ default: false
+ state:
+ type: str
+ description:
+ - If set to V(absent), the option or section is removed if present instead of created.
+ default: "present"
+ choices: ["present", "absent"]
+
+notes:
+ - If an option is defined multiple times, the last one is updated, but in case of O(state=absent) all of them are deleted.
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+"""
+
+RETURN = r"""
+dest:
+ description: Destination file/path.
+ returned: success
+ type: str
+ sample: "/etc/network/interfaces"
+ifaces:
+ description: Interfaces dictionary.
+ returned: success
+ type: dict
+ contains:
+ ifaces:
+ description: Interface dictionary.
+ returned: success
+ type: dict
+ contains:
+ eth0:
+ description: Name of the interface.
+ returned: success
+ type: dict
+ contains:
+ address_family:
+ description: Interface address family.
+ returned: success
+ type: str
+ sample: "inet"
+ method:
+ description: Interface method.
+ returned: success
+ type: str
+ sample: "manual"
+ mtu:
+ description: Other options, all values returned as strings.
+ returned: success
+ type: str
+ sample: "1500"
+ pre-up:
+ description: List of C(pre-up) scripts.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ up:
+ description: List of C(up) scripts.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ post-up:
+ description: List of C(post-up) scripts.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ down:
+ description: List of C(down) scripts.
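To make the nested RETURN structure documented here concrete, a hypothetical registered result could look like the sketch below (all values invented; plain options come back as strings, script options as lists):

result_ifaces = {
    "ifaces": {
        "eth0": {
            "address_family": "inet",
            "method": "manual",
            "mtu": "1500",  # non-script options are plain strings
            "pre-up": ["route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"],
            "up": [],
            "post-up": [],
            "down": ["route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"],
        }
    }
}
print(result_ifaces["ifaces"]["eth0"]["mtu"])  # '1500'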
+ returned: success + type: list + elements: str + sample: + - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" +""" + +EXAMPLES = r""" +- name: Set eth1 mtu configuration value to 8000 + community.general.interfaces_file: + dest: /etc/network/interfaces.d/eth1.cfg + iface: eth1 + option: mtu + value: 8000 + backup: true + state: present + register: eth1_cfg +""" + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes + + +def lineDict(line): + return {'line': line, 'line_type': 'unknown'} + + +def optionDict(line, iface, option, value, address_family): + return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} + + +def getValueFromLine(s): + spaceRe = re.compile(r'\s+') + m = list(spaceRe.finditer(s))[-1] + valueEnd = m.start() + option = s.split()[0] + optionStart = s.find(option) + optionLen = len(option) + return s[optionLen + optionStart:].strip() + + +def read_interfaces_file(module, filename): + with open(filename, 'r') as f: + return read_interfaces_lines(module, f) + + +def read_interfaces_lines(module, line_strings): + lines = [] + ifaces = {} + currently_processing = None + i = 0 + for line in line_strings: + i += 1 + words = line.split() + if len(words) < 1: + lines.append(lineDict(line)) + continue + if words[0][0] == "#": + lines.append(lineDict(line)) + continue + if words[0] == "mapping": + # currmap = calloc(1, sizeof *currmap); + lines.append(lineDict(line)) + currently_processing = "MAPPING" + elif words[0] == "source": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-dir": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-directory": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "iface": + currif = { + "pre-up": [], + "up": [], + "down": [], + "post-up": [] + } + iface_name = words[1] + try: + currif['address_family'] = words[2] + except IndexError: + currif['address_family'] = None + address_family = currif['address_family'] + try: + currif['method'] = words[3] + except IndexError: + currif['method'] = None + + ifaces[iface_name] = currif + lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family}) + currently_processing = "IFACE" + elif words[0] == "auto": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0].startswith("allow-"): + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-auto-down": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-scripts": + lines.append(lineDict(line)) + currently_processing = "NONE" + else: + if currently_processing == "IFACE": + option_name = words[0] + value = getValueFromLine(line) + lines.append(optionDict(line, iface_name, option_name, value, address_family)) + if option_name in ["pre-up", "up", "down", "post-up"]: + currif[option_name].append(value) + else: + currif[option_name] = value + elif currently_processing == "MAPPING": + lines.append(lineDict(line)) + elif currently_processing == "NONE": + lines.append(lineDict(line)) + else: + module.fail_json(msg="misplaced option %s in line %d" % (line, i)) + return None, None + return lines, ifaces + + +def get_interface_options(iface_lines): + return [i for i in iface_lines 
if i['line_type'] == 'option'] + + +def get_target_options(iface_options, option): + return [i for i in iface_options if i['option'] == option] + + +def update_existing_option_line(target_option, value): + old_line = target_option['line'] + old_value = target_option['value'] + prefix_start = old_line.find(target_option["option"]) + optionLen = len(target_option["option"]) + old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:]) + start = old_value_position.start() + prefix_start + optionLen + end = old_value_position.end() + prefix_start + optionLen + line = old_line[:start] + value + old_line[end:] + return line + + +def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None): + value = str(raw_value) + changed = False + + iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface] + if address_family is not None: + iface_lines = [item for item in iface_lines + if "address_family" in item and item["address_family"] == address_family] + + if len(iface_lines) < 1: + # interface not found + module.fail_json(msg="Error: interface %s not found" % iface) + return changed, None + + iface_options = get_interface_options(iface_lines) + target_options = get_target_options(iface_options, option) + + if state == "present": + if len(target_options) < 1: + changed = True + # add new option + last_line_dict = iface_lines[-1] + changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family) + else: + if option in ["pre-up", "up", "down", "post-up"]: + if len([i for i in target_options if i['value'] == value]) < 1: + changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family) + else: + # if more than one option found edit the last one + if target_options[-1]['value'] != value: + changed = True + target_option = target_options[-1] + line = update_existing_option_line(target_option, value) + address_family = target_option['address_family'] + index = len(lines) - lines[::-1].index(target_option) - 1 + lines[index] = optionDict(line, iface, option, value, address_family) + elif state == "absent": + if len(target_options) >= 1: + if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None": + for target_option in [ito for ito in target_options if ito['value'] == value]: + changed = True + lines = [ln for ln in lines if ln != target_option] + else: + changed = True + for target_option in target_options: + lines = [ln for ln in lines if ln != target_option] + else: + module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state) + + return changed, lines + + +def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family): + # Changing method of interface is not an addition + if option == 'method': + changed = False + for ln in lines: + if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''): + if address_family is not None and ln.get('address_family') != address_family: + continue + changed = True + ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line')) + ln['params']['method'] = value + return changed, lines + + last_line = last_line_dict['line'] + prefix_start = last_line.find(last_line.split()[0]) + suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1]) + prefix = 
last_line[:prefix_start]
+
+ if len(iface_options) < 1:
+ # interface has no options, indent
+ prefix += " "
+
+ line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+ option_dict = optionDict(line, iface, option, value, address_family)
+ index = len(lines) - lines[::-1].index(last_line_dict)
+ lines.insert(index, option_dict)
+ return True, lines
+
+
+def write_changes(module, lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'wb') as f:
+ f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
+ module.atomic_move(tmpfile, os.path.realpath(dest))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', default='/etc/network/interfaces'),
+ iface=dict(type='str'),
+ address_family=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ required_by=dict(
+ option=('iface',),
+ ),
+ )
+
+ dest = module.params['dest']
+ iface = module.params['iface']
+ address_family = module.params['address_family']
+ option = module.params['option']
+ value = module.params['value']
+ backup = module.params['backup']
+ state = module.params['state']
+
+ if option is not None and state == "present" and value is None:
+ module.fail_json(msg="Value must be set if option is defined and state is 'present'")
+
+ lines, ifaces = read_interfaces_file(module, dest)
+
+ changed = False
+
+ if option is not None:
+ changed, lines = set_interface_option(module, lines, iface, option, value, state, address_family)
+
+ if changed:
+ dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(dest)
+ write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
+
+ module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/ip_netns.py b/plugins/modules/ip_netns.py
deleted file mode 120000
index fa32e6d6db..0000000000
--- a/plugins/modules/ip_netns.py
+++ /dev/null
@@ -1 +0,0 @@
-./net_tools/ip_netns.py
\ No newline at end of file
diff --git a/plugins/modules/ip_netns.py b/plugins/modules/ip_netns.py
new file mode 100644
index 0000000000..0a3fa9f86d
--- /dev/null
+++ b/plugins/modules/ip_netns.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Arie Bregman
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: ip_netns
+author: "Arie Bregman (@bregman-arie)"
+short_description: Manage network namespaces
+requirements: [ip]
+description:
+ - Create or delete network namespaces using the C(ip) command.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ required: false
+ description:
+ - Name of the namespace.
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [present, absent]
+ description:
+ - Whether the namespace should exist.
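The value replacement in update_existing_option_line above splices the new value into the old line while preserving the surrounding whitespace; a minimal runnable sketch with a made-up option line:

import re

old_line = "    mtu 1500\n"
option, old_value, new_value = "mtu", "1500", "8000"

# Locate the old value after the option name and replace exactly that span.
prefix_start = old_line.find(option)
option_len = len(option)
match = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + option_len:])
start = match.start() + prefix_start + option_len
end = match.end() + prefix_start + option_len
print(repr(old_line[:start] + new_value + old_line[end:]))  # '    mtu 8000\n'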
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Create a namespace named mario
+ community.general.ip_netns:
+ name: mario
+ state: present
+
+- name: Delete a namespace named luigi
+ community.general.ip_netns:
+ name: luigi
+ state: absent
+"""
+
+RETURN = r"""
+# Default return values
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+
+
+class Namespace(object):
+ """Interface to network namespaces."""
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+
+ def _netns(self, command):
+ '''Run ip netns command'''
+ return self.module.run_command(['ip', 'netns'] + command)
+
+ def exists(self):
+ '''Check if the namespace already exists'''
+ rc, out, err = self.module.run_command(['ip', 'netns', 'list'])
+ if rc != 0:
+ self.module.fail_json(msg=to_text(err))
+ return self.name in out
+
+ def add(self):
+ '''Create network namespace'''
+ rtc, out, err = self._netns(['add', self.name])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def delete(self):
+ '''Delete network namespace'''
+ rtc, out, err = self._netns(['del', self.name])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+ if self.state == 'absent' and self.exists():
+ changed = True
+ elif self.state == 'present' and not self.exists():
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ if not self.exists():
+ self.add()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """Entry point."""
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': None},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ },
+ supports_check_mode=True,
+ )
+
+ network_namespace = Namespace(module)
+ if module.check_mode:
+ network_namespace.check()
+ else:
+ network_namespace.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/ipa_config.py b/plugins/modules/ipa_config.py
deleted file mode 120000
index a83a2d279b..0000000000
--- a/plugins/modules/ipa_config.py
+++ /dev/null
@@ -1 +0,0 @@
-./identity/ipa/ipa_config.py
\ No newline at end of file
diff --git a/plugins/modules/ipa_config.py b/plugins/modules/ipa_config.py
new file mode 100644
index 0000000000..ffa035d6e9
--- /dev/null
+++ b/plugins/modules/ipa_config.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# Copyright (c) 2018, Fran Fitzpatrick
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: ipa_config
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage Global FreeIPA Configuration Settings
+description:
+ - Modify global configuration settings of a FreeIPA Server.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ ipaconfigstring:
+ description: Extra hashes to generate in password plug-in.
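Outside of Ansible, the idempotency flow of the Namespace class above reduces to the following sketch (requires the ip utility and root privileges; the namespace name is invented):

import subprocess

def ensure_netns(name):
    # Substring check, mirroring the module's exists().
    out = subprocess.run(["ip", "netns", "list"], capture_output=True, text=True, check=True).stdout
    if name in out:
        return False  # already there -> changed=False
    subprocess.run(["ip", "netns", "add", name], check=True)
    return True  # namespace created -> changed=True

# ensure_netns("mario")  # True on first run, False afterwards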
+ aliases: ["configstring"] + type: list + elements: str + choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"] + version_added: '2.5.0' + ipadefaultloginshell: + description: Default shell for new users. + aliases: ["loginshell"] + type: str + ipadefaultemaildomain: + description: Default e-mail domain for new users. + aliases: ["emaildomain"] + type: str + ipadefaultprimarygroup: + description: Default group for new users. + aliases: ["primarygroup"] + type: str + version_added: '2.5.0' + ipagroupobjectclasses: + description: A list of group objectclasses. + aliases: ["groupobjectclasses"] + type: list + elements: str + version_added: '7.3.0' + ipagroupsearchfields: + description: A list of fields to search in when searching for groups. + aliases: ["groupsearchfields"] + type: list + elements: str + version_added: '2.5.0' + ipahomesrootdir: + description: Default location of home directories. + aliases: ["homesrootdir"] + type: str + version_added: '2.5.0' + ipakrbauthzdata: + description: Default types of PAC supported for services. + aliases: ["krbauthzdata"] + type: list + elements: str + choices: ["MS-PAC", "PAD", "nfs:NONE"] + version_added: '2.5.0' + ipamaxusernamelength: + description: Maximum length of usernames. + aliases: ["maxusernamelength"] + type: int + version_added: '2.5.0' + ipapwdexpadvnotify: + description: Notice of impending password expiration, in days. + aliases: ["pwdexpadvnotify"] + type: int + version_added: '2.5.0' + ipasearchrecordslimit: + description: Maximum number of records to search (-1 or 0 is unlimited). + aliases: ["searchrecordslimit"] + type: int + version_added: '2.5.0' + ipasearchtimelimit: + description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited). + aliases: ["searchtimelimit"] + type: int + version_added: '2.5.0' + ipaselinuxusermaporder: + description: The SELinux user map order (order in increasing priority of SELinux users). + aliases: ["selinuxusermaporder"] + type: list + elements: str + version_added: '3.7.0' + ipauserauthtype: + description: + - The authentication type to use by default. + - The choice V(idp) has been added in community.general 7.3.0. + - The choice V(passkey) has been added in community.general 8.1.0. + aliases: ["userauthtype"] + choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey", "disabled"] + type: list + elements: str + version_added: '2.5.0' + ipauserobjectclasses: + description: A list of user objectclasses. + aliases: ["userobjectclasses"] + type: list + elements: str + version_added: '7.3.0' + ipausersearchfields: + description: A list of fields to search in when searching for users. 
+ aliases: ["usersearchfields"] + type: list + elements: str + version_added: '2.5.0' +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled + community.general.ipa_config: + ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default login shell is bash + community.general.ipa_config: + ipadefaultloginshell: /bin/bash + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default e-mail domain is ansible.com + community.general.ipa_config: + ipadefaultemaildomain: ansible.com + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default primary group is set to ipausers + community.general.ipa_config: + ipadefaultprimarygroup: ipausers + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the group search fields are set to 'cn,description' + community.general.ipa_config: + ipagroupsearchfields: ['cn', 'description'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the home directory location is set to /home + community.general.ipa_config: + ipahomesrootdir: /home + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD + community.general.ipa_config: + ipakrbauthzdata: ["MS-PAC", "PAD"] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the maximum user name length is set to 32 + community.general.ipa_config: + ipamaxusernamelength: 32 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the password expiration notice is set to 4 days + community.general.ipa_config: + ipapwdexpadvnotify: 4 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the search record limit is set to 100 + community.general.ipa_config: + ipasearchrecordslimit: 100 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the search time limit is set to 2 seconds + community.general.ipa_config: + ipasearchtimelimit: 2 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default user auth type is password + community.general.ipa_config: + ipauserauthtype: ['password'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the user search fields is set to 'uid,givenname,sn,ou,title' + community.general.ipa_config: + ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the SELinux user map order is set + community.general.ipa_config: + ipaselinuxusermaporder: + - "guest_u:s0" + - "xguest_u:s0" + - "user_u:s0" + - "staff_u:s0-s0:c0.c1023" + - "unconfined_u:s0-s0:c0.c1023" + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret +""" + +RETURN = r""" +config: + description: Configuration as returned by IPA API. 
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class ConfigIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(ConfigIPAClient, self).__init__(module, host, port, protocol) + + def config_show(self): + return self._post_json(method='config_show', name=None) + + def config_mod(self, name, item): + return self._post_json(method='config_mod', name=name, item=item) + + +def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, + ipadefaultemaildomain=None, ipadefaultprimarygroup=None, + ipagroupsearchfields=None, ipagroupobjectclasses=None, + ipahomesrootdir=None, ipakrbauthzdata=None, + ipamaxusernamelength=None, ipapwdexpadvnotify=None, + ipasearchrecordslimit=None, ipasearchtimelimit=None, + ipaselinuxusermaporder=None, ipauserauthtype=None, + ipausersearchfields=None, ipauserobjectclasses=None): + config = {} + if ipaconfigstring is not None: + config['ipaconfigstring'] = ipaconfigstring + if ipadefaultloginshell is not None: + config['ipadefaultloginshell'] = ipadefaultloginshell + if ipadefaultemaildomain is not None: + config['ipadefaultemaildomain'] = ipadefaultemaildomain + if ipadefaultprimarygroup is not None: + config['ipadefaultprimarygroup'] = ipadefaultprimarygroup + if ipagroupobjectclasses is not None: + config['ipagroupobjectclasses'] = ipagroupobjectclasses + if ipagroupsearchfields is not None: + config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) + if ipahomesrootdir is not None: + config['ipahomesrootdir'] = ipahomesrootdir + if ipakrbauthzdata is not None: + config['ipakrbauthzdata'] = ipakrbauthzdata + if ipamaxusernamelength is not None: + config['ipamaxusernamelength'] = str(ipamaxusernamelength) + if ipapwdexpadvnotify is not None: + config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify) + if ipasearchrecordslimit is not None: + config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) + if ipasearchtimelimit is not None: + config['ipasearchtimelimit'] = str(ipasearchtimelimit) + if ipaselinuxusermaporder is not None: + config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) + if ipauserauthtype is not None: + config['ipauserauthtype'] = ipauserauthtype + if ipauserobjectclasses is not None: + config['ipauserobjectclasses'] = ipauserobjectclasses + if ipausersearchfields is not None: + config['ipausersearchfields'] = ','.join(ipausersearchfields) + + return config + + +def get_config_diff(client, ipa_config, module_config): + return client.get_diff(ipa_data=ipa_config, module_data=module_config) + + +def ensure(module, client): + module_config = get_config_dict( + ipaconfigstring=module.params.get('ipaconfigstring'), + ipadefaultloginshell=module.params.get('ipadefaultloginshell'), + ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), + ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), + ipagroupobjectclasses=module.params.get('ipagroupobjectclasses'), + ipagroupsearchfields=module.params.get('ipagroupsearchfields'), + ipahomesrootdir=module.params.get('ipahomesrootdir'), + ipakrbauthzdata=module.params.get('ipakrbauthzdata'), + ipamaxusernamelength=module.params.get('ipamaxusernamelength'), + ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), + ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), + 
ipasearchtimelimit=module.params.get('ipasearchtimelimit'), + ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), + ipauserauthtype=module.params.get('ipauserauthtype'), + ipausersearchfields=module.params.get('ipausersearchfields'), + ipauserobjectclasses=module.params.get('ipauserobjectclasses'), + ) + ipa_config = client.config_show() + diff = get_config_diff(client, ipa_config, module_config) + + changed = False + new_config = {} + for module_key in diff: + if module_config.get(module_key) != ipa_config.get(module_key, None): + changed = True + new_config.update({module_key: module_config.get(module_key)}) + + if changed and not module.check_mode: + client.config_mod(name=None, item=new_config) + + return changed, client.config_show() + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + ipaconfigstring=dict(type='list', elements='str', + choices=['AllowNThash', + 'KDC:Disable Last Success', + 'KDC:Disable Lockout', + 'KDC:Disable Default Preauth for SPNs'], + aliases=['configstring']), + ipadefaultloginshell=dict(type='str', aliases=['loginshell']), + ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), + ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), + ipagroupobjectclasses=dict(type='list', elements='str', + aliases=['groupobjectclasses']), + ipagroupsearchfields=dict(type='list', elements='str', + aliases=['groupsearchfields']), + ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), + ipakrbauthzdata=dict(type='list', elements='str', + choices=['MS-PAC', 'PAD', 'nfs:NONE'], + aliases=['krbauthzdata']), + ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']), + ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), + ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), + ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), + ipaselinuxusermaporder=dict(type='list', elements='str', + aliases=['selinuxusermaporder']), + ipauserauthtype=dict(type='list', elements='str', + aliases=['userauthtype'], + choices=["password", "radius", "otp", "pkinit", + "hardened", "idp", "passkey", "disabled"]), + ipausersearchfields=dict(type='list', elements='str', + aliases=['usersearchfields']), + ipauserobjectclasses=dict(type='list', elements='str', + aliases=['userobjectclasses']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = ConfigIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py deleted file mode 120000 index 6bb86ea1fb..0000000000 --- a/plugins/modules/ipa_dnsrecord.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_dnsrecord.py \ No newline at end of file diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py new file mode 100644 index 0000000000..2507cc7f14 --- /dev/null +++ b/plugins/modules/ipa_dnsrecord.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: ipa_dnsrecord
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA DNS records
+description:
+ - Add, modify and delete an IPA DNS record using the IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ zone_name:
+ description:
+ - The DNS zone name in which the DNS record needs to be managed.
+ required: true
+ type: str
+ record_name:
+ description:
+ - The DNS record name to manage.
+ required: true
+ aliases: ["name"]
+ type: str
+ record_type:
+ description:
+ - The type of DNS record.
+ - Support for V(NS) was added in community.general 8.2.0.
+ - Support for V(SSHFP) was added in community.general 9.1.0.
+ required: false
+ default: 'A'
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP']
+ type: str
+ record_value:
+ description:
+ - Manage DNS record name with this value.
+ - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified.
+ - Use O(record_values) if you need to specify multiple values.
+ - In the case of V(A) or V(AAAA) record types, this is the IP address.
+ - In the case of V(A6) record type, this is the A6 Record data.
+ - In the case of V(CNAME) record type, this is the hostname.
+ - In the case of V(DNAME) record type, this is the DNAME target.
+ - In the case of V(NS) record type, this is the name server hostname. The hostname must already have a valid A or AAAA record.
+ - In the case of V(PTR) record type, this is the hostname.
+ - In the case of V(TXT) record type, this is a text.
+ - In the case of V(SRV) record type, this is a service record.
+ - In the case of V(MX) record type, this is a mail exchanger record.
+ - In the case of V(SSHFP) record type, this is an SSH fingerprint record.
+ type: str
+ record_values:
+ description:
+ - Manage DNS record name with this value.
+ - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified.
+ - In the case of V(A) or V(AAAA) record types, this is the IP address.
+ - In the case of V(A6) record type, this is the A6 Record data.
+ - In the case of V(CNAME) record type, this is the hostname.
+ - In the case of V(DNAME) record type, this is the DNAME target.
+ - In the case of V(NS) record type, this is the name server hostname. The hostname must already have a valid A or AAAA record.
+ - In the case of V(PTR) record type, this is the hostname.
+ - In the case of V(TXT) record type, this is a text.
+ - In the case of V(SRV) record type, this is a service record.
+ - In the case of V(MX) record type, this is a mail exchanger record.
+ - In the case of V(SSHFP) record type, this is an SSH fingerprint record.
+ type: list
+ elements: str
+ record_ttl:
+ description:
+ - Set the TTL for the record.
+ - Applies only when adding a new record or changing the value of O(record_value) or O(record_values).
+ required: false
+ type: int
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.ipa.connection_notes
+ - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Ensure dns record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: vm-001
+ record_type: 'AAAA'
+ record_value: '::1'
+
+- name: Ensure that dns records exist with a TTL
+ community.general.ipa_dnsrecord:
+ name: host02
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_values: '::1,fe80::1'
+ record_ttl: 300
+ ipa_host: ipa.example.com
+ ipa_pass: topsecret
+ state: present
+
+- name: Ensure a PTR record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: 2.168.192.in-addr.arpa
+ record_name: 5
+ record_type: 'PTR'
+ record_value: 'internal.ipa.example.com'
+
+- name: Ensure a TXT record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos
+ record_type: 'TXT'
+ record_value: 'EXAMPLE.COM'
+
+- name: Ensure an SRV record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos._udp.example.com
+ record_type: 'SRV'
+ record_value: '10 50 88 ipa.example.com'
+
+- name: Ensure MX records are present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: '@'
+ record_type: 'MX'
+ record_values:
+ - '1 mailserver-01.example.com'
+ - '2 mailserver-02.example.com'
+
+- name: Ensure that dns record is removed
+ community.general.ipa_dnsrecord:
+ name: host01
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+
+- name: Ensure an NS record for a subdomain is present
+ community.general.ipa_dnsrecord:
+ name: subdomain
+ zone_name: example.com
+ record_type: 'NS'
+ record_value: 'ns1.subdomain.example.com'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: ChangeMe!
+
+- name: Retrieve the current sshfp fingerprints
+ ansible.builtin.command: ssh-keyscan -D localhost
+ register: ssh_hostkeys
+
+- name: Update the SSHFP records in DNS
+ community.general.ipa_dnsrecord:
+ name: "{{ inventory_hostname }}"
+ zone_name: example.com
+ record_type: 'SSHFP'
+ record_values: "{{ ssh_hostkeys.stdout.split('\n') | map('split', 'SSHFP ') | map('last') | list }}"
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: ChangeMe!
+"""
+
+RETURN = r"""
+dnsrecord:
+ description: DNS record as returned by IPA API.
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class DNSRecordIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(DNSRecordIPAClient, self).__init__(module, host, port, protocol) + + def dnsrecord_find(self, zone_name, record_name): + if record_name == '@': + return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True}) + else: + return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True}) + + def dnsrecord_add(self, zone_name=None, record_name=None, details=None): + item = dict(idnsname=record_name) + + if details.get('record_ttl'): + item.update(dnsttl=details['record_ttl']) + + for value in details['record_values']: + if details['record_type'] == 'A': + item.update(a_part_ip_address=value) + elif details['record_type'] == 'AAAA': + item.update(aaaa_part_ip_address=value) + elif details['record_type'] == 'A6': + item.update(a6_part_data=value) + elif details['record_type'] == 'CNAME': + item.update(cname_part_hostname=value) + elif details['record_type'] == 'DNAME': + item.update(dname_part_target=value) + elif details['record_type'] == 'NS': + item.update(ns_part_hostname=value) + elif details['record_type'] == 'PTR': + item.update(ptr_part_hostname=value) + elif details['record_type'] == 'TXT': + item.update(txtrecord=value) + elif details['record_type'] == 'SRV': + item.update(srvrecord=value) + elif details['record_type'] == 'MX': + item.update(mxrecord=value) + elif details['record_type'] == 'SSHFP': + item.update(sshfprecord=value) + + self._post_json(method='dnsrecord_add', name=zone_name, item=item) + + def dnsrecord_mod(self, zone_name=None, record_name=None, details=None): + item = get_dnsrecord_dict(details) + item.update(idnsname=record_name) + if details.get('record_ttl'): + item.update(dnsttl=details['record_ttl']) + return self._post_json(method='dnsrecord_mod', name=zone_name, item=item) + + def dnsrecord_del(self, zone_name=None, record_name=None, details=None): + item = get_dnsrecord_dict(details) + item.update(idnsname=record_name) + return self._post_json(method='dnsrecord_del', name=zone_name, item=item) + + +def get_dnsrecord_dict(details=None): + module_dnsrecord = dict() + if details['record_type'] == 'A' and details['record_values']: + module_dnsrecord.update(arecord=details['record_values']) + elif details['record_type'] == 'AAAA' and details['record_values']: + module_dnsrecord.update(aaaarecord=details['record_values']) + elif details['record_type'] == 'A6' and details['record_values']: + module_dnsrecord.update(a6record=details['record_values']) + elif details['record_type'] == 'CNAME' and details['record_values']: + module_dnsrecord.update(cnamerecord=details['record_values']) + elif details['record_type'] == 'DNAME' and details['record_values']: + module_dnsrecord.update(dnamerecord=details['record_values']) + elif details['record_type'] == 'NS' and details['record_values']: + module_dnsrecord.update(nsrecord=details['record_values']) + elif details['record_type'] == 'PTR' and details['record_values']: + module_dnsrecord.update(ptrrecord=details['record_values']) + elif details['record_type'] == 'TXT' and details['record_values']: + module_dnsrecord.update(txtrecord=details['record_values']) + 
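# Each branch of this chain maps the record type to the IPA multi-valued
# attribute of the same name (arecord, aaaarecord, a6record, cnamerecord,
# dnamerecord, nsrecord, ptrrecord, txtrecord; srvrecord, mxrecord and
# sshfprecord follow below).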
elif details['record_type'] == 'SRV' and details['record_values']: + module_dnsrecord.update(srvrecord=details['record_values']) + elif details['record_type'] == 'MX' and details['record_values']: + module_dnsrecord.update(mxrecord=details['record_values']) + elif details['record_type'] == 'SSHFP' and details['record_values']: + module_dnsrecord.update(sshfprecord=details['record_values']) + + if details.get('record_ttl'): + module_dnsrecord.update(dnsttl=details['record_ttl']) + + return module_dnsrecord + + +def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord): + details = get_dnsrecord_dict(module_dnsrecord) + return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details) + + +def ensure(module, client): + zone_name = module.params['zone_name'] + record_name = module.params['record_name'] + record_ttl = module.params.get('record_ttl') + state = module.params['state'] + + ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name) + + record_values = module.params['record_values'] + if module.params['record_value'] is not None: + record_values = [module.params['record_value']] + + module_dnsrecord = dict( + record_type=module.params['record_type'], + record_values=record_values, + record_ttl=to_native(record_ttl, nonstring='passthru'), + ) + + # ttl is not required to change records + if module_dnsrecord['record_ttl'] is None: + module_dnsrecord.pop('record_ttl') + + changed = False + if state == 'present': + if not ipa_dnsrecord: + changed = True + if not module.check_mode: + client.dnsrecord_add(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + else: + diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord) + if len(diff) > 0: + changed = True + if not module.check_mode: + client.dnsrecord_mod(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + else: + if ipa_dnsrecord: + changed = True + if not module.check_mode: + client.dnsrecord_del(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + + return changed, client.dnsrecord_find(zone_name, record_name) + + +def main(): + record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX', 'SSHFP'] + argument_spec = ipa_argument_spec() + argument_spec.update( + zone_name=dict(type='str', required=True), + record_name=dict(type='str', aliases=['name'], required=True), + record_type=dict(type='str', default='A', choices=record_types), + record_value=dict(type='str'), + record_values=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + record_ttl=dict(type='int'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['record_value', 'record_values']], + required_one_of=[['record_value', 'record_values']], + supports_check_mode=True + ) + + client = DNSRecordIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, record = ensure(module, client) + module.exit_json(changed=changed, record=record) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py deleted file mode 120000 index f2624a9251..0000000000 --- a/plugins/modules/ipa_dnszone.py +++ /dev/null @@ -1 +0,0 @@ 
-./identity/ipa/ipa_dnszone.py
\ No newline at end of file
diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py
new file mode 100644
index 0000000000..57faaef955
--- /dev/null
+++ b/plugins/modules/ipa_dnszone.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
+# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: ipa_dnszone
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage FreeIPA DNS Zones
+description:
+ - Add and delete IPA DNS zones using the IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ zone_name:
+ description:
+ - The DNS zone name that needs to be managed.
+ required: true
+ type: str
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ dynamicupdate:
+ description: Allow dynamic updates to the zone.
+ default: false
+ type: bool
+ allowsyncptr:
+ description: Allow synchronization of forward and reverse records in the zone.
+ default: false
+ type: bool
+ version_added: 4.3.0
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.ipa.connection_notes
+ - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Ensure dns zone is present
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+
+- name: Ensure dns zone is present and allows dynamic updates
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ dynamicupdate: true
+
+- name: Ensure that dns zone is removed
+ community.general.ipa_dnszone:
+ zone_name: example.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+
+- name: Ensure dns zone is present and allows syncing of PTR records
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ allowsyncptr: true
+"""
+
+RETURN = r"""
+zone:
+ description: DNS zone as returned by IPA API.
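The comparison in ensure() below relies on FreeIPA returning boolean zone attributes as one-element lists of 'TRUE'/'FALSE' strings, hence the str(...).upper() conversion; a small sketch with invented API data:

ipa_dnszone = {'idnsallowdynupdate': ['FALSE'], 'idnsallowsyncptr': ['TRUE']}
dynamicupdate, allowsyncptr = True, True

needs_mod = (
    ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper()
    or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper()
)
print(needs_mod)  # True -> dnszone_mod would be called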
+ returned: always + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class DNSZoneIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(DNSZoneIPAClient, self).__init__(module, host, port, protocol) + + def dnszone_find(self, zone_name, details=None): + items = {'all': 'true', + 'idnsname': zone_name, } + if details is not None: + items.update(details) + + return self._post_json( + method='dnszone_find', + name=zone_name, + item=items + ) + + def dnszone_add(self, zone_name=None, details=None): + items = {} + if details is not None: + items.update(details) + + return self._post_json( + method='dnszone_add', + name=zone_name, + item=items + ) + + def dnszone_mod(self, zone_name=None, details=None): + items = {} + if details is not None: + items.update(details) + + return self._post_json( + method='dnszone_mod', + name=zone_name, + item=items + ) + + def dnszone_del(self, zone_name=None, record_name=None, details=None): + return self._post_json( + method='dnszone_del', name=zone_name, item={}) + + +def ensure(module, client): + zone_name = module.params['zone_name'] + state = module.params['state'] + dynamicupdate = module.params['dynamicupdate'] + allowsyncptr = module.params['allowsyncptr'] + + changed = False + + # does zone exist + ipa_dnszone = client.dnszone_find(zone_name) + + if state == 'present': + if not ipa_dnszone: + + changed = True + if not module.check_mode: + client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) + elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or \ + ipa_dnszone.get('idnsallowsyncptr') and ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper(): + changed = True + if not module.check_mode: + client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) + else: + changed = False + + # state is absent + else: + # check for generic zone existence + if ipa_dnszone: + changed = True + if not module.check_mode: + client.dnszone_del(zone_name=zone_name) + + return changed, client.dnszone_find(zone_name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(zone_name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + dynamicupdate=dict(type='bool', default=False), + allowsyncptr=dict(type='bool', default=False), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = DNSZoneIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, zone = ensure(module, client) + module.exit_json(changed=changed, zone=zone) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py new file mode 100644 index 0000000000..0b4e102ac0 --- /dev/null +++ b/plugins/modules/ipa_getkeytab.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: ipa_getkeytab
+short_description: Manage keytab file in FreeIPA
+version_added: 9.5.0
+description:
+ - Manage keytab file with C(ipa-getkeytab) utility.
+ - See U(https://manpages.ubuntu.com/manpages/jammy/man1/ipa-getkeytab.1.html) for reference.
+author: "Alexander Bakanovskii (@abakanovskii)"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ description:
+ - The base path where to put the generated keytab file.
+ type: path
+ aliases: ["keytab"]
+ required: true
+ principal:
+ description:
+ - The non-realm part of the full principal name.
+ type: str
+ required: true
+ ipa_host:
+ description:
+ - The IPA server to retrieve the keytab from (FQDN).
+ type: str
+ ldap_uri:
+ description:
+ - LDAP URI. If V(ldap://) is specified, STARTTLS is initiated by default.
+ - Cannot be used with the O(ipa_host) option.
+ type: str
+ bind_dn:
+ description:
+ - The LDAP DN to bind as when retrieving a keytab without Kerberos credentials.
+ - Generally used with the O(bind_pw) option.
+ type: str
+ bind_pw:
+ description:
+ - The LDAP password to use when not binding with Kerberos.
+ type: str
+ password:
+ description:
+ - Use this password for the key instead of one randomly generated.
+ type: str
+ ca_cert:
+ description:
+ - The path to the IPA CA certificate used to validate LDAPS/STARTTLS connections.
+ type: path
+ sasl_mech:
+ description:
+ - SASL mechanism to use if O(bind_dn) and O(bind_pw) are not specified.
+ choices: ["GSSAPI", "EXTERNAL"]
+ type: str
+ retrieve_mode:
+ description:
+ - Retrieve an existing key from the server instead of generating a new one.
+ - This is incompatible with the O(password) option, and works only against an IPA server more recent than version 3.3.
+ - The user requesting the keytab must have access to the keys for this operation to succeed.
+ - Be aware that if set V(false), a new keytab is generated.
+ - This invalidates all previously retrieved keytabs for this service principal.
+ type: bool
+ encryption_types:
+ description:
+ - The list of encryption types to use to generate keys.
+ - It uses local client defaults if not provided.
+ - Valid values depend on the Kerberos library version and configuration.
+ type: str
+ state:
+ description:
+ - The state of the keytab file.
+ - V(present) only checks for the existence of the file; if you want to recreate the keytab with other parameters you should set
+ O(force=true).
+ type: str
+ default: present
+ choices: ["present", "absent"]
+ force:
+ description:
+ - Force recreation if the keytab already exists.
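For orientation, a rough sketch of the ipa-getkeytab command line that the CmdRunner wiring below assembles for a typical retrieve call; the flag spellings follow the arg_formats declarations, while host, principal and path are invented:

params = {
    'retrieve_mode': True,
    'path': '/etc/ipa/test.keytab',
    'ipa_host': 'freeipa-dc01.ipa.test',
    'principal': 'HTTP/freeipa-dc02.ipa.test',
}
cmd = ['ipa-getkeytab']
if params['retrieve_mode']:
    cmd += ['--retrieve']                      # as_bool('--retrieve')
cmd += ['--keytab', params['path']]            # as_opt_val('--keytab')
cmd += ['--server', params['ipa_host']]        # as_opt_val('--server')
cmd += ['--principal', params['principal']]    # as_opt_val('--principal')
print(' '.join(cmd))
# ipa-getkeytab --retrieve --keytab /etc/ipa/test.keytab --server freeipa-dc01.ipa.test --principal HTTP/freeipa-dc02.ipa.test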
+ type: bool +requirements: + - freeipa-client + - Managed host is FreeIPA client +extends_documentation_fragment: + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Get Kerberos ticket using default principal + community.general.krb_ticket: + password: "{{ aldpro_admin_password }}" + +- name: Create keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + +- name: Retrieve already existing keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + retrieve_mode: true + +- name: Force keytab recreation + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + force: true +""" + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.path = kwargs['path'] + self.state = kwargs['state'] + self.principal = kwargs['principal'] + self.ipa_host = kwargs['ipa_host'] + self.ldap_uri = kwargs['ldap_uri'] + self.bind_dn = kwargs['bind_dn'] + self.bind_pw = kwargs['bind_pw'] + self.password = kwargs['password'] + self.ca_cert = kwargs['ca_cert'] + self.sasl_mech = kwargs['sasl_mech'] + self.retrieve_mode = kwargs['retrieve_mode'] + self.encryption_types = kwargs['encryption_types'] + + self.runner = CmdRunner( + module, + command='ipa-getkeytab', + arg_formats=dict( + retrieve_mode=cmd_runner_fmt.as_bool('--retrieve'), + path=cmd_runner_fmt.as_opt_val('--keytab'), + ipa_host=cmd_runner_fmt.as_opt_val('--server'), + principal=cmd_runner_fmt.as_opt_val('--principal'), + ldap_uri=cmd_runner_fmt.as_opt_val('--ldapuri'), + bind_dn=cmd_runner_fmt.as_opt_val('--binddn'), + bind_pw=cmd_runner_fmt.as_opt_val('--bindpw'), + password=cmd_runner_fmt.as_opt_val('--password'), + ca_cert=cmd_runner_fmt.as_opt_val('--cacert'), + sasl_mech=cmd_runner_fmt.as_opt_val('--mech'), + encryption_types=cmd_runner_fmt.as_opt_val('--enctypes'), + ) + ) + + def _exec(self, check_rc=True): + with self.runner( + "retrieve_mode path ipa_host principal ldap_uri bind_dn bind_pw password ca_cert sasl_mech encryption_types", + check_rc=check_rc + ) as ctx: + rc, out, err = ctx.run() + return out + + +def main(): + arg_spec = dict( + path=dict(type='path', required=True, aliases=["keytab"]), + state=dict(default='present', choices=['present', 'absent']), + principal=dict(type='str', required=True), + ipa_host=dict(type='str'), + ldap_uri=dict(type='str'), + bind_dn=dict(type='str'), + bind_pw=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + sasl_mech=dict(type='str', choices=["GSSAPI", "EXTERNAL"]), + retrieve_mode=dict(type='bool'), + encryption_types=dict(type='str'), + force=dict(type='bool'), + ) + module = AnsibleModule( + argument_spec=arg_spec, + mutually_exclusive=[('ipa_host', 'ldap_uri'), ('retrieve_mode', 'password')], + supports_check_mode=True, + ) + + path = module.params['path'] + state = module.params['state'] + force = module.params['force'] + + keytab = IPAKeytab(module, + path=path, + state=state, + principal=module.params['principal'], + ipa_host=module.params['ipa_host'], + ldap_uri=module.params['ldap_uri'], + bind_dn=module.params['bind_dn'], + bind_pw=module.params['bind_pw'], + 
password=module.params['password'], + ca_cert=module.params['ca_cert'], + sasl_mech=module.params['sasl_mech'], + retrieve_mode=module.params['retrieve_mode'], + encryption_types=module.params['encryption_types'], + ) + + changed = False + if state == 'present': + if os.path.exists(path): + if force and not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + keytab._exec() + changed = True + if force and module.check_mode: + changed = True + else: + changed = True + if not module.check_mode: + keytab._exec() + + if state == 'absent': + if os.path.exists(path): + changed = True + if not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py deleted file mode 120000 index 86d6492c2e..0000000000 --- a/plugins/modules/ipa_group.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_group.py \ No newline at end of file diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py new file mode 100644 index 0000000000..2c004c8bb7 --- /dev/null +++ b/plugins/modules/ipa_group.py @@ -0,0 +1,339 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_group +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA group +description: + - Add, modify and delete group within IPA server. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + append: + description: + - If V(true), add the listed O(user) and O(group) to the group members. + - If V(false), only the listed O(user) and O(group) are set as group members, removing any other members. + default: false + type: bool + version_added: 4.0.0 + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + description: + description: + - Description of the group. + type: str + external: + description: + - Allow adding external non-IPA members from trusted domains. + type: bool + gidnumber: + description: + - GID (use this option to set it manually). + aliases: ['gid'] + type: str + group: + description: + - List of group names assigned to this group. + - If O(append=false) and an empty list is passed all groups are removed from this group. + - Groups that are already assigned but not passed are removed. + - If O(append=true) the listed groups are assigned without removing other groups. + - If option is omitted assigned groups are not checked or changed. + type: list + elements: str + nonposix: + description: + - Create as a non-POSIX group. + type: bool + user: + description: + - List of user names assigned to this group. + - If O(append=false) and an empty list is passed all users are removed from this group. + - Users that are already assigned but not passed are removed. + - If O(append=true) the listed users are assigned without removing other users. + - If option is omitted assigned users are not checked or changed. + type: list + elements: str + external_user: + description: + - List of external users assigned to this group.
+ - Behaves identically to O(user) with respect to the O(append) attribute. + - List entries can be in V(DOMAIN\\username) or SID format. + - Unless SIDs are provided, the module always attempts to make changes even if the group already has all the users. + This is because only SIDs are returned by the IPA query. + - O(external=true) is needed for this option to work. + type: list + elements: str + version_added: 6.3.0 + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure group is present + community.general.ipa_group: + name: oinstall + gidnumber: '54321' + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that groups sysops and appops are assigned to ops but no other group + community.general.ipa_group: + name: ops + group: + - sysops + - appops + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that users linus and larry are assigned to the group, but no other user + community.general.ipa_group: + name: sysops + user: + - linus + - larry + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that the new starter named john is a member of the group, without removing other members + community.general.ipa_group: + name: developers + user: + - john + append: true + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Add external user to a group + community.general.ipa_group: + name: developers + external: true + append: true + external_user: + - S-1-5-21-123-1234-12345-63421 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Add a user from MYDOMAIN + community.general.ipa_group: + name: developers + external: true + append: true + external_user: + - MYDOMAIN\\john + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure group is absent + community.general.ipa_group: + name: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +group: + description: Group as returned by IPA API.
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class GroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(GroupIPAClient, self).__init__(module, host, port, protocol) + + def group_find(self, name): + return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name}) + + def group_add(self, name, item): + return self._post_json(method='group_add', name=name, item=item) + + def group_mod(self, name, item): + return self._post_json(method='group_mod', name=name, item=item) + + def group_del(self, name): + return self._post_json(method='group_del', name=name) + + def group_add_member(self, name, item): + return self._post_json(method='group_add_member', name=name, item=item) + + def group_add_member_group(self, name, item): + return self.group_add_member(name=name, item={'group': item}) + + def group_add_member_user(self, name, item): + return self.group_add_member(name=name, item={'user': item}) + + def group_add_member_externaluser(self, name, item): + return self.group_add_member(name=name, item={'ipaexternalmember': item}) + + def group_remove_member(self, name, item): + return self._post_json(method='group_remove_member', name=name, item=item) + + def group_remove_member_group(self, name, item): + return self.group_remove_member(name=name, item={'group': item}) + + def group_remove_member_user(self, name, item): + return self.group_remove_member(name=name, item={'user': item}) + + def group_remove_member_externaluser(self, name, item): + return self.group_remove_member(name=name, item={'ipaexternalmember': item}) + + +def get_group_dict(description=None, external=None, gid=None, nonposix=None): + group = {} + if description is not None: + group['description'] = description + if external is not None: + group['external'] = external + if gid is not None: + group['gidnumber'] = gid + if nonposix is not None: + group['nonposix'] = nonposix + return group + + +def get_group_diff(client, ipa_group, module_group): + data = [] + # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed. 
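+ # The module therefore translates a request for a POSIX group (nonposix=False) into 'posix': True + # before diffing against an existing non-POSIX group, since group_mod does not accept 'nonposix'.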
+ if 'nonposix' in module_group: + # Only non-posix groups can be changed to posix + if not module_group['nonposix'] and ipa_group.get('nonposix'): + module_group['posix'] = True + del module_group['nonposix'] + + if 'external' in module_group: + if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'): + del module_group['external'] + + return client.get_diff(ipa_data=ipa_group, module_data=module_group) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + group = module.params['group'] + user = module.params['user'] + external = module.params['external'] + external_user = module.params['external_user'] + append = module.params['append'] + + module_group = get_group_dict(description=module.params['description'], + external=external, + gid=module.params['gidnumber'], + nonposix=module.params['nonposix']) + ipa_group = client.group_find(name=name) + + if not (external or external_user is None): + module.fail_json("external_user can only be set if external = True") + + changed = False + if state == 'present': + if not ipa_group: + changed = True + if not module.check_mode: + ipa_group = client.group_add(name, item=module_group) + else: + diff = get_group_diff(client, ipa_group, module_group) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_group.get(key) + client.group_mod(name=name, item=data) + + if group is not None: + changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group, + client.group_add_member_group, + client.group_remove_member_group, + append=append) or changed + + if user is not None: + changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user, + client.group_add_member_user, + client.group_remove_member_user, + append=append) or changed + + if external_user is not None: + changed = client.modify_if_diff(name, ipa_group.get('ipaexternalmember', []), external_user, + client.group_add_member_externaluser, + client.group_remove_member_externaluser, + append=append) or changed + else: + if ipa_group: + changed = True + if not module.check_mode: + client.group_del(name) + + return changed, client.group_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + external=dict(type='bool'), + external_user=dict(type='list', elements='str'), + gidnumber=dict(type='str', aliases=['gid']), + group=dict(type='list', elements='str'), + nonposix=dict(type='bool'), + state=dict(type='str', default='present', choices=['present', 'absent']), + user=dict(type='list', elements='str'), + append=dict(type='bool', default=False)) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = GroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, group = ensure(module, client) + module.exit_json(changed=changed, group=group) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py deleted file mode 120000 index fea88add35..0000000000 --- a/plugins/modules/ipa_hbacrule.py +++ /dev/null @@ -1 +0,0 @@ 
-./identity/ipa/ipa_hbacrule.py \ No newline at end of file diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py new file mode 100644 index 0000000000..67e39bbe98 --- /dev/null +++ b/plugins/modules/ipa_hbacrule.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_hbacrule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA HBAC rule +description: + - Add, modify or delete an IPA HBAC rule using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: Description. + type: str + host: + description: + - List of host names to assign. + - If an empty list is passed all hosts are removed from the rule. + - If option is omitted hosts are not checked or changed. + required: false + type: list + elements: str + hostcategory: + description: Host category. + choices: ['all'] + type: str + hostgroup: + description: + - List of hostgroup names to assign. + - If an empty list is passed all hostgroups are removed from the rule. + - If option is omitted hostgroups are not checked or changed. + type: list + elements: str + service: + description: + - List of service names to assign. + - If an empty list is passed all services are removed from the rule. + - If option is omitted services are not checked or changed. + type: list + elements: str + servicecategory: + description: Service category. + choices: ['all'] + type: str + servicegroup: + description: + - List of service group names to assign. + - If an empty list is passed all assigned service groups are removed from the rule. + - If option is omitted service groups are not checked or changed. + type: list + elements: str + sourcehost: + description: + - List of source host names to assign. + - If an empty list is passed all assigned source hosts are removed from the rule. + - If option is omitted source hosts are not checked or changed. + type: list + elements: str + sourcehostcategory: + description: Source host category. + choices: ['all'] + type: str + sourcehostgroup: + description: + - List of source host group names to assign. + - If an empty list is passed all assigned source host groups are removed from the rule. + - If option is omitted source host groups are not checked or changed. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str + user: + description: + - List of user names to assign. + - If an empty list is passed all assigned users are removed from the rule. + - If option is omitted users are not checked or changed. + type: list + elements: str + usercategory: + description: User category. + choices: ['all'] + type: str + usergroup: + description: + - List of user group names to assign. + - If an empty list is passed all assigned user groups are removed from the rule. + - If option is omitted user groups are not checked or changed.
+ type: list + elements: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure rule to allow all users to access any host from any host + community.general.ipa_hbacrule: + name: allow_all + description: Allow all users to access any host from any host + hostcategory: all + servicecategory: all + usercategory: all + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure rule with certain limitations + community.general.ipa_hbacrule: + name: allow_all_developers_access_to_db + description: Allow all developers to access any database from any host + hostgroup: + - db-server + usergroup: + - developers + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure rule is absent + community.general.ipa_hbacrule: + name: rule_to_be_deleted + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +hbacrule: + description: HBAC rule as returned by IPA API. + returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class HBACRuleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HBACRuleIPAClient, self).__init__(module, host, port, protocol) + + def hbacrule_find(self, name): + return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name}) + + def hbacrule_add(self, name, item): + return self._post_json(method='hbacrule_add', name=name, item=item) + + def hbacrule_mod(self, name, item): + return self._post_json(method='hbacrule_mod', name=name, item=item) + + def hbacrule_del(self, name): + return self._post_json(method='hbacrule_del', name=name) + + def hbacrule_add_host(self, name, item): + return self._post_json(method='hbacrule_add_host', name=name, item=item) + + def hbacrule_remove_host(self, name, item): + return self._post_json(method='hbacrule_remove_host', name=name, item=item) + + def hbacrule_add_service(self, name, item): + return self._post_json(method='hbacrule_add_service', name=name, item=item) + + def hbacrule_remove_service(self, name, item): + return self._post_json(method='hbacrule_remove_service', name=name, item=item) + + def hbacrule_add_user(self, name, item): + return self._post_json(method='hbacrule_add_user', name=name, item=item) + + def hbacrule_remove_user(self, name, item): + return self._post_json(method='hbacrule_remove_user', name=name, item=item) + + def hbacrule_add_sourcehost(self, name, item): + return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item) + + def hbacrule_remove_sourcehost(self, name, item): + return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item) + + +def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None, + sourcehostcategory=None, + usercategory=None): + data = {} + if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if servicecategory is not None: + 
data['servicecategory'] = servicecategory + if sourcehostcategory is not None: + data['sourcehostcategory'] = sourcehostcategory + if usercategory is not None: + data['usercategory'] = usercategory + return data + + +def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule): + return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + + ipa_version = client.get_ipa_version() + if state in ['present', 'enabled']: + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = True + else: + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'FALSE' + else: + ipaenabledflag = False + + host = module.params['host'] + hostcategory = module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + service = module.params['service'] + servicecategory = module.params['servicecategory'] + servicegroup = module.params['servicegroup'] + sourcehost = module.params['sourcehost'] + sourcehostcategory = module.params['sourcehostcategory'] + sourcehostgroup = module.params['sourcehostgroup'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_hbacrule = get_hbacrule_dict(description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + servicecategory=servicecategory, + sourcehostcategory=sourcehostcategory, + usercategory=usercategory) + ipa_hbacrule = client.hbacrule_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_hbacrule: + changed = True + if not module.check_mode: + ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule) + else: + diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hbacrule.get(key) + client.hbacrule_mod(name=name, item=data) + + if host is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host, + client.hbacrule_add_host, + client.hbacrule_remove_host, 'host') or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup, + client.hbacrule_add_host, + client.hbacrule_remove_host, 'hostgroup') or changed + + if service is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service, + client.hbacrule_add_service, + client.hbacrule_remove_service, 'hbacsvc') or changed + + if servicegroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []), + servicegroup, + client.hbacrule_add_service, + client.hbacrule_remove_service, 'hbacsvcgroup') or changed + + if sourcehost is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, 'host') or changed + + if sourcehostgroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, 'hostgroup') or changed + + if user is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user, + client.hbacrule_add_user, + client.hbacrule_remove_user, 'user') or changed + + if 
usergroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup, + client.hbacrule_add_user, + client.hbacrule_remove_user, 'group') or changed + else: + if ipa_hbacrule: + changed = True + if not module.check_mode: + client.hbacrule_del(name=name) + + return changed, client.hbacrule_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + host=dict(type='list', elements='str'), + hostcategory=dict(type='str', choices=['all']), + hostgroup=dict(type='list', elements='str'), + service=dict(type='list', elements='str'), + servicecategory=dict(type='str', choices=['all']), + servicegroup=dict(type='list', elements='str'), + sourcehost=dict(type='list', elements='str'), + sourcehostcategory=dict(type='str', choices=['all']), + sourcehostgroup=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + user=dict(type='list', elements='str'), + usercategory=dict(type='str', choices=['all']), + usergroup=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True + ) + + client = HBACRuleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, hbacrule = ensure(module, client) + module.exit_json(changed=changed, hbacrule=hbacrule) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py deleted file mode 120000 index 65637ce920..0000000000 --- a/plugins/modules/ipa_host.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_host.py \ No newline at end of file diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py new file mode 100644 index 0000000000..568550f5a8 --- /dev/null +++ b/plugins/modules/ipa_host.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_host +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA host +description: + - Add, modify and delete an IPA host using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + fqdn: + description: + - Fully qualified domain name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - A description of this host. + type: str + force: + description: + - Force host name even if not in DNS. + required: false + type: bool + ip_address: + description: + - Add the host to DNS with this IP address. + type: str + mac_address: + description: + - List of hardware MAC address(es) of this host. + - If option is omitted MAC addresses are not checked nor changed. + - If an empty list is passed all assigned MAC addresses are removed. + - MAC addresses that are already assigned but not passed are removed.
+ aliases: ["macaddress"] + type: list + elements: str + ns_host_location: + description: + - Host location (for example V(Lab 2)). + aliases: ["nshostlocation"] + type: str + ns_hardware_platform: + description: + - Host hardware platform (for example V(Lenovo T61")). + aliases: ["nshardwareplatform"] + type: str + ns_os_version: + description: + - Host operating system and version (for example V(Fedora 9)). + aliases: ["nsosversion"] + type: str + user_certificate: + description: + - List of Base-64 encoded server certificates. + - If option is omitted certificates are not checked nor changed. + - If an empty list is passed all assigned certificates are removed. + - Certificates already assigned but not passed are removed. + aliases: ["usercertificate"] + type: list + elements: str + state: + description: + - State to ensure. + default: present + choices: ["absent", "disabled", "enabled", "present"] + type: str + force_creation: + description: + - Create host if O(state=disabled) or O(state=enabled) but not present. + default: true + type: bool + version_added: 9.5.0 + update_dns: + description: + - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS. + - This option has no effect for states other than V(absent). + type: bool + random_password: + description: Generate a random password to be used in bulk enrollment. + type: bool +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure host is present + community.general.ipa_host: + name: host01.example.com + description: Example host + ip_address: 192.168.0.123 + ns_host_location: Lab + ns_os_version: CentOS 7 + ns_hardware_platform: Lenovo T61 + mac_address: + - "08:00:27:E3:B1:2D" + - "52:54:00:BD:97:1E" + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Generate a random password for bulk enrolment + community.general.ipa_host: + name: host01.example.com + description: Example host + ip_address: 192.168.0.123 + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + random_password: true + +- name: Ensure host is disabled + community.general.ipa_host: + name: host01.example.com + state: disabled + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that all user certificates are removed + community.general.ipa_host: + name: host01.example.com + user_certificate: [] + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure host is absent + community.general.ipa_host: + name: host01.example.com + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure host and its DNS record is absent + community.general.ipa_host: + name: host01.example.com + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + update_dns: true +""" + +RETURN = r""" +host: + description: Host as returned by IPA API. + returned: always + type: dict +host_diff: + description: List of options that differ and would be changed. 
+ returned: if check mode and a difference is found + type: list +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class HostIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HostIPAClient, self).__init__(module, host, port, protocol) + + def host_show(self, name): + return self._post_json(method='host_show', name=name) + + def host_find(self, name): + return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) + + def host_add(self, name, host): + return self._post_json(method='host_add', name=name, item=host) + + def host_mod(self, name, host): + return self._post_json(method='host_mod', name=name, item=host) + + def host_del(self, name, update_dns): + return self._post_json(method='host_del', name=name, item={'updatedns': update_dns}) + + def host_disable(self, name): + return self._post_json(method='host_disable', name=name) + + +def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, + ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): + data = {} + if description is not None: + data['description'] = description + if force is not None: + data['force'] = force + if ip_address is not None: + data['ip_address'] = ip_address + if ns_host_location is not None: + data['nshostlocation'] = ns_host_location + if ns_hardware_platform is not None: + data['nshardwareplatform'] = ns_hardware_platform + if ns_os_version is not None: + data['nsosversion'] = ns_os_version + if user_certificate is not None: + data['usercertificate'] = [{"__base64__": item} for item in user_certificate] + if mac_address is not None: + data['macaddress'] = mac_address + if random_password is not None: + data['random'] = random_password + return data + + +def get_host_diff(client, ipa_host, module_host): + non_updateable_keys = ['force', 'ip_address'] + if not module_host.get('random'): + non_updateable_keys.append('random') + for key in non_updateable_keys: + if key in module_host: + del module_host[key] + + return client.get_diff(ipa_data=ipa_host, module_data=module_host) + + +def ensure(module, client): + name = module.params['fqdn'] + state = module.params['state'] + force_creation = module.params['force_creation'] + + ipa_host = client.host_find(name=name) + module_host = get_host_dict(description=module.params['description'], + force=module.params['force'], + ip_address=module.params['ip_address'], + ns_host_location=module.params['ns_host_location'], + ns_hardware_platform=module.params['ns_hardware_platform'], + ns_os_version=module.params['ns_os_version'], + user_certificate=module.params['user_certificate'], + mac_address=module.params['mac_address'], + random_password=module.params['random_password'], + ) + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_host and (force_creation or state == 'present'): + changed = True + if not module.check_mode: + # OTP password generated by FreeIPA is visible only for host_add command + # so, return directly from here. 
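+ # Returning here also skips the host_find call at the end of ensure(), whose result + # would not include the generated one-time password.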
return changed, client.host_add(name=name, host=module_host) + else: + if state in ['disabled', 'enabled'] and not ipa_host: + module.fail_json(msg="No host with name " + name + " found") + + diff = get_host_diff(client, ipa_host, module_host) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_host.get(key) + if "usercertificate" not in data: + data["usercertificate"] = [ + cert['__base64__'] for cert in ipa_host.get("usercertificate", []) + ] + ipa_host_show = client.host_show(name=name) + if ipa_host_show.get('has_keytab', True) and (state == 'disabled' or module.params.get('random_password')): + client.host_disable(name=name) + return changed, client.host_mod(name=name, host=data) + elif state == 'absent': + if ipa_host: + changed = True + update_dns = module.params.get('update_dns', False) + if not module.check_mode: + client.host_del(name=name, update_dns=update_dns) + + return changed, client.host_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + description=dict(type='str'), + fqdn=dict(type='str', required=True, aliases=['name']), + force=dict(type='bool'), + ip_address=dict(type='str'), + ns_host_location=dict(type='str', aliases=['nshostlocation']), + ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']), + ns_os_version=dict(type='str', aliases=['nsosversion']), + user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'), + mac_address=dict(type='list', aliases=['macaddress'], elements='str'), + update_dns=dict(type='bool'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + random_password=dict(type='bool', no_log=False), + force_creation=dict(type='bool', default=True) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = HostIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, host = ensure(module, client) + module.exit_json(changed=changed, host=host) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py deleted file mode 120000 index cc7e4efb98..0000000000 --- a/plugins/modules/ipa_hostgroup.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_hostgroup.py \ No newline at end of file diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py new file mode 100644 index 0000000000..f4f40d0bd9 --- /dev/null +++ b/plugins/modules/ipa_hostgroup.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_hostgroup +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA host-group +description: + - Add, modify and delete an IPA host-group using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + append: + description: + - If V(true), add the listed O(host) to the O(hostgroup). + - If V(false), only the listed O(host) is set in O(hostgroup), removing any other hosts.
+ default: false + type: bool + version_added: 6.6.0 + cn: + description: + - Name of host-group. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - Description. + type: str + host: + description: + - List of hosts that belong to the host-group. + - If an empty list is passed all hosts are removed from the group. + - If option is omitted hosts are not checked nor changed. + - If option is passed all assigned hosts that are not passed are unassigned from the group. + type: list + elements: str + hostgroup: + description: + - List of host-groups that belong to that host-group. + - If an empty list is passed all host-groups are removed from the group. + - If option is omitted host-groups are not checked nor changed. + - If option is passed all assigned hostgroups that are not passed are unassigned from the group. + type: list + elements: str + state: + description: + - State to ensure. + - V("absent") and V("disabled") give the same results. + - V("present") and V("enabled") give the same results. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure host-group databases is present + community.general.ipa_hostgroup: + name: databases + state: present + host: + - db.example.com + hostgroup: + - mysql-server + - oracle-server + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure host-group databases is absent + community.general.ipa_hostgroup: + name: databases + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +hostgroup: + description: Hostgroup as returned by IPA API.
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class HostGroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HostGroupIPAClient, self).__init__(module, host, port, protocol) + + def hostgroup_find(self, name): + return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) + + def hostgroup_add(self, name, item): + return self._post_json(method='hostgroup_add', name=name, item=item) + + def hostgroup_mod(self, name, item): + return self._post_json(method='hostgroup_mod', name=name, item=item) + + def hostgroup_del(self, name): + return self._post_json(method='hostgroup_del', name=name) + + def hostgroup_add_member(self, name, item): + return self._post_json(method='hostgroup_add_member', name=name, item=item) + + def hostgroup_add_host(self, name, item): + return self.hostgroup_add_member(name=name, item={'host': item}) + + def hostgroup_add_hostgroup(self, name, item): + return self.hostgroup_add_member(name=name, item={'hostgroup': item}) + + def hostgroup_remove_member(self, name, item): + return self._post_json(method='hostgroup_remove_member', name=name, item=item) + + def hostgroup_remove_host(self, name, item): + return self.hostgroup_remove_member(name=name, item={'host': item}) + + def hostgroup_remove_hostgroup(self, name, item): + return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) + + +def get_hostgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup): + return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + append = module.params['append'] + + ipa_hostgroup = client.hostgroup_find(name=name) + module_hostgroup = get_hostgroup_dict(description=module.params['description']) + + changed = False + if state in ['present', 'enabled']: + if not ipa_hostgroup: + changed = True + if not module.check_mode: + ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup) + else: + diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hostgroup.get(key) + client.hostgroup_mod(name=name, item=data) + + if host is not None: + changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), + [item.lower() for item in host], + client.hostgroup_add_host, + client.hostgroup_remove_host, + append=append) or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []), + [item.lower() for item in hostgroup], + client.hostgroup_add_hostgroup, + client.hostgroup_remove_hostgroup, + append=append) or changed + + else: + if ipa_hostgroup: + changed = True + if not module.check_mode: + client.hostgroup_del(name=name) + + return changed, client.hostgroup_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + host=dict(type='list', 
elements='str'), + hostgroup=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + append=dict(type='bool', default=False)) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = HostGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, hostgroup = ensure(module, client) + module.exit_json(changed=changed, hostgroup=hostgroup) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py deleted file mode 120000 index a29ef04412..0000000000 --- a/plugins/modules/ipa_otpconfig.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_otpconfig.py \ No newline at end of file diff --git a/plugins/modules/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py new file mode 100644 index 0000000000..a260cc7a13 --- /dev/null +++ b/plugins/modules/ipa_otpconfig.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# Copyright (c) 2021, Ansible Project +# Heavily influenced from Fran Fitzpatrick ipa_config module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_otpconfig +author: justchris1 (@justchris1) +short_description: Manage FreeIPA OTP Configuration Settings +version_added: 2.5.0 +description: + - Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + ipatokentotpauthwindow: + description: TOTP authentication window in seconds. + aliases: ["totpauthwindow"] + type: int + ipatokentotpsyncwindow: + description: TOTP synchronization window in seconds. + aliases: ["totpsyncwindow"] + type: int + ipatokenhotpauthwindow: + description: HOTP authentication window in number of hops. + aliases: ["hotpauthwindow"] + type: int + ipatokenhotpsyncwindow: + description: HOTP synchronization window in hops. + aliases: ["hotpsyncwindow"] + type: int +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure the TOTP authentication window is set to 300 seconds + community.general.ipa_otpconfig: + ipatokentotpauthwindow: '300' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the TOTP synchronization window is set to 86400 seconds + community.general.ipa_otpconfig: + ipatokentotpsyncwindow: '86400' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the HOTP authentication window is set to 10 hops + community.general.ipa_otpconfig: + ipatokenhotpauthwindow: '10' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the HOTP synchronization window is set to 100 hops + community.general.ipa_otpconfig: + ipatokenhotpsyncwindow: '100' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret +""" + +RETURN = r""" +otpconfig: + description: OTP configuration as returned by IPA API. 
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class OTPConfigIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(OTPConfigIPAClient, self).__init__(module, host, port, protocol) + + def otpconfig_show(self): + return self._post_json(method='otpconfig_show', name=None) + + def otpconfig_mod(self, name, item): + return self._post_json(method='otpconfig_mod', name=name, item=item) + + +def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, + ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None): + + config = {} + if ipatokentotpauthwindow is not None: + config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow) + if ipatokentotpsyncwindow is not None: + config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow) + if ipatokenhotpauthwindow is not None: + config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow) + if ipatokenhotpsyncwindow is not None: + config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow) + + return config + + +def get_otpconfig_diff(client, ipa_config, module_config): + return client.get_diff(ipa_data=ipa_config, module_data=module_config) + + +def ensure(module, client): + module_otpconfig = get_otpconfig_dict( + ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'), + ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'), + ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'), + ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'), + ) + ipa_otpconfig = client.otpconfig_show() + diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig) + + changed = False + new_otpconfig = {} + for module_key in diff: + if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None): + changed = True + new_otpconfig.update({module_key: module_otpconfig.get(module_key)}) + + if changed and not module.check_mode: + client.otpconfig_mod(name=None, item=new_otpconfig) + + return changed, client.otpconfig_show() + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False), + ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False), + ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False), + ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = OTPConfigIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, otpconfig = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, otpconfig=otpconfig) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py deleted file mode 120000 index cb0dbaf569..0000000000 --- a/plugins/modules/ipa_otptoken.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_otptoken.py \ No newline at end of file diff --git 
a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py new file mode 100644 index 0000000000..388ccfb4d9 --- /dev/null +++ b/plugins/modules/ipa_otptoken.py @@ -0,0 +1,530 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_otptoken +author: justchris1 (@justchris1) +short_description: Manage FreeIPA OTPs +version_added: 2.5.0 +description: + - Add, modify, and delete One Time Passwords in IPA. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + uniqueid: + description: Unique ID of the token in IPA. + required: true + aliases: ["name"] + type: str + newuniqueid: + description: If specified, the unique ID specified is changed to this. + type: str + otptype: + description: + - Type of OTP. + - B(Note:) Cannot be modified after OTP is created. + type: str + choices: [totp, hotp] + secretkey: + description: + - Token secret (Base64). + - If OTP is created and this is not specified, a random secret is generated by IPA. + - B(Note:) Cannot be modified after OTP is created. + type: str + description: + description: Description of the token (informational only). + type: str + owner: + description: Assigned user of the token. + type: str + enabled: + description: Mark the token as enabled (default V(true)). + default: true + type: bool + notbefore: + description: + - First date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, V(20180121182022) allows the token to be used starting on 21 January 2018 at 18:20:22. + type: str + notafter: + description: + - Last date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, V(20200121182022) allows the token to be used until 21 January 2020 at 18:20:22. + type: str + vendor: + description: Token vendor name (informational only). + type: str + model: + description: Token model (informational only). + type: str + serial: + description: Token serial (informational only). + type: str + state: + description: State to ensure. + choices: ['present', 'absent'] + default: 'present' + type: str + algorithm: + description: + - Token hash algorithm. + - B(Note:) Cannot be modified after OTP is created. + choices: ['sha1', 'sha256', 'sha384', 'sha512'] + type: str + digits: + description: + - Number of digits each token code has. + - B(Note:) Cannot be modified after OTP is created. + choices: [6, 8] + type: int + offset: + description: + - TOTP token / IPA server time difference. + - B(Note:) Cannot be modified after OTP is created. + type: int + interval: + description: + - Length of TOTP token code validity in seconds. + - B(Note:) Cannot be modified after OTP is created. + type: int + counter: + description: + - Initial counter for the HOTP token. + - B(Note:) Cannot be modified after OTP is created. 
+ type: int +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Create a totp for pinky, allowing the IPA server to generate using defaults + community.general.ipa_otptoken: + uniqueid: Token123 + otptype: totp + owner: pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Create an 8 digit hotp for pinky with sha256 and specified validity times + community.general.ipa_otptoken: + uniqueid: Token123 + enabled: true + otptype: hotp + digits: 8 + secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9 + algorithm: sha256 + notbefore: 20180121182123 + notafter: 20220121182123 + owner: pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Update Token123 to indicate a vendor, model, serial number (info only), and description + community.general.ipa_otptoken: + uniqueid: Token123 + vendor: Acme + model: acme101 + serial: SerialNumber1 + description: Acme OTP device + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Disable Token123 + community.general.ipa_otptoken: + uniqueid: Token123 + enabled: false + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Rename Token123 to TokenABC and enable it + community.general.ipa_otptoken: + uniqueid: Token123 + newuniqueid: TokenABC + enabled: true + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +otptoken: + description: OTP Token as returned by IPA API. + returned: always + type: dict +""" + +import base64 +import traceback + +from ansible.module_utils.basic import AnsibleModule, sanitize_keys +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class OTPTokenIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(OTPTokenIPAClient, self).__init__(module, host, port, protocol) + + def otptoken_find(self, name): + return self._post_json(method='otptoken_find', name=None, item={'all': True, + 'ipatokenuniqueid': name, + 'timelimit': '0', + 'sizelimit': '0'}) + + def otptoken_add(self, name, item): + return self._post_json(method='otptoken_add', name=name, item=item) + + def otptoken_mod(self, name, item): + return self._post_json(method='otptoken_mod', name=name, item=item) + + def otptoken_del(self, name): + return self._post_json(method='otptoken_del', name=name) + + +def base64_to_base32(base64_string): + """Converts base64 string to base32 string""" + b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii') + return b32_string + + +def base32_to_base64(base32_string): + """Converts base32 string to base64 string""" + b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii') + return b64_string + + +def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None, + enabled=None, notbefore=None, notafter=None, vendor=None, + model=None, serial=None, algorithm=None, digits=None, offset=None, + interval=None, counter=None): + """Create the dictionary of settings passed in""" + + otptoken = {} + if uniqueid is not None: + otptoken[ansible_to_ipa['uniqueid']] = uniqueid + if newuniqueid is not None: + otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid + if otptype is not None: +
otptoken[ansible_to_ipa['otptype']] = otptype.upper() + if secretkey is not None: + # For some unknown reason, while IPA returns the secret in base64, + # it wants the secret passed in as base32. This makes comparison more + # difficult (does 'current' equal 'new'?). Moreover, this may cause + # subtle issues in a playbook, as the output is encoded differently + # than it would be if passed in as a parameter. For these reasons, + # have the module standardize on base64 input (as parameter) and + # output (from IPA). + otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey) + if description is not None: + otptoken[ansible_to_ipa['description']] = description + if owner is not None: + otptoken[ansible_to_ipa['owner']] = owner + if enabled is not None: + # The IPA attribute is ipatokendisabled, the inverse of the module's enabled flag. + otptoken[ansible_to_ipa['enabled']] = not enabled + if notbefore is not None: + otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z' + if notafter is not None: + otptoken[ansible_to_ipa['notafter']] = notafter + 'Z' + if vendor is not None: + otptoken[ansible_to_ipa['vendor']] = vendor + if model is not None: + otptoken[ansible_to_ipa['model']] = model + if serial is not None: + otptoken[ansible_to_ipa['serial']] = serial + if algorithm is not None: + otptoken[ansible_to_ipa['algorithm']] = algorithm + if digits is not None: + otptoken[ansible_to_ipa['digits']] = str(digits) + if offset is not None: + otptoken[ansible_to_ipa['offset']] = str(offset) + if interval is not None: + otptoken[ansible_to_ipa['interval']] = str(interval) + if counter is not None: + otptoken[ansible_to_ipa['counter']] = str(counter) + + return otptoken + + +def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): + """Transform the output received from IPA to a friendlier format before + it is returned to the user. IPA returns even simple strings as a list + of strings. It also returns bools and ints as strings. This function + cleans that up before return. + """ + updated_otptoken = ipa_otptoken + + # Used to hold values that will be sanitized from the output as no_log, + # for the case where secretkey is not specified to the module but + # is passed back by IPA. + sanitize_strings = set() + + # Rename the IPA parameters to the friendlier ansible module names for them + for ipa_parameter in ipa_to_ansible: + if ipa_parameter in ipa_otptoken: + updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter] + updated_otptoken.pop(ipa_parameter) +
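The base64/base32 mismatch called out in the comment above is easy to check in isolation. A minimal standalone sketch of the round trip, mirroring the module's two helper functions (the secret value is made up):

```python
import base64

def base64_to_base32(b64_string):
    """Convert a base64-encoded secret to base32 (what IPA accepts on input)."""
    return base64.b32encode(base64.b64decode(b64_string)).decode('ascii')

def base32_to_base64(b32_string):
    """Convert a base32-encoded secret back to base64 (what IPA reports back)."""
    return base64.b64encode(base64.b32decode(b32_string)).decode('ascii')

secret_b64 = base64.b64encode(b'not-a-real-secret').decode('ascii')
# The conversion is lossless, which is what makes standardizing on base64 safe.
assert base32_to_base64(base64_to_base32(secret_b64)) == secret_b64
```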
+ # Change the type from IPA's list of strings to the appropriate return value type + # based on the field. By default, assume they should be strings. + for ansible_parameter in ansible_to_ipa: + if ansible_parameter in updated_otptoken: + if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1: + if ansible_parameter in ['digits', 'offset', 'interval', 'counter']: + updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0]) + elif ansible_parameter == 'enabled': + updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0]) + else: + updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0] + + if 'secretkey' in updated_otptoken: + if isinstance(updated_otptoken['secretkey'], dict): + if '__base64__' in updated_otptoken['secretkey']: + sanitize_strings.add(updated_otptoken['secretkey']['__base64__']) + b64key = updated_otptoken['secretkey']['__base64__'] + updated_otptoken.pop('secretkey') + updated_otptoken['secretkey'] = b64key + sanitize_strings.add(b64key) + elif '__base32__' in updated_otptoken['secretkey']: + sanitize_strings.add(updated_otptoken['secretkey']['__base32__']) + b32key = updated_otptoken['secretkey']['__base32__'] + b64key = base32_to_base64(b32key) + updated_otptoken.pop('secretkey') + updated_otptoken['secretkey'] = b64key + sanitize_strings.add(b32key) + sanitize_strings.add(b64key) + + return updated_otptoken, sanitize_strings + + +def validate_modifications(ansible_to_ipa, module, ipa_otptoken, + module_otptoken, unmodifiable_after_creation): + """Checks to see if the requested modifications are valid. Some elements + cannot be modified after initial creation. However, we still want to + validate arguments that are specified, but are not different from what + is currently set on the server. + """ + + modifications_valid = True + + for parameter in unmodifiable_after_creation: + if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken: + mod_value = module_otptoken[ansible_to_ipa[parameter]] + + # For some unknown reason, the returns from IPA put almost all + # values in a list, even though passing them in a list (even of + # length 1) will be rejected. The module values for all elements + # other than type (totp or hotp) have this happen. + if parameter == 'otptype': + ipa_value = ipa_otptoken[ansible_to_ipa[parameter]] + else: + if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1: + module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " + + "of length 1. Please open a bug report for the module.")) + if parameter == 'secretkey': + # We stored the secret key in base32 since we had assumed that would need to + # be the format if we were contacting IPA to create it. However, we are + # now comparing it against what is already set in the IPA server, so convert + # back to base64 for comparison. + mod_value = base32_to_base64(mod_value) +
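The coercion loop above undoes IPA's habit of wrapping every scalar in a single-element list of strings. The same unwrapping in a standalone sketch (the field names follow the module's return keys; the values are illustrative):

```python
raw = {'digits': ['8'], 'interval': ['30'], 'description': ['Acme OTP device']}
for key, value in list(raw.items()):
    if isinstance(value, list) and len(value) == 1:
        # Numeric token fields come back as strings and need an int() cast.
        raw[key] = int(value[0]) if key in ('digits', 'offset', 'interval', 'counter') else value[0]
print(raw)  # {'digits': 8, 'interval': 30, 'description': 'Acme OTP device'}
```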
+ # The secret key is even more specific: it is returned inside the + # list as a dict, under the __base64__ entry of the IPA response. + if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]: + ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__'] + elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]: + b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__'] + b64key = base32_to_base64(b32key) + ipa_value = b64key + else: + ipa_value = None + else: + ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0] + + if mod_value != ipa_value: + modifications_valid = False + fail_message = ("Parameter '" + parameter + "' cannot be changed once " + "the OTP is created, and the requested value specified here (" + str(mod_value) + ") differs from what is set in the IPA server (" + str(ipa_value) + ")") + module.fail_json(msg=fail_message) + + return modifications_valid + + +def ensure(module, client): + # dict to map from ansible parameter names to attribute names + # used by IPA (which are not so friendly). + ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid', + 'newuniqueid': 'rename', + 'otptype': 'type', + 'secretkey': 'ipatokenotpkey', + 'description': 'description', + 'owner': 'ipatokenowner', + 'enabled': 'ipatokendisabled', + 'notbefore': 'ipatokennotbefore', + 'notafter': 'ipatokennotafter', + 'vendor': 'ipatokenvendor', + 'model': 'ipatokenmodel', + 'serial': 'ipatokenserial', + 'algorithm': 'ipatokenotpalgorithm', + 'digits': 'ipatokenotpdigits', + 'offset': 'ipatokentotpclockoffset', + 'interval': 'ipatokentotptimestep', + 'counter': 'ipatokenhotpcounter'} + + # Create inverse dictionary for mapping return values + ipa_to_ansible = {v: k for k, v in ansible_to_ipa.items()} + + unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', + 'digits', 'offset', 'interval', 'counter'] + state = module.params['state'] + uniqueid = module.params['uniqueid'] + + module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa, + uniqueid=module.params.get('uniqueid'), + newuniqueid=module.params.get('newuniqueid'), + otptype=module.params.get('otptype'), + secretkey=module.params.get('secretkey'), + description=module.params.get('description'), + owner=module.params.get('owner'), + enabled=module.params.get('enabled'), + notbefore=module.params.get('notbefore'), + notafter=module.params.get('notafter'), + vendor=module.params.get('vendor'), + model=module.params.get('model'), + serial=module.params.get('serial'), + algorithm=module.params.get('algorithm'), + digits=module.params.get('digits'), + offset=module.params.get('offset'), + interval=module.params.get('interval'), + counter=module.params.get('counter')) + + ipa_otptoken = client.otptoken_find(name=uniqueid) + + if ansible_to_ipa['newuniqueid'] in module_otptoken: + # Check to see if the new unique id is already in use + ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']]) + if ipa_otptoken_new: + module.fail_json(msg=("Requested rename through newuniqueid to " + + module_otptoken[ansible_to_ipa['newuniqueid']] + + " failed because the new unique id is already in use")) + + changed = False + if state == 'present': + if not ipa_otptoken: + changed = True + if not module.check_mode: + # It would not make sense to have a rename after creation, so if the user + # specified a newuniqueid, just replace the uniqueid with the updated one + # before creation + if ansible_to_ipa['newuniqueid'] in module_otptoken: + module_otptoken[ansible_to_ipa['uniqueid']] = module_otptoken[ansible_to_ipa['newuniqueid']] +
uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']] + module_otptoken.pop(ansible_to_ipa['newuniqueid']) + + # IPA wants the unique id in the first position and not as a key/value pair. + # Get rid of it from the otptoken dict and just specify it in the name field + # for otptoken_add. + if ansible_to_ipa['uniqueid'] in module_otptoken: + module_otptoken.pop(ansible_to_ipa['uniqueid']) + + module_otptoken['all'] = True + ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken) + else: + if not validate_modifications(ansible_to_ipa, module, ipa_otptoken, + module_otptoken, unmodifiable_after_creation): + module.fail_json(msg="Modifications requested in module are not valid") + + # IPA will reject 'modifications' that do not actually modify anything + # if any of the unmodifiable elements are specified. Explicitly + # get rid of them here. They were not different, or else we + # would have failed in validate_modifications. + for x in unmodifiable_after_creation: + if ansible_to_ipa[x] in module_otptoken: + module_otptoken.pop(ansible_to_ipa[x]) + + diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken) + if len(diff) > 0: + changed = True + if not module.check_mode: + + # IPA wants the unique id in the first position and not as a key/value pair. + # Get rid of it from the otptoken dict and just specify it in the name field + # for otptoken_mod. + if ansible_to_ipa['uniqueid'] in module_otptoken: + module_otptoken.pop(ansible_to_ipa['uniqueid']) + + module_otptoken['all'] = True + ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken) + else: + if ipa_otptoken: + changed = True + if not module.check_mode: + client.otptoken_del(name=uniqueid) +
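The sanitation step that follows relies on sanitize_keys from ansible.module_utils.basic, the same helper the module imports at the top. A minimal sketch of why it is needed: secrets can leak through key names as well as values, for example when a base32 string ends up as a dict key (the secret below is made up, and the exact redaction placeholder depends on the ansible-core version):

```python
from ansible.module_utils.basic import sanitize_keys

result = {'uniqueid': 'Token123', 'MADEUPSECRET234===': 'stale-entry'}
# Any key matching a no_log string is redacted before the result is returned.
print(sanitize_keys(obj=result, no_log_strings={'MADEUPSECRET234==='}))
```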
+ # Transform the output to use ansible keywords (not the IPA keywords) and + # sanitize any secret key values in the output. + ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible) + module.no_log_values = module.no_log_values.union(sanitize_strings) + sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values) + return changed, sanitized_otptoken + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True), + newuniqueid=dict(type='str'), + otptype=dict(type='str', choices=['totp', 'hotp']), + secretkey=dict(type='str', no_log=True), + description=dict(type='str'), + owner=dict(type='str'), + enabled=dict(type='bool', default=True), + notbefore=dict(type='str'), + notafter=dict(type='str'), + vendor=dict(type='str'), + model=dict(type='str'), + serial=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']), + digits=dict(type='int', choices=[6, 8]), + offset=dict(type='int'), + interval=dict(type='int'), + counter=dict(type='int')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = OTPTokenIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, otptoken = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, otptoken=otptoken) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_pwpolicy.py b/plugins/modules/ipa_pwpolicy.py deleted file mode 120000 index b35fc0fae4..0000000000 --- a/plugins/modules/ipa_pwpolicy.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_pwpolicy.py \ No newline at end of file diff --git a/plugins/modules/ipa_pwpolicy.py b/plugins/modules/ipa_pwpolicy.py new file mode 100644 index 0000000000..10650a49dd --- /dev/null +++ b/plugins/modules/ipa_pwpolicy.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_pwpolicy +author: Adralioh (@adralioh) +short_description: Manage FreeIPA password policies +description: + - Add, modify, or delete a password policy using the IPA API. +version_added: 2.0.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + group: + description: + - Name of the group that the policy applies to. + - If omitted, the global policy is used. + aliases: ["name"] + type: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + maxpwdlife: + description: Maximum password lifetime (in days). + type: str + minpwdlife: + description: Minimum password lifetime (in hours). + type: str + historylength: + description: + - Number of previous passwords that are remembered. + - Users cannot reuse remembered passwords. + type: str + minclasses: + description: Minimum number of character classes. + type: str + minlength: + description: Minimum password length. + type: str + priority: + description: + - Priority of the policy. + - High number means lower priority. + - Required when O(group) is not the global policy.
+ type: str + maxfailcount: + description: Maximum number of consecutive failures before lockout. + type: str + failinterval: + description: Period (in seconds) after which the number of failed login attempts is reset. + type: str + lockouttime: + description: Period (in seconds) for which users are locked out. + type: str + gracelimit: + description: Maximum number of LDAP logins after password expiration. + type: int + version_added: 8.2.0 + maxrepeat: + description: Maximum number of allowed same consecutive characters in the new password. + type: int + version_added: 8.2.0 + maxsequence: + description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of + length 5 is V(12345). + type: int + version_added: 8.2.0 + dictcheck: + description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib). + type: bool + version_added: 8.2.0 + usercheck: + description: Check whether the password (with possible modifications) contains the user name in some form (if the name + has > 3 characters). + type: bool + version_added: 8.2.0 +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Modify the global password policy + community.general.ipa_pwpolicy: + maxpwdlife: '90' + minpwdlife: '1' + historylength: '8' + minclasses: '3' + minlength: '16' + maxfailcount: '6' + failinterval: '60' + lockouttime: '600' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure the password policy for the group admins is present + community.general.ipa_pwpolicy: + group: admins + state: present + maxpwdlife: '60' + minpwdlife: '24' + historylength: '16' + minclasses: '4' + priority: '10' + minlength: '6' + maxfailcount: '4' + failinterval: '600' + lockouttime: '1200' + gracelimit: 3 + maxrepeat: 3 + maxsequence: 3 + dictcheck: true + usercheck: true + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that the group sysops does not have a unique password policy + community.general.ipa_pwpolicy: + group: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +pwpolicy: + description: Password policy as returned by IPA API. 
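An aside on the request shape: get_pwpolicy_dict (further below) reduces to a sparse mapping from module options to Kerberos policy attributes, so IPA only receives what the user actually set. A trimmed standalone sketch of that idea (the attribute names are the ones the module uses; the values are examples):

```python
def build_pwpolicy(maxpwdlife=None, minpwdlife=None, historylength=None):
    mapping = {
        'krbmaxpwdlife': maxpwdlife,
        'krbminpwdlife': minpwdlife,
        'krbpwdhistorylength': historylength,
    }
    # Drop unset options so IPA only receives attributes that should change.
    return {attr: value for attr, value in mapping.items() if value is not None}

print(build_pwpolicy(maxpwdlife='90', historylength='8'))
# {'krbmaxpwdlife': '90', 'krbpwdhistorylength': '8'}
```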
+ returned: always + type: dict + sample: + cn: ['admins'] + cospriority: ['10'] + dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' + krbmaxpwdlife: ['60'] + krbminpwdlife: ['24'] + krbpwdfailurecountinterval: ['600'] + krbpwdhistorylength: ['16'] + krbpwdlockoutduration: ['1200'] + krbpwdmaxfailure: ['4'] + krbpwdmindiffchars: ['4'] + objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class PwPolicyIPAClient(IPAClient): + '''The global policy will be selected when `name` is `None`''' + def __init__(self, module, host, port, protocol): + super(PwPolicyIPAClient, self).__init__(module, host, port, protocol) + + def pwpolicy_find(self, name): + if name is None: + # Manually set the cn to the global policy because pwpolicy_find will return a random + # different policy if cn is `None` + name = 'global_policy' + return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) + + def pwpolicy_add(self, name, item): + return self._post_json(method='pwpolicy_add', name=name, item=item) + + def pwpolicy_mod(self, name, item): + return self._post_json(method='pwpolicy_mod', name=name, item=item) + + def pwpolicy_del(self, name): + return self._post_json(method='pwpolicy_del', name=name) + + +def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, + minlength=None, priority=None, maxfailcount=None, failinterval=None, + lockouttime=None, gracelimit=None, maxrepeat=None, maxsequence=None, dictcheck=None, usercheck=None): + pwpolicy = {} + pwpolicy_options = { + 'krbmaxpwdlife': maxpwdlife, + 'krbminpwdlife': minpwdlife, + 'krbpwdhistorylength': historylength, + 'krbpwdmindiffchars': minclasses, + 'krbpwdminlength': minlength, + 'cospriority': priority, + 'krbpwdmaxfailure': maxfailcount, + 'krbpwdfailurecountinterval': failinterval, + 'krbpwdlockoutduration': lockouttime, + 'passwordgracelimit': gracelimit, + 'ipapwdmaxrepeat': maxrepeat, + 'ipapwdmaxsequence': maxsequence, + } + + pwpolicy_boolean_options = { + 'ipapwddictcheck': dictcheck, + 'ipapwdusercheck': usercheck, + } + + for option, value in pwpolicy_options.items(): + if value is not None: + pwpolicy[option] = to_native(value) + + for option, value in pwpolicy_boolean_options.items(): + if value is not None: + pwpolicy[option] = bool(value) + + return pwpolicy + + +def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy): + return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['group'] + + module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'), + minpwdlife=module.params.get('minpwdlife'), + historylength=module.params.get('historylength'), + minclasses=module.params.get('minclasses'), + minlength=module.params.get('minlength'), + priority=module.params.get('priority'), + maxfailcount=module.params.get('maxfailcount'), + failinterval=module.params.get('failinterval'), + lockouttime=module.params.get('lockouttime'), + gracelimit=module.params.get('gracelimit'), + maxrepeat=module.params.get('maxrepeat'), + maxsequence=module.params.get('maxsequence'), + dictcheck=module.params.get('dictcheck'), + usercheck=module.params.get('usercheck'), + ) + + ipa_pwpolicy = 
client.pwpolicy_find(name=name) + + changed = False + if state == 'present': + if not ipa_pwpolicy: + changed = True + if not module.check_mode: + ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy) + else: + diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy) + if len(diff) > 0: + changed = True + if not module.check_mode: + ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy) + else: + if ipa_pwpolicy: + changed = True + if not module.check_mode: + client.pwpolicy_del(name=name) + + return changed, ipa_pwpolicy + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(group=dict(type='str', aliases=['name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + maxpwdlife=dict(type='str'), + minpwdlife=dict(type='str'), + historylength=dict(type='str'), + minclasses=dict(type='str'), + minlength=dict(type='str'), + priority=dict(type='str'), + maxfailcount=dict(type='str'), + failinterval=dict(type='str'), + lockouttime=dict(type='str'), + gracelimit=dict(type='int'), + maxrepeat=dict(type='int'), + maxsequence=dict(type='int'), + dictcheck=dict(type='bool'), + usercheck=dict(type='bool'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = PwPolicyIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, pwpolicy = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, pwpolicy=pwpolicy) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py deleted file mode 120000 index 667601a829..0000000000 --- a/plugins/modules/ipa_role.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_role.py \ No newline at end of file diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py new file mode 100644 index 0000000000..130036ebd1 --- /dev/null +++ b/plugins/modules/ipa_role.py @@ -0,0 +1,306 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_role +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA role +description: + - Add, modify and delete a role within FreeIPA server using FreeIPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Role name. + - Cannot be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + description: + description: + - A description of this role-group. + type: str + group: + description: + - List of group names assigned to this role. + - If an empty list is passed all assigned groups are unassigned from the role. + - If option is omitted groups are not checked nor changed. + - If option is passed all assigned groups that are not passed are unassigned from the role. + type: list + elements: str + host: + description: + - List of host names to assign. + - If an empty list is passed all assigned hosts are unassigned from the role. + - If option is omitted hosts are not checked nor changed.
+ - If option is passed all assigned hosts that are not passed are unassigned from the role. + type: list + elements: str + hostgroup: + description: + - List of host group names to assign. + - If an empty list is passed all assigned host groups are removed from the role. + - If option is omitted host groups are not checked nor changed. + - If option is passed all assigned hostgroups that are not passed are unassigned from the role. + type: list + elements: str + privilege: + description: + - List of privileges granted to the role. + - If an empty list is passed all assigned privileges are removed. + - If option is omitted privileges are not checked nor changed. + - If option is passed all assigned privileges that are not passed are removed. + type: list + elements: str + service: + description: + - List of service names to assign. + - If an empty list is passed all assigned services are removed from the role. + - If option is omitted services are not checked nor changed. + - If option is passed all assigned services that are not passed are removed from the role. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + user: + description: + - List of user names to assign. + - If an empty list is passed all assigned users are removed from the role. + - If option is omitted users are not checked nor changed. + type: list + elements: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure role is present + community.general.ipa_role: + name: dba + description: Database Administrators + state: present + user: + - pinky + - brain + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure role with certain details + community.general.ipa_role: + name: another-role + description: Just another role + group: + - editors + host: + - host01.example.com + hostgroup: + - hostgroup01 + privilege: + - Group Administrators + - User Administrators + service: + - service01 + +- name: Ensure role is absent + community.general.ipa_role: + name: dba + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +role: + description: Role as returned by IPA API. 
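The list options documented above all share one tri-state convention: an omitted option leaves membership untouched, an empty list clears it, and a non-empty list is enforced exactly. A tiny illustrative sketch of that decision:

```python
def plan(current, option):
    if option is None:      # option omitted: not checked nor changed
        return current
    return list(option)     # [] clears membership, a non-empty list is enforced

print(plan(['dba'], None))     # ['dba']  (unchanged)
print(plan(['dba'], []))       # []       (cleared)
print(plan(['dba'], ['dev']))  # ['dev']  (replaced)
```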
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class RoleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(RoleIPAClient, self).__init__(module, host, port, protocol) + + def role_find(self, name): + return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) + + def role_add(self, name, item): + return self._post_json(method='role_add', name=name, item=item) + + def role_mod(self, name, item): + return self._post_json(method='role_mod', name=name, item=item) + + def role_del(self, name): + return self._post_json(method='role_del', name=name) + + def role_add_member(self, name, item): + return self._post_json(method='role_add_member', name=name, item=item) + + def role_add_group(self, name, item): + return self.role_add_member(name=name, item={'group': item}) + + def role_add_host(self, name, item): + return self.role_add_member(name=name, item={'host': item}) + + def role_add_hostgroup(self, name, item): + return self.role_add_member(name=name, item={'hostgroup': item}) + + def role_add_service(self, name, item): + return self.role_add_member(name=name, item={'service': item}) + + def role_add_user(self, name, item): + return self.role_add_member(name=name, item={'user': item}) + + def role_remove_member(self, name, item): + return self._post_json(method='role_remove_member', name=name, item=item) + + def role_remove_group(self, name, item): + return self.role_remove_member(name=name, item={'group': item}) + + def role_remove_host(self, name, item): + return self.role_remove_member(name=name, item={'host': item}) + + def role_remove_hostgroup(self, name, item): + return self.role_remove_member(name=name, item={'hostgroup': item}) + + def role_remove_service(self, name, item): + return self.role_remove_member(name=name, item={'service': item}) + + def role_remove_user(self, name, item): + return self.role_remove_member(name=name, item={'user': item}) + + def role_add_privilege(self, name, item): + return self._post_json(method='role_add_privilege', name=name, item={'privilege': item}) + + def role_remove_privilege(self, name, item): + return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item}) + + +def get_role_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_role_diff(client, ipa_role, module_role): + return client.get_diff(ipa_data=ipa_role, module_data=module_role) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + group = module.params['group'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + privilege = module.params['privilege'] + service = module.params['service'] + user = module.params['user'] + + module_role = get_role_dict(description=module.params['description']) + ipa_role = client.role_find(name=name) + + changed = False + if state == 'present': + if not ipa_role: + changed = True + if not module.check_mode: + ipa_role = client.role_add(name=name, item=module_role) + else: + diff = get_role_diff(client, ipa_role, module_role) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_role.get(key) + client.role_mod(name=name, item=data) + + if 
group is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group, + client.role_add_group, + client.role_remove_group) or changed + if host is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host, + client.role_add_host, + client.role_remove_host) or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup, + client.role_add_hostgroup, + client.role_remove_hostgroup) or changed + + if privilege is not None: + changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege, + client.role_add_privilege, + client.role_remove_privilege) or changed + if service is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service, + client.role_add_service, + client.role_remove_service) or changed + if user is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user, + client.role_add_user, + client.role_remove_user) or changed + + else: + if ipa_role: + changed = True + if not module.check_mode: + client.role_del(name) + + return changed, client.role_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + group=dict(type='list', elements='str'), + host=dict(type='list', elements='str'), + hostgroup=dict(type='list', elements='str'), + privilege=dict(type='list', elements='str'), + service=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + user=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = RoleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, role = ensure(module, client) + module.exit_json(changed=changed, role=role) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main()
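Every member block in ensure() above delegates to client.modify_if_diff, which reconciles current membership against the requested list. A hypothetical standalone version of that pattern (the real helper lives in the collection's ipa module_utils):

```python
def reconcile(name, current, wanted, add, remove):
    to_add = [item for item in wanted if item not in current]
    to_remove = [item for item in current if item not in wanted]
    for item in to_add:
        add(name, item)
    for item in to_remove:
        remove(name, item)
    return bool(to_add or to_remove)  # True when membership changed

changed = reconcile('dba', current=['pinky'], wanted=['pinky', 'brain'],
                    add=lambda name, user: print('add', user, 'to', name),
                    remove=lambda name, user: print('remove', user, 'from', name))
```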
diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py deleted file mode 120000 index 3e786f163b..0000000000 --- a/plugins/modules/ipa_service.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_service.py \ No newline at end of file diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py new file mode 100644 index 0000000000..089d49fc88 --- /dev/null +++ b/plugins/modules/ipa_service.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_service +author: Cédric Parent (@cprh) +short_description: Manage FreeIPA service +description: + - Add and delete an IPA service using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + krbcanonicalname: + description: + - Principal of the service. + - Cannot be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + hosts: + description: + - Defines the list of C(ManagedBy) hosts. + required: false + type: list + elements: str + force: + description: + - Force principal name even if host is not in DNS. + required: false + type: bool + skip_host_check: + description: + - Force service to be created even when host object does not exist to manage it. + - This is only used on creation, not for updating existing services. + required: false + type: bool + default: false + version_added: 4.7.0 + state: + description: State to ensure. + required: false + default: present + choices: ["absent", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure service is present + community.general.ipa_service: + name: http/host01.example.com + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure service is absent + community.general.ipa_service: + name: http/host01.example.com + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Change the list of managing hosts + community.general.ipa_service: + name: http/host01.example.com + hosts: + - host01.example.com + - host02.example.com + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +service: + description: Service as returned by IPA API. + returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class ServiceIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(ServiceIPAClient, self).__init__(module, host, port, protocol) + + def service_find(self, name): + return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name}) + + def service_add(self, name, service): + return self._post_json(method='service_add', name=name, item=service) + + def service_mod(self, name, service): + return self._post_json(method='service_mod', name=name, item=service) + + def service_del(self, name): + return self._post_json(method='service_del', name=name) + + def service_disable(self, name): + return self._post_json(method='service_disable', name=name) + + def service_add_host(self, name, item): + return self._post_json(method='service_add_host', name=name, item={'host': item}) + + def service_remove_host(self, name, item): + return self._post_json(method='service_remove_host', name=name, item={'host': item}) + + +def get_service_dict(force=None, krbcanonicalname=None, skip_host_check=None): + data = {} + if force is not None: + data['force'] = force + if krbcanonicalname is not None: + data['krbcanonicalname'] = krbcanonicalname + if skip_host_check is not None: + data['skip_host_check'] = skip_host_check + return data + + +def get_service_diff(client, ipa_host, module_service): + non_updateable_keys = ['force', 'krbcanonicalname', 'skip_host_check'] + for key in non_updateable_keys: + if key in module_service: + del module_service[key] + + return client.get_diff(ipa_data=ipa_host, module_data=module_service) + + +def ensure(module, client): + name = module.params['krbcanonicalname'] + state = module.params['state'] + hosts = module.params['hosts'] + + ipa_service = client.service_find(name=name) + module_service = get_service_dict(force=module.params['force'],
skip_host_check=module.params['skip_host_check']) + changed = False + # 'state' can only be 'present' or 'absent' per the argument spec. + if state == 'present': + if not ipa_service: + changed = True + if not module.check_mode: + client.service_add(name=name, service=module_service) + else: + diff = get_service_diff(client, ipa_service, module_service) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_service.get(key) + client.service_mod(name=name, service=data) + if hosts is not None: + if 'managedby_host' in ipa_service: + for host in ipa_service['managedby_host']: + if host not in hosts: + if not module.check_mode: + client.service_remove_host(name=name, item=host) + changed = True + for host in hosts: + if host not in ipa_service['managedby_host']: + if not module.check_mode: + client.service_add_host(name=name, item=host) + changed = True + else: + for host in hosts: + if not module.check_mode: + client.service_add_host(name=name, item=host) + changed = True + + else: + if ipa_service: + changed = True + if not module.check_mode: + client.service_del(name=name) + + return changed, client.service_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + krbcanonicalname=dict(type='str', required=True, aliases=['name']), + force=dict(type='bool'), + skip_host_check=dict(type='bool', default=False), + hosts=dict(type='list', elements='str'), + state=dict(type='str', default='present', + choices=['present', 'absent'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = ServiceIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, service = ensure(module, client) + module.exit_json(changed=changed, service=service) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main()
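The two managedby_host loops in ipa_service's ensure() above are the same reconciliation written imperatively; as set operations the intent is clearer (the hostnames are examples):

```python
current = {'host01.example.com'}
wanted = {'host01.example.com', 'host02.example.com'}
print(sorted(wanted - current))  # hosts to add    -> ['host02.example.com']
print(sorted(current - wanted))  # hosts to remove -> []
```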
diff --git a/plugins/modules/ipa_subca.py b/plugins/modules/ipa_subca.py deleted file mode 120000 index a26a6c904b..0000000000 --- a/plugins/modules/ipa_subca.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_subca.py \ No newline at end of file diff --git a/plugins/modules/ipa_subca.py b/plugins/modules/ipa_subca.py new file mode 100644 index 0000000000..1442f9d7ea --- /dev/null +++ b/plugins/modules/ipa_subca.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_subca +author: Abhijeet Kasurde (@Akasurde) +short_description: Manage FreeIPA Lightweight Sub Certificate Authorities +description: + - Add, modify, enable, disable, and delete IPA Lightweight Sub Certificate Authorities using the IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + subca_name: + description: + - The Sub Certificate Authority name which needs to be managed. + required: true + aliases: ["name"] + type: str + subca_subject: + description: + - The Sub Certificate Authority's Subject, for example V(CN=SampleSubCA1,O=testrelm.test). + required: true + type: str + subca_desc: + description: + - The Sub Certificate Authority's description. + type: str + state: + description: + - State to ensure. + - States V(disabled) and V(enabled) are available for FreeIPA version 4.4.2 and onwards. + required: false + default: present + choices: ["absent", "disabled", "enabled", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure IPA Sub CA is present + community.general.ipa_subca: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + subca_name: AnsibleSubCA1 + subca_subject: 'CN=AnsibleSubCA1,O=example.com' + subca_desc: Ansible Sub CA + +- name: Ensure that IPA Sub CA is removed + community.general.ipa_subca: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: absent + subca_name: AnsibleSubCA1 + +- name: Ensure that IPA Sub CA is disabled + community.general.ipa_subca: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: disabled + subca_name: AnsibleSubCA1 +""" + +RETURN = r""" +subca: + description: IPA Sub CA record as returned by IPA API. + returned: always + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class SubCAIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SubCAIPAClient, self).__init__(module, host, port, protocol) + + def subca_find(self, subca_name): + return self._post_json(method='ca_find', name=subca_name, item=None) + + def subca_add(self, subca_name=None, subject_dn=None, details=None): + item = dict(ipacasubjectdn=subject_dn) + subca_desc = details.get('description', None) + if subca_desc is not None: + item.update(description=subca_desc) + return self._post_json(method='ca_add', name=subca_name, item=item) + + def subca_mod(self, subca_name=None, diff=None, details=None): + item = get_subca_dict(details) + for change in diff: + update_detail = dict() + if item[change] is not None: + update_detail.update(setattr="{0}={1}".format(change, item[change])) + self._post_json(method='ca_mod', name=subca_name, item=update_detail) + + def subca_del(self, subca_name=None): + return self._post_json(method='ca_del', name=subca_name) + + def subca_disable(self, subca_name=None): + return self._post_json(method='ca_disable', name=subca_name) + + def subca_enable(self, subca_name=None): + return self._post_json(method='ca_enable', name=subca_name) + + +def get_subca_dict(details=None): + module_subca = dict() + if details['description'] is not None: + module_subca['description'] = details['description'] + if details['subca_subject'] is not None: + module_subca['ipacasubjectdn'] = details['subca_subject'] + return module_subca + + +def get_subca_diff(client, ipa_subca, module_subca): + details = get_subca_dict(module_subca) + return client.get_diff(ipa_data=ipa_subca, module_data=details) + + +def ensure(module, client): + subca_name = module.params['subca_name'] + subca_subject_dn = module.params['subca_subject'] + subca_desc = module.params['subca_desc'] + + state = module.params['state'] + + ipa_subca = client.subca_find(subca_name) + module_subca = dict(description=subca_desc, + subca_subject=subca_subject_dn) + + changed = False + if state == 'present': + if not ipa_subca: + changed = True + if not
module.check_mode: + client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca) + else: + diff = get_subca_diff(client, ipa_subca, module_subca) + # IPA does not allow modifying the Sub CA's subject DN, + # so skip it for now. + if 'ipacasubjectdn' in diff: + diff.remove('ipacasubjectdn') + del module_subca['subca_subject'] + + if len(diff) > 0: + changed = True + if not module.check_mode: + client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca) + elif state == 'absent': + if ipa_subca: + changed = True + if not module.check_mode: + client.subca_del(subca_name=subca_name) + elif state == 'disabled': + ipa_version = client.get_ipa_version() + if LooseVersion(ipa_version) < LooseVersion('4.4.2'): + module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to " + "version greater than 4.4.2" % ipa_version) + if ipa_subca: + changed = True + if not module.check_mode: + client.subca_disable(subca_name=subca_name) + elif state == 'enabled': + ipa_version = client.get_ipa_version() + if LooseVersion(ipa_version) < LooseVersion('4.4.2'): + module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to " + "version greater than 4.4.2" % ipa_version) + if ipa_subca: + changed = True + if not module.check_mode: + client.subca_enable(subca_name=subca_name) + + return changed, client.subca_find(subca_name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']), + subca_subject=dict(type='str', required=True), + subca_desc=dict(type='str'), + state=dict(type='str', default='present', + choices=['present', 'absent', 'enabled', 'disabled']),) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True,) + + client = SubCAIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, record = ensure(module, client) + module.exit_json(changed=changed, record=record) + except Exception as exc: + module.fail_json(msg=to_native(exc)) + + +if __name__ == '__main__': + main()
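The 4.4.2 gate in ipa_subca's ensure() above uses LooseVersion from the collection's vendored module_utils, which compares dotted version strings component by component. A standalone sketch of the check (the version value is an example; the module reads it from the server):

```python
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

ipa_version = '4.4.0'
if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
    print("IPA server %s does not support ca_disable/ca_enable" % ipa_version)
```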
diff --git a/plugins/modules/ipa_sudocmd.py b/plugins/modules/ipa_sudocmd.py deleted file mode 120000 index f9c0e1d894..0000000000 --- a/plugins/modules/ipa_sudocmd.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_sudocmd.py \ No newline at end of file diff --git a/plugins/modules/ipa_sudocmd.py b/plugins/modules/ipa_sudocmd.py new file mode 100644 index 0000000000..1aabeb07a3 --- /dev/null +++ b/plugins/modules/ipa_sudocmd.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_sudocmd +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command +description: + - Add, modify or delete sudo command within FreeIPA server using FreeIPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + sudocmd: + description: + - Sudo command. + aliases: ['name'] + required: true + type: str + description: + description: + - A description of this command. + type: str + state: + description: State to ensure. + default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure sudo command exists + community.general.ipa_sudocmd: + name: su + description: Allow running su via sudo + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure sudo command does not exist + community.general.ipa_sudocmd: + name: su + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +sudocmd: + description: Sudo command as returned by IPA API. + returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class SudoCmdIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoCmdIPAClient, self).__init__(module, host, port, protocol) + + def sudocmd_find(self, name): + return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name}) + + def sudocmd_add(self, name, item): + return self._post_json(method='sudocmd_add', name=name, item=item) + + def sudocmd_mod(self, name, item): + return self._post_json(method='sudocmd_mod', name=name, item=item) + + def sudocmd_del(self, name): + return self._post_json(method='sudocmd_del', name=name) + + +def get_sudocmd_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd): + return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd) + + +def ensure(module, client): + name = module.params['sudocmd'] + state = module.params['state'] + + module_sudocmd = get_sudocmd_dict(description=module.params['description']) + ipa_sudocmd = client.sudocmd_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmd: + changed = True + if not module.check_mode: + client.sudocmd_add(name=name, item=module_sudocmd) + else: + diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_sudocmd.get(key) + client.sudocmd_mod(name=name, item=data) + else: + if ipa_sudocmd: + changed = True + if not module.check_mode: + client.sudocmd_del(name=name) + + return changed, client.sudocmd_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(description=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + sudocmd=dict(type='str', required=True, aliases=['name'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = SudoCmdIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudocmd = ensure(module, client) + module.exit_json(changed=changed, sudocmd=sudocmd) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_sudocmdgroup.py
b/plugins/modules/ipa_sudocmdgroup.py deleted file mode 120000 index 9a12a22f53..0000000000 --- a/plugins/modules/ipa_sudocmdgroup.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_sudocmdgroup.py \ No newline at end of file diff --git a/plugins/modules/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py new file mode 100644 index 0000000000..af3f4c9547 --- /dev/null +++ b/plugins/modules/ipa_sudocmdgroup.py @@ -0,0 +1,183 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_sudocmdgroup +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command group +description: + - Add, modify or delete sudo command group within IPA server using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Sudo Command Group. + aliases: ['name'] + required: true + type: str + description: + description: + - Group description. + type: str + state: + description: State to ensure. + default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str + sudocmd: + description: + - List of sudo commands to assign to the group. + - If an empty list is passed all assigned commands are removed from the group. + - If option is omitted sudo commands are not checked nor changed. + type: list + elements: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure sudo command group exists + community.general.ipa_sudocmdgroup: + name: group01 + description: Group of important commands + sudocmd: + - su + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure sudo command group does not exist + community.general.ipa_sudocmdgroup: + name: group01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +sudocmdgroup: + description: Sudo command group as returned by IPA API. 
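All of the thin client methods in these ipa_* modules funnel into IPAClient._post_json, which wraps a FreeIPA JSON-RPC call: a method name plus a list of positional arguments and a dict of options. A rough sketch of the payload shape (illustrative only, not the collection's actual transport code):

```python
import json

def rpc_body(method, name, item):
    args = [name] if name is not None else []
    return json.dumps({'method': method, 'params': [args, item or {}], 'id': 0})

print(rpc_body('sudocmdgroup_add', 'group01', {'description': 'Group of important commands'}))
```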
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class SudoCmdGroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol) + + def sudocmdgroup_find(self, name): + return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) + + def sudocmdgroup_add(self, name, item): + return self._post_json(method='sudocmdgroup_add', name=name, item=item) + + def sudocmdgroup_mod(self, name, item): + return self._post_json(method='sudocmdgroup_mod', name=name, item=item) + + def sudocmdgroup_del(self, name): + return self._post_json(method='sudocmdgroup_del', name=name) + + def sudocmdgroup_add_member(self, name, item): + return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) + + def sudocmdgroup_add_member_sudocmd(self, name, item): + return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) + + def sudocmdgroup_remove_member(self, name, item): + return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) + + def sudocmdgroup_remove_member_sudocmd(self, name, item): + return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) + + +def get_sudocmdgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup): + return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + sudocmd = module.params['sudocmd'] + + module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description']) + ipa_sudocmdgroup = client.sudocmdgroup_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmdgroup: + changed = True + if not module.check_mode: + ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup) + else: + diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_sudocmdgroup.get(key) + client.sudocmdgroup_mod(name=name, item=data) + + if sudocmd is not None: + changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd, + client.sudocmdgroup_add_member_sudocmd, + client.sudocmdgroup_remove_member_sudocmd) or changed + else: + if ipa_sudocmdgroup: + changed = True + if not module.check_mode: + client.sudocmdgroup_del(name=name) + + return changed, client.sudocmdgroup_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + sudocmd=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = SudoCmdGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) +
changed, sudocmdgroup = ensure(module, client) + module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py deleted file mode 120000 index 9a43496f48..0000000000 --- a/plugins/modules/ipa_sudorule.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_sudorule.py \ No newline at end of file diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py new file mode 100644 index 0000000000..96ea6bfa30 --- /dev/null +++ b/plugins/modules/ipa_sudorule.py @@ -0,0 +1,515 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_sudorule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo rule +description: + - Add, modify or delete sudo rule within IPA server using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Canonical name. + - Cannot be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + cmdcategory: + description: + - Command category the rule applies to. + choices: ['all'] + type: str + cmd: + description: + - List of commands assigned to the rule. + - If an empty list is passed all commands are removed from the rule. + - If option is omitted commands are not checked nor changed. + type: list + elements: str + cmdgroup: + description: + - List of command groups assigned to the rule. + - If an empty list is passed all command groups are removed from the rule. + - If option is omitted command groups are not checked nor changed. + type: list + elements: str + version_added: 2.0.0 + deny_cmd: + description: + - List of denied commands assigned to the rule. + - If an empty list is passed all commands are removed from the rule. + - If option is omitted commands are not checked nor changed. + type: list + elements: str + version_added: 8.1.0 + deny_cmdgroup: + description: + - List of denied command groups assigned to the rule. + - If an empty list is passed all command groups are removed from the rule. + - If option is omitted command groups are not checked nor changed. + type: list + elements: str + version_added: 8.1.0 + description: + description: + - Description of the sudo rule. + type: str + host: + description: + - List of hosts assigned to the rule. + - If an empty list is passed all hosts are removed from the rule. + - If option is omitted hosts are not checked nor changed. + - Option O(hostcategory) must be omitted to assign hosts. + type: list + elements: str + hostcategory: + description: + - Host category the rule applies to. + - If V(all) is passed one must omit O(host) and O(hostgroup). + - Options O(host) and O(hostgroup) must be omitted to assign V(all). + choices: ['all'] + type: str + hostgroup: + description: + - List of host groups assigned to the rule. + - If an empty list is passed all host groups are removed from the rule. + - If option is omitted host groups are not checked nor changed. + - Option O(hostcategory) must be omitted to assign host groups. + type: list + elements: str + runasextusers: + description: + - List of external RunAs users.
+    type: list
+    elements: str
+    version_added: 2.3.0
+  runasusercategory:
+    description:
+      - RunAs User category the rule applies to.
+    choices: ['all']
+    type: str
+  runasgroupcategory:
+    description:
+      - RunAs Group category the rule applies to.
+    choices: ['all']
+    type: str
+  sudoopt:
+    description:
+      - List of options to add to the sudo rule.
+    type: list
+    elements: str
+  user:
+    description:
+      - List of users assigned to the rule.
+      - If an empty list is passed all users are removed from the rule.
+      - If option is omitted users are not checked nor changed.
+    type: list
+    elements: str
+  usercategory:
+    description:
+      - User category the rule applies to.
+    choices: ['all']
+    type: str
+  usergroup:
+    description:
+      - List of user groups assigned to the rule.
+      - If an empty list is passed all user groups are removed from the rule.
+      - If option is omitted user groups are not checked nor changed.
+    type: list
+    elements: str
+  state:
+    description: State to ensure.
+    default: present
+    choices: ['absent', 'disabled', 'enabled', 'present']
+    type: str
+extends_documentation_fragment:
+  - community.general.ipa.documentation
+  - community.general.ipa.connection_notes
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Ensure a sudo rule is present that allows everybody to execute any command on any host without being asked
+    for a password.
+  community.general.ipa_sudorule:
+    name: sudo_all_nopasswd
+    cmdcategory: all
+    description: Allow running every command with sudo without a password
+    hostcategory: all
+    sudoopt:
+      - '!authenticate'
+    usercategory: all
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+  community.general.ipa_sudorule:
+    name: sudo_dev_dbserver
+    description: Allow developers to run every command with sudo on all database servers
+    cmdcategory: all
+    host:
+      - db01.example.com
+    hostgroup:
+      - db-server
+    sudoopt:
+      - '!authenticate'
+    usergroup:
+      - developers
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure user group operations can run any command that is part of operations-cmdgroup on any host as user root.
+  community.general.ipa_sudorule:
+    name: sudo_operations_all
+    description: Allow operators to run any command that is part of operations-cmdgroup on any host as user root.
+    cmdgroup:
+      - operations-cmdgroup
+    hostcategory: all
+    runasextusers:
+      - root
+    sudoopt:
+      - '!authenticate'
+    usergroup:
+      - operators
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+"""
+
+RETURN = r"""
+sudorule:
+  description: Sudorule as returned by IPA.
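The category options (O(cmdcategory), O(hostcategory), O(usercategory), and the RunAs variants) are mutually exclusive with their explicit list counterparts, and a category that is currently set to V(all) has to be cleared before explicit members can be attached. A minimal sketch of that reset-then-add flow, following the category_changed() helper defined further down (names simplified, check mode omitted):

```python
# Sketch only: mirrors the order of operations in ensure()/category_changed().
def attach_explicit_members(client, rule, category_name, members, add_member):
    changed = False
    if rule.get(category_name) == ['all']:   # IPA wraps single values in lists
        # the category must be unset before explicit members are accepted
        client.sudorule_mod(name=rule['cn'][0], item={category_name: None})
        changed = True
    if members:
        add_member(name=rule['cn'][0], item=members)
        changed = True
    return changed
```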
+ returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class SudoRuleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) + + def sudorule_find(self, name): + return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) + + def sudorule_add(self, name, item): + return self._post_json(method='sudorule_add', name=name, item=item) + + def sudorule_add_runasuser(self, name, item): + return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item}) + + def sudorule_remove_runasuser(self, name, item): + return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item}) + + def sudorule_mod(self, name, item): + return self._post_json(method='sudorule_mod', name=name, item=item) + + def sudorule_del(self, name): + return self._post_json(method='sudorule_del', name=name) + + def sudorule_add_option(self, name, item): + return self._post_json(method='sudorule_add_option', name=name, item=item) + + def sudorule_add_option_ipasudoopt(self, name, item): + return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) + + def sudorule_remove_option(self, name, item): + return self._post_json(method='sudorule_remove_option', name=name, item=item) + + def sudorule_remove_option_ipasudoopt(self, name, item): + return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) + + def sudorule_add_host(self, name, item): + return self._post_json(method='sudorule_add_host', name=name, item=item) + + def sudorule_add_host_host(self, name, item): + return self.sudorule_add_host(name=name, item={'host': item}) + + def sudorule_add_host_hostgroup(self, name, item): + return self.sudorule_add_host(name=name, item={'hostgroup': item}) + + def sudorule_remove_host(self, name, item): + return self._post_json(method='sudorule_remove_host', name=name, item=item) + + def sudorule_remove_host_host(self, name, item): + return self.sudorule_remove_host(name=name, item={'host': item}) + + def sudorule_remove_host_hostgroup(self, name, item): + return self.sudorule_remove_host(name=name, item={'hostgroup': item}) + + def sudorule_add_allow_command(self, name, item): + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) + + def sudorule_add_allow_command_group(self, name, item): + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) + + def sudorule_add_deny_command(self, name, item): + return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmd': item}) + + def sudorule_add_deny_command_group(self, name, item): + return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmdgroup': item}) + + def sudorule_remove_allow_command(self, name, item): + return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) + + def sudorule_add_user(self, name, item): + return self._post_json(method='sudorule_add_user', name=name, item=item) + + def sudorule_add_user_user(self, name, item): + return self.sudorule_add_user(name=name, item={'user': item}) + + def 
sudorule_add_user_group(self, name, item): + return self.sudorule_add_user(name=name, item={'group': item}) + + def sudorule_remove_user(self, name, item): + return self._post_json(method='sudorule_remove_user', name=name, item=item) + + def sudorule_remove_user_user(self, name, item): + return self.sudorule_remove_user(name=name, item={'user': item}) + + def sudorule_remove_user_group(self, name, item): + return self.sudorule_remove_user(name=name, item={'group': item}) + + +def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, + runasgroupcategory=None, runasusercategory=None): + data = {} + if cmdcategory is not None: + data['cmdcategory'] = cmdcategory + if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if usercategory is not None: + data['usercategory'] = usercategory + if runasusercategory is not None: + data['ipasudorunasusercategory'] = runasusercategory + if runasgroupcategory is not None: + data['ipasudorunasgroupcategory'] = runasgroupcategory + return data + + +def category_changed(module, client, category_name, ipa_sudorule): + if ipa_sudorule.get(category_name, None) == ['all']: + if not module.check_mode: + # cn is returned as list even with only a single value. + client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) + return True + return False + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + cmd = module.params['cmd'] + cmdgroup = module.params['cmdgroup'] + cmdcategory = module.params['cmdcategory'] + deny_cmd = module.params['deny_cmd'] + deny_cmdgroup = module.params['deny_cmdgroup'] + host = module.params['host'] + hostcategory = module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + runasusercategory = module.params['runasusercategory'] + runasgroupcategory = module.params['runasgroupcategory'] + runasextusers = module.params['runasextusers'] + + ipa_version = client.get_ipa_version() + if state in ['present', 'enabled']: + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = True + else: + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'FALSE' + else: + ipaenabledflag = False + + sudoopt = module.params['sudoopt'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, + description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + usercategory=usercategory, + runasusercategory=runasusercategory, + runasgroupcategory=runasgroupcategory) + ipa_sudorule = client.sudorule_find(name=name) + + changed = False + if state in ['present', 'disabled', 'enabled']: + if not ipa_sudorule: + changed = True + if not module.check_mode: + ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) + else: + diff = client.get_diff(ipa_sudorule, module_sudorule) + if len(diff) > 0: + changed = True + if not module.check_mode: + if 'hostcategory' in diff: + if ipa_sudorule.get('memberhost_host', None) is not None: + client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) + if ipa_sudorule.get('memberhost_hostgroup', None) is not None: + client.sudorule_remove_host_hostgroup(name=name, + 
item=ipa_sudorule.get('memberhost_hostgroup')) + + client.sudorule_mod(name=name, item=module_sudorule) + + if cmd is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_allow_command(name=name, item=cmd) + + if cmdgroup is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_allow_command_group(name=name, item=cmdgroup) + + if deny_cmd is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_deny_command(name=name, item=deny_cmd) + + if deny_cmdgroup is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_deny_command_group(name=name, item=deny_cmdgroup) + + if runasusercategory is not None: + changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed + + if runasgroupcategory is not None: + changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed + + if host is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, + client.sudorule_add_host_host, + client.sudorule_remove_host_host) or changed + + if hostgroup is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, + client.sudorule_add_host_hostgroup, + client.sudorule_remove_host_hostgroup) or changed + if sudoopt is not None: + # client.modify_if_diff does not work as each option must be removed/added by its own + ipa_list = ipa_sudorule.get('ipasudoopt', []) + module_list = sudoopt + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_remove_option_ipasudoopt(name, item) + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_add_option_ipasudoopt(name, item) + + if runasextusers is not None: + ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', []) + diff = list(set(ipa_sudorule_run_as_user) - set(runasextusers)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_remove_runasuser(name=name, item=item) + diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_add_runasuser(name=name, item=item) + + if user is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, + client.sudorule_add_user_user, + client.sudorule_remove_user_user) or changed + if usergroup is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, + client.sudorule_add_user_group, + client.sudorule_remove_user_group) or changed + else: + if ipa_sudorule: + changed = True + if not module.check_mode: + client.sudorule_del(name) + + return changed, 
client.sudorule_find(name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cmd=dict(type='list', elements='str'), + cmdgroup=dict(type='list', elements='str'), + cmdcategory=dict(type='str', choices=['all']), + cn=dict(type='str', required=True, aliases=['name']), + deny_cmd=dict(type='list', elements='str'), + deny_cmdgroup=dict(type='list', elements='str'), + description=dict(type='str'), + host=dict(type='list', elements='str'), + hostcategory=dict(type='str', choices=['all']), + hostgroup=dict(type='list', elements='str'), + runasusercategory=dict(type='str', choices=['all']), + runasgroupcategory=dict(type='str', choices=['all']), + sudoopt=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + user=dict(type='list', elements='str'), + usercategory=dict(type='str', choices=['all']), + usergroup=dict(type='list', elements='str'), + runasextusers=dict(type='list', elements='str')) + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['cmdcategory', 'cmd'], + ['cmdcategory', 'deny_cmd'], + ['cmdcategory', 'cmdgroup'], + ['cmdcategory', 'deny_cmdgroup'], + ['hostcategory', 'host'], + ['hostcategory', 'hostgroup'], + ['usercategory', 'user'], + ['usercategory', 'usergroup']], + supports_check_mode=True) + + client = SudoRuleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudorule = ensure(module, client) + module.exit_json(changed=changed, sudorule=sudorule) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py deleted file mode 120000 index eaaf781cdb..0000000000 --- a/plugins/modules/ipa_user.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_user.py \ No newline at end of file diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py new file mode 100644 index 0000000000..4fbef766c5 --- /dev/null +++ b/plugins/modules/ipa_user.py @@ -0,0 +1,408 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_user +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA users +description: + - Add, modify and delete user within IPA server. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + displayname: + description: Display name. + type: str + update_password: + description: + - Set password for a user. + type: str + default: 'always' + choices: [always, on_create] + givenname: + description: + - First name. + - If user does not exist and O(state=present), the usage of O(givenname) is required. + type: str + krbpasswordexpiration: + description: + - Date at which the user password expires. + - In the format YYYYMMddHHmmss. + - For example V(20180121182022) expires on 21 January 2018 at 18:20:22. + type: str + loginshell: + description: Login shell. + type: str + mail: + description: + - List of mail addresses assigned to the user. + - If an empty list is passed all assigned email addresses are deleted. 
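This three-state convention (option omitted: leave the attribute alone; empty list: remove everything; non-empty list: enforce exactly that set) appears in every list option of these IPA modules, and it reduces to plain set differences, in the spirit of the modify_if_diff() helper they all call (the sketch below is illustrative, not the helper's real code):

```python
# Illustrative reconciliation following the documented three-state semantics.
def reconcile_list(current, wanted, add, remove):
    if wanted is None:                   # option omitted: do not touch anything
        return False
    to_add = sorted(set(wanted) - set(current))
    to_remove = sorted(set(current) - set(wanted))   # wanted == [] removes all
    if to_add:
        add(to_add)
    if to_remove:
        remove(to_remove)
    return bool(to_add or to_remove)


# wanted=['pinky@acme.com'] against current=['old@example.com'] adds one
# address and removes the other; wanted=[] would simply remove everything.
reconcile_list(['old@example.com'], ['pinky@acme.com'], print, print)
```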
+ - If None is passed email addresses are not checked nor changed. + type: list + elements: str + password: + description: + - Password for a user. + - It is not set for an existing user unless O(update_password=always), which is the default. + type: str + sn: + description: + - Surname. + - If user does not exist and O(state=present), the usage of O(sn) is required. + type: str + sshpubkey: + description: + - List of public SSH key. + - If an empty list is passed all assigned public keys are deleted. + - If None is passed SSH public keys are not checked nor changed. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str + telephonenumber: + description: + - List of telephone numbers assigned to the user. + - If an empty list is passed all assigned telephone numbers are deleted. + - If None is passed telephone numbers are not checked nor changed. + type: list + elements: str + title: + description: Title. + type: str + uid: + description: Uid of the user. + required: true + aliases: ["name"] + type: str + uidnumber: + description: + - Account Settings UID/Posix User ID number. + type: str + gidnumber: + description: + - Posix Group ID. + type: str + homedirectory: + description: + - Default home directory of the user. + type: str + version_added: '0.2.0' + userauthtype: + description: + - The authentication type to use for the user. + - To remove all authentication types from the user, use an empty list V([]). + - The choice V(idp) and V(passkey) has been added in community.general 8.1.0. + choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey"] + type: list + elements: str + version_added: '1.2.0' +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes + +requirements: + - base64 + - hashlib +""" + +EXAMPLES = r""" +- name: Ensure pinky is present and always reset password + community.general.ipa_user: + name: pinky + state: present + krbpasswordexpiration: 20200119235959 + givenname: Pinky + sn: Acme + mail: + - pinky@acme.com + telephonenumber: + - '+555123456' + sshpubkey: + - ssh-rsa .... + - ssh-dsa .... + uidnumber: '1001' + gidnumber: '100' + homedirectory: /home/pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure brain is absent + community.general.ipa_user: + name: brain + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure pinky is present but don't reset password if already exists + community.general.ipa_user: + name: pinky + state: present + givenname: Pinky + sn: Acme + password: zounds + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + update_password: on_create + +- name: Ensure pinky is present and using one time password and RADIUS authentication + community.general.ipa_user: + name: pinky + state: present + userauthtype: + - otp + - radius + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +user: + description: User as returned by IPA API. 
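The O(update_password=on_create) behaviour documented above is implemented further down in ensure() by dropping the password from the comparison dict whenever the user already exists, so it can never show up in the diff. A simplified restatement:

```python
# Mirrors the on_create handling in ensure(), reduced to its core.
module_user = {'givenname': 'Pinky', 'sn': 'Acme', 'userpassword': 'zounds'}
ipa_user = {'givenname': ['Pinky'], 'sn': ['Acme']}   # user already exists

update_password = 'on_create'
if ipa_user and update_password == 'on_create':
    module_user.pop('userpassword', None)   # password never enters the diff

# the remaining keys match, the diff is empty, and nothing is modified
```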
+  returned: always
+  type: dict
+"""
+
+import base64
+import hashlib
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class UserIPAClient(IPAClient):
+    def __init__(self, module, host, port, protocol):
+        super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+    def user_find(self, name):
+        return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+    def user_add(self, name, item):
+        return self._post_json(method='user_add', name=name, item=item)
+
+    def user_mod(self, name, item):
+        return self._post_json(method='user_mod', name=name, item=item)
+
+    def user_del(self, name):
+        return self._post_json(method='user_del', name=name)
+
+    def user_disable(self, name):
+        return self._post_json(method='user_disable', name=name)
+
+    def user_enable(self, name):
+        return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
+                  mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
+                  title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None,
+                  userauthtype=None):
+    user = {}
+    if displayname is not None:
+        user['displayname'] = displayname
+    if krbpasswordexpiration is not None:
+        user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
+    if givenname is not None:
+        user['givenname'] = givenname
+    if loginshell is not None:
+        user['loginshell'] = loginshell
+    if mail is not None:
+        user['mail'] = mail
+    user['nsaccountlock'] = nsaccountlock
+    if sn is not None:
+        user['sn'] = sn
+    if sshpubkey is not None:
+        user['ipasshpubkey'] = sshpubkey
+    if telephonenumber is not None:
+        user['telephonenumber'] = telephonenumber
+    if title is not None:
+        user['title'] = title
+    if userpassword is not None:
+        user['userpassword'] = userpassword
+    if gidnumber is not None:
+        user['gidnumber'] = gidnumber
+    if uidnumber is not None:
+        user['uidnumber'] = uidnumber
+    if homedirectory is not None:
+        user['homedirectory'] = homedirectory
+    if userauthtype is not None:
+        user['ipauserauthtype'] = userauthtype
+
+    return user
+
+
+def get_user_diff(client, ipa_user, module_user):
+    """
+    Return the keys of each dict whose values differ. Unfortunately the IPA
+    API returns everything as a list even if only a single value is possible.
+    Therefore some more complexity is needed.
+    If the value of an attribute in module_user is not a list while the same
+    attribute in ipa_user is a list, the method wraps the value in a list before comparing. This way the method
+    should not need to be changed if the returned API dict changes.
+    :param ipa_user:
+    :param module_user:
+    :return:
+    """
+    # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys themselves but only the fingerprints.
+    # These are used for comparison.
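For reference, here is a standalone rendition of the two fingerprint formats that get_ssh_key_fingerprint() (defined just below) produces for this comparison; the key blob is a stand-in, not a real public key:

```python
import base64
import hashlib

# Stand-in key material; a real entry would carry an actual base64 key blob.
pubkey = 'ssh-ed25519 MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWY= pinky@acme.com'
key_type, b64_key, comment = pubkey.split(None, 2)
raw = base64.b64decode(b64_key)

md5_hex = hashlib.md5(raw).hexdigest()
md5_fp = ':'.join(a + b for a, b in zip(md5_hex[::2], md5_hex[1::2])).upper()
sha256_fp = 'SHA256:' + base64.b64encode(hashlib.sha256(raw).digest()).decode('ascii').rstrip('=')

print('%s %s (%s)' % (md5_fp, comment, key_type))     # older IPA: colon-separated MD5
print('%s %s (%s)' % (sha256_fp, comment, key_type))  # newer IPA: unpadded base64 SHA256
```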
+ sshpubkey = None + if 'ipasshpubkey' in module_user: + hash_algo = 'md5' + if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:': + hash_algo = 'sha256' + module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']] + # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on + sshpubkey = module_user['ipasshpubkey'] + del module_user['ipasshpubkey'] + + result = client.get_diff(ipa_data=ipa_user, module_data=module_user) + + # If there are public keys, remove the fingerprints and add them back to the dict + if sshpubkey is not None: + del module_user['sshpubkeyfp'] + module_user['ipasshpubkey'] = sshpubkey + return result + + +def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): + """ + Return the public key fingerprint of a given public SSH key + in format "[fp] [comment] (ssh-rsa)" where fp is of the format: + FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 + for md5 or + SHA256:[base64] + for sha256 + Comments are assumed to be all characters past the second + whitespace character in the sshpubkey string. + :param ssh_key: + :param hash_algo: + :return: + """ + parts = ssh_key.strip().split(None, 2) + if len(parts) == 0: + return None + key_type = parts[0] + key = base64.b64decode(parts[1].encode('ascii')) + + if hash_algo == 'md5': + fp_plain = hashlib.md5(key).hexdigest() + key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() + elif hash_algo == 'sha256': + fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=') + key_fp = 'SHA256:{fp}'.format(fp=fp_plain) + if len(parts) < 3: + return "%s (%s)" % (key_fp, key_type) + else: + comment = parts[2] + return "%s %s (%s)" % (key_fp, comment, key_type) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['uid'] + nsaccountlock = state == 'disabled' + + module_user = get_user_dict(displayname=module.params.get('displayname'), + krbpasswordexpiration=module.params.get('krbpasswordexpiration'), + givenname=module.params.get('givenname'), + loginshell=module.params['loginshell'], + mail=module.params['mail'], sn=module.params['sn'], + sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, + telephonenumber=module.params['telephonenumber'], title=module.params['title'], + userpassword=module.params['password'], + gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'), + homedirectory=module.params.get('homedirectory'), + userauthtype=module.params.get('userauthtype')) + + update_password = module.params.get('update_password') + ipa_user = client.user_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_user: + changed = True + if not module.check_mode: + ipa_user = client.user_add(name=name, item=module_user) + else: + if update_password == 'on_create': + module_user.pop('userpassword', None) + diff = get_user_diff(client, ipa_user, module_user) + if len(diff) > 0: + changed = True + if not module.check_mode: + ipa_user = client.user_mod(name=name, item=module_user) + else: + if ipa_user: + changed = True + if not module.check_mode: + client.user_del(name) + + return changed, ipa_user + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(displayname=dict(type='str'), + givenname=dict(type='str'), + update_password=dict(type='str', default="always", + choices=['always', 'on_create'], + no_log=False), + 
krbpasswordexpiration=dict(type='str', no_log=False), + loginshell=dict(type='str'), + mail=dict(type='list', elements='str'), + sn=dict(type='str'), + uid=dict(type='str', required=True, aliases=['name']), + gidnumber=dict(type='str'), + uidnumber=dict(type='str'), + password=dict(type='str', no_log=True), + sshpubkey=dict(type='list', elements='str'), + state=dict(type='str', default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + telephonenumber=dict(type='list', elements='str'), + title=dict(type='str'), + homedirectory=dict(type='str'), + userauthtype=dict(type='list', elements='str', + choices=['password', 'radius', 'otp', 'pkinit', 'hardened', 'idp', 'passkey'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = UserIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + # If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). + # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey + # as different which should be avoided. + if module.params['sshpubkey'] is not None: + if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": + module.params['sshpubkey'] = None + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_vault.py b/plugins/modules/ipa_vault.py deleted file mode 120000 index 49b5d545a4..0000000000 --- a/plugins/modules/ipa_vault.py +++ /dev/null @@ -1 +0,0 @@ -./identity/ipa/ipa_vault.py \ No newline at end of file diff --git a/plugins/modules/ipa_vault.py b/plugins/modules/ipa_vault.py new file mode 100644 index 0000000000..54cbdce235 --- /dev/null +++ b/plugins/modules/ipa_vault.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# Copyright (c) 2018, Juan Manuel Parrilla +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_vault +author: Juan Manuel Parrilla (@jparrill) +short_description: Manage FreeIPA vaults +description: + - Add, modify and delete vaults and secret vaults. + - KRA service should be enabled to use this module. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Vault name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - Description. + type: str + ipavaulttype: + description: + - Vault types are based on security level. + default: "symmetric" + choices: ["asymmetric", "standard", "symmetric"] + aliases: ["vault_type"] + type: str + ipavaultpublickey: + description: + - Public key. + aliases: ["vault_public_key"] + type: str + ipavaultsalt: + description: + - Vault Salt. + aliases: ["vault_salt"] + type: str + username: + description: + - Any user can own one or more user vaults. + - Mutually exclusive with O(service). + aliases: ["user"] + type: list + elements: str + service: + description: + - Any service can own one or more service vaults. 
+ - Mutually exclusive with O(user). + type: str + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + replace: + description: + - Force replace the existent vault on IPA server. + type: bool + default: false + choices: ["True", "False"] + validate_certs: + description: + - Validate IPA server certificates. + type: bool + default: true +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure vault is present + community.general.ipa_vault: + name: vault01 + vault_type: standard + user: user01 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure vault is present for Admin user + community.general.ipa_vault: + name: vault01 + vault_type: standard + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure vault is absent + community.general.ipa_vault: + name: vault01 + vault_type: standard + user: user01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Modify vault if already exists + community.general.ipa_vault: + name: vault01 + vault_type: standard + description: "Vault for test" + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + replace: true + +- name: Get vault info if already exists + community.general.ipa_vault: + name: vault01 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +vault: + description: Vault as returned by IPA API. + returned: always + type: dict +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class VaultIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(VaultIPAClient, self).__init__(module, host, port, protocol) + + def vault_find(self, name): + return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name}) + + def vault_add_internal(self, name, item): + return self._post_json(method='vault_add_internal', name=name, item=item) + + def vault_mod_internal(self, name, item): + return self._post_json(method='vault_mod_internal', name=name, item=item) + + def vault_del(self, name): + return self._post_json(method='vault_del', name=name) + + +def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None): + vault = {} + + if description is not None: + vault['description'] = description + if vault_type is not None: + vault['ipavaulttype'] = vault_type + if vault_salt is not None: + vault['ipavaultsalt'] = vault_salt + if vault_public_key is not None: + vault['ipavaultpublickey'] = vault_public_key + if service is not None: + vault['service'] = service + return vault + + +def get_vault_diff(client, ipa_vault, module_vault, module): + return client.get_diff(ipa_data=ipa_vault, module_data=module_vault) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + user = module.params['username'] + replace = module.params['replace'] + + module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'], + vault_salt=module.params['ipavaultsalt'], + vault_public_key=module.params['ipavaultpublickey'], + service=module.params['service']) + ipa_vault 
= client.vault_find(name=name) + + changed = False + if state == 'present': + if not ipa_vault: + # New vault + changed = True + if not module.check_mode: + ipa_vault = client.vault_add_internal(name, item=module_vault) + else: + # Already exists + if replace: + diff = get_vault_diff(client, ipa_vault, module_vault, module) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_vault.get(key) + client.vault_mod_internal(name=name, item=data) + + else: + if ipa_vault: + changed = True + if not module.check_mode: + client.vault_del(name) + + return changed, client.vault_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + ipavaulttype=dict(type='str', default='symmetric', + choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']), + ipavaultsalt=dict(type='str', aliases=['vault_salt']), + ipavaultpublickey=dict(type='str', aliases=['vault_public_key']), + service=dict(type='str'), + replace=dict(type='bool', default=False, choices=[True, False]), + state=dict(type='str', default='present', choices=['present', 'absent']), + username=dict(type='list', elements='str', aliases=['user'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['username', 'service']]) + + client = VaultIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, vault = ensure(module, client) + module.exit_json(changed=changed, vault=vault) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipbase_info.py b/plugins/modules/ipbase_info.py new file mode 100644 index 0000000000..e2d73333fa --- /dev/null +++ b/plugins/modules/ipbase_info.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Dominik Kukacka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: "ipbase_info" +version_added: "7.0.0" +short_description: "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API" +description: + - Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API. +author: "Dominik Kukacka (@dominikkukacka)" +extends_documentation_fragment: + - "community.general.attributes" + - "community.general.attributes.info_module" +options: + ip: + description: + - The IP you want to get the info for. If not specified the API detects the IP automatically. + required: false + type: str + apikey: + description: + - The API key for the request if you need more requests. + required: false + type: str + hostname: + description: + - If the O(hostname) parameter is set to V(true), the API response contains the hostname of the IP. + required: false + type: bool + default: false + language: + description: + - An ISO Alpha 2 Language Code for localizing the IP data. + required: false + type: str + default: "en" +notes: + - Check U(https://ipbase.com/) for more information. 
+""" + +EXAMPLES = r""" +- name: "Get IP geolocation information of the primary outgoing IP" + community.general.ipbase_info: + register: my_ip_info + +- name: "Get IP geolocation information of a specific IP" + community.general.ipbase_info: + ip: "8.8.8.8" + register: my_ip_info + + +- name: "Get IP geolocation information of a specific IP with all other possible parameters" + community.general.ipbase_info: + ip: "8.8.8.8" + apikey: "xxxxxxxxxxxxxxxxxxxxxx" + hostname: true + language: "de" + register: my_ip_info +""" + +RETURN = r""" +data: + description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure + of the response." + returned: success + type: dict + sample: + { + "ip": "1.1.1.1", + "hostname": "one.one.one.one", + "type": "v4", + "range_type": { + "type": "PUBLIC", + "description": "Public address" + }, + "connection": { + "asn": 13335, + "organization": "Cloudflare, Inc.", + "isp": "APNIC Research and Development", + "range": "1.1.1.1/32" + }, + "location": { + "geonames_id": 5332870, + "latitude": 34.053611755371094, + "longitude": -118.24549865722656, + "zip": "90012", + "continent": { + "code": "NA", + "name": "North America", + "name_translated": "North America" + }, + "country": { + "alpha2": "US", + "alpha3": "USA", + "calling_codes": [ + "+1" + ], + "currencies": [ + { + "symbol": "$", + "name": "US Dollar", + "symbol_native": "$", + "decimal_digits": 2, + "rounding": 0, + "code": "USD", + "name_plural": "US dollars" + } + ], + "emoji": "...", + "ioc": "USA", + "languages": [ + { + "name": "English", + "name_native": "English" + } + ], + "name": "United States", + "name_translated": "United States", + "timezones": [ + "America/New_York", + "America/Detroit", + "America/Kentucky/Louisville", + "America/Kentucky/Monticello", + "America/Indiana/Indianapolis", + "America/Indiana/Vincennes", + "America/Indiana/Winamac", + "America/Indiana/Marengo", + "America/Indiana/Petersburg", + "America/Indiana/Vevay", + "America/Chicago", + "America/Indiana/Tell_City", + "America/Indiana/Knox", + "America/Menominee", + "America/North_Dakota/Center", + "America/North_Dakota/New_Salem", + "America/North_Dakota/Beulah", + "America/Denver", + "America/Boise", + "America/Phoenix", + "America/Los_Angeles", + "America/Anchorage", + "America/Juneau", + "America/Sitka", + "America/Metlakatla", + "America/Yakutat", + "America/Nome", + "America/Adak", + "Pacific/Honolulu" + ], + "is_in_european_union": false, + "fips": "US", + "geonames_id": 6252001, + "hasc_id": "US", + "wikidata_id": "Q30" + }, + "city": { + "fips": "644000", + "alpha2": null, + "geonames_id": 5368753, + "hasc_id": null, + "wikidata_id": "Q65", + "name": "Los Angeles", + "name_translated": "Los Angeles" + }, + "region": { + "fips": "US06", + "alpha2": "US-CA", + "geonames_id": 5332921, + "hasc_id": "US.CA", + "wikidata_id": "Q99", + "name": "California", + "name_translated": "California" + } + }, + "tlds": [ + ".us" + ], + "timezone": { + "id": "America/Los_Angeles", + "current_time": "2023-05-04T04:30:28-07:00", + "code": "PDT", + "is_daylight_saving": true, + "gmt_offset": -25200 + }, + "security": { + "is_anonymous": false, + "is_datacenter": false, + "is_vpn": false, + "is_bot": false, + "is_abuser": true, + "is_known_attacker": true, + "is_proxy": false, + "is_spam": false, + "is_tor": false, + "is_icloud_relay": false, + "threat_score": 100 + }, + "domains": { + "count": 10943, + "domains": [ + "eliwise.academy", + "accountingprose.academy", + 
"pistola.academy", + "1and1-test-ntlds-fr.accountant", + "omnergy.africa" + ] + } + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from urllib.parse import urlencode + + +USER_AGENT = 'ansible-community.general.ipbase_info/0.1.0' +BASE_URL = 'https://api.ipbase.com/v2/info' + + +class IpbaseInfo(object): + + def __init__(self, module): + self.module = module + + def _get_url_data(self, url): + response, info = fetch_url( + self.module, + url, + force=True, + timeout=10, + headers={ + 'Accept': 'application/json', + 'User-Agent': USER_AGENT, + }) + + if info['status'] != 200: + self.module.fail_json(msg='The API request to ipbase.com returned an error status code {0}'.format(info['status'])) + else: + try: + content = response.read() + result = self.module.from_json(content.decode('utf8')) + except ValueError: + self.module.fail_json( + msg='Failed to parse the ipbase.com response: ' + '{0} {1}'.format(url, content)) + else: + return result + + def info(self): + + ip = self.module.params['ip'] + apikey = self.module.params['apikey'] + hostname = self.module.params['hostname'] + language = self.module.params['language'] + + url = BASE_URL + + params = {} + if ip: + params['ip'] = ip + + if apikey: + params['apikey'] = apikey + + if hostname: + params['hostname'] = 1 + + if language: + params['language'] = language + + if params: + url += '?' + urlencode(params) + + return self._get_url_data(url) + + +def main(): + module_args = dict( + ip=dict(type='str', no_log=False), + apikey=dict(type='str', no_log=True), + hostname=dict(type='bool', no_log=False, default=False), + language=dict(type='str', no_log=False, default='en'), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + ipbase = IpbaseInfo(module) + module.exit_json(**ipbase.info()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py deleted file mode 120000 index 91c91fe5ac..0000000000 --- a/plugins/modules/ipify_facts.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ipify_facts.py \ No newline at end of file diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py new file mode 100644 index 0000000000..73a94db2c7 --- /dev/null +++ b/plugins/modules/ipify_facts.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# +# Copyright (c) 2015, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ipify_facts +short_description: Retrieve the public IP of your internet gateway +description: + - If behind NAT and need to know the public IP of your internet gateway. +author: + - René Moser (@resmo) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + api_url: + description: + - URL of the ipify.org API service. + - C(?format=json) is appended by default. + type: str + default: https://api.ipify.org/ + timeout: + description: + - HTTP connection timeout in seconds. + type: int + default: 10 + validate_certs: + description: + - When set to V(false), SSL certificates are not validated. + type: bool + default: true +notes: + - Visit https://www.ipify.org to get more information. 
+""" + +EXAMPLES = r""" +# Gather IP facts from ipify.org +- name: Get my public IP + community.general.ipify_facts: + +# Gather IP facts from your own ipify service endpoint with a custom timeout +- name: Get my public IP + community.general.ipify_facts: + api_url: http://api.example.com/ipify + timeout: 20 +""" + +RETURN = r""" +ipify_public_ip: + description: Public IP of the internet gateway. + returned: success + type: str + sample: 1.2.3.4 +""" + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_text + + +class IpifyFacts(object): + + def __init__(self): + self.api_url = module.params.get('api_url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'ipify_public_ip': None + } + (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout) + + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout)) + + data = json.loads(to_text(response.read())) + result['ipify_public_ip'] = data.get('ip') + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_url=dict(type='str', default='https://api.ipify.org/'), + timeout=dict(type='int', default=10), + validate_certs=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + ipify_facts = IpifyFacts().run() + ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts) + module.exit_json(**ipify_facts_result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipinfoio_facts.py b/plugins/modules/ipinfoio_facts.py deleted file mode 120000 index 507a47ef10..0000000000 --- a/plugins/modules/ipinfoio_facts.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ipinfoio_facts.py \ No newline at end of file diff --git a/plugins/modules/ipinfoio_facts.py b/plugins/modules/ipinfoio_facts.py new file mode 100644 index 0000000000..4d5d8b25a8 --- /dev/null +++ b/plugins/modules/ipinfoio_facts.py @@ -0,0 +1,133 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Aleksei Kostiuk +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ipinfoio_facts +short_description: Retrieve IP geolocation facts of a host's IP address +description: + - Gather IP geolocation facts of a host's IP address using ipinfo.io API. +author: "Aleksei Kostiuk (@akostyuk)" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + timeout: + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + type: int + http_agent: + description: + - Set http user agent. + required: false + default: "ansible-ipinfoio-module/0.0.1" + type: str +notes: + - Check U(http://ipinfo.io/) for more information. +""" + +EXAMPLES = r""" +# Retrieve geolocation data of a host's IP address +- name: Get IP geolocation data + community.general.ipinfoio_facts: +""" + +RETURN = r""" +ansible_facts: + description: "Dictionary of IP geolocation facts for a host's IP address." + returned: changed + type: complex + contains: + ip: + description: "Public IP address of a host." + type: str + sample: "8.8.8.8" + hostname: + description: Domain name. 
+      type: str
+      sample: "google-public-dns-a.google.com"
+    country:
+      description: ISO 3166-1 alpha-2 country code.
+      type: str
+      sample: "US"
+    region:
+      description: State or province name.
+      type: str
+      sample: "California"
+    city:
+      description: City name.
+      type: str
+      sample: "Mountain View"
+    loc:
+      description: Latitude and Longitude of the location.
+      type: str
+      sample: "37.3860,-122.0838"
+    org:
+      description: "Organization's name."
+      type: str
+      sample: "AS3356 Level 3 Communications, Inc."
+    postal:
+      description: Postal code.
+      type: str
+      sample: "94035"
+"""
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+    def __init__(self, module):
+        self.url = 'https://ipinfo.io/json'
+        self.timeout = module.params.get('timeout')
+        self.module = module
+
+    def get_geo_data(self):
+        response, info = fetch_url(self.module, self.url, force=True,  # NOQA
+                                   timeout=self.timeout)
+        if info['status'] != 200:
+            self.module.fail_json(msg='Could not get {0} page, '
+                                      'check for connectivity!'.format(self.url))
+        else:
+            try:
+                content = response.read()
+                result = self.module.from_json(content.decode('utf8'))
+            except ValueError:
+                self.module.fail_json(
+                    msg='Failed to parse the ipinfo.io response: '
+                    '{0} {1}'.format(self.url, content))
+            else:
+                return result
+
+
+def main():
+    module = AnsibleModule(  # NOQA
+        argument_spec=dict(
+            http_agent=dict(default=USER_AGENT),
+            timeout=dict(type='int', default=10),
+        ),
+        supports_check_mode=True,
+    )
+
+    ipinfoio = IpinfoioFacts(module)
+    ipinfoio_result = dict(
+        changed=False, ansible_facts=ipinfoio.get_geo_data())
+    module.exit_json(**ipinfoio_result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py
deleted file mode 120000
index 665caa8696..0000000000
--- a/plugins/modules/ipmi_boot.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/ipmi/ipmi_boot.py
\ No newline at end of file
diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py
new file mode 100644
index 0000000000..30fcfb161d
--- /dev/null
+++ b/plugins/modules/ipmi_boot.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: ipmi_boot
+short_description: Management of order of boot devices
+description:
+  - Use this module to manage order of boot devices.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Hostname or IP address of the BMC.
+    required: true
+    type: str
+  port:
+    description:
+      - Remote RMCP port.
+    default: 623
+    type: int
+  user:
+    description:
+      - Username to use to connect to the BMC.
+    required: true
+    type: str
+  password:
+    description:
+      - Password to connect to the BMC.
+    required: true
+    type: str
+  key:
+    description:
+      - Encryption key to connect to the BMC in hex format.
+    required: false
+    type: str
+    version_added: 4.1.0
+  bootdev:
+    description:
+      - Set boot device to use on next reboot.
+      - 'The choices for the device are:'
+      - V(network) -- Request network boot.
+      - V(floppy) -- Boot from floppy.
+      - V(hd) -- Boot from hard drive.
+      - V(safe) -- Boot from hard drive, requesting 'safe mode'.
+      - V(optical) -- Boot from CD/DVD/BD drive.
+      - V(setup) -- Boot into setup utility.
+      - V(default) -- Remove any IPMI-directed boot device request.
+    required: true
+    choices:
+      - network
+      - floppy
+      - hd
+      - safe
+      - optical
+      - setup
+      - default
+    type: str
+  state:
+    description:
+      - Whether the requested boot device configuration should be present or absent.
+      - 'The choices for the state are: - present -- Ensure the given boot device request is set. - absent -- Remove the
+        boot device request, reverting to V(default).'
+    default: present
+    choices: [present, absent]
+    type: str
+  persistent:
+    description:
+      - If set, ask that system firmware uses this device beyond next boot. Be aware many systems do not honor this.
+    type: bool
+    default: false
+  uefiboot:
+    description:
+      - If set, request UEFI boot explicitly. Strictly speaking, the spec suggests that if not set, the system should BIOS
+        boot and offers no "do not care" option. In practice, this flag not being set does not preclude UEFI boot on any system
+        I have encountered.
+    type: bool
+    default: false
+requirements:
+  - pyghmi
+author: "Bulat Gaifullin (@bgaifullin)"
+"""
+
+RETURN = r"""
+bootdev:
+  description: The boot device name which is used beyond next boot.
+  returned: success
+  type: str
+  sample: default
+persistent:
+  description: If V(true), system firmware uses this device beyond next boot.
+  returned: success
+  type: bool
+  sample: false
+uefimode:
+  description: If V(true), system firmware uses UEFI boot explicitly beyond next boot.
+  returned: success
+  type: bool
+  sample: false
+"""
+
+EXAMPLES = r"""
+- name: Ensure bootdevice is HD
+  community.general.ipmi_boot:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    bootdev: hd
+
+- name: Ensure bootdevice is not Network
+  community.general.ipmi_boot:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    key: 1234567890AABBCCDEFF000000EEEE12
+    bootdev: network
+    state: absent
+"""
+
+import traceback
+import binascii
+
+PYGHMI_IMP_ERR = None
+try:
+    from pyghmi.ipmi import command
+except ImportError:
+    PYGHMI_IMP_ERR = traceback.format_exc()
+    command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            port=dict(default=623, type='int'),
+            user=dict(required=True, no_log=True),
+            password=dict(required=True, no_log=True),
+            key=dict(type='str', no_log=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
+            persistent=dict(default=False, type='bool'),
+            uefiboot=dict(default=False, type='bool')
+        ),
+        supports_check_mode=True,
+    )
+
+    if command is None:
+        module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+    name = module.params['name']
+    port = module.params['port']
+    user = module.params['user']
+    password = module.params['password']
+    state = module.params['state']
+    bootdev = module.params['bootdev']
+    persistent = module.params['persistent']
+    uefiboot = module.params['uefiboot']
+    request = dict()
+
+    if state == 'absent' and bootdev == 'default':
+        module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+    try:
+        if module.params['key']:
+            key = binascii.unhexlify(module.params['key'])
+        else:
+            key = None
+    except Exception:
+        module.fail_json(msg="Unable to convert 'key' from hex string.")
+
+    # --- run command ---
+    try:
+        ipmi_cmd =
command.Command( + bmc=name, userid=user, password=password, port=port, kg=key + ) + module.debug('ipmi instantiated - name: "%s"' % name) + current = ipmi_cmd.get_bootdev() + # uefimode may not supported by BMC, so use desired value as default + current.setdefault('uefimode', uefiboot) + if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): + request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) + elif state == 'absent' and current['bootdev'] == bootdev: + request = dict(bootdev='default') + else: + module.exit_json(changed=False, **current) + + if module.check_mode: + response = dict(bootdev=request['bootdev']) + else: + response = ipmi_cmd.set_bootdev(**request) + + if 'error' in response: + module.fail_json(msg=response['error']) + + if 'persist' in request: + response['persistent'] = request['persist'] + if 'uefiboot' in request: + response['uefimode'] = request['uefiboot'] + + module.exit_json(changed=True, **response) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py deleted file mode 120000 index 905aaab40e..0000000000 --- a/plugins/modules/ipmi_power.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/ipmi/ipmi_power.py \ No newline at end of file diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py new file mode 100644 index 0000000000..b88fba07be --- /dev/null +++ b/plugins/modules/ipmi_power.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ipmi_power +short_description: Power management for machine +description: + - Use this module for power management. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Hostname or IP address of the BMC. + required: true + type: str + port: + description: + - Remote RMCP port. + default: 623 + type: int + user: + description: + - Username to use to connect to the BMC. + required: true + type: str + password: + description: + - Password to connect to the BMC. + required: true + type: str + key: + description: + - Encryption key to connect to the BMC in hex format. + required: false + type: str + version_added: 4.1.0 + state: + description: + - Whether to ensure that the machine in desired state. + - 'The choices for state are:' + - V(on) -- Request system turn on. + - V(off) -- Request system turn off without waiting for OS to shutdown. + - V(shutdown) -- Have system request OS proper shutdown. + - V(reset) -- Request system reset without waiting for OS. + - V(boot) -- If system is off, then V(on), else V(reset). + - Either this option or O(machine) is required. + choices: ['on', 'off', shutdown, reset, boot] + type: str + timeout: + description: + - Maximum number of seconds before interrupt request. + default: 300 + type: int + machine: + description: + - Provide a list of the remote target address for the bridge IPMI request, and the power status. + - Either this option or O(state) is required. + required: false + type: list + elements: dict + version_added: 4.3.0 + suboptions: + targetAddress: + description: + - Remote target address for the bridge IPMI request. 
+        type: int
+        required: true
+      state:
+        description:
+          - Whether to ensure that the machine specified by O(machine[].targetAddress) is in the desired state.
+          - If this option is not set, the power state is set by O(state).
+          - If both this option and O(state) are set, this option takes precedence over O(state).
+        choices: ['on', 'off', shutdown, reset, boot]
+        type: str
+
+requirements:
+  - pyghmi
+author: "Bulat Gaifullin (@bgaifullin)"
+"""
+
+RETURN = r"""
+powerstate:
+  description: The current power state of the machine.
+  returned: success and O(machine) is not provided
+  type: str
+  sample: 'on'
+status:
+  description: The current power state of the machine when the O(machine) option is set.
+  returned: success and O(machine) is provided
+  type: list
+  elements: dict
+  version_added: 4.3.0
+  contains:
+    powerstate:
+      description: The current power state of the machine specified by RV(status[].targetAddress).
+      type: str
+    targetAddress:
+      description: The remote target address.
+      type: int
+  sample:
+    [
+      {
+        "powerstate": "on",
+        "targetAddress": 48
+      },
+      {
+        "powerstate": "on",
+        "targetAddress": 50
+      }
+    ]
+"""
+
+EXAMPLES = r"""
+- name: Ensure machine is powered on
+  community.general.ipmi_power:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    state: 'on'
+
+- name: Ensure machines whose remote target addresses are 48 and 50 are powered off
+  community.general.ipmi_power:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    state: 'off'
+    machine:
+      - targetAddress: 48
+      - targetAddress: 50
+
+- name: Ensure the machine whose remote target address is 48 is powered on, and 50 is powered off
+  community.general.ipmi_power:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    machine:
+      - targetAddress: 48
+        state: 'on'
+      - targetAddress: 50
+        state: 'off'
+"""
+
+import traceback
+import binascii
+
+PYGHMI_IMP_ERR = None
+INVALID_TARGET_ADDRESS = 0x100
+try:
+    from pyghmi.ipmi import command
+except ImportError:
+    PYGHMI_IMP_ERR = traceback.format_exc()
+    command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            port=dict(default=623, type='int'),
+            state=dict(choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+            user=dict(required=True, no_log=True),
+            password=dict(required=True, no_log=True),
+            key=dict(type='str', no_log=True),
+            timeout=dict(default=300, type='int'),
+            machine=dict(
+                type='list', elements='dict',
+                options=dict(
+                    targetAddress=dict(required=True, type='int'),
+                    state=dict(type='str', choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+                ),
+            ),
+        ),
+        supports_check_mode=True,
+        required_one_of=(
+            ['state', 'machine'],
+        ),
+    )
+
+    if command is None:
+        module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+    name = module.params['name']
+    port = module.params['port']
+    user = module.params['user']
+    password = module.params['password']
+    state = module.params['state']
+    timeout = module.params['timeout']
+    machine = module.params['machine']
+
+    try:
+        if module.params['key']:
+            key = binascii.unhexlify(module.params['key'])
+        else:
+            key = None
+    except Exception:
+        module.fail_json(msg="Unable to convert 'key' from hex string.")
+
+    # --- run command ---
+    try:
+        ipmi_cmd = command.Command(
+            bmc=name, userid=user, password=password, port=port, kg=key
+        )
+        module.debug('ipmi instantiated - name: "%s"' % name)
+
+        changed = False
+        if machine is None:
+            current = ipmi_cmd.get_power()
+            if current['powerstate'] != state:
+                response = {'powerstate': state} if module.check_mode \
+                    else ipmi_cmd.set_power(state, wait=timeout)
+                changed = True
+            else:
+                response = current
+
+            if 'error' in response:
+                module.fail_json(msg=response['error'])
+
+            module.exit_json(changed=changed, **response)
+        else:
+            response = []
+            for entry in machine:
+                taddr = entry['targetAddress']
+                if taddr >= INVALID_TARGET_ADDRESS:
+                    module.fail_json(msg="targetAddress should be set between 0 and 255.")
+
+                try:
+                    # bridge_request is supported on pyghmi 1.5.30 and later
+                    current = ipmi_cmd.get_power(bridge_request={"addr": taddr})
+                except TypeError:
+                    module.fail_json(
+                        msg="targetAddress is not supported by the installed pyghmi.")
+
+                if entry['state']:
+                    tstate = entry['state']
+                elif state:
+                    tstate = state
+                else:
+                    module.fail_json(msg="Either the state option or the state suboption of machine must be set.")
+
+                if current['powerstate'] != tstate:
+                    changed = True
+                    if not module.check_mode:
+                        new = ipmi_cmd.set_power(tstate, wait=timeout, bridge_request={"addr": taddr})
+                        if 'error' in new:
+                            module.fail_json(msg=new['error'])
+                        response.append(
+                            {'targetAddress': taddr, 'powerstate': new['powerstate']})
+
+                if current['powerstate'] == tstate or module.check_mode:
+                    response.append({'targetAddress': taddr, 'powerstate': tstate})
+
+            module.exit_json(changed=changed, status=response)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py
deleted file mode 120000
index 864608d532..0000000000
--- a/plugins/modules/iptables_state.py
+++ /dev/null
@@ -1 +0,0 @@
-system/iptables_state.py
\ No newline at end of file
diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py
new file mode 100644
index 0000000000..0119465007
--- /dev/null
+++ b/plugins/modules/iptables_state.py
@@ -0,0 +1,651 @@
+#!/usr/bin/python
+
+# Copyright (c) 2020, quidame
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: iptables_state
+short_description: Save iptables state into a file or restore it from a file
+version_added: '1.1.0'
+author: quidame (@quidame)
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.flow
+description:
+  - C(iptables) is used to set up, maintain, and inspect the tables of IP packet filter rules in the Linux kernel.
+  - This module handles the saving and/or loading of rules. This is the same as the behaviour of the C(iptables-save) and
+    C(iptables-restore) (or C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands, which this module uses internally.
+  - Modifying the state of the firewall remotely may lead to losing access to the host if the new ruleset contains a mistake.
+    This module embeds a rollback feature to avoid this, by telling the host to restore the previous rules if a cookie is still
+    there after a given delay, while during that time the controller keeps trying to remove this cookie on the host through a new connection.
+notes:
+  - The rollback feature is not a module option and depends on the task's attributes. To enable it, the module must be played
+    asynchronously, in other words by setting task attributes C(poll) to V(0), and C(async) to a value less than or equal to C(ANSIBLE_TIMEOUT).
+ If C(async) is greater, the rollback still happens when needed, but you experience a connection timeout instead of more + relevant info returned by the module after its failure. +attributes: + check_mode: + support: full + diff_mode: + support: none + action: + support: full + async: + support: full +options: + counters: + description: + - Save or restore the values of all packet and byte counters. + - When V(true), the module is not idempotent. + type: bool + default: false + ip_version: + description: + - Which version of the IP protocol this module should apply to. + type: str + choices: [ipv4, ipv6] + default: ipv4 + modprobe: + description: + - Specify the path to the C(modprobe) program internally used by iptables related commands to load kernel modules. + - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the executable's path. + type: path + noflush: + description: + - For O(state=restored), ignored otherwise. + - If V(false), restoring iptables rules from a file flushes (deletes) all previous contents of the respective table(s). + If V(true), the previous rules are left untouched (but policies are updated anyway, for all built-in chains). + type: bool + default: false + path: + description: + - The file the iptables state should be saved to. + - The file the iptables state should be restored from. + type: path + required: true + state: + description: + - Whether the firewall state should be saved (into a file) or restored (from a file). + type: str + choices: [saved, restored] + required: true + table: + description: + - When O(state=restored), restore only the named table even if the input file contains other tables. Fail if the named + table is not declared in the file. + - When O(state=saved), restrict output to the specified table. If not specified, output includes all active tables. + type: str + choices: [filter, nat, mangle, raw, security] + wait: + description: + - Wait N seconds for the xtables lock to prevent instant failure in case multiple instances of the program are running + concurrently. + type: int +requirements: [iptables, ip6tables] +""" + +EXAMPLES = r""" +# This will apply to all loaded/active IPv4 tables. +- name: Save current state of the firewall in system file + community.general.iptables_state: + state: saved + path: /etc/sysconfig/iptables + +# This will apply only to IPv6 filter table. +- name: save current state of the firewall in system file + community.general.iptables_state: + ip_version: ipv6 + table: filter + state: saved + path: /etc/iptables/rules.v6 + +# This will load a state from a file, with a rollback in case of access loss +- name: restore firewall state from a file + community.general.iptables_state: + state: restored + path: /run/iptables.apply + async: "{{ ansible_timeout }}" + poll: 0 + +# This will load new rules by appending them to the current ones +- name: restore firewall state from a file + community.general.iptables_state: + state: restored + path: /run/iptables.apply + noflush: true + async: "{{ ansible_timeout }}" + poll: 0 + +# This will only retrieve information +- name: get current state of the firewall + community.general.iptables_state: + state: saved + path: /tmp/iptables + check_mode: true + changed_when: false + register: iptables_state + +- name: show current state of the firewall + ansible.builtin.debug: + var: iptables_state.initial_state +""" + +RETURN = r""" +applied: + description: Whether or not the wanted state has been successfully restored. 
+  type: bool
+  returned: always
+  sample: true
+initial_state:
+  description: The current state of the firewall when the module starts.
+  type: list
+  elements: str
+  returned: always
+  sample:
+    [
+      "# Generated by xtables-save v1.8.2",
+      "*filter",
+      ":INPUT ACCEPT [0:0]",
+      ":FORWARD ACCEPT [0:0]",
+      ":OUTPUT ACCEPT [0:0]",
+      "COMMIT",
+      "# Completed"
+    ]
+restored:
+  description: The state the module restored, whether or not it was finally applied.
+  type: list
+  elements: str
+  returned: always
+  sample:
+    [
+      "# Generated by xtables-save v1.8.2",
+      "*filter",
+      ":INPUT DROP [0:0]",
+      ":FORWARD DROP [0:0]",
+      ":OUTPUT ACCEPT [0:0]",
+      "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
+      "-A INPUT -m conntrack --ctstate INVALID -j DROP",
+      "-A INPUT -i lo -j ACCEPT",
+      "-A INPUT -p icmp -j ACCEPT",
+      "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+      "COMMIT",
+      "# Completed"
+    ]
+saved:
+  description: The iptables state the module saved.
+  type: list
+  elements: str
+  returned: always
+  sample:
+    [
+      "# Generated by xtables-save v1.8.2",
+      "*filter",
+      ":INPUT ACCEPT [0:0]",
+      ":FORWARD DROP [0:0]",
+      ":OUTPUT ACCEPT [0:0]",
+      "COMMIT",
+      "# Completed"
+    ]
+tables:
+  description:
+    - The iptables rules on the system before the module has run, separated by table.
+    - If the option O(table) is used, only this table is included.
+  type: dict
+  contains:
+    table:
+      description: Policies and rules for all chains of the named table.
+      type: list
+      elements: str
+  sample: |-
+    {
+      "filter": [
+        ":INPUT ACCEPT",
+        ":FORWARD ACCEPT",
+        ":OUTPUT ACCEPT",
+        "-A INPUT -i lo -j ACCEPT",
+        "-A INPUT -p icmp -j ACCEPT",
+        "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+        "-A INPUT -j REJECT --reject-with icmp-host-prohibited"
+      ],
+      "nat": [
+        ":PREROUTING ACCEPT",
+        ":INPUT ACCEPT",
+        ":OUTPUT ACCEPT",
+        ":POSTROUTING ACCEPT"
+      ]
+    }
+  returned: always
+"""
+
+
+import re
+import os
+import time
+import tempfile
+import filecmp
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+IPTABLES = dict(
+    ipv4='iptables',
+    ipv6='ip6tables',
+)
+
+SAVE = dict(
+    ipv4='iptables-save',
+    ipv6='ip6tables-save',
+)
+
+RESTORE = dict(
+    ipv4='iptables-restore',
+    ipv6='ip6tables-restore',
+)
+
+TABLES = ['filter', 'mangle', 'nat', 'raw', 'security']
+
+
+def read_state(b_path):
+    '''
+    Read a file and return its content as a list of non-empty lines.
+    '''
+    with open(b_path, 'r') as f:
+        text = f.read()
+    return [t for t in text.splitlines() if t != '']
+
+
+def write_state(b_path, lines, changed):
+    '''
+    Write the given contents to the given path and return the changed status.
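+    The contents are staged in a temporary file first; the destination is only
+    replaced when it actually differs, and never in check mode.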
+ ''' + # Populate a temporary file + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'w') as f: + f.write("{0}\n".format("\n".join(lines))) + + # Prepare to copy temporary file to the final destination + if not os.path.exists(b_path): + b_destdir = os.path.dirname(b_path) + destdir = to_native(b_destdir, errors='surrogate_or_strict') + if b_destdir and not os.path.exists(b_destdir) and not module.check_mode: + try: + os.makedirs(b_destdir) + except Exception as err: + module.fail_json( + msg='Error creating %s: %s' % (destdir, to_native(err)), + initial_state=lines) + changed = True + + elif not filecmp.cmp(tmpfile, b_path): + changed = True + + # Do it + if changed and not module.check_mode: + try: + shutil.copyfile(tmpfile, b_path) + except Exception as err: + path = to_native(b_path, errors='surrogate_or_strict') + module.fail_json( + msg='Error saving state into %s: %s' % (path, to_native(err)), + initial_state=lines) + + return changed + + +def initialize_from_null_state(initializer, initcommand, fallbackcmd, table): + ''' + This ensures iptables-state output is suitable for iptables-restore to roll + back to it, i.e. iptables-save output is not empty. This also works for the + iptables-nft-save alternative. + ''' + if table is None: + table = 'filter' + + commandline = list(initializer) + commandline += ['-t', table] + dummy = module.run_command(commandline, check_rc=True) + (rc, out, err) = module.run_command(initcommand, check_rc=True) + if '*%s' % table not in out.splitlines(): + # The last resort. + iptables_input = '*%s\n:OUTPUT ACCEPT\nCOMMIT\n' % table + dummy = module.run_command(fallbackcmd, data=iptables_input, check_rc=True) + (rc, out, err) = module.run_command(initcommand, check_rc=True) + + return rc, out, err + + +def filter_and_format_state(string): + ''' + Remove timestamps to ensure idempotence between runs. Also remove counters + by default. And return the result as a list. + ''' + string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string) + if not module.params['counters']: + string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string) + lines = [line for line in string.splitlines() if line != ''] + return lines + + +def parse_per_table_state(all_states_dump): + ''' + Convert raw iptables-save output into usable datastructure, for reliable + comparisons between initial and final states. 
+ ''' + lines = filter_and_format_state(all_states_dump) + tables = dict() + current_table = '' + current_list = list() + for line in lines: + if re.match(r'^[*](filter|mangle|nat|raw|security)$', line): + current_table = line[1:] + continue + if line == 'COMMIT': + tables[current_table] = current_list + current_table = '' + current_list = list() + continue + if line.startswith('# '): + continue + current_list.append(line) + return tables + + +def main(): + + global module + + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + state=dict(type='str', choices=['saved', 'restored'], required=True), + table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']), + noflush=dict(type='bool', default=False), + counters=dict(type='bool', default=False), + modprobe=dict(type='path'), + ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'), + wait=dict(type='int'), + _timeout=dict(type='int'), + _back=dict(type='path'), + ), + required_together=[ + ['_timeout', '_back'], + ], + supports_check_mode=True, + ) + + # We'll parse iptables-restore stderr + module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C') + + path = module.params['path'] + state = module.params['state'] + table = module.params['table'] + noflush = module.params['noflush'] + counters = module.params['counters'] + modprobe = module.params['modprobe'] + ip_version = module.params['ip_version'] + wait = module.params['wait'] + _timeout = module.params['_timeout'] + _back = module.params['_back'] + + bin_iptables = module.get_bin_path(IPTABLES[ip_version], True) + bin_iptables_save = module.get_bin_path(SAVE[ip_version], True) + bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True) + + os.umask(0o077) + changed = False + COMMANDARGS = [] + INITCOMMAND = [bin_iptables_save] + INITIALIZER = [bin_iptables, '-L', '-n'] + TESTCOMMAND = [bin_iptables_restore, '--test'] + FALLBACKCMD = [bin_iptables_restore] + + if counters: + COMMANDARGS.append('--counters') + + if table is not None: + COMMANDARGS.extend(['--table', table]) + + if wait is not None: + TESTCOMMAND.extend(['--wait', '%d' % wait]) + + if modprobe is not None: + b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict') + if not os.path.exists(b_modprobe): + module.fail_json(msg="modprobe %s not found" % modprobe) + if not os.path.isfile(b_modprobe): + module.fail_json(msg="modprobe %s not a file" % modprobe) + if not os.access(b_modprobe, os.R_OK): + module.fail_json(msg="modprobe %s not readable" % modprobe) + if not os.access(b_modprobe, os.X_OK): + module.fail_json(msg="modprobe %s not executable" % modprobe) + COMMANDARGS.extend(['--modprobe', modprobe]) + INITIALIZER.extend(['--modprobe', modprobe]) + INITCOMMAND.extend(['--modprobe', modprobe]) + TESTCOMMAND.extend(['--modprobe', modprobe]) + FALLBACKCMD.extend(['--modprobe', modprobe]) + + SAVECOMMAND = list(COMMANDARGS) + SAVECOMMAND.insert(0, bin_iptables_save) + + b_path = to_bytes(path, errors='surrogate_or_strict') + + if state == 'restored': + if not os.path.exists(b_path): + module.fail_json(msg="Source %s not found" % path) + if not os.path.isfile(b_path): + module.fail_json(msg="Source %s not a file" % path) + if not os.access(b_path, os.R_OK): + module.fail_json(msg="Source %s not readable" % path) + state_to_restore = read_state(b_path) + cmd = None + else: + cmd = ' '.join(SAVECOMMAND) + + (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True) + + # The issue comes when wanting to restore state 
from an empty iptables-save
+    # output... Consider what happens when, say:
+    # - no table is specified, and iptables-save's output contains only the nat table;
+    # - we give a filter ruleset to iptables-restore and it locks us out
+    #   of the host;
+    # then trying to roll the iptables state back to the previous (working) setup
+    # does not override the current filter table, because no filter table is stored
+    # in the backup! So we have to ensure that the tables to be restored have a backup
+    # in case of rollback.
+    if table is None:
+        if state == 'restored':
+            for t in TABLES:
+                if '*%s' % t in state_to_restore:
+                    if len(stdout) == 0 or '*%s' % t not in stdout.splitlines():
+                        (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, t)
+        elif len(stdout) == 0:
+            (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, 'filter')
+
+    elif state == 'restored' and '*%s' % table not in state_to_restore:
+        module.fail_json(msg="Table %s to restore not defined in %s" % (table, path))
+
+    elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines():
+        (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, table)
+
+    initial_state = filter_and_format_state(stdout)
+    if initial_state is None:
+        module.fail_json(msg="Unable to initialize firewall from NULL state.")
+
+    # Depending on the value of 'table', initref_state may differ from
+    # initial_state.
+    (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+    tables_before = parse_per_table_state(stdout)
+    initref_state = filter_and_format_state(stdout)
+
+    if state == 'saved':
+        changed = write_state(b_path, initref_state, changed)
+        module.exit_json(
+            changed=changed,
+            cmd=cmd,
+            tables=tables_before,
+            initial_state=initial_state,
+            saved=initref_state)
+
+    #
+    # All remaining code is for state=restored
+    #
+
+    MAINCOMMAND = list(COMMANDARGS)
+    MAINCOMMAND.insert(0, bin_iptables_restore)
+
+    if wait is not None:
+        MAINCOMMAND.extend(['--wait', '%d' % wait])
+
+    if _back is not None:
+        b_back = to_bytes(_back, errors='surrogate_or_strict')
+        dummy = write_state(b_back, initref_state, changed)
+        BACKCOMMAND = list(MAINCOMMAND)
+        BACKCOMMAND.append(_back)
+
+    if noflush:
+        MAINCOMMAND.append('--noflush')
+
+    MAINCOMMAND.append(path)
+    cmd = ' '.join(MAINCOMMAND)
+
+    TESTCOMMAND = list(MAINCOMMAND)
+    TESTCOMMAND.insert(1, '--test')
+    error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore))
+
+    # Due to a bug in iptables-nft-restore --test, we have to validate tables
+    # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003).
+    for t in tables_before:
+        testcommand = list(TESTCOMMAND)
+        testcommand.extend(['--table', t])
+        (rc, stdout, stderr) = module.run_command(testcommand)
+
+        if 'Another app is currently holding the xtables lock' in stderr:
+            error_msg = stderr
+
+        if rc != 0:
+            cmd = ' '.join(testcommand)
+            module.fail_json(
+                msg=error_msg,
+                cmd=cmd,
+                rc=rc,
+                stdout=stdout,
+                stderr=stderr,
+                tables=tables_before,
+                initial_state=initial_state,
+                restored=state_to_restore,
+                applied=False)
+
+    if module.check_mode:
+        tmpfd, tmpfile = tempfile.mkstemp()
+        with os.fdopen(tmpfd, 'w') as f:
+            f.write("{0}\n".format("\n".join(initial_state)))
+
+        if filecmp.cmp(tmpfile, b_path):
+            restored_state = initial_state
+        else:
+            restored_state = state_to_restore
+
+    else:
+        # Leave the action plugin enough time to retrieve the async status of
+        # the module, in case of a bad option type/value and the like.
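+        # Wait for the '.starter' cookie (path derived from _back) to show up,
+        # then consume it before restoring anything. This synchronization is
+        # expected to be provided by the action plugin, so the restore cannot
+        # begin before the controller is ready to confirm it.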
+ if _back is not None: + b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict') + while True: + if os.path.exists(b_starter): + os.remove(b_starter) + break + time.sleep(0.01) + + (rc, stdout, stderr) = module.run_command(MAINCOMMAND) + if 'Another app is currently holding the xtables lock' in stderr: + module.fail_json( + msg=stderr, + cmd=cmd, + rc=rc, + stdout=stdout, + stderr=stderr, + tables=tables_before, + initial_state=initial_state, + restored=state_to_restore, + applied=False) + + (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) + restored_state = filter_and_format_state(stdout) + tables_after = parse_per_table_state('\n'.join(restored_state)) + if restored_state not in (initref_state, initial_state): + for table_name, table_content in tables_after.items(): + if table_name not in tables_before: + # Would initialize a table, which doesn't exist yet + changed = True + break + if tables_before[table_name] != table_content: + # Content of some table changes + changed = True + break + + if _back is None or module.check_mode: + module.exit_json( + changed=changed, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + restored=restored_state, + applied=True) + + # The rollback implementation currently needs: + # Here: + # * test existence of the backup file, exit with success if it doesn't exist + # * otherwise, restore iptables from this file and return failure + # Action plugin: + # * try to remove the backup file + # * wait async task is finished and retrieve its final status + # * modify it and return the result + # Task: + # * task attribute 'async' set to the same value (or lower) than ansible + # timeout + # * task attribute 'poll' equals 0 + # + for dummy in range(_timeout): + if os.path.exists(b_back): + time.sleep(1) + continue + module.exit_json( + changed=changed, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + restored=restored_state, + applied=True) + + # Here we are: for whatever reason, but probably due to the current ruleset, + # the action plugin (i.e. on the controller) was unable to remove the backup + # cookie, so we restore initial state from it. + (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True) + os.remove(b_back) + + (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) + tables_rollback = parse_per_table_state(stdout) + + msg = ( + "Failed to confirm state restored from %s after %ss. " + "Firewall has been rolled back to its initial state." 
% (path, _timeout) + ) + + module.fail_json( + changed=(tables_before != tables_rollback), + msg=msg, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + restored=restored_state, + applied=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py deleted file mode 120000 index 17b0c47142..0000000000 --- a/plugins/modules/ipwcli_dns.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ipwcli_dns.py \ No newline at end of file diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py new file mode 100644 index 0000000000..909da24ddf --- /dev/null +++ b/plugins/modules/ipwcli_dns.py @@ -0,0 +1,353 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Christian Wollinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipwcli_dns + +short_description: Manage DNS Records for Ericsson IPWorks using C(ipwcli) + +version_added: '0.2.0' + +description: + - Manage DNS records for the Ericsson IPWorks DNS server. The module uses the C(ipwcli) to deploy the DNS records. +requirements: + - ipwcli (installed on Ericsson IPWorks) + +notes: + - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + dnsname: + description: + - Name of the record. + required: true + type: str + type: + description: + - Type of the record. + required: true + type: str + choices: [NAPTR, SRV, A, AAAA] + container: + description: + - Sets the container zone for the record. + required: true + type: str + address: + description: + - The IP address for the A or AAAA record. + - Required for O(type=A) or O(type=AAAA). + type: str + ttl: + description: + - Sets the TTL of the record. + type: int + default: 3600 + state: + description: + - Whether the record should exist or not. + type: str + choices: [absent, present] + default: present + priority: + description: + - Sets the priority of the SRV record. + type: int + default: 10 + weight: + description: + - Sets the weight of the SRV record. + type: int + default: 10 + port: + description: + - Sets the port of the SRV record. + - Required for O(type=SRV). + type: int + target: + description: + - Sets the target of the SRV record. + - Required for O(type=SRV). + type: str + order: + description: + - Sets the order of the NAPTR record. + - Required for O(type=NAPTR). + type: int + preference: + description: + - Sets the preference of the NAPTR record. + - Required for O(type=NAPTR). + type: int + flags: + description: + - Sets one of the possible flags of NAPTR record. + - Required for O(type=NAPTR). + type: str + choices: ['S', 'A', 'U', 'P'] + service: + description: + - Sets the service of the NAPTR record. + - Required for O(type=NAPTR). + type: str + replacement: + description: + - Sets the replacement of the NAPTR record. + - Required for O(type=NAPTR). + type: str + username: + description: + - Username to login on ipwcli. + type: str + required: true + password: + description: + - Password to login on ipwcli. 
+ type: str + required: true + +author: + - Christian Wollinger (@cwollinger) +""" + +EXAMPLES = r""" +- name: Create A record + community.general.ipwcli_dns: + dnsname: example.com + type: A + container: ZoneOne + address: 127.0.0.1 + +- name: Remove SRV record if exists + community.general.ipwcli_dns: + dnsname: _sip._tcp.test.example.com + type: SRV + container: ZoneOne + ttl: 100 + state: absent + target: example.com + port: 5060 + +- name: Create NAPTR record + community.general.ipwcli_dns: + dnsname: test.example.com + type: NAPTR + preference: 10 + container: ZoneOne + ttl: 100 + order: 10 + service: 'SIP+D2T' + replacement: '_sip._tcp.test.example.com.' + flags: S +""" + +RETURN = r""" +record: + description: The created record from the input params. + type: str + returned: always +""" + +from ansible.module_utils.basic import AnsibleModule + + +class ResourceRecord(object): + + def __init__(self, module): + self.module = module + self.dnsname = module.params['dnsname'] + self.dnstype = module.params['type'] + self.container = module.params['container'] + self.address = module.params['address'] + self.ttl = module.params['ttl'] + self.state = module.params['state'] + self.priority = module.params['priority'] + self.weight = module.params['weight'] + self.port = module.params['port'] + self.target = module.params['target'] + self.order = module.params['order'] + self.preference = module.params['preference'] + self.flags = module.params['flags'] + self.service = module.params['service'] + self.replacement = module.params['replacement'] + self.user = module.params['username'] + self.password = module.params['password'] + + def create_naptrrecord(self): + # create NAPTR record with the given params + record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"' + % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement)) + return record + + def create_srvrecord(self): + # create SRV record with the given params + record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s' + % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target)) + return record + + def create_arecord(self): + # create A record with the given params + if self.dnstype == 'AAAA': + record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) + else: + record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) + + return record + + def list_record(self, record): + # check if the record exists via list on ipwcli + search = 'list %s' % (record.replace(';', '&&').replace('set', 'where')) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] + rc, out, err = self.module.run_command(cmd, data=search) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or + ('NAPTRRecord %s' % self.dnsname in out and rc == 0)): + return True, rc, out, err + + return False, rc, out, err + + def deploy_record(self, record): + # check what happens if create fails on ipworks + stdin = 'create %s' % (record) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + 
] + rc, out, err = self.module.run_command(cmd, data=stdin) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if '1 object(s) created.' in out: + return rc, out, err + else: + self.module.fail_json(msg='record creation failed', stderr=out) + + def delete_record(self, record): + # check what happens if create fails on ipworks + stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where')) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] + rc, out, err = self.module.run_command(cmd, data=stdin) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if '1 object(s) were updated.' in out: + return rc, out, err + else: + self.module.fail_json(msg='record deletion failed', stderr=out) + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + dnsname=dict(type='str', required=True), + type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), + container=dict(type='str', required=True), + address=dict(type='str'), + ttl=dict(type='int', default=3600), + state=dict(type='str', default='present', choices=['absent', 'present']), + priority=dict(type='int', default=10), + weight=dict(type='int', default=10), + port=dict(type='int'), + target=dict(type='str'), + order=dict(type='int'), + preference=dict(type='int'), + flags=dict(type='str', choices=['S', 'A', 'U', 'P']), + service=dict(type='str'), + replacement=dict(type='str'), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True) + ) + + # define result + result = dict( + changed=False, + stdout='', + stderr='', + rc=0, + record='' + ) + + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + required_if=[ + ['type', 'A', ['address']], + ['type', 'AAAA', ['address']], + ['type', 'SRV', ['port', 'target']], + ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']], + ], + supports_check_mode=True + ) + + user = ResourceRecord(module) + + if user.dnstype == 'NAPTR': + record = user.create_naptrrecord() + elif user.dnstype == 'SRV': + record = user.create_srvrecord() + elif user.dnstype == 'A' or user.dnstype == 'AAAA': + record = user.create_arecord() + + found, rc, out, err = user.list_record(record) + + if found and user.state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = user.delete_record(record) + result['changed'] = True + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + elif not found and user.state == 'present': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = user.deploy_record(record) + result['changed'] = True + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + else: + result['changed'] = False + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py deleted file mode 120000 index c45fe207b9..0000000000 --- a/plugins/modules/irc.py +++ /dev/null @@ -1 +0,0 @@ -./notification/irc.py \ No newline at end of file diff --git a/plugins/modules/irc.py 
b/plugins/modules/irc.py
new file mode 100644
index 0000000000..537b26e0bc
--- /dev/null
+++ b/plugins/modules/irc.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, Jan-Piet Mens
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+  - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  server:
+    type: str
+    description:
+      - IRC server name/address.
+    default: localhost
+  port:
+    type: int
+    description:
+      - IRC server port number.
+    default: 6667
+  nick:
+    type: str
+    description:
+      - Nickname to send the message from. May be shortened, depending on the server's NICKLEN setting.
+    default: ansible
+  msg:
+    type: str
+    description:
+      - The message body.
+    required: true
+  topic:
+    type: str
+    description:
+      - Set the channel topic.
+  color:
+    type: str
+    description:
+      - Text color for the message.
+    default: "none"
+    choices:
+      - none
+      - white
+      - black
+      - blue
+      - green
+      - red
+      - brown
+      - purple
+      - orange
+      - yellow
+      - light_green
+      - teal
+      - light_cyan
+      - light_blue
+      - pink
+      - gray
+      - light_gray
+    aliases: [colour]
+  channel:
+    type: str
+    description:
+      - Channel name. One of nick_to or channel needs to be set. When both are set, the message is sent to both of them.
+  nick_to:
+    type: list
+    elements: str
+    description:
+      - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the
+        message is sent to both of them.
+  key:
+    type: str
+    description:
+      - Channel key.
+  passwd:
+    type: str
+    description:
+      - Server password.
+  timeout:
+    type: int
+    description:
+      - Timeout to use while waiting for successful registration and join messages; this is to prevent an endless loop.
+    default: 30
+  use_tls:
+    description:
+      - Designates whether TLS/SSL should be used when connecting to the IRC server.
+      - O(use_tls) is available since community.general 8.1.0; before that, the option was exclusively called O(use_ssl). The latter
+        is now an alias of O(use_tls).
+      - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible.
+      - The default of this option changed to V(true) in community.general 10.0.0.
+    type: bool
+    default: true
+    aliases:
+      - use_ssl
+  part:
+    description:
+      - Designates whether the user should part from the channel after sending the message. Useful when using a mock bot and
+        not wanting join/parts between messages.
+    type: bool
+    default: true
+  style:
+    type: str
+    description:
+      - Text style for the message. Note that italic does not work on some clients.
+    choices: ["bold", "underline", "reverse", "italic", "none"]
+    default: none
+  validate_certs:
+    description:
+      - If set to V(false), the SSL certificates are not validated.
+      - This should always be set to V(true). Using V(false) is unsafe and should only be done if the network between
+        Ansible and the IRC server is known to be safe.
+      - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible.
+      - The default of this option changed to V(true) in community.general 10.0.0.
+ type: bool + default: true + version_added: 8.1.0 + +# informational: requirements for nodes +requirements: [socket] +author: + - "Jan-Piet Mens (@jpmens)" + - "Matt Martz (@sivel)" +""" + +EXAMPLES = r""" +- name: Send a message to an IRC channel from nick ansible + community.general.irc: + server: irc.example.net + use_tls: true + validate_certs: true + channel: '#t1' + msg: Hello world + +- name: Send a message to an IRC channel + local_action: + module: irc + port: 6669 + server: irc.example.net + use_tls: true + validate_certs: true + channel: '#t1' + msg: 'All finished at {{ ansible_date_time.iso8601 }}' + color: red + nick: ansibleIRC + +- name: Send a message to an IRC channel + local_action: + module: irc + port: 6669 + server: irc.example.net + use_tls: true + validate_certs: true + channel: '#t1' + nick_to: + - nick1 + - nick2 + msg: 'All finished at {{ ansible_date_time.iso8601 }}' + color: red + nick: ansibleIRC +""" + +# =========================================== +# IRC module support methods. +# + +import re +import socket +import ssl +import time +import traceback + +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible.module_utils.basic import AnsibleModule + + +def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None, + nick="ansible", color='none', passwd=False, timeout=30, use_tls=False, validate_certs=True, + part=True, style=None): + '''send message to IRC''' + nick_to = [] if nick_to is None else nick_to + + colornumbers = { + 'white': "00", + 'black': "01", + 'blue': "02", + 'green': "03", + 'red': "04", + 'brown': "05", + 'purple': "06", + 'orange': "07", + 'yellow': "08", + 'light_green': "09", + 'teal': "10", + 'light_cyan': "11", + 'light_blue': "12", + 'pink': "13", + 'gray': "14", + 'light_gray': "15", + } + + stylechoices = { + 'bold': "\x02", + 'underline': "\x1F", + 'reverse': "\x16", + 'italic': "\x1D", + } + + try: + styletext = stylechoices[style] + except Exception: + styletext = "" + + try: + colornumber = colornumbers[color] + colortext = "\x03" + colornumber + except Exception: + colortext = "" + + message = styletext + colortext + msg + + irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if use_tls: + kwargs = {} + if validate_certs: + context = ssl.create_default_context() + kwargs["server_hostname"] = server + else: + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.verify_mode = ssl.CERT_NONE + irc = context.wrap_socket(irc, **kwargs) + irc.connect((server, int(port))) + + if passwd: + irc.send(to_bytes('PASS %s\r\n' % passwd)) + irc.send(to_bytes('NICK %s\r\n' % nick)) + irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))) + motd = '' + start = time.time() + while 1: + motd += to_native(irc.recv(1024)) + # The server might send back a shorter nick than we specified (due to NICKLEN), + # so grab that and use it from now on (assuming we find the 00[1-4] response). 
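+        # 001-004 are the IRC registration numerics (RPL_WELCOME through
+        # RPL_MYINFO); the server addresses them to our nick, so parse the
+        # effective nick out of the first one that shows up.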
+        match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+        if match:
+            nick = match.group('nick')
+            break
+        elif time.time() - start > timeout:
+            raise Exception('Timeout waiting for IRC server welcome response')
+        time.sleep(0.5)
+
+    if channel:
+        if key:
+            irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+        else:
+            irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+        join = ''
+        start = time.time()
+        while 1:
+            join += to_native(irc.recv(1024))
+            if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+                break
+            elif time.time() - start > timeout:
+                raise Exception('Timeout waiting for IRC JOIN response')
+            time.sleep(0.5)
+
+        if topic is not None:
+            irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+            time.sleep(1)
+
+    if nick_to:
+        for nick in nick_to:
+            irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
+    if channel:
+        irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+    time.sleep(1)
+    if part:
+        if channel:
+            irc.send(to_bytes('PART %s\r\n' % channel))
+        irc.send(to_bytes('QUIT\r\n'))
+        time.sleep(1)
+    irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server=dict(default='localhost'),
+            port=dict(type='int', default=6667),
+            nick=dict(default='ansible'),
+            nick_to=dict(type='list', elements='str'),
+            msg=dict(required=True),
+            color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+                                                                    "green", "red", "brown",
+                                                                    "purple", "orange", "yellow",
+                                                                    "light_green", "teal", "light_cyan",
+                                                                    "light_blue", "pink", "gray",
+                                                                    "light_gray", "none"]),
+            style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+            channel=dict(),
+            key=dict(no_log=True),
+            topic=dict(),
+            passwd=dict(no_log=True),
+            timeout=dict(type='int', default=30),
+            part=dict(type='bool', default=True),
+            use_tls=dict(type='bool', default=True, aliases=['use_ssl']),
+            validate_certs=dict(type='bool', default=True),
+        ),
+        supports_check_mode=True,
+        required_one_of=[['channel', 'nick_to']]
+    )
+
+    server = module.params["server"]
+    port = module.params["port"]
+    nick = module.params["nick"]
+    nick_to = module.params["nick_to"]
+    msg = module.params["msg"]
+    color = module.params["color"]
+    channel = module.params["channel"]
+    topic = module.params["topic"]
+    if topic and not channel:
+        module.fail_json(msg="When topic is specified, a channel is required.")
+    key = module.params["key"]
+    passwd = module.params["passwd"]
+    timeout = module.params["timeout"]
+    use_tls = module.params["use_tls"]
+    part = module.params["part"]
+    style = module.params["style"]
+    validate_certs = module.params["validate_certs"]
+
+    try:
+        send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style)
+    except Exception as e:
+        module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=False, channel=channel, nick=nick,
+                     msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py
deleted file mode 120000
index 897a8d99be..0000000000
--- a/plugins/modules/iso_create.py
+++ /dev/null
@@ -1 +0,0 @@
-./files/iso_create.py
\ No newline at end of file
diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py
new file mode 100644
index 0000000000..8d11bb2248
--- /dev/null
+++ b/plugins/modules/iso_create.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# Copyright (c) 2020, Ansible Project
+# Copyright (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: iso_create
+short_description: Generate ISO file with specified files or folders
+description:
+  - This module is used to generate an ISO file from the specified files or folders.
+author:
+  - Diane Wang (@Tomorrow9)
+requirements:
+  - "pycdlib"
+version_added: '0.2.0'
+
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  src_files:
+    description:
+      - This is a list of absolute paths of source files or folders to be contained in the new generated ISO file.
+      - The module fails if a specified file or folder in O(src_files) does not exist on the local machine.
+      - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and underscores
+        (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path names are limited
+        to 255 characters.'
+    type: list
+    required: true
+    elements: path
+  dest_iso:
+    description:
+      - The absolute path with file name of the new generated ISO file on the local machine.
+      - It creates intermediate folders when they do not exist.
+    type: path
+    required: true
+  interchange_level:
+    description:
+      - The ISO9660 interchange level to use; it dictates the rules on the names of files.
+      - Levels and valid values V(1), V(2), V(3), V(4) are supported.
+      - The default value is level V(1), which is the most conservative; level V(3) is recommended.
+      - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension.
+    type: int
+    default: 1
+    choices: [1, 2, 3, 4]
+  vol_ident:
+    description:
+      - The volume identification string to use on the new generated ISO image.
+    type: str
+  rock_ridge:
+    description:
+      - Whether to make this ISO have the Rock Ridge extensions or not.
+      - Valid values are V(1.09), V(1.10) or V(1.12), meaning that the specified Rock Ridge version is added to the ISO.
+      - If unsure, set V(1.09) to ensure maximum compatibility.
+      - If not specified, no Rock Ridge extension is added to the ISO.
+    type: str
+    choices: ['1.09', '1.10', '1.12']
+  joliet:
+    description:
+      - Support levels and valid values are V(1), V(2), or V(3).
+      - Level V(3) is by far the most common.
+      - If not specified, then no Joliet support is added.
+    type: int
+    choices: [1, 2, 3]
+  udf:
+    description:
+      - Whether to add UDF support to this ISO.
+      - If set to V(true), then version 2.60 of the UDF spec is used.
+      - If not specified or set to V(false), then no UDF support is added.
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Create an ISO file + community.general.iso_create: + src_files: + - /root/testfile.yml + - /root/testfolder + dest_iso: /tmp/test.iso + interchange_level: 3 + +- name: Create an ISO file with Rock Ridge extension + community.general.iso_create: + src_files: + - /root/testfile.yml + - /root/testfolder + dest_iso: /tmp/test.iso + rock_ridge: 1.09 + +- name: Create an ISO file with Joliet support + community.general.iso_create: + src_files: + - ./windows_config/Autounattend.xml + dest_iso: ./test.iso + interchange_level: 3 + joliet: 3 + vol_ident: WIN_AUTOINSTALL +""" + +RETURN = r""" +source_file: + description: Configured source files or directories list. + returned: on success + type: list + elements: path + sample: ["/path/to/file.txt", "/path/to/folder"] +created_iso: + description: Created iso file path. + returned: on success + type: str + sample: "/path/to/test.iso" +interchange_level: + description: Configured interchange level. + returned: on success + type: int + sample: 3 +vol_ident: + description: Configured volume identification string. + returned: on success + type: str + sample: "OEMDRV" +joliet: + description: Configured Joliet support level. + returned: on success + type: int + sample: 3 +rock_ridge: + description: Configured Rock Ridge version. + returned: on success + type: str + sample: "1.09" +udf: + description: Configured UDF support. + returned: on success + type: bool + sample: false +""" + +import os +import traceback + +PYCDLIB_IMP_ERR = None +try: + import pycdlib + HAS_PYCDLIB = True +except ImportError: + PYCDLIB_IMP_ERR = traceback.format_exc() + HAS_PYCDLIB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None): + rr_name = None + joliet_path = None + udf_path = None + # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot, + # followed by a maximum 3 character extension, followed by a semicolon and a version + file_name = os.path.basename(file_path) + if '.' 
not in file_name:
+        file_in_iso_path = file_path.upper() + '.;1'
+    else:
+        file_in_iso_path = file_path.upper() + ';1'
+    if rock_ridge:
+        rr_name = file_name
+    if use_joliet:
+        joliet_path = file_path
+    if use_udf:
+        udf_path = file_path
+    try:
+        iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+    except Exception as err:
+        module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err)))
+
+
+def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+    rr_name = None
+    joliet_path = None
+    udf_path = None
+    iso_dir_path = dir_path.upper()
+    if rock_ridge:
+        rr_name = os.path.basename(dir_path)
+    if use_joliet:
+        joliet_path = dir_path
+    if use_udf:
+        udf_path = dir_path
+    try:
+        iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+    except Exception as err:
+        module.fail_json(msg="Failed to add directory %s to ISO file due to %s" % (dir_path, to_native(err)))
+
+
+def main():
+    argument_spec = dict(
+        src_files=dict(type='list', required=True, elements='path'),
+        dest_iso=dict(type='path', required=True),
+        interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+        vol_ident=dict(type='str'),
+        rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']),
+        joliet=dict(type='int', choices=[1, 2, 3]),
+        udf=dict(type='bool', default=False),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    if not HAS_PYCDLIB:
+        module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR)
+
+    src_file_list = module.params.get('src_files')
+    if not src_file_list:
+        module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.')
+    for src_file in src_file_list:
+        if not os.path.exists(src_file):
+            module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file)
+
+    dest_iso = module.params.get('dest_iso')
+    if not dest_iso:
+        module.fail_json(msg='Please specify the absolute path of the new created ISO file using dest_iso parameter.')
+
+    dest_iso_dir = os.path.dirname(dest_iso)
+    if dest_iso_dir and not os.path.exists(dest_iso_dir):
+        # create intermediate directories for the new ISO file
+        try:
+            os.makedirs(dest_iso_dir)
+        except OSError as err:
+            module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err)))
+
+    volume_id = module.params.get('vol_ident')
+    if volume_id is None:
+        volume_id = ''
+    inter_level = module.params.get('interchange_level')
+    rock_ridge = module.params.get('rock_ridge')
+    use_joliet = module.params.get('joliet')
+    use_udf = None
+    if module.params['udf']:
+        use_udf = '2.60'
+
+    result = dict(
+        changed=False,
+        source_file=src_file_list,
+        created_iso=dest_iso,
+        interchange_level=inter_level,
+        vol_ident=volume_id,
+        rock_ridge=rock_ridge,
+        joliet=use_joliet,
+        udf=use_udf
+    )
+    if not module.check_mode:
+        iso_file = pycdlib.PyCdlib(always_consistent=True)
+        iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf)
+
+        for src_file in src_file_list:
+            # if a directory is specified, walk it to add its files and subdirectories
+            if os.path.isdir(src_file):
+                dir_list = []
+                file_list = []
+                src_file = src_file.rstrip('/')
+                dir_name = os.path.basename(src_file)
+                add_directory(module, iso_file=iso_file,
dir_path='/' + dir_name, rock_ridge=rock_ridge, + use_joliet=use_joliet, use_udf=use_udf) + + # get dir list and file list + for path, dirs, files in os.walk(src_file): + for filename in files: + file_list.append(os.path.join(path, filename)) + for dir in dirs: + dir_list.append(os.path.join(path, dir)) + for new_dir in dir_list: + add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1], + rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) + for new_file in file_list: + add_file(module, iso_file=iso_file, src_file=new_file, + file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge, + use_joliet=use_joliet, use_udf=use_udf) + # if specify a file then add this file directly to the '/' path in ISO + else: + add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file), + rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) + + iso_file.write(dest_iso) + iso_file.close() + + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py new file mode 100644 index 0000000000..7e64f949bd --- /dev/null +++ b/plugins/modules/iso_customize.py @@ -0,0 +1,342 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Ansible Project +# Copyright (c) 2022, VMware, Inc. All Rights Reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: iso_customize +short_description: Add/remove/change files in ISO file +description: + - This module is used to add/remove/change files in ISO file. + - The file inside ISO is overwritten if it exists by option O(add_files). +author: + - Yuhua Zou (@ZouYuhua) +requirements: + - "pycdlib" +version_added: '5.8.0' + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + src_iso: + description: + - This is the path of source ISO file. + type: path + required: true + dest_iso: + description: + - The path of the customized ISO file. + type: path + required: true + delete_files: + description: + - Absolute paths for files inside the ISO file that should be removed. + type: list + required: false + elements: str + default: [] + add_files: + description: + - Allows to add and replace files in the ISO file. + - It creates intermediate folders inside the ISO file when they do not exist. + type: list + required: false + elements: dict + default: [] + suboptions: + src_file: + description: + - The path with file name on the machine the module is executed on. + type: path + required: true + dest_file: + description: + - The absolute path of the file inside the ISO file. + type: str + required: true +notes: + - The C(pycdlib) library states it supports Python 2.7 and 3.4+. + - The function C(add_file) in pycdlib is designed to overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 + / Joliet / UDF. But it does not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround + "delete the existing file and then add file for ISO with Rock Ridge". 
+""" + +EXAMPLES = r""" +- name: "Customize ISO file" + community.general.iso_customize: + src_iso: "/path/to/ubuntu-22.04-desktop-amd64.iso" + dest_iso: "/path/to/ubuntu-22.04-desktop-amd64-customized.iso" + delete_files: + - "/boot.catalog" + add_files: + - src_file: "/path/to/grub.cfg" + dest_file: "/boot/grub/grub.cfg" + - src_file: "/path/to/ubuntu.seed" + dest_file: "/preseed/ubuntu.seed" + register: customize_iso_result +""" + +RETURN = r""" +src_iso: + description: Path of source ISO file. + returned: on success + type: str + sample: "/path/to/file.iso" +dest_iso: + description: Path of the customized ISO file. + returned: on success + type: str + sample: "/path/to/customized.iso" +""" + +import os + +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +with deps.declare("pycdlib"): + import pycdlib + + +# The upper dir exist, we only add subdirectoy +def iso_add_dir(module, opened_iso, iso_type, dir_path): + parent_dir, check_dirname = dir_path.rsplit("/", 1) + if not parent_dir.strip(): + parent_dir = "/" + check_dirname = check_dirname.strip() + + for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname == parent_dir.upper(): + if check_dirname.upper() in dirlist: + return + + if parent_dir == "/": + current_dirpath = "/%s" % check_dirname + else: + current_dirpath = "%s/%s" % (parent_dir, check_dirname) + + current_dirpath_upper = current_dirpath.upper() + try: + if iso_type == "iso9660": + opened_iso.add_directory(current_dirpath_upper) + elif iso_type == "rr": + opened_iso.add_directory(current_dirpath_upper, rr_name=check_dirname) + elif iso_type == "joliet": + opened_iso.add_directory(current_dirpath_upper, joliet_path=current_dirpath) + elif iso_type == "udf": + opened_iso.add_directory(current_dirpath_upper, udf_path=current_dirpath) + except Exception as err: + msg = "Failed to create dir %s with error: %s" % (current_dirpath, to_native(err)) + module.fail_json(msg=msg) + + +def iso_add_dirs(module, opened_iso, iso_type, dir_path): + dirnames = dir_path.strip().split("/") + + current_dirpath = "/" + for item in dirnames: + if not item.strip(): + continue + if current_dirpath == "/": + current_dirpath = "/%s" % item + else: + current_dirpath = "%s/%s" % (current_dirpath, item) + + iso_add_dir(module, opened_iso, iso_type, current_dirpath) + + +def iso_check_file_exists(opened_iso, dest_file): + file_dir = os.path.dirname(dest_file).strip() + file_name = os.path.basename(dest_file) + dirnames = file_dir.strip().split("/") + + parent_dir = "/" + for item in dirnames: + if not item.strip(): + continue + + for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname != parent_dir.upper(): + break + + if item.upper() not in dirlist: + return False + + if parent_dir == "/": + parent_dir = "/%s" % item + else: + parent_dir = "%s/%s" % (parent_dir, item) + + if '.' 
not in file_name: + file_in_iso_path = file_name.upper() + '.;1' + else: + file_in_iso_path = file_name.upper() + ';1' + + for dirname, dummy_dirlist, filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname != parent_dir.upper(): + return False + + return file_name.upper() in filelist or file_in_iso_path in filelist + + +def iso_add_file(module, opened_iso, iso_type, src_file, dest_file): + dest_file = dest_file.strip() + if dest_file[0] != "/": + dest_file = "/%s" % dest_file + + file_local = src_file.strip() + + file_dir = os.path.dirname(dest_file).strip() + file_name = os.path.basename(dest_file) + if '.' not in file_name: + file_in_iso_path = dest_file.upper() + '.;1' + else: + file_in_iso_path = dest_file.upper() + ';1' + + if file_dir and file_dir != "/": + iso_add_dirs(module, opened_iso, iso_type, file_dir) + + try: + if iso_type == "iso9660": + opened_iso.add_file(file_local, iso_path=file_in_iso_path) + elif iso_type == "rr": + # For ISO with Rock Ridge 1.09 / 1.10, it won't overwrite the existing file + # So we take workaround here: delete the existing file and then add file + if iso_check_file_exists(opened_iso, dest_file): + opened_iso.rm_file(iso_path=file_in_iso_path) + opened_iso.add_file(file_local, iso_path=file_in_iso_path, rr_name=file_name) + elif iso_type == "joliet": + opened_iso.add_file(file_local, iso_path=file_in_iso_path, joliet_path=dest_file) + elif iso_type == "udf": + # For ISO with UDF, it won't always succeed to overwrite the existing file + # So we take workaround here: delete the existing file and then add file + if iso_check_file_exists(opened_iso, dest_file): + opened_iso.rm_file(udf_path=dest_file) + opened_iso.add_file(file_local, iso_path=file_in_iso_path, udf_path=dest_file) + except Exception as err: + msg = "Failed to add local file %s to ISO with error: %s" % (file_local, to_native(err)) + module.fail_json(msg=msg) + + +def iso_delete_file(module, opened_iso, iso_type, dest_file): + dest_file = dest_file.strip() + if dest_file[0] != "/": + dest_file = "/%s" % dest_file + file_name = os.path.basename(dest_file) + + if not iso_check_file_exists(opened_iso, dest_file): + module.fail_json(msg="The file %s does not exist." % dest_file) + + if '.' 
not in file_name: + file_in_iso_path = dest_file.upper() + '.;1' + else: + file_in_iso_path = dest_file.upper() + ';1' + + try: + if iso_type == "iso9660": + opened_iso.rm_file(iso_path=file_in_iso_path) + elif iso_type == "rr": + opened_iso.rm_file(iso_path=file_in_iso_path) + elif iso_type == "joliet": + opened_iso.rm_file(joliet_path=dest_file) + elif iso_type == "udf": + opened_iso.rm_file(udf_path=dest_file) + except Exception as err: + msg = "Failed to delete iso file %s with error: %s" % (dest_file, to_native(err)) + module.fail_json(msg=msg) + + +def iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list): + iso = None + iso_type = "iso9660" + + try: + iso = pycdlib.PyCdlib(always_consistent=True) + iso.open(src_iso) + if iso.has_rock_ridge(): + iso_type = "rr" + elif iso.has_joliet(): + iso_type = "joliet" + elif iso.has_udf(): + iso_type = "udf" + + for item in delete_files_list: + iso_delete_file(module, iso, iso_type, item) + + for item in add_files_list: + iso_add_file(module, iso, iso_type, item['src_file'], item['dest_file']) + + iso.write(dest_iso) + except Exception as err: + msg = "Failed to rebuild ISO %s with error: %s" % (src_iso, to_native(err)) + module.fail_json(msg=msg) + finally: + if iso: + iso.close() + + +def main(): + argument_spec = dict( + src_iso=dict(type='path', required=True), + dest_iso=dict(type='path', required=True), + delete_files=dict(type='list', elements='str', default=[]), + add_files=dict( + type='list', elements='dict', default=[], + options=dict( + src_file=dict(type='path', required=True), + dest_file=dict(type='str', required=True), + ), + ), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[('delete_files', 'add_files'), ], + supports_check_mode=True, + ) + deps.validate(module) + + src_iso = module.params['src_iso'] + if not os.path.exists(src_iso): + module.fail_json(msg="ISO file %s does not exist." % src_iso) + + dest_iso = module.params['dest_iso'] + dest_iso_dir = os.path.dirname(dest_iso) + if dest_iso_dir and not os.path.exists(dest_iso_dir): + module.fail_json(msg="The dest directory %s does not exist" % dest_iso_dir) + + delete_files_list = [s.strip() for s in module.params['delete_files']] + add_files_list = module.params['add_files'] + if add_files_list: + for item in add_files_list: + if not os.path.exists(item['src_file']): + module.fail_json(msg="The file %s does not exist." 
% item['src_file']) + + result = dict( + src_iso=src_iso, + customized_iso=dest_iso, + delete_files=delete_files_list, + add_files=add_files_list, + changed=True, + ) + + if not module.check_mode: + iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list) + + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py deleted file mode 120000 index f74dcfd283..0000000000 --- a/plugins/modules/iso_extract.py +++ /dev/null @@ -1 +0,0 @@ -./files/iso_extract.py \ No newline at end of file diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py new file mode 100644 index 0000000000..11897744a8 --- /dev/null +++ b/plugins/modules/iso_extract.py @@ -0,0 +1,220 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Jeroen Hoekx +# Copyright (c) 2016, Matt Robinson +# Copyright (c) 2017, Dag Wieers +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Jeroen Hoekx (@jhoekx) + - Matt Robinson (@ribbons) + - Dag Wieers (@dagwieers) +module: iso_extract +short_description: Extract files from an ISO image +description: + - This module has two possible ways of operation. + - If 7zip is installed on the system, this module extracts files from an ISO into a temporary directory and copies files + to a given destination, if needed. + - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module mounts the ISO image to a temporary location, + and copies files to a given destination, if needed. +requirements: + - Either 7z (from C(7zip) or C(p7zip) package) + - Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + image: + description: + - The ISO image to extract files from. + type: path + required: true + aliases: [path, src] + dest: + description: + - The destination directory to extract files to. + type: path + required: true + files: + description: + - A list of files to extract from the image. + - Extracting directories does not work. + type: list + elements: str + required: true + force: + description: + - If V(true), it replaces the remote file when contents are different than the source. + - If V(false), the file is only extracted and copied if the destination does not already exist. + type: bool + default: true + executable: + description: + - The path to the C(7z) executable to use for extracting files from the ISO. + - If not provided, it assumes the value V(7z). + type: path + password: + description: + - Password used to decrypt files from the ISO. + - It is only used if C(7z) is used. + - The password is used as a command line argument to 7z. This is a B(potential security risk) that allows passwords + to be revealed if someone else can list running processes on the same machine in the right moment. + type: str + version_added: 10.1.0 +notes: + - Only the file checksum (content) is taken into account when extracting files from the ISO image. If O(force=false), only + checks the presence of the file. 
+""" + +EXAMPLES = r""" +- name: Extract kernel and ramdisk from a LiveCD + community.general.iso_extract: + image: /tmp/rear-test.iso + dest: /tmp/virt-rear/ + files: + - isolinux/kernel + - isolinux/initrd.cgz +""" + +RETURN = r""" +# +""" + +import os.path +import shutil +import tempfile + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + image=dict(type='path', required=True, aliases=['path', 'src']), + dest=dict(type='path', required=True), + files=dict(type='list', elements='str', required=True), + force=dict(type='bool', default=True), + password=dict(type='str', no_log=True), + executable=dict(type='path'), # No default on purpose + ), + supports_check_mode=True, + ) + image = module.params['image'] + dest = module.params['dest'] + files = module.params['files'] + force = module.params['force'] + password = module.params['password'] + executable = module.params['executable'] + + result = dict( + changed=False, + dest=dest, + image=image, + ) + + # We want to know if the user provided it or not, so we set default here + if executable is None: + executable = '7z' + + binary = module.get_bin_path(executable, None) + + # When executable was provided and binary not found, warn user ! + if module.params['executable'] is not None and not binary: + module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable) + + if not os.path.exists(dest): + module.fail_json(msg="Directory '%s' does not exist" % dest) + + if not os.path.exists(os.path.dirname(image)): + module.fail_json(msg="ISO image '%s' does not exist" % image) + + result['files'] = [] + extract_files = list(files) + + if not force: + # Check if we have to process any files based on existence + for f in files: + dest_file = os.path.join(dest, os.path.basename(f)) + if os.path.exists(dest_file): + result['files'].append(dict( + checksum=None, + dest=dest_file, + src=f, + )) + extract_files.remove(f) + + if not extract_files: + module.exit_json(**result) + + tmp_dir = tempfile.mkdtemp() + + # Use 7zip when we have a binary, otherwise try to mount + if binary: + cmd = [binary, 'x', image, '-o%s' % tmp_dir] + if password: + cmd += ["-p%s" % password] + cmd += extract_files + else: + cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir] + + rc, out, err = module.run_command(cmd) + if rc != 0: + result.update(dict( + cmd=cmd, + rc=rc, + stderr=err, + stdout=out, + )) + shutil.rmtree(tmp_dir) + + if binary: + module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result) + else: + module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." 
% (image, tmp_dir, executable), **result) + + try: + for f in extract_files: + tmp_src = os.path.join(tmp_dir, f) + if not os.path.exists(tmp_src): + module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result) + + src_checksum = module.sha1(tmp_src) + + dest_file = os.path.join(dest, os.path.basename(f)) + + if os.path.exists(dest_file): + dest_checksum = module.sha1(dest_file) + else: + dest_checksum = None + + result['files'].append(dict( + checksum=src_checksum, + dest=dest_file, + src=f, + )) + + if src_checksum != dest_checksum: + if not module.check_mode: + shutil.copy(tmp_src, dest_file) + + result['changed'] = True + finally: + if not binary: + module.run_command([module.get_bin_path('umount'), tmp_dir]) + + shutil.rmtree(tmp_dir) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py deleted file mode 120000 index 46eee6ebd9..0000000000 --- a/plugins/modules/jabber.py +++ /dev/null @@ -1 +0,0 @@ -./notification/jabber.py \ No newline at end of file diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py new file mode 100644 index 0000000000..096a9c6ef2 --- /dev/null +++ b/plugins/modules/jabber.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: jabber +short_description: Send a message to jabber user or chat room +description: + - Send a message to jabber. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + user: + type: str + description: + - User as which to connect. + required: true + password: + type: str + description: + - Password for user to connect. + required: true + to: + type: str + description: + - User ID or name of the room, when using room use a slash to indicate your nick. + required: true + msg: + type: str + description: + - The message body. + required: true + host: + type: str + description: + - Host to connect, overrides user info. + port: + type: int + description: + - Port to connect to, overrides default. + default: 5222 + encoding: + type: str + description: + - Message encoding. 
+requirements: + - python xmpp (xmpppy) +author: "Brian Coca (@bcoca)" +""" + +EXAMPLES = r""" +- name: Send a message to a user + community.general.jabber: + user: mybot@example.net + password: secret + to: friend@example.net + msg: Ansible task finished + +- name: Send a message to a room + community.general.jabber: + user: mybot@example.net + password: secret + to: mychaps@conference.example.net/ansiblebot + msg: Ansible task finished + +- name: Send a message, specifying the host and port + community.general.jabber: + user: mybot@example.net + host: talk.example.net + port: 5223 + password: secret + to: mychaps@example.net + msg: Ansible task finished +""" + +import time +import traceback + +HAS_XMPP = True +XMPP_IMP_ERR = None +try: + import xmpp +except ImportError: + XMPP_IMP_ERR = traceback.format_exc() + HAS_XMPP = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True), + password=dict(required=True, no_log=True), + to=dict(required=True), + msg=dict(required=True), + host=dict(), + port=dict(default=5222, type='int'), + encoding=dict(), + ), + supports_check_mode=True + ) + + if not HAS_XMPP: + module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR) + + jid = xmpp.JID(module.params['user']) + user = jid.getNode() + server = jid.getDomain() + port = module.params['port'] + password = module.params['password'] + try: + to, nick = module.params['to'].split('/', 1) + except ValueError: + to, nick = module.params['to'], None + + if module.params['host']: + host = module.params['host'] + else: + host = server + if module.params['encoding']: + xmpp.simplexml.ENCODING = module.params['encoding'] + + msg = xmpp.protocol.Message(body=module.params['msg']) + + try: + conn = xmpp.Client(server, debug=[]) + if not conn.connect(server=(host, port)): + module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) + if not conn.auth(user, password, 'Ansible'): + module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server)) + # some old servers require this, also the sleep following send + conn.sendInitPresence(requestRoster=0) + + if nick: # sending to room instead of user, need to join + msg.setType('groupchat') + msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') + join = xmpp.Presence(to=module.params['to']) + join.setTag('x', namespace='http://jabber.org/protocol/muc') + conn.send(join) + time.sleep(1) + else: + msg.setType('chat') + + msg.setTo(to) + if not module.check_mode: + conn.send(msg) + time.sleep(1) + conn.disconnect() + except Exception as e: + module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py deleted file mode 120000 index 1bbf7365d4..0000000000 --- a/plugins/modules/java_cert.py +++ /dev/null @@ -1 +0,0 @@ -./system/java_cert.py \ No newline at end of file diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py new file mode 100644 index 0000000000..2f1f33f782 --- /dev/null +++ b/plugins/modules/java_cert.py @@ -0,0 +1,631 @@ +#!/usr/bin/python + +# Copyright (c) 2013, RSD Services S.A +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: java_cert + +short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts) +description: + - This is a wrapper module around keytool, which can be used to import certificates and optionally private keys to a given + java keystore, or remove them from it. +extends_documentation_fragment: + - community.general.attributes + - ansible.builtin.files +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + cert_url: + description: + - Basic URL to fetch SSL certificate from. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. + type: str + cert_port: + description: + - Port to connect to URL. + - This is used to create server URL:PORT. + type: int + default: 443 + cert_path: + description: + - Local path to load certificate from. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. + type: path + cert_content: + description: + - Content of the certificate used to create the keystore. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. + type: str + version_added: 8.6.0 + cert_alias: + description: + - Imported certificate alias. + - The alias is used when checking for the presence of a certificate in the keystore. + type: str + trust_cacert: + description: + - Trust imported cert as CAcert. + type: bool + default: false + version_added: '0.2.0' + pkcs12_path: + description: + - Local path to load PKCS12 keystore from. + - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching the certificate, + and is used to import both the certificate and its private key into the java keystore. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. + type: path + pkcs12_password: + description: + - Password for importing from PKCS12 keystore. + type: str + pkcs12_alias: + description: + - Alias in the PKCS12 keystore. + type: str + keystore_path: + description: + - Path to keystore. + type: path + keystore_pass: + description: + - Keystore password. + type: str + required: true + keystore_create: + description: + - Create keystore if it does not exist. + type: bool + default: false + keystore_type: + description: + - Keystore type (JCEKS, JKS). + type: str + executable: + description: + - Path to keytool binary if not used we search in PATH for it. + type: str + default: keytool + state: + description: + - Defines action which can be either certificate import or removal. + - When O(state=present), the certificate is always inserted into the keystore, even if there already exists a cert alias + that is different. 
+ type: str + choices: [absent, present] + default: present + mode: + version_added: 8.5.0 + owner: + version_added: 8.5.0 + group: + version_added: 8.5.0 + seuser: + version_added: 8.5.0 + serole: + version_added: 8.5.0 + setype: + version_added: 8.5.0 + selevel: + version_added: 8.5.0 + unsafe_writes: + version_added: 8.5.0 + attributes: + version_added: 8.5.0 +requirements: [openssl, keytool] +author: + - Adam Hamsik (@haad) +""" + +EXAMPLES = r""" +- name: Import SSL certificate from google.com to a given cacerts keystore + community.general.java_cert: + cert_url: google.com + cert_port: 443 + keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts + keystore_pass: changeit + state: present + +- name: Remove certificate with given alias from a keystore + community.general.java_cert: + cert_url: google.com + keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts + keystore_pass: changeit + executable: /usr/lib/jvm/jre7/bin/keytool + state: absent + +- name: Import trusted CA from SSL certificate + community.general.java_cert: + cert_path: /opt/certs/rootca.crt + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + cert_alias: LE_RootCA + trust_cacert: true + +- name: Import trusted CA from the SSL certificate stored in the cert_content variable + community.general.java_cert: + cert_content: | + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + cert_alias: LE_RootCA + trust_cacert: true + +- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist + community.general.java_cert: + cert_url: google.com + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + +- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist + community.general.java_cert: + pkcs12_path: "/tmp/importkeystore.p12" + cert_alias: default + keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks + keystore_pass: changeit + keystore_create: true + state: present + +- name: Import SSL certificate to JCEKS keystore + community.general.java_cert: + pkcs12_path: "/tmp/importkeystore.p12" + pkcs12_alias: default + pkcs12_password: somepass + cert_alias: default + keystore_path: /opt/someapp/security/keystore.jceks + keystore_type: "JCEKS" + keystore_pass: changeit + keystore_create: true + state: present +""" + +RETURN = r""" +cmd: + description: Executed command to get action done. 
+ returned: success + type: str + sample: "keytool -importcert -noprompt -keystore" +""" + +import os +import tempfile +import re +from urllib.parse import urlparse +from urllib.request import getproxies + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + + +def _get_keystore_type_keytool_parameters(keystore_type): + ''' Check that custom keystore is presented in parameters ''' + if keystore_type: + return ["-storetype", keystore_type] + return [] + + +def _check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type): + ''' Check if certificate with alias is present in keystore + located at keystore_path ''' + test_cmd = [ + executable, + "-list", + "-keystore", + keystore_path, + "-alias", + alias, + "-rfc" + ] + test_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + (check_rc, stdout, dummy) = module.run_command(test_cmd, data=keystore_pass, check_rc=False) + if check_rc == 0: + return (True, stdout) + return (False, '') + + +def _get_certificate_from_url(module, executable, url, port, pem_certificate_output): + remote_cert_pem_chain = _download_cert_url(module, executable, url, port) + with open(pem_certificate_output, 'w') as f: + f.write(remote_cert_pem_chain) + + +def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_certificate_output, openssl_bin): + """ Read a X509 certificate chain file and output the first certificate in the list """ + extract_cmd = [ + openssl_bin, + "x509", + "-in", + pem_certificate_file, + "-out", + pem_certificate_output + ] + (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) + + if extract_rc != 0: + # trying der encoded file + extract_cmd += ["-inform", "der"] + (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) + + if extract_rc != 0: + # this time it is a real failure + module.fail_json(msg="Internal module failure, cannot extract certificate, error: %s" % extract_stderr, + rc=extract_rc, cmd=extract_cmd) + + return extract_rc + + +def _get_digest_from_x509_file(module, pem_certificate_file, openssl_bin): + """ Read a X509 certificate file and output sha256 digest using openssl """ + # cleanup file before to compare + (dummy, tmp_certificate) = tempfile.mkstemp() + module.add_cleanup_file(tmp_certificate) + _get_first_certificate_from_x509_file(module, pem_certificate_file, tmp_certificate, openssl_bin) + dgst_cmd = [ + openssl_bin, + "dgst", + "-r", + "-sha256", + tmp_certificate + ] + (dgst_rc, dgst_stdout, dgst_stderr) = module.run_command(dgst_cmd, check_rc=False) + + if dgst_rc != 0: + module.fail_json(msg="Internal module failure, cannot compute digest for certificate, error: %s" % dgst_stderr, + rc=dgst_rc, cmd=dgst_cmd) + + return dgst_stdout.split(' ')[0] + + +def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, password, dest): + """ Runs keytools to extract the public cert from a PKCS12 archive and write it to a file. 
""" + export_cmd = [ + executable, + "-list", + "-noprompt", + "-keystore", + pkcs_file, + "-storetype", + "pkcs12", + "-rfc" + ] + # Append optional alias + if alias: + export_cmd.extend(["-alias", alias]) + (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) + + if export_rc != 0: + module.fail_json(msg="Internal module failure, cannot extract public certificate from PKCS12, message: %s" % export_stdout, + stderr=export_err, + rc=export_rc) + + with open(dest, 'w') as f: + f.write(export_stdout) + + +def get_proxy_settings(scheme='https'): + """ Returns a tuple containing (proxy_host, proxy_port). (False, False) if no proxy is found """ + proxy_url = getproxies().get(scheme, '') + if not proxy_url: + return (False, False) + else: + parsed_url = urlparse(proxy_url) + if parsed_url.scheme: + (proxy_host, proxy_port) = parsed_url.netloc.split(':') + else: + (proxy_host, proxy_port) = parsed_url.path.split(':') + return (proxy_host, proxy_port) + + +def build_proxy_options(): + """ Returns list of valid proxy options for keytool """ + (proxy_host, proxy_port) = get_proxy_settings() + no_proxy = os.getenv("no_proxy") + + proxy_opts = [] + if proxy_host: + proxy_opts.extend(["-J-Dhttps.proxyHost=%s" % proxy_host, "-J-Dhttps.proxyPort=%s" % proxy_port]) + + if no_proxy is not None: + # For Java's nonProxyHosts property, items are separated by '|', + # and patterns have to start with "*". + non_proxy_hosts = no_proxy.replace(',', '|') + non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts) + + # The property name is http.nonProxyHosts, there is no + # separate setting for HTTPS. + proxy_opts.extend(["-J-Dhttp.nonProxyHosts=%s" % non_proxy_hosts]) + return proxy_opts + + +def _update_permissions(module, keystore_path): + """ Updates keystore file attributes as necessary """ + file_args = module.load_file_common_arguments(module.params, path=keystore_path) + return module.set_fs_attributes_if_different(file_args, False) + + +def _download_cert_url(module, executable, url, port): + """ Fetches the certificate from the remote URL using `keytool -printcert...` + The PEM formatted string is returned """ + proxy_opts = build_proxy_options() + fetch_cmd = [executable, "-printcert", "-rfc", "-sslserver"] + proxy_opts + ["%s:%d" % (url, port)] + + # Fetch SSL certificate from remote host. 
+ (fetch_rc, fetch_out, fetch_err) = module.run_command(fetch_cmd, check_rc=False) + + if fetch_rc != 0: + module.fail_json(msg="Internal module failure, cannot download certificate, error: %s" % fetch_err, + rc=fetch_rc, cmd=fetch_cmd) + + return fetch_out + + +def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, keystore_alias, keystore_type): + ''' Import pkcs12 from path into keystore located on + keystore_path as alias ''' + optional_aliases = { + "-destalias": keystore_alias, + "-srcalias": pkcs12_alias + } + import_cmd = [ + executable, + "-importkeystore", + "-noprompt", + "-srcstoretype", + "pkcs12", + "-srckeystore", + pkcs12_path, + "-destkeystore", + keystore_path, + ] + # Append optional aliases + for flag, value in optional_aliases.items(): + if value: + import_cmd.extend([flag, value]) + + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass) + # Password of a new keystore must be entered twice, for confirmation + if not os.path.exists(keystore_path): + secret_data = "%s\n%s" % (keystore_pass, secret_data) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) + diff = {'before': '\n', 'after': '%s\n' % keystore_alias} + + if import_rc != 0 or not os.path.exists(keystore_path): + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + return dict(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + + +def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): + ''' Import certificate from path into keystore located on + keystore_path as alias ''' + import_cmd = [ + executable, + "-importcert", + "-noprompt", + "-keystore", + keystore_path, + "-file", + path, + "-alias", + alias + ] + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + if trust_cacert: + import_cmd.extend(["-trustcacerts"]) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, + data="%s\n%s" % (keystore_pass, keystore_pass), + check_rc=False) + diff = {'before': '\n', 'after': '%s\n' % alias} + + if import_rc != 0: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + return dict(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + + +def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type): + ''' Delete certificate identified with alias from keystore on keystore_path ''' + del_cmd = [ + executable, + "-delete", + "-noprompt", + "-keystore", + keystore_path, + "-alias", + alias + ] + + del_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + # Delete SSL certificate from keystore + (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True) + diff = {'before': '%s\n' % alias, 'after': None} + + if del_rc != 0: + module.fail_json(msg=del_out, rc=del_rc, cmd=del_cmd, error=del_err) + + return dict(changed=True, msg=del_out, rc=del_rc, cmd=del_cmd, + stdout=del_out, error=del_err, diff=diff) + + +def test_keytool(module, executable): + ''' Test if keytool is actually executable or not ''' + module.run_command([executable], check_rc=True) 
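# Editor's note: a minimal standalone sketch (not part of this patch) of the
# fingerprint comparison idea the functions above build on. The module shells
# out to `openssl dgst -r -sha256` over a normalized PEM file; an equivalent
# pure-Python fingerprint (matching `openssl x509 -noout -fingerprint -sha256`)
# hashes the DER bytes, since a PEM body is just base64-encoded DER. The file
# names in the usage lines are hypothetical.
import base64
import hashlib
import re


def pem_sha256_fingerprint(pem_text):
    """Return the SHA-256 fingerprint of the first certificate in a PEM string."""
    match = re.search(r"-----BEGIN CERTIFICATE-----(.*?)-----END CERTIFICATE-----",
                      pem_text, re.DOTALL)
    if match is None:
        raise ValueError("no PEM certificate found")
    der = base64.b64decode("".join(match.group(1).split()))
    digest = hashlib.sha256(der).hexdigest().upper()
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))


# Usage sketch: skip the keytool import when stored and candidate certs match.
# with open("old.pem") as f_old, open("new.pem") as f_new:
#     if pem_sha256_fingerprint(f_old.read()) == pem_sha256_fingerprint(f_new.read()):
#         print("certificate already present, nothing to do")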
+
+
+def test_keystore(module, keystore_path):
+    ''' Check that the keystore exists and is accessible as a file '''
+    if keystore_path is None:
+        keystore_path = ''
+
+    if not os.path.exists(keystore_path):
+        # Keystore does not exist and the module was not asked to create it
+        module.fail_json(changed=False, msg="Module requires an existing keystore at keystore_path '%s'" % keystore_path)
+
+
+def main():
+    argument_spec = dict(
+        cert_url=dict(type='str'),
+        cert_path=dict(type='path'),
+        cert_content=dict(type='str'),
+        pkcs12_path=dict(type='path'),
+        pkcs12_password=dict(type='str', no_log=True),
+        pkcs12_alias=dict(type='str'),
+        cert_alias=dict(type='str'),
+        cert_port=dict(type='int', default=443),
+        keystore_path=dict(type='path'),
+        keystore_pass=dict(type='str', required=True, no_log=True),
+        trust_cacert=dict(type='bool', default=False),
+        keystore_create=dict(type='bool', default=False),
+        keystore_type=dict(type='str'),
+        executable=dict(type='str', default='keytool'),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[['state', 'present', ('cert_path', 'cert_url', 'cert_content', 'pkcs12_path'), True],
+                     ['state', 'absent', ('cert_url', 'cert_alias'), True]],
+        required_together=[['keystore_path', 'keystore_pass']],
+        mutually_exclusive=[
+            ['cert_url', 'cert_path', 'cert_content', 'pkcs12_path']
+        ],
+        supports_check_mode=True,
+        add_file_common_args=True,
+    )
+
+    url = module.params.get('cert_url')
+    path = module.params.get('cert_path')
+    content = module.params.get('cert_content')
+    port = module.params.get('cert_port')
+
+    pkcs12_path = module.params.get('pkcs12_path')
+    pkcs12_pass = module.params.get('pkcs12_password', '')
+    pkcs12_alias = module.params.get('pkcs12_alias', '1')
+
+    cert_alias = module.params.get('cert_alias') or url
+    trust_cacert = module.params.get('trust_cacert')
+
+    keystore_path = module.params.get('keystore_path')
+    keystore_pass = module.params.get('keystore_pass')
+    keystore_create = module.params.get('keystore_create')
+    keystore_type = module.params.get('keystore_type')
+    executable = module.params.get('executable')
+    state = module.params.get('state')
+
+    # openssl dependency resolution
+    openssl_bin = module.get_bin_path('openssl', True)
+
+    if path and not cert_alias:
+        module.fail_json(changed=False,
+                         msg="Using local path import from %s requires alias argument."
+ % keystore_path) + + test_keytool(module, executable) + + if not keystore_create: + test_keystore(module, keystore_path) + + alias_exists, alias_exists_output = _check_cert_present( + module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + + (dummy, new_certificate) = tempfile.mkstemp() + (dummy, old_certificate) = tempfile.mkstemp() + module.add_cleanup_file(new_certificate) + module.add_cleanup_file(old_certificate) + + result = dict() + + if state == 'absent' and alias_exists: + if module.check_mode: + module.exit_json(changed=True) + + # delete + result = delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + + # dump certificate to enroll in the keystore on disk and compute digest + if state == 'present': + # The alias exists in the keystore so we must now compare the SHA256 hash of the + # public certificate already in the keystore, and the certificate we are wanting to add + if alias_exists: + with open(old_certificate, "w") as f: + f.write(alias_exists_output) + keystore_cert_digest = _get_digest_from_x509_file(module, old_certificate, openssl_bin) + + else: + keystore_cert_digest = '' + + if pkcs12_path: + # Extracting certificate with openssl + _export_public_cert_from_pkcs12(module, executable, pkcs12_path, pkcs12_alias, pkcs12_pass, new_certificate) + + elif path: + # Extracting the X509 digest is a bit easier. Keytool will print the PEM + # certificate to stdout so we don't need to do any transformations. + new_certificate = path + + elif content: + with open(new_certificate, "w") as f: + f.write(content) + + elif url: + # Getting the X509 digest from a URL is the same as from a path, we just have + # to download the cert first + _get_certificate_from_url(module, executable, url, port, new_certificate) + + new_cert_digest = _get_digest_from_x509_file(module, new_certificate, openssl_bin) + + if keystore_cert_digest != new_cert_digest: + + if module.check_mode: + module.exit_json(changed=True) + + if alias_exists: + # The certificate in the keystore does not match with the one we want to be present + # The existing certificate must first be deleted before we insert the correct one + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + + if pkcs12_path: + result = import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, cert_alias, keystore_type) + else: + result = import_cert_path(module, executable, new_certificate, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) + + if os.path.exists(keystore_path): + changed_permissions = _update_permissions(module, keystore_path) + result['changed'] = result.get('changed', False) or changed_permissions + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py deleted file mode 120000 index 3934e70bda..0000000000 --- a/plugins/modules/java_keystore.py +++ /dev/null @@ -1 +0,0 @@ -./system/java_keystore.py \ No newline at end of file diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py new file mode 100644 index 0000000000..6cb063e883 --- /dev/null +++ b/plugins/modules/java_keystore.py @@ -0,0 +1,550 @@ +#!/usr/bin/python + +# Copyright (c) 2021, quidame +# Copyright (c) 2016, Guillaume Grossetie +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: java_keystore +short_description: Create a Java keystore in JKS format +description: + - Bundle a x509 certificate and its private key into a Java Keystore in JKS format. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the certificate in the keystore. + - If the provided name does not exist in the keystore, the module re-creates the keystore. This behavior changed in + community.general 3.0.0, before that the module would fail when the name did not match. + type: str + required: true + certificate: + description: + - Content of the certificate used to create the keystore. + - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore, + the keystore is regenerated with the provided certificate. + - Exactly one of O(certificate) or O(certificate_path) is required. + type: str + certificate_path: + description: + - Location of the certificate used to create the keystore. + - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore, + the keystore is regenerated with the provided certificate. + - Exactly one of O(certificate) or O(certificate_path) is required. + type: path + version_added: '3.0.0' + private_key: + description: + - Content of the private key used to create the keystore. + - Exactly one of O(private_key) or O(private_key_path) is required. + type: str + private_key_path: + description: + - Location of the private key used to create the keystore. + - Exactly one of O(private_key) or O(private_key_path) is required. + type: path + version_added: '3.0.0' + private_key_passphrase: + description: + - Passphrase used to read the private key, if required. + type: str + version_added: '0.2.0' + password: + description: + - Password that should be used to secure the keystore. + - If the provided password fails to unlock the keystore, the module re-creates the keystore with the new passphrase. + This behavior changed in community.general 3.0.0, before that the module would fail when the password did not match. + type: str + required: true + dest: + description: + - Absolute path of the generated keystore. + type: path + required: true + force: + description: + - Keystore is created even if it already exists. + type: bool + default: false + owner: + description: + - Name of the user that should own jks file. + required: false + group: + description: + - Name of the group that should own jks file. + required: false + mode: + description: + - Mode the file should be. + required: false + ssl_backend: + description: + - Backend for loading private keys and certificates. + type: str + default: openssl + choices: + - openssl + - cryptography + version_added: 3.1.0 + keystore_type: + description: + - Type of the Java keystore. + - When this option is omitted and the keystore does not already exist, the behavior follows C(keytool)'s default store + type which depends on Java version; V(pkcs12) since Java 9 and V(jks) prior (may also be V(pkcs12) if new default + has been backported to this version). + - When this option is omitted and the keystore already exists, the current type is left untouched, unless another option + leads to overwrite the keystore (in that case, this option behaves like for keystore creation). 
+ - When O(keystore_type) is set, the keystore is created with this type if it does not already exist, or is overwritten + to match the given type in case of mismatch. + type: str + choices: + - jks + - pkcs12 + version_added: 3.3.0 +requirements: + - openssl in PATH (when O(ssl_backend=openssl)) + - keytool in PATH + - cryptography >= 3.0 (when O(ssl_backend=cryptography)) +author: + - Guillaume Grossetie (@Mogztter) + - quidame (@quidame) +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +seealso: + - module: community.crypto.openssl_pkcs12 + - module: community.general.java_cert +notes: + - O(certificate) and O(private_key) require that their contents are available on the controller (either inline in a playbook, + or with the P(ansible.builtin.file#lookup) lookup), while O(certificate_path) and O(private_key_path) require that the + files are available on the target host. + - By design, any change of a value of options O(keystore_type), O(name) or O(password), as well as changes of key or certificate + materials causes the existing O(dest) to be overwritten. +""" + +EXAMPLES = r""" +- name: Create a keystore for the given certificate/private key pair (inline) + community.general.java_keystore: + name: example + certificate: | + -----BEGIN CERTIFICATE----- + h19dUZ2co2f... + -----END CERTIFICATE----- + private_key: | + -----BEGIN RSA PRIVATE KEY----- + DBVFTEVDVFJ... + -----END RSA PRIVATE KEY----- + password: changeit + dest: /etc/security/keystore.jks + +- name: Create a keystore for the given certificate/private key pair (with files on controller) + community.general.java_keystore: + name: example + certificate: "{{ lookup('file', '/path/to/certificate.crt') }}" + private_key: "{{ lookup('file', '/path/to/private.key') }}" + password: changeit + dest: /etc/security/keystore.jks + +- name: Create a keystore for the given certificate/private key pair (with files on target host) + community.general.java_keystore: + name: snakeoil + certificate_path: /etc/ssl/certs/ssl-cert-snakeoil.pem + private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key + password: changeit + dest: /etc/security/keystore.jks +""" + +RETURN = r""" +err: + description: Output from stderr of keytool/openssl command after error of given command. + returned: failure + type: str + sample: "Keystore password is too short - must be at least 6 characters\n" + +cmd: + description: Executed command to get action done. 
+ returned: changed and failure + type: str + sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256" +""" + + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native + +try: + from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates + from cryptography.hazmat.primitives.serialization import ( + BestAvailableEncryption, + NoEncryption, + load_pem_private_key, + load_der_private_key, + ) + from cryptography.x509 import ( + load_pem_x509_certificate, + load_der_x509_certificate, + ) + from cryptography.hazmat.primitives import hashes + from cryptography.exceptions import UnsupportedAlgorithm + from cryptography.hazmat.backends.openssl import backend + HAS_CRYPTOGRAPHY_PKCS12 = True +except ImportError: + HAS_CRYPTOGRAPHY_PKCS12 = False + + +class JavaKeystore: + def __init__(self, module): + self.module = module + self.result = dict() + + self.keytool_bin = module.get_bin_path('keytool', True) + + self.certificate = module.params['certificate'] + self.keypass = module.params['private_key_passphrase'] + self.keystore_path = module.params['dest'] + self.name = module.params['name'] + self.password = module.params['password'] + self.private_key = module.params['private_key'] + self.ssl_backend = module.params['ssl_backend'] + self.keystore_type = module.params['keystore_type'] + + if self.ssl_backend == 'openssl': + self.openssl_bin = module.get_bin_path('openssl', True) + else: + if not HAS_CRYPTOGRAPHY_PKCS12: + self.module.fail_json(msg=missing_required_lib('cryptography >= 3.0')) + + if module.params['certificate_path'] is None: + self.certificate_path = create_file(self.certificate) + self.module.add_cleanup_file(self.certificate_path) + else: + self.certificate_path = module.params['certificate_path'] + + if module.params['private_key_path'] is None: + self.private_key_path = create_file(self.private_key) + self.module.add_cleanup_file(self.private_key_path) + else: + self.private_key_path = module.params['private_key_path'] + + def update_permissions(self): + file_args = self.module.load_file_common_arguments(self.module.params, path=self.keystore_path) + return self.module.set_fs_attributes_if_different(file_args, False) + + def read_certificate_fingerprint(self, cert_format='PEM'): + if self.ssl_backend == 'cryptography': + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError) as e: + self.module.fail_json(msg="Unable to read the provided certificate: %s" % to_native(e)) + + fp = cert.fingerprint(hashes.SHA256()).hex().upper() + fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)]) + else: + current_certificate_fingerprint_cmd = [ + self.openssl_bin, "x509", "-noout", "-in", self.certificate_path, "-fingerprint", "-sha256" + ] + (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = self.module.run_command( + current_certificate_fingerprint_cmd, + environ_update=None, + check_rc=False + ) + if rc != 0: + return self.module.fail_json( + msg=current_certificate_fingerprint_out, + err=current_certificate_fingerprint_err, + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + current_certificate_match = 
re.search(r"=([\w:]+)", current_certificate_fingerprint_out) + if not current_certificate_match: + return self.module.fail_json( + msg="Unable to find the current certificate fingerprint in %s" % ( + current_certificate_fingerprint_out + ), + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + fingerprint = current_certificate_match.group(1) + return fingerprint + + def read_stored_certificate_fingerprint(self): + stored_certificate_fingerprint_cmd = [ + self.keytool_bin, "-list", "-alias", self.name, + "-keystore", self.keystore_path, "-v" + ] + (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( + stored_certificate_fingerprint_cmd, data=self.password, check_rc=False) + if rc != 0: + if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \ + in stored_certificate_fingerprint_out: + return "alias mismatch" + if re.match( + r'keytool error: java\.io\.IOException: ' + + '[Kk]eystore( was tampered with, or)? password was incorrect', + stored_certificate_fingerprint_out + ): + return "password mismatch" + return self.module.fail_json( + msg=stored_certificate_fingerprint_out, + err=stored_certificate_fingerprint_err, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + if self.keystore_type not in (None, self.current_type()): + return "keystore type mismatch" + + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) + if not stored_certificate_match: + return self.module.fail_json( + msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + return stored_certificate_match.group(1) + + def current_type(self): + magic_bytes = b'\xfe\xed\xfe\xed' + with open(self.keystore_path, 'rb') as fd: + header = fd.read(4) + if header == magic_bytes: + return 'jks' + return 'pkcs12' + + def cert_changed(self): + current_certificate_fingerprint = self.read_certificate_fingerprint() + stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() + return current_certificate_fingerprint != stored_certificate_fingerprint + + def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', cert_format='PEM'): + if key_format == 'PEM': + key_loader = load_pem_private_key + else: + key_loader = load_der_private_key + + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.private_key_path, 'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=to_bytes(self.keypass), + backend=backend + ) + except TypeError: + # Re-attempt with no password to match existing behavior + try: + with open(self.private_key_path, 'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=None, + backend=backend + ) + except (OSError, TypeError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided 
certificate: %s" % to_native(e) + ) + + if self.password: + encryption = BestAvailableEncryption(to_bytes(self.password)) + else: + encryption = NoEncryption() + + pkcs12_bundle = serialize_key_and_certificates( + name=to_bytes(self.name), + key=private_key, + cert=cert, + cas=None, + encryption_algorithm=encryption + ) + + with open(keystore_p12_path, 'wb') as p12_file: + p12_file.write(pkcs12_bundle) + + self.result.update(msg="PKCS#12 bundle created by cryptography backend") + + def openssl_create_pkcs12_bundle(self, keystore_p12_path): + export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, + "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] + + # when keypass is provided, add -passin + cmd_stdin = "" + if self.keypass: + export_p12_cmd.append("-passin") + export_p12_cmd.append("stdin") + cmd_stdin = "%s\n" % self.keypass + cmd_stdin += "%s\n%s" % (self.password, self.password) + + (rc, export_p12_out, export_p12_err) = self.module.run_command( + export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False + ) + + self.result = dict(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) + if rc != 0: + self.result['err'] = export_p12_err + self.module.fail_json(**self.result) + + def create(self): + """Create the keystore, or replace it with a rollback in case of + keytool failure. + """ + if self.module.check_mode: + self.result['changed'] = True + return self.result + + keystore_p12_path = create_path() + self.module.add_cleanup_file(keystore_p12_path) + + if self.ssl_backend == 'cryptography': + self.cryptography_create_pkcs12_bundle(keystore_p12_path) + else: + self.openssl_create_pkcs12_bundle(keystore_p12_path) + + if self.keystore_type == 'pkcs12': + # Preserve properties of the destination file, if any. + self.module.atomic_move(os.path.abspath(keystore_p12_path), os.path.abspath(self.keystore_path)) + self.update_permissions() + self.result['changed'] = True + return self.result + + import_keystore_cmd = [self.keytool_bin, "-importkeystore", + "-destkeystore", self.keystore_path, + "-srckeystore", keystore_p12_path, + "-srcstoretype", "pkcs12", + "-alias", self.name, + "-noprompt"] + + if self.keystore_type == 'jks': + keytool_help = self.module.run_command([self.keytool_bin, '-importkeystore', '-help']) + if '-deststoretype' in keytool_help[1] + keytool_help[2]: + import_keystore_cmd.insert(4, "-deststoretype") + import_keystore_cmd.insert(5, self.keystore_type) + + keystore_backup = None + if self.exists(): + keystore_backup = self.keystore_path + '.tmpbak' + # Preserve properties of the source file + self.module.preserved_copy(self.keystore_path, keystore_backup) + os.remove(self.keystore_path) + + (rc, import_keystore_out, import_keystore_err) = self.module.run_command( + import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False + ) + + self.result = dict(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + # keytool may return 0 whereas the keystore has not been created. 
+ if rc != 0 or not self.exists(): + if keystore_backup is not None: + self.module.preserved_copy(keystore_backup, self.keystore_path) + os.remove(keystore_backup) + self.result['err'] = import_keystore_err + return self.module.fail_json(**self.result) + + self.update_permissions() + if keystore_backup is not None: + os.remove(keystore_backup) + self.result['changed'] = True + return self.result + + def exists(self): + return os.path.exists(self.keystore_path) + + +# Utility functions +def create_path(): + dummy, tmpfile = tempfile.mkstemp() + os.remove(tmpfile) + return tmpfile + + +def create_file(content): + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'w') as f: + f.write(content) + return tmpfile + + +def main(): + choose_between = (['certificate', 'certificate_path'], + ['private_key', 'private_key_path']) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + dest=dict(type='path', required=True), + certificate=dict(type='str', no_log=True), + certificate_path=dict(type='path'), + private_key=dict(type='str', no_log=True), + private_key_path=dict(type='path', no_log=False), + private_key_passphrase=dict(type='str', no_log=True), + password=dict(type='str', required=True, no_log=True), + ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), + keystore_type=dict(type='str', choices=['jks', 'pkcs12']), + force=dict(type='bool', default=False), + ), + required_one_of=choose_between, + mutually_exclusive=choose_between, + supports_check_mode=True, + add_file_common_args=True, + ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + result = dict() + jks = JavaKeystore(module) + + if jks.exists(): + if module.params['force'] or jks.cert_changed(): + result = jks.create() + else: + result['changed'] = jks.update_permissions() + else: + result = jks.create() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jboss.py b/plugins/modules/jboss.py deleted file mode 120000 index efcf36b44c..0000000000 --- a/plugins/modules/jboss.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/jboss.py \ No newline at end of file diff --git a/plugins/modules/jboss.py b/plugins/modules/jboss.py new file mode 100644 index 0000000000..c26e0188a1 --- /dev/null +++ b/plugins/modules/jboss.py @@ -0,0 +1,183 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: jboss +short_description: Deploy applications to JBoss +description: + - Deploy applications to JBoss standalone using the filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + deployment: + required: true + description: + - The name of the deployment. + type: str + src: + description: + - The remote path of the application ear or war to deploy. + - Required when O(state=present). + - Ignored when O(state=absent). + type: path + deploy_path: + default: /var/lib/jbossas/standalone/deployments + description: + - The location in the filesystem where the deployment scanner listens. + type: path + state: + choices: [present, absent] + default: "present" + description: + - Whether the application should be deployed or undeployed. 
+ type: str +notes: + - The JBoss standalone deployment-scanner has to be enabled in C(standalone.xml). + - The module waits until the O(deployment) file is deployed/undeployed by the deployment-scanner. The duration of this wait depends + on the scan-interval parameter in C(standalone.xml). + - Ensure no identically named application is deployed through the JBoss CLI. +seealso: + - name: WildFly reference + description: Complete reference of the WildFly documentation. + link: https://docs.wildfly.org +author: + - Jeroen Hoekx (@jhoekx) +""" + +EXAMPLES = r""" +- name: Deploy a hello world application to the default deploy_path + community.general.jboss: + src: /tmp/hello-1.0-SNAPSHOT.war + deployment: hello.war + state: present + +- name: Update the hello world application to the non-default deploy_path + community.general.jboss: + src: /tmp/hello-1.1-SNAPSHOT.war + deploy_path: /opt/wildfly/deployment + deployment: hello.war + state: present + +- name: Undeploy the hello world application from the default deploy_path + community.general.jboss: + deployment: hello.war + state: absent +""" + +RETURN = r""" # """ + +import os +import time +from ansible.module_utils.basic import AnsibleModule + + +DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments' + + +def is_deployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment)) + + +def is_undeployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment)) + + +def is_failed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='path'), + deployment=dict(type='str', required=True), + deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH), + state=dict(type='str', choices=['absent', 'present'], default='present'), + ), + required_if=[('state', 'present', ('src',))], + supports_check_mode=True + ) + + result = dict(changed=False) + + src = module.params['src'] + deployment = module.params['deployment'] + deploy_path = module.params['deploy_path'] + state = module.params['state'] + + if not os.path.exists(deploy_path): + module.fail_json(msg="deploy_path does not exist.") + + if state == 'absent' and src: + module.warn('Parameter src is ignored when state=absent') + elif state == 'present' and not os.path.exists(src): + module.fail_json(msg='Source file %s does not exist.' % src) + + deployed = is_deployed(deploy_path, deployment) + + # === when check_mode === + if module.check_mode: + if state == 'present': + if not deployed: + result['changed'] = True + + elif deployed: + if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): + result['changed'] = True + + elif state == 'absent' and deployed: + result['changed'] = True + + module.exit_json(**result) + # ======================= + + if state == 'present' and not deployed: + if is_failed(deploy_path, deployment): + # Clean up old failed deployment + os.remove(os.path.join(deploy_path, "%s.failed" % deployment)) + + module.preserved_copy(src, os.path.join(deploy_path, deployment)) + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.'
% deployment) + time.sleep(1) + result['changed'] = True + + if state == 'present' and deployed: + if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): + os.remove(os.path.join(deploy_path, "%s.deployed" % deployment)) + module.preserved_copy(src, os.path.join(deploy_path, deployment)) + deployed = False + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.' % deployment) + time.sleep(1) + result['changed'] = True + + if state == 'absent' and deployed: + os.remove(os.path.join(deploy_path, "%s.deployed" % deployment)) + while deployed: + deployed = not is_undeployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Undeploying %s failed.' % deployment) + time.sleep(1) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py deleted file mode 120000 index 13e660e7b0..0000000000 --- a/plugins/modules/jenkins_build.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/jenkins_build.py \ No newline at end of file diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py new file mode 100644 index 0000000000..a088ce7dae --- /dev/null +++ b/plugins/modules/jenkins_build.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_build +short_description: Manage Jenkins builds +version_added: 2.2.0 +description: + - Manage Jenkins builds with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Brett Milford (@brettmilford) + - Tong He (@unnecessary-username) + - Juan Casanova (@juanmcasanova) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + args: + description: + - A dictionary of parameters to pass to the build. + type: dict + name: + description: + - Name of the Jenkins job to build. + required: true + type: str + build_number: + description: + - An integer which specifies a build of a job. It is required to remove a build from the queue. + type: int + password: + description: + - Password to authenticate with the Jenkins server. + type: str + state: + description: + - Attribute that specifies if the build is to be created, deleted, or stopped. + - The V(stopped) state has been added in community.general 3.3.0. + default: present + choices: ['present', 'absent', 'stopped'] + type: str + token: + description: + - API token used to authenticate with the Jenkins server. + type: str + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str + detach: + description: + - Enable detached mode so the module does not wait for the build to end. + default: false + type: bool + version_added: 7.4.0 + time_between_checks: + description: + - Time in seconds to wait between requests to the Jenkins server. + - This time must be higher than the configured quiet time for the job.
+ default: 10 + type: int + version_added: 7.4.0 +""" + +EXAMPLES = r""" +- name: Create a jenkins build using basic authentication + community.general.jenkins_build: + name: "test-check" + args: + cloud: "test" + availability_zone: "test_az" + state: present + user: admin + password: asdfg + url: http://localhost:8080 + +- name: Stop a running jenkins build anonymously + community.general.jenkins_build: + name: "stop-check" + build_number: 3 + state: stopped + url: http://localhost:8080 + +- name: Trigger Jenkins build in detached mode + community.general.jenkins_build: + name: "detached-build" + state: present + user: admin + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 + detach: true + time_between_checks: 20 + +- name: Delete a jenkins build using token authentication + community.general.jenkins_build: + name: "delete-experiment" + build_number: 30 + state: absent + user: Jenkins + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 +""" + +RETURN = r""" +name: + description: Name of the jenkins job. + returned: success + type: str + sample: "test-job" +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: URL to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +build_info: + description: Build info of the jenkins job. + returned: success + type: dict +""" + +import traceback +from time import sleep + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class JenkinsBuild: + + def __init__(self, module): + self.module = module + + self.name = module.params.get('name') + self.password = module.params.get('password') + self.args = module.params.get('args') + self.state = module.params.get('state') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.build_number = module.params.get('build_number') + self.detach = module.params.get('detach') + self.time_between_checks = module.params.get('time_between_checks') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + } + + self.EXCL_STATE = "excluded state" + + def get_jenkins_connection(self): + try: + if self.user and self.password: + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif self.user and self.token: + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif self.user and not (self.password or self.token): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e)) + + def get_next_build(self): + try: + build_number = self.server.get_job_info(self.name)['nextBuildNumber'] + except Exception as e: + self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), + exception=traceback.format_exc()) + + return build_number + + def get_build_status(self): + try: + response = 
self.server.get_build_info(self.name, self.build_number) + return response + except jenkins.JenkinsException as e: + response = {} + response["result"] = "ABSENT" + return response + except Exception as e: + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), + exception=traceback.format_exc()) + + def present_build(self): + self.build_number = self.get_next_build() + + try: + if self.args is None: + self.server.build_job(self.name) + else: + self.server.build_job(self.name, self.args) + except Exception as e: + self.module.fail_json(msg='Unable to create build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + + def stopped_build(self): + build_info = None + try: + build_info = self.server.get_build_info(self.name, self.build_number) + if build_info['building'] is True: + self.server.stop_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to stop build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + else: + if build_info['building'] is False: + self.module.exit_json(**self.result) + + def absent_build(self): + try: + self.server.delete_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to delete build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + build_status = self.get_build_status() + + if build_status['result'] is None: + # If detached mode is active, mark as success; we could not get here if the build did not exist + if self.detach: + result['changed'] = True + result['build_info'] = build_status + + return result + + sleep(self.time_between_checks) + # Poll again until the build reaches a final state. + return self.get_result() + else: + if self.state == "stopped" and build_status['result'] == "ABORTED": + result['changed'] = True + result['build_info'] = build_status + elif self.state == "absent" and build_status['result'] == "ABSENT": + result['changed'] = True + result['build_info'] = build_status + elif self.state != "absent" and build_status['result'] == "SUCCESS": + result['changed'] = True + result['build_info'] = build_status + else: + result['failed'] = True + result['build_info'] = build_status + + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + args=dict(type='dict'), + build_number=dict(type='int'), + name=dict(required=True), + password=dict(no_log=True), + state=dict(choices=['present', 'absent', 'stopped'], default="present"), + token=dict(no_log=True), + url=dict(default="http://localhost:8080"), + user=dict(), + detach=dict(type='bool', default=False), + time_between_checks=dict(type='int', default=10), + ), + mutually_exclusive=[['password', 'token']], + required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]], + ) + + test_dependencies(module) + jenkins_build = JenkinsBuild(module) + + if module.params.get('state') == "present": + jenkins_build.present_build() + elif module.params.get('state') == "stopped": + jenkins_build.stopped_build() + else: + jenkins_build.absent_build() + + sleep(jenkins_build.time_between_checks) + result = jenkins_build.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff
--git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py new file mode 100644 index 0000000000..1e032af423 --- /dev/null +++ b/plugins/modules/jenkins_build_info.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_build_info +short_description: Get information about Jenkins builds +version_added: 7.4.0 +description: + - Get information about Jenkins builds with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Juan Casanova (@juanmcasanova) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - Name of the Jenkins job to which the build belongs. + required: true + type: str + build_number: + description: + - An integer which specifies a build of a job. + - If not specified the last build information is returned. + type: int + password: + description: + - Password to authenticate with the Jenkins server. + type: str + token: + description: + - API token used to authenticate with the Jenkins server. + type: str + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str +""" + +EXAMPLES = r""" +- name: Get information about a jenkins build using basic authentication + community.general.jenkins_build_info: + name: "test-check" + build_number: 1 + user: admin + password: asdfg + url: http://localhost:8080 + +- name: Get information about a jenkins build anonymously + community.general.jenkins_build_info: + name: "stop-check" + build_number: 3 + url: http://localhost:8080 + +- name: Get information about a jenkins build using token authentication + community.general.jenkins_build_info: + name: "delete-experiment" + build_number: 30 + user: Jenkins + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 +""" + +RETURN = r""" +name: + description: Name of the jenkins job. + returned: success + type: str + sample: "test-job" +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: URL to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +build_info: + description: Build info of the jenkins job. 
+ returned: success + type: dict +""" + +import traceback + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class JenkinsBuildInfo: + + def __init__(self, module): + self.module = module + + self.name = module.params.get('name') + self.password = module.params.get('password') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.build_number = module.params.get('build_number') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + } + + def get_jenkins_connection(self): + try: + if self.user and self.password: + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif self.user and self.token: + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif self.user and not (self.password or self.token): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e)) + + def get_build_status(self): + try: + if self.build_number is None: + job_info = self.server.get_job_info(self.name) + self.build_number = job_info['lastBuild']['number'] + + return self.server.get_build_info(self.name, self.build_number) + except jenkins.JenkinsException as e: + response = {} + response["result"] = "ABSENT" + return response + except Exception as e: + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + build_status = self.get_build_status() + + if build_status['result'] == "ABSENT": + result['failed'] = True + result['build_info'] = build_status + + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + build_number=dict(type='int'), + name=dict(required=True), + password=dict(no_log=True), + token=dict(no_log=True), + url=dict(default="http://localhost:8080"), + user=dict(), + ), + mutually_exclusive=[['password', 'token']], + supports_check_mode=True, + ) + + test_dependencies(module) + jenkins_build_info = JenkinsBuildInfo(module) + + result = jenkins_build_info.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jenkins_credential.py b/plugins/modules/jenkins_credential.py new file mode 100644 index 0000000000..b40c3546ea --- /dev/null +++ b/plugins/modules/jenkins_credential.py @@ -0,0 +1,861 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: jenkins_credential +short_description: Manage Jenkins credentials and domains through API +version_added: 11.1.0 +description: + - This module allows managing 
Jenkins credentials and domain scopes through the Jenkins HTTP API. + - Create, update, and delete different credential types such as C(username/password), C(secret text), C(SSH key), C(certificates), + C(GitHub App), and domains. + - For scoped domains (O(type=scope)), it supports restrictions based on V(hostname), V(hostname:port), V(path), and V(scheme). +requirements: + - urllib3 >= 1.26.0 +author: + - Youssef Ali (@YoussefKhalidAli) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The ID of the Jenkins credential or domain. + type: str + type: + description: + - Type of the credential or action. + choices: + - user_and_pass + - file + - text + - github_app + - ssh_key + - certificate + - scope + - token + type: str + state: + description: + - The state of the credential. + choices: + - present + - absent + default: present + type: str + scope: + description: + - Jenkins credential domain scope. + - Deleting a domain scope deletes all credentials within it. + type: str + default: '_' + force: + description: + - Force update if the credential already exists, used with O(state=present). + - If set to V(true), it deletes the existing credential before creating a new one. + - Always returns RV(ignore:changed=true). + type: bool + default: false + url: + description: + - Jenkins server URL. + type: str + default: http://localhost:8080 + jenkins_user: + description: + - Jenkins user for authentication. + required: true + type: str + jenkins_password: + description: + - Jenkins password for token creation. Required if O(type=token). + type: str + token: + description: + - Jenkins API token. Required unless O(type=token). + type: str + description: + description: + - Description of the credential or domain. + default: '' + type: str + location: + description: + - Location of the credential. Either V(system) or V(folder). + - If O(location=folder), then O(url) must point at the folder, for example V(http://localhost:8080/job/test). + choices: + - system + - folder + default: 'system' + type: str + name: + description: + - Name of the token to generate. Required if O(type=token). + - When generating a new token, do not pass O(id). It is generated automatically. + - Creating two tokens with the same name generates two distinct tokens with different RV(token_uuid) values. + - Replacing a token with another one of the same name requires deleting the original first using O(force=true). + type: str + username: + description: + - Username for credential types that require it (for example O(type=ssh_key) or O(type=user_and_pass)). + type: str + password: + description: + - Password for credential types that require it (for example O(type=user_and_pass) or O(type=certificate)). + type: str + secret: + description: + - Secret text (used when O(type=text)). + type: str + appID: + description: + - GitHub App ID. + type: str + api_uri: + description: + - Link to the GitHub API. + default: 'https://api.github.com' + type: str + owner: + description: + - GitHub App owner. + type: str + file_path: + description: + - Path to the secret file (for example O(type=file) or O(type=certificate)). + - For O(type=certificate), this can be a V(.p12) or V(.pem) file. + type: path + private_key_path: + description: + - Path to private key file for PEM certificates or GitHub Apps. + type: path + passphrase: + description: + - Passphrase for the SSH key, if needed. + type: str + inc_hostname: + description: + - List of hostnames to include in scope.
+ type: list + elements: str + exc_hostname: + description: + - List of hostnames to exclude from scope. + - If a hostname appears in both this list and O(inc_hostname), the hostname is excluded. + type: list + elements: str + inc_hostname_port: + description: + - List of V(host:port) to include in scope. + type: list + elements: str + exc_hostname_port: + description: + - List of V(host:port) to exclude from scope. + - If a V(host:port) appears in both this list and O(inc_hostname_port), it is excluded. + type: list + elements: str + inc_path: + description: + - List of URL paths to include when matching credentials to domains. + - 'B(Matching is hierarchical): subpaths of excluded paths are also excluded, even if explicitly included.' + type: list + elements: str + exc_path: + description: + - List of URL paths to exclude. + - If a path appears in both this list and O(inc_path), it is excluded. + - If you exclude a subpath of a path previously included, that subpath alone is excluded. + type: list + elements: str + schemes: + description: + - List of schemes (for example V(http) or V(https)) to match. + type: list + elements: str +""" + +EXAMPLES = r""" +- name: Generate token + community.general.jenkins_credential: + id: "test-token" + jenkins_user: "admin" + jenkins_password: "password" + type: "token" + register: token_result + +- name: Add CUSTOM scope credential + community.general.jenkins_credential: + id: "CUSTOM" + type: "scope" + jenkins_user: "admin" + token: "{{ token }}" + description: "Custom scope credential" + inc_path: + - "include/path" + - "include/path2" + exc_path: + - "exclude/path" + - "exclude/path2" + inc_hostname: + - "included-hostname" + - "included-hostname2" + exc_hostname: + - "excluded-hostname" + - "excluded-hostname2" + schemes: + - "http" + - "https" + inc_hostname_port: + - "included-hostname:7000" + - "included-hostname2:7000" + exc_hostname_port: + - "excluded-hostname:7000" + - "excluded-hostname2:7000" + +- name: Add user_and_pass credential + community.general.jenkins_credential: + id: "userpass-id" + type: "user_and_pass" + jenkins_user: "admin" + token: "{{ token }}" + description: "User and password credential" + username: "user1" + password: "pass1" + +- name: Add file credential to custom scope + community.general.jenkins_credential: + id: "file-id" + type: "file" + jenkins_user: "admin" + token: "{{ token }}" + scope: "CUSTOM" + description: "File credential" + file_path: "../vars/my-secret.pem" + +- name: Add text credential to folder + community.general.jenkins_credential: + id: "text-id" + type: "text" + jenkins_user: "admin" + token: "{{ token }}" + description: "Text credential" + secret: "mysecrettext" + location: "folder" + url: "http://localhost:8080/job/test" + +- name: Add githubApp credential + community.general.jenkins_credential: + id: "githubapp-id" + type: "github_app" + jenkins_user: "admin" + token: "{{ token }}" + description: "GitHub app credential" + appID: "12345" + file_path: "../vars/github.pem" + owner: "github_owner" + +- name: Add sshKey credential + community.general.jenkins_credential: + id: "sshkey-id" + type: "ssh_key" + jenkins_user: "admin" + token: "{{ token }}" + description: "SSH key credential" + username: "sshuser" + file_path: "../vars/ssh_key" + passphrase: "1234" + +- name: Add certificate credential (p12) + community.general.jenkins_credential: + id: "certificate-id" + type: "certificate" + jenkins_user: "admin" + token: "{{ token }}" + description: "Certificate credential" + password: "12345678901234" +
file_path: "../vars/certificate.p12" + +- name: Add certificate credential (pem) + community.general.jenkins_credential: + id: "certificate-id-pem" + type: "certificate" + jenkins_user: "admin" + token: "{{ token }}" + description: "Certificate credential (pem)" + file_path: "../vars/cert.pem" + private_key_path: "../vars/private.key" +""" +RETURN = r""" +details: + description: Return more details in case of errors. + type: str + returned: failed +token: + description: + - The generated API token if O(type=token). + - This is needed to authenticate API calls later. + - This should be stored securely, as it is the only time it is returned. + type: str + returned: success +token_uuid: + description: + - The generated ID of the token. + - You pass this value back to the module as O(id) to edit or revoke the token later. + - This should be stored securely, as it is the only time it is returned. + type: str + returned: success +""" + +from urllib.parse import urlencode +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, basic_auth_header +from ansible_collections.community.general.plugins.module_utils import deps + +import json +import os +import base64 + +with deps.declare("urllib3", reason="urllib3 is required to embed files into requests"): + import urllib3 + + +# Function to validate file paths exist on disk +def validate_file_exist(module, path): + + if path and not os.path.exists(path): + module.fail_json(msg="File not found: {}".format(path)) + + +# Gets the Jenkins crumb for CSRF protection which is required for API calls +def get_jenkins_crumb(module, headers): + type = module.params["type"] + url = module.params["url"] + + if "/job" in url: + url = url.split("/job")[0] + + crumb_url = "{}/crumbIssuer/api/json".format(url) + + response, info = fetch_url(module, crumb_url, headers=headers) + + if info["status"] != 200: + module.fail_json(msg="Failed to fetch Jenkins crumb. 
Confirm the token is valid.") + + # Cookie is needed to generate API token + cookie = info.get("set-cookie", "") + session_cookie = cookie.split(";")[0] if cookie else None + + try: + data = response.read() + json_data = json.loads(data) + crumb_request_field = json_data["crumbRequestField"] + crumb = json_data["crumb"] + headers[crumb_request_field] = crumb # Set the crumb in headers + headers["Content-Type"] = ( + "application/x-www-form-urlencoded" # Set Content-Type for form data + ) + if type == "token": + headers["Cookie"] = ( + session_cookie # Set session cookie for token operations + ) + return crumb_request_field, crumb, session_cookie # Return for test purposes + + except Exception: + return None + + +# Function to clean the data sent via API by removing unwanted keys and None values +def clean_data(data): + # Keys to remove (including those with None values) + keys_to_remove = { + "url", + "token", + "jenkins_user", + "jenkins_password", + "file_path", + "private_key_path", + "type", + "state", + "force", + "name", + "scope", + "location", + "api_uri", + } + + # Filter out None values and unwanted keys + cleaned_data = { + key: value + for key, value in data.items() + if value is not None and key not in keys_to_remove + } + + return cleaned_data + + +# Function to check if the credential/domain exists +def target_exists(module, check_domain=False): + url = module.params["url"] + location = module.params["location"] + scope = module.params["scope"] + name = module.params["id"] + user = module.params["jenkins_user"] + token = module.params["token"] + + headers = {"Authorization": basic_auth_header(user, token)} + + if module.params["type"] == "scope" or check_domain: + target_url = "{}/credentials/store/{}/domain/{}/api/json".format( + url, location, scope if check_domain else name + ) + elif module.params["type"] == "token": + return False # Can't check token + else: + target_url = "{}/credentials/store/{}/domain/{}/credential/{}/api/json".format( + url, location, scope, name + ) + + response, info = fetch_url(module, target_url, headers=headers) + status = info.get("status", 0) + + if status == 200: + return True + elif status == 404: + return False + else: + module.fail_json( + msg="Unexpected status code {} when checking {} existence.".format( + status, name + ) + ) + + +# Function to delete the scope or credential provided +def delete_target(module, headers): + user = module.params["jenkins_user"] + type = module.params["type"] + url = module.params["url"] + location = module.params["location"] + id = module.params["id"] + scope = module.params["scope"] + + body = False + + try: + + if type == "token": + delete_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/revoke".format( + url, user + ) + body = urlencode({"tokenUuid": id}) + + elif type == "scope": + delete_url = "{}/credentials/store/{}/domain/{}/doDelete".format( + url, location, id + ) + + else: + delete_url = ( + "{}/credentials/store/{}/domain/{}/credential/{}/doDelete".format( + url, location, scope, id + ) + ) + + response, info = fetch_url( + module, + delete_url, + headers=headers, + data=body if body else None, + method="POST", + ) + + status = info.get("status", 0) + if not status == 200: + module.fail_json( + msg="Failed to delete: HTTP {}, {}, {}".format( + status, response, headers + ) + ) + + except Exception as e: + module.fail_json(msg="Exception during delete: {}".format(str(e))) + + +# Function to read the private key for types github_app and ssh_key +def read_privateKey(module): + try: + with
open(module.params["private_key_path"], "r") as f: + private_key = f.read().strip() + return private_key + except Exception as e: + module.fail_json(msg="Failed to read private key file: {}".format(str(e))) + + +# Function to build the multipart form-data body and Content-Type header for a file credential upload. +# Returns: +# body (bytes): Encoded multipart data +# content_type (str): Content-Type header including boundary +def embed_file_into_body(module, file_path, credentials): + + filename = os.path.basename(file_path) + + try: + with open(file_path, "rb") as f: + file_bytes = f.read() + except Exception as e: + module.fail_json(msg="Failed to read file: {}".format(str(e))) + return "", "" # Return for test purposes + + credentials.update( + { + "file": "file0", + "fileName": filename, + } + ) + + payload = {"credentials": credentials} + + fields = {"file0": (filename, file_bytes), "json": json.dumps(payload)} + + body, content_type = urllib3.encode_multipart_formdata(fields) + return body, content_type + + +# Main function to run the Ansible module +def run_module(): + + module = AnsibleModule( + argument_spec=dict( + id=dict(type="str"), + type=dict( + type="str", + choices=[ + "user_and_pass", + "file", + "text", + "github_app", + "ssh_key", + "certificate", + "scope", + "token", + ], + ), + state=dict(type="str", default="present", choices=["present", "absent"]), + force=dict(type="bool", default=False), + scope=dict(type="str", default="_"), + url=dict(type="str", default="http://localhost:8080"), + jenkins_user=dict(type="str", required=True), + jenkins_password=dict(type="str", no_log=True), + token=dict(type="str", no_log=True), + description=dict(type="str", default=""), + location=dict(type="str", default="system", choices=["system", "folder"]), + name=dict(type="str"), + username=dict(type="str"), + password=dict(type="str", no_log=True), + file_path=dict(type="path"), + secret=dict(type="str", no_log=True), + appID=dict(type="str"), + api_uri=dict(type="str", default="https://api.github.com"), + owner=dict(type="str"), + passphrase=dict(type="str", no_log=True), + private_key_path=dict(type="path", no_log=True), + # Scope specifications parameters + inc_hostname=dict(type="list", elements="str"), + exc_hostname=dict(type="list", elements="str"), + inc_hostname_port=dict(type="list", elements="str"), + exc_hostname_port=dict(type="list", elements="str"), + inc_path=dict(type="list", elements="str"), + exc_path=dict(type="list", elements="str"), + schemes=dict(type="list", elements="str"), + ), + supports_check_mode=True, + required_if=[ + ("state", "present", ["type"]), + ("state", "absent", ["id"]), + ("type", "token", ["name", "jenkins_password"]), + ("type", "user_and_pass", ["username", "password", "id", "token"]), + ("type", "file", ["file_path", "id", "token"]), + ("type", "text", ["secret", "id", "token"]), + ("type", "github_app", ["appID", "private_key_path", "id", "token"]), + ("type", "ssh_key", ["username", "private_key_path", "id", "token"]), + ("type", "certificate", ["file_path", "id", "token"]), + ("type", "scope", ["id", "token"]), + ], + ) + + # Parameters + id = module.params["id"] + type = module.params["type"] + state = module.params["state"] + force = module.params["force"] + scope = module.params["scope"] + url = module.params["url"] + jenkins_user = module.params["jenkins_user"] + jenkins_password = module.params["jenkins_password"] + name = module.params["name"] + token = module.params["token"] + description = module.params["description"] + location =
module.params["location"] + filePath = module.params["file_path"] + private_key_path = module.params["private_key_path"] + api_uri = module.params["api_uri"] + inc_hostname = module.params["inc_hostname"] + exc_hostname = module.params["exc_hostname"] + inc_hostname_port = module.params["inc_hostname_port"] + exc_hostname_port = module.params["exc_hostname_port"] + inc_path = module.params["inc_path"] + exc_path = module.params["exc_path"] + schemes = module.params["schemes"] + + deps.validate(module) + + headers = { + "Authorization": basic_auth_header(jenkins_user, token or jenkins_password), + } + + # Get the crumb for CSRF protection + get_jenkins_crumb(module, headers) + + result = dict( + changed=False, + msg="", + ) + + credentials = clean_data(module.params) + + does_exist = target_exists(module) + + # Check if the credential/domain doesn't exist and the user wants to delete + if not does_exist and state == "absent" and not type == "token": + result["changed"] = False + result["msg"] = "{} does not exist.".format(id) + module.exit_json(**result) + + if state == "present": + + # If updating, we need to delete the existing credential/domain first based on force parameter + if force and (does_exist or type == "token"): + delete_target(module, headers) + elif does_exist and not force: + result["changed"] = False + result["msg"] = "{} already exists. Use force=True to update.".format(id) + module.exit_json(**result) + + if type == "token": + + post_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken".format( + url, jenkins_user + ) + + body = "newTokenName={}".format(name) + + elif type == "scope": + + post_url = "{}/credentials/store/{}/createDomain".format(url, location) + + specifications = [] + + # Create a domain in Jenkins + if inc_hostname or exc_hostname: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnameSpecification", + "includes": ",".join(inc_hostname), + "excludes": ",".join(exc_hostname), + } + ) + + if inc_hostname_port or exc_hostname_port: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnamePortSpecification", + "includes": ",".join(inc_hostname_port), + "excludes": ",".join(exc_hostname_port), + } + ) + + if schemes: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.SchemeSpecification", + "schemes": ",".join(schemes), + }, + ) + + if inc_path or exc_path: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.PathSpecification", + "includes": ",".join(inc_path), + "excludes": ",".join(exc_path), + } + ) + + payload = { + "name": id, + "description": description, + "specifications": specifications, + } + + else: + if filePath: + validate_file_exist(module, filePath) + elif private_key_path: + validate_file_exist(module, private_key_path) + + post_url = "{}/credentials/store/{}/domain/{}/createCredentials".format( + url, location, scope + ) + + cred_class = { + "user_and_pass": "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl", + "file": "org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl", + "text": "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl", + "github_app": "org.jenkinsci.plugins.github_branch_source.GitHubAppCredentials", + "ssh_key": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey", + "certificate": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl", + } + 
credentials.update({"$class": cred_class[type]}) + + if type == "file": + + # Build multipart body and content-type + body, content_type = embed_file_into_body(module, filePath, credentials) + headers["Content-Type"] = content_type + + elif type == "github_app": + + private_key = read_privateKey(module) + + credentials.update( + { + "privateKey": private_key, + "apiUri": api_uri, + } + ) + + elif type == "ssh_key": + + private_key = read_privateKey(module) + + credentials.update( + { + "privateKeySource": { + "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource", + "privateKey": private_key, + }, + } + ) + + elif type == "certificate": + + name, ext = os.path.splitext(filePath) + + if ext.lower() in [".p12", ".pfx"]: + try: + with open(filePath, "rb") as f: + file_content = f.read() + uploaded_keystore = base64.b64encode(file_content).decode( + "utf-8" + ) + except Exception as e: + module.fail_json( + msg="Failed to read or encode keystore file: {}".format( + str(e) + ) + ) + + credentials.update( + { + "keyStoreSource": { + "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$UploadedKeyStoreSource", + "uploadedKeystore": uploaded_keystore, + }, + } + ) + + elif ext.lower() in [".pem", ".crt"]: # PEM mode + try: + with open(filePath, "r") as f: + cert_chain = f.read() + with open(private_key_path, "r") as f: + private_key = f.read() + except Exception as e: + module.fail_json( + msg="Failed to read PEM files: {}".format(str(e)) + ) + + credentials.update( + { + "keyStoreSource": { + "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$PEMEntryKeyStoreSource", + "certChain": cert_chain, + "privateKey": private_key, + }, + } + ) + + else: + module.fail_json( + msg="Unsupported certificate file type. Only .p12, .pfx, .pem or .crt are supported." 
+ ) + + payload = {"credentials": credentials} + + if not type == "file" and not type == "token": + body = urlencode({"json": json.dumps(payload)}) + + else: # Delete + + delete_target(module, headers) + + module.exit_json(changed=True, msg="{} deleted successfully.".format(id)) + + if ( + not type == "scope" and not scope == "_" + ): # Check if custom scope exists if adding to a custom scope + if not target_exists(module, True): + module.fail_json(msg="Domain {} does not exist".format(scope)) + + try: + response, info = fetch_url( + module, post_url, headers=headers, data=body, method="POST" + ) + except Exception as e: + module.fail_json(msg="Request to {} failed: {}".format(post_url, str(e))) + + status = info.get("status", 0) + + if not status == 200: + body = response.read() if response else b"" + module.fail_json( + msg="Failed to {} credential".format( + "add/update" if state == "present" else "delete" + ), + details=body.decode("utf-8", errors="ignore"), + ) + + if type == "token": + response_data = json.loads(response.read()) + result["token"] = response_data["data"]["tokenValue"] + result["token_uuid"] = response_data["data"]["tokenUuid"] + + result["changed"] = True + result["msg"] = response.read().decode("utf-8") + + module.exit_json(**result) + + +if __name__ == "__main__": + run_module() diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py deleted file mode 120000 index 50187f37ef..0000000000 --- a/plugins/modules/jenkins_job.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/jenkins_job.py \ No newline at end of file diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py new file mode 100644 index 0000000000..ec8941ea93 --- /dev/null +++ b/plugins/modules/jenkins_job.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_job +short_description: Manage Jenkins jobs +description: + - Manage Jenkins jobs by using Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: "Sergio Millan Rodriguez (@sermilrod)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + config: + type: str + description: + - Config in XML format. + - Required if the job does not yet exist. + - Mutually exclusive with O(enabled). + - Considered if O(state=present). + required: false + enabled: + description: + - Whether the job should be enabled or disabled. + - Mutually exclusive with O(config). + - Considered if O(state=present). + type: bool + required: false + name: + type: str + description: + - Name of the Jenkins job. + required: true + password: + type: str + description: + - Password to authenticate with the Jenkins server. + required: false + state: + type: str + description: + - Attribute that specifies if the job has to be created or deleted. + required: false + default: present + choices: ['present', 'absent'] + token: + type: str + description: + - API token used to authenticate with the Jenkins server, as an alternative to O(password). + required: false + url: + type: str + description: + - URL where the Jenkins server is accessible. + required: false + default: http://localhost:8080 + user: + type: str + description: + - User to authenticate with the Jenkins server.
+ required: false + validate_certs: + type: bool + default: true + description: + - If set to V(false), the SSL certificates are not validated. This should only be set to V(false) on personally controlled + sites using self-signed certificates, as it avoids verifying the source site. + - The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY). + version_added: 2.3.0 +""" + +EXAMPLES = r""" +- name: Create a jenkins job using basic authentication + community.general.jenkins_job: + config: "{{ lookup('file', 'templates/test.xml') }}" + name: test + password: admin + url: http://localhost:8080 + user: admin + +- name: Create a jenkins job using the token + community.general.jenkins_job: + config: "{{ lookup('template', 'templates/test.xml.j2') }}" + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + url: http://localhost:8080 + user: admin + +- name: Delete a jenkins job using basic authentication + community.general.jenkins_job: + name: test + password: admin + state: absent + url: http://localhost:8080 + user: admin + +- name: Delete a jenkins job using the token + community.general.jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + state: absent + url: http://localhost:8080 + user: admin + +- name: Disable a jenkins job using basic authentication + community.general.jenkins_job: + name: test + password: admin + enabled: false + url: http://localhost:8080 + user: admin + +- name: Disable a jenkins job using the token + community.general.jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + enabled: false + url: http://localhost:8080 + user: admin +""" + +RETURN = r""" +name: + description: Name of the jenkins job. + returned: success + type: str + sample: test-job +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +enabled: + description: Whether the jenkins job is enabled or not. + returned: success + type: bool + sample: true +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: URL to connect to the Jenkins server.
+ returned: success + type: str + sample: https://jenkins.mydomain.com +""" + +import os +import traceback +import xml.etree.ElementTree as ET + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class JenkinsJob(object): + + def __init__(self, module): + self.module = module + + self.config = module.params.get('config') + self.name = module.params.get('name') + self.password = module.params.get('password') + self.state = module.params.get('state') + self.enabled = module.params.get('enabled') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + 'diff': { + 'before': "", + 'after': "" + } + } + + self.EXCL_STATE = "excluded state" + if not module.params['validate_certs']: + os.environ['PYTHONHTTPSVERIFY'] = '0' + + def get_jenkins_connection(self): + try: + if self.user and self.password: + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif self.user and self.token: + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif self.user and not (self.password or self.token): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc()) + + def get_job_status(self): + try: + response = self.server.get_job_info(self.name) + if "color" not in response: + return self.EXCL_STATE + else: + return to_native(response['color']) + + except Exception as e: + self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc()) + + def job_exists(self): + try: + return bool(self.server.job_exists(self.name)) + except Exception as e: + self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def get_config(self): + return job_config_to_string(self.config) + + def get_current_config(self): + return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8')) + + def has_config_changed(self): + # config is optional, if not provided we keep the current config as is + if self.config is None: + return False + + config_file = self.get_config() + machine_file = self.get_current_config() + + self.result['diff']['after'] = config_file + self.result['diff']['before'] = machine_file + + if machine_file != config_file: + return True + return False + + def present_job(self): + if self.config is None and self.enabled is None: + self.module.fail_json(msg='one of the following params is required on state=present: config,enabled') + + if not self.job_exists(): + self.create_job() + else: + self.update_job() + + def has_state_changed(self, status): + # Keep in current state if enabled arg_spec is not given + if self.enabled is None: + return False + + return (self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled") + + def switch_state(self): + if self.enabled is False: + 
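+            # An explicit enabled=false maps to Jenkins' disable action; update_job() only calls this after has_state_changed() confirmed an explicit boolean was given.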
self.server.disable_job(self.name) + else: + self.server.enable_job(self.name) + + def update_job(self): + try: + status = self.get_job_status() + + # Handle job config + if self.has_config_changed(): + self.result['changed'] = True + if not self.module.check_mode: + self.server.reconfig_job(self.name, self.get_config()) + + # Handle job disable/enable + elif status != self.EXCL_STATE and self.has_state_changed(status): + self.result['changed'] = True + if not self.module.check_mode: + self.switch_state() + + except Exception as e: + self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def create_job(self): + if self.config is None: + self.module.fail_json(msg='missing required param: config') + + self.result['changed'] = True + try: + config_file = self.get_config() + self.result['diff']['after'] = config_file + if not self.module.check_mode: + self.server.create_job(self.name, config_file) + except Exception as e: + self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def absent_job(self): + if self.job_exists(): + self.result['changed'] = True + self.result['diff']['before'] = self.get_current_config() + if not self.module.check_mode: + try: + self.server.delete_job(self.name) + except Exception as e: + self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + if self.job_exists(): + result['enabled'] = self.get_job_status() != "disabled" + else: + result['enabled'] = None + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def job_config_to_string(xml_str): + return ET.tostring(ET.fromstring(xml_str)).decode('ascii') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + config=dict(type='str'), + name=dict(type='str', required=True), + password=dict(type='str', no_log=True), + state=dict(type='str', choices=['present', 'absent'], default="present"), + enabled=dict(type='bool'), + token=dict(type='str', no_log=True), + url=dict(type='str', default="http://localhost:8080"), + user=dict(type='str'), + validate_certs=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ['password', 'token'], + ['config', 'enabled'], + ], + supports_check_mode=True, + ) + + test_dependencies(module) + jenkins_job = JenkinsJob(module) + + if module.params.get('state') == "present": + jenkins_job.present_job() + else: + jenkins_job.absent_job() + + result = jenkins_job.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py deleted file mode 120000 index 06ea79aebc..0000000000 --- a/plugins/modules/jenkins_job_info.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/jenkins_job_info.py \ No newline at end of file diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py new file mode 100644 index 0000000000..7c3feafee2 --- /dev/null +++ b/plugins/modules/jenkins_job_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) + # SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_job_info +short_description: Get information about Jenkins jobs +description: + - This module can be used to query information about Jenkins jobs that already exist. +requirements: + - "python-jenkins >= 0.4.12" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + type: str + description: + - Exact name of the Jenkins job to fetch information about. + glob: + type: str + description: + - A shell glob of Jenkins job names to fetch information about. + color: + type: str + description: + - Only fetch jobs with the given status color. + password: + type: str + description: + - Password to authenticate with the Jenkins server. + - This is mutually exclusive with O(token). + token: + type: str + description: + - API token used to authenticate with the Jenkins server. + - This is mutually exclusive with O(password). + url: + type: str + description: + - URL where the Jenkins server is accessible. + default: http://localhost:8080 + user: + type: str + description: + - User to authenticate with the Jenkins server. + validate_certs: + description: + - If set to V(false), the SSL certificates are not validated. + - This should only be set to V(false) on personally controlled sites using self-signed certificates. + default: true + type: bool +author: + - "Chris St. Pierre (@stpierre)" +""" + +EXAMPLES = r""" +# Get all Jenkins jobs anonymously +- community.general.jenkins_job_info: + register: my_jenkins_job_info + +# Get all Jenkins jobs using basic auth +- community.general.jenkins_job_info: + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get all Jenkins jobs using the token +- community.general.jenkins_job_info: + user: admin + token: abcdefghijklmnop + register: my_jenkins_job_info + +# Get info about a single job using basic auth +- community.general.jenkins_job_info: + name: some-job-name + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about a single job in a folder using basic auth +- community.general.jenkins_job_info: + name: some-folder-name/some-job-name + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about jobs matching a shell glob using basic auth +- community.general.jenkins_job_info: + glob: some-job-* + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about all failing jobs using basic auth +- community.general.jenkins_job_info: + color: red + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about passing jobs matching a shell glob using basic auth +- community.general.jenkins_job_info: + name: some-job-* + color: blue + user: admin + password: hunter2 + register: my_jenkins_job_info + +- name: Get the info from custom URL with token and validate_certs=False + community.general.jenkins_job_info: + user: admin + token: 126df5c60d66c66e3b75b11104a16a8a + url: https://jenkins.example.com + register: my_jenkins_job_info +""" + +RETURN = r""" +jobs: + description: All jobs found matching the specified criteria.
+ returned: success + type: list + sample: + [ + { + "name": "test-job", + "fullname": "test-folder/test-job", + "url": "http://localhost:8080/job/test-job/", + "color": "blue" + } + ] +""" + +import ssl +import fnmatch +import traceback + +JENKINS_IMP_ERR = None +try: + import jenkins + HAS_JENKINS = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + HAS_JENKINS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def get_jenkins_connection(module): + url = module.params["url"] + username = module.params.get("user") + password = module.params.get("password") + token = module.params.get("token") + + validate_certs = module.params.get('validate_certs') + if not validate_certs and hasattr(ssl, 'SSLContext'): + ssl._create_default_https_context = ssl._create_unverified_context + if validate_certs and not hasattr(ssl, 'SSLContext'): + module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9." + " Either update Python or use validate_certs=false.") + + if username and (password or token): + return jenkins.Jenkins(url, username, password or token) + elif username: + return jenkins.Jenkins(url, username) + else: + return jenkins.Jenkins(url) + + +def test_dependencies(module): + if not HAS_JENKINS: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def get_jobs(module): + jenkins_conn = get_jenkins_connection(module) + jobs = [] + if module.params.get("name"): + try: + job_info = jenkins_conn.get_job_info(module.params.get("name")) + except jenkins.NotFoundException: + pass + else: + jobs.append({ + "name": job_info["name"], + "fullname": job_info["fullName"], + "url": job_info["url"], + "color": job_info["color"] + }) + + else: + all_jobs = jenkins_conn.get_all_jobs() + if module.params.get("glob"): + jobs.extend( + j for j in all_jobs + if fnmatch.fnmatch(j["fullname"], module.params.get("glob"))) + else: + jobs = all_jobs + # python-jenkins includes the internal Jenkins class used for each job + # in its return value; we strip that out because the leading underscore + # (and the fact that it is not documented in the python-jenkins docs) + # indicates that it is not part of the dependable public interface. 
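+    # Removing the key in place is safe here: these dicts were either built
+    # above or returned by get_all_jobs(), and are handed straight back to the
+    # caller below.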
+ for job in jobs: + if "_class" in job: + del job["_class"] + + if module.params.get("color"): + jobs = [j for j in jobs if j["color"] == module.params.get("color")] + + return jobs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str'), + glob=dict(type='str'), + color=dict(type='str'), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + url=dict(type='str', default="http://localhost:8080"), + user=dict(type='str'), + validate_certs=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ['password', 'token'], + ['name', 'glob'], + ], + supports_check_mode=True, + ) + + test_dependencies(module) + jobs = list() + + try: + jobs = get_jobs(module) + except jenkins.JenkinsException as err: + module.fail_json( + msg='Unable to connect to Jenkins server, %s' % to_native(err), + exception=traceback.format_exc()) + + module.exit_json(changed=False, jobs=jobs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py new file mode 100644 index 0000000000..2ebcdf967d --- /dev/null +++ b/plugins/modules/jenkins_node.py @@ -0,0 +1,484 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_node +short_description: Manage Jenkins nodes +version_added: 10.0.0 +description: + - Manage Jenkins nodes with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Connor Newton (@phyrwork) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - Check mode is unable to show configuration changes for a node that is not yet present. + diff_mode: + support: none +options: + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + name: + description: + - Name of the Jenkins node to manage. + required: true + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str + token: + description: + - API token to authenticate with the Jenkins server. + type: str + state: + description: + - Specifies whether the Jenkins node should be V(present) (created), V(absent) (deleted), V(enabled) (online) or V(disabled) + (offline). + default: present + choices: ['enabled', 'disabled', 'present', 'absent'] + type: str + num_executors: + description: + - When specified, sets the Jenkins node executor count. + type: int + labels: + description: + - When specified, sets the Jenkins node labels. + type: list + elements: str + offline_message: + description: + - Specifies the offline reason message to be set when configuring the Jenkins node state. + - If O(offline_message) is given and requested O(state) is not V(disabled), an error is raised. + - Internally O(offline_message) is set using the V(toggleOffline) API, so updating the message when the node is already + offline (current state V(disabled)) is not possible. In this case, a warning is issued. 
+ type: str + version_added: 10.0.0 +""" + +EXAMPLES = r""" +- name: Create a Jenkins node using token authentication + community.general.jenkins_node: + url: http://localhost:8080 + user: jenkins + token: 11eb751baabb66c4d1cb8dc4e0fb142cde + name: my-node + state: present + +- name: Set number of executors on Jenkins node + community.general.jenkins_node: + name: my-node + state: present + num_executors: 4 + +- name: Set labels on Jenkins node + community.general.jenkins_node: + name: my-node + state: present + labels: + - label-1 + - label-2 + - label-3 + +- name: Set Jenkins node offline with offline message. + community.general.jenkins_node: + name: my-node + state: disabled + offline_message: >- + This node is offline for some reason. +""" + +RETURN = r""" +url: + description: URL used to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +user: + description: User used for authentication. + returned: success + type: str + sample: jenkins +name: + description: Name of the Jenkins node. + returned: success + type: str + sample: my-node +state: + description: State of the Jenkins node. + returned: success + type: str + sample: present +created: + description: Whether or not the Jenkins node was created by the task. + returned: success + type: bool +deleted: + description: Whether or not the Jenkins node was deleted by the task. + returned: success + type: bool +disabled: + description: Whether or not the Jenkins node was disabled by the task. + returned: success + type: bool +enabled: + description: Whether or not the Jenkins node was enabled by the task. + returned: success + type: bool +configured: + description: Whether or not the Jenkins node was configured by the task. + returned: success + type: bool +""" + +import sys +import traceback +from xml.etree import ElementTree as et + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare( + "python-jenkins", + reason="python-jenkins is required to interact with Jenkins", + url="https://opendev.org/jjb/python-jenkins", +): + import jenkins + + +IS_PYTHON_2 = sys.version_info[0] <= 2 + + +class JenkinsNode: + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.state = module.params['state'] + self.token = module.params['token'] + self.user = module.params['user'] + self.url = module.params['url'] + self.num_executors = module.params['num_executors'] + self.labels = module.params['labels'] + self.offline_message = module.params['offline_message'] # type: str | None + + if self.offline_message is not None: + self.offline_message = self.offline_message.strip() + + if self.state != "disabled": + self.module.fail_json("can not set offline message when state is not disabled") + + if self.labels is not None: + for label in self.labels: + if " " in label: + self.module.fail_json("labels must not contain spaces: got invalid label {}".format(label)) + + self.instance = self.get_jenkins_instance() + self.result = { + 'changed': False, + 'url': self.url, + 'user': self.user, + 'name': self.name, + 'state': self.state, + 'created': False, + 'deleted': False, + 'disabled': False, + 'enabled': False, + 'configured': False, + 'warnings': [], + } + + def get_jenkins_instance(self): + try: + if self.user and self.token: + return jenkins.Jenkins(self.url, self.user, self.token) + elif self.user and not 
self.token:
+                return jenkins.Jenkins(self.url, self.user)
+            else:
+                return jenkins.Jenkins(self.url)
+        except Exception as e:
+            self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e))
+
+    def configure_node(self, present):
+        if not present:
+            # The node can only be absent here when running in check mode, and
+            # if it is absent there is no way to know what would and would not
+            # be changed.
+            if not self.module.check_mode:
+                raise Exception("configure_node present is False outside of check mode")
+            return
+
+        configured = False
+
+        data = self.instance.get_node_config(self.name)
+        root = et.fromstring(data)
+
+        if self.num_executors is not None:
+            elem = root.find('numExecutors')
+            if elem is None:
+                elem = et.SubElement(root, 'numExecutors')
+            if elem.text is None or int(elem.text) != self.num_executors:
+                elem.text = str(self.num_executors)
+                configured = True
+
+        if self.labels is not None:
+            elem = root.find('label')
+            if elem is None:
+                elem = et.SubElement(root, 'label')
+            labels = []
+            if elem.text:
+                labels = elem.text.split()
+            if labels != self.labels:
+                elem.text = " ".join(self.labels)
+                configured = True
+
+        if configured:
+            if IS_PYTHON_2:
+                data = et.tostring(root)
+            else:
+                data = et.tostring(root, encoding="unicode")
+
+            self.instance.reconfig_node(self.name, data)
+
+        self.result['configured'] = configured
+        if configured:
+            self.result['changed'] = True
+
+    def present_node(self, configure=True):  # type: (bool) -> bool
+        """Assert node present.
+
+        Args:
+            configure: If True, run node configuration after asserting node present.
+
+        Returns:
+            True if the node is present, False otherwise (i.e. in check mode).
+        """
+        def create_node():
+            try:
+                self.instance.create_node(self.name, launcher=jenkins.LAUNCHER_SSH)
+            except jenkins.JenkinsException as e:
+                # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+                # handling redirects returned when posting to resources. If the node was
+                # created OK, the error can be ignored.
+                if not self.instance.node_exists(self.name):
+                    self.module.fail_json(msg="Create node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+                # TODO: Remove authorization workaround.
+                self.result['warnings'].append(
+                    "suppressed 401 Not Authorized on redirect after node created: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+                )
+
+        present = self.instance.node_exists(self.name)
+        created = False
+        if not present:
+            if not self.module.check_mode:
+                create_node()
+                present = True
+
+            created = True
+
+        if configure:
+            self.configure_node(present)
+
+        self.result['created'] = created
+        if created:
+            self.result['changed'] = True
+
+        return present  # Used to gate downstream queries when in check mode.
+
+    def absent_node(self):
+        def delete_node():
+            try:
+                self.instance.delete_node(self.name)
+            except jenkins.JenkinsException as e:
+                # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+                # handling redirects returned when posting to resources. If the node was
+                # deleted OK, the error can be ignored.
+                if self.instance.node_exists(self.name):
+                    self.module.fail_json(msg="Delete node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+                # TODO: Remove authorization workaround.
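+                # The node is confirmed gone at this point, so the suppressed
+                # exception is surfaced as a warning rather than a failure.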
+                self.result['warnings'].append(
+                    "suppressed 401 Not Authorized on redirect after node deleted: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+                )
+
+        present = self.instance.node_exists(self.name)
+        deleted = False
+        if present:
+            if not self.module.check_mode:
+                delete_node()
+
+            deleted = True
+
+        self.result['deleted'] = deleted
+        if deleted:
+            self.result['changed'] = True
+
+    def enabled_node(self):
+        def get_offline():  # type: () -> bool
+            return self.instance.get_node_info(self.name)["offline"]
+
+        present = self.present_node()
+
+        enabled = False
+
+        if present:
+            def enable_node():
+                try:
+                    self.instance.enable_node(self.name)
+                except jenkins.JenkinsException as e:
+                    # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+                    # handling redirects returned when posting to resources. If the node was
+                    # enabled OK, the error can be ignored.
+                    offline = get_offline()
+
+                    if offline:
+                        self.module.fail_json(msg="Enable node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+                    # TODO: Remove authorization workaround.
+                    self.result['warnings'].append(
+                        "suppressed 401 Not Authorized on redirect after node enabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+                    )
+
+            offline = get_offline()
+
+            if offline:
+                if not self.module.check_mode:
+                    enable_node()
+
+                enabled = True
+        else:
+            # The node would have been created with its initial state enabled, so
+            # there would have been nothing to enable; report enabled as False.
+            if not self.module.check_mode:
+                raise Exception("enabled_node present is False outside of check mode")
+            enabled = False
+
+        self.result['enabled'] = enabled
+        if enabled:
+            self.result['changed'] = True
+
+    def disabled_node(self):
+        def get_offline_info():
+            info = self.instance.get_node_info(self.name)
+
+            offline = info["offline"]
+            offline_message = info["offlineCauseReason"]
+
+            return offline, offline_message
+
+        # Don't configure until after disabled, in case the change in configuration
+        # causes the node to pick up a job.
+        present = self.present_node(False)
+
+        disabled = False
+        changed = False
+
+        if present:
+            offline, offline_message = get_offline_info()
+
+            if self.offline_message is not None and self.offline_message != offline_message:
+                if offline:
+                    # n.b. Internally disable_node uses toggleOffline gated by a not
+                    # offline condition. This means that disable_node cannot be used to
+                    # update an offline message if the node is already offline.
+                    #
+                    # Toggling the node online to set the message when toggling offline
+                    # again is not an option as during this transient online time jobs
+                    # may be scheduled on the node, which is not acceptable.
+                    self.result["warnings"].append(
+                        "unable to change offline message when already offline"
+                    )
+                else:
+                    offline_message = self.offline_message
+                    changed = True
+
+            def disable_node():
+                try:
+                    self.instance.disable_node(self.name, offline_message)
+                except jenkins.JenkinsException as e:
+                    # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+                    # handling redirects returned when posting to resources. If the node was
+                    # disabled OK, the error can be ignored.
+                    offline, _offline_message = get_offline_info()
+
+                    if not offline:
+                        self.module.fail_json(msg="Disable node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+                    # TODO: Remove authorization workaround.
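+                    # The node is confirmed offline at this point, so the suppressed
+                    # exception is surfaced as a warning rather than a failure.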
+ self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node disabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + if not offline: + if not self.module.check_mode: + disable_node() + + disabled = True + + else: + # Would have created node with initial state enabled therefore would have + # needed to disable therefore disabled. + if not self.module.check_mode: + raise Exception("disabled_node present is False outside of check mode") + disabled = True + + if disabled: + changed = True + + self.result['disabled'] = disabled + + if changed: + self.result['changed'] = True + + self.configure_node(present) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str'), + url=dict(default='http://localhost:8080'), + user=dict(), + token=dict(no_log=True), + state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='present'), + num_executors=dict(type='int'), + labels=dict(type='list', elements='str'), + offline_message=dict(type='str'), + ), + supports_check_mode=True, + ) + + deps.validate(module) + + jenkins_node = JenkinsNode(module) + + state = module.params.get('state') + if state == 'enabled': + jenkins_node.enabled_node() + elif state == 'disabled': + jenkins_node.disabled_node() + elif state == 'present': + jenkins_node.present_node() + else: + jenkins_node.absent_node() + + module.exit_json(**jenkins_node.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py deleted file mode 120000 index 6345daed01..0000000000 --- a/plugins/modules/jenkins_plugin.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/jenkins_plugin.py \ No newline at end of file diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py new file mode 100644 index 0000000000..9f38668037 --- /dev/null +++ b/plugins/modules/jenkins_plugin.py @@ -0,0 +1,1014 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: jenkins_plugin +author: Jiri Tyr (@jtyr) +short_description: Add or remove Jenkins plugin +description: + - Ansible module which helps to manage Jenkins plugins. +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + group: + type: str + description: + - GID or name of the Jenkins group on the OS. + default: jenkins + jenkins_home: + type: path + description: + - Home directory of the Jenkins user. + default: /var/lib/jenkins + mode: + type: raw + description: + - File mode applied on versioned plugins. + default: '0644' + name: + type: str + description: + - Plugin name. + required: true + owner: + type: str + description: + - UID or name of the Jenkins user on the OS. + default: jenkins + state: + type: str + description: + - Desired plugin state. + - If set to V(latest), the check for new version is performed every time. This is suitable to keep the plugin up-to-date. + choices: [absent, present, pinned, unpinned, enabled, disabled, latest] + default: present + timeout: + type: int + description: + - Server connection timeout in secs. + default: 30 + updates_expiration: + type: int + description: + - Number of seconds after which a new copy of the C(update-center.json) file is downloaded. 
This is used to avoid the
+        need to download the plugin to calculate its checksum when O(state=latest) is specified.
+      - Set it to V(0) if no cache file should be used. In that case, the plugin file is always downloaded to calculate its
+        checksum when O(state=latest) is specified.
+    default: 86400
+  updates_url:
+    type: list
+    elements: str
+    description:
+      - A list of base URL(s) from which to retrieve C(update-center.json) and direct plugin files.
+      - This can be a list since community.general 3.3.0.
+    default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io']
+  updates_url_username:
+    description:
+      - If using a custom O(updates_url), set this as the username of the user with access to the URL.
+      - If the custom O(updates_url) does not require authentication, this can be left empty.
+    type: str
+    version_added: 11.2.0
+  updates_url_password:
+    description:
+      - If using a custom O(updates_url), set this as the password of the user with access to the URL.
+      - If the custom O(updates_url) does not require authentication, this can be left empty.
+    type: str
+    version_added: 11.2.0
+  update_json_url_segment:
+    type: list
+    elements: str
+    description:
+      - A list of URL segment(s) to retrieve the update center JSON file from.
+    default: ['update-center.json', 'updates/update-center.json']
+    version_added: 3.3.0
+  plugin_versions_url_segment:
+    type: list
+    elements: str
+    description:
+      - A list of URL segment(s) to retrieve the plugin versions JSON file from.
+    default: ['plugin-versions.json', 'current/plugin-versions.json']
+    version_added: 11.2.0
+  latest_plugins_url_segments:
+    type: list
+    elements: str
+    description:
+      - Path inside the O(updates_url) to get the latest plugins from.
+    default: ['latest']
+    version_added: 3.3.0
+  versioned_plugins_url_segments:
+    type: list
+    elements: str
+    description:
+      - Path inside the O(updates_url) to get a specific version of plugins from.
+    default: ['download/plugins', 'plugins']
+    version_added: 3.3.0
+  url:
+    type: str
+    description:
+      - URL of the Jenkins server.
+    default: http://localhost:8080
+  version:
+    type: str
+    description:
+      - Plugin version number.
+      - If this option is specified, all plugin dependencies must be installed manually.
+      - It might take longer to verify that the correct version is installed. This is especially true if a specific version
+        number is specified.
+      - Quote the version to prevent the value from being interpreted as a float. For example, if V(1.20) were unquoted, it
+        would become V(1.2).
+  with_dependencies:
+    description:
+      - Defines whether to install plugin dependencies.
+      - In earlier versions, this option had no effect when a specific O(version) was set. Since community.general 11.2.0,
+        dependencies are also installed for versioned plugins.
+    type: bool
+    default: true
+
+notes:
+  - Plugin installation should be run under root or the same user which owns the plugin files on the disk. The API installation,
+    which requires only the Web UI credentials, is performed only if the plugin is not installed yet and no version is specified.
+  - It is necessary to notify the handler or call the M(ansible.builtin.service) module to restart the Jenkins service after
+    a new plugin has been installed.
+  - Pinning works only if the plugin is installed and the Jenkins service was successfully restarted after the plugin installation.
+  - It is not possible to run the module remotely by changing the O(url) parameter to point to the Jenkins server.
The module + must be used on the host where Jenkins runs as it needs direct access to the plugin files. + - If using a custom O(updates_url), ensure that the URL provides a C(plugin-versions.json) file. + This file must include metadata for all available plugin versions to support version compatibility resolution. + The file should be in the same format as the one provided by Jenkins update center (https://updates.jenkins.io/current/plugin-versions.json). +extends_documentation_fragment: + - ansible.builtin.url + - ansible.builtin.files + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Install plugin + community.general.jenkins_plugin: + name: build-pipeline-plugin + +- name: Install plugin without its dependencies + community.general.jenkins_plugin: + name: build-pipeline-plugin + with_dependencies: false + +- name: Make sure the plugin is always up-to-date + community.general.jenkins_plugin: + name: token-macro + state: latest + +- name: Install specific version of the plugin + community.general.jenkins_plugin: + name: token-macro + version: "1.15" + +- name: Pin the plugin + community.general.jenkins_plugin: + name: token-macro + state: pinned + +- name: Unpin the plugin + community.general.jenkins_plugin: + name: token-macro + state: unpinned + +- name: Enable the plugin + community.general.jenkins_plugin: + name: token-macro + state: enabled + +- name: Disable the plugin + community.general.jenkins_plugin: + name: token-macro + state: disabled + +- name: Uninstall plugin + community.general.jenkins_plugin: + name: build-pipeline-plugin + state: absent + +# +# Example of how to authenticate +# +- name: Install plugin + community.general.jenkins_plugin: + name: build-pipeline-plugin + url_username: admin + url_password: p4ssw0rd + url: http://localhost:8888 + +# +# Example of how to authenticate with serverless deployment +# +- name: Update plugins on ECS Fargate Jenkins instance + community.general.jenkins_plugin: + # plugin name and version + name: ws-cleanup + version: '0.45' + # Jenkins home path mounted on ec2-helper VM (example) + jenkins_home: "/mnt/{{ jenkins_instance }}" + # matching the UID/GID to one in official Jenkins image + owner: 1000 + group: 1000 + # Jenkins instance URL and admin credentials + url: "https://{{ jenkins_instance }}.com/" + url_username: admin + url_password: p4ssw0rd + # make module work from EC2 which has local access + # to EFS mount as well as Jenkins URL + delegate_to: ec2-helper + vars: + jenkins_instance: foobar + +# +# Example of a Play which handles Jenkins restarts during the state changes +# +- name: Jenkins Master play + hosts: jenkins-master + vars: + my_jenkins_plugins: + token-macro: + enabled: true + build-pipeline-plugin: + version: "1.4.9" + pinned: false + enabled: true + tasks: + - name: Install plugins without a specific version + community.general.jenkins_plugin: + name: "{{ item.key }}" + register: my_jenkins_plugin_unversioned + when: > + 'version' not in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Install plugins with a specific version + community.general.jenkins_plugin: + name: "{{ item.key }}" + version: "{{ item.value['version'] }}" + register: my_jenkins_plugin_versioned + when: > + 'version' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Initiate the fact + ansible.builtin.set_fact: + jenkins_restart_required: false + + - name: Check if restart is required by any of the versioned plugins + ansible.builtin.set_fact: + jenkins_restart_required: true + when: item.changed + 
with_items: "{{ my_jenkins_plugin_versioned.results }}" + + - name: Check if restart is required by any of the unversioned plugins + ansible.builtin.set_fact: + jenkins_restart_required: true + when: item.changed + with_items: "{{ my_jenkins_plugin_unversioned.results }}" + + - name: Restart Jenkins if required + ansible.builtin.service: + name: jenkins + state: restarted + when: jenkins_restart_required + + - name: Wait for Jenkins to start up + ansible.builtin.uri: + url: http://localhost:8080 + status_code: 200 + timeout: 5 + register: jenkins_service_status + # Keep trying for 5 mins in 5 sec intervals + retries: 60 + delay: 5 + until: > + 'status' in jenkins_service_status and + jenkins_service_status['status'] == 200 + when: jenkins_restart_required + + - name: Reset the fact + ansible.builtin.set_fact: + jenkins_restart_required: false + when: jenkins_restart_required + + - name: Plugin pinning + community.general.jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" + when: > + 'pinned' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Plugin enabling + community.general.jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}" + when: > + 'enabled' in item.value + with_dict: "{{ my_jenkins_plugins }}" +""" + +RETURN = r""" +plugin: + description: Plugin name. + returned: success + type: str + sample: build-pipeline-plugin +state: + description: State of the target, after execution. + returned: success + type: str + sample: "present" +""" + +import hashlib +import io +import json +import os +import tempfile +import time +from collections import OrderedDict +from http import cookiejar +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule, to_bytes +from ansible.module_utils.urls import fetch_url, url_argument_spec, basic_auth_header +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.jenkins import download_updates_file + + +class FailedInstallingWithPluginManager(Exception): + pass + + +class JenkinsPlugin(object): + def __init__(self, module): + # To be able to call fail_json + self.module = module + + # Shortcuts for the params + self.params = self.module.params + self.url = self.params['url'] + self.timeout = self.params['timeout'] + + # Authentication for non-Jenkins calls + self.updates_url_credentials = {} + if self.params.get('updates_url_username') and self.params.get('updates_url_password'): + self.updates_url_credentials["Authorization"] = basic_auth_header(self.params['updates_url_username'], self.params['updates_url_password']) + + # Crumb + self.crumb = {} + + # Authentication for Jenkins calls + if self.params.get('url_username') and self.params.get('url_password'): + self.crumb["Authorization"] = basic_auth_header(self.params['url_username'], self.params['url_password']) + + # Cookie jar for crumb session + self.cookies = None + + if self._csrf_enabled(): + self.cookies = cookiejar.LWPCookieJar() + self._get_crumb() + + # Get list of installed plugins + self._get_installed_plugins() + + def _csrf_enabled(self): + csrf_data = self._get_json_data( + "%s/%s" % (self.url, "api/json"), 'CSRF') + + if 'useCrumbs' not in csrf_data: + self.module.fail_json( + msg="Required fields not found in the Crumbs response.", + details=csrf_data) + + return csrf_data['useCrumbs'] + + def _get_json_data(self, url, what, **kwargs): + # Get the JSON data 
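+        # _get_url_data() fails the module on errors (unless dont_fail is
+        # passed through), so r is a readable response object at this point.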
+        r = self._get_url_data(url, what, **kwargs)
+
+        # Parse the JSON data
+        try:
+            json_data = json.loads(to_native(r.read()))
+        except Exception as e:
+            self.module.fail_json(
+                msg="Cannot parse %s JSON data." % what,
+                details=to_native(e))
+
+        return json_data
+
+    def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, **kwargs):
+        # Compose default messages
+        if msg_status is None:
+            msg_status = "Cannot get %s" % what
+
+        if msg_exception is None:
+            msg_exception = "Retrieval of %s failed." % what
+
+        errors = {}
+        for url in urls:
+            err_msg = None
+            try:
+                self.module.debug("fetching url: %s" % url)
+
+                is_jenkins_call = url.startswith(self.url)
+                self.module.params['force_basic_auth'] = is_jenkins_call
+
+                response, info = fetch_url(
+                    self.module, url, timeout=self.timeout, cookies=self.cookies,
+                    headers=self.crumb if is_jenkins_call else self.updates_url_credentials or self.crumb,
+                    **kwargs)
+                if info['status'] == 200:
+                    return response
+                else:
+                    err_msg = ("%s. fetching url %s failed. response code: %s" % (msg_status, url, info['status']))
+                    if info['status'] > 400:  # extend error message
+                        err_msg = "%s. response body: %s" % (err_msg, info['body'])
+            except Exception as e:
+                err_msg = "%s. fetching url %s failed. error msg: %s" % (msg_status, url, to_native(e))
+            finally:
+                if err_msg is not None:
+                    self.module.debug(err_msg)
+                    errors[url] = err_msg
+
+        # failed on all urls
+        self.module.fail_json(msg=msg_exception, details=errors)
+
+    def _get_url_data(
+            self, url, what=None, msg_status=None, msg_exception=None,
+            dont_fail=False, **kwargs):
+        # Compose default messages
+        if msg_status is None:
+            msg_status = "Cannot get %s" % what
+
+        if msg_exception is None:
+            msg_exception = "Retrieval of %s failed." % what
+
+        # Get the URL data
+        try:
+            is_jenkins_call = url.startswith(self.url)
+            self.module.params['force_basic_auth'] = is_jenkins_call
+
+            response, info = fetch_url(
+                self.module, url, timeout=self.timeout, cookies=self.cookies,
+                headers=self.crumb if is_jenkins_call else self.updates_url_credentials or self.crumb,
+                **kwargs)
+
+            if info['status'] != 200:
+                if dont_fail:
+                    raise FailedInstallingWithPluginManager(info['msg'])
+                else:
+                    self.module.fail_json(msg=msg_status, details=info['msg'])
+        except Exception as e:
+            if dont_fail:
+                raise FailedInstallingWithPluginManager(e)
+            else:
+                self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+        return response
+
+    def _get_crumb(self):
+        crumb_data = self._get_json_data(
+            "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+        if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+            self.crumb[crumb_data['crumbRequestField']] = crumb_data['crumb']
+        else:
+            self.module.fail_json(
+                msg="Required fields not found in the Crumb response.",
+                details=crumb_data)
+
+    def _get_installed_plugins(self):
+        plugins_data = self._get_json_data(
+            "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+            'list of plugins')
+
+        # Check if we got valid data
+        if 'plugins' not in plugins_data:
+            self.module.fail_json(msg="No valid plugin data found.")
+
+        # Create final list of installed/pinned plugins
+        self.is_installed = False
+        self.is_pinned = False
+        self.is_enabled = False
+        self.installed_plugins = plugins_data['plugins']
+
+        for p in plugins_data['plugins']:
+            if p['shortName'] == self.params['name']:
+                self.is_installed = True
+
+                if p['pinned']:
+                    self.is_pinned = True
+
+                if p['enabled']:
+                    self.is_enabled = True
+
+                break
+
+    def _install_dependencies(self):
+        dependencies = self._get_versioned_dependencies()
+        self.dependencies_states = []
+
+        for dep_name, dep_version in dependencies.items():
+            if not any(p['shortName'] == dep_name and p['version'] == dep_version for p in self.installed_plugins):
+                dep_params = self.params.copy()
+                dep_params['name'] = dep_name
+                dep_params['version'] = dep_version
+                dep_module = AnsibleModule(
+                    argument_spec=self.module.argument_spec,
+                    supports_check_mode=self.module.check_mode
+                )
+                dep_module.params = dep_params
+                dep_plugin = JenkinsPlugin(dep_module)
+                if not dep_plugin.install():
+                    self.dependencies_states.append(
+                        {
+                            'name': dep_name,
+                            'version': dep_version,
+                            'state': 'absent'})
+                else:
+                    self.dependencies_states.append(
+                        {
+                            'name': dep_name,
+                            'version': dep_version,
+                            'state': 'present'})
+            else:
+                self.dependencies_states.append(
+                    {
+                        'name': dep_name,
+                        'version': dep_version,
+                        'state': 'present'})
+
+    def _install_with_plugin_manager(self):
+        if not self.module.check_mode:
+            # Install the plugin (with dependencies)
+            install_script = (
+                'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+                '.deploy(); d.get();' % self.params['name'])
+
+            if self.params['with_dependencies']:
+                install_script = (
+                    'Jenkins.instance.updateCenter.getPlugin("%s")'
+                    '.getNeededDependencies().each{it.deploy()}; %s' % (
+                        self.params['name'], install_script))
+
+            script_data = {
+                'script': install_script
+            }
+            data = urlencode(script_data)
+
+            # Send the installation request
+            r = self._get_url_data(
+                "%s/scriptText" % self.url,
+                msg_status="Cannot install plugin.",
+                msg_exception="Plugin installation has failed.",
+                data=data,
+                dont_fail=True)
+
+            hpi_file = '%s/plugins/%s.hpi' % (
+                self.params['jenkins_home'],
+                self.params['name'])
+
+            if os.path.isfile(hpi_file):
+                os.remove(hpi_file)
+
+    def install(self):
+        changed = False
+        plugin_file = (
+            '%s/plugins/%s.jpi' % (
+                self.params['jenkins_home'],
+                self.params['name']))
+
+        if not self.is_installed and self.params['version'] in [None, 'latest']:
+            try:
+                self._install_with_plugin_manager()
+                changed = True
+            except FailedInstallingWithPluginManager:  # Fall back to manually downloading the plugin
+                pass
+
+        if not changed:
+            # Check if the plugin directory exists
+            if not os.path.isdir(self.params['jenkins_home']):
+                self.module.fail_json(
+                    msg="Jenkins home directory doesn't exist.")
+
+            checksum_old = None
+            if os.path.isfile(plugin_file):
+                # Compute the checksum of the currently installed plugin
+                with open(plugin_file, 'rb') as plugin_fh:
+                    plugin_content = plugin_fh.read()
+                checksum_old = hashlib.sha1(plugin_content).hexdigest()
+
+            # Install dependencies
+            if self.params['with_dependencies']:
+                self._install_dependencies()
+
+            if self.params['version'] in [None, 'latest']:
+                # Take latest version
+                plugin_urls = self._get_latest_plugin_urls()
+            else:
+                # Take specific version
+                plugin_urls = self._get_versioned_plugin_urls()
+            if (
+                    self.params['updates_expiration'] == 0 or
+                    self.params['version'] not in [None, 'latest'] or
+                    checksum_old is None):
+
+                # Download the plugin file directly
+                r = self._download_plugin(plugin_urls)
+
+                # Write downloaded plugin into file if checksums don't match
+                if checksum_old is None:
+                    # No previously installed plugin
+                    if not self.module.check_mode:
+                        self._write_file(plugin_file, r)
+
+                    changed = True
+                else:
+                    # Read the data to compute its SHA-1 checksum
+                    data = r.read()
+
+                    # Compute the new checksum
+                    checksum_new = hashlib.sha1(data).hexdigest()
+
+                    # If the checksum is different from the currently installed
+                    # plugin, store the new plugin
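+                    # (in check mode the change is reported without writing the file)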
+                    if checksum_old != checksum_new:
+                        if not self.module.check_mode:
+                            self._write_file(plugin_file, data)
+
+                        changed = True
+            elif self.params['version'] == 'latest':
+                # Check for an update from the updates JSON file
+                plugin_data = self._download_updates()
+
+                # If the latest version changed, download it
+                if checksum_old != to_bytes(plugin_data['sha1']):
+                    if not self.module.check_mode:
+                        r = self._download_plugin(plugin_urls)
+                        self._write_file(plugin_file, r)
+
+                    changed = True
+
+        # Change file attributes if needed
+        if os.path.isfile(plugin_file):
+            params = {
+                'dest': plugin_file
+            }
+            params.update(self.params)
+            file_args = self.module.load_file_common_arguments(params)
+
+            if not self.module.check_mode:
+                # Not sure how to run this in check mode
+                changed = self.module.set_fs_attributes_if_different(
+                    file_args, changed)
+            else:
+                # See the comment above
+                changed = True
+
+        return changed
+
+    def _get_latest_plugin_urls(self):
+        urls = []
+        for base_url in self.params['updates_url']:
+            for update_segment in self.params['latest_plugins_url_segments']:
+                urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name']))
+        return urls
+
+    def _get_latest_compatible_plugin_version(self, plugin_name=None):
+        if not hasattr(self, 'jenkins_version'):
+            self.module.params['force_basic_auth'] = True
+            resp, info = fetch_url(self.module, self.url)
+            raw_version = info.get("x-jenkins")
+            self.jenkins_version = self.parse_version(raw_version)
+        name = plugin_name or self.params['name']
+        cache_path = "{}/ansible_jenkins_plugin_cache.json".format(self.params['jenkins_home'])
+        plugin_version_urls = []
+        for base_url in self.params['updates_url']:
+            for update_json in self.params['plugin_versions_url_segment']:
+                plugin_version_urls.append("{}/{}".format(base_url, update_json))
+
+        try:  # Check if the file is saved locally
+            if os.path.exists(cache_path):
+                file_mtime = os.path.getmtime(cache_path)
+            else:
+                file_mtime = 0
+
+            now = time.time()
+            if now - file_mtime >= 86400:
+                response = self._get_urls_data(plugin_version_urls, what="plugin-versions.json")
+                plugin_data = json.loads(to_native(response.read()), object_pairs_hook=OrderedDict)
+
+                # Save it to file for next time
+                with open(cache_path, "w") as f:
+                    json.dump(plugin_data, f)
+
+            with open(cache_path, "r") as f:
+                plugin_data = json.load(f)
+
+        except Exception as e:
+            if os.path.exists(cache_path):
+                os.remove(cache_path)
+            self.module.fail_json(msg="Failed to parse plugin-versions.json", details=to_native(e))
+
+        plugin_versions = plugin_data.get("plugins", {}).get(name)
+        if not plugin_versions:
+            self.module.fail_json(msg="Plugin '{}' not found.".format(name))
+
+        sorted_versions = list(reversed(plugin_versions.items()))
+
+        for idx, (version_title, version_info) in enumerate(sorted_versions):
+            required_core = version_info.get("requiredCore", "0.0")
+            if self.parse_version(required_core) <= self.jenkins_version:
+                return 'latest' if idx == 0 else version_title
+
+        self.module.warn(
+            "No compatible version found for plugin '{}'. "
+            "Installing latest version.".format(name))
+        return 'latest'
+
+    def _get_versioned_plugin_urls(self):
+        urls = []
+        for base_url in self.params['updates_url']:
+            for versioned_segment in self.params['versioned_plugins_url_segments']:
+                urls.append("{0}/{1}/{2}/{3}/{2}.hpi".format(base_url, versioned_segment, self.params['name'], self.params['version']))
+        return urls
+
+    def _get_update_center_urls(self):
+        urls = []
+        for base_url in self.params['updates_url']:
+            for update_json in self.params['update_json_url_segment']:
+                urls.append("{0}/{1}".format(base_url, update_json))
+        return urls
+
+    def _get_versioned_dependencies(self):
+        # Get dependencies for the specified plugin version
+        plugin_data = self._download_updates()['dependencies']
+
+        dependencies_info = {
+            dep["name"]: self._get_latest_compatible_plugin_version(dep["name"])
+            for dep in plugin_data
+            if not dep.get("optional", False)
+        }
+
+        return dependencies_info
+
+    def _download_updates(self):
+        try:
+            updates_file, download_updates = download_updates_file(self.params['updates_expiration'])
+        except OSError as e:
+            self.module.fail_json(
+                msg="Cannot create temporary directory.",
+                details=to_native(e))
+
+        # Download the updates file if needed
+        if download_updates:
+            urls = self._get_update_center_urls()
+
+            # Get the data
+            r = self._get_urls_data(
+                urls,
+                msg_status="Remote updates not found.",
+                msg_exception="Updates download failed.")
+
+            # Write the updates file
+            tmp_update_fd, tmp_updates_file = tempfile.mkstemp()
+            os.write(tmp_update_fd, r.read())
+
+            try:
+                os.close(tmp_update_fd)
+            except IOError as e:
+                self.module.fail_json(
+                    msg="Cannot close the tmp updates file %s." % tmp_updates_file,
+                    details=to_native(e))
+        else:
+            tmp_updates_file = updates_file
+
+        # Open the updates file
+        try:
+            f = io.open(tmp_updates_file, encoding='utf-8')
+
+            # Read only the second line
+            dummy = f.readline()
+            data = json.loads(f.readline())
+        except IOError as e:
+            self.module.fail_json(
+                msg="Cannot open%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""),
+                details=to_native(e))
+        except Exception as e:
+            self.module.fail_json(
+                msg="Cannot load JSON data from the%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""),
+                details=to_native(e))
+
+        # Move the updates file to the right place if we could read it
+        if tmp_updates_file != updates_file:
+            self.module.atomic_move(os.path.abspath(tmp_updates_file), os.path.abspath(updates_file))
+
+        # Check if we have the plugin data available
+        if not data.get('plugins', {}).get(self.params['name']):
+            self.module.fail_json(msg="Cannot find plugin data in the updates file.")
+
+        return data['plugins'][self.params['name']]
+
+    def _download_plugin(self, plugin_urls):
+        # Download the plugin
+        return self._get_urls_data(
+            plugin_urls,
+            msg_status="Plugin not found.",
+            msg_exception="Plugin download failed.")
+
+    def _write_file(self, f, data):
+        # Store the plugin into a temp file and then move it
+        tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+        if isinstance(data, (str, bytes)):
+            os.write(tmp_f_fd, to_bytes(data))
+        else:
+            os.write(tmp_f_fd, data.read())
+
+        try:
+            os.close(tmp_f_fd)
+        except IOError as e:
+            self.module.fail_json(
+                msg='Cannot close the temporary plugin file %s.' % tmp_f,
+                details=to_native(e))
+
+        # Move the file into the right place
+        self.module.atomic_move(os.path.abspath(tmp_f), os.path.abspath(f))
+
+    def uninstall(self):
+        changed = False
+
+        # Perform the action
+        if self.is_installed:
+            if not self.module.check_mode:
+                self._pm_query('doUninstall', 'Uninstallation')
+
+            changed = True
+
+        return changed
+
+    def pin(self):
+        return self._pinning('pin')
+
+    def unpin(self):
+        return self._pinning('unpin')
+
+    def _pinning(self, action):
+        changed = False
+
+        # Check if the plugin is pinned/unpinned
+        if (
+                action == 'pin' and not self.is_pinned or
+                action == 'unpin' and self.is_pinned):
+
+            # Perform the action
+            if not self.module.check_mode:
+                self._pm_query(action, "%sning" % action.capitalize())
+
+            changed = True
+
+        return changed
+
+    def enable(self):
+        return self._enabling('enable')
+
+    def disable(self):
+        return self._enabling('disable')
+
+    def _enabling(self, action):
+        changed = False
+
+        # Check if the plugin is enabled/disabled
+        if (
+                action == 'enable' and not self.is_enabled or
+                action == 'disable' and self.is_enabled):
+
+            # Perform the action
+            if not self.module.check_mode:
+                self._pm_query(
+                    "make%sd" % action.capitalize(),
+                    "%sing" % action[:-1].capitalize())
+
+            changed = True
+
+        return changed
+
+    def _pm_query(self, action, msg):
+        url = "%s/pluginManager/plugin/%s/%s" % (
+            self.params['url'], self.params['name'], action)
+
+        # Send the request
+        self._get_url_data(
+            url,
+            msg_status="Plugin not found. %s" % url,
+            msg_exception="%s has failed." % msg,
+            method="POST")
+
+    @staticmethod
+    def parse_version(version_str):
+        return tuple(int(x) for x in version_str.split('.'))
+
+
+def main():
+    # Module arguments
+    argument_spec = url_argument_spec()
+    argument_spec.update(
+        group=dict(type='str', default='jenkins'),
+        jenkins_home=dict(type='path', default='/var/lib/jenkins'),
+        mode=dict(default='0644', type='raw'),
+        name=dict(type='str', required=True),
+        owner=dict(type='str', default='jenkins'),
+        state=dict(
+            choices=[
+                'present',
+                'absent',
+                'pinned',
+                'unpinned',
+                'enabled',
+                'disabled',
+                'latest'],
+            default='present'),
+        timeout=dict(default=30, type="int"),
+        updates_expiration=dict(default=86400, type="int"),
+        updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io',
+                                                               'http://mirrors.jenkins.io']),
+        updates_url_username=dict(type="str"),
+        updates_url_password=dict(type="str", no_log=True),
+        update_json_url_segment=dict(type="list", elements="str", default=['update-center.json',
+                                                                           'updates/update-center.json']),
+        plugin_versions_url_segment=dict(type="list", elements="str", default=['plugin-versions.json',
+                                                                               'current/plugin-versions.json']),
+        latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']),
+        versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']),
+        url=dict(default='http://localhost:8080'),
+        url_password=dict(no_log=True),
+        version=dict(),
+        with_dependencies=dict(default=True, type='bool'),
+    )
+    # Module settings
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        add_file_common_args=True,
+        supports_check_mode=True,
+    )
+
+    # Convert timeout to float
+    try:
+        module.params['timeout'] = float(module.params['timeout'])
+    except ValueError as e:
+        module.fail_json(
+            msg='Cannot convert %s to float.' % module.params['timeout'],
+            details=to_native(e))
+    # Instantiate the JenkinsPlugin object
+    jp = JenkinsPlugin(module)
+
+    # Set version to latest if state is latest
+    if module.params['state'] == 'latest':
+        module.params['state'] = 'present'
+        module.params['version'] = jp._get_latest_compatible_plugin_version()
+
+    # Set version to latest compatible version if version is latest
+    if module.params['version'] == 'latest':
+        module.params['version'] = jp._get_latest_compatible_plugin_version()
+
+    # Create some shortcuts
+    name = module.params['name']
+    state = module.params['state']
+
+    # Initial change state of the task
+    changed = False
+
+    # Perform action depending on the requested state
+    if state == 'present':
+        changed = jp.install()
+    elif state == 'absent':
+        changed = jp.uninstall()
+    elif state == 'pinned':
+        changed = jp.pin()
+    elif state == 'unpinned':
+        changed = jp.unpin()
+    elif state == 'enabled':
+        changed = jp.enable()
+    elif state == 'disabled':
+        changed = jp.disable()
+
+    # Print status of the change
+    module.exit_json(changed=changed, plugin=name, state=state, dependencies=jp.dependencies_states if hasattr(jp, 'dependencies_states') else None)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py
deleted file mode 120000
index 1e13562ab9..0000000000
--- a/plugins/modules/jenkins_script.py
+++ /dev/null
@@ -1 +0,0 @@
-./web_infrastructure/jenkins_script.py
\ No newline at end of file
diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py
new file mode 100644
index 0000000000..eda3a49f2e
--- /dev/null
+++ b/plugins/modules/jenkins_script.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016, James Hogarth
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+author: James Hogarth (@hogarthj)
+module: jenkins_script
+short_description: Executes a Groovy script in the Jenkins instance
+description:
+  - The C(jenkins_script) module takes a script plus a dict of values to use within the script and returns the result of the
+    script being run.
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  script:
+    type: str
+    description:
+      - The Groovy script to be executed. This gets passed as a string Template if O(args) is defined.
+    required: true
+  url:
+    type: str
+    description:
+      - The Jenkins server to execute the script against. The default is a local Jenkins instance that is not being proxied
+        through a webserver.
+    default: http://localhost:8080
+  validate_certs:
+    description:
+      - If set to V(false), the SSL certificates are not validated. This should only be set to V(false) when used on personally
+        controlled sites using self-signed certificates, as it avoids verifying the source site.
+    type: bool
+    default: true
+  user:
+    type: str
+    description:
+      - The username to connect to the Jenkins server with.
+  password:
+    type: str
+    description:
+      - The password to connect to the Jenkins server with.
+  timeout:
+    type: int
+    description:
+      - The request timeout in seconds.
+    default: 10
+  args:
+    type: dict
+    description:
+      - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/3/library/string.html#template-strings).
+notes:
+  - Since the script can do anything, this module does not report on changes. As only the author knows what the script being
+    run actually does, it is important to set C(changed_when) so that the Ansible output is clear about any alterations made.
+"""
+
+EXAMPLES = r"""
+- name: Obtaining a list of plugins
+  community.general.jenkins_script:
+    script: 'println(Jenkins.instance.pluginManager.plugins)'
+    user: admin
+    password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+  ansible.builtin.set_fact:
+    setmaster_mode: |
+      import jenkins.model.*
+      instance = Jenkins.getInstance()
+      instance.setMode(${jenkins_mode})
+      instance.save()
+
+- name: Use the variable as the script
+  community.general.jenkins_script:
+    script: "{{ setmaster_mode }}"
+    args:
+      jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+  community.general.jenkins_script:
+    script: "println(Jenkins.instance.pluginManager.plugins)"
+    user: admin
+    password: admin
+    url: https://localhost
+    validate_certs: false  # only do this when you trust the network!
+"""
+
+RETURN = r"""
+output:
+  description: Result of script.
+  returned: success
+  type: str
+  sample: 'Result: true'
+"""
+
+import json
+from http import cookiejar
+from urllib.parse import urlencode
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_native
+
+
+def is_csrf_protection_enabled(module):
+    resp, info = fetch_url(module,
+                           module.params['url'] + '/api/json',
+                           timeout=module.params['timeout'],
+                           method='GET')
+    if info["status"] != 200:
+        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+    content = to_native(resp.read())
+    return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+    resp, info = fetch_url(module,
+                           module.params['url'] + '/crumbIssuer/api/json',
+                           method='GET',
+                           timeout=module.params['timeout'],
+                           cookies=cookies)
+    if info["status"] != 200:
+        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+    content = to_native(resp.read())
+    return json.loads(content)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            script=dict(required=True, type="str"),
+            url=dict(type="str", default="http://localhost:8080"),
+            validate_certs=dict(type="bool", default=True),
+            user=dict(type="str"),
+            password=dict(no_log=True, type="str"),
+            timeout=dict(type="int", default=10),
+            args=dict(type="dict")
+        )
+    )
+
+    if module.params['user'] is not None:
+        if module.params['password'] is None:
+            module.fail_json(msg="password required when user provided", output='')
+        module.params['url_username'] = module.params['user']
+        module.params['url_password'] = module.params['password']
+        module.params['force_basic_auth'] = True
+
+    if module.params['args'] is not None:
+        from string import Template
+        try:
+            script_contents = Template(module.params['script']).substitute(module.params['args'])
+        except KeyError as err:
+            module.fail_json(msg="Error with templating variable: %s" % err, output='')
+    else:
+        script_contents = module.params['script']
+
+    headers = {}
+    cookies = None
+    if is_csrf_protection_enabled(module):
+        cookies = cookiejar.LWPCookieJar()
+        crumb = get_crumb(module, cookies)
+        headers = {crumb['crumbRequestField']: crumb['crumb']}
+
+    resp, info = fetch_url(module,
+                           module.params['url'] + "/scriptText",
+                           data=urlencode({'script': script_contents}),
+                           headers=headers,
method="POST", + timeout=module.params['timeout'], + cookies=cookies) + + if info["status"] != 200: + module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='') + + result = to_native(resp.read()) + + if 'Exception:' in result and 'at java.lang.Thread' in result: + module.fail_json(msg="script failed with stacktrace:\n " + result, output='') + + module.exit_json( + output=result, + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py deleted file mode 120000 index a3f5f15b3d..0000000000 --- a/plugins/modules/jira.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/jira.py \ No newline at end of file diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py new file mode 100644 index 0000000000..34d1cc3a8a --- /dev/null +++ b/plugins/modules/jira.py @@ -0,0 +1,866 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Steve Smith +# Atlassian open-source approval reference OSR-76. +# +# Copyright (c) 2020, Per Abildgaard Toft Search and update function +# Copyright (c) 2021, Brandon McNama Issue attachment functionality +# Copyright (c) 2022, Hugo Prudente Worklog functionality +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: jira +short_description: Create and modify issues in a JIRA instance +description: + - Create and modify issues in a JIRA instance. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + uri: + type: str + required: true + description: + - Base URI for the JIRA instance. + operation: + type: str + required: true + aliases: [command] + choices: [attach, comment, create, edit, fetch, link, search, transition, update, worklog] + description: + - The operation to perform. + - V(worklog) was added in community.general 6.5.0. + username: + type: str + description: + - The username to log-in with. + - Must be used with O(password). Mutually exclusive with O(token). + password: + type: str + description: + - The password to log-in with. + - Must be used with O(username). Mutually exclusive with O(token). + token: + type: str + description: + - The personal access token to log-in with. + - Mutually exclusive with O(username) and O(password). + version_added: 4.2.0 + client_cert: + type: path + description: + - Client certificate if required. + - In addition to O(username) and O(password) or O(token). Not mutually exclusive. + version_added: 10.4.0 + client_key: + type: path + description: + - Client certificate key if required. + - In addition to O(username) and O(password) or O(token). Not mutually exclusive. + version_added: 10.4.0 + + project: + type: str + required: false + description: + - The project for this operation. Required for issue creation. + summary: + type: str + required: false + description: + - The issue summary, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. + description: + type: str + required: false + description: + - The issue description, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. + issuetype: + type: str + required: false + description: + - The issue type, for issue creation. 
+  issue:
+    type: str
+    required: false
+    description:
+      - An existing issue key to operate on.
+    aliases: ['ticket']
+
+  comment:
+    type: str
+    required: false
+    description:
+      - The comment text to add.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+  comment_visibility:
+    type: dict
+    description:
+      - Used to specify comment visibility.
+      - See
+        U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post)
+        for details.
+    suboptions:
+      type:
+        description:
+          - Use O(comment_visibility.type) to specify which of the JIRA visibility restriction types is used.
+        type: str
+        required: true
+        choices: [group, role]
+      value:
+        description:
+          - Specify the value corresponding to the type of visibility restriction. For example, the name of the group or role.
+        type: str
+        required: true
+    version_added: '3.2.0'
+
+  status:
+    type: str
+    required: false
+    description:
+      - Only used when O(operation) is V(transition). The name is a bit of a misnomer, as it actually refers to the transition name.
+      - This is mutually exclusive with O(status_id).
+  status_id:
+    type: str
+    required: false
+    description:
+      - Only used when O(operation) is V(transition), and refers to the transition ID.
+      - This is mutually exclusive with O(status).
+    version_added: 10.3.0
+  assignee:
+    type: str
+    required: false
+    description:
+      - Sets the assignee when O(operation) is V(create), V(transition), or V(edit).
+      - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+  account_id:
+    type: str
+    description:
+      - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit).
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+    version_added: 2.5.0
+
+  linktype:
+    type: str
+    required: false
+    description:
+      - Set the type of link when O(operation) is V(link).
+  inwardissue:
+    type: str
+    required: false
+    description:
+      - Set the issue from which the link is created.
+  outwardissue:
+    type: str
+    required: false
+    description:
+      - Set the issue to which the link is created.
+  fields:
+    type: dict
+    required: false
+    description:
+      - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly
+        after merging with other required data, as when passed to create). See examples for more information, and the JIRA
+        REST API for the structure required for various fields.
+      - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add
+        JIRA properties for example.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+    default: {}
+  jql:
+    required: false
+    description:
+      - Query JIRA in JQL Syntax, for example V("CMDB Hostname" = test.example.com).
+    type: str
+    version_added: '0.2.0'
+
+  maxresults:
+    required: false
+    description:
+      - Limit the result of O(operation=search). If no value is specified, the default JIRA limit is used.
+      - Used when O(operation=search) only, ignored otherwise.
+    type: int
+    version_added: '0.2.0'
+
+  timeout:
+    type: float
+    required: false
+    description:
+      - Set the timeout, in seconds, for requests to the JIRA API.
+    default: 10
+
+  validate_certs:
+    required: false
+    description:
+      - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates).
+    default: true
+    type: bool
+
+  attachment:
+    type: dict
+    version_added: 2.5.0
+    description:
+      - Information about the attachment being uploaded.
+    suboptions:
+      filename:
+        required: true
+        type: path
+        description:
+          - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified, the filename
+            to use for the attachment.
+      content:
+        type: str
+        description:
+          - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) are
+            used instead.
+      mimetype:
+        type: str
+        description:
+          - The MIME type to supply for the upload. If not specified, best-effort detection is performed.
+notes:
+  - Currently this only works with basic auth or tokens.
+  - To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password).
+author:
+  - "Steve Smith (@tarka)"
+  - "Per Abildgaard Toft (@pertoft)"
+  - "Brandon McNama (@DWSR)"
+"""
+
+EXAMPLES = r"""
+# Create a new issue and add a comment to it:
+- name: Create an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    project: ANS
+    operation: create
+    summary: Example Issue
+    description: Created using Ansible
+    issuetype: Task
+    args:
+      fields:
+        customfield_13225: "test"
+        customfield_12931: {"value": "Test"}
+  register: issue
+
+- name: Comment on issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: comment
+    comment: A comment added by Ansible
+
+- name: Comment on issue with restricted visibility
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: comment
+    comment: A comment added by Ansible
+    comment_visibility:
+      type: role
+      value: Developers
+
+- name: Comment on issue with property to mark it internal
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: comment
+    comment: A comment added by Ansible
+    fields:
+      properties:
+        - key: 'sd.public.comment'
+          value:
+            internal: true
+
+# Add a worklog to an existing issue
+- name: Worklog on issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: worklog
+    comment: A worklog added by Ansible
+    fields:
+      timeSpentSeconds: 12000
+
+- name: Worklog on issue with comment restricted visibility
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: worklog
+    comment: A worklog added by Ansible
+    comment_visibility:
+      type: role
+      value: Developers
+    fields:
+      timeSpentSeconds: 12000
+
+- name: Worklog on issue with comment property to mark it internal
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: worklog
+    comment: A worklog added by Ansible
+    fields:
+      properties:
+        - key: 'sd.public.comment'
+          value:
+            internal: true
+      timeSpentSeconds: 12000
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: edit
+    assignee: ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    project: ANS
+    operation: create
+    summary: Assigned issue
+    description: Created and assigned using Ansible
+    issuetype: Task
+    assignee: ssmith
+
+# Edit an issue
+- name: Set the labels on an issue using free-form fields
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: edit
+    args:
+      fields:
+        labels:
+          - autocreated
+          - ansible
+
+# Updating a field using operations: add, set & remove
+- name: Change the value of a Select dropdown
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: update
+    args:
+      fields:
+        customfield_12931: [{'set': {'value': 'Virtual'}}]
+        customfield_13820: [{'set': {'value': 'Manually'}}]
+  register: cmdb_issue
+  delegate_to: localhost
+
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    project: ANS
+    operation: fetch
+    issue: ANS-63
+  register: issue
+
+# Search for an issue
+# You can limit the search to specific fields by adding optional args. Note that args must be a dict, hence lastViewed: null
+- name: Search for an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    project: ANS
+    operation: search
+    maxresults: 10
+    jql: project=cmdb AND cf[13225]="test"
+    args:
+      fields:
+        lastViewed:
+  register: issue
+
+- name: Create a unix account for the reporter
+  become: true
+  user:
+    name: '{{ issue.meta.fields.creator.name }}'
+    comment: '{{ issue.meta.fields.creator.displayName }}'
+
+# You can get the list of valid link types at the /rest/api/2/issueLinkType
+# URL of your JIRA installation.
+- name: Create link from HSP-1 to MKY-1
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    operation: link
+    linktype: Relates
+    inwardissue: HSP-1
+    outwardissue: MKY-1
+
+# Transition an issue
+- name: Resolve the issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: transition
+    status: Resolve Issue
+    account_id: 112233445566778899aabbcc
+    fields:
+      resolution:
+        name: Done
+      description: I am done! This is the last description I will ever give you.
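+
+# A minimal sketch of transitioning by transition ID instead of name. The ID
+# value below is hypothetical; it must match one of the IDs returned by the
+# /rest/api/2/issue/<key>/transitions endpoint of your JIRA instance.
+- name: Resolve the issue by transition ID
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: transition
+    status_id: '5'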
+
+# Attach a file to an issue
+- name: Attach a file
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: HSP-1
+    operation: attach
+    attachment:
+      filename: topsecretreport.xlsx
+
+# Use username, password and client certificate authentication
+- name: Create an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    client_cert: '{{ path/to/client-cert }}'
+    client_key: '{{ path/to/client-key }}'
+
+# Use token and client certificate authentication
+- name: Create an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    token: '{{ token }}'
+    client_cert: '{{ path/to/client-cert }}'
+    client_key: '{{ path/to/client-key }}'
+"""
+
+import base64
+import binascii
+import json
+import mimetypes
+import os
+import random
+import string
+import traceback
+from urllib.request import pathname2url
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes
+from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
+from ansible.module_utils.urls import fetch_url
+
+
+class JIRA(StateModuleHelper):
+    module = dict(
+        argument_spec=dict(
+            attachment=dict(type='dict', options=dict(
+                content=dict(type='str'),
+                filename=dict(type='path', required=True),
+                mimetype=dict(type='str')
+            )),
+            uri=dict(type='str', required=True),
+            operation=dict(
+                type='str',
+                choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search', 'worklog'],
+                aliases=['command'], required=True
+            ),
+            username=dict(type='str'),
+            password=dict(type='str', no_log=True),
+            token=dict(type='str', no_log=True),
+            client_cert=dict(type='path'),
+            client_key=dict(type='path'),
+            project=dict(type='str', ),
+            summary=dict(type='str', ),
+            description=dict(type='str', ),
+            issuetype=dict(type='str', ),
+            issue=dict(type='str', aliases=['ticket']),
+            comment=dict(type='str', ),
+            comment_visibility=dict(type='dict', options=dict(
+                type=dict(type='str', choices=['group', 'role'], required=True),
+                value=dict(type='str', required=True)
+            )),
+            status=dict(type='str', ),
+            status_id=dict(type='str', ),
+            assignee=dict(type='str', ),
+            fields=dict(default={}, type='dict'),
+            linktype=dict(type='str', ),
+            inwardissue=dict(type='str', ),
+            outwardissue=dict(type='str', ),
+            jql=dict(type='str', ),
+            maxresults=dict(type='int'),
+            timeout=dict(type='float', default=10),
+            validate_certs=dict(default=True, type='bool'),
+            account_id=dict(type='str'),
+        ),
+        mutually_exclusive=[
+            ['username', 'token'],
+            ['password', 'token'],
+            ['assignee', 'account_id'],
+            ['status', 'status_id']
+        ],
+        required_together=[
+            ['username', 'password'],
+            ['client_cert', 'client_key']
+        ],
+        required_one_of=[
+            ['username', 'token'],
+        ],
+        required_if=(
+            ('operation', 'attach', ['issue', 'attachment']),
+            ('operation', 'create', ['project', 'issuetype', 'summary']),
+            ('operation', 'comment', ['issue', 'comment']),
+            ('operation', 'worklog', ['issue', 'comment']),
+            ('operation', 'fetch', ['issue']),
+            ('operation', 'transition', ['issue']),
+            ('operation', 'transition', ['status', 'status_id'], True),
+            ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
+            ('operation', 'search', ['jql']),
+        ),
+        supports_check_mode=False
+    )
+    state_param = 'operation'
+
+    def __init_module__(self):
+        if self.vars.fields is None:
+            self.vars.fields = {}
+        if self.vars.assignee:
+            self.vars.fields['assignee'] = {'name': self.vars.assignee}
+        if self.vars.account_id:
+            self.vars.fields['assignee'] = {'accountId': self.vars.account_id}
+        self.vars.uri = self.vars.uri.strip('/')
+        self.vars.set('restbase', self.vars.uri + '/rest/api/2')
+
+    @cause_changes(when="success")
+    def operation_create(self):
+        createfields = {
+            'project': {'key': self.vars.project},
+            'summary': self.vars.summary,
+            'issuetype': {'name': self.vars.issuetype}}
+
+        if self.vars.description:
+            createfields['description'] = self.vars.description
+
+        # Merge in any additional or overridden fields
+        if self.vars.fields:
+            createfields.update(self.vars.fields)
+
+        data = {'fields': createfields}
+        url = self.vars.restbase + '/issue/'
+        self.vars.meta = self.post(url, data)
+
+    @cause_changes(when="success")
+    def operation_comment(self):
+        data = {
+            'body': self.vars.comment
+        }
+        # if comment_visibility is specified restrict visibility
+        if self.vars.comment_visibility is not None:
+            data['visibility'] = self.vars.comment_visibility
+
+        # Use 'fields' to merge in any additional data
+        if self.vars.fields:
+            data.update(self.vars.fields)
+
+        url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment'
+        self.vars.meta = self.post(url, data)
+
+    @cause_changes(when="success")
+    def operation_worklog(self):
+        data = {
+            'comment': self.vars.comment
+        }
+        # if comment_visibility is specified restrict visibility
+        if self.vars.comment_visibility is not None:
+            data['visibility'] = self.vars.comment_visibility
+
+        # Use 'fields' to merge in any additional data
+        if self.vars.fields:
+            data.update(self.vars.fields)
+
+        url = self.vars.restbase + '/issue/' + self.vars.issue + '/worklog'
+        self.vars.meta = self.post(url, data)
+
+    @cause_changes(when="success")
+    def operation_edit(self):
+        data = {
+            'fields': self.vars.fields
+        }
+        url = self.vars.restbase + '/issue/' + self.vars.issue
+        self.vars.meta = self.put(url, data)
+
+    @cause_changes(when="success")
+    def operation_update(self):
+        data = {
+            "update": self.vars.fields,
+        }
+        url = self.vars.restbase + '/issue/' + self.vars.issue
+        self.vars.meta = self.put(url, data)
+
+    def operation_fetch(self):
+        url = self.vars.restbase + '/issue/' + self.vars.issue
+        self.vars.meta = self.get(url)
+
+    def operation_search(self):
+        url = self.vars.restbase + '/search?jql=' + pathname2url(self.vars.jql)
+        if self.vars.fields:
+            fields = self.vars.fields.keys()
+            url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields])
+        if self.vars.maxresults:
+            url = url + '&maxResults=' + str(self.vars.maxresults)
+
+        self.vars.meta = self.get(url)
+
+    @cause_changes(when="success")
+    def operation_transition(self):
+        # Find the transition id
+        turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions"
+        tmeta = self.get(turl)
+
+        tid = None
+        target = None
+
+        if self.vars.status is not None:
+            target = self.vars.status.strip()
+        elif self.vars.status_id is not None:
+            tid = self.vars.status_id.strip()
+
+        for t in tmeta['transitions']:
+            if target is not None:
+                if t['name'] == target:
+                    tid = t['id']
+                    break
+            else:
+                if tid == t['id']:
+                    break
+        else:
+            if target is not None:
+                raise ValueError("Failed to find a valid transition for '%s'" % target)
+            else:
+                raise ValueError("Failed to find a valid transition for ID '%s'" % tid)
+
+        fields = dict(self.vars.fields)
+        if self.vars.summary is not None:
+            fields.update({'summary': self.vars.summary})
+        if self.vars.description is not None:
+            fields.update({'description': self.vars.description})
+
+        # Perform it
+        data = {'transition': {"id": tid},
+                'fields': fields}
+        if self.vars.comment is not None:
+            data.update({"update": {
+                "comment": [{
+                    "add": {"body": self.vars.comment}
+                }],
+            }})
+        url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions"
+        self.vars.meta = self.post(url, data)
+
+    @cause_changes(when="success")
+    def operation_link(self):
+        data = {
+            'type': {'name': self.vars.linktype},
+            'inwardIssue': {'key': self.vars.inwardissue},
+            'outwardIssue': {'key': self.vars.outwardissue},
+        }
+        url = self.vars.restbase + '/issueLink/'
+        self.vars.meta = self.post(url, data)
+
+    @cause_changes(when="success")
+    def operation_attach(self):
+        v = self.vars
+        filename = v.attachment.get('filename')
+        content = v.attachment.get('content')
+
+        if not any((filename, content)):
+            raise ValueError('at least one of filename or content must be provided')
+        mime = v.attachment.get('mimetype')
+
+        if not os.path.isfile(filename):
+            raise ValueError('The provided filename does not exist: %s' % filename)
+
+        content_type, data = self._prepare_attachment(filename, content, mime)
+
+        url = v.restbase + '/issue/' + v.issue + '/attachments'
+        return True, self.post(
+            url, data, content_type=content_type, additional_headers={"X-Atlassian-Token": "no-check"}
+        )
+
+    # Ideally we'd just use prepare_multipart from ansible.module_utils.urls, but
+    # unfortunately it does not support specifying the encoding and also defaults to
+    # base64. Jira doesn't support base64 encoded attachments (and is therefore not
+    # spec compliant; go figure). I originally wrote this function as an almost
+    # exact copypasta of prepare_multipart, but ran into some encoding issues when
+    # using the noop encoder. Hand rolling the entire message body seemed to work
+    # out much better.
+    #
+    # https://community.atlassian.com/t5/Jira-questions/Jira-dosen-t-decode-base64-attachment-request-REST-API/qaq-p/916427
+    #
+    # content is expected to be a base64 encoded string since Ansible doesn't
+    # support passing raw bytes objects.
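+    #
+    # For reference, the hand-rolled body built below has roughly this shape
+    # (a sketch, with placeholders in angle brackets):
+    #
+    #   --<boundary>
+    #   Content-Disposition: form-data; name="file"; filename="<basename>"
+    #   Content-Type: <maintype>/<subtype>
+    #
+    #   <file contents as text>
+    #   --<boundary>--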
+    @staticmethod
+    def _prepare_attachment(filename, content=None, mime_type=None):
+        def escape_quotes(s):
+            return s.replace('"', '\\"')
+
+        boundary = "".join(random.choice(string.digits + string.ascii_letters) for dummy in range(30))
+        name = to_native(os.path.basename(filename))
+
+        if not mime_type:
+            try:
+                mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
+            except Exception:
+                mime_type = 'application/octet-stream'
+        main_type, sep, sub_type = mime_type.partition('/')
+
+        if not content and filename:
+            with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
+                content = f.read()
+        else:
+            try:
+                content = base64.b64decode(content)
+            except binascii.Error as e:
+                raise Exception("Unable to base64 decode file content: %s" % e)
+
+        lines = [
+            "--{0}".format(boundary),
+            'Content-Disposition: form-data; name="file"; filename="{0}"'.format(escape_quotes(name)),
+            "Content-Type: {0}".format("{0}/{1}".format(main_type, sub_type)),
+            '',
+            to_text(content),
+            "--{0}--".format(boundary),
+            ""
+        ]
+
+        return (
+            "multipart/form-data; boundary={0}".format(boundary),
+            "\r\n".join(lines)
+        )
+
+    def request(
+            self,
+            url,
+            data=None,
+            method=None,
+            content_type='application/json',
+            additional_headers=None
+    ):
+        if data and content_type == 'application/json':
+            data = json.dumps(data)
+
+        headers = {}
+        if isinstance(additional_headers, dict):
+            headers = additional_headers.copy()
+
+        # NOTE: fetch_url uses a password manager, which follows the
+        # standard request-then-challenge basic-auth semantics. However as
+        # JIRA allows some unauthorised operations it doesn't necessarily
+        # send the challenge, so the request occurs as the anonymous user,
+        # resulting in unexpected results. To work around this we manually
+        # inject the auth header up-front to ensure that JIRA treats
+        # the requests as authorized for this user.
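+        #
+        # The header injected below is one of (sketch):
+        #   Authorization: Bearer <personal access token>
+        #   Authorization: Basic <base64 of "username:password">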
+ + if self.vars.token is not None: + headers.update({ + "Content-Type": content_type, + "Authorization": "Bearer %s" % self.vars.token, + }) + else: + auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(self.vars.username, self.vars.password), + errors='surrogate_or_strict'))) + headers.update({ + "Content-Type": content_type, + "Authorization": "Basic %s" % auth, + }) + + response, info = fetch_url( + self.module, url, data=data, method=method, timeout=self.vars.timeout, headers=headers + ) + + if info['status'] not in (200, 201, 204): + error = None + try: + error = json.loads(info['body']) + except Exception: + msg = 'The request "{method} {url}" returned the unexpected status code {status} {msg}\n{body}'.format( + status=info['status'], + msg=info['msg'], + body=info.get('body'), + url=url, + method=method, + ) + self.module.fail_json(msg=to_native(msg), exception=traceback.format_exc()) + if error: + msg = [] + for key in ('errorMessages', 'errors'): + if error.get(key): + msg.append(to_native(error[key])) + if msg: + self.module.fail_json(msg=', '.join(msg)) + self.module.fail_json(msg=to_native(error)) + # Fallback print body, if it can't be decoded + self.module.fail_json(msg=to_native(info['body'])) + + body = response.read() + + if body: + return json.loads(to_text(body, errors='surrogate_or_strict')) + return {} + + def post(self, url, data, content_type='application/json', additional_headers=None): + return self.request(url, data=data, method='POST', content_type=content_type, + additional_headers=additional_headers) + + def put(self, url, data): + return self.request(url, data=data, method='PUT') + + def get(self, url): + return self.request(url) + + +def main(): + jira = JIRA() + jira.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py new file mode 100644 index 0000000000..c0d5b80b70 --- /dev/null +++ b/plugins/modules/kdeconfig.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Salvatore Mesoraca +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: kdeconfig +short_description: Manage KDE configuration files +version_added: "6.5.0" +description: + - Add or change individual settings in KDE configuration files. + - It uses B(kwriteconfig) under the hood. +options: + path: + description: + - Path to the config file. If the file does not exist it is created. + type: path + required: true + kwriteconfig_path: + description: + - Path to the kwriteconfig executable. If not specified, Ansible tries to discover it. + type: path + values: + description: + - List of values to set. + type: list + elements: dict + suboptions: + group: + description: + - The option's group. One between this and O(values[].groups) is required. + type: str + groups: + description: + - List of the option's groups. One between this and O(values[].group) is required. + type: list + elements: str + key: + description: + - The option's name. + type: str + required: true + value: + description: + - The option's value. One between this and O(values[].bool_value) is required. + type: str + bool_value: + description: + - Boolean value. + - One between this and O(values[].value) is required. + type: bool + required: true + backup: + description: + - Create a backup file. 
+ type: bool + default: false +extends_documentation_fragment: + - files + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +requirements: + - kwriteconfig +author: + - Salvatore Mesoraca (@smeso) +""" + +EXAMPLES = r""" +- name: Ensure "Homepage=https://www.ansible.com/" in group "Branding" + community.general.kdeconfig: + path: /etc/xdg/kickoffrc + values: + - group: Branding + key: Homepage + value: https://www.ansible.com/ + mode: '0644' + +- name: Ensure "KEY=true" in groups "Group" and "Subgroup", and "KEY=VALUE" in Group2 + community.general.kdeconfig: + path: /etc/xdg/someconfigrc + values: + - groups: [Group, Subgroup] + key: KEY + bool_value: true + - group: Group2 + key: KEY + value: VALUE + backup: true +""" + +RETURN = r""" # """ + +import os +import shutil +import tempfile +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +class TemporaryDirectory(object): + """Basic backport of tempfile.TemporaryDirectory""" + + def __init__(self, suffix="", prefix="tmp", dir=None): + self.name = None + self.name = tempfile.mkdtemp(suffix, prefix, dir) + + def __enter__(self): + return self.name + + def rm(self): + if self.name: + shutil.rmtree(self.name, ignore_errors=True) + self.name = None + + def __exit__(self, exc, value, tb): + self.rm() + + def __del__(self): + self.rm() + + +def run_kwriteconfig(module, cmd, path, groups, key, value): + """Invoke kwriteconfig with arguments""" + args = [cmd, '--file', path, '--key', key] + for group in groups: + args.extend(['--group', group]) + if isinstance(value, bool): + args.extend(['--type', 'bool']) + if value: + args.append('true') + else: + args.append('false') + else: + args.extend(['--', value]) + module.run_command(args, check_rc=True) + + +def run_module(module, tmpdir, kwriteconfig): + result = dict(changed=False, msg='OK', path=module.params['path']) + b_path = to_bytes(module.params['path']) + tmpfile = os.path.join(tmpdir, 'file') + b_tmpfile = to_bytes(tmpfile) + diff = dict( + before='', + after='', + before_header=result['path'], + after_header=result['path'], + ) + try: + with open(b_tmpfile, 'wb') as dst: + try: + with open(b_path, 'rb') as src: + b_data = src.read() + except IOError: + result['changed'] = True + else: + dst.write(b_data) + try: + diff['before'] = to_text(b_data) + except UnicodeError: + diff['before'] = repr(b_data) + except IOError: + module.fail_json(msg='Unable to create temporary file', traceback=traceback.format_exc()) + + for row in module.params['values']: + groups = row['groups'] + if groups is None: + groups = [row['group']] + key = row['key'] + value = row['bool_value'] + if value is None: + value = row['value'] + run_kwriteconfig(module, kwriteconfig, tmpfile, groups, key, value) + + with open(b_tmpfile, 'rb') as tmpf: + b_data = tmpf.read() + try: + diff['after'] = to_text(b_data) + except UnicodeError: + diff['after'] = repr(b_data) + + result['changed'] = result['changed'] or diff['after'] != diff['before'] + + file_args = module.load_file_common_arguments(module.params) + + if module.check_mode: + if not result['changed']: + shutil.copystat(b_path, b_tmpfile) + uid, gid = module.user_and_group(b_path) + os.chown(b_tmpfile, uid, gid) + if module._diff: + diff = {} + else: + diff = None + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + if module._diff: + result['diff'] = diff + 
+        module.exit_json(**result)
+
+    if result['changed']:
+        if module.params['backup'] and os.path.exists(b_path):
+            result['backup_file'] = module.backup_local(result['path'])
+        try:
+            module.atomic_move(b_tmpfile, os.path.abspath(b_path))
+        except IOError:
+            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc())
+
+    if result['changed']:
+        module.set_fs_attributes_if_different(file_args, result['changed'])
+    else:
+        if module._diff:
+            diff = {}
+        else:
+            diff = None
+        result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
+        if module._diff:
+            result['diff'] = diff
+    module.exit_json(**result)
+
+
+def main():
+    single_value_arg = dict(group=dict(type='str'),
+                            groups=dict(type='list', elements='str'),
+                            key=dict(type='str', required=True, no_log=False),
+                            value=dict(type='str'),
+                            bool_value=dict(type='bool'))
+    required_alternatives = [('group', 'groups'), ('value', 'bool_value')]
+    module_args = dict(
+        values=dict(type='list',
+                    elements='dict',
+                    options=single_value_arg,
+                    mutually_exclusive=required_alternatives,
+                    required_one_of=required_alternatives,
+                    required=True),
+        path=dict(type='path', required=True),
+        kwriteconfig_path=dict(type='path'),
+        backup=dict(type='bool', default=False),
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        add_file_common_args=True,
+        supports_check_mode=True,
+    )
+
+    kwriteconfig = None
+    if module.params['kwriteconfig_path'] is not None:
+        kwriteconfig = module.get_bin_path(module.params['kwriteconfig_path'], required=True)
+    else:
+        for progname in ('kwriteconfig6', 'kwriteconfig5', 'kwriteconfig', 'kwriteconfig4'):
+            kwriteconfig = module.get_bin_path(progname)
+            if kwriteconfig is not None:
+                break
+        if kwriteconfig is None:
+            module.fail_json(msg='kwriteconfig is not installed')
+    for v in module.params['values']:
+        if not v['key']:
+            module.fail_json(msg="'key' cannot be empty")
+    with TemporaryDirectory(dir=module.tmpdir) as tmpdir:
+        run_module(module, tmpdir, kwriteconfig)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py
deleted file mode 120000
index 3b40cc663e..0000000000
--- a/plugins/modules/kernel_blacklist.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/kernel_blacklist.py
\ No newline at end of file
diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py
new file mode 100644
index 0000000000..a0bad12b83
--- /dev/null
+++ b/plugins/modules/kernel_blacklist.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Alexei Znamensky (@russoz)
+# Copyright (c) 2013, Matthias Vogelgesang
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: kernel_blacklist
+author:
+  - Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+  - Add or remove kernel modules from the blacklist.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+options:
+  name:
+    type: str
+    description:
+      - Name of the kernel module to blacklist or whitelist.
+    required: true
+  state:
+    type: str
+    description:
+      - Whether the module should be present in the blacklist or absent.
+ choices: [absent, present] + default: present + blacklist_file: + type: str + description: + - If specified, use this blacklist file instead of C(/etc/modprobe.d/blacklist-ansible.conf). + default: /etc/modprobe.d/blacklist-ansible.conf +""" + +EXAMPLES = r""" +- name: Blacklist the nouveau driver module + community.general.kernel_blacklist: + name: nouveau + state: present +""" + +import os +import re + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + +class Blacklist(StateModuleHelper): + output_params = ('name', 'state') + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name))) + self.vars.filename = self.vars.blacklist_file + self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True) + if not self.vars.file_exists: + with open(self.vars.filename, 'a'): + pass + self.vars.file_exists = True + self.vars.set('lines', [], change=True, diff=True) + else: + with open(self.vars.filename) as fd: + self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True) + self.vars.set('is_blacklisted', self._is_module_blocked(), change=True) + + def _is_module_blocked(self): + for line in self.vars.lines: + stripped = line.strip() + if stripped.startswith('#'): + continue + if self.pattern.match(stripped): + return True + return False + + def state_absent(self): + if not self.vars.is_blacklisted: + return + self.vars.is_blacklisted = False + self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())] + + def state_present(self): + if self.vars.is_blacklisted: + return + self.vars.is_blacklisted = True + self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name] + + def __quit_module__(self): + if self.has_changed() and not self.module.check_mode: + bkp = self.module.backup_local(self.vars.filename) + with open(self.vars.filename, "w") as fd: + fd.writelines(["{0}\n".format(x) for x in self.vars.lines]) + self.module.add_cleanup_file(bkp) + + +def main(): + Blacklist.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py deleted file mode 120000 index e27a180a01..0000000000 --- a/plugins/modules/keycloak_authentication.py +++ /dev/null @@ -1 +0,0 @@ -./identity/keycloak/keycloak_authentication.py \ No newline at end of file diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py new file mode 100644 index 0000000000..6e84a6adfd --- /dev/null +++ b/plugins/modules/keycloak_authentication.py @@ -0,0 +1,497 @@ +#!/usr/bin/python +# Copyright (c) 2019, INSPQ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authentication + +short_description: Configure authentication in Keycloak + +description: + - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. + - It can also delete the flow. 
+version_added: "3.3.0"
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  action_group:
+    version_added: 10.2.0
+
+options:
+  realm:
+    description:
+      - The name of the realm in which the authentication flow is located.
+    required: true
+    type: str
+  alias:
+    description:
+      - Alias for the authentication flow.
+    required: true
+    type: str
+  description:
+    description:
+      - Description of the flow.
+    type: str
+  providerId:
+    description:
+      - C(providerId) for the new flow when not copied from an existing flow.
+    choices: ["basic-flow", "client-flow"]
+    type: str
+  copyFrom:
+    description:
+      - C(flowAlias) of the authentication flow to use for the copy.
+    type: str
+  authenticationExecutions:
+    description:
+      - Configuration structure for the executions.
+    type: list
+    elements: dict
+    suboptions:
+      providerId:
+        description:
+          - C(providerId) for the new flow when not copied from an existing flow.
+        type: str
+      displayName:
+        description:
+          - Name of the execution or subflow to create or update.
+        type: str
+      requirement:
+        description:
+          - Control the status of the subflow or execution.
+        choices: ["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"]
+        type: str
+      flowAlias:
+        description:
+          - Alias of the parent flow.
+        type: str
+      authenticationConfig:
+        description:
+          - Describe the config of the authentication.
+        type: dict
+      index:
+        description:
+          - Priority order of the execution.
+        type: int
+      subFlowType:
+        description:
+          - For new subflows, optionally specify the type.
+          - It is only used at creation.
+        choices: ["basic-flow", "form-flow"]
+        default: "basic-flow"
+        type: str
+        version_added: 6.6.0
+  state:
+    description:
+      - Control whether the authentication flow must exist or not.
+    choices: ["present", "absent"]
+    default: present
+    type: str
+  force:
+    type: bool
+    default: false
+    description:
+      - If V(true), allows removing the authentication flow and recreating it.
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Philippe Gauthier (@elfelip)
+  - Gaëtan Daubresse (@Gaetan2907)
+"""
+
+EXAMPLES = r"""
+- name: Create an authentication flow from first broker login and add an execution to it.
+  community.general.keycloak_authentication:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_realm: master
+    auth_username: admin
+    auth_password: password
+    realm: master
+    alias: "Copy of first broker login"
+    copyFrom: "first broker login"
+    authenticationExecutions:
+      - providerId: "test-execution1"
+        requirement: "REQUIRED"
+        authenticationConfig:
+          alias: "test.execution1.property"
+          config:
+            test1.property: "value"
+      - providerId: "test-execution2"
+        requirement: "REQUIRED"
+        authenticationConfig:
+          alias: "test.execution2.property"
+          config:
+            test2.property: "value"
+    state: present
+
+- name: Re-create the authentication flow
+  community.general.keycloak_authentication:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_realm: master
+    auth_username: admin
+    auth_password: password
+    realm: master
+    alias: "Copy of first broker login"
+    copyFrom: "first broker login"
+    authenticationExecutions:
+      - providerId: "test-provisioning"
+        requirement: "REQUIRED"
+        authenticationConfig:
+          alias: "test.provisioning.property"
+          config:
+            test.provisioning.property: "value"
+    state: present
+    force: true
+
+- name: Create an authentication flow with subflow containing an execution.
+  community.general.keycloak_authentication:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_realm: master
+    auth_username: admin
+    auth_password: password
+    realm: master
+    alias: "Copy of first broker login"
+    copyFrom: "first broker login"
+    authenticationExecutions:
+      - providerId: "test-execution1"
+        requirement: "REQUIRED"
+      - displayName: "New Subflow"
+        requirement: "REQUIRED"
+      - providerId: "auth-cookie"
+        requirement: "REQUIRED"
+        flowAlias: "New Subflow"
+    state: present
+
+- name: Remove authentication.
+  community.general.keycloak_authentication:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_realm: master
+    auth_username: admin
+    auth_password: password
+    realm: master
+    alias: "Copy of first broker login"
+    state: absent
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+
+end_state:
+  description: Representation of the authentication after module execution.
+  returned: on success
+  type: dict
+  sample:
+    {
+      "alias": "Copy of first broker login",
+      "authenticationExecutions": [
+        {
+          "alias": "review profile config",
+          "authenticationConfig": {
+            "alias": "review profile config",
+            "config": {
+              "update.profile.on.first.login": "missing"
+            },
+            "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
+          },
+          "configurable": true,
+          "displayName": "Review Profile",
+          "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c",
+          "index": 0,
+          "level": 0,
+          "providerId": "idp-review-profile",
+          "requirement": "REQUIRED",
+          "requirementChoices": [
+            "REQUIRED",
+            "ALTERNATIVE",
+            "DISABLED"
+          ]
+        }
+      ],
+      "builtIn": false,
+      "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
+      "id": "bc228863-5887-4297-b898-4d988f8eaa5c",
+      "providerId": "basic-flow",
+      "topLevel": true
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \
+    import KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, is_struct_included
+from ansible.module_utils.basic import AnsibleModule
+
+
+def find_exec_in_executions(searched_exec, executions):
+    """
+    Search if exec is contained in the executions.
+    :param searched_exec: Execution to search for.
+    :param executions: List of executions.
+    :return: Index of the execution, -1 if not found.
+    """
+    for i, existing_exec in enumerate(executions, start=0):
+        if ("providerId" in existing_exec and "providerId" in searched_exec and
+                existing_exec["providerId"] == searched_exec["providerId"] or
+                "displayName" in existing_exec and "displayName" in searched_exec and
+                existing_exec["displayName"] == searched_exec["displayName"]):
+            return i
+    return -1
+
+
+def create_or_update_executions(kc, config, realm='master'):
+    """
+    Create or update executions for an authentication flow.
+    :param kc: Keycloak API access.
+    :param config: Representation of the authentication flow including its executions.
+ :param realm: Realm + :return: tuple (changed, dict(before, after) + WHERE + bool changed indicates if changes have been made + dict(str, str) shows state before and after creation/update + """ + try: + changed = False + after = "" + before = "" + execution = None + if "authenticationExecutions" in config: + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) + for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0): + if new_exec["index"] is not None: + new_exec_index = new_exec["index"] + exec_found = False + # Get flowalias parent if given + if new_exec["flowAlias"] is not None: + flow_alias_parent = new_exec["flowAlias"] + else: + flow_alias_parent = config["alias"] + # Check if same providerId or displayName name between existing and new execution + exec_index = find_exec_in_executions(new_exec, existing_executions) + if exec_index != -1: + # Remove key that doesn't need to be compared with existing_exec + exclude_key = ["flowAlias", "subFlowType"] + for index_key, key in enumerate(new_exec, start=0): + if new_exec[key] is None: + exclude_key.append(key) + # Compare the executions to see if it need changes + if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: + exec_found = True + if new_exec['index'] is None: + new_exec_index = exec_index + before += str(existing_executions[exec_index]) + '\n' + execution = existing_executions[exec_index].copy() + # Remove exec from list in case 2 exec with same name + existing_executions[exec_index].clear() + elif new_exec["providerId"] is not None: + kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] + exec_found = True + exec_index = new_exec_index + after += str(new_exec) + '\n' + elif new_exec["displayName"] is not None: + kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"]) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] + exec_found = True + exec_index = new_exec_index + after += str(new_exec) + '\n' + if exec_found: + changed = True + if exec_index != -1: + # Update the existing execution + updated_exec = { + "id": execution["id"] + } + # add the execution configuration + if new_exec["authenticationConfig"] is not None: + if "authenticationConfig" in execution and "id" in execution["authenticationConfig"]: + kc.delete_authentication_config(execution["authenticationConfig"]["id"], realm=realm) + kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) + for key in new_exec: + # remove unwanted key for the next API call + if key not in ("flowAlias", "authenticationConfig", "subFlowType"): + updated_exec[key] = new_exec[key] + if new_exec["requirement"] is not None: + if "priority" in execution: + updated_exec["priority"] = execution["priority"] + kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) + diff = exec_index - new_exec_index + kc.change_execution_priority(updated_exec["id"], diff, realm=realm) + after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n' + return changed, dict(before=before, after=after) + except Exception as e: + kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s' + % (config["alias"], 
realm, str(e))) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(type='str', required=True), + alias=dict(type='str', required=True), + providerId=dict(type='str', choices=["basic-flow", "client-flow"]), + description=dict(type='str'), + copyFrom=dict(type='str'), + authenticationExecutions=dict(type='list', elements='dict', + options=dict( + providerId=dict(type='str'), + displayName=dict(type='str'), + requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), + flowAlias=dict(type='str'), + authenticationConfig=dict(type='dict'), + index=dict(type='int'), + subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'), + )), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default=False), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', flow={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + force = module.params.get('force') + + new_auth_repr = { + "alias": module.params.get("alias"), + "copyFrom": module.params.get("copyFrom"), + "providerId": module.params.get("providerId"), + "authenticationExecutions": module.params.get("authenticationExecutions"), + "description": module.params.get("description"), + "builtIn": module.params.get("builtIn"), + "subflow": module.params.get("subflow"), + } + + auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm) + + # Cater for when it doesn't exist (an empty dict) + if not auth_repr: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = new_auth_repr["alias"] + ' absent' + module.exit_json(**result) + + elif state == 'present': + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=new_auth_repr) + + if module.check_mode: + module.exit_json(**result) + + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + + # If the authentication still not exist on the server, raise an exception. 
+ if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + + # Configure the executions for the flow + create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['end_state'] = auth_repr + + else: + if state == 'present': + # Process an update + + if force: # If force option is true + # Delete the actual authentication flow + result['changed'] = True + if module._diff: + result['diff'] = dict(before=auth_repr, after=new_auth_repr) + if module.check_mode: + module.exit_json(**result) + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + # If the authentication still not exist on the server, raise an exception. + if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + # Configure the executions for the flow + + if module.check_mode: + module.exit_json(**result) + changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + result['changed'] |= changed + + if module._diff: + result['diff'] = diff + + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['end_state'] = auth_repr + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=auth_repr, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + + result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'], + id=auth_repr["id"]) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py new file mode 100644 index 0000000000..61672721bd --- /dev/null +++ b/plugins/modules/keycloak_authentication_required_actions.py @@ -0,0 +1,456 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authentication_required_actions + +short_description: Allows administration of Keycloak authentication required actions + +description: + - This module can register, update and delete required actions. + - It also filters out any duplicate required actions by their alias. The first occurrence is preserved. 
+version_added: 7.1.0
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  action_group:
+    version_added: 10.2.0
+
+options:
+  realm:
+    description:
+      - The name of the realm that contains the authentication required actions.
+    required: true
+    type: str
+  required_actions:
+    elements: dict
+    description:
+      - Authentication required actions.
+    suboptions:
+      alias:
+        description:
+          - Unique name of the required action.
+        required: true
+        type: str
+      config:
+        description:
+          - Configuration for the required action.
+        type: dict
+      defaultAction:
+        description:
+          - Indicates whether new users have the required action assigned to them.
+        type: bool
+      enabled:
+        description:
+          - Indicates whether the required action is enabled or not.
+        type: bool
+      name:
+        description:
+          - Displayed name of the required action. Required for registration.
+        type: str
+      priority:
+        description:
+          - Priority of the required action.
+        type: int
+      providerId:
+        description:
+          - Provider ID of the required action. Required for registration.
+        type: str
+    type: list
+  state:
+    choices: ["absent", "present"]
+    description:
+      - Control whether the realm authentication required actions are going to be registered/updated (V(present)) or deleted
+        (V(absent)).
+    required: true
+    type: str
+
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Skrekulko (@Skrekulko)
+"""
+
+EXAMPLES = r"""
+- name: Register a new required action.
+  community.general.keycloak_authentication_required_actions:
+    auth_client_id: "admin-cli"
+    auth_keycloak_url: "http://localhost:8080"
+    auth_password: "password"
+    auth_realm: "master"
+    auth_username: "admin"
+    realm: "master"
+    required_actions:
+      - alias: "TERMS_AND_CONDITIONS"
+        name: "Terms and conditions"
+        providerId: "TERMS_AND_CONDITIONS"
+        enabled: true
+    state: "present"
+
+- name: Update the newly registered required action.
+  community.general.keycloak_authentication_required_actions:
+    auth_client_id: "admin-cli"
+    auth_keycloak_url: "http://localhost:8080"
+    auth_password: "password"
+    auth_realm: "master"
+    auth_username: "admin"
+    realm: "master"
+    required_actions:
+      - alias: "TERMS_AND_CONDITIONS"
+        enabled: false
+    state: "present"
+
+- name: Delete the updated registered required action.
+  community.general.keycloak_authentication_required_actions:
+    auth_client_id: "admin-cli"
+    auth_keycloak_url: "http://localhost:8080"
+    auth_password: "password"
+    auth_realm: "master"
+    auth_username: "admin"
+    realm: "master"
+    required_actions:
+      - alias: "TERMS_AND_CONDITIONS"
+    state: "absent"
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+
+end_state:
+  description: Representation of the authentication required actions after module execution.
+  returned: on success
+  type: complex
+  contains:
+    alias:
+      description:
+        - Unique name of the required action.
+      sample: test-provider-id
+      type: str
+    config:
+      description:
+        - Configuration for the required action.
+      sample: {}
+      type: dict
+    defaultAction:
+      description:
+        - Indicates whether new users have the required action assigned to them.
+      sample: false
+      type: bool
+    enabled:
+      description:
+        - Indicates whether the required action is enabled or not.
+      sample: false
+      type: bool
+    name:
+      description:
+        - Displayed name of the required action. Required for registration.
+ sample: Test provider ID + type: str + priority: + description: + - Priority of the required action. + sample: 90 + type: int + providerId: + description: + - Provider ID of the required action. Required for registration. + sample: test-provider-id + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize_required_actions(objects): + for obj in objects: + alias = obj['alias'] + name = obj['name'] + provider_id = obj['providerId'] + + if not name: + obj['name'] = alias + + if provider_id != alias: + obj['providerId'] = alias + + return objects + + +def filter_duplicates(objects): + filtered_objects = {} + + for obj in objects: + alias = obj["alias"] + + if alias not in filtered_objects: + filtered_objects[alias] = obj + + return list(filtered_objects.values()) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(type='str', required=True), + required_actions=dict( + type='list', + elements='dict', + options=dict( + alias=dict(type='str', required=True), + config=dict(type='dict'), + defaultAction=dict(type='bool'), + enabled=dict(type='bool'), + name=dict(type='str'), + priority=dict(type='int'), + providerId=dict(type='str') + ) + ), + state=dict(type='str', choices=['present', 'absent'], required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Convenience variables + realm = module.params.get('realm') + desired_required_actions = module.params.get('required_actions') + state = module.params.get('state') + + # Sanitize required actions + desired_required_actions = sanitize_required_actions(desired_required_actions) + + # Filter out duplicate required actions + desired_required_actions = filter_duplicates(desired_required_actions) + + # Get required actions + before_required_actions = kc.get_required_actions(realm=realm) + + if state == 'present': + # Initialize empty lists to hold the required actions that need to be + # registered, updated, and original ones of the updated one + register_required_actions = [] + before_updated_required_actions = [] + updated_required_actions = [] + + # Loop through the desired required actions and check if they exist in the before required actions + for desired_required_action in desired_required_actions: + found = False + + # Loop through the before required actions and check if the aliases match + for before_required_action in before_required_actions: + if desired_required_action['alias'] == before_required_action['alias']: + update_required = False + + # Fill in the parameters + for k, v in before_required_action.items(): + if k not in desired_required_action or desired_required_action[k] is None: + desired_required_action[k] = v + + # Loop through the keys of the 
desired and before required actions
+                    # and check if there are any differences between them
+                    for key in desired_required_action.keys():
+                        if key in before_required_action and desired_required_action[key] != before_required_action[key]:
+                            update_required = True
+                            break
+
+                    # If there are differences, add the before and desired required actions
+                    # to their respective lists for updating
+                    if update_required:
+                        before_updated_required_actions.append(before_required_action)
+                        updated_required_actions.append(desired_required_action)
+                    found = True
+                    break
+            # If the desired required action is not found in the before required actions,
+            # add it to the list of required actions to register
+            if not found:
+                # Check if name is provided
+                if 'name' not in desired_required_action or desired_required_action['name'] is None:
+                    module.fail_json(
+                        msg='Unable to register required action %s in realm %s: name not included'
+                        % (desired_required_action['alias'], realm)
+                    )
+
+                # Check if provider ID is provided
+                if 'providerId' not in desired_required_action or desired_required_action['providerId'] is None:
+                    module.fail_json(
+                        msg='Unable to register required action %s in realm %s: providerId not included'
+                        % (desired_required_action['alias'], realm)
+                    )
+
+                register_required_actions.append(desired_required_action)
+
+        # Handle diff
+        if module._diff:
+            diff_required_actions = updated_required_actions.copy()
+            diff_required_actions.extend(register_required_actions)
+
+            result['diff'] = dict(
+                before=before_updated_required_actions,
+                after=diff_required_actions
+            )
+
+        # Handle changed
+        if register_required_actions or updated_required_actions:
+            result['changed'] = True
+
+        # Handle check mode
+        if module.check_mode:
+            if register_required_actions or updated_required_actions:
+                result['changed'] = True
+                result['msg'] = 'Required actions would be registered/updated'
+            else:
+                result['changed'] = False
+                result['msg'] = 'Required actions would not be registered/updated'
+
+            module.exit_json(**result)
+
+        # Register required actions
+        if register_required_actions:
+            for register_required_action in register_required_actions:
+                kc.register_required_action(realm=realm, rep=register_required_action)
+                kc.update_required_action(alias=register_required_action['alias'], realm=realm, rep=register_required_action)
+
+        # Update required actions
+        if updated_required_actions:
+            for updated_required_action in updated_required_actions:
+                kc.update_required_action(alias=updated_required_action['alias'], realm=realm, rep=updated_required_action)
+
+        # Initialize the final list of required actions
+        final_required_actions = []
+
+        # Iterate over the before_required_actions
+        for before_required_action in before_required_actions:
+            # Check if there is an updated_required_action with the same alias
+            updated_required_action_found = False
+
+            for updated_required_action in updated_required_actions:
+                if updated_required_action['alias'] == before_required_action['alias']:
+                    # Merge the two dictionaries, favoring the values from updated_required_action
+                    merged_dict = {}
+                    for key in before_required_action.keys():
+                        if key in updated_required_action:
+                            merged_dict[key] = updated_required_action[key]
+                        else:
+                            merged_dict[key] = before_required_action[key]
+
+                    for key in updated_required_action.keys():
+                        if key not in before_required_action:
+                            merged_dict[key] = updated_required_action[key]
+
+                    # Add the merged dictionary to the final list of required actions
+                    final_required_actions.append(merged_dict)
+
+                    # Mark the
updated_required_action as found
+                    updated_required_action_found = True
+
+                    # Stop looking for updated_required_action
+                    break
+
+            # If no matching updated_required_action was found, add the before_required_action to the final list of required actions
+            if not updated_required_action_found:
+                final_required_actions.append(before_required_action)
+
+        # Append any remaining updated_required_actions that were not merged
+        for updated_required_action in updated_required_actions:
+            if not any(updated_required_action['alias'] == action['alias'] for action in final_required_actions):
+                final_required_actions.append(updated_required_action)
+
+        # Append newly registered required actions
+        final_required_actions.extend(register_required_actions)
+
+        # Handle message and end state
+        result['msg'] = 'Required actions registered/updated'
+        result['end_state'] = final_required_actions
+    else:
+        # Filter out the deleted required actions
+        final_required_actions = []
+        delete_required_actions = []
+
+        for before_required_action in before_required_actions:
+            delete_action = False
+
+            for desired_required_action in desired_required_actions:
+                if before_required_action['alias'] == desired_required_action['alias']:
+                    delete_action = True
+                    break
+
+            if not delete_action:
+                final_required_actions.append(before_required_action)
+            else:
+                delete_required_actions.append(before_required_action)
+
+        # Handle diff
+        if module._diff:
+            result['diff'] = dict(
+                before=before_required_actions,
+                after=final_required_actions
+            )
+
+        # Handle changed
+        if delete_required_actions:
+            result['changed'] = True
+
+        # Handle check mode
+        if module.check_mode:
+            if delete_required_actions:
+                result['changed'] = True
+                result['msg'] = 'Required actions would be deleted'
+            else:
+                result['changed'] = False
+                result['msg'] = 'Required actions would not be deleted'
+
+            module.exit_json(**result)
+
+        # Delete required actions
+        if delete_required_actions:
+            for delete_required_action in delete_required_actions:
+                kc.delete_required_action(alias=delete_required_action['alias'], realm=realm)
+
+        # Handle message and end state
+        result['msg'] = 'Required actions deleted'
+        result['end_state'] = final_required_actions
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py
new file mode 100644
index 0000000000..ad7ada6719
--- /dev/null
+++ b/plugins/modules/keycloak_authz_authorization_scope.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Eike Frost
+# Copyright (c) 2021, Christophe Gilles
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_authz_authorization_scope
+
+short_description: Allows administration of Keycloak client authorization scopes using Keycloak API
+
+version_added: 6.6.0
+
+description:
+  - This module allows the administration of Keycloak client Authorization Scopes using the Keycloak REST API. Authorization
+    Scopes are only available if a client has Authorization enabled.
+  - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have
+    the requisite access rights.
In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the authorization scope. + - On V(present), the authorization scope is created (or updated if it exists already). + - On V(absent), the authorization scope is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the authorization scope to create. + type: str + required: true + display_name: + description: + - The display name of the authorization scope. + type: str + required: false + icon_uri: + description: + - The icon URI for the authorization scope. + type: str + required: false + client_id: + description: + - The C(clientId) of the Keycloak client that should have the authorization scope. + - This is usually a human-readable name of the Keycloak client. + type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. + type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Manage Keycloak file:delete authorization scope + keycloak_authz_authorization_scope: + name: file:delete + state: present + display_name: File delete + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the authorization scope after module execution. + returned: on success + type: complex + contains: + id: + description: ID of the authorization scope. + type: str + returned: when O(state=present) + sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41 + name: + description: Name of the authorization scope. + type: str + returned: when O(state=present) + sample: file:delete + display_name: + description: Display name of the authorization scope. + type: str + returned: when O(state=present) + sample: File delete + icon_uri: + description: Icon URI for the authorization scope. 
+ type: str + returned: when O(state=present) + sample: http://localhost/icon.png +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', + choices=['present', 'absent']), + name=dict(type='str', required=True), + display_name=dict(type='str'), + icon_uri=dict(type='str'), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Convenience variables + state = module.params.get('state') + name = module.params.get('name') + display_name = module.params.get('display_name') + icon_uri = module.params.get('icon_uri') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + + # Get the "id" of the client based on the usually more human-readable + # "clientId" + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + # Get current state of the Authorization Scope using its name as the search + # filter. This returns False if it is not found. + before_authz_scope = kc.get_authz_authorization_scope_by_name( + name=name, client_id=cid, realm=realm) + + # Generate a JSON payload for Keycloak Admin API. This is needed for + # "create" and "update" operations. + desired_authz_scope = {} + desired_authz_scope['name'] = name + desired_authz_scope['displayName'] = display_name + desired_authz_scope['iconUri'] = icon_uri + + # Add "id" to payload for modify operations + if before_authz_scope: + desired_authz_scope['id'] = before_authz_scope['id'] + + # Ensure that undefined (null) optional parameters are presented as empty + # strings in the desired state. This makes comparisons with current state + # much easier. + for k, v in desired_authz_scope.items(): + if not v: + desired_authz_scope[k] = '' + + # Do the above for the current state + if before_authz_scope: + for k in ['displayName', 'iconUri']: + if k not in before_authz_scope: + before_authz_scope[k] = '' + + if before_authz_scope and state == 'present': + changes = False + for k, v in desired_authz_scope.items(): + if before_authz_scope[k] != v: + changes = True + # At this point we know we have to update the object anyways, + # so there's no need to do more work. 
+                break
+
+        if changes:
+            if module._diff:
+                result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope)
+
+            if module.check_mode:
+                result['changed'] = True
+                result['msg'] = 'Authorization scope would be updated'
+                module.exit_json(**result)
+            else:
+                kc.update_authz_authorization_scope(
+                    payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm)
+                result['changed'] = True
+                result['msg'] = 'Authorization scope updated'
+        else:
+            result['changed'] = False
+            result['msg'] = 'Authorization scope not updated'
+
+        result['end_state'] = desired_authz_scope
+    elif not before_authz_scope and state == 'present':
+        if module._diff:
+            result['diff'] = dict(before={}, after=desired_authz_scope)
+
+        if module.check_mode:
+            result['changed'] = True
+            result['msg'] = 'Authorization scope would be created'
+            module.exit_json(**result)
+        else:
+            kc.create_authz_authorization_scope(
+                payload=desired_authz_scope, client_id=cid, realm=realm)
+            result['changed'] = True
+            result['msg'] = 'Authorization scope created'
+        result['end_state'] = desired_authz_scope
+    elif before_authz_scope and state == 'absent':
+        if module._diff:
+            result['diff'] = dict(before=before_authz_scope, after={})
+
+        if module.check_mode:
+            result['changed'] = True
+            result['msg'] = 'Authorization scope would be removed'
+            module.exit_json(**result)
+        else:
+            kc.remove_authz_authorization_scope(
+                id=before_authz_scope['id'], client_id=cid, realm=realm)
+            result['changed'] = True
+            result['msg'] = 'Authorization scope removed'
+    elif not before_authz_scope and state == 'absent':
+        result['changed'] = False
+    else:
+        module.fail_json(msg='Unable to determine what to do with authorization scope %s of client %s in realm %s' % (
+            name, client_id, realm))
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py
new file mode 100644
index 0000000000..87b8fde834
--- /dev/null
+++ b/plugins/modules/keycloak_authz_custom_policy.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Eike Frost
+# Copyright (c) 2021, Christophe Gilles
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_authz_custom_policy
+
+short_description: Allows administration of Keycloak client custom Javascript policies using Keycloak API
+
+version_added: 7.5.0
+
+description:
+  - This module allows the administration of Keycloak client custom Javascript policies using the Keycloak REST API. Custom
+    Javascript policies are only available if a client has Authorization enabled and if they have been deployed to the Keycloak
+    server as JAR files.
+  - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have
+    the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate
+    realm definition with the scope tailored to your needs and a user having the expected roles.
+  - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services
+    paths and payloads have not officially been documented by the Keycloak project.
+    U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/).
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+  action_group:
+    version_added: 10.2.0
+
+options:
+  state:
+    description:
+      - State of the custom policy.
+      - On V(present), the custom policy is created (or updated if it exists already).
+      - On V(absent), the custom policy is removed if it exists.
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+  name:
+    description:
+      - Name of the custom policy to create.
+    type: str
+    required: true
+  policy_type:
+    description:
+      - The type of the policy. This must match the name of the custom policy deployed to the server.
+      - Multiple policies pointing to the same policy type can be created, but their names have to differ.
+    type: str
+    required: true
+  client_id:
+    description:
+      - The C(clientId) of the Keycloak client that should have the custom policy attached to it.
+      - This is usually a human-readable name of the Keycloak client.
+    type: str
+    required: true
+  realm:
+    description:
+      - The name of the Keycloak realm the Keycloak client is in.
+    type: str
+    required: true
+
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Samuli Seppänen (@mattock)
+"""
+
+EXAMPLES = r"""
+- name: Manage Keycloak custom authorization policy
+  community.general.keycloak_authz_custom_policy:
+    name: OnlyOwner
+    state: present
+    policy_type: script-policy.js
+    client_id: myclient
+    realm: myrealm
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: keycloak
+    auth_password: keycloak
+    auth_realm: master
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+
+end_state:
+  description: Representation of the custom policy after module execution.
+  returned: on success
+  type: dict
+  contains:
+    name:
+      description: Name of the custom policy.
+      type: str
+      returned: when O(state=present)
+      sample: OnlyOwner
+    policy_type:
+      description: Type of custom policy.
+      type: str
+      returned: when O(state=present)
+      sample: script-policy.js
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        state=dict(type='str', default='present',
+                   choices=['present', 'absent']),
+        name=dict(type='str', required=True),
+        policy_type=dict(type='str', required=True),
+        client_id=dict(type='str', required=True),
+        realm=dict(type='str', required=True)
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=(
+                               [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
+
+    result = dict(changed=False, msg='', end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    # Convenience variables
+    state = module.params.get('state')
+    name = module.params.get('name')
+    policy_type = module.params.get('policy_type')
+    client_id = module.params.get('client_id')
+    realm = module.params.get('realm')
+
+    cid = kc.get_client_id(client_id, realm=realm)
+    if not cid:
+        module.fail_json(msg='Invalid client %s for realm %s' %
+                         (client_id, realm))
+
+    before_authz_custom_policy = kc.get_authz_policy_by_name(
+        name=name, client_id=cid, realm=realm)
+
+    desired_authz_custom_policy = {}
+    desired_authz_custom_policy['name'] = name
+    desired_authz_custom_policy['type'] = policy_type
+
+    # Modifying existing custom policies is not possible
+    if before_authz_custom_policy and state == 'present':
+        result['msg'] = "Custom policy %s already exists" % (name)
+        result['changed'] = False
+        result['end_state'] = desired_authz_custom_policy
+    elif not before_authz_custom_policy and state == 'present':
+        if module.check_mode:
+            result['msg'] = "Would create custom policy %s" % (name)
+        else:
+            kc.create_authz_custom_policy(
+                payload=desired_authz_custom_policy, policy_type=policy_type, client_id=cid, realm=realm)
+            result['msg'] = "Custom policy %s created" % (name)
+
+        result['changed'] = True
+        result['end_state'] = desired_authz_custom_policy
+    elif before_authz_custom_policy and state == 'absent':
+        if module.check_mode:
+            result['msg'] = "Would remove custom policy %s" % (name)
+        else:
+            kc.remove_authz_custom_policy(
+                policy_id=before_authz_custom_policy['id'], client_id=cid, realm=realm)
+            result['msg'] = "Custom policy %s removed" % (name)
+
+        result['changed'] = True
+        result['end_state'] = {}
+    elif not before_authz_custom_policy and state == 'absent':
+        result['msg'] = "Custom policy %s does not exist" % (name)
+        result['changed'] = False
+        result['end_state'] = {}
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py
new file mode 100644
index 0000000000..b36db802cb
--- /dev/null
+++ b/plugins/modules/keycloak_authz_permission.py
@@ -0,0 +1,429 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Eike Frost
+# Copyright (c) 2021, Christophe Gilles
+# GNU General Public License v3.0+
(see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authz_permission + +version_added: 7.2.0 + +short_description: Allows administration of Keycloak client authorization permissions using Keycloak API + +description: + - This module allows the administration of Keycloak client authorization permissions using the Keycloak REST API. Authorization + permissions are only available if a client has Authorization enabled. + - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular POST and PUT operations + are targeted at permission endpoints, whereas GET requests go to policies endpoint. To make matters more interesting the + JSON responses from GET requests return data in a different format than what is expected for POST and PUT. The end result + is that it is not possible to detect changes to things like policies, scopes or resources - at least not without a large + number of additional API calls. Therefore this module always updates authorization permissions instead of attempting to + determine if changes are truly needed. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the authorization permission. + - On V(present), the authorization permission is created (or updated if it exists already). + - On V(absent), the authorization permission is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the authorization permission to create. + type: str + required: true + description: + description: + - The description of the authorization permission. + type: str + required: false + permission_type: + description: + - The type of authorization permission. + - On V(scope) create a scope-based permission. + - On V(resource) create a resource-based permission. + type: str + required: true + choices: + - resource + - scope + decision_strategy: + description: + - The decision strategy to use with this permission. + type: str + default: UNANIMOUS + required: false + choices: + - UNANIMOUS + - AFFIRMATIVE + - CONSENSUS + resources: + description: + - Resource names to attach to this permission. + - Scope-based permissions can only include one resource. + - Resource-based permissions can include multiple resources. + type: list + elements: str + default: [] + required: false + scopes: + description: + - Scope names to attach to this permission. + - Resource-based permissions cannot have scopes attached to them. + type: list + elements: str + default: [] + required: false + policies: + description: + - Policy names to attach to this permission. 
+    type: list
+    elements: str
+    default: []
+    required: false
+  client_id:
+    description:
+      - The C(clientId) of the Keycloak client that should have the authorization permission.
+      - This is usually a human-readable name of the Keycloak client.
+    type: str
+    required: true
+  realm:
+    description:
+      - The name of the Keycloak realm the Keycloak client is in.
+    type: str
+    required: true
+
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Samuli Seppänen (@mattock)
+"""
+
+EXAMPLES = r"""
+- name: Manage scope-based Keycloak authorization permission
+  community.general.keycloak_authz_permission:
+    name: ScopePermission
+    state: present
+    description: Scope permission
+    permission_type: scope
+    scopes:
+      - file:delete
+    policies:
+      - Default Policy
+    client_id: myclient
+    realm: myrealm
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: keycloak
+    auth_password: keycloak
+    auth_realm: master
+
+- name: Manage resource-based Keycloak authorization permission
+  community.general.keycloak_authz_permission:
+    name: ResourcePermission
+    state: present
+    description: Resource permission
+    permission_type: resource
+    resources:
+      - Default Resource
+    policies:
+      - Default Policy
+    client_id: myclient
+    realm: myrealm
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: keycloak
+    auth_password: keycloak
+    auth_realm: master
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+
+end_state:
+  description: Representation of the authorization permission after module execution.
+  returned: on success
+  type: complex
+  contains:
+    id:
+      description: ID of the authorization permission.
+      type: str
+      returned: when O(state=present)
+      sample: 9da05cd2-b273-4354-bbd8-0c133918a454
+    name:
+      description: Name of the authorization permission.
+      type: str
+      returned: when O(state=present)
+      sample: ResourcePermission
+    description:
+      description: Description of the authorization permission.
+      type: str
+      returned: when O(state=present)
+      sample: Resource Permission
+    type:
+      description: Type of the authorization permission.
+      type: str
+      returned: when O(state=present)
+      sample: resource
+    decisionStrategy:
+      description: The decision strategy to use.
+      type: str
+      returned: when O(state=present)
+      sample: UNANIMOUS
+    logic:
+      description: The logic used for the permission (part of the payload, but has a fixed value).
+      type: str
+      returned: when O(state=present)
+      sample: POSITIVE
+    resources:
+      description: IDs of resources attached to this permission.
+      type: list
+      returned: when O(state=present)
+      sample:
+        - 49e052ff-100d-4b79-a9dd-52669ed3c11d
+    scopes:
+      description: IDs of scopes attached to this permission.
+      type: list
+      returned: when O(state=present)
+      sample:
+        - 9da05cd2-b273-4354-bbd8-0c133918a454
+    policies:
+      description: IDs of policies attached to this permission.
+      type: list
+      returned: when O(state=present)
+      sample:
+        - 9da05cd2-b273-4354-bbd8-0c133918a454
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        state=dict(type='str', default='present',
+                   choices=['present', 'absent']),
+        name=dict(type='str', required=True),
+        description=dict(type='str'),
+        permission_type=dict(type='str', choices=['scope', 'resource'], required=True),
+        decision_strategy=dict(type='str', default='UNANIMOUS',
+                               choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']),
+        resources=dict(type='list', elements='str', default=[]),
+        scopes=dict(type='list', elements='str', default=[]),
+        policies=dict(type='list', elements='str', default=[]),
+        client_id=dict(type='str', required=True),
+        realm=dict(type='str', required=True)
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=(
+                               [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
+
+    # Convenience variables
+    state = module.params.get('state')
+    name = module.params.get('name')
+    description = module.params.get('description')
+    permission_type = module.params.get('permission_type')
+    decision_strategy = module.params.get('decision_strategy')
+    client_id = module.params.get('client_id')
+    realm = module.params.get('realm')
+    resources = module.params.get('resources')
+    scopes = module.params.get('scopes')
+    policies = module.params.get('policies')
+
+    if permission_type == 'scope' and state == 'present':
+        if scopes == []:
+            module.fail_json(msg='Scopes need to be defined when permission type is set to scope!')
+        if len(resources) > 1:
+            module.fail_json(msg='Only one resource can be defined for a scope permission!')
+
+    if permission_type == 'resource' and state == 'present':
+        if resources == []:
+            module.fail_json(msg='A resource needs to be defined when permission type is set to resource!')
+        if scopes != []:
+            module.fail_json(msg='Scopes cannot be defined when permission type is set to resource!')
+
+    result = dict(changed=False, msg='', end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    # Get id of the client based on client_id
+    cid = kc.get_client_id(client_id, realm=realm)
+    if not cid:
+        module.fail_json(msg='Invalid client %s for realm %s' %
+                         (client_id, realm))
+
+    # Get current state of the permission using its name as the search
+    # filter. This returns False if it is not found.
+    permission = kc.get_authz_permission_by_name(
+        name=name, client_id=cid, realm=realm)
+
+    # Generate a JSON payload for Keycloak Admin API. This is needed for
+    # "create" and "update" operations.
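+    # For orientation, the payload assembled below ends up shaped roughly like
+    # this for a scope-based permission (a sketch: the IDs are the illustrative
+    # sample values from RETURN above; the real values are resolved from names
+    # by the lookups that follow):
+    #
+    #   {
+    #       "name": "ScopePermission",
+    #       "description": "Scope permission",
+    #       "type": "scope",
+    #       "decisionStrategy": "UNANIMOUS",
+    #       "logic": "POSITIVE",
+    #       "resources": ["49e052ff-100d-4b79-a9dd-52669ed3c11d"],
+    #       "scopes": ["9da05cd2-b273-4354-bbd8-0c133918a454"],
+    #       "policies": ["9da05cd2-b273-4354-bbd8-0c133918a454"]
+    #   }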
+    payload = {}
+    payload['name'] = name
+    payload['description'] = description
+    payload['type'] = permission_type
+    payload['decisionStrategy'] = decision_strategy
+    payload['logic'] = 'POSITIVE'
+    payload['scopes'] = []
+    payload['resources'] = []
+    payload['policies'] = []
+
+    if permission_type == 'scope':
+        # Add the resource id, if any, to the payload. While the data type is a
+        # list, it is only possible to have one entry in it based on what Keycloak
+        # Admin Console does.
+        r = False
+        resource_scopes = []
+
+        if resources:
+            r = kc.get_authz_resource_by_name(resources[0], cid, realm)
+            if not r:
+                module.fail_json(msg='Unable to find authorization resource with name %s for client %s in realm %s' % (resources[0], cid, realm))
+            else:
+                payload['resources'].append(r['_id'])
+
+                for rs in r['scopes']:
+                    resource_scopes.append(rs['id'])
+
+        # Generate a list of scope ids based on scope names. Fail if the
+        # defined resource does not include all those scopes.
+        for scope in scopes:
+            s = kc.get_authz_authorization_scope_by_name(scope, cid, realm)
+            if not s:
+                # Guard against a missing scope so the id lookup below cannot crash
+                module.fail_json(msg='Unable to find authorization scope with name %s for client %s in realm %s' % (scope, client_id, realm))
+            if r and s['id'] not in resource_scopes:
+                module.fail_json(msg='Resource %s does not include scope %s for client %s in realm %s' % (resources[0], scope, client_id, realm))
+            else:
+                payload['scopes'].append(s['id'])
+
+    elif permission_type == 'resource':
+        if resources:
+            for resource in resources:
+                r = kc.get_authz_resource_by_name(resource, cid, realm)
+                if not r:
+                    module.fail_json(msg='Unable to find authorization resource with name %s for client %s in realm %s' % (resource, cid, realm))
+                else:
+                    payload['resources'].append(r['_id'])
+
+    # Add policy ids, if any, to the payload.
+    if policies:
+        for policy in policies:
+            p = kc.get_authz_policy_by_name(policy, cid, realm)
+
+            if p:
+                payload['policies'].append(p['id'])
+            else:
+                module.fail_json(msg='Unable to find authorization policy with name %s for client %s in realm %s' % (policy, client_id, realm))
+
+    # Add "id" to payload for update operations
+    if permission:
+        payload['id'] = permission['id']
+
+        # Handle the special case where the user attempts to change an already
+        # existing permission's type - something that can't be done without a
+        # full delete -> (re)create cycle.
+        if permission['type'] != payload['type']:
+            module.fail_json(msg='Modifying the type of permission (scope/resource) is not supported: \
+                             permission %s of client %s in realm %s unchanged' % (permission['id'], cid, realm))
+
+    # Updating an authorization permission is tricky for several reasons.
+    # Firstly, the current permission is retrieved using a _policy_ endpoint,
+    # not from a permission endpoint. Also, the data that is returned is in a
+    # different format than what is expected by the payload. So, comparing the
+    # current state attribute by attribute to the payload is not possible. For
+    # example the data contains a JSON object "config" which may contain the
+    # authorization type, but which is not required in the payload. Moreover,
+    # information about resources, scopes and policies is _not_ present in the
+    # data. So, there is no way to determine if any of those fields have
+    # changed. Therefore the best options we have are
+    #
+    # a) Always apply the payload without checking the current state
+    # b) Refuse to make any changes to any settings (only support create and delete)
+    #
+    # The approach taken here is a).
+    #
+    if permission and state == 'present':
+        if module.check_mode:
+            result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. 
\ + Would apply desired state without checking the current state.' + else: + kc.update_authz_permission(payload=payload, permission_type=permission_type, id=permission['id'], client_id=cid, realm=realm) + result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \ + Applying desired state without checking the current state.' + + # Assume that something changed, although we don't know if that is the case. + result['changed'] = True + result['end_state'] = payload + elif not permission and state == 'present': + if module.check_mode: + result['msg'] = 'Would create permission' + else: + kc.create_authz_permission(payload=payload, permission_type=permission_type, client_id=cid, realm=realm) + result['msg'] = 'Permission created' + + result['changed'] = True + result['end_state'] = payload + elif permission and state == 'absent': + if module.check_mode: + result['msg'] = 'Would remove permission' + else: + kc.remove_authz_permission(id=permission['id'], client_id=cid, realm=realm) + result['msg'] = 'Permission removed' + + result['changed'] = True + + elif not permission and state == 'absent': + result['changed'] = False + else: + module.fail_json(msg='Unable to determine what to do with permission %s of client %s in realm %s' % ( + name, client_id, realm)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py new file mode 100644 index 0000000000..c60da778ed --- /dev/null +++ b/plugins/modules/keycloak_authz_permission_info.py @@ -0,0 +1,172 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authz_permission_info + +version_added: 7.2.0 + +short_description: Query Keycloak client authorization permissions information + +description: + - This module allows querying information about Keycloak client authorization permissions from the resources endpoint using + the Keycloak REST API. Authorization permissions are only available if a client has Authorization enabled. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + action_group: + version_added: 10.2.0 + +options: + name: + description: + - Name of the authorization permission to create. + type: str + required: true + client_id: + description: + - The clientId of the keycloak client that should have the authorization scope. + - This is usually a human-readable name of the Keycloak client. + type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. 
+ type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Query Keycloak authorization permission + community.general.keycloak_authz_permission_info: + name: ScopePermission + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +queried_state: + description: State of the resource (a policy) as seen by Keycloak. + returned: on success + type: complex + contains: + id: + description: ID of the authorization permission. + type: str + sample: 9da05cd2-b273-4354-bbd8-0c133918a454 + name: + description: Name of the authorization permission. + type: str + sample: ResourcePermission + description: + description: Description of the authorization permission. + type: str + sample: Resource Permission + type: + description: Type of the authorization permission. + type: str + sample: resource + decisionStrategy: + description: The decision strategy. + type: str + sample: UNANIMOUS + logic: + description: The logic used for the permission (part of the payload, but has a fixed value). + type: str + sample: POSITIVE + config: + description: Configuration of the permission (empty in all observed cases). + type: dict + sample: {} +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + name=dict(type='str', required=True), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + # Convenience variables + name = module.params.get('name') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + + result = dict(changed=False, msg='', queried_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Get id of the client based on client_id + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + # Get current state of the permission using its name as the search + # filter. This returns False if it is not found. 
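+    # For illustration, a successful lookup returns the policy-style
+    # representation documented as queried_state above, for example
+    # (a hypothetical sketch built from the RETURN sample values):
+    #
+    #   {
+    #       "id": "9da05cd2-b273-4354-bbd8-0c133918a454",
+    #       "name": "ResourcePermission",
+    #       "description": "Resource Permission",
+    #       "type": "resource",
+    #       "logic": "POSITIVE",
+    #       "decisionStrategy": "UNANIMOUS",
+    #       "config": {}
+    #   }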
+    permission = kc.get_authz_permission_by_name(
+        name=name, client_id=cid, realm=realm)
+
+    result['queried_state'] = permission
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py
deleted file mode 120000
index 89e481a669..0000000000
--- a/plugins/modules/keycloak_client.py
+++ /dev/null
@@ -1 +0,0 @@
-./identity/keycloak/keycloak_client.py
\ No newline at end of file
diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py
new file mode 100644
index 0000000000..ed13b106e2
--- /dev/null
+++ b/plugins/modules/keycloak_client.py
@@ -0,0 +1,1425 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Eike Frost
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_client
+
+short_description: Allows administration of Keycloak clients using Keycloak API
+
+description:
+  - This module allows the administration of Keycloak clients using the Keycloak REST API. It requires access to the REST
+    API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default
+    Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored
+    to your needs and a user having the expected roles.
+  - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+    at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used
+    as well.
+  - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect
+    client and the other way around. Be careful. If you do not specify a setting, usually a sensible default is chosen.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  action_group:
+    version_added: 10.2.0
+
+options:
+  state:
+    description:
+      - State of the client.
+      - On V(present), the client is created (or updated if it exists already).
+      - On V(absent), the client is removed if it exists.
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+
+  realm:
+    description:
+      - The realm to create the client in.
+    type: str
+    default: master
+
+  client_id:
+    description:
+      - Client ID of client to be worked on. This is usually an alphanumeric name chosen by you. Either this or O(id) is required.
+        If you specify both, O(id) takes precedence. This is C(clientId) in the Keycloak REST API.
+    aliases:
+      - clientId
+    type: str
+
+  id:
+    description:
+      - ID of client to be worked on. This is usually a UUID. Either this or O(client_id) is required. If you specify both,
+        this takes precedence.
+    type: str
+
+  name:
+    description:
+      - Name of the client (this is not the same as O(client_id)).
+    type: str
+
+  description:
+    description:
+      - Description of the client in Keycloak.
+    type: str
+
+  root_url:
+    description:
+      - Root URL appended to relative URLs for this client. This is C(rootUrl) in the Keycloak REST API.
+    aliases:
+      - rootUrl
+    type: str
+
+  admin_url:
+    description:
+      - URL to the admin interface of the client. This is C(adminUrl) in the Keycloak REST API.
+    aliases:
+      - adminUrl
+    type: str
+
+  base_url:
+    description:
+      - Default URL to use when the auth server needs to redirect or link back to the client. This is C(baseUrl) in the Keycloak
+        REST API.
+    aliases:
+      - baseUrl
+    type: str
+
+  enabled:
+    description:
+      - Is this client enabled or not?
+    type: bool
+
+  client_authenticator_type:
+    description:
+      - How do clients authenticate with the auth server? Either V(client-secret), V(client-jwt), or V(client-x509) can be
+        chosen. When using V(client-secret), the module parameter O(secret) can set it, for V(client-jwt), you can use the
+        keys C(use.jwks.url), C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter to configure
+        its behavior. For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison) and C(x509.subjectdn)
+        in the O(attributes) module parameter to configure which certificate(s) to accept.
+      - This is C(clientAuthenticatorType) in the Keycloak REST API.
+    choices: ['client-secret', 'client-jwt', 'client-x509']
+    aliases:
+      - clientAuthenticatorType
+    type: str
+
+  secret:
+    description:
+      - When using O(client_authenticator_type=client-secret) (the default), you can specify a secret here (otherwise one
+        is generated if it does not exist). If changing this secret, the module does not register a change currently (but
+        the changed secret is saved).
+    type: str
+
+  registration_access_token:
+    description:
+      - The registration access token provides access for clients to the client registration service. This is C(registrationAccessToken)
+        in the Keycloak REST API.
+    aliases:
+      - registrationAccessToken
+    type: str
+
+  default_roles:
+    description:
+      - List of default roles for this client. If the client roles referenced do not exist yet, they are created. This is
+        C(defaultRoles) in the Keycloak REST API.
+    aliases:
+      - defaultRoles
+    type: list
+    elements: str
+
+  redirect_uris:
+    description:
+      - Acceptable redirect URIs for this client. This is C(redirectUris) in the Keycloak REST API.
+    aliases:
+      - redirectUris
+    type: list
+    elements: str
+
+  web_origins:
+    description:
+      - List of allowed CORS origins. This is C(webOrigins) in the Keycloak REST API.
+    aliases:
+      - webOrigins
+    type: list
+    elements: str
+
+  not_before:
+    description:
+      - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). This is C(notBefore) in the
+        Keycloak REST API.
+    type: int
+    aliases:
+      - notBefore
+
+  bearer_only:
+    description:
+      - The access type of this client is bearer-only. This is C(bearerOnly) in the Keycloak REST API.
+    aliases:
+      - bearerOnly
+    type: bool
+
+  consent_required:
+    description:
+      - If enabled, users have to consent to client access. This is C(consentRequired) in the Keycloak REST API.
+    aliases:
+      - consentRequired
+    type: bool
+
+  standard_flow_enabled:
+    description:
+      - Enable standard flow for this client or not (OpenID connect). This is C(standardFlowEnabled) in the Keycloak REST
+        API.
+    aliases:
+      - standardFlowEnabled
+    type: bool
+
+  implicit_flow_enabled:
+    description:
+      - Enable implicit flow for this client or not (OpenID connect). This is C(implicitFlowEnabled) in the Keycloak REST
+        API.
+    aliases:
+      - implicitFlowEnabled
+    type: bool
+
+  direct_access_grants_enabled:
+    description:
+      - Are direct access grants enabled for this client or not (OpenID connect). This is C(directAccessGrantsEnabled) in
+        the Keycloak REST API.
+ aliases: + - directAccessGrantsEnabled + type: bool + + service_accounts_enabled: + description: + - Are service accounts enabled for this client or not (OpenID connect). This is C(serviceAccountsEnabled) in the Keycloak + REST API. + aliases: + - serviceAccountsEnabled + type: bool + + authorization_services_enabled: + description: + - Are authorization services enabled for this client or not (OpenID connect). This is C(authorizationServicesEnabled) + in the Keycloak REST API. + aliases: + - authorizationServicesEnabled + type: bool + + public_client: + description: + - Is the access type for this client public or not. This is C(publicClient) in the Keycloak REST API. + aliases: + - publicClient + type: bool + + frontchannel_logout: + description: + - Is frontchannel logout enabled for this client or not. This is C(frontchannelLogout) in the Keycloak REST API. + aliases: + - frontchannelLogout + type: bool + + protocol: + description: + - Type of client. + - At creation only, default value is V(openid-connect) if O(protocol) is omitted. + - The V(docker-v2) value was added in community.general 8.6.0. + type: str + choices: ['openid-connect', 'saml', 'docker-v2'] + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client or not. This is C(fullScopeAllowed) in the Keycloak REST API. + aliases: + - fullScopeAllowed + type: bool + + node_re_registration_timeout: + description: + - Cluster node re-registration timeout for this client. This is C(nodeReRegistrationTimeout) in the Keycloak REST API. + type: int + aliases: + - nodeReRegistrationTimeout + + registered_nodes: + description: + - Dict of registered cluster nodes (with C(nodename) as the key and last registration time as the value). This is C(registeredNodes) + in the Keycloak REST API. + type: dict + aliases: + - registeredNodes + + client_template: + description: + - Client template to use for this client. If it does not exist this field is silently dropped. This is C(clientTemplate) + in the Keycloak REST API. + type: str + aliases: + - clientTemplate + + use_template_config: + description: + - Whether or not to use configuration from the O(client_template). This is C(useTemplateConfig) in the Keycloak REST + API. + aliases: + - useTemplateConfig + type: bool + + use_template_scope: + description: + - Whether or not to use scope configuration from the O(client_template). This is C(useTemplateScope) in the Keycloak + REST API. + aliases: + - useTemplateScope + type: bool + + use_template_mappers: + description: + - Whether or not to use mapper configuration from the O(client_template). This is C(useTemplateMappers) in the Keycloak + REST API. + aliases: + - useTemplateMappers + type: bool + + always_display_in_console: + description: + - Whether or not to display this client in account console, even if the user does not have an active session. + aliases: + - alwaysDisplayInConsole + type: bool + version_added: 4.7.0 + + surrogate_auth_required: + description: + - Whether or not surrogate auth is required. This is C(surrogateAuthRequired) in the Keycloak REST API. + aliases: + - surrogateAuthRequired + type: bool + + authorization_settings: + description: + - A data structure defining the authorization settings for this client. For reference, please see the Keycloak API docs + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). This is C(authorizationSettings) + in the Keycloak REST API. 
+ type: dict + aliases: + - authorizationSettings + + authentication_flow_binding_overrides: + description: + - Override realm authentication flow bindings. + type: dict + suboptions: + browser: + description: + - Flow ID of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are + mutually exclusive. + type: str + + browser_name: + description: + - Flow name of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are + mutually exclusive. + aliases: + - browserName + type: str + version_added: 9.1.0 + + direct_grant: + description: + - Flow ID of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) + are mutually exclusive. + aliases: + - directGrant + type: str + + direct_grant_name: + description: + - Flow name of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) + are mutually exclusive. + aliases: + - directGrantName + type: str + version_added: 9.1.0 + aliases: + - authenticationFlowBindingOverrides + version_added: 3.4.0 + + client_scopes_behavior: + description: + - Determine how O(default_client_scopes) and O(optional_client_scopes) behave when updating an existing client. + - 'V(ignore): Do not change the client scopes of an existing client. This is the default for backward compatibility.' + - 'V(patch): Add missing scopes, do not remove any missing scopes.' + - 'V(idempotent): Make the client scopes exactly as specified, adding and removing scopes as needed.' + aliases: + - clientScopesBehavior + type: str + choices: ['ignore', 'patch', 'idempotent'] + default: 'ignore' + version_added: 11.4.0 + + default_client_scopes: + description: + - List of default client scopes. + - See O(client_scopes_behavior) for how this behaves when updating an existing client. + aliases: + - defaultClientScopes + type: list + elements: str + version_added: 4.7.0 + + optional_client_scopes: + description: + - List of optional client scopes. + - See O(client_scopes_behavior) for how this behaves when updating an existing client. + aliases: + - optionalClientScopes + type: list + elements: str + version_added: 4.7.0 + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client. This is C(protocolMappers) in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + consentRequired: + description: + - Specifies whether a user needs to provide consent to a client for this mapper to be active. + type: bool + + consentText: + description: + - The human-readable name of the consent the user is presented to accept. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + protocol: + description: + - This specifies for which protocol this protocol mapper is active. + choices: ['openid-connect', 'saml', 'docker-v2'] + type: str + + protocolMapper: + description: + - 'The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - V(docker-v2-allow-all-mapper). + - V(oidc-address-mapper). + - V(oidc-full-name-mapper). + - V(oidc-group-membership-mapper). + - V(oidc-hardcoded-claim-mapper). + - V(oidc-hardcoded-role-mapper). + - V(oidc-role-name-mapper). + - V(oidc-script-based-protocol-mapper). + - V(oidc-sha256-pairwise-sub-mapper). + - V(oidc-usermodel-attribute-mapper). + - V(oidc-usermodel-client-role-mapper). + - V(oidc-usermodel-property-mapper). + - V(oidc-usermodel-realm-role-mapper). + - V(oidc-usersessionmodel-note-mapper). + - V(saml-group-membership-mapper). + - V(saml-hardcode-attribute-mapper). + - V(saml-hardcode-role-mapper). + - V(saml-role-list-mapper). + - V(saml-role-name-mapper). + - V(saml-user-attribute-mapper). + - V(saml-user-property-mapper). + - V(saml-user-session-note-mapper). + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) field. + type: dict + + attributes: + description: + - A dict of further attributes for this client. This can contain various configuration settings; an example is given + in the examples section. While an exhaustive list of permissible options is not available; possible options as of + Keycloak 3.4 are listed below. The Keycloak API does not validate whether a given option is appropriate for the protocol + used; if specified anyway, Keycloak does not use it. + type: dict + suboptions: + saml.authnstatement: + description: + - For SAML clients, boolean specifying whether or not a statement containing method and timestamp should be included + in the login response. + saml.client.signature: + description: + - For SAML clients, boolean specifying whether a client signature is required and validated. + saml.encrypt: + description: + - Boolean specifying whether SAML assertions should be encrypted with the client's public key. + saml.force.post.binding: + description: + - For SAML clients, boolean specifying whether always to use POST binding for responses. + saml.onetimeuse.condition: + description: + - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. + saml.server.signature: + description: + - Boolean specifying whether SAML documents should be signed by the realm. + saml.server.signature.keyinfo.ext: + description: + - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion + of the signing key ID in the SAML Extensions element. + saml.signature.algorithm: + description: + - Signature algorithm used to sign SAML documents. One of V(RSA_SHA256), V(RSA_SHA1), V(RSA_SHA512), or V(DSA_SHA1). + saml.signing.certificate: + description: + - SAML signing key certificate, base64-encoded. + saml.signing.private.key: + description: + - SAML signing key private key, base64-encoded. 
+      saml_assertion_consumer_url_post:
+        description:
+          - SAML POST Binding URL for the client's assertion consumer service (login responses).
+      saml_assertion_consumer_url_redirect:
+        description:
+          - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+      saml_force_name_id_format:
+        description:
+          - For SAML clients, boolean specifying whether to ignore the requested NameID subject format and use the configured
+            one instead.
+      saml_name_id_format:
+        description:
+          - For SAML clients, the NameID format to use (one of V(username), V(email), V(transient), or V(persistent)).
+      saml_signature_canonicalization_method:
+        description:
+          - SAML signature canonicalization method. This is one of four values, namely V(http://www.w3.org/2001/10/xml-exc-c14n#)
+            for EXCLUSIVE, V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315)
+            for INCLUSIVE, and V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+      saml_single_logout_service_url_post:
+        description:
+          - SAML POST binding URL for the client's single logout service.
+      saml_single_logout_service_url_redirect:
+        description:
+          - SAML redirect binding URL for the client's single logout service.
+      user.info.response.signature.alg:
+        description:
+          - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of V(RS256) or V(unsigned).
+      request.object.signature.alg:
+        description:
+          - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending an OIDC request object. One
+            of V(any), V(none), V(RS256).
+      use.jwks.url:
+        description:
+          - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client public keys.
+      jwks.url:
+        description:
+          - For OpenID-Connect clients, URL where client keys in JWK format are stored.
+      jwt.credential.certificate:
+        description:
+          - For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded.
+      x509.subjectdn:
+        description:
+          - For OpenID-Connect clients, subject which is used to authenticate the client.
+        type: str
+        version_added: 9.5.0
+
+      x509.allow.regex.pattern.comparison:
+        description:
+          - For OpenID-Connect clients, boolean specifying whether to allow C(x509.subjectdn) to be interpreted as a regular
+            expression.
+        type: bool
+        version_added: 9.5.0
+
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Eike Frost (@eikef)
+"""
+
+EXAMPLES = r"""
+- name: Create or update Keycloak client (minimal example), authentication with credentials
+  community.general.keycloak_client:
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    client_id: test
+    state: present
+  delegate_to: localhost
+
+
+- name: Create or update Keycloak client (minimal example), authentication with token
+  community.general.keycloak_client:
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    token: TOKEN
+    client_id: test
+    state: present
+  delegate_to: localhost
+
+
+- name: Delete a Keycloak client
+  community.general.keycloak_client:
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    client_id: test
+    state: absent
+  delegate_to: localhost
+
+
+- name: Create or update a Keycloak client (minimal example), with x509 authentication
+  community.general.keycloak_client:
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    realm: master
+    state: present
+    client_id: test
+    client_authenticator_type: client-x509
+    attributes:
+      x509.subjectdn: "CN=client"
+      x509.allow.regex.pattern.comparison: false
+
+
+- name: Create or update a Keycloak client (with all the bells and whistles)
+  community.general.keycloak_client:
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    state: present
+    realm: master
+    client_id: test
+    id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
+    name: this_is_a_test
+    description: Description of this wonderful client
+    root_url: https://www.example.com/
+    admin_url: https://www.example.com/admin_url
+    base_url: basepath
+    enabled: true
+    client_authenticator_type: client-secret
+    secret: REALLYWELLKEPTSECRET
+    redirect_uris:
+      - https://www.example.com/*
+      - http://localhost:8888/
+    web_origins:
+      - https://www.example.com/*
+    not_before: 1507825725
+    bearer_only: false
+    consent_required: false
+    standard_flow_enabled: true
+    implicit_flow_enabled: false
+    direct_access_grants_enabled: false
+    service_accounts_enabled: false
+    authorization_services_enabled: false
+    public_client: false
+    frontchannel_logout: false
+    protocol: openid-connect
+    full_scope_allowed: false
+    node_re_registration_timeout: -1
+    client_template: test
+    use_template_config: false
+    use_template_scope: false
+    use_template_mappers: false
+    always_display_in_console: true
+    registered_nodes:
+      node01.example.com: 1507828202
+    registration_access_token: eyJWT_TOKEN
+    surrogate_auth_required: false
+    default_roles:
+      - test01
+      - test02
+    authentication_flow_binding_overrides:
+      browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb
+    protocol_mappers:
+      - config:
+          access.token.claim: true
+          claim.name: "family_name"
+          id.token.claim: true
+          jsonType.label: String
+          user.attribute: lastName
+          userinfo.token.claim: true
+        consentRequired: true
+        consentText: "${familyName}"
+        name: family name
+        protocol: openid-connect
+        protocolMapper: oidc-usermodel-property-mapper
+      - config:
+          attribute.name: Role
+          attribute.nameformat: Basic
+          single: false
+        consentRequired: false
+        name: role list
+        protocol: saml
+        protocolMapper: saml-role-list-mapper
+    attributes:
+      saml.authnstatement: true
+      saml.client.signature: true
+      saml.force.post.binding: true
+      saml.server.signature: true
+      saml.signature.algorithm: RSA_SHA256
+      saml.signing.certificate: CERTIFICATEHERE
+      saml.signing.private.key: PRIVATEKEYHERE
+      saml_force_name_id_format: false
+      saml_name_id_format: username
+      saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
+      user.info.response.signature.alg: RS256
+      request.object.signature.alg: RS256
+      use.jwks.url: true
+      jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+      jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+  delegate_to: localhost
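+
+
+# A minimal sketch of the client scope behaviors added in 11.4.0; the scope names
+# below are hypothetical. With 'idempotent', scopes not listed here are removed.
+- name: Create or update a Keycloak client with exactly the given client scopes
+  community.general.keycloak_client:
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    client_id: test
+    state: present
+    client_scopes_behavior: idempotent
+    default_client_scopes:
+      - profile
+      - roles
+    optional_client_scopes:
+      - offline_access
+  delegate_to: localhost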
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+  sample: "Client testclient has been updated"
+
+proposed:
+  description: Representation of proposed client.
+  returned: always
+  type: dict
+  sample: {"clientId": "test"}
+
+existing:
+  description: Representation of existing client (sample is truncated).
+  returned: always
+  type: dict
+  sample:
+    {
+      "adminUrl": "http://www.example.com/admin_url",
+      "attributes": {
+        "request.object.signature.alg": "RS256"
+      }
+    }
+
+end_state:
+  description: Representation of client after module execution (sample is truncated).
+  returned: on success
+  type: dict
+  sample:
+    {
+      "adminUrl": "http://www.example.com/admin_url",
+      "attributes": {
+        "request.object.signature.alg": "RS256"
+      }
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+import copy
+
+
+PROTOCOL_OPENID_CONNECT = 'openid-connect'
+PROTOCOL_SAML = 'saml'
+PROTOCOL_DOCKER_V2 = 'docker-v2'
+CLIENT_META_DATA = ['authorizationServicesEnabled']
+
+
+def normalise_scopes_for_behavior(desired_client, before_client, clientScopesBehavior):
+    """
+    Normalize the desired and existing client scopes according to the specified behavior.
+
+    This function adjusts the lists of default and optional client scopes in the desired client
+    configuration based on the selected behavior:
+    - 'ignore': The desired scopes are set to match the existing scopes.
+    - 'patch': Any scopes present in the existing configuration but missing from the desired configuration
+      are appended to the desired scopes.
+    - 'idempotent': No modification is made; the desired scopes are used as-is.
+
+    :param desired_client:
+        type: dict
+        description: The desired client configuration, including default and optional client scopes.
+
+    :param before_client:
+        type: dict
+        description: The current client configuration, including default and optional client scopes.
+
+    :param clientScopesBehavior:
+        type: str
+        description: The behavior mode for handling client scopes. Must be one of 'ignore', 'patch', or 'idempotent'.
+
+    :return:
+        type: tuple
+        description: Returns a tuple of (desired_client, before_client) after normalization.
+    """
+    desired_client = copy.deepcopy(desired_client)
+    before_client = copy.deepcopy(before_client)
+    if clientScopesBehavior == 'ignore':
+        desired_client['defaultClientScopes'] = copy.deepcopy(before_client['defaultClientScopes'])
+        desired_client['optionalClientScopes'] = copy.deepcopy(before_client['optionalClientScopes'])
+    elif clientScopesBehavior == 'patch':
+        for scope in before_client['defaultClientScopes']:
+            if scope not in desired_client['defaultClientScopes']:
+                desired_client['defaultClientScopes'].append(scope)
+        for scope in before_client['optionalClientScopes']:
+            if scope not in desired_client['optionalClientScopes']:
+                desired_client['optionalClientScopes'].append(scope)
+
+    return desired_client, before_client
+
+
+def check_optional_scopes_not_default(desired_client, clientScopesBehavior, module):
+    """
+    Ensure that no client scope is assigned as both default and optional.
+
+    This function checks the desired client configuration to verify that no scope is present
+    in both the default and optional client scopes. If such a conflict is found, the module
+    execution fails with an appropriate error message.
+
+    :param desired_client:
+        type: dict
+        description: The desired client configuration, including default and optional client scopes.
+
+    :param clientScopesBehavior:
+        type: str
+        description: The behavior mode for handling client scopes. Must be one of 'ignore', 'patch', or 'idempotent'.
+
+    :param module:
+        type: AnsibleModule
+        description: The Ansible module instance, used to fail execution if a conflict is detected.
+
+    :return:
+        type: None
+        description: Returns None. Fails the module if a scope is both default and optional.
+    """
+    if clientScopesBehavior == 'ignore':
+        return
+    for scope in desired_client['optionalClientScopes']:
+        if scope in desired_client['defaultClientScopes']:
+            module.fail_json(msg='Client scope %s cannot be both default and optional' % scope)
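+
+
+# Illustration of normalise_scopes_for_behavior() on hypothetical data (not executed):
+#
+#   before  = {'defaultClientScopes': ['web-origins', 'roles'], 'optionalClientScopes': []}
+#   desired = {'defaultClientScopes': ['roles', 'profile'], 'optionalClientScopes': []}
+#
+#   'ignore'     -> desired default scopes become ['web-origins', 'roles']; no change is detected.
+#   'patch'      -> desired default scopes become ['roles', 'profile', 'web-origins']; scopes are only added.
+#   'idempotent' -> desired default scopes stay ['roles', 'profile']; 'web-origins' is removed later on.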
+def normalise_cr(clientrep, remove_ids=False):
+    """ Re-sorts any properties where the order does not matter so that diffs are minimised, and adds default values
+    where appropriate so that change detection is more effective.
+
+    :param clientrep: the clientrep dict to be sanitized
+    :param remove_ids: If set to true, the unique IDs of objects are removed so that diff and change detection do not
+                       alert when the IDs of objects are not usually known (e.g. for protocol_mappers)
+    :return: normalised clientrep dict
+    """
+    # Avoid the dict passed in to be modified
+    clientrep = copy.deepcopy(clientrep)
+
+    if remove_ids:
+        clientrep.pop('id', None)
+
+    if 'defaultClientScopes' in clientrep:
+        clientrep['defaultClientScopes'] = list(sorted(clientrep['defaultClientScopes']))
+    else:
+        clientrep['defaultClientScopes'] = []
+
+    if 'optionalClientScopes' in clientrep:
+        clientrep['optionalClientScopes'] = list(sorted(clientrep['optionalClientScopes']))
+    else:
+        clientrep['optionalClientScopes'] = []
+
+    if 'redirectUris' in clientrep:
+        clientrep['redirectUris'] = list(sorted(clientrep['redirectUris']))
+    else:
+        clientrep['redirectUris'] = []
+
+    if 'protocolMappers' in clientrep:
+        clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
+        for mapper in clientrep['protocolMappers']:
+            if remove_ids:
+                mapper.pop('id', None)
+
+            # Convert bool to string
+            if 'config' in mapper:
+                for key, value in mapper['config'].items():
+                    if isinstance(value, bool):
+                        mapper['config'][key] = str(value).lower()
+
+            # Set to a default value.
+            mapper['consentRequired'] = mapper.get('consentRequired', False)
+    else:
+        clientrep['protocolMappers'] = []
+
+    if 'attributes' in clientrep:
+        for key, value in clientrep['attributes'].items():
+            if isinstance(value, bool):
+                clientrep['attributes'][key] = str(value).lower()
+        clientrep['attributes'].pop('client.secret.creation.time', None)
+    else:
+        clientrep['attributes'] = {}
+
+    if 'webOrigins' in clientrep:
+        clientrep['webOrigins'] = sorted(clientrep['webOrigins'])
+    else:
+        clientrep['webOrigins'] = []
+
+    return clientrep
+
+
+def normalize_kc_resp(clientrep):
+    # kc drops the variable 'authorizationServicesEnabled' if set to false
+    # to minimize diff/changes we set it to false if not set by kc
+    if clientrep and 'authorizationServicesEnabled' not in clientrep:
+        clientrep['authorizationServicesEnabled'] = False
+
+
+def sanitize_cr(clientrep):
+    """ Removes probably sensitive details from a client representation.
+
+    :param clientrep: the clientrep dict to be sanitized
+    :return: sanitized clientrep dict
+    """
+    result = copy.deepcopy(clientrep)
+    if 'secret' in result:
+        result['secret'] = 'no_log'
+    if 'attributes' in result:
+        attributes = result['attributes']
+        if isinstance(attributes, dict):
+            if 'saml.signing.private.key' in attributes:
+                attributes['saml.signing.private.key'] = 'no_log'
+            if 'saml.encryption.private.key' in attributes:
+                attributes['saml.encryption.private.key'] = 'no_log'
+    return normalise_cr(result)
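+
+
+# Example of the combined effect on hypothetical input (not executed):
+#   sanitize_cr({'secret': 'hunter2', 'redirectUris': ['b', 'a'], 'attributes': {'use.jwks.url': True}})
+# roughly yields
+#   {'secret': 'no_log', 'redirectUris': ['a', 'b'], 'attributes': {'use.jwks.url': 'true'}, ...}
+# with the remaining list-valued keys defaulted to [] by normalise_cr().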
+def get_authentication_flow_id(flow_name, realm, kc):
+    """ Get the authentication flow ID based on the flow name, realm, and Keycloak client.
+
+    Args:
+        flow_name (str): The name of the authentication flow.
+        realm (str): The name of the realm.
+        kc (KeycloakAPI): The Keycloak API client instance.
+
+    Returns:
+        str: The ID of the authentication flow.
+
+    Note:
+        Fails the module if the authentication flow with the given name is not found in the realm.
+    """
+    flow = kc.get_authentication_flow_by_alias(flow_name, realm)
+    if flow:
+        return flow["id"]
+    kc.module.fail_json(msg='Authentication flow %s not found in realm %s' % (flow_name, realm))
+
+
+def flow_binding_from_dict_to_model(newClientFlowBinding, realm, kc):
+    """ Convert a dictionary representing client flow bindings to a model representation.
+
+    Args:
+        newClientFlowBinding (dict): A dictionary containing client flow bindings.
+        realm (str): The name of the realm.
+        kc (KeycloakAPI): An instance of the Keycloak API client.
+
+    Returns:
+        dict: A dictionary representing the model flow bindings. The dictionary has two keys:
+            - "browser" (str or None): The ID of the browser authentication flow binding, or None if not provided.
+            - "direct_grant" (str or None): The ID of the direct grant authentication flow binding, or None if not provided.
+
+    Note:
+        Fails the module if an authentication flow referenced by name is not found in the realm.
+    """
+
+    modelFlow = {
+        "browser": None,
+        "direct_grant": None
+    }
+
+    for k, v in newClientFlowBinding.items():
+        if not v:
+            continue
+        if k == "browser":
+            modelFlow["browser"] = v
+        elif k == "browser_name":
+            modelFlow["browser"] = get_authentication_flow_id(v, realm, kc)
+        elif k == "direct_grant":
+            modelFlow["direct_grant"] = v
+        elif k == "direct_grant_name":
+            modelFlow["direct_grant"] = get_authentication_flow_id(v, realm, kc)
+
+    return modelFlow
+
+
+def find_match(iterable, attribute, name):
+    """
+    Search for an element in a list of dictionaries based on a given attribute and value.
+
+    This function iterates over the elements of an iterable (typically a list of dictionaries)
+    and returns the first element whose value for the specified attribute matches `name`.
+
+    :param iterable:
+        type: iterable (commonly list[dict])
+        description: The collection of elements to search within (usually a list of dictionaries).
+
+    :param attribute:
+        type: str
+        description: The dictionary key/attribute used for comparison.
+
+    :param name:
+        type: Any
+        description: The value to search for within the given attribute.
+
+    :return:
+        type: dict | None
+        description: Returns the first dictionary where the attribute matches the given value case-insensitively.
+                     Returns `None` if no match is found.
+    """
+    name_lower = str(name).lower()
+    return next(
+        (
+            value
+            for value in iterable
+            if attribute in value and str(value[attribute]).lower() == name_lower
+        ),
+        None,
+    )
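+
+
+# Usage example on hypothetical data (not executed): the comparison is case-insensitive, so
+#   find_match([{'name': 'Profile'}, {'name': 'email'}], 'name', 'profile')
+# returns {'name': 'Profile'}, while a miss returns None.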
+def add_default_client_scopes(desired_client, before_client, realm, kc):
+    """
+    Adds missing default client scopes to a Keycloak client.
+
+    This function compares the desired default client scopes specified in `desired_client`
+    with the current default client scopes in `before_client`. For each scope that is present
+    in `desired_client["defaultClientScopes"]` but missing from `before_client['defaultClientScopes']`,
+    it retrieves the scope information from Keycloak and adds it to the client.
+
+    :param desired_client:
+        type: dict
+        description: The desired client configuration, including the list of default client scopes.
+
+    :param before_client:
+        type: dict
+        description: The current client configuration, including the list of default client scopes.
+
+    :param realm:
+        type: str
+        description: The name of the Keycloak realm.
+
+    :param kc:
+        type: KeycloakAPI
+        description: An instance of the Keycloak API client.
+
+    Returns:
+        None
+    """
+    desired_default_scope = desired_client["defaultClientScopes"]
+    missing_scopes = [item for item in desired_default_scope if item not in before_client['defaultClientScopes']]
+    if not missing_scopes:
+        return
+    client_scopes = kc.get_clientscopes(realm)
+    for name in missing_scopes:
+        scope = find_match(client_scopes, "name", name)
+        if scope:
+            kc.add_default_clientscope(scope['id'], realm, desired_client['clientId'])
+
+
+def add_optional_client_scopes(desired_client, before_client, realm, kc):
+    """
+    Adds missing optional client scopes to a Keycloak client.
+
+    This function compares the desired optional client scopes specified in `desired_client`
+    with the current optional client scopes in `before_client`. For each scope that is present
+    in `desired_client["optionalClientScopes"]` but missing from `before_client['optionalClientScopes']`,
+    it retrieves the scope information from Keycloak and adds it to the client.
+
+    :param desired_client:
+        type: dict
+        description: The desired client configuration, including the list of optional client scopes.
+
+    :param before_client:
+        type: dict
+        description: The current client configuration, including the list of optional client scopes.
+
+    :param realm:
+        type: str
+        description: The name of the Keycloak realm.
+
+    :param kc:
+        type: KeycloakAPI
+        description: An instance of the Keycloak API client.
+
+    Returns:
+        None
+    """
+    desired_optional_scope = desired_client["optionalClientScopes"]
+    missing_scopes = [item for item in desired_optional_scope if item not in before_client['optionalClientScopes']]
+    if not missing_scopes:
+        return
+    client_scopes = kc.get_clientscopes(realm)
+    for name in missing_scopes:
+        scope = find_match(client_scopes, "name", name)
+        if scope:
+            kc.add_optional_clientscope(scope['id'], realm, desired_client['clientId'])
+
+
+def remove_default_client_scopes(desired_client, before_client, realm, kc):
+    """
+    Removes default client scopes from a Keycloak client that are no longer desired.
+
+    This function compares the current default client scopes in `before_client`
+    with the desired default client scopes in `desired_client`. For each scope that is present
+    in `before_client["defaultClientScopes"]` but missing from `desired_client['defaultClientScopes']`,
+    it retrieves the scope information from Keycloak and removes it from the client.
+
+    :param desired_client:
+        type: dict
+        description: The desired client configuration, including the list of default client scopes.
+
+    :param before_client:
+        type: dict
+        description: The current client configuration, including the list of default client scopes.
+
+    :param realm:
+        type: str
+        description: The name of the Keycloak realm.
+
+    :param kc:
+        type: KeycloakAPI
+        description: An instance of the Keycloak API client.
+
+    Returns:
+        None
+    """
+    before_default_scope = before_client["defaultClientScopes"]
+    missing_scopes = [item for item in before_default_scope if item not in desired_client['defaultClientScopes']]
+    if not missing_scopes:
+        return
+    client_scopes = kc.get_default_clientscopes(realm, desired_client['clientId'])
+    for name in missing_scopes:
+        scope = find_match(client_scopes, "name", name)
+        if scope:
+            kc.delete_default_clientscope(scope['id'], realm, desired_client['clientId'])
+
+
+def remove_optional_client_scopes(desired_client, before_client, realm, kc):
+    """
+    Removes optional client scopes from a Keycloak client that are no longer desired.
+
+    This function compares the current optional client scopes in `before_client`
+    with the desired optional client scopes in `desired_client`. For each scope that is present
+    in `before_client["optionalClientScopes"]` but missing from `desired_client['optionalClientScopes']`,
+    it retrieves the scope information from Keycloak and removes it from the client.
+
+    :param desired_client:
+        type: dict
+        description: The desired client configuration, including the list of optional client scopes.
+
+    :param before_client:
+        type: dict
+        description: The current client configuration, including the list of optional client scopes.
+
+    :param realm:
+        type: str
+        description: The name of the Keycloak realm.
+
+    :param kc:
+        type: KeycloakAPI
+        description: An instance of the Keycloak API client.
+
+    Returns:
+        None
+    """
+    before_optional_scope = before_client["optionalClientScopes"]
+    missing_scopes = [item for item in before_optional_scope if item not in desired_client['optionalClientScopes']]
+    if not missing_scopes:
+        return
+    client_scopes = kc.get_optional_clientscopes(realm, desired_client['clientId'])
+    for name in missing_scopes:
+        scope = find_match(client_scopes, "name", name)
+        if scope:
+            kc.delete_optional_clientscope(scope['id'], realm, desired_client['clientId'])
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    protmapper_spec = dict(
+        consentRequired=dict(type='bool'),
+        consentText=dict(type='str'),
+        id=dict(type='str'),
+        name=dict(type='str'),
+        protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]),
+        protocolMapper=dict(type='str'),
+        config=dict(type='dict'),
+    )
+
+    authentication_flow_spec = dict(
+        browser=dict(type='str'),
+        browser_name=dict(type='str', aliases=['browserName']),
+        direct_grant=dict(type='str', aliases=['directGrant']),
+        direct_grant_name=dict(type='str', aliases=['directGrantName']),
+    )
+
+    meta_args = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        realm=dict(type='str', default='master'),
+
+        id=dict(type='str'),
+        client_id=dict(type='str', aliases=['clientId']),
+        name=dict(type='str'),
+        description=dict(type='str'),
+        root_url=dict(type='str', aliases=['rootUrl']),
+        admin_url=dict(type='str', aliases=['adminUrl']),
+        base_url=dict(type='str', aliases=['baseUrl']),
+        surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+        enabled=dict(type='bool'),
+        client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt', 'client-x509'], aliases=['clientAuthenticatorType']),
+        secret=dict(type='str', no_log=True),
+        registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
+        default_roles=dict(type='list', elements='str', aliases=['defaultRoles']),
+        redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']),
+        web_origins=dict(type='list', elements='str', aliases=['webOrigins']),
+        not_before=dict(type='int', aliases=['notBefore']),
+        bearer_only=dict(type='bool', aliases=['bearerOnly']),
+        consent_required=dict(type='bool', aliases=['consentRequired']),
+        standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+        implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+        direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+        service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+        authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+        public_client=dict(type='bool', aliases=['publicClient']),
+        frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+        protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]),
+        attributes=dict(type='dict'),
+        full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+        node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+        registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+        client_template=dict(type='str', aliases=['clientTemplate']),
+        use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+        use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+        use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+        always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']),
+        authentication_flow_binding_overrides=dict(
+            type='dict',
+            aliases=['authenticationFlowBindingOverrides'],
+            options=authentication_flow_spec,
+            required_one_of=[['browser', 'direct_grant', 'browser_name', 'direct_grant_name']],
+            mutually_exclusive=[['browser', 'browser_name'], ['direct_grant', 'direct_grant_name']],
+        ),
+        protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+        authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+        client_scopes_behavior=dict(type='str', aliases=['clientScopesBehavior'], choices=['ignore', 'patch', 'idempotent'], default='ignore'),
+        default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']),
+        optional_client_scopes=dict(type='list', elements='str', aliases=['optionalClientScopes']),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=([['client_id', 'id'],
+                                             ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
+
+    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    cid = module.params.get('id')
+    clientScopesBehavior = module.params.get('client_scopes_behavior')
+    state = module.params.get('state')
+
+    # Filter and map the parameter names that apply to the client
+    client_params = [x for x in module.params
+                     if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+                     module.params.get(x) is not None]
+
+    # See if it already exists in Keycloak
+    if cid is None:
+        before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+        if before_client is not None:
+            cid = before_client['id']
+    else:
+        before_client = kc.get_client_by_id(cid, realm=realm)
+
+    normalize_kc_resp(before_client)
+
+    if before_client is None:
+        before_client = {}
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = {}
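+
+    # For example (hypothetical input): module parameters such as
+    #   {'client_id': 'test', 'redirect_uris': ['https://example.com/*']}
+    # end up in the changeset camelCased for the Keycloak API:
+    #   {'clientId': 'test', 'redirectUris': ['https://example.com/*']}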
+    for client_param in client_params:
+        new_param_value = module.params.get(client_param)
+
+        # Unfortunately, the ansible argument spec checker introduces variables with null values when
+        # they are not specified
+        if client_param == 'protocol_mappers':
+            new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value]
+        elif client_param == 'authentication_flow_binding_overrides':
+            new_param_value = flow_binding_from_dict_to_model(new_param_value, realm, kc)
+        elif client_param == 'attributes' and 'attributes' in before_client:
+            attributes_copy = copy.deepcopy(before_client['attributes'])
+            attributes_copy.update(new_param_value)
+            new_param_value = attributes_copy
+        elif client_param in ['clientScopesBehavior', 'client_scopes_behavior']:
+            continue
+
+        changeset[camel(client_param)] = new_param_value
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+    desired_client = copy.deepcopy(before_client)
+    desired_client.update(changeset)
+
+    result['proposed'] = sanitize_cr(changeset)
+    result['existing'] = sanitize_cr(before_client)
+
+    # Cater for when it doesn't exist (an empty dict)
+    if not before_client:
+        if state == 'absent':
+            # Do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['changed'] = False
+            result['end_state'] = {}
+            result['msg'] = 'Client does not exist; doing nothing.'
+            module.exit_json(**result)
+
+        # Process a creation
+        result['changed'] = True
+
+        if 'clientId' not in desired_client:
+            module.fail_json(msg='client_id needs to be specified when creating a new client')
+        if 'protocol' not in desired_client:
+            desired_client['protocol'] = PROTOCOL_OPENID_CONNECT
+
+        if module._diff:
+            result['diff'] = dict(before='', after=sanitize_cr(desired_client))
+
+        if module.check_mode:
+            module.exit_json(**result)
+
+        # create it
+        kc.create_client(desired_client, realm=realm)
+        after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm)
+
+        result['end_state'] = sanitize_cr(after_client)
+
+        result['msg'] = 'Client %s has been created.' % desired_client['clientId']
+        module.exit_json(**result)
+
+    else:
+        if state == 'present':
+            # We can only compare the current client with the proposed updates we have
+            desired_client_with_scopes, before_client_with_scopes = normalise_scopes_for_behavior(desired_client, before_client, clientScopesBehavior)
+            check_optional_scopes_not_default(desired_client, clientScopesBehavior, module)
+            before_norm = normalise_cr(before_client_with_scopes, remove_ids=True)
+            desired_norm = normalise_cr(desired_client_with_scopes, remove_ids=True)
+            # no changes
+            if before_norm == desired_norm:
+                result['changed'] = False
+                result['end_state'] = sanitize_cr(before_client)
+                result['msg'] = 'No changes required for Client %s.' % desired_client['clientId']
+                module.exit_json(**result)
+
+            # Process an update
+            result['changed'] = True
+
+            if module.check_mode:
+                result['end_state'] = sanitize_cr(desired_client_with_scopes)
+                if module._diff:
+                    result['diff'] = dict(before=sanitize_cr(before_client),
+                                          after=sanitize_cr(desired_client))
+                module.exit_json(**result)
+
+            # do the update
+            kc.update_client(cid, desired_client, realm=realm)
+
+            remove_default_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc)
+            remove_optional_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc)
+            add_default_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc)
+            add_optional_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc)
+
+            after_client = kc.get_client_by_id(cid, realm=realm)
+            normalize_kc_resp(after_client)
+
+            if module._diff:
+                result['diff'] = dict(before=sanitize_cr(before_client),
+                                      after=sanitize_cr(after_client))
+
+            result['end_state'] = sanitize_cr(after_client)
+
+            result['msg'] = 'Client %s has been updated.' % desired_client['clientId']
+            module.exit_json(**result)
+
+        else:
+            # Process a deletion (because state was not 'present')
+            result['changed'] = True
+
+            if module._diff:
+                result['diff'] = dict(before=sanitize_cr(before_client), after='')
+
+            if module.check_mode:
+                module.exit_json(**result)
+
+            # delete it
+            kc.delete_client(cid, realm=realm)
+            result['proposed'] = {}
+
+            result['end_state'] = {}
+
+            result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py
deleted file mode 120000
index 02243ca68d..0000000000
--- a/plugins/modules/keycloak_client_rolemapping.py
+++ /dev/null
@@ -1 +0,0 @@
-identity/keycloak/keycloak_client_rolemapping.py
\ No newline at end of file
diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py
new file mode 100644
index 0000000000..53ac32c2e9
--- /dev/null
+++ b/plugins/modules/keycloak_client_rolemapping.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_client_rolemapping
+
+short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API
+
+version_added: 3.5.0
+
+description:
+  - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. It requires access
+    to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+    In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+    the scope tailored to your needs and a user having the expected roles.
+  - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+    at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+  - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+    by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+    suitable for the API.
+  - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API
+    to translate the name into the role ID.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  action_group:
+    version_added: 10.2.0
+
+options:
+  state:
+    description:
+      - State of the client_rolemapping.
+      - On V(present), the client_rolemapping is created if it does not yet exist, or updated with the parameters
+        you provide.
+      - On V(absent), the client_rolemapping is removed if it exists.
+    default: 'present'
+    type: str
+    choices:
+      - present
+      - absent
+
+  realm:
+    type: str
+    description:
+      - The Keycloak realm under which this role_representation resides.
+    default: 'master'
+
+  group_name:
+    type: str
+    description:
+      - Name of the group to be mapped.
+      - Either this parameter or O(gid) is required; providing O(gid) instead reduces the number of API calls.
+  parents:
+    version_added: "7.1.0"
+    type: list
+    description:
+      - List of parent groups for the group to handle, sorted top to bottom.
+      - Set this if your group is a subgroup and you do not provide the GID in O(gid).
+    elements: dict
+    suboptions:
+      id:
+        type: str
+        description:
+          - Identify parent by ID.
+          - Needs fewer API calls than using O(parents[].name).
+          - A deep parent chain can be started at any point when the first given parent is given as an ID.
+          - Note that in principle both ID and name can be specified at the same time, but the current implementation
+            always uses just one of them, with the ID being preferred.
+      name:
+        type: str
+        description:
+          - Identify parent by name.
+          - Needs more internal API calls than using O(parents[].id) to map names to IDs under the hood.
+          - When giving a parent chain with only names, it must be complete up to the top.
+          - Note that in principle both ID and name can be specified at the same time, but the current implementation
+            always uses just one of them, with the ID being preferred.
+  gid:
+    type: str
+    description:
+      - ID of the group to be mapped.
+      - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+        calls required.
+  client_id:
+    type: str
+    description:
+      - Name of the client to be mapped (different from O(cid)).
+      - Either this parameter or O(cid) is required; providing O(cid) instead reduces the number of API calls.
+  cid:
+    type: str
+    description:
+      - ID of the client to be mapped.
+      - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+        calls required.
+  roles:
+    description:
+      - Roles to be mapped to the group.
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        type: str
+        description:
+          - Name of the role_representation.
+          - This parameter is required only when creating or updating the role_representation.
+      id:
+        type: str
+        description:
+          - The unique identifier for this role_representation.
+          - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+            of API calls required.
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Gaëtan Daubresse (@Gaetan2907)
+"""
+
+EXAMPLES = r"""
+- name: Map a client role to a group, authentication with credentials
+  community.general.keycloak_client_rolemapping:
+    realm: MyCustomRealm
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    state: present
+    client_id: client1
+    group_name: group1
+    roles:
+      - name: role_name1
+        id: role_id1
+      - name: role_name2
+        id: role_id2
+  delegate_to: localhost
+
+- name: Map a client role to a group, authentication with token
+  community.general.keycloak_client_rolemapping:
+    realm: MyCustomRealm
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    token: TOKEN
+    state: present
+    client_id: client1
+    group_name: group1
+    roles:
+      - name: role_name1
+        id: role_id1
+      - name: role_name2
+        id: role_id2
+  delegate_to: localhost
+
+- name: Map a client role to a subgroup, authentication with token
+  community.general.keycloak_client_rolemapping:
+    realm: MyCustomRealm
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    token: TOKEN
+    state: present
+    client_id: client1
+    group_name: subgroup1
+    parents:
+      - name: parent-group
+    roles:
+      - name: role_name1
+        id: role_id1
+      - name: role_name2
+        id: role_id2
+  delegate_to: localhost
+
+- name: Unmap client role from a group
+  community.general.keycloak_client_rolemapping:
+    realm: MyCustomRealm
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    state: absent
+    client_id: client1
+    group_name: group1
+    roles:
+      - name: role_name1
+        id: role_id1
+      - name: role_name2
+        id: role_id2
+  delegate_to: localhost
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+  sample: "Role role1 assigned to group group1."
+
+proposed:
+  description: Representation of proposed client role mapping.
+  returned: always
+  type: dict
+  sample: {"clientId": "test"}
+
+existing:
+  description:
+    - Representation of existing client role mapping.
+    - The sample is truncated.
+  returned: always
+  type: dict
+  sample:
+    {
+      "adminUrl": "http://www.example.com/admin_url",
+      "attributes": {
+        "request.object.signature.alg": "RS256"
+      }
+    }
+
+end_state:
+  description:
+    - Representation of client role mapping after module execution.
+    - The sample is truncated.
+  returned: on success
+  type: dict
+  sample:
+    {
+      "adminUrl": "http://www.example.com/admin_url",
+      "attributes": {
+        "request.object.signature.alg": "RS256"
+      }
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    roles_spec = dict(
+        name=dict(type='str'),
+        id=dict(type='str'),
+    )
+
+    meta_args = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        realm=dict(default='master'),
+        gid=dict(type='str'),
+        group_name=dict(type='str'),
+        parents=dict(
+            type='list', elements='dict',
+            options=dict(
+                id=dict(type='str'),
+                name=dict(type='str')
+            ),
+        ),
+        cid=dict(type='str'),
+        client_id=dict(type='str'),
+        roles=dict(type='list', elements='dict', options=roles_spec),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
+
+    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    state = module.params.get('state')
+    cid = module.params.get('cid')
+    client_id = module.params.get('client_id')
+    gid = module.params.get('gid')
+    group_name = module.params.get('group_name')
+    roles = module.params.get('roles')
+    parents = module.params.get('parents')
+
+    # Check the parameters
+    if cid is None and client_id is None:
+        module.fail_json(msg='Either the `client_id` or `cid` has to be specified.')
+    if gid is None and group_name is None:
+        module.fail_json(msg='Either the `group_name` or `gid` has to be specified.')
+
+    # Get the potential missing parameters
+    if gid is None:
+        group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents)
+        if group_rep is not None:
+            gid = group_rep['id']
+        else:
+            module.fail_json(msg='Could not fetch group %s.' % group_name)
+    if cid is None:
+        cid = kc.get_client_id(client_id, realm=realm)
+        if cid is None:
+            module.fail_json(msg='Could not fetch client %s.' % client_id)
+    if roles is None:
+        module.exit_json(msg="Nothing to do (no roles specified).")
+    else:
+        for role in roles:
+            if role['name'] is None and role['id'] is None:
+                module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
+            # Fetch missing role_id
+            if role['id'] is None:
+                role_id = kc.get_client_role_id_by_name(cid, role['name'], realm=realm)
+                if role_id is not None:
+                    role['id'] = role_id
+                else:
+                    module.fail_json(msg='Could not fetch role %s.' % (role['name']))
+            # Fetch missing role_name
+            else:
+                fetched_role = kc.get_client_group_rolemapping_by_id(gid, cid, role['id'], realm=realm)
+                if fetched_role is None:
+                    module.fail_json(msg='Could not fetch role %s.' % (role['id']))
+                role['name'] = fetched_role['name']
+
+    # Get effective client-level role mappings
+    available_roles_before = kc.get_client_group_available_rolemappings(gid, cid, realm=realm)
+    assigned_roles_before = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
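+
+    # For context: available_roles_before holds the client roles that could still be
+    # assigned to the group, while assigned_roles_before holds the effective (composite)
+    # mappings already in place; the loops below diff the requested roles against these.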
+
+    result['existing'] = assigned_roles_before
+    result['proposed'] = list(assigned_roles_before) if assigned_roles_before else []
+
+    update_roles = []
+    for role in roles:
+        # Fetch roles to assign if state present
+        if state == 'present':
+            for available_role in available_roles_before:
+                if role['name'] == available_role['name']:
+                    update_roles.append({
+                        'id': role['id'],
+                        'name': role['name'],
+                    })
+                    result['proposed'].append(available_role)
+        # Fetch roles to remove if state absent
+        else:
+            for assigned_role in assigned_roles_before:
+                if role['name'] == assigned_role['name']:
+                    update_roles.append({
+                        'id': role['id'],
+                        'name': role['name'],
+                    })
+                    if assigned_role in result['proposed']:  # Handle double removal
+                        result['proposed'].remove(assigned_role)
+
+    if update_roles:
+        if state == 'present':
+            # Assign roles
+            result['changed'] = True
+            if module._diff:
+                result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+            if module.check_mode:
+                module.exit_json(**result)
+            kc.add_group_rolemapping(gid, cid, update_roles, realm=realm)
+            result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name)
+            assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
+            result['end_state'] = assigned_roles_after
+            module.exit_json(**result)
+        else:
+            # Remove mapping of role
+            result['changed'] = True
+            if module._diff:
+                result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+            if module.check_mode:
+                module.exit_json(**result)
+            kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm)
+            result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name)
+            assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
+            result['end_state'] = assigned_roles_after
+            module.exit_json(**result)
+    # Do nothing
+    else:
+        result['changed'] = False
+        result['msg'] = 'Nothing to do, roles %s are %s with group %s.' % (roles, 'mapped' if state == 'present' else 'not mapped', group_name)
+        module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py
new file mode 100644
index 0000000000..8f37172a18
--- /dev/null
+++ b/plugins/modules/keycloak_client_rolescope.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_client_rolescope
+
+short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to other
+  specific client applications
+
+version_added: 8.6.0
+
+description:
+  - This module allows you to add or remove Keycloak roles from a client's scope using the Keycloak REST API. It requires access
+    to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+    In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+    the scope tailored to your needs and a user having the expected roles.
+  - Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false).
+  - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+    by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+    suitable for the API.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  action_group:
+    version_added: 10.2.0
+
+options:
+  state:
+    description:
+      - State of the role mapping.
+      - On V(present), all roles in O(role_names) are mapped if they do not exist yet.
+      - On V(absent), all role mappings in O(role_names) are removed if they exist.
+    default: 'present'
+    type: str
+    choices:
+      - present
+      - absent
+
+  realm:
+    type: str
+    description:
+      - The Keycloak realm under which the clients reside.
+    default: 'master'
+
+  client_id:
+    type: str
+    required: true
+    description:
+      - Roles provided in O(role_names) will be added to this client's scope.
+  client_scope_id:
+    type: str
+    description:
+      - If the roles in O(role_names) are client roles, the client ID under which they reside.
+      - If this parameter is absent, the roles are considered realm roles.
+  role_names:
+    required: true
+    type: list
+    elements: str
+    description:
+      - Names of roles to manipulate.
+      - If O(client_scope_id) is present, all roles must be under this client.
+      - If O(client_scope_id) is absent, all roles must be under the realm.
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Andre Desrosiers (@desand01)
+"""
+
+EXAMPLES = r"""
+- name: Add roles to public client scope
+  community.general.keycloak_client_rolescope:
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    realm: MyCustomRealm
+    client_id: frontend-client-public
+    client_scope_id: backend-client-private
+    role_names:
+      - backend-role-admin
+      - backend-role-user
+
+- name: Remove roles from public client scope
+  community.general.keycloak_client_rolescope:
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    realm: MyCustomRealm
+    client_id: frontend-client-public
+    client_scope_id: backend-client-private
+    role_names:
+      - backend-role-admin
+    state: absent
+
+- name: Add realm roles to public client scope
+  community.general.keycloak_client_rolescope:
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    realm: MyCustomRealm
+    client_id: frontend-client-public
+    role_names:
+      - realm-role-admin
+      - realm-role-user
+"""
+
+RETURN = r"""
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+  sample: "Client role scope for frontend-client-public has been updated"
+
+end_state:
+  description: Representation of the client role scope after module execution.
+  returned: on success
+  type: list
+  elements: dict
+  sample:
+    [
+      {
+        "clientRole": false,
+        "composite": false,
+        "containerId": "MyCustomRealm",
+        "id": "47293104-59a6-46f0-b460-2e9e3c9c424c",
+        "name": "backend-role-admin"
+      },
+      {
+        "clientRole": false,
+        "composite": false,
+        "containerId": "MyCustomRealm",
+        "id": "39c62a6d-542c-4715-92d2-41021eb33967",
+        "name": "backend-role-user"
+      }
+    ]
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        client_id=dict(type='str', required=True),
+        client_scope_id=dict(type='str'),
+        realm=dict(type='str', default='master'),
+        role_names=dict(type='list', elements='str', required=True),
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    result = dict(changed=False, msg='', diff={}, end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    clientid = module.params.get('client_id')
+    client_scope_id = module.params.get('client_scope_id')
+    role_names = module.params.get('role_names')
+    state = module.params.get('state')
+
+    objRealm = kc.get_realm_by_id(realm)
+    if not objRealm:
+        module.fail_json(msg="Failed to retrieve realm '{realm}'".format(realm=realm))
+
+    objClient = kc.get_client_by_clientid(clientid, realm)
+    if not objClient:
+        module.fail_json(msg="Failed to retrieve client '{realm}.{clientid}'".format(realm=realm, clientid=clientid))
+    if objClient["fullScopeAllowed"] and state == "present":
+        module.fail_json(msg="FullScopeAllowed is active for Client '{realm}.{clientid}'".format(realm=realm, clientid=clientid))
+
+    if client_scope_id:
+        objClientScope = kc.get_client_by_clientid(client_scope_id, realm)
+        if not objClientScope:
+            module.fail_json(msg="Failed to retrieve client '{realm}.{client_scope_id}'".format(realm=realm, client_scope_id=client_scope_id))
+        before_role_mapping = kc.get_client_role_scope_from_client(objClient["id"], objClientScope["id"], realm)
+    else:
+        before_role_mapping = kc.get_client_role_scope_from_realm(objClient["id"], realm)
+
+    if client_scope_id:
+        # retrieve all roles from the client scope
+        client_scope_roles_by_name = kc.get_client_roles_by_id(objClientScope["id"], realm)
+    else:
+        # retrieve all roles from the realm
+        client_scope_roles_by_name = kc.get_realm_roles(realm)
+
+    # convert to a dict indexed by name
+    client_scope_roles_by_name = {role["name"]: role for role in client_scope_roles_by_name}
+    role_mapping_by_name = {role["name"]: role for role in before_role_mapping}
+    role_mapping_to_manipulate = []
+
+    if state == "present":
+        # update desired
+        for role_name in role_names:
+            if role_name not in client_scope_roles_by_name:
+                if client_scope_id:
+                    module.fail_json(msg="Failed to retrieve role '{realm}.{client_scope_id}.{role_name}'"
+                                         .format(realm=realm, client_scope_id=client_scope_id, role_name=role_name))
+                else:
+                    module.fail_json(msg="Failed to retrieve role '{realm}.{role_name}'".format(realm=realm, role_name=role_name))
+            if role_name not in role_mapping_by_name:
+                role_mapping_to_manipulate.append(client_scope_roles_by_name[role_name])
+                role_mapping_by_name[role_name] = client_scope_roles_by_name[role_name]
+    else:
+        # remove role if present
+        for role_name in role_names:
+            if role_name in role_mapping_by_name:
+                role_mapping_to_manipulate.append(role_mapping_by_name[role_name])
+                del role_mapping_by_name[role_name]
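+
+    # At this point role_mapping_to_manipulate holds only the delta to apply, while
+    # role_mapping_by_name reflects the desired end state; the sorted lists below are
+    # used for the diff output and change detection.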
+
+    before_role_mapping = sorted(before_role_mapping, key=lambda d: d['name'])
+    desired_role_mapping = sorted(role_mapping_by_name.values(), key=lambda d: d['name'])
+
+    result['changed'] = len(role_mapping_to_manipulate) > 0
+
+    if result['changed']:
+        result['diff'] = dict(before=before_role_mapping, after=desired_role_mapping)
+
+    if not result['changed']:
+        # no changes
+        result['end_state'] = before_role_mapping
+        result['msg'] = "No changes required for client role scope {name}.".format(name=clientid)
+    elif state == "present":
+        # doing update
+        if module.check_mode:
+            result['end_state'] = desired_role_mapping
+        elif client_scope_id:
+            result['end_state'] = kc.update_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm)
+        else:
+            result['end_state'] = kc.update_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm)
+        result['msg'] = "Client role scope for {name} has been updated".format(name=clientid)
+    else:
+        # doing delete
+        if module.check_mode:
+            result['end_state'] = desired_role_mapping
+        elif client_scope_id:
+            result['end_state'] = kc.delete_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm)
+        else:
+            result['end_state'] = kc.delete_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm)
+        result['msg'] = "Client role scope for {name} has been deleted".format(name=clientid)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py
deleted file mode 120000
index 01468a5c8e..0000000000
--- a/plugins/modules/keycloak_clientscope.py
+++ /dev/null
@@ -1 +0,0 @@
-identity/keycloak/keycloak_clientscope.py
\ No newline at end of file
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py
new file mode 100644
index 0000000000..cea4b4fab2
--- /dev/null
+++ b/plugins/modules/keycloak_clientscope.py
@@ -0,0 +1,523 @@
+#!/usr/bin/python
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_clientscope
+
+short_description: Allows administration of Keycloak client_scopes using Keycloak API
+
+version_added: 3.4.0
+
+description:
+  - This module allows you to add, remove or modify Keycloak client_scopes using the Keycloak REST API. It requires access
+    to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+    In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+    the scope tailored to your needs and a user having the expected roles.
+  - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+    at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup to the API + to translate the name into the client_scope ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the client_scope. + - On V(present), the client_scope is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the client_scope is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + description: + - Name of the client_scope. + - This parameter is required only when creating or updating the client_scope. + realm: + type: str + description: + - The Keycloak realm under which this client_scope resides. + default: 'master' + + id: + type: str + description: + - The unique identifier for this client_scope. + - This parameter is not required for updating or deleting a client_scope but providing it reduces the number of API + calls required. + description: + type: str + description: + - Description for this client_scope. + - This parameter is not required for updating or deleting a client_scope. + protocol: + description: + - Type of client_scope. + - The V(docker-v2) value was added in community.general 8.6.0. + choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2'] + type: str + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client_scope. + - This is C(protocolMappers) in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + protocol: + description: + - This specifies for which protocol this protocol mapper is active. + choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2'] + type: str + + protocolMapper: + description: + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - V(docker-v2-allow-all-mapper). + - V(oidc-address-mapper). + - V(oidc-full-name-mapper). + - V(oidc-group-membership-mapper). + - V(oidc-hardcoded-claim-mapper). + - V(oidc-hardcoded-role-mapper). + - V(oidc-role-name-mapper). + - V(oidc-script-based-protocol-mapper). + - V(oidc-sha256-pairwise-sub-mapper). + - V(oidc-usermodel-attribute-mapper). + - V(oidc-usermodel-client-role-mapper). + - V(oidc-usermodel-property-mapper). + - V(oidc-usermodel-realm-role-mapper). + - V(oidc-usersessionmodel-note-mapper). + - V(saml-group-membership-mapper). + - V(saml-hardcode-attribute-mapper). + - V(saml-hardcode-role-mapper). + - V(saml-role-list-mapper). + - V(saml-role-name-mapper). + - V(saml-user-attribute-mapper). + - V(saml-user-property-mapper). + - V(saml-user-session-note-mapper). + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) return value. + type: dict + + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the client_scope. + - Values may be single values (for example a string) or a list of strings. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Gaëtan Daubresse (@Gaetan2907) +""" + +EXAMPLES = r""" +- name: Create a Keycloak client_scope, authentication with credentials + community.general.keycloak_clientscope: + name: my-new-kc-clientscope + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak client_scope, authentication with token + community.general.keycloak_clientscope: + name: my-new-kc-clientscope + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Delete a Keycloak client_scope + community.general.keycloak_clientscope: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + state: absent + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak client_scope based on name + community.general.keycloak_clientscope: + name: my-clientscope-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Update the name of a Keycloak client_scope + community.general.keycloak_clientscope: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + name: an-updated-kc-clientscope-name + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak client_scope with some custom attributes + community.general.keycloak_clientscope: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new_clientscope + description: description-of-clientscope + protocol: openid-connect + protocol_mappers: + - config: + access.token.claim: true + claim.name: "family_name" + id.token.claim: true + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: true + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + - config: + attribute.name: Role + attribute.nameformat: Basic + single: false + name: role list + protocol: saml + protocolMapper: saml-role-list-mapper + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to:
localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client_scope testclientscope has been updated" + +proposed: + description: Representation of proposed client scope. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: Representation of existing client scope (sample is truncated). + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: Representation of client scope after module execution (sample is truncated). + returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule + + +def normalise_cr(clientscoperep, remove_ids=False):
 + """ Re-sorts any properties where the order matters so that diffs are minimised, and adds default values where appropriate + so that the change detection is more effective. + + :param clientscoperep: the clientscoperep dict to be normalised + :param remove_ids: If set to true, the unique IDs of objects are removed so that the diff and change checks + do not trigger on IDs that are usually not known (e.g. for protocol_mappers) + :return: normalised clientscoperep dict + """ + # Avoid modifying the dict that was passed in + clientscoperep = clientscoperep.copy() + + if 'protocolMappers' in clientscoperep: + clientscoperep['protocolMappers'] = sorted(clientscoperep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) + for mapper in clientscoperep['protocolMappers']: + if remove_ids: + mapper.pop('id', None) + + # Set to a default value. + mapper['consentRequired'] = mapper.get('consentRequired', False) + + return clientscoperep + + +def sanitize_cr(clientscoperep): + """ Removes probably sensitive details from a clientscoperep representation.
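+ Currently this masks the value of the 'secret' field and of the 'saml.signing.private.key' attribute.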
+ + :param clientscoperep: the clientscoperep dict to be sanitized + :return: sanitized clientscoperep dict + """ + result = clientscoperep.copy() + if 'secret' in result: + result['secret'] = 'no_log' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes']['saml.signing.private.key'] = 'no_log' + return normalise_cr(result) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + id=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']), + attributes=dict(type='dict'), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('id') + name = module.params.get('name') + protocol_mappers = module.params.get('protocol_mappers') + + # Filter and map the parameter names that apply to the client scope + clientscope_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + before_clientscope = kc.get_clientscope_by_name(name, realm=realm) + else: + before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm) + + if before_clientscope is None: + before_clientscope = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for clientscope_param in clientscope_params: + new_param_value = module.params.get(clientscope_param) + + # Unfortunately, the ansible argument spec checker introduces variables with null values when + # they are not specified + if clientscope_param == 'protocol_mappers': + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] + changeset[camel(clientscope_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_clientscope = before_clientscope.copy() + desired_clientscope.update(changeset) + + # Cater for when it doesn't exist (an empty dict) + if not before_clientscope: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Clientscope does not exist; doing
nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new clientscope') + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(desired_clientscope)) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_clientscope(desired_clientscope, realm=realm) + after_clientscope = kc.get_clientscope_by_name(name, realm) + + result['end_state'] = sanitize_cr(after_clientscope) + + result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'], + id=after_clientscope['id']) + + else: + if state == 'present': + # Process an update + + # no changes + # remove ids for compare, problematic if desired has no ids set (not required), + # normalize for consentRequired in protocolMappers + if normalise_cr(desired_clientscope, remove_ids=True) == normalise_cr(before_clientscope, remove_ids=True): + result['changed'] = False + result['end_state'] = sanitize_cr(desired_clientscope) + result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) + + if module.check_mode: + # We can only compare the current clientscope with the proposed updates we have + before_norm = normalise_cr(before_clientscope, remove_ids=True) + desired_norm = normalise_cr(desired_clientscope, remove_ids=True) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = not is_struct_included(desired_norm, before_norm) + module.exit_json(**result) + + # do the update + kc.update_clientscope(desired_clientscope, realm=realm) + + # do the protocolmappers update + if protocol_mappers is not None: + for protocol_mapper in protocol_mappers: + # update if protocolmapper exist + current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(desired_clientscope['id'], protocol_mapper['name'], realm=realm) + if current_protocolmapper is not None: + protocol_mapper['id'] = current_protocolmapper['id'] + kc.update_clientscope_protocolmappers(desired_clientscope['id'], protocol_mapper, realm=realm) + # create otherwise + else: + kc.create_clientscope_protocolmapper(desired_clientscope['id'], protocol_mapper, realm=realm) + + after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope['id'], realm=realm) + + result['end_state'] = after_clientscope + + result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id']) + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_clientscope), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + cid = before_clientscope['id'] + kc.delete_clientscope(cid=cid, realm=realm) + + result['end_state'] = {} + + result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py new file mode 100644 index 0000000000..e979d123ab --- /dev/null +++ b/plugins/modules/keycloak_clientscope_type.py @@ -0,0 
+1,304 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_clientscope_type + +short_description: Set the type of a clientscope in realm or client using Keycloak API + +version_added: 6.6.0 + +description: + - This module allows you to set the type (optional, default) of clientscopes using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - The Keycloak realm. + default: 'master' + + client_id: + description: + - The O(client_id) of the client. If not set, the clientscope types are set as a default for the realm. + aliases: + - clientId + type: str + + default_clientscopes: + description: + - Client scopes that should be of type default. + type: list + elements: str + + optional_clientscopes: + description: + - Client scopes that should be of type optional. + type: list + elements: str + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Simon Pahl (@simonpahl) +""" + +EXAMPLES = r""" +- name: Set default client scopes on realm level + community.general.keycloak_clientscope_type: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: "MyCustomRealm" + default_clientscopes: ['profile', 'roles'] + delegate_to: localhost + + +- name: Set default and optional client scopes on client level with token auth + community.general.keycloak_clientscope_type: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + realm: "MyCustomRealm" + client_id: "MyCustomClient" + default_clientscopes: ['profile', 'roles'] + optional_clientscopes: ['phone'] + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "" +proposed: + description: Representation of proposed client-scope types mapping. + returned: always + type: dict + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [] + } +existing: + description: + - Representation of client scopes before module execution. + returned: always + type: dict + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [ + "phone" + ] + } +end_state: + description: + - Representation of client scopes after module execution. + - The sample is truncated.
+ returned: on success + type: dict + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [] + } +""" + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \ + keycloak_argument_spec + + +def keycloak_clientscope_type_module(): + """ + Returns an AnsibleModule definition. + + :return: AnsibleModule object + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default='master'), + client_id=dict(type='str', aliases=['clientId']), + default_clientscopes=dict(type='list', elements='str'), + optional_clientscopes=dict(type='list', elements='str'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([ + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], + ['default_clientscopes', 'optional_clientscopes'] + ]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + mutually_exclusive=[ + ['token', 'auth_realm'], + ['token', 'auth_username'], + ['token', 'auth_password'] + ], + ) + + return module + + +def clientscopes_to_add(existing, proposed): + to_add = [] + existing_clientscope_ids = extract_field(existing, 'id') + for clientscope in proposed: + if clientscope['id'] not in existing_clientscope_ids: + to_add.append(clientscope) + return to_add + + +def clientscopes_to_delete(existing, proposed): + to_delete = [] + proposed_clientscope_ids = extract_field(proposed, 'id') + for clientscope in existing: + if clientscope['id'] not in proposed_clientscope_ids: + to_delete.append(clientscope) + return to_delete + + +def extract_field(dictionary, field='name'): + return [cs[field] for cs in dictionary] + + +def normalize_scopes(scopes): + scopes_copy = scopes.copy() + if isinstance(scopes_copy.get('default_clientscopes'), list): + scopes_copy['default_clientscopes'] = sorted(scopes_copy['default_clientscopes']) + if isinstance(scopes_copy.get('optional_clientscopes'), list): + scopes_copy['optional_clientscopes'] = sorted(scopes_copy['optional_clientscopes']) + return scopes_copy + + +def main(): + """ + Module keycloak_clientscope_type + + :return: + """ + + module = keycloak_clientscope_type_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + client_id = module.params.get('client_id') + default_clientscopes = module.params.get('default_clientscopes') + optional_clientscopes = module.params.get('optional_clientscopes') + + result = dict(changed=False, msg='', proposed={}, existing={}, end_state={}) + + all_clientscopes = kc.get_clientscopes(realm) + default_clientscopes_real = [] + optional_clientscopes_real = [] + + for client_scope in all_clientscopes: + if default_clientscopes is not None and client_scope["name"] in default_clientscopes: + default_clientscopes_real.append(client_scope) + if optional_clientscopes is not None and client_scope["name"] in optional_clientscopes: + optional_clientscopes_real.append(client_scope) + + if default_clientscopes is not None and
len(default_clientscopes_real) != len(default_clientscopes): + module.fail_json(msg='At least one of the default_clientscopes does not exist!') + + if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes): + module.fail_json(msg='At least one of the optional_clientscopes does not exist!') + + result['proposed'].update({ + 'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes, + 'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes + }) + + default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id) + optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id) + + result['existing'].update({ + 'default_clientscopes': extract_field(default_clientscopes_existing), + 'optional_clientscopes': extract_field(optional_clientscopes_existing) + }) + + if module._diff: + result['diff'] = dict(before=normalize_scopes(result['existing']), after=normalize_scopes(result['proposed'])) + + default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real) + optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real) + + default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real) + optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real) + + result["changed"] = any(len(x) > 0 for x in [ + default_clientscopes_add, optional_clientscopes_add, default_clientscopes_delete, optional_clientscopes_delete + ]) + + if module.check_mode: + module.exit_json(**result) + + # first delete so clientscopes can change type + for clientscope in default_clientscopes_delete: + kc.delete_default_clientscope(clientscope['id'], realm, client_id) + for clientscope in optional_clientscopes_delete: + kc.delete_optional_clientscope(clientscope['id'], realm, client_id) + + for clientscope in default_clientscopes_add: + kc.add_default_clientscope(clientscope['id'], realm, client_id) + for clientscope in optional_clientscopes_add: + kc.add_optional_clientscope(clientscope['id'], realm, client_id) + + result['end_state'].update({ + 'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)), + 'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py new file mode 100644 index 0000000000..8b92516eb9 --- /dev/null +++ b/plugins/modules/keycloak_clientsecret_info.py @@ -0,0 +1,166 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Fynn Chen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_clientsecret_info + +short_description: Retrieve client secret using Keycloak API + +version_added: 6.1.0 + +description: + - This module allows you to get a Keycloak client secret using the Keycloak REST API. It requires access to the REST API + using OpenID Connect; the user connecting and the client being used must have the requisite access rights. 
In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - When retrieving a new client secret, where possible provide the client's O(id) (not O(client_id)) to the module. This + removes a lookup to the API to translate the O(client_id) into the client ID. + - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to + the task.' +attributes: + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - The Keycloak realm under which this client resides. + default: 'master' + + id: + description: + - The unique identifier for this client. + - This parameter is not required for getting or generating a client secret but providing it reduces the number of API + calls required. + type: str + + client_id: + description: + - The O(client_id) of the client. Passing this instead of O(id) results in an extra API call. + aliases: + - clientId + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Fynn Chen (@fynncfchen) + - John Cant (@johncant) +""" + +EXAMPLES = r""" +- name: Get a Keycloak client secret, authentication with credentials + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + no_log: true + +- name: Get a new Keycloak client secret, authentication with token + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Get a new Keycloak client secret, passing client_id instead of id + community.general.keycloak_clientsecret_info: + client_id: 'myClientId' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Get a new Keycloak client secret, authentication with auth_client_id and auth_client_secret + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_client_secret: SECRET + auth_keycloak_url: https://auth.example.com/auth + delegate_to: localhost + no_log: true +""" + +RETURN = r""" +msg: + description: Textual description of whether we succeeded or failed. + returned: always + type: str + +clientsecret_info: + description: Representation of the client secret. + returned: on success + type: complex + contains: + type: + description: Credential type. + type: str + returned: always + sample: secret + value: + description: Client secret.
+ type: str + returned: always + sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import ( + keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params) + + +def main(): + """ + Module keycloak_clientsecret_info + + :return: + """ + + module = keycloak_clientsecret_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + id, realm = keycloak_clientsecret_module_resolve_params(module, kc) + + clientsecret = kc.get_clientsecret(id=id, realm=realm) + + result = { + 'clientsecret_info': clientsecret, + 'msg': 'Get client secret successful for ID {id}'.format(id=id) + } + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py new file mode 100644 index 0000000000..823c011a96 --- /dev/null +++ b/plugins/modules/keycloak_clientsecret_regenerate.py @@ -0,0 +1,176 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Fynn Chen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_clientsecret_regenerate + +short_description: Regenerate Keycloak client secret using Keycloak API + +version_added: 6.1.0 + +description: + - This module allows you to regenerate a Keycloak client secret using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - When regenerating a client secret, where possible provide the client's ID (not client_id) to the module. This removes + a lookup to the API to translate the client_id into the client ID. + - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to + the task.' +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - The Keycloak realm under which this client resides. + default: 'master' + + id: + description: + - The unique identifier for this client. + - This parameter is not required for getting or generating a client secret but providing it reduces the number of API + calls required. + type: str + + client_id: + description: + - The client_id of the client. Passing this instead of ID results in an extra API call.
+ aliases: + - clientId + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Fynn Chen (@fynncfchen) + - John Cant (@johncant) +""" + +EXAMPLES = r""" +- name: Regenerate a Keycloak client secret, authentication with credentials + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + no_log: true + +- name: Regenerate a Keycloak client secret, authentication with token + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Regenerate a Keycloak client secret, passing client_id instead of id + community.general.keycloak_clientsecret_regenerate: + client_id: 'myClientId' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Regenerate a new Keycloak client secret, authentication with auth_client_id and auth_client_secret + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_client_secret: SECRET + auth_keycloak_url: https://auth.example.com/auth + delegate_to: localhost + no_log: true +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the client credential after module execution. + returned: on success + type: complex + contains: + type: + description: Credential type. + type: str + returned: always + sample: secret + value: + description: Client secret.
+ type: str + returned: always + sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import ( + keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params) + + +def main(): + """ + Module keycloak_clientsecret_regenerate + + :return: + """ + + module = keycloak_clientsecret_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + id, realm = keycloak_clientsecret_module_resolve_params(module, kc) + + if module.check_mode: + dummy_result = { + "msg": 'No action taken while in check mode', + "end_state": {'type': 'secret', 'value': 'X' * 32} + } + module.exit_json(**dummy_result) + + # Create new secret + clientsecret = kc.create_clientsecret(id=id, realm=realm) + + result = { + "msg": 'New client secret has been generated for ID {id}'.format(id=id), + "end_state": clientsecret + } + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py deleted file mode 120000 index 73fbd3ed7b..0000000000 --- a/plugins/modules/keycloak_clienttemplate.py +++ /dev/null @@ -1 +0,0 @@ -./identity/keycloak/keycloak_clienttemplate.py \ No newline at end of file diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py new file mode 100644 index 0000000000..7eda821de6 --- /dev/null +++ b/plugins/modules/keycloak_clienttemplate.py @@ -0,0 +1,448 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_clienttemplate + +short_description: Allows administration of Keycloak client templates using Keycloak API + +description: + - This module allows the administration of Keycloak client templates using the Keycloak REST API. It requires access to + the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - The Keycloak API does not always enforce that only sensible settings are used -- you can set SAML-specific settings on + an OpenID Connect client for instance and the other way around. Be careful. If you do not specify a setting, usually a + sensible default is chosen. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the client template. + - On V(present), the client template is created (or updated if it exists already). + - On V(absent), the client template is removed if it exists.
+ choices: ['present', 'absent'] + default: 'present' + type: str + + id: + description: + - ID of client template to be worked on. This is usually a UUID. + type: str + + realm: + description: + - Realm this client template is found in. + type: str + default: master + + name: + description: + - Name of the client template. + type: str + + description: + description: + - Description of the client template in Keycloak. + type: str + + protocol: + description: + - Type of client template. + - The V(docker-v2) value was added in community.general 8.6.0. + choices: ['openid-connect', 'saml', 'docker-v2'] + type: str + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client template or not. This is C(fullScopeAllowed) in the Keycloak + REST API. + type: bool + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client template. This is C(protocolMappers) in the Keycloak REST + API. + type: list + elements: dict + suboptions: + consentRequired: + description: + - Specifies whether a user needs to provide consent to a client for this mapper to be active. + type: bool + + consentText: + description: + - The human-readable name of the consent the user is presented to accept. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + protocol: + description: + - This specifies for which protocol this protocol mapper is active. + choices: ['openid-connect', 'saml', 'docker-v2'] + type: str + + protocolMapper: + description: + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - V(docker-v2-allow-all-mapper). + - V(oidc-address-mapper). + - V(oidc-full-name-mapper). + - V(oidc-group-membership-mapper). + - V(oidc-hardcoded-claim-mapper). + - V(oidc-hardcoded-role-mapper). + - V(oidc-role-name-mapper). + - V(oidc-script-based-protocol-mapper). + - V(oidc-sha256-pairwise-sub-mapper). + - V(oidc-usermodel-attribute-mapper). + - V(oidc-usermodel-client-role-mapper). + - V(oidc-usermodel-property-mapper). + - V(oidc-usermodel-realm-role-mapper). + - V(oidc-usersessionmodel-note-mapper). + - V(saml-group-membership-mapper). + - V(saml-hardcode-attribute-mapper). + - V(saml-hardcode-role-mapper). + - V(saml-role-list-mapper). + - V(saml-role-name-mapper). + - V(saml-user-attribute-mapper). + - V(saml-user-property-mapper). + - V(saml-user-session-note-mapper). + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) field. + type: dict + + attributes: + description: + - A dict of further attributes for this client template. 
This can contain various configuration settings, though in + the default installation of Keycloak as of 3.4, none are documented or known, so this is usually empty. + type: dict + +notes: + - The Keycloak REST API defines further fields (namely C(bearerOnly), C(consentRequired), C(standardFlowEnabled), C(implicitFlowEnabled), + C(directAccessGrantsEnabled), C(serviceAccountsEnabled), C(publicClient), and C(frontchannelLogout)) which, while available + with keycloak_client, do not have any effect on Keycloak client-templates and are discarded if supplied with an API request + changing client-templates. As such, they are not available through this module. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Eike Frost (@eikef) +""" + +EXAMPLES = r""" +- name: Create or update Keycloak client template (minimal), authentication with credentials + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + name: this_is_a_test + delegate_to: localhost + +- name: Create or update Keycloak client template (minimal), authentication with token + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + token: TOKEN + realm: master + name: this_is_a_test + delegate_to: localhost + +- name: Delete Keycloak client template + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + state: absent + name: test01 + delegate_to: localhost + +- name: Create or update Keycloak client template (with a protocol mapper) + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + name: this_is_a_test + protocol_mappers: + - config: + access.token.claim: true + claim.name: "family_name" + id.token.claim: true + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: true + consentRequired: true + consentText: "${familyName}" + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + full_scope_allowed: false + id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client template testclient has been updated" + +proposed: + description: Representation of proposed client template. + returned: always + type: dict + sample: {"name": "test01"} + +existing: + description: Representation of existing client template (sample is truncated). + returned: always + type: dict + sample: + { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } + +end_state: + description: Representation of client template after module execution (sample is truncated).
+ returned: on success + type: dict + sample: + { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + consentRequired=dict(type='bool'), + consentText=dict(type='str'), + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + realm=dict(type='str', default='master'), + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']), + attributes=dict(type='dict'), + full_scope_allowed=dict(type='bool'), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('id') + + # Filter and map the parameter names that apply to the client template + clientt_params = [x for x in module.params + if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm', + 'auth_client_secret', 'auth_username', 'auth_password', + 'validate_certs', 'realm'] and module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm) + if before_clientt is not None: + cid = before_clientt['id'] + else: + before_clientt = kc.get_client_template_by_id(cid, realm=realm) + + if before_clientt is None: + before_clientt = {} + + result['existing'] = before_clientt + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for clientt_param in clientt_params: + # lists in the Keycloak API are sorted + new_param_value = module.params.get(clientt_param) + if isinstance(new_param_value, list): + try: + new_param_value = sorted(new_param_value) + except TypeError: + pass + changeset[camel(clientt_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_clientt = before_clientt.copy() + desired_clientt.update(changeset) + + result['proposed'] = changeset + + # Cater for when it doesn't exist (an empty dict) + if not before_clientt: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] =
dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Client template does not exist, doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if 'name' not in desired_clientt: + module.fail_json(msg='name needs to be specified when creating a new client template') + + if module._diff: + result['diff'] = dict(before='', after=desired_clientt) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_client_template(desired_clientt, realm=realm) + after_clientt = kc.get_client_template_by_name(desired_clientt['name'], realm=realm) + + result['end_state'] = after_clientt + + result['msg'] = 'Client template %s has been created.' % desired_clientt['name'] + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + result['changed'] = True + if module.check_mode: + # We can only compare the current client template with the proposed updates we have + if module._diff: + result['diff'] = dict(before=before_clientt, + after=desired_clientt) + + module.exit_json(**result) + + # do the update + kc.update_client_template(cid, desired_clientt, realm=realm) + + after_clientt = kc.get_client_template_by_id(cid, realm=realm) + if before_clientt == after_clientt: + result['changed'] = False + + result['end_state'] = after_clientt + + if module._diff: + result['diff'] = dict(before=before_clientt, after=after_clientt) + + result['msg'] = 'Client template %s has been updated.' % desired_clientt['name'] + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_clientt, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_client_template(cid, realm=realm) + result['proposed'] = {} + + result['end_state'] = {} + + result['msg'] = 'Client template %s has been deleted.' % before_clientt['name'] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py new file mode 100644 index 0000000000..c33c9af136 --- /dev/null +++ b/plugins/modules/keycloak_component.py @@ -0,0 +1,322 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Björn Bösel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_component + +short_description: Allows administration of Keycloak components using Keycloak API + +version_added: 10.0.0 + +description: + - This module allows the administration of Keycloak components using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, C(admin-cli) and an C(admin) user would work, as would a separate realm definition with the scope + tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). Aliases are provided so camelCased versions can be + used as well.
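+ - Configuration values under O(config) are given as plain scalars; the module converts them into the list-of-strings format + that the Keycloak API expects.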
+attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the Keycloak component. + - On V(present), the component is created (or updated if it exists already). + - On V(absent), the component is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the component to create. + type: str + required: true + parent_id: + description: + - The parent_id of the component. In practice the ID (name) of the realm. + type: str + required: true + provider_id: + description: + - The name of the "provider ID" for the component. + type: str + required: true + provider_type: + description: + - The name of the "provider type" for the component. That is, V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.userprofile.UserProfileProvider), + ... + - See U(https://www.keycloak.org/docs/latest/server_development/index.html#_providers). + type: str + required: true + config: + description: + - Configuration properties for the provider. + - Contents vary depending on the provider type. + type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Björn Bösel (@fivetide) +""" + +EXAMPLES = r""" +- name: Manage Keycloak User Storage Provider + community.general.keycloak_component: + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + name: my storage provider + state: present + parent_id: some_realm + provider_id: my storage + provider_type: "org.keycloak.storage.UserStorageProvider" + config: + myCustomKey: "my_custom_key" + cachePolicy: "NO_CACHE" + enabled: true +""" + +RETURN = r""" +end_state: + description: Representation of the keycloak_component after module execution. + returned: on success + type: dict + contains: + id: + description: ID of the component. + type: str + returned: when O(state=present) + sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4 + name: + description: Name of the component. + type: str + returned: when O(state=present) + sample: mykey + parentId: + description: ID of the realm this component belongs to. + type: str + returned: when O(state=present) + sample: myrealm + providerId: + description: The provider ID of the component. + type: str + returned: when O(state=present) + sample: rsa + providerType: + description: The type of provider. + type: str + returned: when O(state=present) + config: + description: Component configuration.
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import urlencode +from copy import deepcopy + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + parent_id=dict(type='str', required=True), + provider_id=dict(type='str', required=True), + provider_type=dict(type='str', required=True), + config=dict( + type='dict', + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the component if it is already + # present. This is only used for diff-mode. + before_component = {} + before_component['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "parent_id"] + + # Filter and map the parameter names that apply to the component + component_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + provider_type = module.params.get("provider_type") + + # Build a proposed changeset from parameters given to this module + changeset = {} + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloak_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example private_key + # becomes privateKey. + # + # It also converts bool, str and int parameters into lists with a single + # entry of 'str' type. Bool values are also lowercased. This is required + # by Keycloak. + # + for component_param in component_params: + if component_param == 'config': + for config_param in module.params.get('config'): + changeset['config'][camel(config_param)] = [] + raw_value = module.params.get('config')[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = str(raw_value) + + changeset['config'][camel(config_param)].append(value) + else: + # No need for camelcase in here as these are one word parameters + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make a deep copy of the changeset. This is used when determining
+    # Make a deep copy of the changeset. This is used when determining
+    # changes to the current state.
+    changeset_copy = deepcopy(changeset)
+
+    # Make it easier to refer to current module parameters
+    name = module.params.get('name')
+    force = module.params.get('force')
+    state = module.params.get('state')
+    enabled = module.params.get('enabled')
+    provider_id = module.params.get('provider_id')
+    provider_type = module.params.get('provider_type')
+    parent_id = module.params.get('parent_id')
+
+    # Get a list of all Keycloak components that are of the given provider type.
+    current_components = kc.get_components(urlencode(dict(type=provider_type)), parent_id)
+
+    # If this component is already present, remember its ID.
+    component_id = None
+
+    # Track individual parameter changes
+    changes = ""
+
+    # This tells Ansible whether the component was changed (added, removed, modified)
+    result['changed'] = False
+
+    # Loop through the list of components. If we encounter a component whose
+    # name matches the value of the name parameter then assume the component is
+    # already present.
+    for component in current_components:
+        if component['name'] == name:
+            component_id = component['id']
+            changeset['id'] = component_id
+            changeset_copy['id'] = component_id
+
+            # Compare top-level parameters
+            for param, value in changeset.items():
+                before_component[param] = component[param]
+
+                if changeset_copy[param] != component[param] and param != 'config':
+                    changes += "%s: %s -> %s, " % (param, component[param], changeset_copy[param])
+                    result['changed'] = True
+            # Compare parameters under the "config" key
+            for p, v in changeset_copy['config'].items():
+                try:
+                    before_component['config'][p] = component['config'][p] or []
+                except KeyError:
+                    before_component['config'][p] = []
+                # Compare against before_component, which falls back to [] for
+                # keys missing from the current config (avoids a KeyError).
+                if changeset_copy['config'][p] != before_component['config'][p]:
+                    changes += "config.%s: %s -> %s, " % (p, before_component['config'][p], changeset_copy['config'][p])
+                    result['changed'] = True
+
+    # Check all the possible states of the resource and do what is needed to
+    # converge current state with desired state (create, update or delete
+    # the component).
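+    # The four cases handled below are:
+    #   state=present, component exists  -> update it (if anything changed)
+    #   state=absent,  component exists  -> delete it
+    #   state=present, component missing -> create it
+    #   state=absent,  component missing -> nothing to do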
+    if component_id and state == 'present':
+        if result['changed']:
+            if module._diff:
+                result['diff'] = dict(before=before_component, after=changeset_copy)
+
+            if module.check_mode:
+                result['msg'] = "Component %s would be changed: %s" % (name, changes.strip(", "))
+            else:
+                kc.update_component(changeset, parent_id)
+                result['msg'] = "Component %s changed: %s" % (name, changes.strip(", "))
+        else:
+            result['msg'] = "Component %s was in sync" % (name)
+
+        result['end_state'] = changeset_copy
+    elif component_id and state == 'absent':
+        if module._diff:
+            result['diff'] = dict(before=before_component, after={})
+
+        if module.check_mode:
+            result['changed'] = True
+            result['msg'] = "Component %s would be deleted" % (name)
+        else:
+            kc.delete_component(component_id, parent_id)
+            result['changed'] = True
+            result['msg'] = "Component %s deleted" % (name)
+
+        result['end_state'] = {}
+    elif not component_id and state == 'present':
+        if module._diff:
+            result['diff'] = dict(before={}, after=changeset_copy)
+
+        if module.check_mode:
+            result['changed'] = True
+            result['msg'] = "Component %s would be created" % (name)
+        else:
+            kc.create_component(changeset, parent_id)
+            result['changed'] = True
+            result['msg'] = "Component %s created" % (name)
+
+        result['end_state'] = changeset_copy
+    elif not component_id and state == 'absent':
+        result['changed'] = False
+        result['msg'] = "Component %s not present" % (name)
+        result['end_state'] = {}
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_component_info.py b/plugins/modules/keycloak_component_info.py
new file mode 100644
index 0000000000..92f86ea046
--- /dev/null
+++ b/plugins/modules/keycloak_component_info.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_component_info
+
+short_description: Retrieve component info in Keycloak
+
+version_added: 8.2.0
+
+description:
+  - This module retrieves information on components from Keycloak.
+attributes:
+  action_group:
+    version_added: 10.2.0
+
+options:
+  realm:
+    description:
+      - The name of the realm.
+    required: true
+    type: str
+  name:
+    description:
+      - Name of the component.
+    type: str
+  provider_type:
+    description:
+      - Provider type of components.
+      - 'Examples: V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy),
+        V(org.keycloak.keys.KeyProvider), V(org.keycloak.userprofile.UserProfileProvider), V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper).'
+    type: str
+  parent_id:
+    description:
+      - Container ID of the components.
+    type: str
+
+
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+author:
+  - Andre Desrosiers (@desand01)
+"""
+
+EXAMPLES = r"""
+- name: Retrieve info of a UserStorageProvider named myldap
+  community.general.keycloak_component_info:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+    auth_password: password
+    auth_realm: master
+    realm: myrealm
+    name: myldap
+    provider_type: org.keycloak.storage.UserStorageProvider
+
+- name: Retrieve info of a key provider component
+  community.general.keycloak_component_info:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+    auth_password: password
+    auth_realm: master
+    realm: myrealm
+    name: rsa-enc-generated
+    provider_type: org.keycloak.keys.KeyProvider
+
+- name: Retrieve all components from realm myrealm
+  community.general.keycloak_component_info:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+    auth_password: password
+    auth_realm: master
+    realm: myrealm
+
+- name: Retrieve all subcomponents of a parent component, filtered by type
+  community.general.keycloak_component_info:
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+    auth_password: password
+    auth_realm: master
+    realm: myrealm
+    parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2"
+    provider_type: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+"""
+
+RETURN = r"""
+components:
+  description: JSON representation of components.
+  returned: always
+  type: list
+  elements: dict
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from urllib.parse import quote
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        name=dict(type='str'),
+        realm=dict(type='str', required=True),
+        parent_id=dict(type='str'),
+        provider_type=dict(type='str'),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    result = dict(changed=False, components=[])
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    parentId = module.params.get('parent_id')
+    name = module.params.get('name')
+    providerType = module.params.get('provider_type')
+
+    objRealm = kc.get_realm_by_id(realm)
+    if not objRealm:
+        module.fail_json(msg="Failed to retrieve realm '{realm}'".format(realm=realm))
+
+    filters = []
+
+    if parentId:
+        filters.append("parent=%s" % (quote(parentId, safe='')))
+    else:
+        filters.append("parent=%s" % (quote(objRealm['id'], safe='')))
+
+    if name:
+        filters.append("name=%s" % (quote(name, safe='')))
+    if providerType:
+        filters.append("type=%s" % (quote(providerType, safe='')))
+
+    result['components'] = kc.get_components(filter="&".join(filters), realm=realm)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py
deleted file mode 120000
index b48bef5412..0000000000
--- a/plugins/modules/keycloak_group.py
+++ /dev/null
@@ -1 +0,0 @@
-./identity/keycloak/keycloak_group.py
\ No newline at end of
file
diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py
new file mode 100644
index 0000000000..a040e6e659
--- /dev/null
+++ b/plugins/modules/keycloak_group.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+
+# Copyright (c) 2019, Adam Goossens
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups using Keycloak API
+
+description:
+  - This module allows you to add, remove or modify Keycloak groups using the Keycloak REST API. It requires access to the
+    REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In
+    a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the
+    scope tailored to your needs and a user having the expected roles.
+  - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+    at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
+  - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+    by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+    suitable for the API.
+  - When updating a group, where possible provide the group ID to the module. This removes a lookup to the API to translate
+    the name into the group ID.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  action_group:
+    version_added: 10.2.0
+
+options:
+  state:
+    description:
+      - State of the group.
+      - On V(present), the group is created if it does not yet exist, or updated with the parameters you provide.
+      - On V(absent), the group is removed if it exists. Be aware that absenting a group with subgroups automatically deletes
+        all its subgroups too.
+    default: 'present'
+    type: str
+    choices:
+      - present
+      - absent
+
+  name:
+    type: str
+    description:
+      - Name of the group.
+      - This parameter is required only when creating or updating the group.
+  realm:
+    type: str
+    description:
+      - The Keycloak realm under which this group resides.
+    default: 'master'
+
+  id:
+    type: str
+    description:
+      - The unique identifier for this group.
+      - This parameter is not required for updating or deleting a group but providing it reduces the number of API calls required.
+  attributes:
+    type: dict
+    description:
+      - A dict of key/value pairs to set as custom attributes for the group.
+      - Values may be single values (for example a string) or a list of strings.
+  parents:
+    version_added: "6.4.0"
+    type: list
+    description:
+      - List of parent groups for the group to handle, sorted top to bottom.
+      - Set this to create a group as a subgroup of another group or groups (parents) or when accessing an existing subgroup
+        by name.
+      - Not necessary to set when accessing an existing subgroup by its C(ID) because in that case the group can be directly
+        queried without necessarily knowing its parent(s).
+    elements: dict
+    suboptions:
+      id:
+        type: str
+        description:
+          - Identify parent by ID.
+          - Needs fewer API calls than using O(parents[].name).
+          - A deep parent chain can be started at any point as long as the first parent is given by ID.
+          - Note that in principle both ID and name can be specified at the same time, but the current implementation always
+            uses just one of them, with ID being preferred.
+      name:
+        type: str
+        description:
+          - Identify parent by name.
+          - Needs more internal API calls than using O(parents[].id) to map names to IDs under the hood.
+          - When giving a parent chain with only names, it must be complete up to the top.
+          - Note that in principle both ID and name can be specified at the same time, but the current implementation always
+            uses just one of them, with ID being preferred.
+notes:
+  - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the
+    Keycloak API are read-only for groups. This limitation will be removed in a later version of this module.
+extends_documentation_fragment:
+  - community.general.keycloak
+  - community.general.keycloak.actiongroup_keycloak
+  - community.general.attributes
+
+author:
+  - Adam Goossens (@adamgoossens)
+"""
+
+EXAMPLES = r"""
+- name: Create a Keycloak group, authentication with credentials
+  community.general.keycloak_group:
+    name: my-new-kc-group
+    realm: MyCustomRealm
+    state: present
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  register: result_new_kcgrp
+  delegate_to: localhost
+
+- name: Create a Keycloak group, authentication with token
+  community.general.keycloak_group:
+    name: my-new-kc-group
+    realm: MyCustomRealm
+    state: present
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    token: TOKEN
+  delegate_to: localhost
+
+- name: Delete a Keycloak group
+  community.general.keycloak_group:
+    id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+    state: absent
+    realm: MyCustomRealm
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+  community.general.keycloak_group:
+    name: my-group-for-deletion
+    state: absent
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+  community.general.keycloak_group:
+    id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+    name: an-updated-kc-group-name
+    state: present
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Create a Keycloak group with some custom attributes
+  community.general.keycloak_group:
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    name: my-new_group
+    attributes:
+      attrib1: value1
+      attrib2: value2
+      attrib3:
+        - with
+        - numerous
+        - individual
+        - list
+        - items
+  delegate_to: localhost
+
+- name: Create a Keycloak subgroup of a base group (using parent name)
+  community.general.keycloak_group:
+    name: my-new-kc-group-sub
+    realm: MyCustomRealm
+    state: present
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    parents:
+      - name: my-new-kc-group
+  register: result_new_kcgrp_sub
+  delegate_to: localhost
+
+- name: Create a Keycloak
subgroup of a base group (using parent id) + community.general.keycloak_group: + name: my-new-kc-group-sub2 + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - id: "{{ result_new_kcgrp.end_state.id }}" + delegate_to: localhost + +- name: Create a Keycloak subgroup of a subgroup (using parent names) + community.general.keycloak_group: + name: my-new-kc-group-sub-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - name: my-new-kc-group + - name: my-new-kc-group-sub + delegate_to: localhost + +- name: Create a Keycloak subgroup of a subgroup (using direct parent id) + community.general.keycloak_group: + name: my-new-kc-group-sub-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - id: "{{ result_new_kcgrp_sub.end_state.id }}" + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the group after module execution (sample is truncated). + returned: on success + type: complex + contains: + id: + description: GUID that identifies the group. + type: str + returned: always + sample: 23f38145-3195-462c-97e7-97041ccea73e + name: + description: Name of the group. + type: str + returned: always + sample: grp-test-123 + attributes: + description: Attributes applied to this group. + type: dict + returned: always + sample: + attr1: ["val1", "val2", "val3"] + path: + description: URI path to the group. + type: str + returned: always + sample: /grp-test-123 + realmRoles: + description: An array of the realm-level roles granted to this group. + type: list + returned: always + sample: [] + subGroups: + description: A list of groups that are children of this group. These groups have the same parameters as documented here. + type: list + returned: always + clientRoles: + description: A list of client-level roles granted to this group. + type: list + returned: always + sample: [] + access: + description: A dict describing the accesses you have to this group based on the credentials used. 
+      type: dict
+      returned: always
+      sample:
+        manage: true
+        manageMembership: true
+        view: true
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        realm=dict(default='master'),
+        id=dict(type='str'),
+        name=dict(type='str'),
+        attributes=dict(type='dict'),
+        parents=dict(
+            type='list', elements='dict',
+            options=dict(
+                id=dict(type='str'),
+                name=dict(type='str')
+            ),
+        ),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=([['id', 'name'],
+                                             ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
+
+    result = dict(changed=False, msg='', diff={}, group='')
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    state = module.params.get('state')
+    gid = module.params.get('id')
+    name = module.params.get('name')
+    attributes = module.params.get('attributes')
+
+    parents = module.params.get('parents')
+
+    # attributes in Keycloak have their values returned as lists
+    # by the API. attributes is a dict, so we'll transparently convert
+    # the values to lists.
+    if attributes is not None:
+        for key, val in module.params['attributes'].items():
+            module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+
+    # Filter and map the parameter names that apply to the group
+    group_params = [x for x in module.params
+                    if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'parents'] and
+                    module.params.get(x) is not None]
+
+    # See if it already exists in Keycloak
+    if gid is None:
+        before_group = kc.get_group_by_name(name, realm=realm, parents=parents)
+    else:
+        before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+    if before_group is None:
+        before_group = {}
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = {}
+
+    for param in group_params:
+        new_param_value = module.params.get(param)
+        old_value = before_group[param] if param in before_group else None
+        if new_param_value != old_value:
+            changeset[camel(param)] = new_param_value
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+    desired_group = before_group.copy()
+    desired_group.update(changeset)
+
+    # Cater for when it doesn't exist (an empty dict)
+    if not before_group:
+        if state == 'absent':
+            # Do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['changed'] = False
+            result['end_state'] = {}
+            result['msg'] = 'Group does not exist; doing nothing.'
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new group') + + if module._diff: + result['diff'] = dict(before='', after=desired_group) + + if module.check_mode: + module.exit_json(**result) + + # create it ... + if parents: + # ... as subgroup of another parent group + kc.create_subgroup(parents, desired_group, realm=realm) + else: + # ... as toplvl base group + kc.create_group(desired_group, realm=realm) + + after_group = kc.get_group_by_name(name, realm, parents=parents) + + result['end_state'] = after_group + + result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'], + id=after_group['id']) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_group == before_group: + result['changed'] = False + result['end_state'] = desired_group + result['msg'] = "No changes required to group {name}.".format(name=before_group['name']) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_group, after=desired_group) + + if module.check_mode: + module.exit_json(**result) + + # do the update + kc.update_group(desired_group, realm=realm) + + after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm) + + result['end_state'] = after_group + + result['msg'] = "Group {id} has been updated".format(id=after_group['id']) + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_group, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + gid = before_group['id'] + kc.delete_group(groupid=gid, realm=realm) + + result['end_state'] = {} + + result['msg'] = "Group {name} has been deleted".format(name=before_group['name']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py deleted file mode 120000 index 6beed321db..0000000000 --- a/plugins/modules/keycloak_identity_provider.py +++ /dev/null @@ -1 +0,0 @@ -./identity/keycloak/keycloak_identity_provider.py \ No newline at end of file diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py new file mode 100644 index 0000000000..12aa2cc4ad --- /dev/null +++ b/plugins/modules/keycloak_identity_provider.py @@ -0,0 +1,730 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_identity_provider + +short_description: Allows administration of Keycloak identity providers using Keycloak API + +version_added: 3.6.0 + +description: + - This module allows you to add, remove or modify Keycloak identity providers using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. 
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the identity provider. + - On V(present), the identity provider is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the identity provider is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this identity provider resides. + default: 'master' + type: str + + alias: + description: + - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. + required: true + type: str + + display_name: + description: + - Friendly name for identity provider. + aliases: + - displayName + type: str + + enabled: + description: + - Enable/disable this identity provider. + type: bool + + store_token: + description: + - Enable/disable whether tokens must be stored after authenticating users. + aliases: + - storeToken + type: bool + + add_read_token_role_on_create: + description: + - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. + aliases: + - addReadTokenRoleOnCreate + type: bool + + trust_email: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. + aliases: + - trustEmail + type: bool + + link_only: + description: + - If true, users cannot log in through this provider. They can only link to this provider. This is useful if you do + not want to allow login from the provider, but want to integrate with a provider. + aliases: + - linkOnly + type: bool + + first_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after first login with this identity provider. + aliases: + - firstBrokerLoginFlowAlias + type: str + + post_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after each login with this identity provider. + aliases: + - postBrokerLoginFlowAlias + type: str + + authenticate_by_default: + description: + - Specifies if this identity provider should be used by default for authentication even before displaying login screen. + aliases: + - authenticateByDefault + type: bool + + provider_id: + description: + - Protocol used by this provider (supported values are V(oidc) or V(saml)). + aliases: + - providerId + type: str + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). + Examples are given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing + identity provider configuration through check-mode in the RV(existing) field. + type: dict + suboptions: + hide_on_login_page: + description: + - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) + parameter. + aliases: + - hideOnLoginPage + type: bool + + gui_order: + description: + - Number defining order of the provider in GUI (for example, on Login page). + aliases: + - guiOrder + type: int + + sync_mode: + description: + - Default sync mode for all mappers. 
The sync mode determines when user data is synced using the mappers.
+        aliases:
+          - syncMode
+        type: str
+
+      issuer:
+        description:
+          - The issuer identifier for the issuer of the response. If not provided, no validation is performed.
+        type: str
+
+      authorizationUrl:
+        description:
+          - The Authorization URL.
+        type: str
+
+      tokenUrl:
+        description:
+          - The Token URL.
+        type: str
+
+      logoutUrl:
+        description:
+          - End session endpoint to use to log out the user from the external IDP.
+        type: str
+
+      userInfoUrl:
+        description:
+          - The User Info URL.
+        type: str
+
+      clientAuthMethod:
+        description:
+          - The client authentication method.
+        type: str
+
+      clientId:
+        description:
+          - The client ID registered within the identity provider.
+        type: str
+
+      clientSecret:
+        description:
+          - The client secret registered within the identity provider.
+        type: str
+
+      defaultScope:
+        description:
+          - The scopes to be sent when asking for authorization.
+        type: str
+
+      validateSignature:
+        description:
+          - Enable/disable signature validation of external IDP signatures.
+        type: bool
+
+      useJwksUrl:
+        description:
+          - If V(true), identity provider public keys are downloaded from given JWKS URL.
+        type: bool
+
+      jwksUrl:
+        description:
+          - URL where identity provider keys in JWK format are stored. See JWK specification for more details.
+        type: str
+
+      entityId:
+        description:
+          - The Entity ID that is used to uniquely identify this SAML Service Provider.
+        type: str
+
+      singleSignOnServiceUrl:
+        description:
+          - The URL that must be used to send authentication requests (SAML AuthnRequest).
+        type: str
+
+      singleLogoutServiceUrl:
+        description:
+          - The URL that must be used to send logout requests.
+        type: str
+
+      backchannelSupported:
+        description:
+          - Does the external IDP support backchannel logout?
+        type: str
+
+      nameIDPolicyFormat:
+        description:
+          - Specifies the URI reference corresponding to a name identifier format.
+        type: str
+
+      principalType:
+        description:
+          - Way to identify and track external users from the assertion.
+        type: str
+
+      fromUrl:
+        description:
+          - IDP well-known OpenID Connect configuration URL.
+          - Supported only with O(provider_id=oidc).
+          - O(config.fromUrl) is mutually exclusive with O(config.userInfoUrl), O(config.authorizationUrl),
+            O(config.tokenUrl), O(config.logoutUrl), O(config.issuer) and O(config.jwksUrl).
+        type: str
+        version_added: '11.2.0'
+
+  mappers:
+    description:
+      - A list of dicts defining mappers associated with this Identity Provider.
+    type: list
+    elements: dict
+    suboptions:
+      id:
+        description:
+          - Unique ID of this mapper.
+        type: str
+
+      name:
+        description:
+          - Name of the mapper.
+        type: str
+
+      identityProviderAlias:
+        description:
+          - Alias of the identity provider for this mapper.
+        type: str
+
+      identityProviderMapper:
+        description:
+          - Type of mapper.
+        type: str
+
+      config:
+        description:
+          - Dict specifying the configuration options for the mapper; the contents differ depending on the value of O(mappers[].identityProviderMapper).
+ type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +""" + +EXAMPLES = r""" +- name: Create OIDC identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + syncMode: FORCE + mappers: + - name: first_name + identityProviderMapper: oidc-user-attribute-idp-mapper + config: + claim: first_name + user.attribute: first_name + syncMode: INHERIT + - name: last_name + identityProviderMapper: oidc-user-attribute-idp-mapper + config: + claim: last_name + user.attribute: last_name + syncMode: INHERIT + +- name: Create OIDC identity provider, with well-known configuration URL + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + fromUrl: https://the-idp.example.com/auth/realms/idprealm/.well-known/openid-configuration + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + +- name: Create SAML identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: saml-idp + display_name: SAML IdP + enabled: true + provider_id: saml + config: + entityId: https://auth.example.com/auth/realms/myrealm + singleSignOnServiceUrl: https://idp.example.com/login + wantAuthnRequestsSigned: true + wantAssertionsSigned: true + mappers: + - name: roles + identityProviderMapper: saml-user-attribute-idp-mapper + config: + user.attribute: roles + attribute.friendly.name: User Roles + attribute.name: roles + syncMode: INHERIT +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Identity provider my-idp has been created" + +proposed: + description: Representation of proposed identity provider. + returned: always + type: dict + sample: + { + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "providerId": "oidc" + } + +existing: + description: Representation of existing identity provider. 
+ returned: always + type: dict + sample: + { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://old.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://old.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://old.example.com/token", + "userInfoUrl": "https://old.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false + } + +end_state: + description: Representation of identity provider after module execution. + returned: on success + type: dict + sample: + { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from copy import deepcopy + + +def sanitize(idp): + idpcopy = deepcopy(idp) + if 'config' in idpcopy: + if 'clientSecret' in idpcopy['config']: + idpcopy['config']['clientSecret'] = '**********' + return idpcopy + + +def get_identity_provider_with_mappers(kc, alias, realm): + idp = kc.get_identity_provider(alias, realm) + if idp is not None: + idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + # clientSecret returned by API when using `get_identity_provider(alias, realm)` is always ********** + # to detect changes to the secret, we get the actual cleartext secret from the full realm info + if 'config' in idp: + if 'clientSecret' in idp['config']: + for idp_from_realm in kc.get_realm_by_id(realm).get('identityProviders', []): + if idp_from_realm['internalId'] == idp['internalId']: + cleartext_secret = idp_from_realm.get('config', {}).get('clientSecret') + if cleartext_secret: + idp['config']['clientSecret'] = cleartext_secret + if idp is None: + idp = {} + return idp + + +def fetch_identity_provider_wellknown_config(kc, config): + """ + Fetches OpenID Connect well-known configuration from a given URL and updates the config dict with discovered endpoints. + Support for oidc providers only. + :param kc: KeycloakAPI instance used to fetch endpoints and handle errors. + :param config: Dictionary containing identity provider configuration, must include 'fromUrl' key to trigger fetch. + :return: None. The config dict is updated in-place. 
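+
+    A sketch of the in-place update, with hypothetical values:
+
+        config = {'fromUrl': 'https://idp.example.com/.well-known/openid-configuration'}
+        fetch_identity_provider_wellknown_config(kc, config)
+        # config now holds the discovered endpoints ('authorizationUrl',
+        # 'tokenUrl', 'issuer', ...) and 'fromUrl' has been removed.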
+ """ + if config and 'fromUrl' in config : + if 'providerId' in config and config['providerId'] != 'oidc': + kc.module.fail_json(msg="Only 'oidc' provider_id is supported when using 'fromUrl'.") + endpoints = ['userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer', 'jwksUrl'] + if any(k in config for k in endpoints): + kc.module.fail_json(msg="Cannot specify both 'fromUrl' and 'userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer' or 'jwksUrl'.") + openIdConfig = kc.fetch_idp_endpoints_import_config_url( + fromUrl=config['fromUrl'], + realm=kc.module.params.get('realm', 'master')) + for k in endpoints: + if k in openIdConfig: + config[k] = openIdConfig[k] + del config['fromUrl'] + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + identityProviderAlias=dict(type='str'), + identityProviderMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + alias=dict(type='str', required=True), + add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']), + authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']), + config=dict(type='dict'), + display_name=dict(type='str', aliases=['displayName']), + enabled=dict(type='bool'), + first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']), + link_only=dict(type='bool', aliases=['linkOnly']), + post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']), + provider_id=dict(type='str', aliases=['providerId']), + store_token=dict(type='bool', aliases=['storeToken']), + trust_email=dict(type='bool', aliases=['trustEmail']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + alias = module.params.get('alias') + state = module.params.get('state') + config = module.params.get('config') + + fetch_identity_provider_wellknown_config(kc, config) + + # Filter and map the parameters names that apply to the identity provider. 
+    idp_params = [x for x in module.params
+                  if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
+                  module.params.get(x) is not None]
+
+    # See if it already exists in Keycloak
+    before_idp = get_identity_provider_with_mappers(kc, alias, realm)
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = {}
+
+    for param in idp_params:
+        new_param_value = module.params.get(param)
+        old_value = before_idp[camel(param)] if camel(param) in before_idp else None
+        if new_param_value != old_value:
+            changeset[camel(param)] = new_param_value
+
+    # special handling of mappers list to allow change detection
+    if module.params.get('mappers') is not None:
+        for change in module.params['mappers']:
+            change = {k: v for k, v in change.items() if v is not None}
+            if change.get('id') is None and change.get('name') is None:
+                module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
+            if before_idp == dict():
+                old_mapper = dict()
+            elif change.get('id') is not None:
+                old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
+                if old_mapper is None:
+                    old_mapper = dict()
+            else:
+                found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
+                if len(found) == 1:
+                    old_mapper = found[0]
+                else:
+                    old_mapper = dict()
+            new_mapper = old_mapper.copy()
+            new_mapper.update(change)
+
+            if changeset.get('mappers') is None:
+                changeset['mappers'] = list()
+            # eventually this holds all desired mappers, unchanged, modified and newly added
+            changeset['mappers'].append(new_mapper)
+
+        # ensure idempotency in case module.params.mappers is not sorted by name
+        changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('id') if x.get('name') is None else x['name'])
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+    desired_idp = before_idp.copy()
+    desired_idp.update(changeset)
+
+    result['proposed'] = sanitize(changeset)
+    result['existing'] = sanitize(before_idp)
+
+    # Cater for when it doesn't exist (an empty dict)
+    if not before_idp:
+        if state == 'absent':
+            # Do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['changed'] = False
+            result['end_state'] = {}
+            result['msg'] = 'Identity provider does not exist; doing nothing.'
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_idp)) + + if module.check_mode: + module.exit_json(**result) + + # create it + desired_idp = desired_idp.copy() + mappers = desired_idp.pop('mappers', []) + kc.create_identity_provider(desired_idp, realm) + for mapper in mappers: + if mapper.get('identityProviderAlias') is None: + mapper['identityProviderAlias'] = alias + kc.create_identity_provider_mapper(mapper, alias, realm) + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_idp == before_idp: + result['changed'] = False + result['end_state'] = sanitize(desired_idp) + result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + desired_idp = desired_idp.copy() + updated_mappers = desired_idp.pop('mappers', []) + original_mappers = list(before_idp.get('mappers', [])) + + kc.update_identity_provider(desired_idp, realm) + for mapper in updated_mappers: + if mapper.get('id') is not None: + # only update existing if there is a change + for i, orig in enumerate(original_mappers): + if mapper['id'] == orig['id']: + del original_mappers[i] + if mapper != orig: + kc.update_identity_provider_mapper(mapper, alias, realm) + else: + if mapper.get('identityProviderAlias') is None: + mapper['identityProviderAlias'] = alias + kc.create_identity_provider_mapper(mapper, alias, realm) + for mapper in [x for x in before_idp['mappers'] + if [y for y in updated_mappers if y["name"] == x['name']] == []]: + kc.delete_identity_provider_mapper(mapper['id'], alias, realm) + + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) + module.exit_json(**result) + + elif state == 'absent': + # Process a deletion + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_identity_provider(alias, realm) + + result['end_state'] = {} + + result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py deleted file mode 120000 index 1cdeb46a81..0000000000 --- a/plugins/modules/keycloak_realm.py +++ /dev/null @@ -1 +0,0 @@ -./identity/keycloak/keycloak_realm.py \ No newline at end of file diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py new file mode 100644 index 0000000000..47ad90ee4e --- /dev/null +++ b/plugins/modules/keycloak_realm.py @@ -0,0 +1,1111 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm + +short_description: Allows administration of Keycloak realm using Keycloak API + +version_added: 3.0.0 + +description: + - This module allows the administration of Keycloak realm using the Keycloak REST API. It requires access to the REST API + using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect + client for instance and also the other way around. B(Be careful). If you do not specify a setting, usually a sensible + default is chosen. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the realm. + - On V(present), the realm is created (or updated if it exists already). + - On V(absent), the realm is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + + id: + description: + - The realm to create. + type: str + realm: + description: + - The realm name. + type: str + access_code_lifespan: + description: + - The realm access code lifespan. + aliases: + - accessCodeLifespan + type: int + access_code_lifespan_login: + description: + - The realm access code lifespan login. + aliases: + - accessCodeLifespanLogin + type: int + access_code_lifespan_user_action: + description: + - The realm access code lifespan user action. + aliases: + - accessCodeLifespanUserAction + type: int + access_token_lifespan: + description: + - The realm access token lifespan. + aliases: + - accessTokenLifespan + type: int + access_token_lifespan_for_implicit_flow: + description: + - The realm access token lifespan for implicit flow. + aliases: + - accessTokenLifespanForImplicitFlow + type: int + account_theme: + description: + - The realm account theme. + aliases: + - accountTheme + type: str + action_token_generated_by_admin_lifespan: + description: + - The realm action token generated by admin lifespan. + aliases: + - actionTokenGeneratedByAdminLifespan + type: int + action_token_generated_by_user_lifespan: + description: + - The realm action token generated by user lifespan. + aliases: + - actionTokenGeneratedByUserLifespan + type: int + admin_events_details_enabled: + description: + - The realm admin events details enabled. + aliases: + - adminEventsDetailsEnabled + type: bool + admin_events_enabled: + description: + - The realm admin events enabled. + aliases: + - adminEventsEnabled + type: bool + admin_theme: + description: + - The realm admin theme. + aliases: + - adminTheme + type: str + attributes: + description: + - The realm attributes. + type: dict + browser_flow: + description: + - The realm browser flow. + aliases: + - browserFlow + type: str + browser_security_headers: + description: + - The realm browser security headers. 
+ aliases: + - browserSecurityHeaders + type: dict + brute_force_protected: + description: + - The realm brute force protected. + aliases: + - bruteForceProtected + type: bool + brute_force_strategy: + description: + - The realm brute force strategy. + aliases: + - bruteForceStrategy + choices: ['LINEAR', 'MULTIPLE'] + type: str + version_added: 11.2.0 + client_authentication_flow: + description: + - The realm client authentication flow. + aliases: + - clientAuthenticationFlow + type: str + client_scope_mappings: + description: + - The realm client scope mappings. + aliases: + - clientScopeMappings + type: dict + default_default_client_scopes: + description: + - The realm default default client scopes. + aliases: + - defaultDefaultClientScopes + type: list + elements: str + default_groups: + description: + - The realm default groups. + aliases: + - defaultGroups + type: list + elements: str + default_locale: + description: + - The realm default locale. + aliases: + - defaultLocale + type: str + default_optional_client_scopes: + description: + - The realm default optional client scopes. + aliases: + - defaultOptionalClientScopes + type: list + elements: str + default_roles: + description: + - The realm default roles. + aliases: + - defaultRoles + type: list + elements: str + default_signature_algorithm: + description: + - The realm default signature algorithm. + aliases: + - defaultSignatureAlgorithm + type: str + direct_grant_flow: + description: + - The realm direct grant flow. + aliases: + - directGrantFlow + type: str + display_name: + description: + - The realm display name. + aliases: + - displayName + type: str + display_name_html: + description: + - The realm display name HTML. + aliases: + - displayNameHtml + type: str + docker_authentication_flow: + description: + - The realm docker authentication flow. + aliases: + - dockerAuthenticationFlow + type: str + duplicate_emails_allowed: + description: + - The realm duplicate emails allowed option. + aliases: + - duplicateEmailsAllowed + type: bool + edit_username_allowed: + description: + - The realm edit username allowed option. + aliases: + - editUsernameAllowed + type: bool + email_theme: + description: + - The realm email theme. + aliases: + - emailTheme + type: str + enabled: + description: + - The realm enabled option. + type: bool + enabled_event_types: + description: + - The realm enabled event types. + aliases: + - enabledEventTypes + type: list + elements: str + events_enabled: + description: + - Enables or disables login events for this realm. + aliases: + - eventsEnabled + type: bool + version_added: 3.6.0 + events_expiration: + description: + - The realm events expiration. + aliases: + - eventsExpiration + type: int + events_listeners: + description: + - The realm events listeners. + aliases: + - eventsListeners + type: list + elements: str + failure_factor: + description: + - The realm failure factor. + aliases: + - failureFactor + type: int + internationalization_enabled: + description: + - The realm internationalization enabled option. + aliases: + - internationalizationEnabled + type: bool + login_theme: + description: + - The realm login theme. + aliases: + - loginTheme + type: str + login_with_email_allowed: + description: + - The realm login with email allowed option. + aliases: + - loginWithEmailAllowed + type: bool + max_delta_time_seconds: + description: + - The realm max delta time in seconds. 
+ aliases: + - maxDeltaTimeSeconds + type: int + max_failure_wait_seconds: + description: + - The realm max failure wait in seconds. + aliases: + - maxFailureWaitSeconds + type: int + max_temporary_lockouts: + description: + - The realm max temporary lockouts. + aliases: + - maxTemporaryLockouts + type: int + version_added: 11.2.0 + minimum_quick_login_wait_seconds: + description: + - The realm minimum quick login wait in seconds. + aliases: + - minimumQuickLoginWaitSeconds + type: int + not_before: + description: + - The realm not before. + aliases: + - notBefore + type: int + offline_session_idle_timeout: + description: + - The realm offline session idle timeout. + aliases: + - offlineSessionIdleTimeout + type: int + offline_session_max_lifespan: + description: + - The realm offline session max lifespan. + aliases: + - offlineSessionMaxLifespan + type: int + offline_session_max_lifespan_enabled: + description: + - The realm offline session max lifespan enabled option. + aliases: + - offlineSessionMaxLifespanEnabled + type: bool + otp_policy_algorithm: + description: + - The realm otp policy algorithm. + aliases: + - otpPolicyAlgorithm + type: str + otp_policy_digits: + description: + - The realm otp policy digits. + aliases: + - otpPolicyDigits + type: int + otp_policy_initial_counter: + description: + - The realm otp policy initial counter. + aliases: + - otpPolicyInitialCounter + type: int + otp_policy_look_ahead_window: + description: + - The realm otp policy look ahead window. + aliases: + - otpPolicyLookAheadWindow + type: int + otp_policy_period: + description: + - The realm otp policy period. + aliases: + - otpPolicyPeriod + type: int + otp_policy_type: + description: + - The realm otp policy type. + aliases: + - otpPolicyType + type: str + otp_supported_applications: + description: + - The realm otp supported applications. + aliases: + - otpSupportedApplications + type: list + elements: str + password_policy: + description: + - The realm password policy. + aliases: + - passwordPolicy + type: str + organizations_enabled: + description: + - Enables support for experimental organization feature. + aliases: + - organizationsEnabled + type: bool + version_added: 10.0.0 + permanent_lockout: + description: + - The realm permanent lockout. + aliases: + - permanentLockout + type: bool + quick_login_check_milli_seconds: + description: + - The realm quick login check in milliseconds. + aliases: + - quickLoginCheckMilliSeconds + type: int + refresh_token_max_reuse: + description: + - The realm refresh token max reuse. + aliases: + - refreshTokenMaxReuse + type: int + registration_allowed: + description: + - The realm registration allowed option. + aliases: + - registrationAllowed + type: bool + registration_email_as_username: + description: + - The realm registration email as username option. + aliases: + - registrationEmailAsUsername + type: bool + registration_flow: + description: + - The realm registration flow. + aliases: + - registrationFlow + type: str + remember_me: + description: + - The realm remember me option. + aliases: + - rememberMe + type: bool + reset_credentials_flow: + description: + - The realm reset credentials flow. + aliases: + - resetCredentialsFlow + type: str + reset_password_allowed: + description: + - The realm reset password allowed option. + aliases: + - resetPasswordAllowed + type: bool + revoke_refresh_token: + description: + - The realm revoke refresh token option. 
+ aliases: + - revokeRefreshToken + type: bool + smtp_server: + description: + - The realm smtp server. + aliases: + - smtpServer + type: dict + ssl_required: + description: + - The realm ssl required option. + choices: ['all', 'external', 'none'] + aliases: + - sslRequired + type: str + sso_session_idle_timeout: + description: + - The realm sso session idle timeout. + aliases: + - ssoSessionIdleTimeout + type: int + sso_session_idle_timeout_remember_me: + description: + - The realm sso session idle timeout remember me. + aliases: + - ssoSessionIdleTimeoutRememberMe + type: int + sso_session_max_lifespan: + description: + - The realm sso session max lifespan. + aliases: + - ssoSessionMaxLifespan + type: int + sso_session_max_lifespan_remember_me: + description: + - The realm sso session max lifespan remember me. + aliases: + - ssoSessionMaxLifespanRememberMe + type: int + supported_locales: + description: + - The realm supported locales. + aliases: + - supportedLocales + type: list + elements: str + user_managed_access_allowed: + description: + - The realm user managed access allowed option. + aliases: + - userManagedAccessAllowed + type: bool + verify_email: + description: + - The realm verify email option. + aliases: + - verifyEmail + type: bool + wait_increment_seconds: + description: + - The realm wait increment in seconds. + aliases: + - waitIncrementSeconds + type: int + client_session_idle_timeout: + description: + - All clients inherit this setting; it defines how long a session may be idle before it expires. + aliases: + - clientSessionIdleTimeout + type: int + version_added: 11.2.0 + client_session_max_lifespan: + description: + - All clients inherit this setting; it defines the maximum time before a session expires. + aliases: + - clientSessionMaxLifespan + type: int + version_added: 11.2.0 + client_offline_session_idle_timeout: + description: + - All clients inherit this setting; it defines how long an offline session may be idle before it expires. + aliases: + - clientOfflineSessionIdleTimeout + type: int + version_added: 11.2.0 + client_offline_session_max_lifespan: + description: + - All clients inherit this setting; it defines the maximum time before an offline session expires, regardless of activity. + aliases: + - clientOfflineSessionMaxLifespan + type: int + version_added: 11.2.0 + oauth2_device_code_lifespan: + description: + - Maximum time before the device code and user code expire. + aliases: + - oauth2DeviceCodeLifespan + type: int + version_added: 11.2.0 + oauth2_device_polling_interval: + description: + - The minimum amount of time in seconds that the client should wait between polling requests to the token endpoint. + aliases: + - oauth2DevicePollingInterval + type: int + version_added: 11.2.0 + web_authn_policy_rp_entity_name: + description: + - WebAuthn Relying Party Entity Name. + aliases: + - webAuthnPolicyRpEntityName + type: str + version_added: 11.3.0 + web_authn_policy_signature_algorithms: + description: + - List of acceptable WebAuthn signature algorithms. + aliases: + - webAuthnPolicySignatureAlgorithms + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_rp_id: + description: + - WebAuthn Relying Party ID (domain). Empty string means use request host. + aliases: + - webAuthnPolicyRpId + type: str + version_added: 11.3.0 + web_authn_policy_attestation_conveyance_preference: + description: + - Attestation conveyance preference for WebAuthn.
+ aliases: + - webAuthnPolicyAttestationConveyancePreference + type: str + version_added: 11.3.0 + web_authn_policy_authenticator_attachment: + description: + - Authenticator attachment preference for WebAuthn authenticators. + aliases: + - webAuthnPolicyAuthenticatorAttachment + type: str + version_added: 11.3.0 + web_authn_policy_require_resident_key: + description: + - Whether resident keys are required for WebAuthn (V(Yes)/V(No)/V(not specified)). + aliases: + - webAuthnPolicyRequireResidentKey + type: str + version_added: 11.3.0 + web_authn_policy_user_verification_requirement: + description: + - User verification requirement for WebAuthn. + aliases: + - webAuthnPolicyUserVerificationRequirement + type: str + version_added: 11.3.0 + web_authn_policy_create_timeout: + description: + - Timeout for WebAuthn credential creation (ms). + aliases: + - webAuthnPolicyCreateTimeout + type: int + version_added: 11.3.0 + web_authn_policy_avoid_same_authenticator_register: + description: + - Avoid registering the same authenticator multiple times. + aliases: + - webAuthnPolicyAvoidSameAuthenticatorRegister + type: bool + version_added: 11.3.0 + web_authn_policy_acceptable_aaguids: + description: + - List of acceptable AAGUIDs for WebAuthn authenticators. + aliases: + - webAuthnPolicyAcceptableAaguids + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_extra_origins: + description: + - Additional acceptable origins for WebAuthn requests. + aliases: + - webAuthnPolicyExtraOrigins + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_passwordless_rp_entity_name: + description: + - WebAuthn Passwordless Relying Party Entity Name. + aliases: + - webAuthnPolicyPasswordlessRpEntityName + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_signature_algorithms: + description: + - List of acceptable WebAuthn signature algorithms for passwordless. + aliases: + - webAuthnPolicyPasswordlessSignatureAlgorithms + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_passwordless_rp_id: + description: + - WebAuthn Passwordless Relying Party ID (domain). + aliases: + - webAuthnPolicyPasswordlessRpId + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_attestation_conveyance_preference: + description: + - Attestation conveyance preference for WebAuthn passwordless. + aliases: + - webAuthnPolicyPasswordlessAttestationConveyancePreference + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_authenticator_attachment: + description: + - Authenticator attachment for WebAuthn passwordless. + aliases: + - webAuthnPolicyPasswordlessAuthenticatorAttachment + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_require_resident_key: + description: + - Whether resident keys are required for WebAuthn passwordless (V(Yes)/V(No)/V(not specified)). + aliases: + - webAuthnPolicyPasswordlessRequireResidentKey + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_user_verification_requirement: + description: + - User verification requirement for WebAuthn passwordless. + aliases: + - webAuthnPolicyPasswordlessUserVerificationRequirement + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_create_timeout: + description: + - Timeout for WebAuthn passwordless credential creation (ms).
+ aliases: + - webAuthnPolicyPasswordlessCreateTimeout + type: int + version_added: 11.3.0 + web_authn_policy_passwordless_avoid_same_authenticator_register: + description: + - Avoid registering the same authenticator multiple times for passwordless. + aliases: + - webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister + type: bool + version_added: 11.3.0 + web_authn_policy_passwordless_acceptable_aaguids: + description: + - List of acceptable AAGUIDs for WebAuthn passwordless authenticators. + aliases: + - webAuthnPolicyPasswordlessAcceptableAaguids + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_passwordless_extra_origins: + description: + - Additional acceptable origins for WebAuthn passwordless requests. + aliases: + - webAuthnPolicyPasswordlessExtraOrigins + type: list + version_added: 11.3.0 + elements: str + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Christophe Gilles (@kris2kris) +""" + +EXAMPLES = r""" +- name: Create or update Keycloak realm (minimal example) + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: unique_realm_name + state: present + +- name: Delete a Keycloak realm + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: unique_realm_name + state: absent +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Realm testrealm has been updated" + +proposed: + description: Representation of proposed realm. + returned: always + type: dict + sample: {"realm": "test"} + +existing: + description: Representation of existing realm (sample is truncated). + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: Representation of realm after module execution (sample is truncated). + returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def normalise_cr(realmrep): + """ Re-sorts any properties where the order is important so that diffs are minimised and change detection is more effective. + + :param realmrep: the realmrep dict to be normalised + :return: normalised realmrep dict + """ + # Avoid modifying the dict that was passed in + realmrep = realmrep.copy() + + if 'enabledEventTypes' in realmrep: + realmrep['enabledEventTypes'] = sorted(realmrep['enabledEventTypes']) + + if 'otpSupportedApplications' in realmrep: + realmrep['otpSupportedApplications'] = sorted(realmrep['otpSupportedApplications']) + + if 'supportedLocales' in realmrep: + realmrep['supportedLocales'] = sorted(realmrep['supportedLocales']) + + return realmrep + + +def sanitize_cr(realmrep): + """ Removes potentially sensitive details from a realm representation.
+ + :param realmrep: the realmrep dict to be sanitized + :return: sanitized realmrep dict + """ + result = realmrep.copy() + if 'secret' in result: + result['secret'] = '********' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes'] = result['attributes'].copy() + result['attributes']['saml.signing.private.key'] = '********' + return normalise_cr(result) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + realm=dict(type='str'), + access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), + access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), + access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), + access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), + access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), + account_theme=dict(type='str', aliases=['accountTheme']), + action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), + action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), + admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), + admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), + admin_theme=dict(type='str', aliases=['adminTheme']), + attributes=dict(type='dict'), + browser_flow=dict(type='str', aliases=['browserFlow']), + browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), + brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), + brute_force_strategy=dict(type='str', choices=['LINEAR', 'MULTIPLE'], aliases=['bruteForceStrategy']), + client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), + client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), + default_default_client_scopes=dict(type='list', elements='str', aliases=['defaultDefaultClientScopes']), + default_groups=dict(type='list', elements='str', aliases=['defaultGroups']), + default_locale=dict(type='str', aliases=['defaultLocale']), + default_optional_client_scopes=dict(type='list', elements='str', aliases=['defaultOptionalClientScopes']), + default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), + default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), + direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), + display_name=dict(type='str', aliases=['displayName']), + display_name_html=dict(type='str', aliases=['displayNameHtml']), + docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), + duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), + edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), + email_theme=dict(type='str', aliases=['emailTheme']), + enabled=dict(type='bool'), + enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), + events_enabled=dict(type='bool', aliases=['eventsEnabled']), + events_expiration=dict(type='int', aliases=['eventsExpiration']), + events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), + failure_factor=dict(type='int', 
aliases=['failureFactor']), + internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), + login_theme=dict(type='str', aliases=['loginTheme']), + login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), + max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), + max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), + max_temporary_lockouts=dict(type='int', aliases=['maxTemporaryLockouts']), + minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), + not_before=dict(type='int', aliases=['notBefore']), + offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), + offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), + offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), + otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), + otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), + otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), + otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), + otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), + otp_policy_type=dict(type='str', aliases=['otpPolicyType']), + otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), + password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), + organizations_enabled=dict(type='bool', aliases=['organizationsEnabled']), + permanent_lockout=dict(type='bool', aliases=['permanentLockout']), + quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), + refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), + registration_allowed=dict(type='bool', aliases=['registrationAllowed']), + registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), + registration_flow=dict(type='str', aliases=['registrationFlow']), + remember_me=dict(type='bool', aliases=['rememberMe']), + reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), + reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), + revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), + smtp_server=dict(type='dict', aliases=['smtpServer']), + ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), + sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), + sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), + sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), + sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), + supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), + user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), + verify_email=dict(type='bool', aliases=['verifyEmail']), + wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), + client_session_idle_timeout=dict(type='int', aliases=['clientSessionIdleTimeout']), + client_session_max_lifespan=dict(type='int', aliases=['clientSessionMaxLifespan']), + client_offline_session_idle_timeout=dict(type='int', aliases=['clientOfflineSessionIdleTimeout']), + client_offline_session_max_lifespan=dict(type='int', 
aliases=['clientOfflineSessionMaxLifespan']), + oauth2_device_code_lifespan=dict(type='int', aliases=['oauth2DeviceCodeLifespan']), + oauth2_device_polling_interval=dict(type='int', aliases=['oauth2DevicePollingInterval']), + web_authn_policy_rp_entity_name=dict(type='str', aliases=['webAuthnPolicyRpEntityName']), + web_authn_policy_signature_algorithms=dict(type='list', elements='str', aliases=['webAuthnPolicySignatureAlgorithms']), + web_authn_policy_rp_id=dict(type='str', aliases=['webAuthnPolicyRpId']), + web_authn_policy_attestation_conveyance_preference=dict(type='str', aliases=['webAuthnPolicyAttestationConveyancePreference']), + web_authn_policy_authenticator_attachment=dict(type='str', aliases=['webAuthnPolicyAuthenticatorAttachment']), + web_authn_policy_require_resident_key=dict(type='str', aliases=['webAuthnPolicyRequireResidentKey'], no_log=False), + web_authn_policy_user_verification_requirement=dict(type='str', aliases=['webAuthnPolicyUserVerificationRequirement']), + web_authn_policy_create_timeout=dict(type='int', aliases=['webAuthnPolicyCreateTimeout']), + web_authn_policy_avoid_same_authenticator_register=dict(type='bool', aliases=['webAuthnPolicyAvoidSameAuthenticatorRegister']), + web_authn_policy_acceptable_aaguids=dict(type='list', elements='str', aliases=['webAuthnPolicyAcceptableAaguids']), + web_authn_policy_extra_origins=dict(type='list', elements='str', aliases=['webAuthnPolicyExtraOrigins']), + web_authn_policy_passwordless_rp_entity_name=dict(type='str', aliases=['webAuthnPolicyPasswordlessRpEntityName']), + web_authn_policy_passwordless_signature_algorithms=dict( + type='list', elements='str', aliases=['webAuthnPolicyPasswordlessSignatureAlgorithms'], no_log=False + ), + web_authn_policy_passwordless_rp_id=dict(type='str', aliases=['webAuthnPolicyPasswordlessRpId']), + web_authn_policy_passwordless_attestation_conveyance_preference=dict( + type='str', aliases=['webAuthnPolicyPasswordlessAttestationConveyancePreference'], no_log=False + ), + web_authn_policy_passwordless_authenticator_attachment=dict( + type='str', aliases=['webAuthnPolicyPasswordlessAuthenticatorAttachment'], no_log=False + ), + web_authn_policy_passwordless_require_resident_key=dict( + type='str', aliases=['webAuthnPolicyPasswordlessRequireResidentKey'], no_log=False + ), + web_authn_policy_passwordless_user_verification_requirement=dict( + type='str', aliases=['webAuthnPolicyPasswordlessUserVerificationRequirement'], no_log=False + ), + web_authn_policy_passwordless_create_timeout=dict(type='int', aliases=['webAuthnPolicyPasswordlessCreateTimeout']), + web_authn_policy_passwordless_avoid_same_authenticator_register=dict(type='bool', aliases=['webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister']), + web_authn_policy_passwordless_acceptable_aaguids=dict( + type='list', elements='str', aliases=['webAuthnPolicyPasswordlessAcceptableAaguids'], no_log=False + ), + web_authn_policy_passwordless_extra_origins=dict( + type='list', elements='str', aliases=['webAuthnPolicyPasswordlessExtraOrigins'], no_log=False + ), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'realm', 'enabled'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # 
Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + + # Convert module parameters to realm representation parameters (if they belong in there) + params_to_ignore = list(keycloak_argument_spec().keys()) + ['state'] + + # Filter and map the parameter names that apply to the realm + realm_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + # See whether the realm already exists in Keycloak + before_realm = kc.get_realm_by_id(realm=realm) + + if before_realm is None: + before_realm = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for realm_param in realm_params: + new_param_value = module.params.get(realm_param) + changeset[camel(realm_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_realm = before_realm.copy() + desired_realm.update(changeset)
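+ # Illustration only (assumed values, not output from a real run): if + # before_realm is {'realm': 'test', 'enabled': True} and the changeset is + # {'enabled': False}, desired_realm becomes {'realm': 'test', 'enabled': False}.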
+ result['proposed'] = sanitize_cr(changeset) + before_realm_sanitized = sanitize_cr(before_realm) + result['existing'] = before_realm_sanitized + + # Cater for when it doesn't exist (an empty dict) + if not before_realm: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Realm does not exist, doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(desired_realm)) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_realm(desired_realm) + after_realm = kc.get_realm_by_id(desired_realm['realm']) + + result['end_state'] = sanitize_cr(after_realm) + + result['msg'] = 'Realm %s has been created.' % desired_realm['realm'] + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # doing an update + result['changed'] = True + if module.check_mode: + # We can only compare the current realm with the proposed updates we have + before_norm = normalise_cr(before_realm) + desired_norm = normalise_cr(desired_realm) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = (before_norm != desired_norm) + + module.exit_json(**result) + + # do the update + kc.update_realm(desired_realm, realm=realm) + + after_realm = kc.get_realm_by_id(realm=realm) + + if before_realm == after_realm: + result['changed'] = False + + result['end_state'] = sanitize_cr(after_realm) + + if module._diff: + result['diff'] = dict(before=before_realm_sanitized, + after=sanitize_cr(after_realm)) + + result['msg'] = 'Realm %s has been updated.' % desired_realm['realm'] + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_realm_sanitized, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_realm(realm=realm) + + result['proposed'] = {} + result['end_state'] = {} + + result['msg'] = 'Realm %s has been deleted.' % before_realm['realm'] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm_info.py b/plugins/modules/keycloak_realm_info.py new file mode 100644 index 0000000000..db16970046 --- /dev/null +++ b/plugins/modules/keycloak_realm_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm_info + +short_description: Allows obtaining Keycloak realm public information using Keycloak API + +version_added: 4.3.0 + +description: + - This module allows you to get Keycloak realm public information using the Keycloak REST API. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + +options: + auth_keycloak_url: + description: + - URL to the Keycloak instance. + type: str + required: true + aliases: + - url + validate_certs: + description: + - Verify TLS certificates (do not disable this in production). + type: bool + default: true + + realm: + type: str + description: + - The Keycloak realm ID. + default: 'master' + +author: + - Fynn Chen (@fynncfchen) +""" + +EXAMPLES = r""" +- name: Get a Keycloak public key + community.general.keycloak_realm_info: + realm: MyCustomRealm + auth_keycloak_url: https://auth.example.com/auth + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +realm_info: + description: + - Representation of the realm public information. + returned: always + type: dict + contains: + realm: + description: Realm ID. + type: str + returned: always + sample: MyRealm + public_key: + description: Public key of the realm. + type: str + returned: always + sample: MIIBIjANBgkqhkiG9w0BAQEFAAO... + token-service: + description: Token endpoint URL. + type: str + returned: always + sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect + account-service: + description: Account console URL. + type: str + returned: always + sample: https://auth.example.com/auth/realms/MyRealm/account + tokens-not-before: + description: The token not before.
+ type: int + returned: always + sample: 0 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = dict( + auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), + validate_certs=dict(type='bool', default=True), + + realm=dict(default='master'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict(changed=False, msg='', realm_info='') + + kc = KeycloakAPI(module, {}) + + realm = module.params.get('realm') + + realm_info = kc.get_realm_info_by_id(realm=realm) + + result['realm_info'] = realm_info + result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py new file mode 100644 index 0000000000..df9200016c --- /dev/null +++ b/plugins/modules/keycloak_realm_key.py @@ -0,0 +1,470 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm_key + +short_description: Allows administration of Keycloak realm keys using Keycloak API + +version_added: 7.5.0 + +description: + - This module allows the administration of Keycloak realm keys using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - This module is unable to detect changes to the actual cryptographic key after importing it. However, if some other property + is changed alongside the cryptographic key, then the key also changes as a side-effect, as the JSON payload needs to include + the private key. This can be considered either a bug or a feature, as the alternative would be to always update the realm + key whether it has changed or not. +attributes: + check_mode: + support: full + diff_mode: + support: partial + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the keycloak realm key. + - On V(present), the realm key is created (or updated if it exists already). + - On V(absent), the realm key is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the realm key to create. + type: str + required: true + force: + description: + - Enforce the state of the private key and certificate. This is not automatically the case as this module is unable + to determine the current state of the private key and thus cannot trigger an update based on an actual divergence. 
+ That said, a private key update may happen even if O(force=false) as a side-effect of other changes. + default: false + type: bool + parent_id: + description: + - The parent_id of the realm key. In practice, this is the name of the realm. + type: str + required: true + provider_id: + description: + - The name of the "provider ID" for the key. + - The value V(rsa-enc) has been added in community.general 8.2.0. + choices: ['rsa', 'rsa-enc'] + default: 'rsa' + type: str + config: + description: + - Dict specifying the key and its properties. + type: dict + suboptions: + active: + description: + - Whether the key is active or inactive. Not to be confused with the state of the Ansible resource managed by the + O(state) parameter. + default: true + type: bool + enabled: + description: + - Whether the key is enabled or disabled. Not to be confused with the state of the Ansible resource managed by the + O(state) parameter. + default: true + type: bool + priority: + description: + - The priority of the key. + type: int + required: true + algorithm: + description: + - Key algorithm. + - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5), V(RSA-OAEP), V(RSA-OAEP-256) have been + added in community.general 8.2.0. + default: RS256 + choices: ['RS256', 'RS384', 'RS512', 'PS256', 'PS384', 'PS512', 'RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256'] + type: str + private_key: + description: + - The private key as an ASCII string. Contents of the key must match O(config.algorithm) and O(provider_id). + - Please note that the module cannot detect whether the private key specified differs from the current state's private + key. Use O(force=true) to force the module to update the private key if you expect it to be updated. + required: true + type: str + certificate: + description: + - A certificate signed with the private key as an ASCII string. Contents of the key must match O(config.algorithm) + and O(provider_id). + - If you want Keycloak to automatically generate a certificate using your private key then set this to an empty + string. + required: true + type: str +notes: + - The current value of the private key cannot be fetched from Keycloak. Therefore, comparing the desired state to the current + state is not possible. + - If O(config.certificate) is not explicitly provided, it is dynamically created by Keycloak. Therefore, comparing the current + state of the certificate to the desired state (which may be empty) is not possible. + - Due to the private key and certificate options the module is B(not fully idempotent). You can use O(force=true) to force + an update if you know that the private key might have changed.
+extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Manage Keycloak realm key (certificate autogenerated by Keycloak) + community.general.keycloak_realm_key: + name: custom + state: present + parent_id: master + provider_id: rsa + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + config: + private_key: "{{ private_key }}" + certificate: "" + enabled: true + active: true + priority: 120 + algorithm: RS256 +- name: Manage Keycloak realm key and certificate + community.general.keycloak_realm_key: + name: custom + state: present + parent_id: master + provider_id: rsa + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + config: + private_key: "{{ private_key }}" + certificate: "{{ certificate }}" + enabled: true + active: true + priority: 120 + algorithm: RS256 +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the keycloak_realm_key after module execution. + returned: on success + type: dict + contains: + id: + description: ID of the realm key. + type: str + returned: when O(state=present) + sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4 + name: + description: Name of the realm key. + type: str + returned: when O(state=present) + sample: mykey + parentId: + description: ID of the realm this key belongs to. + type: str + returned: when O(state=present) + sample: myrealm + providerId: + description: The ID of the key provider. + type: str + returned: when O(state=present) + sample: rsa + providerType: + description: The type of provider. + type: str + returned: when O(state=present) + config: + description: Realm key configuration. 
+ type: dict + returned: when O(state=present) + sample: + { + "active": [ + "true" + ], + "algorithm": [ + "RS256" + ], + "enabled": [ + "true" + ], + "priority": [ + "140" + ] + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import urlencode +from copy import deepcopy + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + force=dict(type='bool', default=False), + parent_id=dict(type='str', required=True), + provider_id=dict(type='str', default='rsa', choices=['rsa', 'rsa-enc']), + config=dict( + type='dict', + options=dict( + active=dict(type='bool', default=True), + enabled=dict(type='bool', default=True), + priority=dict(type='int', required=True), + algorithm=dict( + type="str", + default="RS256", + choices=[ + "RS256", + "RS384", + "RS512", + "PS256", + "PS384", + "PS512", + "RSA1_5", + "RSA-OAEP", + "RSA-OAEP-256", + ], + ), + private_key=dict(type='str', required=True, no_log=True), + certificate=dict(type='str', required=True) + ) + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + # Initialize the result object. Only "changed" seems to have special + # meaning for Ansible. + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the realm key if it is already + # present. This is only used for diff-mode. + before_realm_key = {} + before_realm_key['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force", "parent_id"] + + # Filter and map the parameters names that apply to the role + component_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + # We only support one component provider type in this module + provider_type = 'org.keycloak.keys.KeyProvider' + + # Build a proposed changeset from parameters given to this module + changeset = {} + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example private_key + # becomes privateKey. + # + # It also converts bool, str and int parameters into lists with a single + # entry of 'str' type. Bool values are also lowercased. This is required + # by Keycloak. 
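+ # A hedged illustration of the mapping described above (assumed input, not + # taken from a real run): a config of + # {'private_key': 'KEY', 'active': True, 'priority': 100} + # becomes + # {'privateKey': ['KEY'], 'active': ['true'], 'priority': ['100']}.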
+ # + for component_param in component_params: + if component_param == 'config': + for config_param in module.params.get('config'): + changeset['config'][camel(config_param)] = [] + raw_value = module.params.get('config')[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = str(raw_value) + + changeset['config'][camel(config_param)].append(value) + else: + # No need for camel case here as these are one-word parameters + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # As provider_type is not a module parameter we have to add it to the + # changeset explicitly. + changeset['providerType'] = provider_type + + # Make a deep copy of the changeset. This is used when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # It is not possible to compare current keys to desired keys, because the + # certificate parameter is a base64-encoded binary blob created on the fly + # when a key is added. Moreover, the Keycloak Admin API does not seem to + # return the value of the private key for comparison. So, in effect, we + # just have to ignore changes to the keys. However, as the privateKey + # parameter needs to be present in the JSON payload, any changes done to any + # other parameters (e.g. config.priority) will trigger an update of the keys + # as a side-effect. + del changeset_copy['config']['privateKey'] + del changeset_copy['config']['certificate'] + + # Make it easier to refer to current module parameters + name = module.params.get('name') + force = module.params.get('force') + state = module.params.get('state') + enabled = module.params.get('enabled') + provider_id = module.params.get('provider_id') + parent_id = module.params.get('parent_id') + + # Get a list of all Keycloak components that are of key provider type. + realm_keys = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present, get its key ID. Confusingly, the key ID is + # also known as the Provider ID. + key_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the key was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter, then assume the key is + # already present. + for key in realm_keys: + if key['name'] == name: + key_id = key['id'] + changeset['id'] = key_id + changeset_copy['id'] = key_id + + # Compare top-level parameters + for param, value in changeset.items(): + before_realm_key[param] = key[param] + + if changeset_copy[param] != key[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, key[param], changeset_copy[param]) + result['changed'] = True + + # Compare parameters under the "config" key + for p, v in changeset_copy['config'].items(): + before_realm_key['config'][p] = key['config'][p] + if changeset_copy['config'][p] != key['config'][p]: + changes += "config.%s: %s -> %s, " % (p, key['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Sanitize linefeeds for the privateKey. Without this the JSON payload + # will be invalid.
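+ # (Hedged example of the problem: a PEM key passed in through extra-vars may + # arrive with the literal two-character sequence '\n' instead of real + # newlines; the replace() calls below turn those back into line breaks.)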
+ changeset['config']['privateKey'][0] = changeset['config']['privateKey'][0].replace('\\n', '\n') + changeset['config']['certificate'][0] = changeset['config']['certificate'][0].replace('\\n', '\n') + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the key). + if key_id and state == 'present': + if result['changed']: + if module._diff: + del before_realm_key['config']['privateKey'] + del before_realm_key['config']['certificate'] + result['diff'] = dict(before=before_realm_key, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Realm key %s would be changed: %s" % (name, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Realm key %s changed: %s" % (name, changes.strip(", ")) + elif not result['changed'] and force: + kc.update_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Realm key %s was forcibly updated" % (name) + else: + result['msg'] = "Realm key %s was in sync" % (name) + + result['end_state'] = changeset_copy + elif key_id and state == 'absent': + if module._diff: + del before_realm_key['config']['privateKey'] + del before_realm_key['config']['certificate'] + result['diff'] = dict(before=before_realm_key, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Realm key %s would be deleted" % (name) + else: + kc.delete_component(key_id, parent_id) + result['changed'] = True + result['msg'] = "Realm key %s deleted" % (name) + + result['end_state'] = {} + elif not key_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Realm key %s would be created" % (name) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Realm key %s created" % (name) + + result['end_state'] = changeset_copy + elif not key_id and state == 'absent': + result['changed'] = False + result['msg'] = "Realm key %s not present" % (name) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py new file mode 100644 index 0000000000..71ce5acffa --- /dev/null +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_realm_keys_metadata_info + +short_description: Allows obtaining Keycloak realm keys metadata using Keycloak API + +version_added: 9.3.0 + +description: + - This module allows you to get Keycloak realm keys metadata using the Keycloak REST API. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). +attributes: + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - The Keycloak realm to fetch keys metadata from.
+ default: 'master' + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Thomas Bach (@thomasbach-dev) +""" + +EXAMPLES = r""" +- name: Fetch Keys metadata + community.general.keycloak_realm_keys_metadata_info: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + delegate_to: localhost + register: keycloak_keys_metadata + +- name: Write the Keycloak keys certificate into a file + ansible.builtin.copy: + dest: /tmp/keycloak.cert + content: | + {{ keys_metadata['keycloak_keys_metadata']['keys'] + | selectattr('algorithm', 'equalto', 'RS256') + | map(attribute='certificate') + | first + }} + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +keys_metadata: + description: + + - Representation of the realm keys metadata (see U(https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)). + returned: always + type: dict + contains: + active: + description: A mapping (that is, a dict) from key algorithms to UUIDs. + type: dict + returned: always + keys: + description: A list of dicts providing detailed information on the keys. + type: list + elements: dict + returned: always +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token, keycloak_argument_spec) + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default="master"), + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg="", keys_metadata="") + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get("realm") + + keys_metadata = kc.get_realm_keys_metadata_by_id(realm=realm) + + result["keys_metadata"] = keys_metadata + result["msg"] = "Get realm keys metadata successful for ID {realm}".format( + realm=realm + ) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py new file mode 100644 index 0000000000..b8034a260b --- /dev/null +++ b/plugins/modules/keycloak_realm_rolemapping.py @@ -0,0 +1,380 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm_rolemapping + +short_description: Allows administration of Keycloak realm role mappings into groups with the Keycloak API + +version_added: 8.2.0 + +description: + - This module allows you to add, remove or modify Keycloak realm role mappings into groups with the Keycloak REST API. 
It + requires access to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite + access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client + definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a group_rolemapping, where possible provide the role ID to the module. This avoids an extra API lookup + to translate the name into the role ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the realm_rolemapping. + - On V(present), the realm_rolemapping is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the realm_rolemapping is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which this role_representation resides. + default: 'master' + + group_name: + type: str + description: + - Name of the group to be mapped. + - This parameter is required unless O(gid) is provided; specifying O(gid) instead saves an API call. + parents: + type: list + description: + - List of parent groups for the group, sorted from top to bottom. + - Set this if your group is a subgroup and you do not provide the GID in O(gid). + elements: dict + suboptions: + id: + type: str + description: + - Identify parent by ID. + - Needs fewer API calls than using O(parents[].name). + - A deep parent chain can be started at any point as long as the first parent is given as an ID. + - Note that in principle both ID and name can be specified at the same time, but the current implementation only + uses one of them, with the ID being preferred. + name: + type: str + description: + - Identify parent by name. + - Needs more internal API calls than using O(parents[].id) to map names to IDs under the hood. + - When giving a parent chain with only names, it must be complete up to the top. + - Note that in principle both ID and name can be specified at the same time, but the current implementation only + uses one of them, with the ID being preferred. + gid: + type: str + description: + - ID of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + roles: + description: + - Roles to be mapped to the group. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role_representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. + - This parameter is not required for updating or deleting a role_representation but providing it reduces the number + of API calls required.
+extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Gaëtan Daubresse (@Gaetan2907) + - Marius Huysamen (@mhuysamen) + - Alexander Groß (@agross) +""" + +EXAMPLES = r""" +- name: Map a realm role to a group, authentication with credentials + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a realm role to a group, authentication with token + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a realm role to a subgroup, authentication with token + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + group_name: subgroup1 + parents: + - name: parent-group + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap realm role from a group + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role role1 assigned to group group1." + +proposed: + description: Representation of proposed realm role mapping. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: + - Representation of existing realm role mapping. + - The sample is truncated. + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: + - Representation of realm role mapping after module execution. + - The sample is truncated.
+ returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, +) +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + gid=dict(type='str'), + group_name=dict(type='str'), + parents=dict( + type='list', elements='dict', + options=dict( + id=dict(type='str'), + name=dict(type='str') + ), + ), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + gid = module.params.get('gid') + group_name = module.params.get('group_name') + roles = module.params.get('roles') + parents = module.params.get('parents') + + # Check the parameters + if gid is None and group_name is None: + module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + + # Get the potential missing parameters + if gid is None: + group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents) + if group_rep is not None: + gid = group_rep['id'] + else: + module.fail_json(msg='Could not fetch group %s.' % group_name) + else: + group_rep = kc.get_group_by_groupid(gid, realm=realm) + + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role in roles: + if role['name'] is None and role['id'] is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role['id'] is None: + role_rep = kc.get_realm_role(role['name'], realm=realm) + if role_rep is not None: + role['id'] = role_rep['id'] + else: + module.fail_json(msg='Could not fetch realm role %s by name.' % (role['name'])) + # Fetch missing role_name + else: + for realm_role in kc.get_realm_roles(realm=realm): + if realm_role['id'] == role['id']: + role['name'] = realm_role['name'] + break + + if role['name'] is None: + module.fail_json(msg='Could not fetch realm role %s by ID.' % (role['id'])) + + assigned_roles_before = group_rep.get('realmRoles', []) + + result['existing'] = assigned_roles_before + result['proposed'] = list(assigned_roles_before) if assigned_roles_before else [] + + update_roles = [] + for role in roles: + # Fetch roles to assign if state present + if state == 'present': + if any(assigned == role['name'] for assigned in assigned_roles_before): + pass + else: + update_roles.append({
+ 'id': role['id'], + 'name': role['name'], + }) + result['proposed'].append(role['name']) + # Fetch roles to remove if state absent + else: + if any(assigned == role['name'] for assigned in assigned_roles_before): + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + if role['name'] in result['proposed']: # Handle double removal + result['proposed'].remove(role['name']) + + if len(update_roles): + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=result['proposed']) + if module.check_mode: + module.exit_json(**result) + + if state == 'present': + # Assign roles + kc.add_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm) + result['msg'] = 'Realm roles %s assigned to groupId %s.' % (update_roles, gid) + else: + # Remove mapping of role + kc.delete_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm) + result['msg'] = 'Realm roles %s removed from groupId %s.' % (update_roles, gid) + + if gid is None: + assigned_roles_after = kc.get_group_by_name(group_name, realm=realm, parents=parents).get('realmRoles', []) + else: + assigned_roles_after = kc.get_group_by_groupid(gid, realm=realm).get('realmRoles', []) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are %s with group %s.' % (roles, 'mapped' if state == 'present' else 'not mapped', group_name) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py deleted file mode 120000 index 48554b3a5f..0000000000 --- a/plugins/modules/keycloak_role.py +++ /dev/null @@ -1 +0,0 @@ -./identity/keycloak/keycloak_role.py \ No newline at end of file diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py new file mode 100644 index 0000000000..1480965ab6 --- /dev/null +++ b/plugins/modules/keycloak_role.py @@ -0,0 +1,434 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Adam Goossens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_role + +short_description: Allows administration of Keycloak roles using Keycloak API + +version_added: 3.4.0 + +description: + - This module allows you to add, remove or modify Keycloak roles using the Keycloak REST API. It requires access to the + REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In + a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the + scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the role. 
+ - On V(present), the role is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the role is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + required: true + description: + - Name of the role. + - This parameter is required. + description: + type: str + description: + - The role description. + realm: + type: str + description: + - The Keycloak realm under which this role resides. + default: 'master' + + client_id: + type: str + description: + - If the role is a client role, the client ID under which it resides. + - If this parameter is absent, the role is considered a realm role. + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the role. + - Values may be single values (for example a string) or a list of strings. + composite: + description: + - If V(true), the role is a composition of other realm and/or client role. + default: false + type: bool + version_added: 7.1.0 + composites: + description: + - List of roles to include to the composite realm role. + - If the composite role is a client role, the C(clientId) (not ID of the client) must be specified. + default: [] + type: list + elements: dict + version_added: 7.1.0 + suboptions: + name: + description: + - Name of the role. This can be the name of a REALM role or a client role. + type: str + required: true + client_id: + description: + - Client ID if the role is a client role. Do not include this option for a REALM role. + - Use the client ID you can see in the Keycloak console, not the technical ID of the client. + type: str + required: false + aliases: + - clientId + state: + description: + - Create the composite if present, remove it if absent. 
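(Editorial aside.) The present/absent semantics described above come down to a changeset comparison: only parameters whose values differ from the existing role representation end up in the proposed update. A simplified, self-contained sketch of that comparison, assuming a `camel()` helper equivalent to the one in the collection's keycloak module_utils:

```python
import copy

def camel(words):
    # Assumed snake_case -> camelCase helper, mirroring the module_utils version.
    parts = words.split('_')
    return parts[0] + ''.join(part.capitalize() for part in parts[1:])

def build_changeset(params, before_role):
    """Keep only parameters whose values differ from the existing representation."""
    changeset = {}
    for param, value in params.items():
        if value is not None and before_role.get(camel(param)) != value:
            changeset[camel(param)] = copy.deepcopy(value)
    return changeset

print(build_changeset({'description': 'new text'}, {'description': 'old text'}))
# {'description': 'new text'}
```

An empty changeset then means no API call is needed, which is what makes the module idempotent.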
+ type: str + choices: + - present + - absent + default: present + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +""" + +EXAMPLES = r""" +- name: Create a Keycloak realm role, authentication with credentials + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak realm role, authentication with token + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Create a Keycloak client role + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + client_id: MyClient + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak role + community.general.keycloak_role: + name: my-role-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a keycloak role with some custom attributes + community.general.keycloak_role: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new-role + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role myrole has been updated" + +proposed: + description: Representation of proposed role. + returned: always + type: dict + sample: {"description": "My updated test description"} + +existing: + description: Representation of existing role. + returned: always + type: dict + sample: + { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } + +end_state: + description: Representation of role after module execution (sample is truncated). 
+ returned: on success + type: dict + sample: + { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My updated client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule +import copy + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + composites_spec = dict( + name=dict(type='str', required=True), + client_id=dict(type='str', aliases=['clientId']), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + description=dict(type='str'), + realm=dict(type='str', default='master'), + client_id=dict(type='str'), + attributes=dict(type='dict'), + composites=dict(type='list', default=[], options=composites_spec, elements='dict'), + composite=dict(type='bool', default=False), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + clientid = module.params.get('client_id') + name = module.params.get('name') + state = module.params.get('state') + + # attributes in Keycloak have their values returned as lists + # using the API. attributes is a dict, so we'll transparently convert + # the values to lists. 
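(Editorial aside.) The comment above describes the transparent conversion of scalar attribute values into single-element lists, mirroring how the Keycloak API returns attributes. A minimal standalone illustration of the same transformation, not part of the patch:

```python
def normalize_attributes(attributes):
    # Wrap scalar values in lists, as Keycloak returns every attribute as a list.
    return {key: val if isinstance(val, list) else [val] for key, val in attributes.items()}

print(normalize_attributes({"attrib1": "value1", "attrib3": ["with", "items"]}))
# {'attrib1': ['value1'], 'attrib3': ['with', 'items']}
```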
+ if module.params.get('attributes') is not None: + for key, val in module.params['attributes'].items(): + module.params['attributes'][key] = [val] if not isinstance(val, list) else val + + # Filter and map the parameters names that apply to the role + role_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if clientid is None: + before_role = kc.get_realm_role(name, realm) + else: + before_role = kc.get_client_role(name, clientid, realm) + + if before_role is None: + before_role = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for param in role_params: + new_param_value = module.params.get(param) + old_value = before_role[param] if param in before_role else None + if new_param_value != old_value: + changeset[camel(param)] = copy.deepcopy(new_param_value) + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_role = copy.deepcopy(before_role) + desired_role.update(changeset) + + result['proposed'] = changeset + result['existing'] = before_role + + # Cater for when it doesn't exist (an empty dict) + if not before_role: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Role does not exist, doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new role') + + if module._diff: + result['diff'] = dict(before='', after=desired_role) + + if module.check_mode: + module.exit_json(**result) + + # create it + if clientid is None: + kc.create_realm_role(desired_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.create_client_role(desired_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + if after_role['composite']: + after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) + + result['end_state'] = after_role + + result['msg'] = 'Role {name} has been created'.format(name=name) + module.exit_json(**result) + + else: + if state == 'present': + compare_exclude = ['clientId'] + if 'composites' in desired_role and isinstance(desired_role['composites'], list) and len(desired_role['composites']) > 0: + composites = kc.get_role_composites(rolerep=before_role, clientid=clientid, realm=realm) + before_role['composites'] = [] + for composite in composites: + before_composite = {} + if composite['clientRole']: + composite_client = kc.get_client_by_id(id=composite['containerId'], realm=realm) + before_composite['client_id'] = composite_client['clientId'] + else: + before_composite['client_id'] = None + before_composite['name'] = composite['name'] + before_composite['state'] = 'present' + before_role['composites'].append(before_composite) + else: + compare_exclude.append('composites') + # Process an update + # no changes + if is_struct_included(desired_role, before_role, exclude=compare_exclude): + result['changed'] = False + result['end_state'] = desired_role + result['msg'] = "No changes required to role {name}.".format(name=name) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role,
after=desired_role) + + if module.check_mode: + module.exit_json(**result) + + # do the update + if clientid is None: + kc.update_realm_role(desired_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.update_client_role(desired_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + if after_role['composite']: + after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) + + result['end_state'] = after_role + + result['msg'] = "Role {name} has been updated".format(name=name) + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + if clientid is None: + kc.delete_realm_role(name, realm) + else: + kc.delete_client_role(name, clientid, realm) + + result['end_state'] = {} + + result['msg'] = "Role {name} has been deleted".format(name=name) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py new file mode 100644 index 0000000000..8ff657e322 --- /dev/null +++ b/plugins/modules/keycloak_user.py @@ -0,0 +1,539 @@ +#!/usr/bin/python + +# Copyright (c) 2019, INSPQ (@elfelip) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_user +short_description: Create and configure a user in Keycloak +description: + - This module creates, removes, or updates Keycloak users. +version_added: 7.1.0 +options: + auth_username: + aliases: [] + realm: + description: + - The name of the realm in which the user resides. + default: master + type: str + username: + description: + - Username for the user. + required: true + type: str + id: + description: + - ID of the user on the Keycloak server if known. + type: str + enabled: + description: + - Whether the user is enabled. + type: bool + email_verified: + description: + - Whether the user's email address is marked as verified. + default: false + type: bool + aliases: + - emailVerified + first_name: + description: + - The user's first name. + required: false + type: str + aliases: + - firstName + last_name: + description: + - The user's last name. + required: false + type: str + aliases: + - lastName + email: + description: + - User email. + required: false + type: str + federation_link: + description: + - Federation Link. + required: false + type: str + aliases: + - federationLink + service_account_client_id: + description: + - Client ID of the client application for which this user is the service account. + required: false + type: str + aliases: + - serviceAccountClientId + client_consents: + description: + - List of client consents granted by the user. + type: list + elements: dict + default: [] + aliases: + - clientConsents + suboptions: + client_id: + description: + - Client ID of the client role. Not the technical ID of the client. + type: str + required: true + aliases: + - clientId + roles: + description: + - List of client roles to assign to the user. + type: list + required: true + elements: str + groups: + description: + - List of groups for the user. + - Groups can be referenced by their name, like V(staff), or their path, like V(/staff/engineering). The path syntax + allows you to reference subgroups, which is not possible otherwise.
+ - Using the path is possible since community.general 10.6.0. + type: list + elements: dict + default: [] + suboptions: + name: + description: + - Name of the group. + type: str + state: + description: + - Control whether the user must be a member of this group or not. + choices: ["present", "absent"] + default: present + type: str + credentials: + description: + - User credentials. + default: [] + type: list + elements: dict + suboptions: + type: + description: + - Credential type. + type: str + required: true + value: + description: + - Value of the credential. + type: str + required: true + temporary: + description: + - If V(true), the users are required to reset their credentials at next login. + type: bool + default: false + required_actions: + description: + - List of required actions the user must complete on next login. + default: [] + type: list + elements: str + aliases: + - requiredActions + federated_identities: + description: + - List of identity providers (IdPs) linked to the user. + default: [] + type: list + elements: str + aliases: + - federatedIdentities + attributes: + description: + - List of user attributes. + required: false + type: list + elements: dict + suboptions: + name: + description: + - Name of the attribute. + type: str + values: + description: + - Values for the attribute as list. + type: list + elements: str + state: + description: + - Control whether the attribute must exist or not. + choices: ["present", "absent"] + default: present + type: str + access: + description: + - Access metadata for the user. + required: false + type: dict + disableable_credential_types: + description: + - List of credential types that can be disabled for the user. + default: [] + type: list + elements: str + aliases: + - disableableCredentialTypes + origin: + description: + - User origin. + required: false + type: str + self: + description: + - User self administration. + required: false + type: str + state: + description: + - Control whether the user should exist or not. + choices: ["present", "absent"] + default: present + type: str + force: + description: + - If V(true), the user is removed and recreated. + type: bool + default: false +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 +notes: + - The module does not modify the user ID of an existing user.
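(Editorial aside.) The per-attribute O(state) handling documented above means the module merges the attribute list you pass with what already exists in Keycloak: attributes present on the server but absent from the task input are carried over rather than dropped, so an update never silently erases unrelated attributes. A simplified sketch of that merge, using a hypothetical helper that is not part of the patch:

```python
import copy

def merge_attributes(existing, desired):
    # Carry over existing attributes that the task input did not mention.
    desired_names = {attr["name"] for attr in desired}
    merged = copy.deepcopy(desired)
    merged.extend(attr for attr in existing if attr["name"] not in desired_names)
    return merged

existing = [{"name": "attr1", "values": ["old"]}]
desired = [{"name": "attr2", "values": ["new"], "state": "present"}]
print(merge_attributes(existing, desired))
# [{'name': 'attr2', 'values': ['new'], 'state': 'present'}, {'name': 'attr1', 'values': ['old']}]
```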
+author: + - Philippe Gauthier (@elfelip) +""" + +EXAMPLES = r""" +- name: Create a user user1 + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + firstName: user1 + lastName: user1 + email: user1 + enabled: true + emailVerified: false + credentials: + - type: password + value: password + temporary: false + attributes: + - name: attr1 + values: + - value1 + state: present + - name: attr2 + values: + - value2 + state: absent + groups: + - name: group1 + state: present + state: present + +- name: Re-create a User + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + firstName: user1 + lastName: user1 + email: user1 + enabled: true + emailVerified: false + credentials: + - type: password + value: password + temporary: false + attributes: + - name: attr1 + values: + - value1 + state: present + - name: attr2 + values: + - value2 + state: absent + groups: + - name: group1 + state: present + state: present + +- name: Re-create a User + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + firstName: user1 + lastName: user1 + email: user1 + enabled: true + emailVerified: false + credentials: + - type: password + value: password + temporary: false + attributes: + - name: attr1 + values: + - value1 + state: present + - name: attr2 + values: + - value2 + state: absent + groups: + - name: group1 + state: present + state: present + force: true + +- name: Remove User + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + state: absent +""" + +RETURN = r""" +proposed: + description: Representation of the proposed user. + returned: on success + type: dict +existing: + description: Representation of the existing user. + returned: on success + type: dict +end_state: + description: Representation of the user after module execution. 
+ returned: on success + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule +import copy + + +def main(): + argument_spec = keycloak_argument_spec() + argument_spec['auth_username']['aliases'] = [] + credential_spec = dict( + type=dict(type='str', required=True), + value=dict(type='str', required=True), + temporary=dict(type='bool', default=False) + ) + client_consents_spec = dict( + client_id=dict(type='str', required=True, aliases=['clientId']), + roles=dict(type='list', elements='str', required=True) + ) + attributes_spec = dict( + name=dict(type='str'), + values=dict(type='list', elements='str'), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + groups_spec = dict( + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + meta_args = dict( + realm=dict(type='str', default='master'), + self=dict(type='str'), + id=dict(type='str'), + username=dict(type='str', required=True), + first_name=dict(type='str', aliases=['firstName']), + last_name=dict(type='str', aliases=['lastName']), + email=dict(type='str'), + enabled=dict(type='bool'), + email_verified=dict(type='bool', default=False, aliases=['emailVerified']), + federation_link=dict(type='str', aliases=['federationLink']), + service_account_client_id=dict(type='str', aliases=['serviceAccountClientId']), + attributes=dict(type='list', elements='dict', options=attributes_spec), + access=dict(type='dict'), + groups=dict(type='list', default=[], elements='dict', options=groups_spec), + disableable_credential_types=dict(type='list', default=[], aliases=['disableableCredentialTypes'], elements='str'), + required_actions=dict(type='list', default=[], aliases=['requiredActions'], elements='str'), + credentials=dict(type='list', default=[], elements='dict', options=credential_spec), + federated_identities=dict(type='list', default=[], aliases=['federatedIdentities'], elements='str'), + client_consents=dict(type='list', default=[], aliases=['clientConsents'], elements='dict', options=client_consents_spec), + origin=dict(type='str'), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default=False), + ) + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + force = module.params.get('force') + username = module.params.get('username') + groups = module.params.get('groups') + + # Filter and map the parameters names that apply to the user + user_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'force', 'groups'] and + module.params.get(x) is not None] + + before_user = kc.get_user_by_username(username=username, 
realm=realm) + + if before_user is None: + before_user = {} + + changeset = {} + + for param in user_params: + new_param_value = module.params.get(param) + if param == 'attributes' and param in before_user: + old_value = kc.convert_keycloak_user_attributes_dict_to_module_list(attributes=before_user['attributes']) + else: + old_value = before_user[param] if param in before_user else None + if new_param_value != old_value: + if old_value is not None and param == 'attributes': + for old_attribute in old_value: + old_attribute_found = False + for new_attribute in new_param_value: + if new_attribute['name'] == old_attribute['name']: + old_attribute_found = True + if not old_attribute_found: + new_param_value.append(copy.deepcopy(old_attribute)) + if isinstance(new_param_value, dict): + changeset[camel(param)] = copy.deepcopy(new_param_value) + else: + changeset[camel(param)] = new_param_value + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_user = copy.deepcopy(before_user) + desired_user.update(changeset) + + result['proposed'] = changeset + result['existing'] = before_user + + changed = False + + # Cater for when it doesn't exist (an empty dict) + if state == 'absent': + if not before_user: + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'User does not exist, doing nothing.' + module.exit_json(**result) + else: + # Delete user + kc.delete_user(user_id=before_user['id'], realm=realm) + result["msg"] = 'User %s deleted' % (before_user['username']) + changed = True + + else: + after_user = {} + if force and before_user: # If the force option is set to true + # Delete the existing user + kc.delete_user(user_id=before_user["id"], realm=realm) + + if not before_user or force: + # Process a creation + changed = True + + if username is None: + module.fail_json(msg='username must be specified when creating a new user') + + if module._diff: + result['diff'] = dict(before='', after=desired_user) + + if module.check_mode: + module.exit_json(**result) + # Create the user + after_user = kc.create_user(userrep=desired_user, realm=realm) + result["msg"] = 'User %s created' % (desired_user['username']) + # Add user ID to new representation + desired_user['id'] = after_user["id"] + else: + excludes = [ + "access", + "notBefore", + "createdTimestamp", + "totp", + "credentials", + "disableableCredentialTypes", + "groups", + "clientConsents", + "federatedIdentities", + "requiredActions"] + # Add user ID to new representation + desired_user['id'] = before_user["id"] + + # Compare users + if not (is_struct_included(desired_user, before_user, excludes)): # If the desired user differs from the existing user + # Update the user + after_user = kc.update_user(userrep=desired_user, realm=realm) + changed = True + + # set user groups + if kc.update_user_groups_membership(userrep=desired_user, groups=groups, realm=realm): + changed = True + # Get the user groups + after_user["groups"] = kc.get_user_groups(user_id=desired_user["id"], realm=realm) + result["end_state"] = after_user + if changed: + result["msg"] = 'User %s updated' % (desired_user['username']) + else: + result["msg"] = 'No changes made for user %s' % (desired_user['username']) + + result['changed'] = changed + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_user_federation.py
b/plugins/modules/keycloak_user_federation.py deleted file mode 120000 index e996a5c87d..0000000000 --- a/plugins/modules/keycloak_user_federation.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_user_federation.py \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py new file mode 100644 index 0000000000..c856e31d29 --- /dev/null +++ b/plugins/modules/keycloak_user_federation.py @@ -0,0 +1,1102 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_user_federation + +short_description: Allows administration of Keycloak user federations using Keycloak API + +version_added: 3.7.0 + +description: + - This module allows you to add, remove or modify Keycloak user federations using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the user federation. + - On V(present), the user federation is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the user federation is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this user federation resides. + default: 'master' + type: str + + id: + description: + - The unique ID for this user federation. If left empty, the user federation is searched by its O(name). + type: str + + name: + description: + - Display name of provider when linked in admin console. + type: str + + provider_id: + description: + - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd). Custom user storage providers + can also be used. + aliases: + - providerId + type: str + + provider_type: + description: + - Component type for user federation (only supported value is V(org.keycloak.storage.UserStorageProvider)). + aliases: + - providerType + default: org.keycloak.storage.UserStorageProvider + type: str + + parent_id: + description: + - Unique ID for the parent of this user federation. Realm ID is automatically used if left blank. + aliases: + - parentId + type: str + + remove_unspecified_mappers: + description: + - Remove mappers that are not specified in the configuration for this federation. + - Set to V(false) to keep mappers that are not listed in O(mappers). + type: bool + default: true + version_added: 9.4.0 + + bind_credential_update_mode: + description: + - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. Comparing the redacted + value with the desired value always evaluates to not equal. 
This means the before and desired states are never equal + if the parameter is set. + - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. Because of the + redacted value returned by Keycloak the module always detects a change and makes an update if a O(config.bindCredential) + value is set. + - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the desired state. + The value of O(config.bindCredential) is only updated if there are other changes to the user federation that require + an update. + type: str + default: always + choices: + - always + - only_indirect + version_added: 9.5.0 + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). + Examples are given below for V(ldap), V(kerberos) and V(sssd). It is easiest to obtain valid config values by dumping + an already-existing user federation configuration through check-mode in the RV(existing) field. + - The value V(sssd) has been supported since community.general 4.2.0. + type: dict + suboptions: + enabled: + description: + - Enable/disable this user federation. + default: true + type: bool + + priority: + description: + - Priority of provider when doing a user lookup. Lowest first. + default: 0 + type: int + + importEnabled: + description: + - If V(true), LDAP users are imported into Keycloak DB and synced by the configured sync policies. + default: true + type: bool + + editMode: + description: + - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data is synced back to LDAP on demand. V(UNSYNCED) means + user data is imported, but not synced back to LDAP. + type: str + choices: + - READ_ONLY + - WRITABLE + - UNSYNCED + + syncRegistrations: + description: + - Should newly created users be created within LDAP store? Priority affects which provider is chosen to sync the + new user. + default: false + type: bool + + vendor: + description: + - LDAP vendor (provider). + - Use short name. For instance, write V(rhds) for "Red Hat Directory Server". + type: str + + usernameLDAPAttribute: + description: + - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server vendors it can be V(uid). For + Active directory it can be V(sAMAccountName) or V(cn). The attribute should be filled for all LDAP user records + you want to import from LDAP to Keycloak. + type: str + + rdnLDAPAttribute: + description: + - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. Usually it is the same as Username + LDAP attribute, however it is not required. For example for Active directory, it is common to use V(cn) as RDN + attribute when username attribute might be V(sAMAccountName). + type: str + + uuidLDAPAttribute: + description: + - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects in LDAP. For many LDAP server + vendors, it is V(entryUUID); however some are different. For example for Active directory it should be V(objectGUID). + If your LDAP server does not support the notion of UUID, you can use any other attribute that is supposed to be + unique among LDAP users in tree. + type: str + + userObjectClasses: + description: + - All values of LDAP objectClass attribute for users in LDAP divided by comma. For example V(inetOrgPerson, organizationalPerson).
Newly created Keycloak users are written to LDAP with all those object classes and existing LDAP user records + are found only if they contain all those object classes. + type: str + + connectionUrl: + description: + - Connection URL to your LDAP server. + type: str + + usersDn: + description: + - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users. + type: str + + customUserSearchFilter: + description: + - Additional LDAP Filter for filtering searched users. Leave this empty if you do not need an additional filter. + type: str + + searchScope: + description: + - For one level, the search applies only for users in the DNs specified by User DNs. For subtree, the search applies + to the whole subtree. See LDAP documentation for more details. + default: '1' + type: str + choices: + - '1' + - '2' + + authType: + description: + - Type of the Authentication method used during LDAP Bind operation. It is used in most of the requests sent to + the LDAP server. + default: 'none' + type: str + choices: + - none + - simple + + bindDn: + description: + - DN of LDAP user which is used by Keycloak to access LDAP server. + type: str + + bindCredential: + description: + - Password of LDAP admin. + type: str + + startTls: + description: + - Encrypts the connection to LDAP using STARTTLS, which disables connection pooling. + default: false + type: bool + + usePasswordModifyExtendedOp: + description: + - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify extended operation usually requires + that the LDAP user already has a password in the LDAP server. So when this is used with 'Sync Registrations', it can + be useful to also add a 'Hardcoded LDAP attribute mapper' with a randomly generated initial password. + default: false + type: bool + + validatePasswordPolicy: + description: + - Determines if Keycloak should validate the password with the realm password policy before updating it. + default: false + type: bool + + trustEmail: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. + default: false + type: bool + + useTruststoreSpi: + description: + - Specifies whether LDAP connection uses the truststore SPI with the truststore configured in standalone.xml/domain.xml. + V(always) means that it is always used. V(never) means that it is not used. V(ldapsOnly) means that it is used + if your connection URL uses ldaps. + - Note even if standalone.xml/domain.xml is not configured, the default Java cacerts or certificate specified by + C(javax.net.ssl.trustStore) property is used. + default: ldapsOnly + type: str + choices: + - always + - ldapsOnly + - never + + connectionTimeout: + description: + - LDAP Connection Timeout in milliseconds. + type: int + + readTimeout: + description: + - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations. + type: int + + pagination: + description: + - Whether the LDAP server supports pagination. + default: true + type: bool + + connectionPooling: + description: + - Determines if Keycloak should use connection pooling for accessing the LDAP server. + default: true + type: bool + + connectionPoolingAuthentication: + description: + - A list of space-separated authentication types of connections that may be pooled. + type: str + choices: + - none + - simple + - DIGEST-MD5 + + connectionPoolingDebug: + description: + - A string that indicates the level of debug output to produce.
Example valid values are V(fine) (trace connection + creation and removal) and V(all) (all debugging information). + type: str + + connectionPoolingInitSize: + description: + - The number of connections per connection identity to create when initially creating a connection for the identity. + type: int + + connectionPoolingMaxSize: + description: + - The maximum number of connections per connection identity that can be maintained concurrently. + type: int + + connectionPoolingPrefSize: + description: + - The preferred number of connections per connection identity that should be maintained concurrently. + type: int + + connectionPoolingProtocol: + description: + - A list of space-separated protocol types of connections that may be pooled. Valid types are V(plain) and V(ssl). + type: str + + connectionPoolingTimeout: + description: + - The number of milliseconds that an idle connection may remain in the pool without being closed and removed from + the pool. + type: int + + allowKerberosAuthentication: + description: + - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users is + provisioned from this LDAP server. + default: false + type: bool + + kerberosRealm: + description: + - Name of kerberos realm. + type: str + + krbPrincipalAttribute: + description: + - Name of the LDAP attribute, which refers to Kerberos principal. This is used to look up the appropriate LDAP user after + successful Kerberos/SPNEGO authentication in Keycloak. When this is empty, the LDAP user is looked up based on + LDAP username corresponding to the first part of their Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG), + it assumes that LDAP username is V(john). + type: str + version_added: 8.1.0 + + serverPrincipal: + description: + - Full name of server principal for HTTP service including server and domain name. For example V(HTTP/host.foo.org@FOO.ORG). + Use V(*) to accept any service principal in the KeyTab file. + type: str + + keyTab: + description: + - Location of Kerberos KeyTab file containing the credentials of server principal. For example V(/etc/krb5.keytab). + type: str + + debug: + description: + - Enable/disable debug logging to standard output for Krb5LoginModule. + type: bool + + useKerberosForPasswordAuthentication: + description: + - Use the Kerberos login module to authenticate username/password against the Kerberos server instead of authenticating + against the LDAP server with the Directory Service API. + default: false + type: bool + + allowPasswordAuthentication: + description: + - Enable/disable possibility of username/password authentication against Kerberos database. + type: bool + + batchSizeForSync: + description: + - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction. + default: 1000 + type: int + + fullSyncPeriod: + description: + - Period for full synchronization in seconds. + default: -1 + type: int + + changedSyncPeriod: + description: + - Period for synchronization of changed or newly created LDAP users in seconds. + default: -1 + type: int + + updateProfileFirstLogin: + description: + - Update profile on first login. + type: bool + + cachePolicy: + description: + - Cache Policy for this storage provider. + type: str + default: 'DEFAULT' + choices: + - DEFAULT + - EVICT_DAILY + - EVICT_WEEKLY + - MAX_LIFESPAN + - NO_CACHE + + evictionDay: + description: + - Day of the week the entry is set to become invalid on.
+ type: str + + evictionHour: + description: + - Hour of day the entry is set to become invalid on. + type: str + + evictionMinute: + description: + - Minute of day the entry is set to become invalid on. + type: str + + maxLifespan: + description: + - Max lifespan of cache entry in milliseconds. + type: int + + referral: + description: + - Specifies if LDAP referrals should be followed or ignored. Please note that enabling referrals can slow down authentication + as it allows the LDAP server to decide which other LDAP servers to use. This could potentially include untrusted + servers. + type: str + choices: + - ignore + - follow + version_added: 9.5.0 + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. If no ID is given, the mapper is searched by name. + type: str + + parentId: + description: + - Unique ID for the parent of this mapper. ID of the user federation is automatically used if left blank. + type: str + + providerId: + description: + - The mapper type for this mapper (for instance V(user-attribute-ldap-mapper)). + type: str + + providerType: + description: + - Component type for this mapper. + type: str + default: org.keycloak.storage.ldap.mappers.LDAPStorageMapper + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper). + type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +""" + +EXAMPLES = r""" +- name: Create LDAP user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-ldap + state: present + provider_id: ldap + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + batchSizeForSync: 1000 + editMode: READ_ONLY + importEnabled: true + syncRegistrations: false + vendor: other + usernameLDAPAttribute: uid + rdnLDAPAttribute: uid + uuidLDAPAttribute: entryUUID + userObjectClasses: inetOrgPerson, organizationalPerson + connectionUrl: ldaps://ldap.example.com:636 + usersDn: ou=Users,dc=example,dc=com + authType: simple + bindDn: cn=directory reader + bindCredential: password + searchScope: 1 + validatePasswordPolicy: false + trustEmail: false + useTruststoreSpi: ldapsOnly + connectionPooling: true + pagination: true + allowKerberosAuthentication: false + debug: false + useKerberosForPasswordAuthentication: false + mappers: + - name: "full name" + providerId: "full-name-ldap-mapper" + providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + config: + ldap.full.name.attribute: cn + read.only: true + write.only: false + +- name: Create Kerberos user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-kerberos + state: present + provider_id: kerberos + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + kerberosRealm: EXAMPLE.COM + serverPrincipal: 
HTTP/host.example.com@EXAMPLE.COM + keyTab: keytab + allowPasswordAuthentication: false + updateProfileFirstLogin: false + +- name: Create sssd user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-sssd + state: present + provider_id: sssd + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + +- name: Delete user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-federation + state: absent +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." + +proposed: + description: Representation of proposed user federation. + returned: always + type: dict + sample: + { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "name": "ldap", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" + } + +existing: + description: Representation of existing user federation. 
+ returned: always + type: dict + sample: + { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "changedSyncPeriod": "-1", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "fullSyncPeriod": "-1", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "mappers": [ + { + "config": { + "always.read.value.from.ldap": "false", + "is.mandatory.in.ldap": "false", + "ldap.attribute": "mail", + "read.only": "true", + "user.model.attribute": "email" + }, + "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", + "name": "email", + "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "providerId": "user-attribute-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + } + ], + "name": "myfed", + "parentId": "myrealm", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" + } + +end_state: + description: Representation of user federation after module execution. + returned: on success + type: dict + sample: + { + "config": { + "allowPasswordAuthentication": "false", + "cachePolicy": "DEFAULT", + "enabled": "true", + "kerberosRealm": "EXAMPLE.COM", + "keyTab": "/etc/krb5.keytab", + "priority": "0", + "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", + "updateProfileFirstLogin": "false" + }, + "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", + "mappers": [], + "name": "kerberos", + "parentId": "myrealm", + "providerId": "kerberos", + "providerType": "org.keycloak.storage.UserStorageProvider" + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import urlencode +from copy import deepcopy + + +def normalize_kc_comp(comp): + if 'config' in comp: + # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; + # to make change detection and diff more accurate we set it again in the kc responses + if 'krbPrincipalAttribute' not in comp['config']: + comp['config']['krbPrincipalAttribute'] = [''] + + # kc stores a timestamp of the last sync in `lastSync` to time the periodic sync, it is removed to minimize diff/changes + comp['config'].pop('lastSync', None) + + +def sanitize(comp): + compcopy = deepcopy(comp) + if 'config' in compcopy: + compcopy['config'] = {k: v[0] for k, v in compcopy['config'].items()} + if 'bindCredential' in compcopy['config']: + compcopy['config']['bindCredential'] = '**********' + if 'mappers' in compcopy: + for mapper in compcopy['mappers']: + if 'config' in mapper: + mapper['config'] = {k: v[0] for k, v in mapper['config'].items()} + return compcopy + + +def main(): + """ + Module execution + + :return: + """ + 
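(Editorial aside.) The `sanitize()` helper defined above is what keeps secrets out of module output: component config values arrive from Keycloak as single-element lists, and the LDAP bind password is masked before any representation is returned to the user. A standalone illustration of the same idea, not part of the patch:

```python
def sanitize_config(config):
    # Flatten single-element list values and mask the LDAP bind password.
    flat = {key: val[0] for key, val in config.items()}
    if 'bindCredential' in flat:
        flat['bindCredential'] = '**********'
    return flat

print(sanitize_config({'bindDn': ['cn=directory reader'], 'bindCredential': ['secret']}))
# {'bindDn': 'cn=directory reader', 'bindCredential': '**********'}
```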
argument_spec = keycloak_argument_spec() + + config_spec = dict( + allowKerberosAuthentication=dict(type='bool', default=False), + allowPasswordAuthentication=dict(type='bool'), + authType=dict(type='str', choices=['none', 'simple'], default='none'), + batchSizeForSync=dict(type='int', default=1000), + bindCredential=dict(type='str', no_log=True), + bindDn=dict(type='str'), + cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), + changedSyncPeriod=dict(type='int', default=-1), + connectionPooling=dict(type='bool', default=True), + connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), + connectionPoolingDebug=dict(type='str'), + connectionPoolingInitSize=dict(type='int'), + connectionPoolingMaxSize=dict(type='int'), + connectionPoolingPrefSize=dict(type='int'), + connectionPoolingProtocol=dict(type='str'), + connectionPoolingTimeout=dict(type='int'), + connectionTimeout=dict(type='int'), + connectionUrl=dict(type='str'), + customUserSearchFilter=dict(type='str'), + debug=dict(type='bool'), + editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), + enabled=dict(type='bool', default=True), + evictionDay=dict(type='str'), + evictionHour=dict(type='str'), + evictionMinute=dict(type='str'), + fullSyncPeriod=dict(type='int', default=-1), + importEnabled=dict(type='bool', default=True), + kerberosRealm=dict(type='str'), + keyTab=dict(type='str', no_log=False), + maxLifespan=dict(type='int'), + pagination=dict(type='bool', default=True), + priority=dict(type='int', default=0), + rdnLDAPAttribute=dict(type='str'), + readTimeout=dict(type='int'), + referral=dict(type='str', choices=['ignore', 'follow']), + searchScope=dict(type='str', choices=['1', '2'], default='1'), + serverPrincipal=dict(type='str'), + krbPrincipalAttribute=dict(type='str'), + startTls=dict(type='bool', default=False), + syncRegistrations=dict(type='bool', default=False), + trustEmail=dict(type='bool', default=False), + updateProfileFirstLogin=dict(type='bool'), + useKerberosForPasswordAuthentication=dict(type='bool', default=False), + usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), + useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), + userObjectClasses=dict(type='str'), + usernameLDAPAttribute=dict(type='str'), + usersDn=dict(type='str'), + uuidLDAPAttribute=dict(type='str'), + validatePasswordPolicy=dict(type='bool', default=False), + vendor=dict(type='str'), + ) + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + parentId=dict(type='str'), + providerId=dict(type='str'), + providerType=dict(type='str', default='org.keycloak.storage.ldap.mappers.LDAPStorageMapper'), + config=dict(type='dict'), + ) + + meta_args = dict( + config=dict(type='dict', options=config_spec), + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + id=dict(type='str'), + name=dict(type='str'), + provider_id=dict(type='str', aliases=['providerId']), + provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), + parent_id=dict(type='str', aliases=['parentId']), + remove_unspecified_mappers=dict(type='bool', default=True), + bind_credential_update_mode=dict(type='str', default='always', choices=['always', 'only_indirect']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + 
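(Editorial aside.) Before the specs above are applied, it is worth noting how the module encodes config for the Keycloak component API, as done a few lines further down: every value becomes a single-element list of strings, with non-string values stringified and lowercased. A minimal sketch of that transformation, for illustration only:

```python
def encode_config(config):
    # Keycloak component config values are single-element lists of strings.
    return {
        key: [str(val).lower() if not isinstance(val, str) else val]
        for key, val in config.items()
        if val is not None
    }

print(encode_config({'enabled': True, 'priority': 0, 'vendor': 'other'}))
# {'enabled': ['true'], 'priority': ['0'], 'vendor': ['other']}
```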
argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + config = module.params.get('config') + mappers = module.params.get('mappers') + cid = module.params.get('id') + name = module.params.get('name') + + # Keycloak API expects config parameters to be arrays containing a single string element + if config is not None: + module.params['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in config.items() + if config[k] is not None + } + + if mappers is not None: + for mapper in mappers: + if mapper.get('config') is not None: + mapper['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in mapper['config'].items() + if mapper['config'][k] is not None + } + + # Filter and map the parameters names that apply + comp_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + + ['state', 'realm', 'mappers', 'remove_unspecified_mappers', 'bind_credential_update_mode'] + and module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', name=name)), realm) + if len(found) > 1: + module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. 
Cannot continue.'.format(name=name)) + before_comp = next(iter(found), None) + if before_comp is not None: + cid = before_comp['id'] + else: + before_comp = kc.get_component(cid, realm) + + if before_comp is None: + before_comp = {} + + # If the user federation exists, get its associated mappers + if cid is not None and before_comp: + before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + + normalize_kc_comp(before_comp) + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for param in comp_params: + new_param_value = module.params.get(param) + old_value = before_comp[camel(param)] if camel(param) in before_comp else None + if param == 'mappers': + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # special handling of mappers list to allow change detection + if module.params.get('mappers') is not None: + if module.params['provider_id'] in ['kerberos', 'sssd']: + module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) + for change in module.params['mappers']: + change = {k: v for k, v in change.items() if v is not None} + if change.get('id') is None and change.get('name') is None: + module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') + if cid is None: + old_mapper = {} + elif change.get('id') is not None: + old_mapper = next((before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper["id"] == change['id']), None) + if old_mapper is None: + old_mapper = {} + else: + found = [before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper['name'] == change['name']] + if len(found) > 1: + module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name'])) + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = {} + new_mapper = old_mapper.copy() + new_mapper.update(change) + # changeset contains all desired mappers: those existing, to update or to create + if changeset.get('mappers') is None: + changeset['mappers'] = list() + changeset['mappers'].append(new_mapper) + changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('name') or '') + + # to keep unspecified existing mappers we add them to the desired mappers list, unless they're already present + if not module.params['remove_unspecified_mappers'] and 'mappers' in before_comp: + changeset_mapper_ids = [mapper['id'] for mapper in changeset['mappers'] if 'id' in mapper] + changeset['mappers'].extend([mapper for mapper in before_comp['mappers'] if mapper['id'] not in changeset_mapper_ids]) + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_comp = before_comp.copy() + desired_comp.update(changeset) + + result['proposed'] = sanitize(changeset) + result['existing'] = sanitize(before_comp) + + # Cater for when it doesn't exist (an empty dict) + if not before_comp: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'User federation does not exist; doing nothing.' 
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module.check_mode: + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_comp)) + module.exit_json(**result) + + # create it + desired_mappers = desired_comp.pop('mappers', []) + after_comp = kc.create_component(desired_comp, realm) + cid = after_comp['id'] + updated_mappers = [] + # when creating a user federation, Keycloak automatically creates default mappers + default_mappers = kc.get_components(urlencode(dict(parent=cid)), realm) + + # create new mappers or update existing default mappers + for desired_mapper in desired_mappers: + found = [default_mapper for default_mapper in default_mappers if default_mapper['name'] == desired_mapper['name']] + if len(found) > 1: + module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=desired_mapper['name'])) + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = {} + + new_mapper = old_mapper.copy() + new_mapper.update(desired_mapper) + + if new_mapper.get('id') is not None: + kc.update_component(new_mapper, realm) + updated_mappers.append(new_mapper) + else: + if new_mapper.get('parentId') is None: + new_mapper['parentId'] = cid + updated_mappers.append(kc.create_component(new_mapper, realm)) + + if module.params['remove_unspecified_mappers']: + # we remove all unwanted default mappers + # we use IDs so we don't accidentally remove one of the previously updated default mappers + for default_mapper in default_mappers: + if default_mapper['id'] not in [x['id'] for x in updated_mappers]: + kc.delete_component(default_mapper['id'], realm) + + after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + normalize_kc_comp(after_comp) + if module._diff: + result['diff'] = dict(before='', after=sanitize(after_comp)) + result['end_state'] = sanitize(after_comp) + result['msg'] = "User federation {id} has been created".format(id=cid) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + desired_copy = deepcopy(desired_comp) + before_copy = deepcopy(before_comp) + # exclude bindCredential when checking whether an update is required, so that it + # is only updated when there are other changes + if module.params['bind_credential_update_mode'] == 'only_indirect': + desired_copy.get('config', {}).pop('bindCredential', None) + before_copy.get('config', {}).pop('bindCredential', None) + # no changes + if desired_copy == before_copy: + result['changed'] = False + result['end_state'] = sanitize(desired_comp) + result['msg'] = "No changes required to user federation {id}.".format(id=cid) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + desired_mappers = desired_comp.pop('mappers', []) + kc.update_component(desired_comp, realm) + + for before_mapper in before_comp.get('mappers', []): + # remove unwanted existing mappers that will not be updated + if before_mapper['id'] not in [x['id'] for x in desired_mappers if 'id' in x]: + kc.delete_component(before_mapper['id'], realm) + + for mapper in desired_mappers: + if mapper in before_comp.get('mappers', []): + continue + if mapper.get('id') is not None: + kc.update_component(mapper, realm) + else: + if mapper.get('parentId') is None: + mapper['parentId'] = desired_comp['id'] + kc.create_component(mapper, 
realm) + + after_comp = kc.get_component(cid, realm) + after_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + normalize_kc_comp(after_comp) + after_comp_sanitized = sanitize(after_comp) + before_comp_sanitized = sanitize(before_comp) + result['end_state'] = after_comp_sanitized + if module._diff: + result['diff'] = dict(before=before_comp_sanitized, after=after_comp_sanitized) + result['changed'] = before_comp_sanitized != after_comp_sanitized + result['msg'] = "User federation {id} has been updated".format(id=cid) + module.exit_json(**result) + + elif state == 'absent': + # Process a deletion + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_comp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_component(cid, realm) + + result['end_state'] = {} + + result['msg'] = "User federation {id} has been deleted".format(id=cid) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py new file mode 100644 index 0000000000..2d7024fd5f --- /dev/null +++ b/plugins/modules/keycloak_user_rolemapping.py @@ -0,0 +1,397 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Dušan Marković (@bratwurzt) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_user_rolemapping + +short_description: Allows administration of Keycloak user_rolemapping with the Keycloak API + +version_added: 5.7.0 + +description: + - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a user_rolemapping, where possible provide the role ID to the module. This avoids an extra API lookup to + translate the name into the role ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the user_rolemapping. + - On V(present), the user_rolemapping is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the user_rolemapping is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which this role_representation resides. + default: 'master' + + target_username: + type: str + description: + - Username of the user that roles are mapped to. + - This parameter is not required (it can be replaced by O(uid) to reduce the number of API calls). 
+ uid: + type: str + description: + - ID of the user to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + service_account_user_client_id: + type: str + description: + - Client ID of the service-account-user to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + client_id: + type: str + description: + - Name of the client to be mapped (different from O(cid)). + - This parameter is required if O(cid) is not provided (can be replaced by O(cid) to reduce the number of API calls + that must be made). + cid: + type: str + description: + - ID of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + roles: + description: + - Roles to be mapped to the user. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. + - This parameter is not required for updating or deleting a role_representation but providing it reduces the number + of API calls required. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Dušan Marković (@bratwurzt) +""" + +EXAMPLES = r""" +- name: Map a client role to a user, authentication with credentials + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + uid: user1Id + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a service account user for a client, authentication with credentials + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + service_account_user_client_id: clientIdOfServiceAccount + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a user, authentication with token + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + target_username: user1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a user + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + uid: 70e3ae72-96b6-11e6-9056-9737fd4d0764 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. 
+ returned: always + type: str + sample: "Role role1 assigned to user user1." + +proposed: + description: Representation of proposed client role mapping. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: + - Representation of existing client role mapping. + - The sample is truncated. + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: + - Representation of client role mapping after module execution. + - The sample is truncated. + returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + uid=dict(type='str'), + target_username=dict(type='str'), + service_account_user_client_id=dict(type='str'), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], + ['uid', 'target_username', 'service_account_user_client_id']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + uid = module.params.get('uid') + target_username = module.params.get('target_username') + service_account_user_client_id = module.params.get('service_account_user_client_id') + roles = module.params.get('roles') + + # Check the parameters + if uid is None and target_username is None and service_account_user_client_id is None: + module.fail_json(msg='Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified.') + + # Get the potential missing parameters + if uid is None and service_account_user_client_id is None: + user_rep = kc.get_user_by_username(username=target_username, realm=realm) + if user_rep is not None: + uid = user_rep.get('id') + else: + module.fail_json(msg='Could not fetch user for username %s' % target_username) + else: + if uid is None and target_username is None: + user_rep = kc.get_service_account_user_by_client_id(client_id=service_account_user_client_id, realm=realm) + if user_rep is not None: + uid = user_rep['id'] + else: + module.fail_json(msg='Could not fetch service-account-user for client_id %s' % service_account_user_client_id) + + if cid is None and client_id is not None: + cid = 
kc.get_client_id(client_id=client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role in roles: + if role.get('name') is None and role.get('id') is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role.get('id') is None: + if cid is None: + role_id = kc.get_realm_role(name=role.get('name'), realm=realm)['id'] + else: + role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get('name'), realm=realm) + if role_id is not None: + role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('name'), client_id, realm)) + # Fetch missing role_name + else: + if cid is None: + role['name'] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get('id'), realm=realm)['name'] + else: + role['name'] = kc.get_client_user_rolemapping_by_id(uid=uid, cid=cid, rid=role.get('id'), realm=realm)['name'] + if role.get('name') is None: + module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('id'), client_id, realm)) + + # Get effective role mappings + if cid is None: + available_roles_before = kc.get_realm_user_available_rolemappings(uid=uid, realm=realm) + assigned_roles_before = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + available_roles_before = kc.get_client_user_available_rolemappings(uid=uid, cid=cid, realm=realm) + assigned_roles_before = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = roles + + update_roles = [] + for role in roles: + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role.get('name') == available_role.get('name'): + update_roles.append({ + 'id': role.get('id'), + 'name': role.get('name'), + }) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role.get('name') == assigned_role.get('name'): + update_roles.append({ + 'id': role.get('id'), + 'name': role.get('name'), + }) + + if update_roles: + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) + if module.check_mode: + module.exit_json(**result) + kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to userId %s.' % (update_roles, uid) + if cid is None: + assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) + if module.check_mode: + module.exit_json(**result) + kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) + result['msg'] = 'Roles %s removed from userId %s.' 
% (update_roles, uid) + if cid is None: + assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are correctly mapped to the user with username %s.' % (roles, target_username) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py new file mode 100644 index 0000000000..a09ab8818b --- /dev/null +++ b/plugins/modules/keycloak_userprofile.py @@ -0,0 +1,734 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_userprofile + +short_description: Allows managing Keycloak User Profiles + +description: + - This module allows you to create, update, or delete Keycloak User Profiles using the Keycloak API. You can also customize + the "Unmanaged Attributes" with it. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). For compatibility reasons, the module also accepts + the camelCase versions of the options. +version_added: "9.4.0" + +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the User Profile provider. + - On V(present), the User Profile provider is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the User Profile provider is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + parent_id: + description: + - The parent ID of the realm key. In practice, this is the ID (name) of the realm. + aliases: + - parentId + - realm + type: str + required: true + + provider_id: + description: + - The name of the provider ID for the key (the only supported value is V(declarative-user-profile)). + aliases: + - providerId + choices: ['declarative-user-profile'] + default: 'declarative-user-profile' + type: str + + provider_type: + description: + - Component type for User Profile (the only supported value is V(org.keycloak.userprofile.UserProfileProvider)). + aliases: + - providerType + choices: ['org.keycloak.userprofile.UserProfileProvider'] + default: org.keycloak.userprofile.UserProfileProvider + type: str + + config: + description: + - The configuration of the User Profile Provider. + type: dict + required: false + suboptions: + kc_user_profile_config: + description: + - Define a declarative User Profile. See EXAMPLES for more context. + aliases: + - kcUserProfileConfig + type: list + elements: dict + suboptions: + attributes: + description: + - A list of attributes to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the attribute. + type: str + required: true + + display_name: + description: + - The display name of the attribute. + aliases: + - displayName + type: str + required: true + + validations: + description: + - The validations to be applied to the attribute. 
+ type: dict + suboptions: + length: + description: + - The length validation for the attribute. + type: dict + suboptions: + min: + description: + - The minimum length of the attribute. + type: int + max: + description: + - The maximum length of the attribute. + type: int + required: true + + email: + description: + - The email validation for the attribute. + type: dict + + username_prohibited_characters: + description: + - The prohibited characters validation for the username attribute. + type: dict + aliases: + - usernameProhibitedCharacters + + up_username_not_idn_homograph: + description: + - The validation to prevent IDN homograph attacks in usernames. + type: dict + aliases: + - upUsernameNotIdnHomograph + + person_name_prohibited_characters: + description: + - The prohibited characters validation for person name attributes. + type: dict + aliases: + - personNameProhibitedCharacters + + uri: + description: + - The URI validation for the attribute. + type: dict + + pattern: + description: + - The pattern validation for the attribute using regular expressions. + type: dict + + options: + description: + - Validation to ensure the attribute matches one of the provided options. + type: dict + + annotations: + description: + - Annotations for the attribute. + type: dict + + group: + description: + - Specifies the User Profile group where this attribute is added. + type: str + + permissions: + description: + - The permissions for viewing and editing the attribute. + type: dict + suboptions: + view: + description: + - The roles that can view the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + edit: + description: + - The roles that can edit the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + multivalued: + description: + - Whether the attribute can have multiple values. + type: bool + default: false + + required: + description: + - The roles that require this attribute. + type: dict + suboptions: + roles: + description: + - The roles for which this attribute is required. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - user + + groups: + description: + - A list of attribute groups to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the group. + type: str + required: true + + display_header: + description: + - The display header for the group. + aliases: + - displayHeader + type: str + required: true + + display_description: + description: + - The display description for the group. + aliases: + - displayDescription + type: str + required: false + + annotations: + description: + - The annotations included in the group. + type: dict + required: false + + unmanaged_attribute_policy: + description: + - Policy for unmanaged attributes. + aliases: + - unmanagedAttributePolicy + type: str + choices: + - ENABLED + - ADMIN_EDIT + - ADMIN_VIEW + +notes: + - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keycloak API). + However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries. 
+extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Eike Waldt (@yeoldegrove) +""" + +EXAMPLES = r""" +- name: Create a Declarative User Profile with default settings + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - attributes: + - name: username + displayName: ${username} + validations: + length: + min: 3 + max: 255 + username_prohibited_characters: {} + up_username_not_idn_homograph: {} + annotations: {} + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: email + displayName: ${email} + validations: + email: {} + length: + max: 255 + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: firstName + displayName: ${firstName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: lastName + displayName: ${lastName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + groups: + - name: user-metadata + displayHeader: User metadata + displayDescription: Attributes, which refer to user metadata + annotations: {} + +- name: Delete a Keycloak User Profile Provider + community.general.keycloak_userprofile: + state: absent + parent_id: master + +# Unmanaged attributes are user attributes not explicitly defined in the User Profile +# configuration. By default, unmanaged attributes are "Disabled" and are not +# available from any context such as registration, account, and the +# administration console. By setting "Enabled", unmanaged attributes are fully +# recognized by the server and accessible through all contexts, useful if you are +# starting to migrate an existing realm to the declarative User Profile +# and you do not yet have all user attributes defined in the User Profile configuration. +- name: Enable Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ENABLED + +# By setting "Only administrators can write", unmanaged attributes can be managed +# only through the administration console and API, useful if you have already +# defined any custom attribute that can be managed by users but you are unsure +# about adding other attributes that should only be managed by administrators. +- name: Enable ADMIN_EDIT on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_EDIT + +# By setting "Only administrators can view", unmanaged attributes are read-only +# and only available through the administration console and API. +- name: Enable ADMIN_VIEW on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_VIEW +""" + +RETURN = r""" +msg: + description: The output message generated by the module. + returned: always + type: str + sample: UserProfileProvider created successfully +data: + description: The data returned by the Keycloak API. 
+ returned: when state is present + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from copy import deepcopy +from urllib.parse import urlencode +import json + + +def remove_null_values(data): + if isinstance(data, dict): + # Recursively remove null values from dictionaries + return {k: remove_null_values(v) for k, v in data.items() if v is not None} + elif isinstance(data, list): + # Recursively remove null values from lists + return [remove_null_values(item) for item in data if item is not None] + else: + # Return the data if it is neither a dictionary nor a list + return data + + +def camel_recursive(data): + if isinstance(data, dict): + # Convert keys to camelCase and apply recursively + return {camel(k): camel_recursive(v) for k, v in data.items()} + elif isinstance(data, list): + # Apply camelCase conversion to each item in the list + return [camel_recursive(item) for item in data] + else: + # Return the data as-is if it is not a dict or list + return data + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + parent_id=dict(type='str', aliases=['parentId', 'realm'], required=True), + provider_id=dict(type='str', aliases=['providerId'], default='declarative-user-profile', choices=['declarative-user-profile']), + provider_type=dict( + type='str', + aliases=['providerType'], + default='org.keycloak.userprofile.UserProfileProvider', + choices=['org.keycloak.userprofile.UserProfileProvider'] + ), + config=dict( + type='dict', + options={ + 'kc_user_profile_config': dict( + type='list', + aliases=['kcUserProfileConfig'], + elements='dict', + options={ + 'attributes': dict( + type='list', + elements='dict', + options={ + 'name': dict(type='str', required=True), + 'display_name': dict(type='str', aliases=['displayName'], required=True), + 'validations': dict( + type='dict', + options={ + 'length': dict( + type='dict', + options={ + 'min': dict(type='int'), + 'max': dict(type='int', required=True) + } + ), + 'email': dict(type='dict'), + 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters']), + 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph']), + 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters']), + 'uri': dict(type='dict'), + 'pattern': dict(type='dict'), + 'options': dict(type='dict') + } + ), + 'annotations': dict(type='dict'), + 'group': dict(type='str'), + 'permissions': dict( + type='dict', + options={ + 'view': dict(type='list', elements='str', default=['admin', 'user']), + 'edit': dict(type='list', elements='str', default=['admin', 'user']) + } + ), + 'multivalued': dict(type='bool', default=False), + 'required': dict( + type='dict', + options={ + 'roles': dict(type='list', elements='str', default=['user']) + } + ) + } + ), + 'groups': dict( + type='list', + elements='dict', + options={ + 'name': dict(type='str', required=True), + 'display_header': dict(type='str', aliases=['displayHeader'], required=True), + 'display_description': dict(type='str', aliases=['displayDescription']), + 'annotations': dict(type='dict') + } + ), + 'unmanaged_attribute_policy': dict( + type='str', + aliases=['unmanagedAttributePolicy'], + choices=['ENABLED', 'ADMIN_EDIT', 
'ADMIN_VIEW'], + ) + } + ) + } + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + # Initialize the result object. Only "changed" seems to have special + # meaning for Ansible. + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the realm userprofile if it is already + # present. This is only used for diff-mode. + before_realm_userprofile = {} + before_realm_userprofile['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state"] + + # Filter and map the parameter names that apply to the user profile + component_params = [ + x + for x in module.params + if x not in params_to_ignore and module.params.get(x) is not None + ] + + # Build a proposed changeset from parameters given to this module + changeset = {} + + # Build the changeset with proper JSON serialization for kc_user_profile_config + config = module.params.get('config') + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloak_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake_case) into + # Keycloak-compatible format (camelCase). For example provider_id + # becomes providerId. It also handles some special cases, e.g. aliases. 
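+    # For example (illustrative): a task that sets config.kc_user_profile_config ends up as + # {'config': {'kc.user.profile.config': [<JSON-serialized profile>]}} in the component payload, + # since the Admin API stores the whole profile as a single JSON string.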
+ for component_param in component_params: + # realm/parent_id parameter + if component_param == 'realm' or component_param == 'parent_id': + changeset['parent_id'] = module.params.get(component_param) + changeset.pop(component_param, None) + # complex parameters in config suboptions + elif component_param == 'config': + for config_param in config: + # special parameter kc_user_profile_config + if config_param in ('kcUserProfileConfig', 'kc_user_profile_config'): + config_param_org = config_param + # rename parameter to be accepted by Keycloak API + config_param = 'kc.user.profile.config' + # make sure no null values are passed to Keycloak API + kc_user_profile_config = remove_null_values(config[config_param_org]) + changeset[camel(component_param)][config_param] = [] + if len(kc_user_profile_config) > 0: + # convert aliases to camelCase + kc_user_profile_config = camel_recursive(kc_user_profile_config) + # rename validations to be accepted by Keycloak API + if 'attributes' in kc_user_profile_config[0]: + for attribute in kc_user_profile_config[0]['attributes']: + if 'validations' in attribute: + if 'usernameProhibitedCharacters' in attribute['validations']: + attribute['validations']['username-prohibited-characters'] = ( + attribute['validations'].pop('usernameProhibitedCharacters') + ) + if 'upUsernameNotIdnHomograph' in attribute['validations']: + attribute['validations']['up-username-not-idn-homograph'] = ( + attribute['validations'].pop('upUsernameNotIdnHomograph') + ) + if 'personNameProhibitedCharacters' in attribute['validations']: + attribute['validations']['person-name-prohibited-characters'] = ( + attribute['validations'].pop('personNameProhibitedCharacters') + ) + changeset[camel(component_param)][config_param].append(kc_user_profile_config[0]) + # usual camelCase parameters + else: + changeset[camel(component_param)][camel(config_param)] = [] + raw_value = module.params.get(component_param)[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = raw_value # Directly use the raw value + changeset[camel(component_param)][camel(config_param)].append(value) + # usual parameters + else: + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make it easier to refer to current module parameters + state = module.params.get('state') + enabled = module.params.get('enabled') + parent_id = module.params.get('parent_id') + provider_type = module.params.get('provider_type') + provider_id = module.params.get('provider_id') + + # Make a deep copy of the changeset. This is used when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # Get a list of all Keycloak components that are of userprofile provider type. + realm_userprofiles = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present, get its userprofile ID. Confusingly the userprofile ID is + # also known as the Provider ID. + userprofile_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the userprofile was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter one whose provider ID + # matches declarative-user-profile, assume the userprofile is + # already present. 
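+    # Since provider_id is restricted to 'declarative-user-profile', at most one + # matching component is expected per realm.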
+ for userprofile in realm_userprofiles: + if provider_id == "declarative-user-profile": + userprofile_id = userprofile['id'] + changeset['id'] = userprofile_id + changeset_copy['id'] = userprofile_id + + # keycloak returns kc.user.profile.config as a single JSON formatted string, so we have to deserialize it + if 'config' in userprofile and 'kc.user.profile.config' in userprofile['config']: + userprofile['config']['kc.user.profile.config'][0] = json.loads(userprofile['config']['kc.user.profile.config'][0]) + + # Compare top-level parameters + for param, value in changeset.items(): + before_realm_userprofile[param] = userprofile[param] + + if changeset_copy[param] != userprofile[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, userprofile[param], changeset_copy[param]) + result['changed'] = True + + # Compare parameters under the "config" userprofile + for p, v in changeset_copy['config'].items(): + before_realm_userprofile['config'][p] = userprofile['config'][p] + if changeset_copy['config'][p] != userprofile['config'][p]: + changes += "config.%s: %s -> %s, " % (p, userprofile['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the userprofile). + + # keycloak expects kc.user.profile.config as a single JSON formatted string, so we have to serialize it + if 'config' in changeset and 'kc.user.profile.config' in changeset['config']: + changeset['config']['kc.user.profile.config'][0] = json.dumps(changeset['config']['kc.user.profile.config'][0]) + if userprofile_id and state == 'present': + if result['changed']: + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Userprofile %s would be changed: %s" % (provider_id, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Userprofile %s changed: %s" % (provider_id, changes.strip(", ")) + else: + result['msg'] = "Userprofile %s was in sync" % (provider_id) + + result['end_state'] = changeset_copy + elif userprofile_id and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be deleted" % (provider_id) + else: + kc.delete_component(userprofile_id, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s deleted" % (provider_id) + + result['end_state'] = {} + elif not userprofile_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be created" % (provider_id) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s created" % (provider_id) + + result['end_state'] = changeset_copy + elif not userprofile_id and state == 'absent': + result['changed'] = False + result['msg'] = "Userprofile %s not present" % (provider_id) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keyring.py b/plugins/modules/keyring.py new file mode 100644 index 0000000000..a201d214c2 --- /dev/null +++ b/plugins/modules/keyring.py @@ -0,0 +1,276 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Alexander Hussey +# GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +""" +Ansible Module - community.general.keyring +""" + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keyring +version_added: 5.2.0 +author: + - Alexander Hussey (@ahussey-redhat) +short_description: Set or delete a passphrase using the Operating System's native keyring +description: >- + This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to set or delete passphrases for a given + service and username from the OS' native keyring. +requirements: + - keyring (Python library) + - gnome-keyring (application - required for headless Gnome keyring access) + - dbus-run-session (application - required for headless Gnome keyring access) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + service: + description: The name of the service. + required: true + type: str + username: + description: The user belonging to the service. + required: true + type: str + user_password: + description: The password to set. + required: false + type: str + aliases: + - password + keyring_password: + description: Password to unlock keyring. + required: true + type: str + state: + description: Whether the password should exist. + required: false + default: present + type: str + choices: + - present + - absent +""" + +EXAMPLES = r""" +- name: Set a password for test/test1 + community.general.keyring: + service: test + username: test1 + user_password: "{{ user_password }}" + keyring_password: "{{ keyring_password }}" + +- name: Delete the password for test/test1 + community.general.keyring: + service: test + username: test1 + user_password: "{{ user_password }}" + keyring_password: "{{ keyring_password }}" + state: absent +""" + +try: + from shlex import quote +except ImportError: + from pipes import quote +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import keyring + + HAS_KEYRING = True + KEYRING_IMP_ERR = None +except ImportError: + HAS_KEYRING = False + KEYRING_IMP_ERR = traceback.format_exc() + + +def del_passphrase(module): + """ + Attempt to delete a passphrase in the keyring using the Python API and fall back to using a shell. + """ + if module.check_mode: + return None + try: + keyring.delete_password(module.params["service"], module.params["username"]) + return None + except keyring.errors.KeyringLocked: + delete_argument = ( + 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring del %s %s\n' + % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + ) + dummy, dummy, stderr = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=delete_argument, + encoding=None, + ) + + if not stderr.decode("UTF-8"): + return None + return stderr.decode("UTF-8") + + +def set_passphrase(module): + """ + Attempt to set a passphrase in the keyring using the Python API and fall back to using a shell. 
+ """ + if module.check_mode: + return None + try: + keyring.set_password( + module.params["service"], + module.params["username"], + module.params["user_password"], + ) + return None + except keyring.errors.KeyringLocked: + set_argument = ( + 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring set %s %s\n%s\n' + % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + quote(module.params["user_password"]), + ) + ) + dummy, dummy, stderr = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=set_argument, + encoding=None, + ) + if not stderr.decode("UTF-8"): + return None + return stderr.decode("UTF-8") + + +def get_passphrase(module): + """ + Attempt to retrieve passphrase from keyring using the Python API and fallback to using a shell. + """ + try: + passphrase = keyring.get_password( + module.params["service"], module.params["username"] + ) + return passphrase + except keyring.errors.KeyringLocked: + pass + except keyring.errors.InitError: + pass + except AttributeError: + pass + get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + dummy, stdout, dummy = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=get_argument, + encoding=None, + ) + try: + return stdout.decode("UTF-8").splitlines()[1] # Only return the line containing the password + except IndexError: + return None + + +def run_module(): + """ + Attempts to retrieve a passphrase from a keyring. + """ + result = dict( + changed=False, + msg="", + ) + + module_args = dict( + service=dict(type="str", required=True), + username=dict(type="str", required=True), + keyring_password=dict(type="str", required=True, no_log=True), + user_password=dict( + type="str", no_log=True, aliases=["password"] + ), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + ) + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + if not HAS_KEYRING: + module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) + + passphrase = get_passphrase(module) + if module.params["state"] == "present": + if passphrase is not None: + if passphrase == module.params["user_password"]: + result["msg"] = "Passphrase already set for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if passphrase != module.params["user_password"]: + set_result = set_passphrase(module) + if set_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been updated for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if set_result is not None: + module.fail_json(msg=set_result) + if passphrase is None: + set_result = set_passphrase(module) + if set_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been updated for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if set_result is not None: + module.fail_json(msg=set_result) + + if module.params["state"] == "absent": + if not passphrase: + result["result"] = "Passphrase already absent for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if passphrase: + del_result = del_passphrase(module) + if del_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been removed for %s@%s" % ( + module.params["service"], + 
module.params["username"], + ) + if del_result is not None: + module.fail_json(msg=del_result) + + module.exit_json(**result) + + +def main(): + """ + main module loop + """ + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/keyring_info.py b/plugins/modules/keyring_info.py new file mode 100644 index 0000000000..fb186c8e44 --- /dev/null +++ b/plugins/modules/keyring_info.py @@ -0,0 +1,153 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Alexander Hussey +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +""" +Ansible Module - community.general.keyring_info +""" + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keyring_info +version_added: 5.2.0 +author: + - Alexander Hussey (@ahussey-redhat) +short_description: Get a passphrase using the Operating System's native keyring +description: >- + This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to retrieve passphrases for a given service + and username from the OS' native keyring. +requirements: + - keyring (Python library) + - gnome-keyring (application - required for headless Linux keyring access) + - dbus-run-session (application - required for headless Linux keyring access) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + service: + description: The name of the service. + required: true + type: str + username: + description: The user belonging to the service. + required: true + type: str + keyring_password: + description: Password to unlock keyring. + required: true + type: str +""" + +EXAMPLES = r""" +- name: Retrieve password for service_name/user_name + community.general.keyring_info: + service: test + username: test1 + keyring_password: "{{ keyring_password }}" + register: test_password + +- name: Display password + ansible.builtin.debug: + msg: "{{ test_password.passphrase }}" +""" + +RETURN = r""" +passphrase: + description: A string containing the password. + returned: success and the password exists + type: str + sample: Password123 +""" + +try: + from shlex import quote +except ImportError: + from pipes import quote +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import keyring + + HAS_KEYRING = True + KEYRING_IMP_ERR = None +except ImportError: + HAS_KEYRING = False + KEYRING_IMP_ERR = traceback.format_exc() + + +def _alternate_retrieval_method(module): + get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + dummy, stdout, dummy = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=get_argument, + encoding=None, + ) + try: + return stdout.decode("UTF-8").splitlines()[1] + except IndexError: + return None + + +def run_module(): + """ + Attempts to retrieve a passphrase from a keyring. 
+ """ + result = dict(changed=False, msg="") + + module_args = dict( + service=dict(type="str", required=True), + username=dict(type="str", required=True), + keyring_password=dict(type="str", required=True, no_log=True), + ) + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + if not HAS_KEYRING: + module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) + try: + passphrase = keyring.get_password( + module.params["service"], module.params["username"] + ) + except keyring.errors.KeyringLocked: + pass + except keyring.errors.InitError: + pass + except AttributeError: + pass + + if passphrase is None: + passphrase = _alternate_retrieval_method(module) + + if passphrase is not None: + result["msg"] = "Successfully retrieved password for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + result["passphrase"] = passphrase + if passphrase is None: + result["msg"] = "Password for %s@%s does not exist." % ( + module.params["service"], + module.params["username"], + ) + module.exit_json(**result) + + +def main(): + """ + main module loop + """ + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/kibana_plugin.py b/plugins/modules/kibana_plugin.py deleted file mode 120000 index 8504c8fe58..0000000000 --- a/plugins/modules/kibana_plugin.py +++ /dev/null @@ -1 +0,0 @@ -./database/misc/kibana_plugin.py \ No newline at end of file diff --git a/plugins/modules/kibana_plugin.py b/plugins/modules/kibana_plugin.py new file mode 100644 index 0000000000..b464d363bb --- /dev/null +++ b/plugins/modules/kibana_plugin.py @@ -0,0 +1,275 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Thierno IB. BARRY @barryib +# Sponsored by Polyconseil http://polyconseil.fr. +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: kibana_plugin +short_description: Manage Kibana plugins +description: + - This module can be used to manage Kibana plugins. +author: Thierno IB. BARRY (@barryib) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the plugin to install. + required: true + type: str + state: + description: + - Desired state of a plugin. + choices: ["present", "absent"] + default: present + type: str + url: + description: + - Set exact URL to download the plugin from. + - For local file, prefix its absolute path with C(file://). + type: str + timeout: + description: + - 'Timeout setting: V(30s), V(1m), V(1h) and so on.' + default: 1m + type: str + plugin_bin: + description: + - Location of the Kibana binary. + default: /opt/kibana/bin/kibana + type: path + plugin_dir: + description: + - Your configured plugin directory specified in Kibana. + default: /opt/kibana/installedPlugins/ + type: path + version: + description: + - Version of the plugin to be installed. + - If the plugin is installed with in a previous version, it is B(not) updated unless O(force=true). + type: str + force: + description: + - Delete and re-install the plugin. It can be useful for plugins update. + type: bool + default: false + allow_root: + description: + - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands. 
+ type: bool + default: false + version_added: 2.3.0 +""" + +EXAMPLES = r""" +- name: Install Elasticsearch head plugin + community.general.kibana_plugin: + state: present + name: elasticsearch/marvel + +- name: Install specific version of a plugin + community.general.kibana_plugin: + state: present + name: elasticsearch/marvel + version: '2.3.3' + +- name: Uninstall Elasticsearch head plugin + community.general.kibana_plugin: + state: absent + name: elasticsearch/marvel +""" + +RETURN = r""" +cmd: + description: The launched command during plugin management (install / remove). + returned: success + type: str +name: + description: The plugin name to install or remove. + returned: success + type: str +url: + description: The URL from where the plugin is installed from. + returned: success + type: str +timeout: + description: The timeout for plugin download. + returned: success + type: str +state: + description: The state for the managed plugin. + returned: success + type: str +""" + +import os +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +PACKAGE_STATE_MAP = dict( + present="--install", + absent="--remove" +) + + +def parse_plugin_repo(string): + elements = string.split("/") + + # We first consider the simplest form: pluginname + repo = elements[0] + + # We consider the form: username/pluginname + if len(elements) > 1: + repo = elements[1] + + # remove elasticsearch- prefix + # remove es- prefix + for string in ("elasticsearch-", "es-"): + if repo.startswith(string): + return repo[len(string):] + + return repo + + +def is_plugin_present(plugin_dir, working_dir): + return os.path.isdir(os.path.join(working_dir, plugin_dir)) + + +def parse_error(string): + reason = "reason: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'): + if LooseVersion(kibana_version) > LooseVersion('4.6'): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') + cmd_args = [kibana_plugin_bin, "install"] + if url: + cmd_args.append(url) + else: + cmd_args.append(plugin_name) + else: + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] + + if url: + cmd_args.extend(["--url", url]) + + if timeout: + cmd_args.extend(["--timeout", timeout]) + + if allow_root: + cmd_args.append('--allow-root') + + if module.check_mode: + return True, " ".join(cmd_args), "check mode", "" + + rc, out, err = module.run_command(cmd_args) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, " ".join(cmd_args), out, err + + +def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): + if LooseVersion(kibana_version) > LooseVersion('4.6'): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') + cmd_args = [kibana_plugin_bin, "remove", plugin_name] + else: + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] + + if allow_root: + cmd_args.append('--allow-root') + + if module.check_mode: + return True, " ".join(cmd_args), "check mode", "" + + rc, out, err = module.run_command(cmd_args) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, " ".join(cmd_args), out, err + + +def get_kibana_version(module, plugin_bin, allow_root): + cmd_args = [plugin_bin, '--version'] + + if allow_root: + 
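+        # Sketch of the surrounding flow (version strings are examples): `kibana
+        # --version` is expected to print a bare version such as '4.6.4'; main()
+        # then compares it with LooseVersion, e.g.
+        #     LooseVersion('4.6.4') > LooseVersion('4.6')  # -> True
+        # to choose between the legacy `kibana plugin --install` interface and the
+        # newer standalone `kibana-plugin` binary used in install_plugin() above.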
cmd_args.append('--allow-root') + + rc, out, err = module.run_command(cmd_args) + if rc != 0: + module.fail_json(msg="Failed to get Kibana version : %s" % err) + + return out.strip() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), + url=dict(), + timeout=dict(default="1m"), + plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), + plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), + version=dict(), + force=dict(default=False, type="bool"), + allow_root=dict(default=False, type="bool"), + ), + supports_check_mode=True, + ) + + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + timeout = module.params["timeout"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + version = module.params["version"] + force = module.params["force"] + allow_root = module.params["allow_root"] + + changed, cmd, out, err = False, '', '', '' + + kibana_version = get_kibana_version(module, plugin_bin, allow_root) + + present = is_plugin_present(parse_plugin_repo(name), plugin_dir) + + # skip if the state is correct + if (present and state == "present" and not force) or (state == "absent" and not present and not force): + module.exit_json(changed=False, name=name, state=state) + + if version: + name = name + '/' + version + + if state == "present": + if force: + remove_plugin(module, plugin_bin, name, allow_root, kibana_version) + changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version) + + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py new file mode 100644 index 0000000000..995319e715 --- /dev/null +++ b/plugins/modules/krb_ticket.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: krb_ticket +short_description: Kerberos utils for managing tickets +version_added: 10.0.0 +description: + - Manage Kerberos tickets with C(kinit), C(klist) and C(kdestroy) base utilities. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/user/user_commands/index.html) for reference. +author: "Alexander Bakanovskii (@abakanovskii)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + password: + description: + - Principal password. + - It is required to specify O(password) or O(keytab_path). + type: str + principal: + description: + - The principal name. + - If not set, the user running this module is used. + type: str + state: + description: + - The state of the Kerberos ticket. + - V(present) is equivalent of C(kinit) command. + - V(absent) is equivalent of C(kdestroy) command. + type: str + default: present + choices: ["present", "absent"] + kdestroy_all: + description: + - When O(state=absent) destroys all credential caches in collection. + - Equivalent of running C(kdestroy -A). 
+    type: bool
+  cache_name:
+    description:
+      - Use O(cache_name) as the ticket cache name and location.
+      - If this option is not used, the default cache name and location are used.
+      - The default credentials cache may vary between systems.
+      - If not set, the value of the E(KRB5CCNAME) environment variable is used instead to name the default ticket cache.
+    type: str
+  lifetime:
+    description:
+      - Requests a ticket with the given lifetime. If O(lifetime) is not specified, the default ticket lifetime is used.
+      - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) does not override the
+        configured maximum ticket lifetime.
+      - 'The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h) - hours, V(d) - days.'
+      - You cannot mix units; a value of V(3h30m) results in an error.
+      - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
+    type: str
+  start_time:
+    description:
+      - Requests a postdated ticket.
+      - Postdated tickets are issued with the invalid flag set, and need to be resubmitted to the KDC for validation before
+        use.
+      - O(start_time) specifies the duration of the delay before the ticket can become valid.
+      - You can use absolute time formats, for example V(July 27, 2012 at 20:30) you would need to set O(start_time=20120727203000).
+      - You can also use time duration format similar to O(lifetime) or O(renewable).
+      - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
+    type: str
+  renewable:
+    description:
+      - Requests renewable tickets, with a total lifetime equal to O(renewable).
+      - 'The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h) - hours, V(d) - days.'
+      - You cannot mix units; a value of V(3h30m) results in an error.
+      - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
+    type: str
+  forwardable:
+    description:
+      - Request forwardable or non-forwardable tickets.
+    type: bool
+  proxiable:
+    description:
+      - Request proxiable or non-proxiable tickets.
+    type: bool
+  address_restricted:
+    description:
+      - Request tickets restricted to the host's local address or non-restricted.
+    type: bool
+  anonymous:
+    description:
+      - Requests anonymous processing.
+    type: bool
+  canonicalization:
+    description:
+      - Requests canonicalization of the principal name, and allows the KDC to reply with a different client principal from
+        the one requested.
+    type: bool
+  enterprise:
+    description:
+      - Treats the principal name as an enterprise name (implies the O(canonicalization) option).
+    type: bool
+  renewal:
+    description:
+      - Requests renewal of the ticket-granting ticket.
+      - Note that an expired ticket cannot be renewed, even if the ticket is still within its renewable life.
+    type: bool
+  validate:
+    description:
+      - Requests that the ticket-granting ticket in the cache (with the invalid flag set) be passed to the KDC for validation.
+      - If the ticket is within its requested time range, the cache is replaced with the validated ticket.
+    type: bool
+  keytab:
+    description:
+      - Requests a ticket, obtained from a key in the local host's keytab.
+      - If O(keytab_path) is not specified, it tries to use the default client keytab path (C(-i) option).
+    type: bool
+  keytab_path:
+    description:
+      - Use when O(keytab=true) to specify the path to a keytab file.
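+# A sketch of the argv these options translate to (all values are examples):
+#   ['kinit', '-l', '7d', 'admin@IPA.TEST']          # lifetime=7d with a principal
+#   ['kinit', '-k', '-t', '/etc/ipa/file.keytab']    # keytab=true with keytab_path
+# When a password is used instead of a keytab, exec_kinit() below passes it to
+# kinit on standard input rather than on the command line.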
+ - It is required to specify O(password) or O(keytab_path). + type: path +requirements: + - krb5-user and krb5-config packages +extends_documentation_fragment: + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Get Kerberos ticket using default principal + community.general.krb_ticket: + password: some_password + +- name: Get Kerberos ticket using keytab + community.general.krb_ticket: + keytab: true + keytab_path: /etc/ipa/file.keytab + +- name: Get Kerberos ticket with a lifetime of 7 days + community.general.krb_ticket: + password: some_password + lifetime: 7d + +- name: Get Kerberos ticket with a starting time of July 2, 2024, 1:35:30 p.m. + community.general.krb_ticket: + password: some_password + start_time: "240702133530" + +- name: Get Kerberos ticket using principal name + community.general.krb_ticket: + password: some_password + principal: admin + +- name: Get Kerberos ticket using principal with realm + community.general.krb_ticket: + password: some_password + principal: admin@IPA.TEST + +- name: Check for existence by ticket cache + community.general.krb_ticket: + cache_name: KEYRING:persistent:0:0 + +- name: Make sure default ticket is destroyed + community.general.krb_ticket: + state: absent + +- name: Make sure specific ticket destroyed by principal + community.general.krb_ticket: + state: absent + principal: admin@IPA.TEST + +- name: Make sure specific ticket destroyed by cache_name + community.general.krb_ticket: + state: absent + cache_name: KEYRING:persistent:0:0 + +- name: Make sure all tickets are destroyed + community.general.krb_ticket: + state: absent + kdestroy_all: true +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.password = kwargs['password'] + self.principal = kwargs['principal'] + self.state = kwargs['state'] + self.kdestroy_all = kwargs['kdestroy_all'] + self.cache_name = kwargs['cache_name'] + self.start_time = kwargs['start_time'] + self.renewable = kwargs['renewable'] + self.forwardable = kwargs['forwardable'] + self.proxiable = kwargs['proxiable'] + self.address_restricted = kwargs['address_restricted'] + self.canonicalization = kwargs['canonicalization'] + self.enterprise = kwargs['enterprise'] + self.renewal = kwargs['renewal'] + self.validate = kwargs['validate'] + self.keytab = kwargs['keytab'] + self.keytab_path = kwargs['keytab_path'] + + self.kinit = CmdRunner( + module, + command='kinit', + arg_formats=dict( + lifetime=cmd_runner_fmt.as_opt_val('-l'), + start_time=cmd_runner_fmt.as_opt_val('-s'), + renewable=cmd_runner_fmt.as_opt_val('-r'), + forwardable=cmd_runner_fmt.as_bool('-f', '-F', ignore_none=True), + proxiable=cmd_runner_fmt.as_bool('-p', '-P', ignore_none=True), + address_restricted=cmd_runner_fmt.as_bool('-a', '-A', ignore_none=True), + anonymous=cmd_runner_fmt.as_bool('-n'), + canonicalization=cmd_runner_fmt.as_bool('-C'), + enterprise=cmd_runner_fmt.as_bool('-E'), + renewal=cmd_runner_fmt.as_bool('-R'), + validate=cmd_runner_fmt.as_bool('-v'), + keytab=cmd_runner_fmt.as_bool('-k'), + keytab_path=cmd_runner_fmt.as_func(lambda v: ['-t', v] if v else ['-i']), + cache_name=cmd_runner_fmt.as_opt_val('-c'), + principal=cmd_runner_fmt.as_list(), + ) + ) + + self.kdestroy = CmdRunner( + module, + command='kdestroy', + arg_formats=dict( + kdestroy_all=cmd_runner_fmt.as_bool('-A'), + 
                cache_name=cmd_runner_fmt.as_opt_val('-c'),
+                principal=cmd_runner_fmt.as_opt_val('-p'),
+            )
+        )
+
+        self.klist = CmdRunner(
+            module,
+            command='klist',
+            arg_formats=dict(
+                show_list=cmd_runner_fmt.as_bool('-l'),
+            )
+        )
+
+    def exec_kinit(self):
+        params = dict(self.module.params)
+        with self.kinit(
+            "lifetime start_time renewable forwardable proxiable address_restricted anonymous "
+            "canonicalization enterprise renewal validate keytab keytab_path cache_name principal",
+            check_rc=True,
+            data=self.password,
+        ) as ctx:
+            rc, out, err = ctx.run(**params)
+        return out
+
+    def exec_kdestroy(self):
+        params = dict(self.module.params)
+        with self.kdestroy(
+            "kdestroy_all cache_name principal",
+            check_rc=True
+        ) as ctx:
+            rc, out, err = ctx.run(**params)
+        return out
+
+    def exec_klist(self, show_list):
+        # Use check_rc=False because klist always returns rc = 1
+        # when no tickets are present.
+        params = dict(show_list=show_list)
+        with self.klist(
+            "show_list",
+            check_rc=False
+        ) as ctx:
+            rc, out, err = ctx.run(**params)
+        return rc, out, err
+
+    def check_ticket_present(self):
+        ticket_present = True
+        show_list = False
+
+        if not self.principal and not self.cache_name:
+            rc, out, err = self.exec_klist(show_list)
+            if rc != 0:
+                ticket_present = False
+        else:
+            show_list = True
+            rc, out, err = self.exec_klist(show_list)
+            if self.principal and self.principal not in str(out):
+                ticket_present = False
+            if self.cache_name and self.cache_name not in str(out):
+                ticket_present = False
+
+        return ticket_present
+
+
+def main():
+    arg_spec = dict(
+        principal=dict(type='str'),
+        password=dict(type='str', no_log=True),
+        state=dict(default='present', choices=['present', 'absent']),
+        kdestroy_all=dict(type='bool'),
+        cache_name=dict(type='str', fallback=(env_fallback, ['KRB5CCNAME'])),
+        lifetime=dict(type='str'),
+        start_time=dict(type='str'),
+        renewable=dict(type='str'),
+        forwardable=dict(type='bool'),
+        proxiable=dict(type='bool'),
+        address_restricted=dict(type='bool'),
+        anonymous=dict(type='bool'),
+        canonicalization=dict(type='bool'),
+        enterprise=dict(type='bool'),
+        renewal=dict(type='bool'),
+        validate=dict(type='bool'),
+        keytab=dict(type='bool'),
+        keytab_path=dict(type='path'),
+    )
+    module = AnsibleModule(
+        argument_spec=arg_spec,
+        supports_check_mode=True,
+        required_by={
+            'keytab_path': 'keytab'
+        },
+        required_if=[
+            ('state', 'present', ('password', 'keytab_path'), True),
+        ],
+    )
+
+    state = module.params['state']
+    kdestroy_all = module.params['kdestroy_all']
+
+    keytab = IPAKeytab(module,
+                       state=state,
+                       kdestroy_all=kdestroy_all,
+                       principal=module.params['principal'],
+                       password=module.params['password'],
+                       cache_name=module.params['cache_name'],
+                       lifetime=module.params['lifetime'],
+                       start_time=module.params['start_time'],
+                       renewable=module.params['renewable'],
+                       forwardable=module.params['forwardable'],
+                       proxiable=module.params['proxiable'],
+                       address_restricted=module.params['address_restricted'],
+                       anonymous=module.params['anonymous'],
+                       canonicalization=module.params['canonicalization'],
+                       enterprise=module.params['enterprise'],
+                       renewal=module.params['renewal'],
+                       validate=module.params['validate'],
+                       keytab=module.params['keytab'],
+                       keytab_path=module.params['keytab_path'],
+                       )
+
+    if module.params['keytab_path'] is not None and module.params['keytab'] is not True:
+        module.fail_json(msg="If keytab_path is specified then keytab parameter must be True")
+
+    changed = False
+    if state == 'present':
+        if not keytab.check_ticket_present():
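+            # A new ticket is requested only when no matching ticket is found,
+            # which keeps repeated runs idempotent; in check mode the change is
+            # only reported and kinit is not executed.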
+            changed = True
+            if not module.check_mode:
+                keytab.exec_kinit()
+
+    if state == 'absent':
+        if kdestroy_all:
+            changed = True
+            if not module.check_mode:
+                keytab.exec_kdestroy()
+        elif keytab.check_ticket_present():
+            changed = True
+            if not module.check_mode:
+                keytab.exec_kdestroy()
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py
deleted file mode 120000
index 38013fb253..0000000000
--- a/plugins/modules/launchd.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/launchd.py
\ No newline at end of file
diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py
new file mode 100644
index 0000000000..c7e98f2bc0
--- /dev/null
+++ b/plugins/modules/launchd.py
@@ -0,0 +1,523 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018, Martin Migasiewicz
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: launchd
+author:
+  - Martin Migasiewicz (@martinm82)
+short_description: Manage macOS services
+version_added: 1.0.0
+description:
+  - Manage launchd services on target macOS hosts.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the service.
+    type: str
+    required: true
+  plist:
+    description:
+      - Name of the V(.plist) file for the service.
+      - Defaults to V({name}.plist).
+    type: str
+    version_added: 10.1.0
+  state:
+    description:
+      - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+      - C(launchd) does not natively support V(restarted) or V(reloaded). These states trigger a stop/start (restarted) or
+        an unload/load (reloaded).
+      - V(restarted) unloads and loads the service before start to ensure that the latest job definition (plist) is used.
+      - V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service
+        is started or stopped depends on the content of the definition file.
+    type: str
+    choices: [reloaded, restarted, started, stopped, unloaded]
+  enabled:
+    description:
+      - Whether the service should start on boot.
+      - B(At least one of state and enabled is required).
+    type: bool
+  force_stop:
+    description:
+      - Whether the service should not be restarted automatically by launchd.
+      - Services might have the C(KeepAlive) attribute set to V(true) in a launchd configuration. If it is set to V(true),
+        stopping a service causes C(launchd) to start it again.
+      - Set this option to V(true) to let this module change the C(KeepAlive) attribute to V(false).
+    type: bool
+    default: false
+notes:
+  - A user must be privileged to manage services using this module.
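+# A minimal sketch of how enabled/force_stop are applied by the Plist class below
+# (the plist path is an example):
+#   import plistlib
+#   with open('/Library/LaunchDaemons/org.memcached.plist', 'rb') as f:
+#       job = plistlib.load(f)
+#   job['RunAtLoad'] = True    # enabled=true
+#   job['KeepAlive'] = False   # force_stop=true
+#   with open('/Library/LaunchDaemons/org.memcached.plist', 'wb') as f:
+#       plistlib.dump(job, f)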
+requirements: + - A system managed by launchd + - The plistlib Python library +""" + +EXAMPLES = r""" +- name: Make sure spotify webhelper is started + community.general.launchd: + name: com.spotify.webhelper + state: started + +- name: Deploy custom memcached job definition + template: + src: org.memcached.plist.j2 + dest: /Library/LaunchDaemons/org.memcached.plist + +- name: Run memcached + community.general.launchd: + name: org.memcached + state: started + +- name: Stop memcached + community.general.launchd: + name: org.memcached + state: stopped + +- name: Stop memcached + community.general.launchd: + name: org.memcached + state: stopped + force_stop: true + +- name: Restart memcached + community.general.launchd: + name: org.memcached + state: restarted + +- name: Unload memcached + community.general.launchd: + name: org.memcached + state: unloaded + +- name: restart sshd + community.general.launchd: + name: com.openssh.sshd + plist: ssh.plist + state: restarted +""" + +RETURN = r""" +status: + description: Metadata about service status. + returned: always + type: dict + sample: + { + "current_pid": "-", + "current_state": "stopped", + "previous_pid": "82636", + "previous_state": "running" + } +""" + +import os +import plistlib +from abc import ABCMeta, abstractmethod +from time import sleep + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class ServiceState: + UNKNOWN = 0 + LOADED = 1 + STOPPED = 2 + STARTED = 3 + UNLOADED = 4 + + @staticmethod + def to_string(state): + strings = { + ServiceState.UNKNOWN: 'unknown', + ServiceState.LOADED: 'loaded', + ServiceState.STOPPED: 'stopped', + ServiceState.STARTED: 'started', + ServiceState.UNLOADED: 'unloaded' + } + return strings[state] + + +class Plist: + def __init__(self, module, service, filename=None): + self.__changed = False + self.__service = service + if filename is not None: + self.__filename = filename + else: + self.__filename = '%s.plist' % service + + state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() + + self.__file = self.__find_service_plist(self.__filename) + if self.__file is None: + msg = 'Unable to find the plist file %s for service %s' % ( + self.__filename, self.__service, + ) + if pid is None and state == ServiceState.UNLOADED: + msg += ' and it was not found among active services' + module.fail_json(msg=msg) + self.__update(module) + + @staticmethod + def __find_service_plist(filename): + """Finds the plist file associated with a service""" + + launchd_paths = [ + os.path.join(os.getenv('HOME'), 'Library/LaunchAgents'), + '/Library/LaunchAgents', + '/Library/LaunchDaemons', + '/System/Library/LaunchAgents', + '/System/Library/LaunchDaemons' + ] + + for path in launchd_paths: + try: + files = os.listdir(path) + except OSError: + continue + + if filename in files: + return os.path.join(path, filename) + return None + + def __update(self, module): + self.__handle_param_enabled(module) + self.__handle_param_force_stop(module) + + def __read_plist_file(self, module): + service_plist = {} + try: + with open(self.__file, 'rb') as plist_fp: + service_plist = plistlib.load(plist_fp) + except Exception as e: + module.fail_json(msg="Failed to read plist file " + "%s due to %s" % (self.__file, to_native(e))) + return service_plist + + def __write_plist_file(self, module, service_plist=None): + if not service_plist: + service_plist = {} + + try: + with open(self.__file, 'wb') as plist_fp: + plistlib.dump(service_plist, plist_fp) + 
except Exception as e: + module.fail_json(msg="Failed to write to plist file " + " %s due to %s" % (self.__file, to_native(e))) + + def __handle_param_enabled(self, module): + if module.params['enabled'] is not None: + service_plist = self.__read_plist_file(module) + + # Enable/disable service startup at boot if requested + # Launchctl does not expose functionality to set the RunAtLoad + # attribute of a job definition. So we parse and modify the job + # definition plist file directly for this purpose. + if module.params['enabled'] is not None: + enabled = service_plist.get('RunAtLoad', False) + if module.params['enabled'] != enabled: + service_plist['RunAtLoad'] = module.params['enabled'] + + # Update the plist with one of the changes done. + if not module.check_mode: + self.__write_plist_file(module, service_plist) + self.__changed = True + + def __handle_param_force_stop(self, module): + if module.params['force_stop'] is not None: + service_plist = self.__read_plist_file(module) + + # Set KeepAlive to false in case force_stop is defined to avoid + # that the service gets restarted when stopping was requested. + if module.params['force_stop'] is not None: + keep_alive = service_plist.get('KeepAlive', False) + if module.params['force_stop'] and keep_alive: + service_plist['KeepAlive'] = not module.params['force_stop'] + + # Update the plist with one of the changes done. + if not module.check_mode: + self.__write_plist_file(module, service_plist) + self.__changed = True + + def is_changed(self): + return self.__changed + + def get_file(self): + return self.__file + + +class LaunchCtlTask(metaclass=ABCMeta): + WAITING_TIME = 5 # seconds + + def __init__(self, module, service, plist): + self._module = module + self._service = service + self._plist = plist + self._launch = self._module.get_bin_path('launchctl', True) + + def run(self): + """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc. + and returns the new state and pid. + """ + self.runCommand() + return self.get_state() + + @abstractmethod + def runCommand(self): + pass + + def get_state(self): + rc, out, err = self._launchctl("list") + if rc != 0: + self._module.fail_json( + msg='Failed to get status of %s' % (self._launch)) + + state = ServiceState.UNLOADED + service_pid = "-" + status_code = None + for line in out.splitlines(): + if line.strip(): + pid, last_exit_code, label = line.split('\t') + if label.strip() == self._service: + service_pid = pid + status_code = last_exit_code + + # From launchctl man page: + # If the number [...] is negative, it represents the + # negative of the signal which killed the job. Thus, + # "-15" would indicate that the job was terminated with + # SIGTERM. + if last_exit_code not in ['0', '-2', '-3', '-9', '-15']: + # Something strange happened and we have no clue in + # which state the service is now. Therefore we mark + # the service state as UNKNOWN. + state = ServiceState.UNKNOWN + elif pid != '-': + # PID seems to be an integer so we assume the service + # is started. + state = ServiceState.STARTED + else: + # Exit code is 0 and PID is not available so we assume + # the service is stopped. + state = ServiceState.STOPPED + break + return (state, service_pid, status_code, err) + + def start(self): + rc, out, err = self._launchctl("start") + # Unfortunately launchd does not wait until the process really started. 
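+        # A sketch of an alternative (not what the module currently does): polling
+        # the job state would usually return sooner than the fixed delay below:
+        #     for _ in range(self.WAITING_TIME):
+        #         if self.get_state()[0] == ServiceState.STARTED:
+        #             break
+        #         sleep(1)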
+ sleep(self.WAITING_TIME) + return (rc, out, err) + + def stop(self): + rc, out, err = self._launchctl("stop") + # Unfortunately launchd does not wait until the process really stopped. + sleep(self.WAITING_TIME) + return (rc, out, err) + + def restart(self): + # TODO: check for rc, out, err + self.stop() + return self.start() + + def reload(self): + # TODO: check for rc, out, err + self.unload() + return self.load() + + def load(self): + return self._launchctl("load") + + def unload(self): + return self._launchctl("unload") + + def _launchctl(self, command): + service_or_plist = self._plist.get_file() if command in [ + 'load', 'unload'] else self._service if command in ['start', 'stop'] else "" + + rc, out, err = self._module.run_command( + '%s %s %s' % (self._launch, command, service_or_plist)) + + if rc != 0: + msg = "Unable to %s '%s' (%s): '%s'" % ( + command, self._service, self._plist.get_file(), err) + self._module.fail_json(msg=msg) + + return (rc, out, err) + + +class LaunchCtlStart(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlStart, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + + if state in (ServiceState.STOPPED, ServiceState.LOADED): + self.reload() + self.start() + elif state == ServiceState.STARTED: + # In case the service is already in started state but the + # job definition was changed we need to unload/load the + # service and start the service again. + if self._plist.is_changed(): + self.reload() + self.start() + elif state == ServiceState.UNLOADED: + self.load() + self.start() + elif state == ServiceState.UNKNOWN: + # We are in an unknown state, let's try to reload the config + # and start the service again. + self.reload() + self.start() + + +class LaunchCtlStop(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlStop, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + + if state == ServiceState.STOPPED: + # In case the service is stopped and we might later decide + # to start it, we need to reload the job definition by + # forcing an unload and load first. + # Afterwards we need to stop it as it might have been + # started again (KeepAlive or RunAtLoad). + if self._plist.is_changed(): + self.reload() + self.stop() + elif state in (ServiceState.STARTED, ServiceState.LOADED): + if self._plist.is_changed(): + self.reload() + self.stop() + elif state == ServiceState.UNKNOWN: + # We are in an unknown state, let's try to reload the config + # and stop the service gracefully. + self.reload() + self.stop() + + +class LaunchCtlReload(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlReload, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + + if state == ServiceState.UNLOADED: + # launchd throws an error if we do an unload on an already + # unloaded service. 
+ self.load() + else: + self.reload() + + +class LaunchCtlUnload(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlUnload, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + self.unload() + + +class LaunchCtlRestart(LaunchCtlReload): + def __init__(self, module, service, plist): + super(LaunchCtlRestart, self).__init__(module, service, plist) + + def runCommand(self): + super(LaunchCtlRestart, self).runCommand() + self.start() + + +class LaunchCtlList(LaunchCtlTask): + def __init__(self, module, service): + super(LaunchCtlList, self).__init__(module, service, None) + + def runCommand(self): + # Do nothing, the list functionality is done by the + # base class run method. + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + plist=dict(type='str'), + state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']), + enabled=dict(type='bool'), + force_stop=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_one_of=[ + ['state', 'enabled'], + ], + ) + + service = module.params['name'] + plist_filename = module.params['plist'] + action = module.params['state'] + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + 'status': {}, + } + + # We will tailor the plist file in case one of the options + # (enabled, force_stop) was specified. + plist = Plist(module, service, plist_filename) + result['changed'] = plist.is_changed() + + # Gather information about the service to be controlled. + state, pid, dummy, dummy = LaunchCtlList(module, service).run() + result['status']['previous_state'] = ServiceState.to_string(state) + result['status']['previous_pid'] = pid + + # Map the actions to specific tasks + tasks = { + 'started': LaunchCtlStart(module, service, plist), + 'stopped': LaunchCtlStop(module, service, plist), + 'restarted': LaunchCtlRestart(module, service, plist), + 'reloaded': LaunchCtlReload(module, service, plist), + 'unloaded': LaunchCtlUnload(module, service, plist) + } + + status_code = '0' + # Run the requested task + if not module.check_mode: + state, pid, status_code, err = tasks[action].run() + + result['status']['current_state'] = ServiceState.to_string(state) + result['status']['current_pid'] = pid + result['status']['status_code'] = status_code + result['status']['error'] = err + + if (result['status']['current_state'] != result['status']['previous_state'] or + result['status']['current_pid'] != result['status']['previous_pid']): + result['changed'] = True + if module.check_mode: + if result['status']['current_state'] != action: + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py deleted file mode 120000 index 88e026be76..0000000000 --- a/plugins/modules/layman.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/layman.py \ No newline at end of file diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py new file mode 100644 index 0000000000..af2191654f --- /dev/null +++ b/plugins/modules/layman.py @@ -0,0 +1,269 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Jakub Jirutka +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: layman +author: "Jakub Jirutka 
(@jirutka)" +short_description: Manage Gentoo overlays +description: + - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman + must be installed on a managed node prior using this module. +requirements: + - layman python module +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The overlay ID to install, synchronize, or uninstall. Use V(ALL) to sync all of the installed overlays (can be used + only when O(state=updated)). + required: true + type: str + list_url: + description: + - An URL of the alternative overlays list that defines the overlay to install. This list is fetched and saved under + C(${overlay_defs}/${name}.xml), where C(overlay_defs) is read from the Layman's configuration. + aliases: [url] + type: str + state: + description: + - Whether to install (V(present)), sync (V(updated)), or uninstall (V(absent)) the overlay. + default: present + choices: [present, absent, updated] + type: str + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: Install the overlay mozilla which is on the central overlays list + community.general.layman: + name: mozilla + +- name: Install the overlay cvut from the specified alternative list + community.general.layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + +- name: Update (sync) the overlay cvut or install if not installed yet + community.general.layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + state: updated + +- name: Update (sync) all of the installed overlays + community.general.layman: + name: ALL + state: updated + +- name: Uninstall the overlay cvut + community.general.layman: + name: cvut + state: absent +""" + +import shutil +import traceback + +from os import path + +LAYMAN_IMP_ERR = None +try: + from layman.api import LaymanAPI + from layman.config import BareConfig + HAS_LAYMAN_API = True +except ImportError: + LAYMAN_IMP_ERR = traceback.format_exc() + HAS_LAYMAN_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url + + +USERAGENT = 'ansible-httpget' + + +class ModuleError(Exception): + pass + + +def init_layman(config=None): + '''Returns the initialized ``LaymanAPI``. + + :param config: the layman's configuration to use (optional) + ''' + if config is None: + config = BareConfig(read_configfile=True, quietness=1) + return LaymanAPI(config) + + +def download_url(module, url, dest): + ''' + :param url: the URL to download + :param dest: the absolute path of where to save the downloaded content to; + it must be writable and not a directory + + :raises ModuleError + ''' + + # Hack to add params in the form that fetch_url expects + module.params['http_agent'] = USERAGENT + response, info = fetch_url(module, url) + if info['status'] != 200: + raise ModuleError("Failed to get %s: %s" % (url, info['msg'])) + + try: + with open(dest, 'w') as f: + shutil.copyfileobj(response, f) + except IOError as e: + raise ModuleError("Failed to write: %s" % str(e)) + + +def install_overlay(module, name, list_url=None): + '''Installs the overlay repository. 
+    then :list_url of an alternative list must be provided. The list will be
+    fetched and saved under ``${overlay_defs}/${name}.xml`` (the location of
+    ``overlay_defs`` is read from Layman's configuration).
+
+    :param name: the overlay id
+    :param list_url: the URL of the remote repositories list to look for the overlay
+        definition (optional, default: None)
+
+    :returns: True if the overlay was installed, or False if it already exists
+        (i.e. nothing has changed)
+    :raises ModuleError
+    '''
+    # read Layman configuration
+    layman_conf = BareConfig(read_configfile=True)
+    layman = init_layman(layman_conf)
+
+    if layman.is_installed(name):
+        return False
+
+    if module.check_mode:
+        mymsg = 'Would add layman repo \'' + name + '\''
+        module.exit_json(changed=True, msg=mymsg)
+
+    if not layman.is_repo(name):
+        if not list_url:
+            raise ModuleError("Overlay '%s' is not on the list of known "
+                              "overlays and URL of the remote list was not provided." % name)
+
+        overlay_defs = layman_conf.get_option('overlay_defs')
+        dest = path.join(overlay_defs, name + '.xml')
+
+        download_url(module, list_url, dest)
+
+        # reload config
+        layman = init_layman()
+
+    if not layman.add_repos(name):
+        raise ModuleError(layman.get_errors())
+
+    return True
+
+
+def uninstall_overlay(module, name):
+    '''Uninstalls the given overlay repository from the system.
+
+    :param name: the overlay id to uninstall
+
+    :returns: True if the overlay was uninstalled, or False if it does not exist
+        (i.e. nothing has changed)
+    :raises ModuleError
+    '''
+    layman = init_layman()
+
+    if not layman.is_installed(name):
+        return False
+
+    if module.check_mode:
+        mymsg = 'Would remove layman repo \'' + name + '\''
+        module.exit_json(changed=True, msg=mymsg)
+
+    layman.delete_repos(name)
+    if layman.get_errors():
+        raise ModuleError(layman.get_errors())
+
+    return True
+
+
+def sync_overlay(name):
+    '''Synchronizes the specified overlay repository.
+
+    :param name: the overlay repository id to sync
+    :raises ModuleError
+    '''
+    layman = init_layman()
+
+    if not layman.sync(name):
+        messages = [str(item[1]) for item in layman.sync_results[2]]
+        raise ModuleError(messages)
+
+
+def sync_overlays():
+    '''Synchronize all of the installed overlays.
+
+    :raises ModuleError
+    '''
+    layman = init_layman()
+
+    for name in layman.get_installed():
+        sync_overlay(name)
+
+
+def main():
+    # define module
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            list_url=dict(aliases=['url']),
+            state=dict(default="present", choices=['present', 'absent', 'updated']),
+            validate_certs=dict(default=True, type='bool'),
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_LAYMAN_API:
+        module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR)
+
+    state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+    changed = False
+    try:
+        if state == 'present':
+            changed = install_overlay(module, name, url)
+
+        elif state == 'updated':
+            if name == 'ALL':
+                sync_overlays()
+            elif install_overlay(module, name, url):
+                changed = True
+            else:
+                sync_overlay(name)
+        else:
+            changed = uninstall_overlay(module, name)
+
+    except ModuleError as e:
+        # Exception instances have no .message attribute on Python 3
+        module.fail_json(msg=str(e))
+    else:
+        module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/lbu.py b/plugins/modules/lbu.py
deleted file mode 120000
index 8f55930dde..0000000000
--- a/plugins/modules/lbu.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/lbu.py
\ No newline at end of file
diff --git a/plugins/modules/lbu.py b/plugins/modules/lbu.py
new file mode 100644
index 0000000000..7957d0392a
--- /dev/null
+++ b/plugins/modules/lbu.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+
+# Copyright (c) 2019, Kaarle Ritvanen
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: lbu
+
+short_description: Local Backup Utility for Alpine Linux
+
+version_added: '0.2.0'
+
+description:
+  - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode.
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  commit:
+    description:
+      - Control whether to commit changed files.
+    type: bool
+  exclude:
+    description:
+      - List of paths to exclude.
+    type: list
+    elements: str
+  include:
+    description:
+      - List of paths to include.
+    type: list
+    elements: str
+
+author:
+  - Kaarle Ritvanen (@kunkku)
+"""
+
+EXAMPLES = r"""
+# Commit changed files (if any)
+- name: Commit
+  community.general.lbu:
+    commit: true
+
+# Exclude path and commit
+- name: Exclude directory
+  community.general.lbu:
+    commit: true
+    exclude:
+      - /etc/opt
+
+# Include paths without committing
+- name: Include file and directory
+  community.general.lbu:
+    include:
+      - /root/.ssh/authorized_keys
+      - /var/lib/misc
+"""
+
+RETURN = r"""
+msg:
+  description: Error message.
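+# A sketch of the path normalization used in run_module() below (the path is an
+# example):
+#   os.path.normpath('/' + 'var/lib/misc')[1:]  ->  'var/lib/misc'
+# The normalized form is compared against the output of `lbu include -l` /
+# `lbu exclude -l` to decide whether an update is needed.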
+ type: str + returned: on failure +""" + +from ansible.module_utils.basic import AnsibleModule + +import os.path + + +def run_module(): + module = AnsibleModule( + argument_spec={ + 'commit': {'type': 'bool'}, + 'exclude': {'type': 'list', 'elements': 'str'}, + 'include': {'type': 'list', 'elements': 'str'} + }, + supports_check_mode=True + ) + + changed = False + + def run_lbu(*args): + code, stdout, stderr = module.run_command( + [module.get_bin_path('lbu', required=True)] + list(args) + ) + if code: + module.fail_json(changed=changed, msg=stderr) + return stdout + + update = False + commit = False + + for param in ('include', 'exclude'): + if module.params[param]: + paths = run_lbu(param, '-l').split('\n') + for path in module.params[param]: + if os.path.normpath('/' + path)[1:] not in paths: + update = True + + if module.params['commit']: + commit = update or run_lbu('status') > '' + + if module.check_mode: + module.exit_json(changed=update or commit) + + if update: + for param in ('include', 'exclude'): + if module.params[param]: + run_lbu(param, *module.params[param]) + changed = True + + if commit: + run_lbu('commit') + changed = True + + module.exit_json(changed=changed) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py deleted file mode 120000 index 4e99e91474..0000000000 --- a/plugins/modules/ldap_attrs.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ldap/ldap_attrs.py \ No newline at end of file diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py new file mode 100644 index 0000000000..cb8c676536 --- /dev/null +++ b/plugins/modules/ldap_attrs.py @@ -0,0 +1,338 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Maciej Delmanowski +# Copyright (c) 2017, Alexander Korinek +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ldap_attrs +short_description: Add or remove multiple LDAP attribute values +description: + - Add or remove multiple LDAP attribute values. +notes: + - This only deals with attributes on existing entries. To add or remove whole entries, see M(community.general.ldap_entry). + - For O(state=present) and O(state=absent), all value comparisons are performed on the server for maximum accuracy. For + O(state=exact), values have to be compared in Python, which obviously ignores LDAP matching rules. This should work out + in most cases, but it is theoretically possible to see spurious changes when target and actual values are semantically + identical but lexically distinct. +version_added: '0.2.0' +author: + - Jiri Tyr (@jtyr) + - Alexander Korinek (@noles) + - Maciej Delmanowski (@drybjed) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: full + version_added: 8.5.0 +options: + state: + required: false + type: str + choices: [present, absent, exact] + default: present + description: + - The state of the attribute values. If V(present), all given attribute values are added if they are missing. If V(absent), + all given attribute values are removed if present. If V(exact), the set of attribute values is forced to exactly those + provided and no others. If O(state=exact) and the attribute value is empty, all values for this attribute are removed. 
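+# A rough sketch of the LDAP operations each state maps to (the attribute name
+# and values are examples; see the LdapAttrs methods below):
+#   state=present -> [(ldap.MOD_ADD, 'olcDbIndex', b'uid eq')]       # missing values only
+#   state=absent  -> [(ldap.MOD_DELETE, 'olcDbIndex', b'uid eq')]    # present values only
+#   state=exact   -> [(ldap.MOD_REPLACE, 'olcDbIndex', [b'uid eq'])] # force the exact set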
+  attributes:
+    required: true
+    type: dict
+    description:
+      - The attribute(s) and value(s) to add or remove.
+      - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes.
+      - If you specify values for this option in YAML, please note that you can improve readability for long string values
+        by using YAML block modifiers as seen in the examples for this module.
+      - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10)
+        (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+  ordered:
+    required: false
+    type: bool
+    default: false
+    description:
+      - If V(true), prepend list values with X-ORDERED index numbers in all attributes specified in the current task. This
+        is useful mostly with C(olcAccess) attribute to easily manage LDAP Access Control Lists.
+extends_documentation_fragment:
+  - community.general.ldap.documentation
+  - community.general.attributes
+"""
+
+
+EXAMPLES = r"""
+- name: Configure directory number 1 for example.com
+  community.general.ldap_attrs:
+    dn: olcDatabase={1}hdb,cn=config
+    attributes:
+      olcSuffix: dc=example,dc=com
+    state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+  community.general.ldap_attrs:
+    dn: olcDatabase={1}hdb,cn=config
+    attributes:
+      olcAccess:
+        - >-
+          {0}to attrs=userPassword,shadowLastChange
+          by self write
+          by anonymous auth
+          by dn="cn=admin,dc=example,dc=com" write
+          by * none
+        - >-
+          {1}to dn.base="dc=example,dc=com"
+          by dn="cn=admin,dc=example,dc=com" write
+          by * read
+    state: exact
+
+# An alternative approach with automatic X-ORDERED numbering
+- name: Set up the ACL
+  community.general.ldap_attrs:
+    dn: olcDatabase={1}hdb,cn=config
+    attributes:
+      olcAccess:
+        - >-
+          to attrs=userPassword,shadowLastChange
+          by self write
+          by anonymous auth
+          by dn="cn=admin,dc=example,dc=com" write
+          by * none
+        - >-
+          to dn.base="dc=example,dc=com"
+          by dn="cn=admin,dc=example,dc=com" write
+          by * read
+    ordered: true
+    state: exact
+
+- name: Declare some indexes
+  community.general.ldap_attrs:
+    dn: olcDatabase={1}hdb,cn=config
+    attributes:
+      olcDbIndex:
+        - objectClass eq
+        - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+  community.general.ldap_attrs:
+    dn: olcDatabase={1}hdb,cn=config
+    attributes:
+      olcRootDN: cn=root,dc=example,dc=com
+      olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+    state: exact
+
+- name: Remove an attribute with a specific value
+  community.general.ldap_attrs:
+    dn: uid=jdoe,ou=people,dc=example,dc=com
+    attributes:
+      description: "An example user account"
+    state: absent
+    server_uri: ldap://localhost/
+    bind_dn: cn=admin,dc=example,dc=com
+    bind_pw: password
+
+- name: Remove specified attribute(s) from an entry
+  community.general.ldap_attrs:
+    dn: uid=jdoe,ou=people,dc=example,dc=com
+    attributes:
+      description: []
+    state: exact
+    server_uri: ldap://localhost/
+    bind_dn: cn=admin,dc=example,dc=com
+    bind_pw: password
+"""
+
+
+RETURN = r"""
+modlist:
+  description: List of modified parameters.
+ returned: success + type: list + sample: + - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]] +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together + +import re + +LDAP_IMP_ERR = None +try: + import ldap + import ldap.filter + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapAttrs(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.attrs = self.module.params['attributes'] + self.state = self.module.params['state'] + self.ordered = self.module.params['ordered'] + + def _order_values(self, values): + """ Prepend X-ORDERED index numbers to attribute's values. """ + ordered_values = [] + + if isinstance(values, list): + for index, value in enumerate(values): + cleaned_value = re.sub(r'^\{\d+\}', '', value) + ordered_values.append('{' + str(index) + '}' + cleaned_value) + + return ordered_values + + def _normalize_values(self, values): + """ Normalize attribute's values. """ + norm_values = [] + + if isinstance(values, list): + if self.ordered: + norm_values = list(map(to_bytes, + self._order_values(list(map(str, + values))))) + else: + norm_values = list(map(to_bytes, values)) + else: + norm_values = [to_bytes(str(values))] + + return norm_values + + def add(self): + modlist = [] + new_attrs = {} + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + added_values = [] + for value in norm_values: + if self._is_value_absent(name, value): + modlist.append((ldap.MOD_ADD, name, value)) + added_values.append(value) + if added_values: + new_attrs[name] = norm_values + return modlist, {}, new_attrs + + def delete(self): + modlist = [] + old_attrs = {} + new_attrs = {} + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + removed_values = [] + for value in norm_values: + if self._is_value_present(name, value): + removed_values.append(value) + modlist.append((ldap.MOD_DELETE, name, value)) + if removed_values: + old_attrs[name] = norm_values + new_attrs[name] = [value for value in norm_values if value not in removed_values] + return modlist, old_attrs, new_attrs + + def exact(self): + modlist = [] + old_attrs = {} + new_attrs = {} + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + try: + results = self.connection.search_s( + self.dn, ldap.SCOPE_BASE, attrlist=[name]) + except ldap.LDAPError as e: + self.fail("Cannot search for attribute %s" % name, e) + + current = results[0][1].get(name, []) + + if frozenset(norm_values) != frozenset(current): + if len(current) == 0: + modlist.append((ldap.MOD_ADD, name, norm_values)) + elif len(norm_values) == 0: + modlist.append((ldap.MOD_DELETE, name, None)) + else: + modlist.append((ldap.MOD_REPLACE, name, norm_values)) + old_attrs[name] = current + new_attrs[name] = norm_values + if len(current) == 1 and len(norm_values) == 1: + old_attrs[name] = current[0] + new_attrs[name] = norm_values[0] + + return modlist, old_attrs, new_attrs + + def _is_value_present(self, name, value): + """ True if the target attribute has the given value. 
""" + try: + escaped_value = ldap.filter.escape_filter_chars(to_text(value)) + filterstr = "(%s=%s)" % (name, escaped_value) + dns = self.connection.search_s(self.dn, ldap.SCOPE_BASE, filterstr) + is_present = len(dns) == 1 + except ldap.NO_SUCH_OBJECT: + is_present = False + + return is_present + + def _is_value_absent(self, name, value): + """ True if the target attribute doesn't have the given value. """ + return not self._is_value_present(name, value) + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attributes=dict(type='dict', required=True), + ordered=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), + ), + supports_check_mode=True, + required_together=ldap_required_together(), + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + # Instantiate the LdapAttr object + ldap = LdapAttrs(module) + old_attrs = None + new_attrs = None + + state = module.params['state'] + + # Perform action + if state == 'present': + modlist, old_attrs, new_attrs = ldap.add() + elif state == 'absent': + modlist, old_attrs, new_attrs = ldap.delete() + elif state == 'exact': + modlist, old_attrs, new_attrs = ldap.exact() + + changed = False + + if len(modlist) > 0: + changed = True + + if not module.check_mode: + try: + ldap.connection.modify_s(ldap.dn, modlist) + except Exception as e: + module.fail_json(msg="Attribute action failed.", details=to_native(e)) + + module.exit_json(changed=changed, modlist=modlist, diff={"before": old_attrs, "after": new_attrs}) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ldap_entry.py b/plugins/modules/ldap_entry.py deleted file mode 120000 index d1a76fa53b..0000000000 --- a/plugins/modules/ldap_entry.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ldap/ldap_entry.py \ No newline at end of file diff --git a/plugins/modules/ldap_entry.py b/plugins/modules/ldap_entry.py new file mode 100644 index 0000000000..05242304bd --- /dev/null +++ b/plugins/modules/ldap_entry.py @@ -0,0 +1,269 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ldap_entry +short_description: Add or remove LDAP entries +description: + - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes. + To assert the attribute values of an entry, see M(community.general.ldap_attrs). +author: + - Jiri Tyr (@jtyr) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + attributes: + description: + - If O(state=present), attributes necessary to create an entry. Existing entries are never modified. To assert specific + attribute values on an existing entry, use M(community.general.ldap_attrs) module instead. + - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve readability for long string values + by using YAML block modifiers as seen in the examples for this module. 
+      - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10)
+        (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+    type: dict
+    default: {}
+  objectClass:
+    description:
+      - If O(state=present), value or list of values to use when creating the entry. It can either be a string or an actual
+        list of strings.
+    type: list
+    elements: str
+  state:
+    description:
+      - The target state of the entry.
+    choices: [present, absent]
+    default: present
+    type: str
+  recursive:
+    description:
+      - If O(state=absent), a flag indicating whether a single entry or the whole branch must be deleted.
+    type: bool
+    default: false
+    version_added: 4.6.0
+extends_documentation_fragment:
+  - community.general.ldap.documentation
+  - community.general.attributes
+"""
+
+
+EXAMPLES = r"""
+- name: Make sure we have a parent entry for users
+  community.general.ldap_entry:
+    dn: ou=users,dc=example,dc=com
+    objectClass: organizationalUnit
+
+- name: Make sure we have an admin user
+  community.general.ldap_entry:
+    dn: cn=admin,dc=example,dc=com
+    objectClass:
+      - simpleSecurityObject
+      - organizationalRole
+    attributes:
+      description: An LDAP administrator
+      userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Set possible values for attributes elements
+  community.general.ldap_entry:
+    dn: cn=admin,dc=example,dc=com
+    objectClass:
+      - simpleSecurityObject
+      - organizationalRole
+    attributes:
+      description: An LDAP Administrator
+      roleOccupant:
+        - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com
+        - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com
+      olcAccess:
+        - >-
+          {0}to attrs=userPassword,shadowLastChange
+          by self write
+          by anonymous auth
+          by dn="cn=admin,dc=example,dc=com" write
+          by * none
+        - >-
+          {1}to dn.base="dc=example,dc=com"
+          by dn="cn=admin,dc=example,dc=com" write
+          by * read
+
+- name: Get rid of an old entry
+  community.general.ldap_entry:
+    dn: ou=stuff,dc=example,dc=com
+    state: absent
+    server_uri: ldap://localhost/
+    bind_dn: cn=admin,dc=example,dc=com
+    bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+#   server_uri: ldap://localhost/
+#   bind_dn: cn=admin,dc=example,dc=com
+#   bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an old entry
+  community.general.ldap_entry:
+    dn: ou=stuff,dc=example,dc=com
+    state: absent
+  args: "{{ ldap_auth }}"
+"""
+
+
+RETURN = r"""
+# Default return values
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
+
+LDAP_IMP_ERR = None
+try:
+    import ldap.modlist
+    import ldap.controls
+
+    HAS_LDAP = True
+except ImportError:
+    LDAP_IMP_ERR = traceback.format_exc()
+    HAS_LDAP = False
+
+
+class LdapEntry(LdapGeneric):
+    def __init__(self, module):
+        LdapGeneric.__init__(self, module)
+
+        # Shortcuts
+        self.state = self.module.params['state']
+        self.recursive = self.module.params['recursive']
+
+        # Add the objectClass into the list of attributes
+        self.module.params['attributes']['objectClass'] = (
+            self.module.params['objectClass'])
+
+        # Load attributes
+        if
self.state == 'present': + self.attrs = self._load_attrs() + + def _load_attrs(self): + """ Turn attribute's value to array. """ + attrs = {} + + for name, value in self.module.params['attributes'].items(): + if isinstance(value, list): + attrs[name] = list(map(to_bytes, value)) + else: + attrs[name] = [to_bytes(value)] + + return attrs + + def add(self): + """ If self.dn does not exist, returns a callable that will add it. """ + def _add(): + self.connection.add_s(self.dn, modlist) + + if not self._is_entry_present(): + modlist = ldap.modlist.addModlist(self.attrs) + action = _add + else: + action = None + + return action + + def delete(self): + """ If self.dn exists, returns a callable that will delete either + the item itself if the recursive option is not set or the whole branch + if it is. """ + def _delete(): + self.connection.delete_s(self.dn) + + def _delete_recursive(): + """ Attempt recursive deletion using the subtree-delete control. + If that fails, do it manually. """ + try: + subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805') + self.connection.delete_ext_s(self.dn, serverctrls=[subtree_delete]) + except ldap.NOT_ALLOWED_ON_NONLEAF: + search = self.connection.search_s(self.dn, ldap.SCOPE_SUBTREE, attrlist=('dn',)) + search.reverse() + for entry in search: + self.connection.delete_s(entry[0]) + + if self._is_entry_present(): + if self.recursive: + action = _delete_recursive + else: + action = _delete + else: + action = None + + return action + + def _is_entry_present(self): + try: + self.connection.search_s(self.dn, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + is_present = False + else: + is_present = True + + return is_present + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attributes=dict(default={}, type='dict'), + objectClass=dict(type='list', elements='str'), + state=dict(default='present', choices=['present', 'absent']), + recursive=dict(default=False, type='bool'), + ), + required_if=[('state', 'present', ['objectClass'])], + supports_check_mode=True, + required_together=ldap_required_together(), + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + state = module.params['state'] + + # Instantiate the LdapEntry object + ldap = LdapEntry(module) + + # Get the action function + if state == 'present': + action = ldap.add() + elif state == 'absent': + action = ldap.delete() + + # Perform the action + if action is not None and not module.check_mode: + try: + action() + except Exception as e: + module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=(action is not None)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ldap_inc.py b/plugins/modules/ldap_inc.py new file mode 100644 index 0000000000..41d58dfb3f --- /dev/null +++ b/plugins/modules/ldap_inc.py @@ -0,0 +1,241 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Philippe Duveau +# Copyright (c) 2019, Maciej Delmanowski (ldap_attrs.py) +# Copyright (c) 2017, Alexander Korinek (ldap_attrs.py) +# Copyright (c) 2016, Peter Sagerson (ldap_attrs.py) +# Copyright (c) 2016, Jiri Tyr (ldap_attrs.py) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# The code of this module is derived from that of ldap_attrs.py + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ldap_inc 
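+# Background note (illustrative, not executed): with python-ldap, the RFC 4525
+# increment is a single atomic modify request, for example
+#   connection.modify_s(dn, [(ldap.MOD_INCREMENT, 'uidNumber', [b'1'])])
+# This module wraps that call (see inc_rfc4525() below) and falls back to a
+# delete+add pair when the server does not advertise the feature.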
+short_description: Use the Modify-Increment LDAP V3 feature to increment an attribute value
+version_added: 10.2.0
+description:
+  - Atomically increments the value of an attribute and returns its new value.
+notes:
+  - When implemented by the directory server, the module uses the Modify-Increment extension defined in L(RFC4525, https://www.rfc-editor.org/rfc/rfc4525.html)
+    together with the Post-Read control. The extension and the control are implemented in OpenLDAP, but not all directory
+    servers support them. In that case, the module automatically falls back to a more classic two-phase method - first the
+    current value is read, then a single modify request removes the old value and adds the new one. If the value has been
+    changed by a concurrent call in the meantime, the remove action fails and the sequence is retried, up to 3 times, before
+    an error is raised to the playbook. In an environment with heavy concurrent modifications, the module is therefore not
+    guaranteed to succeed.
+  - This module only deals with integer attributes of an existing entry. To modify attributes of an entry, see M(community.general.ldap_attrs)
+    or to add or remove whole entries, see M(community.general.ldap_entry).
+author:
+  - Philippe Duveau (@pduveau)
+requirements:
+  - python-ldap
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  dn:
+    required: true
+    type: str
+    description:
+      - The DN entry containing the attribute to increment.
+  attribute:
+    required: true
+    type: str
+    description:
+      - The attribute to increment.
+  increment:
+    required: false
+    type: int
+    default: 1
+    description:
+      - The value of the increment to apply.
+  method:
+    required: false
+    type: str
+    default: auto
+    choices: [auto, rfc4525, legacy]
+    description:
+      - If V(auto), the module automatically determines the method to use.
+      - If V(rfc4525) or V(legacy), forces the use of the corresponding method.
+extends_documentation_fragment:
+  - community.general.ldap.documentation
+  - community.general.attributes
+"""
+
+
+EXAMPLES = r"""
+- name: Increment uidNumber by 1 for example.com
+  community.general.ldap_inc:
+    dn: "cn=uidNext,ou=unix-management,dc=example,dc=com"
+    attribute: "uidNumber"
+    increment: 1
+  register: ldap_uidNumber_sequence
+
+- name: Set the user's identification number (uidNumber) when the increment was successful
+  community.general.ldap_attrs:
+    dn: "cn=john,ou=posix-users,dc=example,dc=com"
+    state: present
+    attributes:
+      - uidNumber: "{{ ldap_uidNumber_sequence.value }}"
+  when: ldap_uidNumber_sequence.incremented
+"""
+
+
+RETURN = r"""
+incremented:
+  description:
+    - It is set to V(true) if the attribute value has changed.
+  returned: success
+  type: bool
+  sample: true
+
+attribute:
+  description:
+    - The name of the attribute that was incremented.
+  returned: success
+  type: str
+  sample: uidNumber
+
+value:
+  description:
+    - The new value after incrementing.
+  returned: success
+  type: str
+  sample: "2"
+
+rfc4525:
+  description:
+    - Is V(true) if the method used to increment is based on RFC4525, V(false) if legacy.
+  returned: success
+  type: bool
+  sample: true
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
+
+with deps.declare("ldap", reason=missing_required_lib('python-ldap')):
+    import ldap
+    import ldap.controls.readentry
+
+
+class LdapInc(LdapGeneric):
+    def __init__(self, module):
+        LdapGeneric.__init__(self, module)
+        # Shortcuts
+        self.attr = self.module.params['attribute']
+        self.increment = self.module.params['increment']
+        self.method = self.module.params['method']
+
+    def inc_rfc4525(self):
+        return [(ldap.MOD_INCREMENT, self.attr, [to_bytes(str(self.increment))])]
+
+    def inc_legacy(self, curr_val, new_val):
+        return [(ldap.MOD_DELETE, self.attr, [to_bytes(curr_val)]),
+                (ldap.MOD_ADD, self.attr, [to_bytes(new_val)])]
+
+    def serverControls(self):
+        return [ldap.controls.readentry.PostReadControl(attrList=[self.attr])]
+
+    LDAP_MOD_INCREMENT = to_bytes("1.3.6.1.1.14")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=gen_specs(
+            attribute=dict(type='str', required=True),
+            increment=dict(type='int', default=1),
+            method=dict(type='str', default='auto', choices=['auto', 'rfc4525', 'legacy']),
+        ),
+        supports_check_mode=True,
+        required_together=ldap_required_together(),
+    )
+
+    deps.validate(module)
+
+    # Instantiate the LdapInc object
+    mod = LdapInc(module)
+
+    changed = False
+    ret = ""
+    rfc4525 = False
+
+    try:
+        if mod.increment != 0 and not module.check_mode:
+            changed = True
+
+            if mod.method != "auto":
+                rfc4525 = mod.method == "rfc4525"
+            else:
+                rootDSE = mod.connection.search_ext_s(
+                    base="",
+                    scope=ldap.SCOPE_BASE,
+                    attrlist=["*", "+"])
+                if len(rootDSE) == 1:
+                    if to_bytes(ldap.CONTROL_POST_READ) in rootDSE[0][1]["supportedControl"] and (
+                        mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedFeatures"] or
+                        mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedExtension"]
+                    ):
+                        rfc4525 = True
+
+            if rfc4525:
+                dummy, dummy, dummy, resp_ctrls = mod.connection.modify_ext_s(
+                    dn=mod.dn,
+                    modlist=mod.inc_rfc4525(),
+                    serverctrls=mod.serverControls(),
+                    clientctrls=None)
+                if len(resp_ctrls) == 1:
+                    ret = resp_ctrls[0].entry[mod.attr][0]
+
+            else:
+                tries = 0
+                max_tries = 3
+                while tries < max_tries:
+                    tries = tries + 1
+                    result = mod.connection.search_ext_s(
+                        base=mod.dn,
+                        scope=ldap.SCOPE_BASE,
+                        filterstr="(%s=*)" % mod.attr,
+                        attrlist=[mod.attr])
+                    if len(result) != 1:
+                        module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
+                        return
+                    try:
+                        ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
+                        # if the current value (the first argument of inc_legacy) has changed, the modify fails
+                        mod.connection.modify_s(
+                            dn=mod.dn,
+                            modlist=mod.inc_legacy(result[0][1][mod.attr][0], ret))
+                        break
+                    except ldap.NO_SUCH_ATTRIBUTE:
+                        if tries == max_tries:
+                            module.fail_json(msg="The increment could not be applied after " + str(max_tries) + " tries.")
+                            return
+
+        else:
+            result = mod.connection.search_ext_s(
+                base=mod.dn,
+                scope=ldap.SCOPE_BASE,
+                filterstr="(%s=*)" % mod.attr,
+                attrlist=[mod.attr])
+            if len(result) == 1:
+                ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
+                changed = mod.increment != 0
+            else:
+                module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
+
+    
except Exception as e: + module.fail_json(msg="Attribute action failed.", details=to_native(e)) + + module.exit_json(changed=changed, incremented=changed, attribute=mod.attr, value=ret, rfc4525=rfc4525) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ldap_passwd.py b/plugins/modules/ldap_passwd.py deleted file mode 120000 index c8b7de3408..0000000000 --- a/plugins/modules/ldap_passwd.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ldap/ldap_passwd.py \ No newline at end of file diff --git a/plugins/modules/ldap_passwd.py b/plugins/modules/ldap_passwd.py new file mode 100644 index 0000000000..86cd923c95 --- /dev/null +++ b/plugins/modules/ldap_passwd.py @@ -0,0 +1,140 @@ +#!/usr/bin/python + +# Copyright (c) 2017-2018, Keller Fuchs +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ldap_passwd +short_description: Set passwords in LDAP +description: + - Set a password for an LDAP entry. This module only asserts that a given password is valid for a given entry. To assert + the existence of an entry, see M(community.general.ldap_entry). +author: + - Keller Fuchs (@KellerFuchs) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + passwd: + description: + - The (plaintext) password to be set for O(dn). + type: str +extends_documentation_fragment: + - community.general.ldap.documentation + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Set a password for the admin user + community.general.ldap_passwd: + dn: cn=admin,dc=example,dc=com + passwd: "{{ vault_secret }}" + +- name: Setting passwords in bulk + community.general.ldap_passwd: + dn: "{{ item.key }}" + passwd: "{{ item.value }}" + with_dict: + alice: alice123123 + bob: "|30b!" + admin: "{{ vault_secret }}" +""" + +RETURN = r""" +modlist: + description: List of modified parameters. 
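+  # Reader's note (illustrative): each element is a python-ldap modlist tuple of
+  # the form (op, attribute, values); in the sample below, op 2 corresponds to
+  # ldap.MOD_REPLACE.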
+  returned: success
+  type: list
+  sample:
+    - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]]
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
+
+LDAP_IMP_ERR = None
+try:
+    import ldap
+
+    HAS_LDAP = True
+except ImportError:
+    LDAP_IMP_ERR = traceback.format_exc()
+    HAS_LDAP = False
+
+
+class LdapPasswd(LdapGeneric):
+    def __init__(self, module):
+        LdapGeneric.__init__(self, module)
+
+        # Shortcuts
+        self.passwd = self.module.params['passwd']
+
+    def passwd_check(self):
+        try:
+            tmp_con = ldap.initialize(self.server_uri)
+        except ldap.LDAPError as e:
+            self.fail("Cannot initialize LDAP connection", e)
+
+        if self.start_tls:
+            try:
+                tmp_con.start_tls_s()
+            except ldap.LDAPError as e:
+                self.fail("Cannot start TLS.", e)
+
+        try:
+            tmp_con.simple_bind_s(self.dn, self.passwd)
+        except ldap.INVALID_CREDENTIALS:
+            return True
+        except ldap.LDAPError as e:
+            self.fail("Cannot bind to the server.", e)
+        else:
+            return False
+        finally:
+            tmp_con.unbind()
+
+    def passwd_set(self):
+        # Exit early if the password is already valid
+        if not self.passwd_check():
+            return False
+
+        # Change the password (or throw an exception)
+        try:
+            self.connection.passwd_s(self.dn, None, self.passwd)
+        except ldap.LDAPError as e:
+            self.fail("Unable to set password", e)
+
+        # Password successfully changed
+        return True
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=gen_specs(passwd=dict(no_log=True)),
+        supports_check_mode=True,
+        required_together=ldap_required_together(),
+    )
+
+    if not HAS_LDAP:
+        module.fail_json(msg=missing_required_lib('python-ldap'),
+                         exception=LDAP_IMP_ERR)
+
+    ldap = LdapPasswd(module)
+
+    if module.check_mode:
+        module.exit_json(changed=ldap.passwd_check())
+
+    module.exit_json(changed=ldap.passwd_set())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py
deleted file mode 120000
index a21c103342..0000000000
--- a/plugins/modules/ldap_search.py
+++ /dev/null
@@ -1 +0,0 @@
-./net_tools/ldap/ldap_search.py
\ No newline at end of file
diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py
new file mode 100644
index 0000000000..d7d1a9bbcf
--- /dev/null
+++ b/plugins/modules/ldap_search.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016, Peter Sagerson
+# Copyright (c) 2020, Sebastian Pfahl
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in an LDAP server
+description:
+  - Return the results of an LDAP search.
+author:
+  - Sebastian Pfahl (@eryx12o45)
+requirements:
+  - python-ldap
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  dn:
+    required: true
+    type: str
+    description:
+      - The LDAP DN to search in.
+  scope:
+    choices: [base, onelevel, subordinate, children]
+    default: base
+    type: str
+    description:
+      - The LDAP scope to use.
+      - V(subordinate) requires the LDAPv3 subordinate feature extension.
+      - V(children) is equivalent to a "subtree" scope.
+  filter:
+    default: '(objectClass=*)'
+    type: str
+    description:
+      - Used for filtering the LDAP search result.
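+  # Reader's note (illustrative, not part of the option spec): filters use the
+  # RFC 4515 prefix syntax, so a compound filter such as
+  #   (&(objectClass=posixAccount)(uidNumber>=1000))
+  # matches POSIX accounts whose uidNumber is at least 1000.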
+  attrs:
+    type: list
+    elements: str
+    description:
+      - A list of attributes for limiting the result. Use an actual list or a comma-separated string.
+  schema:
+    default: false
+    type: bool
+    description:
+      - Set to V(true) to return the full attribute schema of entries, not their attribute values. Overrides O(attrs) when
+        provided.
+  page_size:
+    default: 0
+    type: int
+    description:
+      - The page size when performing a simple paged result search (RFC 2696). This setting can be tuned to reduce issues
+        with timeouts and server limits.
+      - Setting the page size to V(0) (default) disables paged searching.
+    version_added: 7.1.0
+  base64_attributes:
+    description:
+      - If provided, all attribute values returned that are listed in this option are Base64 encoded.
+      - If the special value V(*) appears in this list, all attributes are Base64 encoded.
+      - All other attribute values are converted to UTF-8 strings. If they contain binary data, please note that invalid
+        UTF-8 bytes are omitted.
+    type: list
+    elements: str
+    version_added: 7.0.0
+extends_documentation_fragment:
+  - community.general.ldap.documentation
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit
+  community.general.ldap_search:
+    dn: "ou=groups,dc=example,dc=com"
+  register: ldap_groups
+
+- name: Return GIDs for all groups
+  community.general.ldap_search:
+    dn: "ou=groups,dc=example,dc=com"
+    scope: "onelevel"
+    attrs:
+      - "gidNumber"
+  register: ldap_group_gids
+"""
+
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
+RETURN = r"""
+results:
+  description:
+    - For every entry found, one dictionary is returned.
+    - Every dictionary contains a key C(dn) with the entry's DN as a value.
+    - Every attribute of the entry found is added to the dictionary. If the key has precisely one value, that value is taken
+      directly, otherwise the key's value is a list.
+    - Note that all values (for single-element lists) and list elements (for multi-valued lists) are UTF-8 strings. Some
+      might contain Base64-encoded binary data; which ones is determined by the O(base64_attributes) option.
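+  # A hypothetical single entry, for illustration. A single-valued attribute is
+  # flattened to a string while a multi-valued one stays a list:
+  #   {"dn": "cn=admin,dc=example,dc=com",
+  #    "cn": "admin",
+  #    "objectClass": ["simpleSecurityObject", "organizationalRole"]}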
+  type: list
+  elements: dict
+"""
+
+import base64
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together

+LDAP_IMP_ERR = None
+try:
+    import ldap
+    # Imported explicitly: perform_search() below relies on
+    # ldap.controls.libldap.SimplePagedResultsControl for paged searches.
+    import ldap.controls.libldap
+
+    HAS_LDAP = True
+except ImportError:
+    LDAP_IMP_ERR = traceback.format_exc()
+    HAS_LDAP = False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=gen_specs(
+            dn=dict(type='str', required=True),
+            scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']),
+            filter=dict(type='str', default='(objectClass=*)'),
+            attrs=dict(type='list', elements='str'),
+            schema=dict(type='bool', default=False),
+            page_size=dict(type='int', default=0),
+            base64_attributes=dict(type='list', elements='str'),
+        ),
+        supports_check_mode=True,
+        required_together=ldap_required_together(),
+    )
+
+    if not HAS_LDAP:
+        module.fail_json(msg=missing_required_lib('python-ldap'),
+                         exception=LDAP_IMP_ERR)
+
+    try:
+        LdapSearch(module).main()
+    except Exception as exception:
+        module.fail_json(msg="Attribute action failed.", details=to_native(exception))
+
+
+def _normalize_string(val, convert_to_base64):
+    if isinstance(val, (str, bytes)):
+        if isinstance(val, str):
+            val = to_bytes(val, encoding='utf-8')
+        if convert_to_base64:
+            val = to_text(base64.b64encode(val))
+        else:
+            # See https://github.com/ansible/ansible/issues/80258#issuecomment-1477038952 for details.
+            # We want to make sure that all strings are properly UTF-8 encoded, even if they were not,
+            # or happened to be byte strings.
+            val = to_text(val, 'utf-8', errors='replace')
+        # See also https://github.com/ansible-collections/community.general/issues/5704.
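+    # Worked example of the branches above (assumed behaviour, not executed):
+    #   _normalize_string(b'\xc3\xa9', False) -> 'é'    (decoded as UTF-8)
+    #   _normalize_string(b'\xc3\xa9', True)  -> 'w6k=' (Base64 of the raw bytes)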
+ return val + + +def _extract_entry(dn, attrs, base64_attributes): + extracted = {'dn': dn} + for attr, val in list(attrs.items()): + convert_to_base64 = '*' in base64_attributes or attr in base64_attributes + if len(val) == 1: + extracted[attr] = _normalize_string(val[0], convert_to_base64) + else: + extracted[attr] = [_normalize_string(v, convert_to_base64) for v in val] + return extracted + + +class LdapSearch(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + self.filterstr = self.module.params['filter'] + self.attrlist = [] + self.page_size = self.module.params['page_size'] + self._load_scope() + self._load_attrs() + self._load_schema() + self._base64_attributes = set(self.module.params['base64_attributes'] or []) + + def _load_schema(self): + self.schema = self.module.params['schema'] + if self.schema: + self.attrsonly = 1 + else: + self.attrsonly = 0 + + def _load_scope(self): + spec = dict( + base=ldap.SCOPE_BASE, + onelevel=ldap.SCOPE_ONELEVEL, + subordinate=ldap.SCOPE_SUBORDINATE, + children=ldap.SCOPE_SUBTREE, + ) + self.scope = spec[self.module.params['scope']] + + def _load_attrs(self): + self.attrlist = self.module.params['attrs'] or None + + def main(self): + results = self.perform_search() + self.module.exit_json(changed=False, results=results) + + def perform_search(self): + ldap_entries = [] + controls = [] + if self.page_size > 0: + controls.append(ldap.controls.libldap.SimplePagedResultsControl(True, size=self.page_size, cookie='')) + try: + while True: + response = self.connection.search_ext( + self.dn, + self.scope, + filterstr=self.filterstr, + attrlist=self.attrlist, + attrsonly=self.attrsonly, + serverctrls=controls, + ) + rtype, results, rmsgid, serverctrls = self.connection.result3(response) + for result in results: + if isinstance(result[1], dict): + if self.schema: + ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys()))) + else: + ldap_entries.append(_extract_entry(result[0], result[1], self._base64_attributes)) + cookies = [c.cookie for c in serverctrls if c.controlType == ldap.controls.libldap.SimplePagedResultsControl.controlType] + if self.page_size > 0 and cookies and cookies[0]: + controls[0].cookie = cookies[0] + else: + return ldap_entries + except ldap.NO_SUCH_OBJECT: + self.module.fail_json(msg="Base not found: {0}".format(self.dn)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py deleted file mode 120000 index bb5b282c25..0000000000 --- a/plugins/modules/librato_annotation.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/librato_annotation.py \ No newline at end of file diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py new file mode 100644 index 0000000000..2118d95051 --- /dev/null +++ b/plugins/modules/librato_annotation.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# +# Copyright (c) Seth Edwards, 2014 +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: librato_annotation +short_description: Create an annotation in Librato +description: + - Create an annotation event on the given annotation stream O(name). If the annotation stream does not exist, it creates + one automatically. 
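+# Implementation note (for reviewers): the module simply POSTs a JSON payload to
+# https://metrics-api.librato.com/v1/annotations/NAME using HTTP basic auth; see
+# post_annotation() below.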
+author: "Seth Edwards (@Sedward)" +requirements: [] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + user: + type: str + description: + - Librato account username. + required: true + api_key: + type: str + description: + - Librato account API key. + required: true + name: + type: str + description: + - The annotation stream name. + - If the annotation stream does not exist, it creates one automatically. + required: false + title: + type: str + description: + - The title of an annotation is a string and may contain spaces. + - The title should be a short, high-level summary of the annotation for example V(v45 Deployment). + required: true + source: + type: str + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members + of a population. + required: false + description: + type: str + description: + - The description contains extra metadata about a particular annotation. + - The description should contain specifics on the individual annotation for example V(Deployed 9b562b2 shipped new feature + foo!). + required: false + start_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation started. + required: false + end_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation ended. + - For events that have a duration, this is a useful way to annotate the duration of the event. + required: false + links: + type: list + elements: dict + description: + - See examples. +""" + +EXAMPLES = r""" +- name: Create a simple annotation event with a source + community.general.librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXX + title: App Config Change + source: foo.bar + description: This is a detailed description of the config change + +- name: Create an annotation that includes a link + community.general.librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: code.deploy + title: app code deploy + description: this is a detailed description of a deployment + links: + - rel: example + href: http://www.example.com/deploy + +- name: Create an annotation with a start_time and end_time + community.general.librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: maintenance + title: Maintenance window + description: This is a detailed description of maintenance + start_time: 1395940006 + end_time: 1395954406 +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def post_annotation(module): + user = module.params['user'] + api_key = module.params['api_key'] + name = module.params['name'] + title = module.params['title'] + + url = 'https://metrics-api.librato.com/v1/annotations/%s' % name + params = {} + params['title'] = title + + if module.params['source'] is not None: + params['source'] = module.params['source'] + if module.params['description'] is not None: + params['description'] = module.params['description'] + if module.params['start_time'] is not None: + params['start_time'] = module.params['start_time'] + if module.params['end_time'] is not None: + params['end_time'] = module.params['end_time'] + if module.params['links'] is not None: + params['links'] = module.params['links'] + + json_body = module.jsonify(params) + + headers = {} + headers['Content-Type'] 
 = 'application/json'
+
+    # Hack send parameters the way fetch_url wants them
+    module.params['url_username'] = user
+    module.params['url_password'] = api_key
+    response, info = fetch_url(module, url, data=json_body, headers=headers)
+    response_code = str(info['status'])
+    response_body = info['body']
+    if info['status'] != 201:
+        if info['status'] >= 400:
+            module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+        else:
+            module.fail_json(msg="Request Failed. Response code: " + response_code)
+    response = response.read()
+    module.exit_json(changed=True, annotation=response)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            user=dict(required=True),
+            api_key=dict(required=True, no_log=True),
+            name=dict(),
+            title=dict(required=True),
+            source=dict(),
+            description=dict(),
+            start_time=dict(type='int'),
+            end_time=dict(type='int'),
+            links=dict(type='list', elements='dict')
+        )
+    )
+
+    post_annotation(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py
deleted file mode 120000
index 6a2d5bed44..0000000000
--- a/plugins/modules/linode.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/linode/linode.py
\ No newline at end of file
diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py
new file mode 100644
index 0000000000..e4e27bf0d4
--- /dev/null
+++ b/plugins/modules/linode.py
@@ -0,0 +1,685 @@
+#!/usr/bin/python
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+  - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Indicate desired state of the resource.
+    choices: [absent, active, deleted, present, restarted, started, stopped]
+    default: present
+    type: str
+  api_key:
+    description:
+      - Linode API key.
+      - E(LINODE_API_KEY) environment variable can be used instead.
+    type: str
+    required: true
+  name:
+    description:
+      - Name to give the instance (alphanumeric, dashes, underscore).
+      - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+    required: true
+    type: str
+  displaygroup:
+    description:
+      - Add the instance to a Display Group in Linode Manager.
+    type: str
+    default: ''
+  linode_id:
+    description:
+      - Unique ID of a Linode server. This value is read-only in the sense that if you specify it on creation of a Linode
+        it is not used. The Linode API generates these IDs, and we can use those generated values here to reference a Linode
+        more specifically. This is useful for idempotency.
+    aliases: [lid]
+    type: int
+  additional_disks:
+    description:
+      - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+      - Dictionary takes Size, Label, Type. Size is in MB.
+    type: list
+    elements: dict
+  alert_bwin_enabled:
+    description:
+      - Set status of bandwidth in alerts.
+    type: bool
+  alert_bwin_threshold:
+    description:
+      - Set threshold in MB of bandwidth in alerts.
+    type: int
+  alert_bwout_enabled:
+    description:
+      - Set status of bandwidth out alerts.
+ type: bool + alert_bwout_threshold: + description: + - Set threshold in MB of bandwidth out alerts. + type: int + alert_bwquota_enabled: + description: + - Set status of bandwidth quota alerts as percentage of network transfer quota. + type: bool + alert_bwquota_threshold: + description: + - Set threshold in MB of bandwidth quota alerts. + type: int + alert_cpu_enabled: + description: + - Set status of receiving CPU usage alerts. + type: bool + alert_cpu_threshold: + description: + - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. + type: int + alert_diskio_enabled: + description: + - Set status of receiving disk IO alerts. + type: bool + alert_diskio_threshold: + description: + - Set threshold for average IO ops/sec over 2 hour period. + type: int + backupweeklyday: + description: + - Day of the week to take backups. + type: int + backupwindow: + description: + - The time window in which backups are taken. + type: int + plan: + description: + - Plan to use for the instance (Linode plan). + type: int + payment_term: + description: + - Payment term to use for the instance (payment term in months). + default: 1 + choices: [1, 12, 24] + type: int + password: + description: + - Root password to apply to a new server (auto generated if missing). + type: str + private_ip: + description: + - Add private IPv4 address when Linode is created. + - Default is V(false). + type: bool + ssh_pub_key: + description: + - SSH public key applied to root user. + type: str + swap: + description: + - Swap size in MB. + default: 512 + type: int + distribution: + description: + - Distribution to use for the instance (Linode Distribution). + type: int + datacenter: + description: + - Datacenter to create an instance in (Linode Datacenter). + type: int + kernel_id: + description: + - Kernel to use for the instance (Linode Kernel). + type: int + wait: + description: + - Wait for the instance to be in state V(running) before returning. + type: bool + default: true + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + watchdog: + description: + - Set status of Lassie watchdog. + type: bool + default: true +requirements: + - linode-python +author: + - Vincent Viallet (@zbal) +notes: + - Please note, linode-python does not have python 3 support. + - This module uses the now deprecated v3 of the Linode API. + - Please review U(https://www.linode.com/api/linode) for determining the required parameters. 
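+# Reviewer's note: state=present creates missing servers, so name, plan, datacenter
+# and distribution must be provided (validated in linodeServers() below), while
+# stopped/restarted additionally require linode_id (see required_if in main()).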
+""" + +EXAMPLES = r""" +- name: Create a new Linode + community.general.linode: + name: linode-test1 + plan: 1 + datacenter: 7 + distribution: 129 + state: present + register: linode_creation + +- name: Create a server with a private IP Address + community.general.linode: + module: linode + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + delegate_to: localhost + register: linode_creation + +- name: Fully configure new server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 4 + datacenter: 2 + distribution: 99 + kernel_id: 138 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + alert_bwquota_enabled: true + alert_bwquota_threshold: 80 + alert_bwin_enabled: true + alert_bwin_threshold: 10 + alert_cpu_enabled: true + alert_cpu_threshold: 210 + alert_bwout_enabled: true + alert_bwout_threshold: 10 + alert_diskio_enabled: true + alert_diskio_threshold: 10000 + backupweeklyday: 1 + backupwindow: 2 + displaygroup: 'test' + additional_disks: + - {Label: 'disk1', Size: 2500, Type: 'raw'} + - {Label: 'newdisk', Size: 2000} + watchdog: true + delegate_to: localhost + register: linode_creation + +- name: Ensure a running server (create if missing) + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + delegate_to: localhost + register: linode_creation + +- name: Delete a server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: absent + delegate_to: localhost + +- name: Stop a server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: stopped + delegate_to: localhost + +- name: Reboot a server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: restarted + delegate_to: localhost +""" + +import time +import traceback + +LINODE_IMP_ERR = None +try: + from linode import api as linode_api + HAS_LINODE = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback + + +def randompass(): + ''' + Generate a long random password that comply to Linode requirements + ''' + # Linode API currently requires the following: + # It must contain at least two of these four character classes: + # lower case letters - upper case letters - numbers - punctuation + # we play it safe :) + import random + import string + # as of python 2.4, this reseeds the PRNG from urandom + random.seed() + lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) + upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) + number = ''.join(random.choice(string.digits) for x in range(6)) + punct = ''.join(random.choice(string.punctuation) for x in range(6)) + p = lower + upper + number + punct + return ''.join(random.sample(p, len(p))) + + +def getInstanceDetails(api, 
server): + ''' + Return the details of an instance, populating IPs, etc. + ''' + instance = {'id': server['LINODEID'], + 'name': server['LABEL'], + 'public': [], + 'private': []} + + # Populate with ips + for ip in api.linode_ip_list(LinodeId=server['LINODEID']): + if ip['ISPUBLIC'] and 'ipv4' not in instance: + instance['ipv4'] = ip['IPADDRESS'] + instance['fqdn'] = ip['RDNS_NAME'] + if ip['ISPUBLIC']: + instance['public'].append({'ipv4': ip['IPADDRESS'], + 'fqdn': ip['RDNS_NAME'], + 'ip_id': ip['IPADDRESSID']}) + else: + instance['private'].append({'ipv4': ip['IPADDRESS'], + 'fqdn': ip['RDNS_NAME'], + 'ip_id': ip['IPADDRESSID']}) + return instance + + +def linodeServers(module, api, state, name, + displaygroup, plan, additional_disks, distribution, + datacenter, kernel_id, linode_id, payment_term, password, + private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs): + instances = [] + changed = False + new_server = False + servers = [] + disks = [] + configs = [] + jobs = [] + + # See if we can match an existing server details with the provided linode_id + if linode_id: + # For the moment we only consider linode_id as criteria for match + # Later we can use more (size, name, etc.) and update existing + servers = api.linode_list(LinodeId=linode_id) + # Attempt to fetch details about disks and configs only if servers are + # found with linode_id + if servers: + disks = api.linode_disk_list(LinodeId=linode_id) + configs = api.linode_config_list(LinodeId=linode_id) + + # Act on the state + if state in ('active', 'present', 'started'): + # TODO: validate all the plan / distribution / datacenter are valid + + # Multi step process/validation: + # - need linode_id (entity) + # - need disk_id for linode_id - create disk from distrib + # - need config_id for linode_id - create config (need kernel) + + # Any create step triggers a job that need to be waited for. 
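+        # Sketch of the create path implemented below (JobIDs are appended to
+        # `jobs`; they are not polled directly, the code waits on the server
+        # STATUS field instead):
+        #   linode_create()                       -> LinodeID
+        #   linode_disk_createfromdistribution()  -> JobID (root disk)
+        #   linode_disk_create(Type='swap')       -> JobID (swap disk)
+        #   linode_config_create()                -> boot configuration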
+ if not servers: + for arg in (name, plan, distribution, datacenter): + if not arg: + module.fail_json(msg='%s is required for %s state' % (arg, state)) + # Create linode entity + new_server = True + + # Get size of all individually listed disks to subtract from Distribution disk + used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks) + + try: + res = api.linode_create(DatacenterID=datacenter, PlanID=plan, + PaymentTerm=payment_term) + linode_id = res['LinodeID'] + # Update linode Label to match name + api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name)) + # Update Linode with Ansible configuration options + api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs) + # Save server + servers = api.linode_list(LinodeId=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + + # Add private IP to Linode + if private_ip: + try: + res = api.linode_ip_addprivate(LinodeID=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + if not disks: + for arg in (name, linode_id, distribution): + if not arg: + module.fail_json(msg='%s is required for %s state' % (arg, state)) + # Create disks (1 from distrib, 1 for SWAP) + new_server = True + try: + if not password: + # Password is required on creation, if not provided generate one + password = randompass() + if not swap: + swap = 512 + # Create data disk + size = servers[0]['TOTALHD'] - used_disk_space - swap + + if ssh_pub_key: + res = api.linode_disk_createfromdistribution( + LinodeId=linode_id, DistributionID=distribution, + rootPass=password, rootSSHKey=ssh_pub_key, + Label='%s data disk (lid: %s)' % (name, linode_id), + Size=size) + else: + res = api.linode_disk_createfromdistribution( + LinodeId=linode_id, DistributionID=distribution, + rootPass=password, + Label='%s data disk (lid: %s)' % (name, linode_id), + Size=size) + jobs.append(res['JobID']) + # Create SWAP disk + res = api.linode_disk_create(LinodeId=linode_id, Type='swap', + Label='%s swap disk (lid: %s)' % (name, linode_id), + Size=swap) + # Create individually listed disks at specified size + if additional_disks: + for disk in additional_disks: + # If a disk Type is not passed in, default to ext4 + if disk.get('Type') is None: + disk['Type'] = 'ext4' + res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type']) + + jobs.append(res['JobID']) + except Exception as e: + # TODO: destroy linode ? 
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + if not configs: + for arg in (name, linode_id, distribution): + if not arg: + module.fail_json(msg='%s is required for %s state' % (arg, state)) + + # Check architecture + for distrib in api.avail_distributions(): + if distrib['DISTRIBUTIONID'] != distribution: + continue + arch = '32' + if distrib['IS64BIT']: + arch = '64' + break + + # Get latest kernel matching arch if kernel_id is not specified + if not kernel_id: + for kernel in api.avail_kernels(): + if not kernel['LABEL'].startswith('Latest %s' % arch): + continue + kernel_id = kernel['KERNELID'] + break + + # Get disk list + disks_id = [] + for disk in api.linode_disk_list(LinodeId=linode_id): + if disk['TYPE'] == 'ext3': + disks_id.insert(0, str(disk['DISKID'])) + continue + disks_id.append(str(disk['DISKID'])) + # Trick to get the 9 items in the list + while len(disks_id) < 9: + disks_id.append('') + disks_list = ','.join(disks_id) + + # Create config + new_server = True + try: + api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, + Disklist=disks_list, Label='%s config' % name) + configs = api.linode_config_list(LinodeId=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + # Start / Ensure servers are running + for server in servers: + # Refresh server state + server = api.linode_list(LinodeId=server['LINODEID'])[0] + # Ensure existing servers are up and running, boot if necessary + if server['STATUS'] != 1: + res = api.linode_boot(LinodeId=linode_id) + jobs.append(res['JobID']) + changed = True + + # wait here until the instances are up + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time(): + # refresh the server details + server = api.linode_list(LinodeId=server['LINODEID'])[0] + # status: + # -2: Boot failed + # 1: Running + if server['STATUS'] in (-2, 1): + break + time.sleep(5) + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID'])) + # Get a fresh copy of the server details + server = api.linode_list(LinodeId=server['LINODEID'])[0] + if server['STATUS'] == -2: + module.fail_json(msg='%s (lid: %s) failed to boot' % + (server['LABEL'], server['LINODEID'])) + # From now on we know the task is a success + # Build instance report + instance = getInstanceDetails(api, server) + # depending on wait flag select the status + if wait: + instance['status'] = 'Running' + else: + instance['status'] = 'Starting' + + # Return the root password if this is a new box and no SSH key + # has been provided + if new_server and not ssh_pub_key: + instance['password'] = password + instances.append(instance) + + elif state in ('stopped',): + if not servers: + module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) + + for server in servers: + instance = getInstanceDetails(api, server) + if server['STATUS'] != 2: + try: + res = api.linode_shutdown(LinodeId=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + instance['status'] = 'Stopping' + changed = True + else: + instance['status'] = 'Stopped' + instances.append(instance) + + elif state in ('restarted',): + if not servers: + module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) + + for server in servers: + instance = getInstanceDetails(api, server) + try: + res = 
api.linode_reboot(LinodeId=server['LINODEID']) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + instance['status'] = 'Restarting' + changed = True + instances.append(instance) + + elif state in ('absent', 'deleted'): + for server in servers: + instance = getInstanceDetails(api, server) + try: + api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + instance['status'] = 'Deleting' + changed = True + instances.append(instance) + + # Ease parsing if only 1 instance + if len(instances) == 1: + module.exit_json(changed=changed, instance=instances[0]) + + module.exit_json(changed=changed, instances=instances) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', + choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), + api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), + name=dict(type='str', required=True), + alert_bwin_enabled=dict(type='bool'), + alert_bwin_threshold=dict(type='int'), + alert_bwout_enabled=dict(type='bool'), + alert_bwout_threshold=dict(type='int'), + alert_bwquota_enabled=dict(type='bool'), + alert_bwquota_threshold=dict(type='int'), + alert_cpu_enabled=dict(type='bool'), + alert_cpu_threshold=dict(type='int'), + alert_diskio_enabled=dict(type='bool'), + alert_diskio_threshold=dict(type='int'), + backupweeklyday=dict(type='int'), + backupwindow=dict(type='int'), + displaygroup=dict(type='str', default=''), + plan=dict(type='int'), + additional_disks=dict(type='list', elements='dict'), + distribution=dict(type='int'), + datacenter=dict(type='int'), + kernel_id=dict(type='int'), + linode_id=dict(type='int', aliases=['lid']), + payment_term=dict(type='int', default=1, choices=[1, 12, 24]), + password=dict(type='str', no_log=True), + private_ip=dict(type='bool'), + ssh_pub_key=dict(type='str'), + swap=dict(type='int', default=512), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300), + watchdog=dict(type='bool', default=True), + ), + required_if=[ + ('state', 'restarted', ['linode_id']), + ('state', 'stopped', ['linode_id']), + ] + ) + + if not HAS_LINODE: + module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR) + + state = module.params.get('state') + api_key = module.params.get('api_key') + name = module.params.get('name') + alert_bwin_enabled = module.params.get('alert_bwin_enabled') + alert_bwin_threshold = module.params.get('alert_bwin_threshold') + alert_bwout_enabled = module.params.get('alert_bwout_enabled') + alert_bwout_threshold = module.params.get('alert_bwout_threshold') + alert_bwquota_enabled = module.params.get('alert_bwquota_enabled') + alert_bwquota_threshold = module.params.get('alert_bwquota_threshold') + alert_cpu_enabled = module.params.get('alert_cpu_enabled') + alert_cpu_threshold = module.params.get('alert_cpu_threshold') + alert_diskio_enabled = module.params.get('alert_diskio_enabled') + alert_diskio_threshold = module.params.get('alert_diskio_threshold') + backupweeklyday = module.params.get('backupweeklyday') + backupwindow = module.params.get('backupwindow') + displaygroup = module.params.get('displaygroup') + plan = module.params.get('plan') + additional_disks = module.params.get('additional_disks') + distribution = module.params.get('distribution') + 
datacenter = module.params.get('datacenter') + kernel_id = module.params.get('kernel_id') + linode_id = module.params.get('linode_id') + payment_term = module.params.get('payment_term') + password = module.params.get('password') + private_ip = module.params.get('private_ip') + ssh_pub_key = module.params.get('ssh_pub_key') + swap = module.params.get('swap') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + watchdog = int(module.params.get('watchdog')) + + check_items = dict( + alert_bwin_enabled=alert_bwin_enabled, + alert_bwin_threshold=alert_bwin_threshold, + alert_bwout_enabled=alert_bwout_enabled, + alert_bwout_threshold=alert_bwout_threshold, + alert_bwquota_enabled=alert_bwquota_enabled, + alert_bwquota_threshold=alert_bwquota_threshold, + alert_cpu_enabled=alert_cpu_enabled, + alert_cpu_threshold=alert_cpu_threshold, + alert_diskio_enabled=alert_diskio_enabled, + alert_diskio_threshold=alert_diskio_threshold, + backupweeklyday=backupweeklyday, + backupwindow=backupwindow, + ) + + kwargs = {k: v for k, v in check_items.items() if v is not None} + + # setup the auth + try: + api = linode_api.Api(api_key) + api.test_echo() + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + linodeServers(module, api, state, name, + displaygroup, plan, + additional_disks, distribution, datacenter, kernel_id, linode_id, + payment_term, password, private_ip, ssh_pub_key, swap, wait, + wait_timeout, watchdog, **kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py deleted file mode 120000 index 78a00b38c6..0000000000 --- a/plugins/modules/linode_v4.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/linode/linode_v4.py \ No newline at end of file diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py new file mode 100644 index 0000000000..6f0cac84d6 --- /dev/null +++ b/plugins/modules/linode_v4.py @@ -0,0 +1,305 @@ +#!/usr/bin/python + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: linode_v4 +short_description: Manage instances on the Linode cloud +description: Manage instances on the Linode cloud. +requirements: + - linode_api4 >= 2.0.0 +author: + - Luke Murphy (@decentral1se) +notes: + - No Linode resizing is currently implemented. This module aims to replace the current Linode module which uses deprecated + API bindings on the Linode side. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + region: + description: + - The region of the instance. This is a required parameter only when creating Linode instances. See U(https://www.linode.com/docs/api/regions/). + type: str + image: + description: + - The image of the instance. This is a required parameter only when creating Linode instances. + - See U(https://www.linode.com/docs/api/images/). + type: str + type: + description: + - The type of the instance. This is a required parameter only when creating Linode instances. + - See U(https://www.linode.com/docs/api/linode-types/). + type: str + label: + description: + - The instance label. This label is used as the main determiner for idempotency for the module and is therefore mandatory. 
+ type: str + required: true + group: + description: + - The group that the instance should be marked under. Please note, that group labelling is deprecated but still supported. + The encouraged method for marking instances is to use tags. + type: str + private_ip: + description: + - If V(true), the created Linode instance has private networking enabled and assigned a private IPv4 address. + type: bool + default: false + version_added: 3.0.0 + tags: + description: + - The tags that the instance should be marked under. + - See U(https://www.linode.com/docs/api/tags/). + type: list + elements: str + root_pass: + description: + - The password for the root user. If not specified, it generates a new one. This generated password is available in + the task success JSON. + type: str + authorized_keys: + description: + - A list of SSH public key parts to deploy for the root user. + type: list + elements: str + state: + description: + - The desired instance state. + type: str + choices: + - present + - absent + required: true + access_token: + description: + - The Linode API v4 access token. It may also be specified by exposing the E(LINODE_ACCESS_TOKEN) environment variable. + - See U(https://www.linode.com/docs/api#access-and-authentication). + required: true + type: str + stackscript_id: + description: + - The numeric ID of the StackScript to use when creating the instance. + - See U(https://www.linode.com/docs/api/stackscripts/). + type: int + version_added: 1.3.0 + stackscript_data: + description: + - An object containing arguments to any User Defined Fields present in the StackScript used when creating the instance. + Only valid when a O(stackscript_id) is provided. + - See U(https://www.linode.com/docs/api/stackscripts/). + type: dict + version_added: 1.3.0 +""" + +EXAMPLES = r""" +- name: Create a new Linode. + community.general.linode_v4: + label: new-linode + type: g6-nanode-1 + region: eu-west + image: linode/debian9 + root_pass: passw0rd + authorized_keys: + - "ssh-rsa ..." + stackscript_id: 1337 + stackscript_data: + variable: value + state: present + +- name: Delete that new Linode. + community.general.linode_v4: + label: new-linode + state: absent +""" + +RETURN = r""" +instance: + description: The instance description in JSON serialized form. + returned: Always. 
+ type: dict + sample: + { + "root_pass": "foobar", # if auto-generated + "alerts": { + "cpu": 90, + "io": 10000, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + } + }, + "created": "2018-09-26T08:12:33", + "group": "Foobar Group", + "hypervisor": "kvm", + "id": 10480444, + "image": "linode/centos7", + "ipv4": [ + "130.132.285.233" + ], + "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", + "label": "lin-foo", + "region": "eu-west", + "specs": { + "disk": 25600, + "memory": 1024, + "transfer": 1000, + "vcpus": 1 + }, + "status": "running", + "tags": [], + "type": "g6-nanode-1", + "updated": "2018-09-26T10:10:14", + "watchdog_enabled": true + } +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent + +LINODE_IMP_ERR = None +try: + from linode_api4 import Instance, LinodeClient + HAS_LINODE_DEPENDENCY = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE_DEPENDENCY = False + + +def create_linode(module, client, **kwargs): + """Creates a Linode instance and handles return format.""" + if kwargs['root_pass'] is None: + kwargs.pop('root_pass') + + try: + response = client.linode.instance_create(**kwargs) + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + try: + if isinstance(response, tuple): + instance, root_pass = response + instance_json = instance._raw_json + instance_json.update({'root_pass': root_pass}) + return instance_json + else: + return response._raw_json + except TypeError: + module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this' + ' module on https://github.com/ansible-collections/community.general/issues' + ) + + +def maybe_instance_from_label(module, client): + """Try to retrieve an instance based on a label.""" + try: + label = module.params['label'] + result = client.linode.instances(Instance.label == label) + return result[0] + except IndexError: + return None + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. 
Saw: %s' % exception) + + +def initialise_module(): + """Initialise the module parameter specification.""" + return AnsibleModule( + argument_spec=dict( + label=dict(type='str', required=True), + state=dict( + type='str', + required=True, + choices=['present', 'absent'] + ), + access_token=dict( + type='str', + required=True, + no_log=True, + fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), + ), + authorized_keys=dict(type='list', elements='str', no_log=False), + group=dict(type='str'), + image=dict(type='str'), + private_ip=dict(type='bool', default=False), + region=dict(type='str'), + root_pass=dict(type='str', no_log=True), + tags=dict(type='list', elements='str'), + type=dict(type='str'), + stackscript_id=dict(type='int'), + stackscript_data=dict(type='dict'), + ), + supports_check_mode=False, + required_one_of=( + ['state', 'label'], + ), + required_together=( + ['region', 'image', 'type'], + ) + ) + + +def build_client(module): + """Build a LinodeClient.""" + return LinodeClient( + module.params['access_token'], + user_agent=get_user_agent('linode_v4_module') + ) + + +def main(): + """Module entrypoint.""" + module = initialise_module() + + if not HAS_LINODE_DEPENDENCY: + module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) + + client = build_client(module) + instance = maybe_instance_from_label(module, client) + + if module.params['state'] == 'present' and instance is not None: + module.exit_json(changed=False, instance=instance._raw_json) + + elif module.params['state'] == 'present' and instance is None: + instance_json = create_linode( + module, client, + authorized_keys=module.params['authorized_keys'], + group=module.params['group'], + image=module.params['image'], + label=module.params['label'], + private_ip=module.params['private_ip'], + region=module.params['region'], + root_pass=module.params['root_pass'], + tags=module.params['tags'], + ltype=module.params['type'], + stackscript=module.params['stackscript_id'], + stackscript_data=module.params['stackscript_data'], + ) + module.exit_json(changed=True, instance=instance_json) + + elif module.params['state'] == 'absent' and instance is not None: + instance.delete() + module.exit_json(changed=True, instance=instance._raw_json) + + elif module.params['state'] == 'absent' and instance is None: + module.exit_json(changed=False, instance={}) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py deleted file mode 120000 index 7580c29b18..0000000000 --- a/plugins/modules/listen_ports_facts.py +++ /dev/null @@ -1 +0,0 @@ -./system/listen_ports_facts.py \ No newline at end of file diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py new file mode 100644 index 0000000000..11b364ad4b --- /dev/null +++ b/plugins/modules/listen_ports_facts.py @@ -0,0 +1,425 @@ +#!/usr/bin/python +# +# Copyright (c) 2017, Nathan Davison +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: listen_ports_facts +author: + - Nathan Davison (@ndavison) +description: + - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands. + - This module currently supports Linux only. 
+requirements:
+  - netstat or ss
+short_description: Gather facts on processes listening on TCP and UDP ports
+notes:
+  - C(ss) returns all processes for each listen address and port.
+  - This plugin returns each of them, so multiple entries for the same listen address and port are likely in results.
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.facts
+  - community.general.attributes.facts_module
+options:
+  command:
+    description:
+      - Override which command to use for fetching listen ports.
+      - By default, the module uses the first supported command found on the system (in alphanumerical order).
+    type: str
+    choices:
+      - netstat
+      - ss
+    version_added: 4.1.0
+  include_non_listening:
+    description:
+      - Show both listening and non-listening sockets (for TCP this means established connections).
+      - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state), RV(ansible_facts.tcp_listen[].foreign_address),
+        and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts.
+    type: bool
+    default: false
+    version_added: 5.4.0
+"""
+
+EXAMPLES = r"""
+- name: Gather facts on listening ports
+  community.general.listen_ports_facts:
+
+- name: TCP whitelist violation
+  ansible.builtin.debug:
+    msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
+  vars:
+    # listening ports that are not in the whitelist are the violations
+    tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
+    tcp_whitelist:
+      - 22
+      - 25
+  loop: "{{ tcp_listen_violations }}"
+
+- name: List TCP ports
+  ansible.builtin.debug:
+    msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
+
+- name: List UDP ports
+  ansible.builtin.debug:
+    msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
+
+- name: List all ports
+  ansible.builtin.debug:
+    msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
+
+- name: Gather facts on all ports and override which command to use
+  community.general.listen_ports_facts:
+    command: 'netstat'
+    include_non_listening: true
+"""
+
+RETURN = r"""
+ansible_facts:
+  description: Dictionary containing details of TCP and UDP ports with listening servers.
+  returned: always
+  type: complex
+  contains:
+    tcp_listen:
+      description: A list of processes that are listening on a TCP port.
+      returned: if TCP servers were found
+      type: list
+      contains:
+        address:
+          description: The address the server is listening on.
+          returned: always
+          type: str
+          sample: "0.0.0.0"
+        foreign_address:
+          description: The address of the remote end of the socket.
+          returned: if O(include_non_listening=true)
+          type: str
+          sample: "10.80.0.1"
+          version_added: 5.4.0
+        state:
+          description: The state of the socket.
+          returned: if O(include_non_listening=true)
+          type: str
+          sample: "ESTABLISHED"
+          version_added: 5.4.0
+        name:
+          description: The name of the listening process.
+          returned: if user permissions allow
+          type: str
+          sample: "mysqld"
+        pid:
+          description: The pid of the listening process.
+          returned: always
+          type: int
+          sample: 1223
+        port:
+          description: The port the server is listening on.
+          returned: always
+          type: int
+          sample: 3306
+        protocol:
+          description: The network protocol of the server.
+          returned: always
+          type: str
+          sample: "tcp"
+        stime:
+          description: The start time of the listening process.
+          returned: always
+          type: str
+          sample: "Thu Feb 2 13:29:45 2017"
+        user:
+          description: The user who is running the listening process.
+          returned: always
+          type: str
+          sample: "mysql"
+    udp_listen:
+      description: A list of processes that are listening on a UDP port.
+      returned: if UDP servers were found
+      type: list
+      contains:
+        address:
+          description: The address the server is listening on.
+          returned: always
+          type: str
+          sample: "0.0.0.0"
+        foreign_address:
+          description: The address of the remote end of the socket.
+          returned: if O(include_non_listening=true)
+          type: str
+          sample: "10.80.0.1"
+          version_added: 5.4.0
+        state:
+          description: The state of the socket. UDP is a connectionless protocol. Shows UCONN or ESTAB.
+          returned: if O(include_non_listening=true)
+          type: str
+          sample: "UCONN"
+          version_added: 5.4.0
+        name:
+          description: The name of the listening process.
+          returned: if user permissions allow
+          type: str
+          sample: "rsyslogd"
+        pid:
+          description: The pid of the listening process.
+          returned: always
+          type: int
+          sample: 609
+        port:
+          description: The port the server is listening on.
+          returned: always
+          type: int
+          sample: 514
+        protocol:
+          description: The network protocol of the server.
+          returned: always
+          type: str
+          sample: "udp"
+        stime:
+          description: The start time of the listening process.
+          returned: always
+          type: str
+          sample: "Thu Feb 2 13:29:45 2017"
+        user:
+          description: The user who is running the listening process.
+          returned: always
+          type: str
+          sample: "root"
+"""
+
+import re
+import platform
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def split_pid_name(pid_name):
+    """
+    Split the entry PID/Program name into the PID (int) and the name (str)
+    :param pid_name: PID/Program string separated with a slash. E.g. 51/sshd: returns pid = 51 and name = sshd
+    :return: PID (int) and the program name (str)
+    """
+    try:
+        pid, name = pid_name.split("/", 1)
+    except ValueError:
+        # likely unprivileged user, so add empty name & pid
+        return 0, ""
+    else:
+        name = name.rstrip(":")
+        return int(pid), name
+
+
+def netStatParse(raw):
+    """
+    The netstat result can be split into 6, 7 or 8 elements depending on the values of state, process and name.
+    For UDP the state is always empty. For UDP and TCP the process can be empty.
+    So these cases have to be checked.
+    :param raw: Netstat raw output String. First line explains the format, each following line contains a connection.
+    :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one
+    connection.
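+    Example (hypothetical output line; exact columns depend on the netstat build):
+        "tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1223/sshd"
+    is parsed into:
+        {'protocol': 'tcp', 'state': 'LISTEN', 'address': '0.0.0.0', 'foreign_address': '0.0.0.0:*',
+         'port': 22, 'name': 'sshd', 'pid': 1223}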
+ """ + results = list() + for line in raw.splitlines(): + if line.startswith(("tcp", "udp")): + # set variables to default state, in case they are not specified + state = "" + pid_and_name = "" + process = "" + formatted_line = line.split() + protocol, recv_q, send_q, address, foreign_address, rest = \ + formatted_line[0], formatted_line[1], formatted_line[2], formatted_line[3], formatted_line[4], formatted_line[5:] + address, port = address.rsplit(":", 1) + + if protocol.startswith("tcp"): + # nestat distinguishes between tcp6 and tcp + protocol = "tcp" + if len(rest) == 3: + state, pid_and_name, process = rest + if len(rest) == 2: + state, pid_and_name = rest + + if protocol.startswith("udp"): + # safety measure, similar to tcp6 + protocol = "udp" + if len(rest) == 2: + pid_and_name, process = rest + if len(rest) == 1: + pid_and_name = rest[0] + + pid, name = split_pid_name(pid_name=pid_and_name) + result = { + 'protocol': protocol, + 'state': state, + 'address': address, + 'foreign_address': foreign_address, + 'port': int(port), + 'name': name, + 'pid': int(pid), + } + if result not in results: + results.append(result) + return results + + +def ss_parse(raw): + """ + The ss_parse result can be either split in 6 or 7 elements depending on the process column, + e.g. due to unprivileged user. + :param raw: ss raw output String. First line explains the format, each following line contains a connection. + :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one + connection. + """ + results = list() + regex_conns = re.compile(pattern=r'\[?(.+?)\]?:([0-9]+)$') + regex_pid = re.compile(pattern=r'"(.*?)",pid=(\d+)') + + lines = raw.splitlines() + + if len(lines) == 0 or not lines[0].startswith('Netid '): + # unexpected stdout from ss + raise EnvironmentError('Unknown stdout format of `ss`: {0}'.format(raw)) + + # skip headers (-H arg is not present on e.g. Ubuntu 16) + lines = lines[1:] + + for line in lines: + cells = line.split(None, 6) + try: + if len(cells) == 6: + # no process column, e.g. 
+                process = str()
+                protocol, state, recv_q, send_q, local_addr_port, peer_addr_port = cells
+            else:
+                protocol, state, recv_q, send_q, local_addr_port, peer_addr_port, process = cells
+        except ValueError:
+            # unexpected stdout from ss
+            raise EnvironmentError(
+                'Expected `ss` table layout "Netid, State, Recv-Q, Send-Q, Local Address:Port, Peer Address:Port" and'
+                ' optionally "Process", but got something else: {0}'.format(line)
+            )
+
+        conns = regex_conns.search(local_addr_port)
+        pids = regex_pid.findall(process)
+        if conns is None:
+            continue
+
+        if not pids:
+            # likely unprivileged user, so add empty name & pid
+            # as we do in netstat logic to be consistent with output
+            pids = [(str(), 0)]
+
+        address = conns.group(1)
+        port = conns.group(2)
+        for name, pid in pids:
+            result = {
+                'protocol': protocol,
+                'state': state,
+                'address': address,
+                'foreign_address': peer_addr_port,
+                'port': int(port),
+                'name': name,
+                'pid': int(pid),
+            }
+            results.append(result)
+    return results
+
+
+def main():
+    command_args = ['-p', '-l', '-u', '-n', '-t']
+    commands_map = {
+        'netstat': {
+            'args': [],
+            'parse_func': netStatParse
+        },
+        'ss': {
+            'args': [],
+            'parse_func': ss_parse
+        },
+    }
+    module = AnsibleModule(
+        argument_spec=dict(
+            command=dict(type='str', choices=list(sorted(commands_map))),
+            include_non_listening=dict(default=False, type='bool'),
+        ),
+        supports_check_mode=True,
+    )
+
+    if module.params['include_non_listening']:
+        command_args = ['-p', '-u', '-n', '-t', '-a']
+
+    commands_map['netstat']['args'] = command_args
+    commands_map['ss']['args'] = command_args
+
+    if platform.system() != 'Linux':
+        module.fail_json(msg='This module requires Linux.')
+
+    def getPidSTime(pid):
+        ps_cmd = module.get_bin_path('ps', True)
+        rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
+        stime = ''
+        if rc == 0:
+            for line in ps_output.splitlines():
+                if 'started' not in line:
+                    stime = line
+        return stime
+
+    def getPidUser(pid):
+        ps_cmd = module.get_bin_path('ps', True)
+        rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
+        user = ''
+        if rc == 0:
+            for line in ps_output.splitlines():
+                if line != 'USER':
+                    user = line
+        return user
+
+    result = {
+        'changed': False,
+        'ansible_facts': {
+            'tcp_listen': [],
+            'udp_listen': [],
+        },
+    }
+
+    try:
+        command = None
+        bin_path = None
+        if module.params['command'] is not None:
+            command = module.params['command']
+            bin_path = module.get_bin_path(command, required=True)
+        else:
+            for c in sorted(commands_map):
+                bin_path = module.get_bin_path(c, required=False)
+                if bin_path is not None:
+                    command = c
+                    break
+
+        if bin_path is None:
+            raise EnvironmentError('Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
+
+        # which ports are listening for connections?
+        args = commands_map[command]['args']
+        rc, stdout, stderr = module.run_command([bin_path] + args)
+        if rc == 0:
+            parse_func = commands_map[command]['parse_func']
+            results = parse_func(stdout)
+
+            for connection in results:
+                # only display state and foreign_address for include_non_listening;
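+                # when it is false, drop both keys so the fact shape matches releases before 5.4.0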
+                if not module.params['include_non_listening']:
+                    connection.pop('state', None)
+                    connection.pop('foreign_address', None)
+                connection['stime'] = getPidSTime(connection['pid'])
+                connection['user'] = getPidUser(connection['pid'])
+                if connection['protocol'].startswith('tcp'):
+                    result['ansible_facts']['tcp_listen'].append(connection)
+                elif connection['protocol'].startswith('udp'):
+                    result['ansible_facts']['udp_listen'].append(connection)
+    except (KeyError, EnvironmentError) as e:
+        module.fail_json(msg=to_native(e))
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py
deleted file mode 120000
index 240eabde0d..0000000000
--- a/plugins/modules/lldp.py
+++ /dev/null
@@ -1 +0,0 @@
-./net_tools/lldp.py
\ No newline at end of file
diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py
new file mode 100644
index 0000000000..a142d9a2ab
--- /dev/null
+++ b/plugins/modules/lldp.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: lldp
+requirements: [lldpctl]
+short_description: Get details reported by LLDP
+description:
+  - Reads data out of C(lldpctl).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  multivalues:
+    description: If C(lldpctl) outputs an attribute multiple times, represent all values as a list.
+    required: false
+    type: bool
+    default: false
+author: "Andy Hill (@andyhky)"
+notes:
+  - Requires C(lldpd) running and LLDP enabled on switches.
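+  - C(lldpctl -f keyvalue) emits one C(lldp.<interface>.<path>=<value>) line per attribute; this module folds those lines into a nested dictionary keyed by interface.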
+""" + +EXAMPLES = r""" +# Retrieve switch/port information +- name: Gather information from LLDP + community.general.lldp: + +- name: Print each switch/port + ansible.builtin.debug: + msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}" + with_items: "{{ lldp.keys() }}" + +# TASK: [Print each switch/port] *********************************************************** +# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} +# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} +# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} +""" + +from ansible.module_utils.basic import AnsibleModule + + +def gather_lldp(module): + cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue'] + rc, output, err = module.run_command(cmd) + if output: + output_dict = {} + current_dict = {} + lldp_entries = output.strip().split("\n") + + final = "" + for entry in lldp_entries: + if entry.startswith('lldp'): + path, value = entry.strip().split("=", 1) + path = path.split(".") + path_components, final = path[:-1], path[-1] + elif final in current_dict and isinstance(current_dict[final], str): + current_dict[final] += '\n' + entry + continue + elif final in current_dict and isinstance(current_dict[final], list): + current_dict[final][-1] += '\n' + entry + continue + else: + continue + + current_dict = output_dict + for path_component in path_components: + current_dict[path_component] = current_dict.get(path_component, {}) + if not isinstance(current_dict[path_component], dict): + current_dict[path_component] = {'value': current_dict[path_component]} + current_dict = current_dict[path_component] + + if final in current_dict and isinstance(current_dict[final], dict) and module.params['multivalues']: + current_dict = current_dict[final] + final = 'value' + + if final not in current_dict or not module.params['multivalues']: + current_dict[final] = value + elif isinstance(current_dict[final], str): + current_dict[final] = [current_dict[final], value] + elif isinstance(current_dict[final], list): + current_dict[final].append(value) + + return output_dict + + +def main(): + module_args = dict( + multivalues=dict(type='bool', default=False) + ) + module = AnsibleModule(module_args) + + lldp_output = gather_lldp(module) + try: + data = {'lldp': lldp_output['lldp']} + module.exit_json(ansible_facts=data) + except TypeError: + module.fail_json(msg="lldpctl command failed. is lldpd running?") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py deleted file mode 120000 index b62a9f7575..0000000000 --- a/plugins/modules/locale_gen.py +++ /dev/null @@ -1 +0,0 @@ -./system/locale_gen.py \ No newline at end of file diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py new file mode 100644 index 0000000000..6cfbe81ccc --- /dev/null +++ b/plugins/modules/locale_gen.py @@ -0,0 +1,283 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: locale_gen +short_description: Creates or removes locales +description: + - Manages locales in Debian and Ubuntu systems. 
+author:
+  - Augustus Kling (@AugustusKling)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: list
+    elements: str
+    description:
+      - Name and encoding of the locales, such as V(en_GB.UTF-8).
+      - Before community.general 9.3.0, this was a string. Using a string still works.
+    required: true
+  state:
+    type: str
+    description:
+      - Whether the locales shall be present.
+    choices: [absent, present]
+    default: present
+notes:
+  - If C(/etc/locale.gen) exists, the module assumes the B(glibc) mechanism is used; otherwise, if C(/var/lib/locales/supported.d/)
+    exists, it assumes the B(ubuntu_legacy) mechanism; otherwise it raises an error.
+  - When using the glibc mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen).
+  - When using the ubuntu_legacy mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running
+    C(locale-gen).
+  - Please note that the code path that uses the ubuntu_legacy mechanism has not been tested for a while, because Ubuntu has already
+    been using the glibc mechanism. There is no support for it, given our inability to test it. Therefore, that mechanism is
+    B(deprecated) and will be removed in community.general 13.0.0.
+  - Currently the module is B(only supported for Debian and Ubuntu) systems.
+  - This module requires the C(locales) package to be installed on Debian and Ubuntu systems.
+"""
+
+EXAMPLES = r"""
+- name: Ensure a locale exists
+  community.general.locale_gen:
+    name: de_CH.UTF-8
+    state: present
+
+- name: Ensure multiple locales exist
+  community.general.locale_gen:
+    name:
+      - en_GB.UTF-8
+      - nl_NL.UTF-8
+    state: present
+"""
+
+RETURN = r"""
+mechanism:
+  description: Mechanism used to deploy the locales.
+  type: str
+  choices:
+    - glibc
+    - ubuntu_legacy
+  returned: success
+  sample: glibc
+  version_added: 10.2.0
+"""
+
+import os
+import re
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.mh.deco import check_mode_skip
+
+from ansible_collections.community.general.plugins.module_utils.locale_gen import locale_runner, locale_gen_runner
+
+
+ETC_LOCALE_GEN = "/etc/locale.gen"
+VAR_LIB_LOCALES = "/var/lib/locales/supported.d"
+VAR_LIB_LOCALES_LOCAL = os.path.join(VAR_LIB_LOCALES, "local")
+SUPPORTED_LOCALES = "/usr/share/i18n/SUPPORTED"
+LOCALE_NORMALIZATION = {
+    ".utf8": ".UTF-8",
+    ".eucjp": ".EUC-JP",
+    ".iso885915": ".ISO-8859-15",
+    ".cp1251": ".CP1251",
+    ".koi8r": ".KOI8-R",
+    ".armscii8": ".ARMSCII-8",
+    ".euckr": ".EUC-KR",
+    ".gbk": ".GBK",
+    ".gb18030": ".GB18030",
+    ".euctw": ".EUC-TW",
+}
+
+
+class LocaleGen(StateModuleHelper):
+    output_params = ["name"]
+    module = dict(
+        argument_spec=dict(
+            name=dict(type="list", elements="str", required=True),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True,
+    )
+
+    def __init_module__(self):
+        self.MECHANISMS = dict(
+            ubuntu_legacy=dict(
+                available=SUPPORTED_LOCALES,
+                apply_change=self.apply_change_ubuntu_legacy,
+            ),
+            glibc=dict(
+                available=SUPPORTED_LOCALES,
+                apply_change=self.apply_change_glibc,
+            ),
+        )
+
+        if os.path.exists(ETC_LOCALE_GEN):
+            self.vars.ubuntu_mode = False
+            self.vars.mechanism = "glibc"
+        elif os.path.exists(VAR_LIB_LOCALES):
+            self.vars.ubuntu_mode = True
+            self.vars.mechanism = "ubuntu_legacy"
+            self.module.deprecate(
+                "On this machine mechanism=ubuntu_legacy is used. This mechanism is deprecated and will be removed"
+                " in community.general 13.0.0. If you see this message on a modern Debian or Ubuntu version,"
+                " please create an issue in the community.general repository",
+                version="13.0.0", collection_name="community.general"
+            )
+        else:
+            self.do_raise('{0} and {1} are missing. Is the package "locales" installed?'.format(
+                VAR_LIB_LOCALES, ETC_LOCALE_GEN
+            ))
+
+        self.runner = locale_runner(self.module)
+
+        self.assert_available()
+        self.vars.set("is_present", self.is_present(), output=False)
+        self.vars.set("state_tracking", self._state_name(self.vars.is_present), output=False, change=True)
+
+    def __quit_module__(self):
+        self.vars.state_tracking = self._state_name(self.is_present())
+
+    @staticmethod
+    def _state_name(present):
+        return "present" if present else "absent"
+
+    def assert_available(self):
+        """Check if the given locales are available on the system.
+        This is done by checking either:
+        * if the locale is present in /etc/locale.gen
+        * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+        regexp = r'^\s*#?\s*(?P<locale>\S+[\._\S]+) (?P<charset>\S+)\s*$'
+        locales_available = self.MECHANISMS[self.vars.mechanism]["available"]
+
+        re_compiled = re.compile(regexp)
+        with open(locales_available, 'r') as fd:
+            lines = fd.readlines()
+        res = [re_compiled.match(line) for line in lines]
+        self.vars.set("available_lines", lines, verbosity=4)
+
+        locales_not_found = []
+        for locale in self.vars.name:
+            # Check if the locale is not found in any of the matches
+            if not any(match and match.group("locale") == locale for match in res):
+                locales_not_found.append(locale)
+
+        # locale may be installed but not listed in the file, for example C.UTF-8 in some systems
+        locales_not_found = self.locale_get_not_present(locales_not_found)
+
+        if locales_not_found:
+            self.do_raise("The following locales you have entered are not available on your system: {0}".format(', '.join(locales_not_found)))
+
+    def is_present(self):
+        return not self.locale_get_not_present(self.vars.name)
+
+    def locale_get_not_present(self, locales):
+        runner = locale_runner(self.module)
+        with runner() as ctx:
+            rc, out, err = ctx.run()
+            if self.verbosity >= 4:
+                self.vars.locale_run_info = ctx.run_info
+
+        not_found = []
+        for locale in locales:
+            if not any(self.fix_case(locale) == self.fix_case(line) for line in out.splitlines()):
+                not_found.append(locale)
+
+        return not_found
+
+    def fix_case(self, name):
+        """locale -a might return the encoding in either lower or upper case.
+        Passing through this function makes them uniform for comparisons."""
+        for s, r in LOCALE_NORMALIZATION.items():
+            name = name.replace(s, r)
+        return name
+
+    def set_locale_glibc(self, names, enabled=True):
+        """ Sets the state of the locale. Defaults to enabled. """
+        with open(ETC_LOCALE_GEN, 'r') as fr:
+            lines = fr.readlines()
+
+        locale_regexes = []
+
+        for name in names:
+            search_string = r'^#?\s*%s (?P<charset>.+)' % re.escape(name)
+            if enabled:
+                new_string = r'%s \g<charset>' % (name)
+            else:
+                new_string = r'# %s \g<charset>' % (name)
+            re_search = re.compile(search_string)
+            locale_regexes.append([re_search, new_string])
+
+        for i in range(len(lines)):
+            for [search, replace] in locale_regexes:
+                lines[i] = search.sub(replace, lines[i])
+
+        # Write the modified content back to the file
+        with open(ETC_LOCALE_GEN, 'w') as fw:
+            fw.writelines(lines)
+
+    def apply_change_glibc(self, targetState, names):
+        """Create or remove locale.
+
+        Keyword arguments:
+        targetState -- Desired state, either present or absent.
+        names -- Names list including encoding such as de_CH.UTF-8.
+        """
+
+        self.set_locale_glibc(names, enabled=(targetState == "present"))
+
+        runner = locale_gen_runner(self.module)
+        with runner() as ctx:
+            ctx.run()
+
+    def apply_change_ubuntu_legacy(self, targetState, names):
+        """Create or remove locale.
+
+        Keyword arguments:
+        targetState -- Desired state, either present or absent.
+        names -- Name list including encoding such as de_CH.UTF-8.
+        """
+        runner = locale_gen_runner(self.module)
+
+        if targetState == "present":
+            # Create locale.
+            # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+            with runner() as ctx:
+                ctx.run()
+        else:
+            # Deleting a locale involves discarding it from /var/lib/locales/supported.d/local and regenerating all locales.
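+            # Each line in that file is assumed to be "locale charset" separated by a single space,
+            # matching what Ubuntu's patched locale-gen writes there.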
+ with open(VAR_LIB_LOCALES_LOCAL, "r") as fr: + content = fr.readlines() + with open(VAR_LIB_LOCALES_LOCAL, "w") as fw: + for line in content: + locale, charset = line.split(' ') + if locale not in names: + fw.write(line) + # Purge locales and regenerate. + # Please provide a patch if you know how to avoid regenerating the locales to keep! + with runner("purge") as ctx: + ctx.run() + + @check_mode_skip + def __state_fallback__(self): + if self.vars.state_tracking == self.vars.state: + return + self.MECHANISMS[self.vars.mechanism]["apply_change"](self.vars.state, self.vars.name) + + +def main(): + LocaleGen.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py deleted file mode 120000 index 965208104a..0000000000 --- a/plugins/modules/logentries.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/logentries.py \ No newline at end of file diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py new file mode 100644 index 0000000000..535ef57a2a --- /dev/null +++ b/plugins/modules/logentries.py @@ -0,0 +1,162 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Ivan Vanderbyl +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: logentries +author: "Ivan Vanderbyl (@ivanvanderbyl)" +short_description: Module for tracking logs using U(logentries.com) +description: + - Sends logs to LogEntries in realtime. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + type: str + description: + - Path to a log file. + required: true + state: + type: str + description: + - Following state of the log. + choices: ['present', 'absent', 'followed', 'unfollowed'] + required: false + default: present + name: + type: str + description: + - Name of the log. + required: false + logtype: + type: str + description: + - Type of the log. + required: false + aliases: [type] + +notes: + - Requires the LogEntries agent which can be installed following the instructions at U(logentries.com). +""" + +EXAMPLES = r""" +- name: Track nginx logs + community.general.logentries: + path: /var/log/nginx/access.log + state: present + name: nginx-access-log + +- name: Stop tracking nginx logs + community.general.logentries: + path: /var/log/nginx/error.log + state: absent +""" + +from ansible.module_utils.basic import AnsibleModule + + +def query_log_status(module, le_path, path, state="present"): + """ Returns whether a log is followed or not. """ + + if state == "present": + rc, out, err = module.run_command([le_path, "followed", path]) + if rc == 0: + return True + + return False + + +def follow_log(module, le_path, logs, name=None, logtype=None): + """ Follows one or more logs if not already followed. 
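+    For example (hypothetical values), name="nginx" and logtype="access" make each
+    log be followed with a command equivalent to:
+        le follow /var/log/nginx/access.log --name nginx --type access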
""" + + followed_count = 0 + + for log in logs: + if query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + + cmd = [le_path, 'follow', log] + if name: + cmd.extend(['--name', name]) + if logtype: + cmd.extend(['--type', logtype]) + rc, out, err = module.run_command(cmd) + + if not query_log_status(module, le_path, log): + module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) + + followed_count += 1 + + if followed_count > 0: + module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) + + module.exit_json(changed=False, msg="logs(s) already followed") + + +def unfollow_log(module, le_path, logs): + """ Unfollows one or more logs if followed. """ + + removed_count = 0 + + # Using a for loop in case of error, we can report the package that failed + for log in logs: + # Query the log first, to see if we even need to remove. + if not query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command([le_path, 'rm', log]) + + if query_log_status(module, le_path, log): + module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) + + removed_count += 1 + + if removed_count > 0: + module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) + + module.exit_json(changed=False, msg="logs(s) already unfollowed") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(required=True), + state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), + name=dict(type='str'), + logtype=dict(type='str', aliases=['type']) + ), + supports_check_mode=True + ) + + le_path = module.get_bin_path('le', True, ['/usr/local/bin']) + + p = module.params + + # Handle multiple log files + logs = p["path"].split(",") + logs = [_f for _f in logs if _f] + + if p["state"] in ["present", "followed"]: + follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) + + elif p["state"] in ["absent", "unfollowed"]: + unfollow_log(module, le_path, logs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py deleted file mode 120000 index 473ceddf78..0000000000 --- a/plugins/modules/logentries_msg.py +++ /dev/null @@ -1 +0,0 @@ -./notification/logentries_msg.py \ No newline at end of file diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py new file mode 100644 index 0000000000..bbbaf9720d --- /dev/null +++ b/plugins/modules/logentries_msg.py @@ -0,0 +1,102 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: logentries_msg +short_description: Send a message to logentries +description: + - Send a message to logentries. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - Log token. + required: true + msg: + type: str + description: + - The message body. + required: true + api: + type: str + description: + - API endpoint. + default: data.logentries.com + port: + type: int + description: + - API endpoint port. 
+    default: 80
+author: "Jimmy Tang (@jcftang)"
+"""
+
+RETURN = """#"""
+
+EXAMPLES = r"""
+- name: Send a message to logentries
+  community.general.logentries_msg:
+    token: 00000000-0000-0000-0000-000000000000
+    msg: "{{ ansible_hostname }}"
+"""
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(module, token, msg, api, port):
+
+    message = "{0} {1}\n".format(token, msg)
+
+    api_ip = socket.gethostbyname(api)
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((api_ip, port))
+    try:
+        if not module.check_mode:
+            # socket.send() expects bytes on Python 3, so encode the message first
+            s.send(message.encode('utf-8'))
+    except Exception as e:
+        module.fail_json(msg="failed to send message, msg=%s" % e)
+    s.close()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(type='str', required=True, no_log=True),
+            msg=dict(type='str', required=True),
+            api=dict(type='str', default="data.logentries.com"),
+            port=dict(type='int', default=80)),
+        supports_check_mode=True
+    )
+
+    token = module.params["token"]
+    msg = module.params["msg"]
+    api = module.params["api"]
+    port = module.params["port"]
+
+    changed = False
+    try:
+        send_msg(module, token, msg, api, port)
+        changed = True
+    except Exception as e:
+        module.fail_json(msg="unable to send msg: %s" % e)
+
+    module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py
deleted file mode 120000
index 4ff1ffb7c6..0000000000
--- a/plugins/modules/logstash_plugin.py
+++ /dev/null
@@ -1 +0,0 @@
-./monitoring/logstash_plugin.py
\ No newline at end of file
diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py
new file mode 100644
index 0000000000..e0d112d334
--- /dev/null
+++ b/plugins/modules/logstash_plugin.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Loic Blot
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+  - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - Install plugin with that name.
+    required: true
+  state:
+    type: str
+    description:
+      - Apply plugin state.
+    choices: ["present", "absent"]
+    default: present
+  plugin_bin:
+    type: path
+    description:
+      - Specify logstash-plugin to use for plugin management.
+    default: /usr/share/logstash/bin/logstash-plugin
+  proxy_host:
+    type: str
+    description:
+      - Proxy host to use during plugin installation.
+  proxy_port:
+    type: str
+    description:
+      - Proxy port to use during plugin installation.
+  version:
+    type: str
+    description:
+      - Specify version of the plugin to install. If the plugin exists with a previous version, it is B(not) updated.
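+notes:
+  - O(proxy_host) and O(proxy_port) only take effect when both are set; they are passed to C(logstash-plugin) as C(-DproxyHost)/C(-DproxyPort) properties during installation.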
+""" + +EXAMPLES = r""" +- name: Install Logstash beats input plugin + community.general.logstash_plugin: + state: present + name: logstash-input-beats + +- name: Install specific version of a plugin + community.general.logstash_plugin: + state: present + name: logstash-input-syslog + version: '3.2.0' + +- name: Uninstall Logstash plugin + community.general.logstash_plugin: + state: absent + name: logstash-filter-multiline + +- name: Install Logstash plugin with alternate heap size + community.general.logstash_plugin: + state: present + name: logstash-input-beats + environment: + LS_JAVA_OPTS: "-Xms256m -Xmx256m" +""" + +from ansible.module_utils.basic import AnsibleModule + + +PACKAGE_STATE_MAP = dict( + present="install", + absent="remove" +) + + +def is_plugin_present(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, "list", plugin_name] + rc, out, err = module.run_command(cmd_args) + return rc == 0 + + +def parse_error(string): + reason = "reason: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] + + if version: + cmd_args.extend(["--version", version]) + + if proxy_host and proxy_port: + cmd_args.extend(["-DproxyHost=%s" % proxy_host, "-DproxyPort=%s" % proxy_port]) + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd_args) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def remove_plugin(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name] + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd_args) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), + plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), + proxy_host=dict(), + proxy_port=dict(), + version=dict() + ), + supports_check_mode=True + ) + + name = module.params["name"] + state = module.params["state"] + plugin_bin = module.params["plugin_bin"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + version = module.params["version"] + + present = is_plugin_present(module, plugin_bin, name) + + # skip if the state is correct + if (present and state == "present") or (state == "absent" and not present): + module.exit_json(changed=False, name=name, state=state) + + if state == "present": + changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port) + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py deleted file mode 120000 index fabe20e300..0000000000 --- a/plugins/modules/lvg.py +++ /dev/null @@ -1 +0,0 @@ -./system/lvg.py \ No newline at end of file diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py new file mode 100644 index 0000000000..bc165ad5f8 --- /dev/null +++ 
b/plugins/modules/lvg.py
@@ -0,0 +1,560 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, Alexander Bulimov
+# Based on lvol module by Jeroen Hoekx
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author:
+  - Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+  - This module creates, removes or resizes volume groups.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  vg:
+    description:
+      - The name of the volume group.
+    type: str
+    required: true
+  pvs:
+    description:
+      - List of comma-separated devices to use as physical devices in this volume group.
+      - Required when creating or resizing a volume group.
+      - The module runs C(pvcreate) if needed.
+      - O(remove_extra_pvs) controls whether or not unspecified physical devices are removed from the volume group.
+    type: list
+    elements: str
+  pesize:
+    description:
+      - The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector (where the sector size is the
+        largest sector size of the PVs currently used in the VG), or at least 128KiB.
+      - O(pesize) can optionally be suffixed by a UNIT (k/K/m/M/g/G); the default unit is megabyte.
+    type: str
+    default: "4"
+  pv_options:
+    description:
+      - Additional options to pass to C(pvcreate) when creating the volume group.
+    type: str
+    default: ''
+  pvresize:
+    description:
+      - If V(true), resize the physical volume to the maximum available size.
+    type: bool
+    default: false
+    version_added: '0.2.0'
+  vg_options:
+    description:
+      - Additional options to pass to C(vgcreate) when creating the volume group.
+    type: str
+    default: ''
+  state:
+    description:
+      - Control whether the volume group exists, and its state.
+      - The states V(active) and V(inactive) imply the V(present) state. Added in 7.1.0.
+      - If V(active) or V(inactive), the module manages the current state of the VG's logical volumes. The module also handles
+        the VG's autoactivation state if supported, except when creating a volume group with the autoactivation option
+        specified in O(vg_options).
+    type: str
+    choices: [absent, present, active, inactive]
+    default: present
+  force:
+    description:
+      - If V(true), allows removing a volume group that contains logical volumes.
+    type: bool
+    default: false
+  reset_vg_uuid:
+    description:
+      - Whether the volume group's UUID is regenerated.
+      - This is B(not idempotent). Specifying this parameter always results in a change.
+    type: bool
+    default: false
+    version_added: 7.1.0
+  reset_pv_uuid:
+    description:
+      - Whether the volume group's physical volumes' UUIDs are regenerated.
+      - This is B(not idempotent). Specifying this parameter always results in a change.
+    type: bool
+    default: false
+    version_added: 7.1.0
+  remove_extra_pvs:
+    description:
+      - Remove physical volumes from the volume group which are not in O(pvs).
+    type: bool
+    default: true
+    version_added: 10.4.0
+seealso:
+  - module: community.general.filesystem
+  - module: community.general.lvol
+  - module: community.general.parted
+notes:
+  - This module does not modify the PE size for an already present volume group.
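+  - Symlinks in O(pvs) are resolved to their real paths before comparison, because LVM itself always reports real device paths.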
+""" + +EXAMPLES = r""" +- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB + community.general.lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 + +- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB + community.general.lvg: + vg: vg.services + pvs: /dev/sdb + pesize: 128K + +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. + community.general.lvg: + vg: vg.services + pvs: + - /dev/sdb1 + - /dev/sdc5 + +- name: Remove a volume group with name vg.services + community.general.lvg: + vg: vg.services + state: absent + +- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible + community.general.lvg: + vg: resizableVG + pvs: /dev/sda3 + pvresize: true + +- name: Deactivate a volume group + community.general.lvg: + state: inactive + vg: vg.services + +- name: Activate a volume group + community.general.lvg: + state: active + vg: vg.services + +- name: Add new PVs to volume group without removing existing ones + community.general.lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc1 + remove_extra_pvs: false + state: present + +- name: Reset a volume group UUID + community.general.lvg: + state: inactive + vg: vg.services + reset_vg_uuid: true + +- name: Reset both volume group and pv UUID + community.general.lvg: + state: inactive + vg: vg.services + pvs: + - /dev/sdb1 + - /dev/sdc5 + reset_vg_uuid: true + reset_pv_uuid: true +""" + +import itertools +import os + +from ansible.module_utils.basic import AnsibleModule + +VG_AUTOACTIVATION_OPT = '--setautoactivation' + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command([dmsetup_cmd, "info", "-C", "--noheadings", "-o", "name", dm_device]) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + + +def find_vg(module, vg): + if not vg: + return None + vgs_cmd = module.get_bin_path('vgs', True) + dummy, current_vgs, dummy = module.run_command([vgs_cmd, "--noheadings", "-o", "vg_name,pv_count,lv_count", "--separator", ";"], check_rc=True) + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + return this_vg + + +def is_autoactivation_supported(module, vg_cmd): + autoactivation_supported = False + dummy, vgchange_opts, dummy = module.run_command([vg_cmd, '--help'], check_rc=True) + + if VG_AUTOACTIVATION_OPT in vgchange_opts: + autoactivation_supported = True + + return autoactivation_supported + + +def activate_vg(module, vg, active): + 
changed = False + vgchange_cmd = module.get_bin_path('vgchange', True) + vgs_cmd = module.get_bin_path('vgs', True) + vgs_fields = ['lv_attr'] + + autoactivation_enabled = False + autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgchange_cmd) + + if autoactivation_supported: + vgs_fields.append('autoactivation') + + vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '-o', ','.join(vgs_fields), '--separator', ';', vg] + dummy, current_vg_lv_states, dummy = module.run_command(vgs_cmd_with_opts, check_rc=True) + + lv_active_count = 0 + lv_inactive_count = 0 + + for line in current_vg_lv_states.splitlines(): + parts = line.strip().split(';') + if parts[0][4] == 'a': + lv_active_count += 1 + else: + lv_inactive_count += 1 + if autoactivation_supported: + autoactivation_enabled = autoactivation_enabled or parts[1] == 'enabled' + + activate_flag = None + if active and lv_inactive_count > 0: + activate_flag = 'y' + elif not active and lv_active_count > 0: + activate_flag = 'n' + + # Extra logic necessary because vgchange returns error when autoactivation is already set + if autoactivation_supported: + if active and not autoactivation_enabled: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'y', vg], check_rc=True) + changed = True + elif not active and autoactivation_enabled: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'n', vg], check_rc=True) + changed = True + + if activate_flag is not None: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, '--activate', activate_flag, vg], check_rc=True) + changed = True + + return changed + + +def append_vgcreate_options(module, state, vgoptions): + vgcreate_cmd = module.get_bin_path('vgcreate', True) + + autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgcreate_cmd) + + if autoactivation_supported and state in ['active', 'inactive']: + if VG_AUTOACTIVATION_OPT not in vgoptions: + if state == 'active': + vgoptions += [VG_AUTOACTIVATION_OPT, 'y'] + else: + vgoptions += [VG_AUTOACTIVATION_OPT, 'n'] + + +def get_pv_values_for_resize(module, device): + pvdisplay_cmd = module.get_bin_path('pvdisplay', True) + pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix", "--separator", ";", "-o", "dev_size,pv_size,pe_start,vg_extent_size"] + pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops + + dummy, pv_values, dummy = module.run_command(pvdisplay_cmd_device_options, check_rc=True) + + values = pv_values.strip().split(';') + + dev_size = int(values[0]) + pv_size = int(values[1]) + pe_start = int(values[2]) + vg_extent_size = int(values[3]) + + return (dev_size, pv_size, pe_start, vg_extent_size) + + +def resize_pv(module, device): + changed = False + pvresize_cmd = module.get_bin_path('pvresize', True) + + dev_size, pv_size, pe_start, vg_extent_size = get_pv_values_for_resize(module=module, device=device) + if (dev_size - (pe_start + pv_size)) > vg_extent_size: + if module.check_mode: + changed = True + else: + # If there is a missing pv on the machine, versions of pvresize rc indicates failure. 
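+            # so the return code is ignored here and success is verified below by
+            # re-reading the PV size and checking that it actually changed.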
+ rc, out, err = module.run_command([pvresize_cmd, device]) + dummy, new_pv_size, dummy, dummy = get_pv_values_for_resize(module=module, device=device) + if pv_size == new_pv_size: + module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err, out=out) + else: + changed = True + + return changed + + +def reset_uuid_pv(module, device): + changed = False + pvs_cmd = module.get_bin_path('pvs', True) + pvs_cmd_with_opts = [pvs_cmd, '--noheadings', '-o', 'uuid', device] + pvchange_cmd = module.get_bin_path('pvchange', True) + pvchange_cmd_with_opts = [pvchange_cmd, '-u', device] + + dummy, orig_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) + + if module.check_mode: + changed = True + else: + # If there is a missing pv on the machine, pvchange rc indicates failure. + pvchange_rc, pvchange_out, pvchange_err = module.run_command(pvchange_cmd_with_opts) + dummy, new_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) + if orig_uuid.strip() == new_uuid.strip(): + module.fail_json(msg="PV (%s) UUID change failed" % (device), rc=pvchange_rc, err=pvchange_err, out=pvchange_out) + else: + changed = True + + return changed + + +def reset_uuid_vg(module, vg): + changed = False + vgchange_cmd = module.get_bin_path('vgchange', True) + vgchange_cmd_with_opts = [vgchange_cmd, '-u', vg] + if module.check_mode: + changed = True + else: + module.run_command(vgchange_cmd_with_opts, check_rc=True) + changed = True + + return changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + pvs=dict(type='list', elements='str'), + pesize=dict(type='str', default='4'), + pv_options=dict(type='str', default=''), + pvresize=dict(type='bool', default=False), + vg_options=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'present', 'active', 'inactive']), + force=dict(type='bool', default=False), + reset_vg_uuid=dict(type='bool', default=False), + reset_pv_uuid=dict(type='bool', default=False), + remove_extra_pvs=dict(type="bool", default=True), + ), + required_if=[ + ['reset_pv_uuid', True, ['pvs']], + ], + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pvresize = module.boolean(module.params['pvresize']) + pesize = module.params['pesize'] + pvoptions = module.params['pv_options'].split() + vgoptions = module.params['vg_options'].split() + reset_vg_uuid = module.boolean(module.params['reset_vg_uuid']) + reset_pv_uuid = module.boolean(module.params['reset_pv_uuid']) + remove_extra_pvs = module.boolean(module.params["remove_extra_pvs"]) + + this_vg = find_vg(module=module, vg=vg) + present_state = state in ['present', 'active', 'inactive'] + pvs_required = present_state and this_vg is None + changed = False + + dev_list = [] + if module.params['pvs']: + dev_list = list(module.params['pvs']) + elif pvs_required: + module.fail_json(msg="No physical volumes given.") + + # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) + + if present_state: + # check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found." 
% test_dev) + + # get pv list + pvs_cmd = module.get_bin_path('pvs', True) + if dev_list: + pvs_filter_pv_name = ' || '.join( + 'pv_name = {0}'.format(x) + for x in itertools.chain(dev_list, module.params['pvs']) + ) + pvs_filter_vg_name = 'vg_name = {0}'.format(vg) + pvs_filter = ["--select", "{0} || {1}".format(pvs_filter_pv_name, pvs_filter_vg_name)] + else: + pvs_filter = [] + rc, current_pvs, err = module.run_command([pvs_cmd, "--noheadings", "-o", "pv_name,vg_name", "--separator", ";"] + pvs_filter) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) + + # check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name'])) + + if this_vg is None: + if present_state: + append_vgcreate_options(module=module, state=state, vgoptions=vgoptions) + # create VG + if module.check_mode: + changed = True + else: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + # remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc, dummy, err = module.run_command([vgremove_cmd, "--force", vg]) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=true" % (vg)) + # activate/deactivate existing VG + elif state == 'active': + changed = activate_vg(module=module, vg=vg, active=True) + elif state == 'inactive': + changed = activate_vg(module=module, vg=vg, active=False) + + # reset VG uuid + if reset_vg_uuid: + changed = reset_uuid_vg(module=module, vg=vg) or changed + + # resize VG + if dev_list: + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if not remove_extra_pvs: + devs_to_remove = [] + + if current_devs: + if present_state: + for device in current_devs: + if pvresize: + changed = resize_pv(module=module, device=device) or changed + if reset_pv_uuid: + changed = reset_uuid_pv(module=module, device=device) or changed + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + # add PV to our VG + vgextend_cmd = module.get_bin_path('vgextend', True) + rc, dummy, err = 
module.run_command([vgextend_cmd, vg] + devs_to_add)
+                    if rc == 0:
+                        changed = True
+                    else:
+                        module.fail_json(msg="Unable to extend %s by %s." % (vg, ' '.join(devs_to_add)), rc=rc, err=err)
+
+                # remove some PV from our VG
+                if devs_to_remove:
+                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
+                    rc, dummy, err = module.run_command([vgreduce_cmd, "--force", vg] + devs_to_remove)
+                    if rc == 0:
+                        changed = True
+                    else:
+                        module.fail_json(msg="Unable to reduce %s by %s." % (vg, ' '.join(devs_to_remove)), rc=rc, err=err)
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/lvg_rename.py b/plugins/modules/lvg_rename.py
new file mode 100644
index 0000000000..5c1b497f2b
--- /dev/null
+++ b/plugins/modules/lvg_rename.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author:
+  - Laszlo Szomor (@lszomor)
+module: lvg_rename
+short_description: Renames LVM volume groups
+description:
+  - This module renames volume groups using the C(vgrename) command.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+version_added: 7.1.0
+options:
+  vg:
+    description:
+      - The name or UUID of the source VG.
+      - See V(vgrename(8\)) for valid values.
+    type: str
+    required: true
+  vg_new:
+    description:
+      - The new name of the VG.
+      - See V(lvm(8\)) for valid names.
+    type: str
+    required: true
+seealso:
+  - module: community.general.lvg
+notes:
+  - This module does not modify VG renaming-related configurations like C(fstab) entries or boot parameters.
+"""
+
+EXAMPLES = r"""
+- name: Rename a VG by name
+  community.general.lvg_rename:
+    vg: vg_orig_name
+    vg_new: vg_new_name
+
+- name: Rename a VG by UUID
+  community.general.lvg_rename:
+    vg: SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj
+    vg_new: vg_new_name
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+argument_spec = dict(
+    vg=dict(type='str', required=True),
+    vg_new=dict(type='str', required=True),
+)
+
+
+class LvgRename(object):
+    def __init__(self, module):
+        '''
+        Orchestrates the lvg_rename module logic.
+
+        :param module: An AnsibleModule instance.
+        '''
+        self.module = module
+        self.result = {'changed': False}
+        self.vg_list = []
+        self._load_params()
+
+    def run(self):
+        """Performs the module logic."""
+
+        self._load_vg_list()
+
+        old_vg_exists = self._is_vg_exists(vg=self.vg)
+        new_vg_exists = self._is_vg_exists(vg=self.vg_new)
+
+        if old_vg_exists:
+            if new_vg_exists:
+                self.module.fail_json(msg='The new VG name (%s) is already in use.' % (self.vg_new))
+            else:
+                self._rename_vg()
+        else:
+            if new_vg_exists:
+                self.result['msg'] = 'The new VG (%s) already exists, nothing to do.' % (self.vg_new)
+                self.module.exit_json(**self.result)
+            else:
+                self.module.fail_json(msg='Both current (%s) and new (%s) VG are missing.'
% (self.vg, self.vg_new)) + + self.module.exit_json(**self.result) + + def _load_params(self): + """Load the parameters from the module.""" + + self.vg = self.module.params['vg'] + self.vg_new = self.module.params['vg_new'] + + def _load_vg_list(self): + """Load the VGs from the system.""" + + vgs_cmd = self.module.get_bin_path('vgs', required=True) + vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '--separator', ';', '-o', 'vg_name,vg_uuid'] + dummy, vg_raw_list, dummy = self.module.run_command(vgs_cmd_with_opts, check_rc=True) + + for vg_info in vg_raw_list.splitlines(): + vg_name, vg_uuid = vg_info.strip().split(';') + self.vg_list.append(vg_name) + self.vg_list.append(vg_uuid) + + def _is_vg_exists(self, vg): + ''' + Checks VG existence by name or UUID. It removes the '/dev/' prefix before checking. + + :param vg: A string with the name or UUID of the VG. + :returns: A boolean indicates whether the VG exists or not. + ''' + + vg_found = False + dev_prefix = '/dev/' + + if vg.startswith(dev_prefix): + vg_id = vg[len(dev_prefix):] + else: + vg_id = vg + + vg_found = vg_id in self.vg_list + + return vg_found + + def _rename_vg(self): + """Renames the volume group.""" + + vgrename_cmd = self.module.get_bin_path('vgrename', required=True) + + if self.module._diff: + self.result['diff'] = {'before': {'vg': self.vg}, 'after': {'vg': self.vg_new}} + + if self.module.check_mode: + self.result['msg'] = "Running in check mode. The module would rename VG %s to %s." % (self.vg, self.vg_new) + self.result['changed'] = True + else: + vgrename_cmd_with_opts = [vgrename_cmd, self.vg, self.vg_new] + dummy, vg_rename_out, dummy = self.module.run_command(vgrename_cmd_with_opts, check_rc=True) + + self.result['msg'] = vg_rename_out + self.result['changed'] = True + + +def setup_module_object(): + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + return module + + +def main(): + module = setup_module_object() + lvg_rename = LvgRename(module=module) + lvg_rename.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvm_pv.py b/plugins/modules/lvm_pv.py new file mode 100644 index 0000000000..3623109465 --- /dev/null +++ b/plugins/modules/lvm_pv.py @@ -0,0 +1,201 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Klention Mali +# Based on lvol module by Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lvm_pv +short_description: Manage LVM Physical Volumes +version_added: "11.0.0" +description: + - Creates, resizes or removes LVM Physical Volumes. +author: + - Klention Mali (@klention) +options: + device: + description: + - Path to the block device to manage. + type: path + required: true + state: + description: + - Control if the physical volume exists. + type: str + choices: [present, absent] + default: present + force: + description: + - Force the operation. + - When O(state=present) (creating a PV), this uses C(pvcreate -f) to force creation. + - When O(state=absent) (removing a PV), this uses C(pvremove -ff) to force removal even if part of a volume group. + type: bool + default: false + resize: + description: + - Resize PV to device size when O(state=present). + type: bool + default: false +notes: + - Requires LVM2 utilities installed on the target system. + - Device path must exist when creating a PV. 
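# --- Editorial illustration (not part of the patch) --------------------------
# A minimal standalone sketch of the existence check that
# LvgRename._is_vg_exists implements above: collect every VG name and UUID
# reported by `vgs`, then test membership after stripping an optional /dev/
# prefix. Assumes the LVM2 tools are installed; the helper names are
# illustrative only.
import subprocess

def list_vg_ids():
    out = subprocess.check_output(
        ['vgs', '--noheadings', '--separator', ';', '-o', 'vg_name,vg_uuid'],
        text=True)
    ids = []
    for line in out.splitlines():
        name, uuid = line.strip().split(';')
        ids.extend([name, uuid])
    return ids

def vg_exists(vg):
    vg_id = vg[len('/dev/'):] if vg.startswith('/dev/') else vg
    return vg_id in list_vg_ids()
# ------------------------------------------------------------------------------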
+""" + +EXAMPLES = r""" +- name: Creating physical volume on /dev/sdb + community.general.lvm_pv: + device: /dev/sdb + +- name: Creating and resizing (if needed) physical volume + community.general.lvm_pv: + device: /dev/sdb + resize: true + +- name: Removing physical volume that is not part of any volume group + community.general.lvm_pv: + device: /dev/sdb + state: absent + +- name: Force removing physical volume that is already part of a volume group + community.general.lvm_pv: + device: /dev/sdb + force: true + state: absent +""" + +RETURN = r""" +""" + + +import os +from ansible.module_utils.basic import AnsibleModule + + +def get_pv_status(module, device): + """Check if the device is already a PV.""" + cmd = ['pvs', '--noheadings', '--readonly', device] + return module.run_command(cmd)[0] == 0 + + +def get_pv_size(module, device): + """Get current PV size in bytes.""" + cmd = ['pvs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'pv_size', device] + rc, out, err = module.run_command(cmd, check_rc=True) + return int(out.strip()) + + +def rescan_device(module, device): + """Perform storage rescan for the device.""" + base_device = os.path.basename(device) + is_partition = "/sys/class/block/{0}/partition".format(base_device) + + # Determine parent device if partition exists + parent_device = base_device + if os.path.exists(is_partition): + parent_device = ( + base_device.rpartition('p')[0] if base_device.startswith('nvme') + else base_device.rstrip('0123456789') + ) + + # Determine rescan path + rescan_path = "/sys/block/{0}/device/{1}".format( + parent_device, + "rescan_controller" if base_device.startswith('nvme') else "rescan" + ) + + if os.path.exists(rescan_path): + try: + with open(rescan_path, 'w') as f: + f.write('1') + return True + except IOError as e: + module.warn("Failed to rescan device {0}: {1}".format(device, str(e))) + else: + module.warn("Rescan path does not exist for device {0}".format(device)) + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + device=dict(type='path', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + force=dict(type='bool', default=False), + resize=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + device = module.params['device'] + state = module.params['state'] + force = module.params['force'] + resize = module.params['resize'] + changed = False + actions = [] + + # Validate device existence for present state + if state == 'present' and not os.path.exists(device): + module.fail_json(msg="Device %s not found" % device) + + is_pv = get_pv_status(module, device) + + if state == 'present': + # Create PV if needed + if not is_pv: + if module.check_mode: + changed = True + actions.append('would be created') + else: + cmd = ['pvcreate'] + if force: + cmd.append('-f') + cmd.append(device) + rc, out, err = module.run_command(cmd, check_rc=True) + changed = True + actions.append('created') + is_pv = True + + # Handle resizing + elif resize and is_pv: + if module.check_mode: + # In check mode, assume resize would change + changed = True + actions.append('would be resized') + else: + # Perform device rescan if each time + if rescan_device(module, device): + actions.append('rescanned') + original_size = get_pv_size(module, device) + rc, out, err = module.run_command(['pvresize', device], check_rc=True) + new_size = get_pv_size(module, device) + if new_size != original_size: + changed = True + actions.append('resized') + + elif state == 'absent': + if is_pv: 
+ if module.check_mode: + changed = True + actions.append('would be removed') + else: + cmd = ['pvremove', '-y'] + if force: + cmd.append('-ff') + changed = True + cmd.append(device) + rc, out, err = module.run_command(cmd, check_rc=True) + actions.append('removed') + + # Generate final message + if actions: + msg = "PV %s: %s" % (device, ', '.join(actions)) + else: + msg = "No changes needed for PV %s" % device + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py new file mode 100644 index 0000000000..d14434d66a --- /dev/null +++ b/plugins/modules/lvm_pv_move_data.py @@ -0,0 +1,218 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Klention Mali +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lvm_pv_move_data +short_description: Move data between LVM Physical Volumes (PVs) +version_added: "11.2.0" +description: + - Moves data from one LVM Physical Volume (PV) to another. +author: + - Klention Mali (@klention) +options: + source: + description: + - Path to the source block device to move data from. + - Must be an existing PV. + type: path + required: true + destination: + description: + - Path to the destination block device to move data to. + - Must be an existing PV with enough free space. + type: path + required: true + auto_answer: + description: + - Answer yes to all prompts automatically. + type: bool + default: false + atomic: + description: + - Makes the C(pvmove) operation atomic, ensuring that all affected LVs are moved to the destination PV, + or none are if the operation is aborted. + type: bool + default: true + autobackup: + description: + - Automatically backup metadata before changes (strongly advised!). + type: bool + default: true +requirements: + - LVM2 utilities + - Both O(source) and O(destination) devices must exist, and the PVs must be in the same volume group. + - The O(destination) PV must have enough free space to accommodate the O(source) PV's allocated extents. + - Verbosity is automatically controlled by Ansible's verbosity level (using multiple C(-v) flags). +""" + +EXAMPLES = r""" +- name: Moving data from /dev/sdb to /dev/sdc + community.general.lvm_pv_move_data: + source: /dev/sdb + destination: /dev/sdc +""" + +RETURN = r""" +actions: + description: List of actions performed during module execution. 
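# --- Editorial illustration (not part of the patch) --------------------------
# Sketch of the capacity rule stated in the requirements above: before pvmove,
# the destination PV must have at least as many free physical extents as the
# source PV has allocated. Standalone example via `pvs`; the device arguments
# and helper names are illustrative, not part of the module.
import subprocess

def pe_count(device, field):
    out = subprocess.check_output(
        ['pvs', '--noheadings', '-o', field, device], text=True)
    return int(out.strip())

def has_room(source, destination):
    allocated = pe_count(source, 'pv_pe_alloc_count')
    free = pe_count(destination, 'pv_pe_count') - pe_count(destination, 'pv_pe_alloc_count')
    return allocated == 0 or free >= allocated
# ------------------------------------------------------------------------------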
+ returned: success + type: list + elements: str + sample: [ + "moved data from /dev/sdb to /dev/sdc", + "no allocated extents to move", + "would move data from /dev/sdb to /dev/sdc" + ] +""" + + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def main(): + module = AnsibleModule( + argument_spec=dict( + source=dict(type='path', required=True), + destination=dict(type='path', required=True), + auto_answer=dict(type='bool', default=False), + atomic=dict(type='bool', default=True), + autobackup=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + pvs_runner = CmdRunner( + module, + command="pvs", + arg_formats=dict( + noheadings=cmd_runner_fmt.as_fixed("--noheadings"), + readonly=cmd_runner_fmt.as_fixed("--readonly"), + vg_name=cmd_runner_fmt.as_fixed("-o", "vg_name"), + pv_pe_alloc_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_alloc_count"), + pv_pe_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_count"), + device=cmd_runner_fmt.as_list(), + ) + ) + + source = module.params['source'] + destination = module.params['destination'] + changed = False + actions = [] + result = {'changed': False} + + # Validate device existence + if not os.path.exists(source): + module.fail_json(msg="Source device %s not found" % source) + if not os.path.exists(destination): + module.fail_json(msg="Destination device %s not found" % destination) + if source == destination: + module.fail_json(msg="Source and destination devices must be different") + + def run_pvs_command(arguments, device): + with pvs_runner(arguments) as ctx: + rc, out, err = ctx.run(device=device) + if rc != 0: + module.fail_json( + msg="Command failed: %s" % err, + stdout=out, + stderr=err, + rc=rc, + cmd=ctx.cmd, + arguments=arguments, + device=device, + ) + return out.strip() + + def is_pv(device): + with pvs_runner("noheadings readonly device", check_rc=False) as ctx: + rc, out, err = ctx.run(device=device) + return rc == 0 + + if not is_pv(source): + module.fail_json(msg="Source device %s is not a PV" % source) + if not is_pv(destination): + module.fail_json(msg="Destination device %s is not a PV" % destination) + + vg_src = run_pvs_command("noheadings vg_name device", source) + vg_dest = run_pvs_command("noheadings vg_name device", destination) + if vg_src != vg_dest: + module.fail_json( + msg="Source and destination must be in the same VG. Source VG: '%s', Destination VG: '%s'." % (vg_src, vg_dest) + ) + + def get_allocated_pe(device): + try: + return int(run_pvs_command("noheadings pv_pe_alloc_count device", device)) + except ValueError: + module.fail_json(msg="Invalid allocated PE count for device %s" % device) + + allocated = get_allocated_pe(source) + if allocated == 0: + actions.append('no allocated extents to move') + else: + # Check destination has enough free space + def get_total_pe(device): + try: + return int(run_pvs_command("noheadings pv_pe_count device", device)) + except ValueError: + module.fail_json(msg="Invalid total PE count for device %s" % device) + + def get_free_pe(device): + return get_total_pe(device) - get_allocated_pe(device) + + free_pe_dest = get_free_pe(destination) + if free_pe_dest < allocated: + module.fail_json( + msg="Destination device %s has only %d free physical extents, but source device %s has %d allocated extents. Not enough space." 
% + (destination, free_pe_dest, source, allocated) + ) + + if module.check_mode: + changed = True + actions.append('would move data from %s to %s' % (source, destination)) + else: + pvmove_runner = CmdRunner( + module, + command="pvmove", + arg_formats=dict( + auto_answer=cmd_runner_fmt.as_bool("-y"), + atomic=cmd_runner_fmt.as_bool("--atomic"), + autobackup=cmd_runner_fmt.as_fixed("--autobackup", "y" if module.params['autobackup'] else "n"), + verbosity=cmd_runner_fmt.as_func(lambda v: ['-' + 'v' * v] if v > 0 else []), + source=cmd_runner_fmt.as_list(), + destination=cmd_runner_fmt.as_list(), + ) + ) + + verbosity = module._verbosity + with pvmove_runner("auto_answer atomic autobackup verbosity source destination") as ctx: + rc, out, err = ctx.run( + verbosity=verbosity, + source=source, + destination=destination + ) + result['stdout'] = out + result['stderr'] = err + + changed = True + actions.append('moved data from %s to %s' % (source, destination)) + + result['changed'] = changed + result['actions'] = actions + if actions: + result['msg'] = "PV data move: %s" % ", ".join(actions) + else: + result['msg'] = "No data to move from %s" % source + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py deleted file mode 120000 index c17caee4d9..0000000000 --- a/plugins/modules/lvol.py +++ /dev/null @@ -1 +0,0 @@ -./system/lvol.py \ No newline at end of file diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py new file mode 100644 index 0000000000..c2b18dd936 --- /dev/null +++ b/plugins/modules/lvol.py @@ -0,0 +1,613 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Jeroen Hoekx , Alexander Bulimov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Jeroen Hoekx (@jhoekx) + - Alexander Bulimov (@abulimov) + - Raoul Baudach (@unkaputtbar112) + - Ziga Kern (@zigaSRC) +module: lvol +short_description: Configure LVM logical volumes +description: + - This module creates, removes or resizes logical volumes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + vg: + type: str + required: true + description: + - The volume group this logical volume is part of. + lv: + type: str + description: + - The name of the logical volume. + size: + type: str + description: + - The size of the logical volume, according to lvcreate(8) C(--size), by default in megabytes or optionally with one + of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) C(--extents) as a percentage of [VG|PVS|FREE|ORIGIN]; Float + values must begin with a digit. + - When resizing, apart from specifying an absolute size you may, according to lvextend(8)|lvreduce(8) C(--size), specify + the amount to extend the logical volume with the prefix V(+) or the amount to reduce the logical volume by with prefix + V(-). + - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0. + - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent). + state: + type: str + description: + - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option + is required. 
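# --- Editorial illustration (not part of the patch) --------------------------
# Sketch of how a lvol-style size string decomposes, mirroring the parsing done
# in main() below: an optional +/- prefix, then either a percentage of
# VG|PVS|FREE|ORIGIN or a number with an optional [bBsSkKmMgGtTpPeE] unit.
# The function name is illustrative only.
def split_size(size):
    operator = None
    if size[0] in '+-':
        operator, size = size[0], size[1:]
    if '%' in size:
        percent, whole = size.split('%', 1)   # e.g. '80%VG' -> (80, 'VG')
        return operator, int(percent), whole, None
    unit = 'm'                                # megabytes when no unit is given
    if size[-1].lower() in 'bskmgtpe':
        size, unit = size[:-1], size[-1]
    return operator, float(size), None, unit

# split_size('+100%FREE') -> ('+', 100, 'FREE', None)
# split_size('512g')      -> (None, 512.0, None, 'g')
# ------------------------------------------------------------------------------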
+    choices: [absent, present]
+    default: present
+  active:
+    description:
+      - Whether the volume is active and visible to the host.
+    type: bool
+    default: true
+  force:
+    description:
+      - Shrink or remove operations of volumes require this switch. Ensures that filesystems never get corrupted/destroyed
+        by mistake.
+    type: bool
+    default: false
+  opts:
+    type: str
+    description:
+      - Free-form options to be passed to the lvcreate command.
+  snapshot:
+    type: str
+    description:
+      - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the
+        origin volume.
+  pvs:
+    type: list
+    elements: str
+    description:
+      - List of physical volumes (for example V(/dev/sda, /dev/sdb)).
+  thinpool:
+    type: str
+    description:
+      - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+  shrink:
+    description:
+      - Shrink if current size is higher than size requested.
+    type: bool
+    default: true
+  resizefs:
+    description:
+      - Resize the underlying filesystem together with the logical volume.
+      - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. Attempts to resize other filesystem types
+        result in failure.
+    type: bool
+    default: false
+notes:
+  - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
+"""
+
+EXAMPLES = r"""
+- name: Create a logical volume of 512m
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 512
+
+- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 512
+    pvs:
+      - /dev/sda
+      - /dev/sdb
+
+- name: Create cache pool logical volume
+  community.general.lvol:
+    vg: firefly
+    lv: lvcache
+    size: 512m
+    opts: --type cache-pool
+
+- name: Create a logical volume of 512g
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 512g
+
+- name: Create a logical volume the size of all remaining space in the volume group
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 100%FREE
+
+- name: Create a logical volume with special options
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 512g
+    opts: -r 16
+
+- name: Extend the logical volume to 1024m
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 1024
+
+- name: Extend the logical volume to consume all remaining space in the volume group
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: +100%FREE
+
+- name: Extend the logical volume by given space
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: +512M
+
+- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 100%PVS
+    resizefs: true
+
+- name: Resize the logical volume to % of VG
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 80%VG
+    force: true
+
+- name: Reduce the logical volume to 512m
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 512
+    force: true
+
+- name: Reduce the logical volume by given space
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: -512M
+    force: true
+
+- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
+  community.general.lvol:
+    vg: firefly
+    lv: test
+    size: 512
+    shrink: false
+
+- name: Remove the logical volume
+ community.general.lvol: + vg: firefly + lv: test + state: absent + force: true + +- name: Create a snapshot volume of the test logical volume. + community.general.lvol: + vg: firefly + lv: test + snapshot: snap1 + size: 100m + +- name: Deactivate a logical volume + community.general.lvol: + vg: firefly + lv: test + active: false + +- name: Create a deactivated logical volume + community.general.lvol: + vg: firefly + lv: test + size: 512g + active: false + +- name: Create a thin pool of 512g + community.general.lvol: + vg: firefly + thinpool: testpool + size: 512g + +- name: Create a thin volume of 128g + community.general.lvol: + vg: firefly + lv: test + thinpool: testpool + size: 128g +""" + +import re +import shlex + +from ansible.module_utils.basic import AnsibleModule + +LVOL_ENV_VARS = dict( + # make sure we use the C locale when running lvol-related commands + LANG='C', + LC_ALL='C', + LC_MESSAGES='C', + LC_CTYPE='C', +) + + +def mkversion(major, minor, patch): + return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) + + +def parse_lvs(data): + lvs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + lvs.append({ + 'name': parts[0].replace('[', '').replace(']', ''), + 'size': float(parts[1]), + 'active': (parts[2][4] == 'a'), + 'thinpool': (parts[2][0] == 't'), + 'thinvol': (parts[2][0] == 'V'), + }) + return lvs + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'size': float(parts[1]), + 'free': float(parts[2]), + 'ext_size': float(parts[3]) + }) + return vgs + + +def get_lvm_version(module): + ver_cmd = module.get_bin_path("lvm", required=True) + rc, out, err = module.run_command([ver_cmd, "version"]) + if rc != 0: + return None + m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) + if not m: + return None + return mkversion(m.group(1), m.group(2), m.group(3)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + lv=dict(type='str'), + size=dict(type='str'), + opts=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + shrink=dict(type='bool', default=True), + active=dict(type='bool', default=True), + snapshot=dict(type='str'), + pvs=dict(type='list', elements='str'), + resizefs=dict(type='bool', default=False), + thinpool=dict(type='str'), + ), + supports_check_mode=True, + required_one_of=( + ['lv', 'thinpool'], + ), + ) + + module.run_command_environ_update = LVOL_ENV_VARS + + # Determine if the "--yes" option should be used + version_found = get_lvm_version(module) + if version_found is None: + module.fail_json(msg="Failed to get LVM version number") + version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option + if version_found >= version_yesopt: + yesopt = ["--yes"] + else: + yesopt = [] + + vg = module.params['vg'] + lv = module.params['lv'] + size = module.params['size'] + opts = shlex.split(module.params['opts'] or '') + state = module.params['state'] + force = module.boolean(module.params['force']) + shrink = module.boolean(module.params['shrink']) + active = module.boolean(module.params['active']) + resizefs = module.boolean(module.params['resizefs']) + thinpool = module.params['thinpool'] + size_opt = 'L' + size_unit = 'm' + size_operator = None + snapshot = module.params['snapshot'] + pvs = module.params['pvs'] or [] + + # Add --test option when running in check-mode + 
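+    # (--test makes the LVM tools perform a dry run without committing any
+    # metadata changes, which is how check mode is implemented here.)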
if module.check_mode: + test_opt = ['--test'] + else: + test_opt = [] + + if size: + # LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing + if size.startswith('+'): + size_operator = '+' + size = size[1:] + elif size.startswith('-'): + size_operator = '-' + size = size[1:] + # LVCREATE(8) does not support [+-] + + # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -l --extents option with percentage + if '%' in size: + size_parts = size.split('%', 1) + size_percent = int(size_parts[0]) + if size_percent > 100: + module.fail_json(msg="Size percentage cannot be larger than 100%") + size_whole = size_parts[1] + if size_whole == 'ORIGIN' and snapshot is None: + module.fail_json(msg="Percentage of ORIGIN supported only for snapshot volumes") + elif size_whole not in ['VG', 'PVS', 'FREE', 'ORIGIN']: + module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE|ORIGIN") + size_opt = 'l' + size_unit = '' + + # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -L --size option unit + if '%' not in size: + if size[-1].lower() in 'bskmgtpe': + size_unit = size[-1] + size = size[0:-1] + + try: + float(size) + if not size[0].isdigit(): + raise ValueError() + except ValueError: + module.fail_json(msg="Bad size specification of '%s'" % size) + + # when no unit, megabytes by default + if size_opt == 'l': + unit = 'm' + else: + unit = size_unit + + # Get information on volume group requested + vgs_cmd = module.get_bin_path("vgs", required=True) + rc, current_vgs, err = module.run_command( + [vgs_cmd, "--noheadings", "--nosuffix", "-o", "vg_name,size,free,vg_extent_size", "--units", unit.lower(), "--separator", ";", vg]) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) + + vgs = parse_vgs(current_vgs) + this_vg = vgs[0] + + # Get information on logical volume requested + lvs_cmd = module.get_bin_path("lvs", required=True) + rc, current_lvs, err = module.run_command( + [lvs_cmd, "-a", "--noheadings", "--nosuffix", "-o", "lv_name,size,lv_attr", "--units", unit.lower(), "--separator", ";", vg]) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) + + changed = False + + lvs = parse_lvs(current_lvs) + + if snapshot: + # Check snapshot pre-conditions + for test_lv in lvs: + if test_lv['name'] == lv or test_lv['name'] == thinpool: + if not test_lv['thinpool'] and not thinpool: + break + else: + module.fail_json(msg="Snapshots of thin pool LVs are not supported.") + else: + module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg)) + check_lv = snapshot + elif thinpool: + if lv: + # Check thin volume pre-conditions + for test_lv in lvs: + if test_lv['name'] == thinpool: + break + else: + module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." 
% (thinpool, vg)) + check_lv = lv + else: + check_lv = thinpool + else: + check_lv = lv + + for test_lv in lvs: + if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]): + this_lv = test_lv + break + else: + this_lv = None + + msg = '' + if this_lv is None: + if state == 'present': + if size_operator is not None: + if size_operator == "-" or (size_whole not in ["VG", "PVS", "FREE", "ORIGIN", None]): + module.fail_json(msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size)) + # Require size argument except for snapshot of thin volumes + if (lv or thinpool) and not size: + for test_lv in lvs: + if test_lv['name'] == lv and test_lv['thinvol'] and snapshot: + break + else: + module.fail_json(msg="No size given.") + + # create LV + lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + cmd = [lvcreate_cmd] + test_opt + yesopt + if snapshot is not None: + if size: + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["-s", "-n", snapshot] + opts + ["%s/%s" % (vg, lv)] + elif thinpool: + if lv: + if size_opt == 'l': + module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") + size_opt = 'V' + cmd += ["-n", lv] + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += opts + ["-T", "%s/%s" % (vg, thinpool)] + else: + cmd += ["-n", lv] + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += opts + [vg] + pvs + rc, dummy, err = module.run_command(cmd) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) + else: + if state == 'absent': + # remove LV + if not force: + module.fail_json(msg="Sorry, no removal of logical volume %s without force=true." % (this_lv['name'])) + lvremove_cmd = module.get_bin_path("lvremove", required=True) + rc, dummy, err = module.run_command([lvremove_cmd] + test_opt + ["--force", "%s/%s" % (vg, this_lv['name'])]) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) + + elif not size: + pass + + elif size_opt == 'l': + # Resize LV based on % value + tool = None + size_free = this_vg['free'] + if size_whole == 'VG' or size_whole == 'PVS': + size_requested = size_percent * this_vg['size'] / 100 + else: # size_whole == 'FREE': + size_requested = size_percent * this_vg['free'] / 100 + + if size_operator == '+': + size_requested += this_lv['size'] + elif size_operator == '-': + size_requested = this_lv['size'] - size_requested + + # According to latest documentation (LVM2-2.03.11) all tools round down + size_requested -= (size_requested % this_vg['ext_size']) + + if this_lv['size'] < size_requested: + if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): + tool = [module.get_bin_path("lvextend", required=True)] + else: + module.fail_json( + msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % + (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit) + ) + elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large + if size_requested < 1: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." 
% (this_lv['name'])) + elif not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=true" % (this_lv['name'])) + else: + tool = [module.get_bin_path("lvreduce", required=True), '--force'] + + if tool: + if resizefs: + tool += ['--resizefs'] + cmd = tool + test_opt + if size_operator: + cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)] + else: + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) + elif "matches existing size" in err or "matches existing size" in out: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err or "not larger than existing size" in out: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + else: + # resize LV based on absolute values + tool = None + if float(size) > this_lv['size'] or size_operator == '+': + tool = [module.get_bin_path("lvextend", required=True)] + elif shrink and float(size) < this_lv['size'] or size_operator == '-': + if float(size) == 0: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) + if not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=true." % (this_lv['name'])) + else: + tool = [module.get_bin_path("lvreduce", required=True), '--force'] + + if tool: + if resizefs: + tool += ['--resizefs'] + cmd = tool + test_opt + if size_operator: + cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)] + else: + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + elif "matches existing size" in err or "matches existing size" in out: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err or "not larger than existing size" in out: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + if this_lv is not None: + if active: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, dummy, err = module.run_command([lvchange_cmd, "-ay", "%s/%s" % (vg, this_lv['name'])]) + if rc == 0: + module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) + else: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, dummy, err = module.run_command([lvchange_cmd, "-an", "%s/%s" % (vg, this_lv['name'])]) + if rc == 0: + module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + 
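+                # Deactivation failed; surface lvchange's stderr to the caller.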
module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err) + + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py deleted file mode 120000 index 521ee3328d..0000000000 --- a/plugins/modules/lxc_container.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/lxc/lxc_container.py \ No newline at end of file diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py new file mode 100644 index 0000000000..6c4ff64f9c --- /dev/null +++ b/plugins/modules/lxc_container.py @@ -0,0 +1,1728 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Kevin Carter +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lxc_container +short_description: Manage LXC Containers +description: + - Management of LXC containers. +author: "Kevin Carter (@cloudnull)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of a container. + type: str + required: true + backing_store: + choices: + - dir + - lvm + - loop + - btrfs + - overlayfs + - zfs + description: + - Backend storage type for the container. + type: str + default: dir + template: + description: + - Name of the template to use within an LXC create. + type: str + default: ubuntu + template_options: + description: + - Template options when building the container. + type: str + config: + description: + - Path to the LXC configuration file. + type: path + lv_name: + description: + - Name of the logical volume, defaults to the container name. + - If not specified, it defaults to E(CONTAINER_NAME). + type: str + vg_name: + description: + - If backend store is lvm, specify the name of the volume group. + type: str + default: lxc + thinpool: + description: + - Use LVM thin pool called TP. + type: str + fs_type: + description: + - Create fstype TYPE. + type: str + default: ext4 + fs_size: + description: + - File system Size. + type: str + default: 5G + directory: + description: + - Place rootfs directory under DIR. + type: path + zfs_root: + description: + - Create zfs under given zfsroot. + type: str + container_command: + description: + - Run a command within a container. + type: str + lxc_path: + description: + - Place container under E(PATH). + type: path + container_log: + description: + - Enable a container log for host actions to the container. + type: bool + default: false + container_log_level: + choices: + - Info + - info + - INFO + - Error + - error + - ERROR + - Debug + - debug + - DEBUG + description: + - Set the log level for a container where O(container_log) was set. + type: str + required: false + default: INFO + clone_name: + description: + - Name of the new cloned server. + - This is only used when state is clone. + type: str + clone_snapshot: + description: + - Create a snapshot a container when cloning. + - This is not supported by all container storage backends. + - Enabling this may fail if the backing store does not support snapshots. + type: bool + default: false + archive: + description: + - Create an archive of a container. + - This creates a tarball of the running container. + type: bool + default: false + archive_path: + description: + - Path the save the archived container. 
+ - If the path does not exist the archive method attempts to create it. + type: path + archive_compression: + choices: + - gzip + - bzip2 + - none + description: + - Type of compression to use when creating an archive of a running container. + type: str + default: gzip + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + - clone + description: + - Define the state of a container. + - If you clone a container using O(clone_name) the newly cloned container created in a stopped state. + - The running container is stopped while the clone operation is happening and upon completion of the clone the original + container state is restored. + type: str + default: started + container_config: + description: + - A list of C(key=value) options to use when configuring a container. + type: list + elements: str +requirements: + - 'lxc >= 2.0 # OS package' + - 'python3 >= 3.5 # OS Package' + - 'python3-lxc # OS Package' +notes: + - Containers must have a unique name. If you attempt to create a container with a name that already exists in the users + namespace the module simply returns as "unchanged". + - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container is V(started), + the command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not + exist it is first created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you + can use common script formatting within the variable itself. The O(container_command) option always execute as C(bash). + When using O(container_command), a log file is created in the C(/tmp/) directory which contains both C(stdout) and C(stderr) + of any command executed. + - If O(archive=true) the system attempts to create a compressed tarball of the running container. The O(archive) option + supports LVM backed containers and creates a snapshot of the running container when creating the archive. + - If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed + from source at U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc). +""" + +EXAMPLES = r""" +- name: Create a started container + community.general.lxc_container: + name: test-container-started + container_log: true + template: ubuntu + state: started + template_options: --release trusty + +- name: Create a stopped container + community.general.lxc_container: + name: test-container-stopped + container_log: true + template: ubuntu + state: stopped + template_options: --release trusty + +- name: Create a frozen container + community.general.lxc_container: + name: test-container-frozen + container_log: true + template: ubuntu + state: frozen + template_options: --release trusty + container_command: | + echo 'hello world.' | tee /opt/started-frozen + +# Create filesystem container, configure it, and archive it, and start it. +- name: Create filesystem container + community.general.lxc_container: + name: test-container-config + backing_store: dir + container_log: true + template: ubuntu + state: started + archive: true + archive_compression: none + container_config: + - "lxc.aa_profile=unconfined" + - "lxc.cgroup.devices.allow=a *:* rmw" + template_options: --release trusty + +# Create an lvm container, run a complex command in it, add additional +# configuration to it, create an archive of it, and finally leave the container +# in a frozen state. 
The container archive will be compressed using bzip2 +- name: Create a frozen lvm container + community.general.lxc_container: + name: test-container-lvm + container_log: true + template: ubuntu + state: frozen + backing_store: lvm + template_options: --release trusty + container_command: | + apt-get update + apt-get install -y vim lxc-dev + echo 'hello world.' | tee /opt/started + if [[ -f "/opt/started" ]]; then + echo 'hello world.' | tee /opt/found-started + fi + container_config: + - "lxc.aa_profile=unconfined" + - "lxc.cgroup.devices.allow=a *:* rmw" + archive: true + archive_compression: bzip2 + register: lvm_container_info + +- name: Debug info on container "test-container-lvm" + ansible.builtin.debug: + var: lvm_container_info + +- name: Run a command in a container and ensure it is in a "stopped" state. + community.general.lxc_container: + name: test-container-started + state: stopped + container_command: | + echo 'hello world.' | tee /opt/stopped + +- name: Run a command in a container and ensure it is in a "frozen" state. + community.general.lxc_container: + name: test-container-stopped + state: frozen + container_command: | + echo 'hello world.' | tee /opt/frozen + +- name: Start a container + community.general.lxc_container: + name: test-container-stopped + state: started + +- name: Run a command in a container and then restart it + community.general.lxc_container: + name: test-container-started + state: restarted + container_command: | + echo 'hello world.' | tee /opt/restarted + +- name: Run a complex command within a "running" container + community.general.lxc_container: + name: test-container-started + container_command: | + apt-get update + apt-get install -y curl wget vim apache2 + echo 'hello world.' | tee /opt/started + if [[ -f "/opt/started" ]]; then + echo 'hello world.' | tee /opt/found-started + fi + +# Create an archive of an existing container, save the archive to a defined +# path and then destroy it. +- name: Archive container + community.general.lxc_container: + name: test-container-started + state: absent + archive: true + archive_path: /opt/archives + +# Create a container using overlayfs, create an archive of it, create a +# snapshot clone of the container and and finally leave the container +# in a frozen state. The container archive will be compressed using gzip. +- name: Create an overlayfs container archive and clone it + community.general.lxc_container: + name: test-container-overlayfs + container_log: true + template: ubuntu + state: started + backing_store: overlayfs + template_options: --release trusty + clone_snapshot: true + clone_name: test-container-overlayfs-clone-snapshot + archive: true + archive_compression: gzip + register: clone_container_info + +- name: Debug info on container "test-container" + ansible.builtin.debug: + var: clone_container_info + +- name: Clone a container using snapshot + community.general.lxc_container: + name: test-container-overlayfs-clone-snapshot + backing_store: overlayfs + clone_name: test-container-overlayfs-clone-snapshot2 + clone_snapshot: true + +- name: Create a new container and clone it + community.general.lxc_container: + name: test-container-new-archive + backing_store: dir + clone_name: test-container-new-archive-clone + +- name: Archive and clone a container then destroy it + community.general.lxc_container: + name: test-container-new-archive + state: absent + clone_name: test-container-new-archive-destroyed-clone + archive: true + archive_compression: gzip + +- name: Start a cloned container. 
+ community.general.lxc_container: + name: test-container-new-archive-destroyed-clone + state: started + +- name: Destroy a container + community.general.lxc_container: + name: '{{ item }}' + state: absent + with_items: + - test-container-stopped + - test-container-started + - test-container-frozen + - test-container-lvm + - test-container-config + - test-container-overlayfs + - test-container-overlayfs-clone + - test-container-overlayfs-clone-snapshot + - test-container-overlayfs-clone-snapshot2 + - test-container-new-archive + - test-container-new-archive-clone + - test-container-new-archive-destroyed-clone +""" + +RETURN = r""" +lxc_container: + description: Container information. + returned: success + type: complex + contains: + name: + description: Name of the LXC container. + returned: success + type: str + sample: test_host + init_pid: + description: Pid of the LXC init process. + returned: success + type: int + sample: 19786 + interfaces: + description: List of the container's network interfaces. + returned: success + type: list + sample: ["eth0", "lo"] + ips: + description: List of IPs. + returned: success + type: list + sample: ["10.0.3.3"] + state: + description: Resulting state of the container. + returned: success + type: str + sample: "running" + archive: + description: Resulting state of the container. + returned: success, when archive is true + type: str + sample: "/tmp/test-container-config.tar" + clone: + description: If the container was cloned. + returned: success, when clone_name is specified + type: bool + sample: true +""" + +import os +import os.path +import re +import shutil +import subprocess +import tempfile +import time +import shlex + +try: + import lxc +except ImportError: + HAS_LXC = False +else: + HAS_LXC = True + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE +from ansible.module_utils.common.text.converters import to_text, to_bytes + + +# LXC_COMPRESSION_MAP is a map of available compression types when creating +# an archive of a container. +LXC_COMPRESSION_MAP = { + 'gzip': { + 'extension': 'tar.tgz', + 'argument': '-czf' + }, + 'bzip2': { + 'extension': 'tar.bz2', + 'argument': '-cjf' + }, + 'none': { + 'extension': 'tar', + 'argument': '-cf' + } +} + + +# LXC_COMMAND_MAP is a map of variables that are available to a method based +# on the state the container is in. +LXC_COMMAND_MAP = { + 'create': { + 'variables': { + 'config': '--config', + 'template': '--template', + 'backing_store': '--bdev', + 'lxc_path': '--lxcpath', + 'lv_name': '--lvname', + 'vg_name': '--vgname', + 'thinpool': '--thinpool', + 'fs_type': '--fstype', + 'fs_size': '--fssize', + 'directory': '--dir', + 'zfs_root': '--zfsroot' + } + }, + 'clone': { + 'variables-lxc-copy': { + 'backing_store': '--backingstorage', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--name', + 'clone_name': '--newname' + }, + # lxc-clone is deprecated in favor of lxc-copy + 'variables-lxc-clone': { + 'backing_store': '--backingstore', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--orig', + 'clone_name': '--new' + } + } +} + + +# LXC_BACKING_STORE is a map of available storage backends and options that +# are incompatible with the given storage backend. 
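+# Options listed for a backend are stripped from the build command before it
+# is run (see _get_vars below) rather than being passed through to lxc-create.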
+LXC_BACKING_STORE = { + 'dir': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ], + 'lvm': [ + 'zfs_root' + ], + 'btrfs': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size' + ], + 'loop': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ], + 'overlayfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root' + ], + 'zfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ] +} + + +# LXC_LOGGING_LEVELS is a map of available log levels +LXC_LOGGING_LEVELS = { + 'INFO': ['info', 'INFO', 'Info'], + 'ERROR': ['error', 'ERROR', 'Error'], + 'DEBUG': ['debug', 'DEBUG', 'Debug'] +} + + +# LXC_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXC_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen', + 'clone': '_clone' +} + + +# This is used to attach to a running container and execute commands from +# within the container on the host. This will provide local access to a +# container without using SSH. The template will attempt to work within the +# home directory of the user that was attached to the container and source +# that users environment variables by default. +ATTACH_TEMPLATE = """#!/usr/bin/env bash +pushd "$(getent passwd $(whoami)|cut -f6 -d':')" + if [[ -f ".bashrc" ]];then + source .bashrc + unset HOSTNAME + fi +popd + +# User defined command +%(container_command)s +""" + + +def create_script(command): + """Write out a script onto a target. + + This method should be backward compatible with Python when executing + from within the container. + + :param command: command to run, this can be a script and can use spacing + with newlines as separation. + :type command: ``str`` + """ + + (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script') + f = os.fdopen(fd, 'wb') + try: + f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict')) + f.flush() + finally: + f.close() + + # Ensure the script is executable. + os.chmod(script_file, int('0700', 8)) + + # Output log file. + stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab') + + # Error log file. + stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab') + + # Execute the script command. + try: + subprocess.Popen( + [script_file], + stdout=stdout_file, + stderr=stderr_file + ).communicate() + finally: + # Close the log files. + stderr_file.close() + stdout_file.close() + + # Remove the script file upon completion of execution. + os.remove(script_file) + + +class LxcContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.state = self.module.params['state'] + self.state_change = False + self.lxc_vg = None + self.lxc_path = self.module.params['lxc_path'] + self.container_name = self.module.params['name'] + self.container = self.get_container_bind() + self.archive_info = None + self.clone_info = None + + def get_container_bind(self): + return lxc.Container(name=self.container_name) + + @staticmethod + def _roundup(num): + """Return a rounded floating point number. + + :param num: Number to round up. + :type: ``float`` + :returns: Rounded up number. 
+        :rtype: ``int``
+        """
+        num, part = str(num).split('.')
+        num = int(num)
+        if int(part) != 0:
+            num += 1
+        return num
+
+    @staticmethod
+    def _container_exists(container_name, lxc_path=None):
+        """Check if a container exists.
+
+        :param container_name: Name of the container.
+        :type: ``str``
+        :returns: True or False if the container is found.
+        :rtype: ``bool``
+        """
+        return any(c == container_name for c in lxc.list_containers(config_path=lxc_path))
+
+    @staticmethod
+    def _add_variables(variables_dict, build_command):
+        """Return a command list with all found options.
+
+        :param variables_dict: Pre-parsed optional variables used from a
+                               seed command.
+        :type variables_dict: ``dict``
+        :param build_command: Command to run.
+        :type build_command: ``list``
+        :returns: list of command options.
+        :rtype: ``list``
+        """
+
+        for key, value in variables_dict.items():
+            build_command.append(str(key))
+            build_command.append(str(value))
+        return build_command
+
+    def _get_vars(self, variables):
+        """Return a dict of all variables as found within the module.
+
+        :param variables: Hash of all variables to find.
+        :type variables: ``dict``
+        """
+
+        # Remove incompatible storage backend options.
+        variables = variables.copy()
+        for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+            variables.pop(v, None)
+
+        false_values = BOOLEANS_FALSE.union([None, ''])
+        result = {
+            v: self.module.params[k]
+            for k, v in variables.items()
+            if self.module.params[k] not in false_values
+        }
+        return result
+
+    def _config(self):
+        """Configure an LXC container.
+
+        Write new configuration values to the lxc config file. This will
+        stop the container if it is running, write the new options, and then
+        restart the container upon completion.
+        """
+
+        _container_config = self.module.params['container_config']
+        if not _container_config:
+            return False
+
+        container_config_file = self.container.config_file_name
+        with open(container_config_file, 'rb') as f:
+            container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+        parsed_options = [i.split('=', 1) for i in _container_config]
+        config_change = False
+        for key, value in parsed_options:
+            key = key.strip()
+            value = value.strip()
+            new_entry = '%s = %s\n' % (key, value)
+            keyre = re.compile(r'%s(\s+)?=' % key)
+            for option_line in container_config:
+                # Look for key in config
+                if keyre.match(option_line):
+                    dummy, _value = option_line.split('=', 1)
+                    config_value = ' '.join(_value.split())
+                    line_index = container_config.index(option_line)
+                    # If the sanitized values don't match replace them
+                    if value != config_value:
+                        line_index += 1
+                        if new_entry not in container_config:
+                            config_change = True
+                            container_config.insert(line_index, new_entry)
+                    # Break the flow as values are written or not at this point
+                    break
+            else:
+                config_change = True
+                container_config.append(new_entry)
+
+        # If the config changed restart the container.
+        if config_change:
+            container_state = self._get_state()
+            if container_state != 'stopped':
+                self.container.stop()
+
+            with open(container_config_file, 'wb') as f:
+                f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+            self.state_change = True
+            if container_state == 'running':
+                self._container_startup()
+            elif container_state == 'frozen':
+                self._container_startup()
+                self.container.freeze()
+
+    def _container_create_clone(self):
+        """Clone a new LXC container from an existing container.
+ + This method will clone an existing container to a new container using + the `clone_name` variable as the new container name. The method will + create a container if the container `name` does not exist. + + Note that cloning a container will ensure that the original container + is "stopped" before the clone can be done. Because this operation can + require a state change the method will return the original container + to its prior state upon completion of the clone. + + Once the clone is complete the new container will be left in a stopped + state. + """ + + # Ensure that the state of the original container is stopped + container_state = self._get_state() + if container_state != 'stopped': + self.state_change = True + self.container.stop() + + # lxc-clone is deprecated in favor of lxc-copy + clone_vars = 'variables-lxc-copy' + clone_cmd = self.module.get_bin_path('lxc-copy') + if not clone_cmd: + clone_vars = 'variables-lxc-clone' + clone_cmd = self.module.get_bin_path('lxc-clone', True) + + build_command = [ + clone_cmd, + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['clone'][clone_vars] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params['clone_snapshot']: + build_command.append('--snapshot') + # Check for backing_store == overlayfs if so force the use of snapshot + # If overlay fs is used and snapshot is unset the clone command will + # fail with an unsupported type. + elif self.module.params['backing_store'] == 'overlayfs': + build_command.append('--snapshot') + + rc, return_data, err = self.module.run_command(build_command) + if rc != 0: + message = "Failed executing %s." % os.path.basename(clone_cmd) + self.failure( + err=err, rc=rc, msg=message, command=' '.join( + build_command + ) + ) + else: + self.state_change = True + # Restore the original state of the origin container if it was + # not in a stopped state. + if container_state == 'running': + self.container.start() + elif container_state == 'frozen': + self.container.start() + self.container.freeze() + + return True + + def _create(self): + """Create a new LXC container. + + This method will build and execute a shell command to build the + container. It would have been nice to simply use the lxc python library + however at the time this was written the python library, in both py2 + and py3 didn't support some of the more advanced container create + processes. These missing processes mainly revolve around backing + LXC containers with block devices. + """ + + build_command = [ + self.module.get_bin_path('lxc-create', True), + '--name', self.container_name, + '--quiet' + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['create']['variables'] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params['container_log']: + # Set the logging path to the /var/log/lxc if uid is root. else + # set it to the home folder of the user executing. 
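+            # If creating /var/log/lxc/ fails (for example on a read-only
+            # filesystem), the OSError handler below also falls back to $HOME.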
+            try:
+                if os.getuid() != 0:
+                    log_path = os.getenv('HOME')
+                else:
+                    if not os.path.isdir('/var/log/lxc/'):
+                        os.makedirs('/var/log/lxc/')
+                    log_path = '/var/log/lxc/'
+            except OSError:
+                log_path = os.getenv('HOME')
+
+            build_command.extend([
+                '--logfile',
+                os.path.join(
+                    log_path, 'lxc-%s.log' % self.container_name
+                ),
+                '--logpriority',
+                self.module.params.get(
+                    'container_log_level'
+                ).upper()
+            ])
+
+        # Add the template commands to the end of the command if there are any
+        template_options = self.module.params['template_options']
+        if template_options:
+            build_command.append('--')
+            build_command += shlex.split(template_options)
+
+        rc, return_data, err = self.module.run_command(build_command)
+        if rc != 0:
+            message = "Failed executing lxc-create."
+            self.failure(
+                err=err, rc=rc, msg=message, command=' '.join(build_command)
+            )
+        else:
+            self.state_change = True
+
+    def _container_data(self):
+        """Returns a dict of container information.
+
+        :returns: container data
+        :rtype: ``dict``
+        """
+
+        return {
+            'interfaces': self.container.get_interfaces(),
+            'ips': self.container.get_ips(),
+            'state': self._get_state(),
+            'init_pid': int(self.container.init_pid),
+            'name': self.container_name,
+        }
+
+    def _unfreeze(self):
+        """Unfreeze a container.
+
+        :returns: True or False based on if the container was unfrozen.
+        :rtype: ``bool``
+        """
+
+        unfreeze = self.container.unfreeze()
+        if unfreeze:
+            self.state_change = True
+        return unfreeze
+
+    def _get_state(self):
+        """Return the state of a container.
+
+        If the container is not found the state returned is "absent".
+
+        :returns: state of a container as a lower case string.
+        :rtype: ``str``
+        """
+
+        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+            return str(self.container.state).lower()
+        return str('absent')
+
+    def _execute_command(self):
+        """Execute a shell command."""
+
+        container_command = self.module.params['container_command']
+        if container_command:
+            container_state = self._get_state()
+            if container_state == 'frozen':
+                self._unfreeze()
+            elif container_state == 'stopped':
+                self._container_startup()
+
+            self.container.attach_wait(create_script, container_command)
+            self.state_change = True
+
+    def _container_startup(self, timeout=60):
+        """Ensure a container is started.
+
+        :param timeout: Time before the start operation is abandoned.
+        :type timeout: ``int``
+        """
+
+        self.container = self.get_container_bind()
+        for dummy in range(timeout):
+            if self._get_state() == 'running':
+                return True
+
+            self.container.start()
+            self.state_change = True
+            # post startup sleep for 1 second.
+            time.sleep(1)
+        self.failure(
+            lxc_container=self._container_data(),
+            error='Failed to start container [ %s ]' % self.container_name,
+            rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+                ' available and that the container is in a functional'
+                ' state.' % self.container_name
+        )
+
+    def _check_archive(self):
+        """Create a compressed archive of a container.
+
+        This will store the archive result as self.archive_info.
+        """
+
+        if self.module.params['archive']:
+            self.archive_info = {
+                'archive': self._container_create_tar()
+            }
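Editor's note: `_container_startup()` above is a bounded retry loop — one start attempt plus a one-second sleep per iteration, failing the module once the budget is spent. The same idea as a standalone sketch, with callables standing in for the container bindings:

import time


def start_with_timeout(get_state, start, timeout=60):
    """Sketch of _container_startup(): retry start once per second until
    'running' is observed or `timeout` attempts have been made."""
    for dummy in range(timeout):
        if get_state() == 'running':
            return True
        start()
        time.sleep(1)
    return False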
+    def _check_clone(self):
+        """Clone the container into a new container if requested.
+
+        This will store the clone result as self.clone_info.
+        """
+
+        clone_name = self.module.params['clone_name']
+        if clone_name:
+            if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+                self.clone_info = {
+                    'cloned': self._container_create_clone()
+                }
+            else:
+                self.clone_info = {
+                    'cloned': False
+                }
+
+    def _destroyed(self, timeout=60):
+        """Ensure a container is destroyed.
+
+        :param timeout: Time before the destroy operation is abandoned.
+        :type timeout: ``int``
+        """
+
+        for dummy in range(timeout):
+            if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+                break
+
+            # Check if the container needs to have an archive created.
+            self._check_archive()
+
+            # Check if the container is to be cloned
+            self._check_clone()
+
+            if self._get_state() != 'stopped':
+                self.state_change = True
+                self.container.stop()
+
+            if self.container.destroy():
+                self.state_change = True
+
+            # post destroy attempt sleep for 1 second.
+            time.sleep(1)
+        else:
+            self.failure(
+                lxc_container=self._container_data(),
+                error='Failed to destroy container'
+                      ' [ %s ]' % self.container_name,
+                rc=1,
+                msg='The container [ %s ] failed to be destroyed. Check'
+                    ' that lxc is available and that the container is in a'
+                    ' functional state.' % self.container_name
+            )
+
+    def _frozen(self, count=0):
+        """Ensure a container is frozen.
+
+        If the container does not exist the container will be created.
+
+        :param count: number of times this command has been called by itself.
+        :type count: ``int``
+        """
+
+        self.check_count(count=count, method='frozen')
+        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+            self._execute_command()
+
+            # Perform any configuration updates
+            self._config()
+
+            container_state = self._get_state()
+            if container_state == 'frozen':
+                pass
+            elif container_state == 'running':
+                self.container.freeze()
+                self.state_change = True
+            else:
+                self._container_startup()
+                self.container.freeze()
+                self.state_change = True
+
+            # Check if the container needs to have an archive created.
+            self._check_archive()
+
+            # Check if the container is to be cloned
+            self._check_clone()
+        else:
+            self._create()
+            count += 1
+            self._frozen(count)
+
+    def _restarted(self, count=0):
+        """Ensure a container is restarted.
+
+        If the container does not exist the container will be created.
+
+        :param count: number of times this command has been called by itself.
+        :type count: ``int``
+        """
+
+        self.check_count(count=count, method='restart')
+        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+            self._execute_command()
+
+            # Perform any configuration updates
+            self._config()
+
+            if self._get_state() != 'stopped':
+                self.container.stop()
+                self.state_change = True
+
+            # Run container startup
+            self._container_startup()
+
+            # Check if the container needs to have an archive created.
+            self._check_archive()
+
+            # Check if the container is to be cloned
+            self._check_clone()
+        else:
+            self._create()
+            count += 1
+            self._restarted(count)
+
+    def _stopped(self, count=0):
+        """Ensure a container is stopped.
+
+        If the container does not exist the container will be created.
+
+        :param count: number of times this command has been called by itself.
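Editor's note: the `_frozen()`/`_restarted()`/`_stopped()`/`_started()` ensure-state methods share a create-then-recurse pattern, with `check_count()` guarding against infinite recursion when creation keeps failing. A compact sketch of the pattern, assuming injected callables:

def ensure_present_then(exists, create, apply_state, count=0, limit=1):
    """Sketch of the shared ensure-state pattern: create the container once
    and re-enter the same method; the count guard (check_count() in the
    module) aborts if creation keeps failing."""
    if count > limit:
        raise RuntimeError('container could not be created')
    if exists():
        apply_state()
    else:
        create()
        ensure_present_then(exists, create, apply_state,
                            count=count + 1, limit=limit)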
+ :type count: ``int`` + """ + + self.check_count(count=count, method='stop') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._stopped(count) + + def _started(self, count=0): + """Ensure a container is started. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='start') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + container_state = self._get_state() + if container_state == 'running': + pass + elif container_state == 'frozen': + self._unfreeze() + elif not self._container_startup(): + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % self.container_name + ) + + # Return data + self._execute_command() + + # Perform any configuration updates + self._config() + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._started(count) + + def _get_lxc_vg(self): + """Return the name of the Volume Group used in LXC.""" + + build_command = [ + self.module.get_bin_path('lxc-config', True), + "lxc.bdev.lvm.vg" + ] + rc, vg, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to read LVM VG from LXC config', + command=' '.join(build_command) + ) + else: + return str(vg.strip()) + + def _lvm_lv_list(self): + """Return a list of all lv in a current vg.""" + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvs', True) + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to get list of LVs', + command=' '.join(build_command) + ) + + all_lvms = [i.split() for i in stdout.splitlines()][1:] + return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] + + def _get_vg_free_pe(self, vg_name): + """Return the available size of a given VG. + + :param vg_name: Name of volume. + :type vg_name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + + build_command = [ + 'vgdisplay', + vg_name, + '--units', + 'g' + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read vg %s' % vg_name, + command=' '.join(build_command) + ) + + vg_info = [i.strip() for i in stdout.splitlines()][1:] + free_pe = [i for i in vg_info if i.startswith('Free')] + _free_pe = free_pe[0].split() + return float(_free_pe[-2]), _free_pe[-1] + + def _get_lv_size(self, lv_name): + """Return the available size of a given LV. + + :param lv_name: Name of volume. 
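Editor's note: `_get_vg_free_pe()` above scrapes the `Free  PE / Size` line out of `vgdisplay <vg> --units g` output. A sketch of that parsing step, assuming the usual `vgdisplay` layout (field positions may vary between LVM versions):

def parse_vg_free(vgdisplay_output):
    """Sketch of _get_vg_free_pe()'s parsing: find the 'Free  PE / Size'
    line and return (size, unit) from its last two fields."""
    for line in vgdisplay_output.splitlines()[1:]:
        line = line.strip()
        if line.startswith('Free'):
            fields = line.split()
            return float(fields[-2]), fields[-1]
    raise ValueError('no Free PE / Size line found')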
+        :type lv_name: ``str``
+        :returns: size and measurement of an LV
+        :rtype: ``tuple``
+        """
+
+        vg = self._get_lxc_vg()
+        lv = os.path.join(vg, lv_name)
+        build_command = [
+            'lvdisplay',
+            lv,
+            '--units',
+            'g'
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to read lv %s' % lv,
+                command=' '.join(build_command)
+            )
+
+        lv_info = [i.strip() for i in stdout.splitlines()][1:]
+        _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+        free_pe = _free_pe[0].split()
+        return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+    def _lvm_snapshot_create(self, source_lv, snapshot_name,
+                             snapshot_size_gb=5):
+        """Create an LVM snapshot.
+
+        :param source_lv: Name of lv to snapshot
+        :type source_lv: ``str``
+        :param snapshot_name: Name of lv snapshot
+        :type snapshot_name: ``str``
+        :param snapshot_size_gb: Size of snapshot to create
+        :type snapshot_size_gb: ``int``
+        """
+
+        vg = self._get_lxc_vg()
+        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+        if free_space < float(snapshot_size_gb):
+            message = (
+                'Snapshot size [ %s ] is greater than the available space'
+                ' [ %s ] on volume group [ %s ]'
+                % (snapshot_size_gb, free_space, vg)
+            )
+            self.failure(
+                error='Not enough space to create snapshot',
+                rc=2,
+                msg=message
+            )
+
+        # Create LVM Snapshot
+        build_command = [
+            self.module.get_bin_path('lvcreate', True),
+            "-n",
+            snapshot_name,
+            "-s",
+            os.path.join(vg, source_lv),
+            "-L%sg" % snapshot_size_gb
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='Failed to create LVM snapshot %s/%s --> %s'
+                    % (vg, source_lv, snapshot_name)
+            )
+
+    def _lvm_lv_mount(self, lv_name, mount_point):
+        """Mount an LV.
+
+        :param lv_name: name of the logical volume to mount
+        :type lv_name: ``str``
+        :param mount_point: path on the file system that is mounted.
+        :type mount_point: ``str``
+        """
+
+        vg = self._get_lxc_vg()
+
+        build_command = [
+            self.module.get_bin_path('mount', True),
+            "/dev/%s/%s" % (vg, lv_name),
+            mount_point,
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to mount LVM LV %s/%s to %s'
+                    % (vg, lv_name, mount_point)
+            )
+
+    def _create_tar(self, source_dir):
+        """Create an archive of a given ``source_dir`` under the configured archive path.
+
+        :param source_dir: Path to the directory to be archived.
+        :type source_dir: ``str``
+        """
+
+        old_umask = os.umask(int('0077', 8))
+
+        archive_path = self.module.params['archive_path']
+        if not os.path.isdir(archive_path):
+            os.makedirs(archive_path)
+
+        archive_compression = self.module.params['archive_compression']
+        compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+        # Build the archive file name from the archive path and container name.
+        archive_name = '%s.%s' % (
+            os.path.join(
+                archive_path,
+                self.container_name
+            ),
+            compression_type['extension']
+        )
+
+        build_command = [
+            self.module.get_bin_path('tar', True),
+            '--directory=%s' % os.path.realpath(source_dir),
+            compression_type['argument'],
+            archive_name,
+            '.'
+        ]
+
+        rc, stdout, err = self.module.run_command(
+            build_command
+        )
+
+        os.umask(old_umask)
+
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to create tar archive',
+                command=' '.join(build_command)
+            )
+
+        return archive_name
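Editor's note: `_create_tar()` above tightens the umask to `0077` so the archive is only owner-readable, then restores it. The same idea as a sketch, with `try/finally` added so the umask is restored even on failure (`tar_flag` stands in for the `LXC_COMPRESSION_MAP` 'argument' value, e.g. `-czf`, `-cjf`, or `-cJf`):

import os
import subprocess


def tar_directory(source_dir, archive_name, tar_flag='-czf'):
    """Sketch of _create_tar()'s umask handling with a finally clause."""
    old_umask = os.umask(0o077)  # archive may contain secrets: owner-only
    try:
        subprocess.check_call([
            'tar',
            '--directory=%s' % os.path.realpath(source_dir),
            tar_flag,
            archive_name,
            '.',
        ])
    finally:
        os.umask(old_umask)
    return archive_name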
+    def _lvm_lv_remove(self, lv_name):
+        """Remove an LV.
+
+        :param lv_name: The name of the logical volume
+        :type lv_name: ``str``
+        """
+
+        vg = self._get_lxc_vg()
+        build_command = [
+            self.module.get_bin_path('lvremove', True),
+            "-f",
+            "%s/%s" % (vg, lv_name),
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+                command=' '.join(build_command)
+            )
+
+    def _rsync_data(self, container_path, temp_dir):
+        """Sync the container directory to the temp directory.
+
+        :param container_path: path to the container rootfs
+        :type container_path: ``str``
+        :param temp_dir: path to the temporary local working directory
+        :type temp_dir: ``str``
+        """
+        # This loop is created to support overlayfs archives. This should
+        # squash all of the layers into a single archive.
+        fs_paths = container_path.split(':')
+        if 'overlayfs' in fs_paths:
+            fs_paths.pop(fs_paths.index('overlayfs'))
+
+        for fs_path in fs_paths:
+            # Set the path to the container data
+            fs_path = os.path.dirname(fs_path)
+
+            # Run the sync command
+            build_command = [
+                self.module.get_bin_path('rsync', True),
+                '-aHAX',
+                fs_path,
+                temp_dir,
+            ]
+            rc, stdout, err = self.module.run_command(
+                build_command,
+            )
+            if rc != 0:
+                self.failure(
+                    err=err,
+                    rc=rc,
+                    msg='failed to perform archive',
+                    command=' '.join(build_command)
+                )
+
+    def _unmount(self, mount_point):
+        """Unmount a file system.
+
+        :param mount_point: path on the file system that is mounted.
+        :type mount_point: ``str``
+        """
+
+        build_command = [
+            self.module.get_bin_path('umount', True),
+            mount_point,
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to unmount [ %s ]' % mount_point,
+                command=' '.join(build_command)
+            )
+
+    def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+        """Mount an overlayfs.
+
+        :param lowerdir: name/path of the lower directory
+        :type lowerdir: ``str``
+        :param upperdir: name/path of the upper directory
+        :type upperdir: ``str``
+        :param mount_point: path on the file system that is mounted.
+        :type mount_point: ``str``
+        """
+
+        build_command = [
+            self.module.get_bin_path('mount', True),
+            '-t', 'overlayfs',
+            '-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+            'overlayfs',
+            mount_point,
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+                    % (lowerdir, upperdir, mount_point, build_command)
+            )
+
+    def _container_create_tar(self):
+        """Create a tar archive from an LXC container.
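Editor's note: for overlayfs-backed containers, `lxc.rootfs` is a colon-separated string, and the surrounding `_container_create_tar()` peels the lower and upper directories out of it. A sketch of that split, assuming the `overlayfs:lowerdir:upperdir` form handled by this module (paths illustrative):

def split_overlayfs_rootfs(lxc_rootfs):
    """Sketch: given e.g. 'overlayfs:/path/to/lowerdir:/path/to/delta0',
    drop the marker and return (lowerdir, upperdir)."""
    parts = lxc_rootfs.split(':')
    if parts[0] != 'overlayfs' or len(parts) != 3:
        raise ValueError('not an overlayfs rootfs: %s' % lxc_rootfs)
    return parts[1], parts[2]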
+ + The process is as follows: + * Stop or Freeze the container + * Create temporary dir + * Copy container and config to temporary directory + * If LVM backed: + * Create LVM snapshot of LV backing the container + * Mount the snapshot to tmpdir/rootfs + * Restore the state of the container + * Create tar of tmpdir + * Clean up + """ + + # Create a temp dir + temp_dir = tempfile.mkdtemp() + + # Set the name of the working dir, temp + container_name + work_dir = os.path.join(temp_dir, self.container_name) + + # LXC container rootfs + lxc_rootfs = self.container.get_config_item('lxc.rootfs') + + # Test if the containers rootfs is a block device + block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + + # Test if the container is using overlayfs + overlayfs_backed = lxc_rootfs.startswith('overlayfs') + + mount_point = os.path.join(work_dir, 'rootfs') + + # Set the snapshot name if needed + snapshot_name = '%s_lxc_snapshot' % self.container_name + + container_state = self._get_state() + try: + # Ensure the original container is stopped or frozen + if container_state not in ['stopped', 'frozen']: + if container_state == 'running': + self.container.freeze() + else: + self.container.stop() + + # Sync the container data from the container_path to work_dir + self._rsync_data(lxc_rootfs, temp_dir) + + if block_backed: + if snapshot_name not in self._lvm_lv_list(): + if not os.path.exists(mount_point): + os.makedirs(mount_point) + + # Take snapshot + size, measurement = self._get_lv_size( + lv_name=self.container_name + ) + self._lvm_snapshot_create( + source_lv=self.container_name, + snapshot_name=snapshot_name, + snapshot_size_gb=size + ) + + # Mount snapshot + self._lvm_lv_mount( + lv_name=snapshot_name, + mount_point=mount_point + ) + else: + self.failure( + err='snapshot [ %s ] already exists' % snapshot_name, + rc=1, + msg='The snapshot [ %s ] already exists. Please clean' + ' up old snapshot of containers before continuing.' + % snapshot_name + ) + elif overlayfs_backed: + lowerdir, upperdir = lxc_rootfs.split(':')[1:] + self._overlayfs_mount( + lowerdir=lowerdir, + upperdir=upperdir, + mount_point=mount_point + ) + + # Set the state as changed and set a new fact + self.state_change = True + return self._create_tar(source_dir=work_dir) + finally: + if block_backed or overlayfs_backed: + # unmount snapshot + self._unmount(mount_point) + + if block_backed: + # Remove snapshot + self._lvm_lv_remove(snapshot_name) + + # Restore original state of container + if container_state == 'running': + if self._get_state() == 'frozen': + self.container.unfreeze() + else: + self.container.start() + + # Remove tmpdir + shutil.rmtree(temp_dir) + + def check_count(self, count, method): + if count > 1: + self.failure( + error='Failed to %s container' % method, + rc=1, + msg='The container [ %s ] failed to %s. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % (self.container_name, method) + ) + + def failure(self, **kwargs): + """Return a Failure when running an Ansible command. + + :param error: ``str`` Error that occurred. + :param rc: ``int`` Return code while executing an Ansible command. + :param msg: ``str`` Message to report. 
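Editor's note: `_container_create_tar()` above wraps the archive work in `try/finally` so the container is returned to its original state even when snapshotting or tarring fails. The state-restore skeleton, reduced to injected callables:

def run_with_state_restore(get_state, freeze, unfreeze, stop, start, work):
    """Skeleton of _container_create_tar()'s state handling: quiesce the
    container, do the work, and restore the original state in `finally`
    so a failed archive never leaves the container down."""
    original = get_state()
    if original not in ('stopped', 'frozen'):
        if original == 'running':
            freeze()
        else:
            stop()
    try:
        return work()
    finally:
        if original == 'running':
            if get_state() == 'frozen':
                unfreeze()
            else:
                start()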
+ """ + + self.module.fail_json(**kwargs) + + def run(self): + """Run the main method.""" + + action = getattr(self, LXC_ANSIBLE_STATES[self.state]) + action() + + outcome = self._container_data() + if self.archive_info: + outcome.update(self.archive_info) + + if self.clone_info: + outcome.update(self.clone_info) + + self.module.exit_json( + changed=self.state_change, + lxc_container=outcome + ) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + template=dict( + type='str', + default='ubuntu' + ), + backing_store=dict( + type='str', + choices=list(LXC_BACKING_STORE.keys()), + default='dir' + ), + template_options=dict( + type='str' + ), + config=dict( + type='path', + ), + vg_name=dict( + type='str', + default='lxc' + ), + thinpool=dict( + type='str' + ), + fs_type=dict( + type='str', + default='ext4' + ), + fs_size=dict( + type='str', + default='5G' + ), + directory=dict( + type='path' + ), + zfs_root=dict( + type='str' + ), + lv_name=dict( + type='str' + ), + lxc_path=dict( + type='path' + ), + state=dict( + choices=list(LXC_ANSIBLE_STATES.keys()), + default='started' + ), + container_command=dict( + type='str' + ), + container_config=dict( + type='list', + elements='str' + ), + container_log=dict( + type='bool', + default=False + ), + container_log_level=dict( + choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], + default='INFO' + ), + clone_name=dict( + type='str', + ), + clone_snapshot=dict( + type='bool', + default='false' + ), + archive=dict( + type='bool', + default=False + ), + archive_path=dict( + type='path', + ), + archive_compression=dict( + choices=list(LXC_COMPRESSION_MAP.keys()), + default='gzip' + ) + ), + supports_check_mode=False, + required_if=([ + ('archive', True, ['archive_path']) + ]), + ) + + if not HAS_LXC: + module.fail_json( + msg='The `lxc` module is not importable. Check the requirements.' + ) + + if not module.params['lv_name']: + module.params['lv_name'] = module.params['name'] + + lxc_manage = LxcContainerManagement(module=module) + lxc_manage.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py deleted file mode 120000 index 4c5de878ee..0000000000 --- a/plugins/modules/lxca_cmms.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/lxca/lxca_cmms.py \ No newline at end of file diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py new file mode 100644 index 0000000000..9078cd272a --- /dev/null +++ b/plugins/modules/lxca_cmms.py @@ -0,0 +1,173 @@ +#!/usr/bin/python +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Naval Patel (@navalkp) + - Prashant Bhosale (@prabhosa) +module: lxca_cmms +short_description: Custom module for lxca cmms inventory utility +description: + - This module returns/displays a inventory details of cmms. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + uuid: + description: UUID of device, this is string with length greater than 16. + type: str + + command_options: + description: Options to filter nodes information. + default: cmms + choices: + - cmms + - cmms_by_uuid + - cmms_by_chassis_uuid + type: str + + chassis: + description: UUID of chassis, this is string with length greater than 16. 
+ type: str + +extends_documentation_fragment: + - community.general.lxca_common + - community.general.attributes +""" + +EXAMPLES = r""" +# get all cmms info +- name: Get nodes data from LXCA + community.general.lxca_cmms: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + +# get specific cmms info by uuid +- name: Get nodes data from LXCA + community.general.lxca_cmms: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + uuid: "3C737AA5E31640CE949B10C129A8B01F" + command_options: cmms_by_uuid + +# get specific cmms info by chassis uuid +- name: Get nodes data from LXCA + community.general.lxca_cmms: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + chassis: "3C737AA5E31640CE949B10C129A8B01F" + command_options: cmms_by_chassis_uuid +""" + +RETURN = r""" +result: + description: Cmms detail from lxca. + returned: success + type: dict + sample: + cmmList: + - machineType: '' + model: '' + type: 'CMM' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + - machineType: '' + model: '' + type: 'CMM' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + # Multiple cmms details +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +try: + from pylxca import cmms +except ImportError: + pass + + +UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.' +CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.' +SUCCESS_MSG = "Success %s result" + + +def _cmms(module, lxca_con): + return cmms(lxca_con) + + +def _cmms_by_uuid(module, lxca_con): + if not module.params['uuid']: + module.fail_json(msg=UUID_REQUIRED) + return cmms(lxca_con, module.params['uuid']) + + +def _cmms_by_chassis_uuid(module, lxca_con): + if not module.params['chassis']: + module.fail_json(msg=CHASSIS_UUID_REQUIRED) + return cmms(lxca_con, chassis=module.params['chassis']) + + +def setup_module_object(): + """ + this function merge argument spec and create ansible module object + :return: + """ + args_spec = dict(LXCA_COMMON_ARGS) + args_spec.update(INPUT_ARG_SPEC) + module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) + + return module + + +FUNC_DICT = { + 'cmms': _cmms, + 'cmms_by_uuid': _cmms_by_uuid, + 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid, +} + + +INPUT_ARG_SPEC = dict( + command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', + 'cmms_by_chassis_uuid']), + uuid=dict(), + chassis=dict() +) + + +def execute_module(module): + """ + This function invoke commands + :param module: Ansible module object + """ + try: + with connection_object(module) as lxca_con: + result = FUNC_DICT[module.params['command_options']](module, lxca_con) + module.exit_json(changed=False, + msg=SUCCESS_MSG % module.params['command_options'], + result=result) + except Exception as exception: + error_msg = '; '.join((e) for e in exception.args) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def main(): + module = setup_module_object() + has_pylxca(module) + execute_module(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py deleted file mode 120000 index 3a0d51e850..0000000000 --- a/plugins/modules/lxca_nodes.py +++ /dev/null @@ -1 +0,0 @@ 
-./remote_management/lxca/lxca_nodes.py \ No newline at end of file diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py new file mode 100644 index 0000000000..010f189629 --- /dev/null +++ b/plugins/modules/lxca_nodes.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Naval Patel (@navalkp) + - Prashant Bhosale (@prabhosa) +module: lxca_nodes +short_description: Custom module for lxca nodes inventory utility +description: + - This module returns/displays a inventory details of nodes. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + uuid: + description: UUID of device, this is string with length greater than 16. + type: str + + command_options: + description: Options to filter nodes information. + default: nodes + choices: + - nodes + - nodes_by_uuid + - nodes_by_chassis_uuid + - nodes_status_managed + - nodes_status_unmanaged + type: str + + chassis: + description: UUID of chassis, this is string with length greater than 16. + type: str + +extends_documentation_fragment: + - community.general.lxca_common + - community.general.attributes +""" + +EXAMPLES = r""" +# get all nodes info +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes + +# get specific nodes info by uuid +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + uuid: "3C737AA5E31640CE949B10C129A8B01F" + command_options: nodes_by_uuid + +# get specific nodes info by chassis uuid +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + chassis: "3C737AA5E31640CE949B10C129A8B01F" + command_options: nodes_by_chassis_uuid + +# get managed nodes +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes_status_managed + +# get unmanaged nodes +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes_status_unmanaged +""" + +RETURN = r""" +result: + description: Nodes detail from lxca. + returned: always + type: dict + sample: + nodeList: + - machineType: '6241' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + - machineType: '8871' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + # Multiple nodes details +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +try: + from pylxca import nodes +except ImportError: + pass + + +UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.' +CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.' 
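Editor's note: both lxca modules route the validated `command_options` choice straight onto a handler through a dispatch dict (`FUNC_DICT` below) instead of an if/elif chain. The pattern in isolation, as a sketch:

def dispatch(command, handlers, *args):
    """Sketch of the FUNC_DICT dispatch used by the lxca_* modules: the
    validated command choice maps directly onto a handler function."""
    try:
        handler = handlers[command]
    except KeyError:
        raise ValueError('unsupported command: %s' % command)
    return handler(*args)


# Mirrors: FUNC_DICT[module.params['command_options']](module, lxca_con)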
+SUCCESS_MSG = "Success %s result" + + +def _nodes(module, lxca_con): + return nodes(lxca_con) + + +def _nodes_by_uuid(module, lxca_con): + if not module.params['uuid']: + module.fail_json(msg=UUID_REQUIRED) + return nodes(lxca_con, module.params['uuid']) + + +def _nodes_by_chassis_uuid(module, lxca_con): + if not module.params['chassis']: + module.fail_json(msg=CHASSIS_UUID_REQUIRED) + return nodes(lxca_con, chassis=module.params['chassis']) + + +def _nodes_status_managed(module, lxca_con): + return nodes(lxca_con, status='managed') + + +def _nodes_status_unmanaged(module, lxca_con): + return nodes(lxca_con, status='unmanaged') + + +def setup_module_object(): + """ + this function merge argument spec and create ansible module object + :return: + """ + args_spec = dict(LXCA_COMMON_ARGS) + args_spec.update(INPUT_ARG_SPEC) + module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) + + return module + + +FUNC_DICT = { + 'nodes': _nodes, + 'nodes_by_uuid': _nodes_by_uuid, + 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid, + 'nodes_status_managed': _nodes_status_managed, + 'nodes_status_unmanaged': _nodes_status_unmanaged, +} + + +INPUT_ARG_SPEC = dict( + command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid', + 'nodes_by_chassis_uuid', + 'nodes_status_managed', + 'nodes_status_unmanaged']), + uuid=dict(), chassis=dict() +) + + +def execute_module(module): + """ + This function invoke commands + :param module: Ansible module object + """ + try: + with connection_object(module) as lxca_con: + result = FUNC_DICT[module.params['command_options']](module, lxca_con) + module.exit_json(changed=False, + msg=SUCCESS_MSG % module.params['command_options'], + result=result) + except Exception as exception: + error_msg = '; '.join(exception.args) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def main(): + module = setup_module_object() + has_pylxca(module) + execute_module(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py deleted file mode 120000 index e4cce99fee..0000000000 --- a/plugins/modules/lxd_container.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/lxd/lxd_container.py \ No newline at end of file diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py new file mode 100644 index 0000000000..22e4315150 --- /dev/null +++ b/plugins/modules/lxd_container.py @@ -0,0 +1,888 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Hiroaki Nakamura +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lxd_container +short_description: Manage LXD instances +description: + - Management of LXD containers and virtual machines. +author: "Hiroaki Nakamura (@hnakamur)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 6.4.0 + diff_mode: + support: full + version_added: 6.4.0 +options: + name: + description: + - Name of an instance. + type: str + required: true + project: + description: + - Project of an instance. + - See U(https://documentation.ubuntu.com/lxd/en/latest/projects/). + required: false + type: str + version_added: 4.8.0 + architecture: + description: + - The architecture for the instance (for example V(x86_64) or V(i686)). 
+      - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+    type: str
+    required: false
+  config:
+    description:
+      - 'The config for the instance (for example V({"limits.cpu": "2"})).'
+      - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+      - If the instance already exists and its "config" values in metadata obtained from the LXD API
+        U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get)
+        are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put).
+      - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true).
+    type: dict
+    required: false
+  ignore_volatile_options:
+    description:
+      - If set to V(true), options starting with C(volatile.) are ignored. As a result, they are reapplied for each execution.
+      - This default behavior can be changed by setting this option to V(false).
+      - The default value changed from V(true) to V(false) in community.general 6.0.0.
+    type: bool
+    required: false
+    default: false
+    version_added: 3.7.0
+  profiles:
+    description:
+      - Profiles to be used by the instance.
+    type: list
+    elements: str
+  devices:
+    description:
+      - 'The devices for the instance (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).'
+      - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+    type: dict
+    required: false
+  ephemeral:
+    description:
+      - Whether or not the instance is ephemeral (for example V(true) or V(false)).
+      - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+    required: false
+    type: bool
+  source:
+    description:
+      - 'The source for the instance (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/",
+        "protocol": "simplestreams", "alias": "22.04" })).'
+      - See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.
+      - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).'
+    required: false
+    type: dict
+  state:
+    choices:
+      - started
+      - stopped
+      - restarted
+      - absent
+      - frozen
+    description:
+      - Define the state of an instance.
+    required: false
+    default: started
+    type: str
+  target:
+    description:
+      - For cluster deployments. It attempts to create an instance on a target node. If the instance exists elsewhere in a
+        cluster, then it is not replaced nor moved. The name should match the node name you see in C(lxc cluster
+        list).
+    type: str
+    required: false
+    version_added: 1.0.0
+  timeout:
+    description:
+      - A timeout for changing the state of the instance.
+      - This is also used as a timeout for waiting until IPv4 addresses are set to all network interfaces in the instance
+        after starting or restarting.
+    required: false
+    default: 30
+    type: int
+  type:
+    description:
+      - Instance type can be either V(virtual-machine) or V(container).
+    required: false
+    default: container
+    choices:
+      - container
+      - virtual-machine
+    type: str
+    version_added: 4.1.0
+  wait_for_ipv4_addresses:
+    description:
+      - If this is V(true), the C(lxd_container) waits until IPv4 addresses are set to all network interfaces in the instance
+        after starting or restarting.
+    required: false
+    default: false
+    type: bool
+  wait_for_container:
+    description:
+      - If set to V(true), the module waits until the operation reports a success status when performing container operations.
+    default: false
+    type: bool
+    version_added: 4.4.0
+  force_stop:
+    description:
+      - If this is V(true), the C(lxd_container) module forces the instance to stop when it stops or restarts the instance.
+    required: false
+    default: false
+    type: bool
+  url:
+    description:
+      - The unix domain socket path or the https URL for the LXD server.
+    required: false
+    default: unix:/var/lib/lxd/unix.socket
+    type: str
+  snap_url:
+    description:
+      - The unix domain socket path when LXD is installed by the snap package manager.
+    required: false
+    default: unix:/var/snap/lxd/common/lxd/unix.socket
+    type: str
+  client_key:
+    description:
+      - The client certificate key file path.
+      - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+    required: false
+    aliases: [key_file]
+    type: path
+  client_cert:
+    description:
+      - The client certificate file path.
+      - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+    required: false
+    aliases: [cert_file]
+    type: path
+  trust_password:
+    description:
+      - The client trusted password.
+      - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config
+        set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+      - If O(trust_password) is set, this module sends a request for authentication before sending any requests.
+    required: false
+    type: str
+notes:
+  - Instances can be a container or a virtual machine, and each must have a unique name. If you attempt to create an instance
+    with a name that already exists in the user's namespace, the module simply returns as "unchanged".
+  - 'There are two ways to run commands inside a container or virtual machine: using the command module, or using the Ansible
+    lxd connection plugin bundled in Ansible >= 2.1. The latter requires Python to be installed in the instance, which can
+    be done with the command module.'
+  - You can copy a file from the host to the instance with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template)
+    modules and the P(community.general.lxd#connection) connection plugin. See the example below.
+  - You can copy a file from the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename).
+    See the first example below.
+  - Linuxcontainers.org phased out LXC/LXD support in March 2024
+    (U(https://discuss.linuxcontainers.org/t/important-notice-for-lxd-users-image-server/18479)).
+    Currently only Ubuntu is still providing images.
+""" + +EXAMPLES = r""" +# An example for creating a Ubuntu container and install python +- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + server: https://cloud-images.ubuntu.com/releases/ + protocol: simplestreams + alias: "22.04" + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + + - name: Check python is installed in container + delegate_to: mycontainer + ansible.builtin.raw: dpkg -s python + register: python_install_check + failed_when: python_install_check.rc not in [0, 1] + changed_when: false + + - name: Install python in container + delegate_to: mycontainer + ansible.builtin.raw: apt-get install -y python + when: python_install_check.rc == 1 + +# An example for creating an Ubuntu 14.04 container using an image fingerprint. +# This requires changing 'server' and 'protocol' key values, replacing the +# 'alias' key with with 'fingerprint' and supplying an appropriate value that +# matches the container image you wish to use. +- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + # Provides current (and older) Ubuntu images with listed fingerprints + server: https://cloud-images.ubuntu.com/releases + # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list') + protocol: simplestreams + # This provides an Ubuntu 14.04 LTS amd64 image from 20150814. + fingerprint: e9a8bdfab6dc + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + +# An example of creating a ubuntu-minial container +- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + # Provides Ubuntu minimal images + server: https://cloud-images.ubuntu.com/minimal/releases/ + protocol: simplestreams + alias: "22.04" + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + +# An example for creating container in project other than default +- hosts: localhost + connection: local + tasks: + - name: Create a started container in project mytestproject + community.general.lxd_container: + name: mycontainer + project: mytestproject + ignore_volatile_options: true + state: started + source: + protocol: simplestreams + type: image + mode: pull + server: https://cloud-images.ubuntu.com/releases/ + alias: "22.04" + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + +# An example for deleting a container +- hosts: localhost + connection: local + tasks: + - name: Delete a container + community.general.lxd_container: + name: mycontainer + state: absent + type: container + +# An example for restarting a container +- hosts: localhost + connection: local + tasks: + - name: Restart a container + community.general.lxd_container: + name: mycontainer + state: restarted + type: container + +# An example for restarting a container using https to connect to the LXD server +- hosts: localhost + connection: local + tasks: + - name: Restart a container + community.general.lxd_container: + url: https://127.0.0.1:8443 + # These client_cert and client_key values are equal to the default values. 
+ # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: mycontainer + state: restarted + +# Note your container must be in the inventory for the below example. +# +# [containers] +# mycontainer ansible_connection=lxd +# +- hosts: + - mycontainer + tasks: + - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts" + ansible.builtin.fetch: + src: /etc/hosts + dest: /tmp/mycontainer-hosts + flat: true + +# An example for LXD cluster deployments. This example will create two new container on specific +# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster +# members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'. +# LXD API calls can be made to any LXD member, in this example, we send API requests to +# 'node01.example.com', which matches ansible inventory name. +- hosts: node01.example.com + tasks: + - name: Create LXD container + community.general.lxd_container: + name: new-container-1 + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + alias: "22.04" + target: node01 + + - name: Create container on another node + community.general.lxd_container: + name: new-container-2 + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + alias: "22.04" + target: node02 + +# An example for creating a virtual machine +- hosts: localhost + connection: local + tasks: + - name: Create container on another node + community.general.lxd_container: + name: new-vm-1 + type: virtual-machine + state: started + ignore_volatile_options: true + wait_for_ipv4_addresses: true + profiles: ["default"] + source: + protocol: simplestreams + type: image + mode: pull + server: ['...'] # URL to the image server + alias: debian/11 + timeout: 600 +""" + +RETURN = r""" +addresses: + description: Mapping from the network device name to a list of IPv4 addresses in the instance. + returned: when state is started or restarted + type: dict + sample: + { + "eth0": [ + "10.155.92.191" + ] + } +old_state: + description: The old state of the instance. + returned: when state is started or restarted + type: str + sample: "stopped" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the instance. + returned: success + type: list + sample: ["create", "start"] +""" + +import copy +import datetime +import os +import time +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException + +# LXD_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXD_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen', +} + +# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible +# lxc_container module state parameter value. +ANSIBLE_LXD_STATES = { + 'Running': 'started', + 'Stopped': 'stopped', + 'Frozen': 'frozen', +} + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# CONFIG_PARAMS is a list of config attribute names. 
+CONFIG_PARAMS = [ + 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source', 'type' +] + +# CONFIG_CREATION_PARAMS is a list of attribute names that are only applied +# on instance creation. +CONFIG_CREATION_PARAMS = ['source', 'type'] + + +class LXDContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self.project = self.module.params['project'] + self._build_config() + + self.state = self.module.params['state'] + + self.timeout = self.module.params['timeout'] + self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses'] + self.force_stop = self.module.params['force_stop'] + self.addresses = None + self.target = self.module.params['target'] + self.wait_for_container = self.module.params['wait_for_container'] + + self.type = self.module.params['type'] + + self.key_file = self.module.params.get('client_key') + if self.key_file is None: + self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME']) + self.cert_file = self.module.params.get('client_cert') + if self.cert_file is None: + self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME']) + self.debug = self.module._verbosity >= 4 + + try: + if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params['url'] + elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): + self.url = self.module.params['snap_url'] + else: + self.url = self.module.params['url'] + except Exception as e: + self.module.fail_json(msg=e.msg) + + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + + # LXD (3.19) Rest API provides instances endpoint, failback to containers and virtual-machines + # https://documentation.ubuntu.com/lxd/en/latest/rest-api/#instances-containers-and-virtual-machines + self.api_endpoint = '/1.0/instances' + check_api_endpoint = self.client.do('GET', '{0}?project='.format(self.api_endpoint), ok_error_codes=[404]) + + if check_api_endpoint['error_code'] == 404: + if self.type == 'container': + self.api_endpoint = '/1.0/containers' + elif self.type == 'virtual-machine': + self.api_endpoint = '/1.0/virtual-machines' + + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + self.diff = {'before': {}, 'after': {}} + self.old_instance_json = {} + self.old_sections = {} + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_instance_json(self): + url = '{0}/{1}'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + return self.client.do('GET', url, ok_error_codes=[404]) + + def _get_instance_state_json(self): + url = '{0}/{1}/state'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + return self.client.do('GET', url, ok_error_codes=[404]) + + @staticmethod + def _instance_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return ANSIBLE_LXD_STATES[resp_json['metadata']['status']] + + def _change_state(self, action, force_stop=False): + url = 
'{0}/{1}/state'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + body_json = {'action': action, 'timeout': self.timeout} + if force_stop: + body_json['force'] = True + if not self.module.check_mode: + return self.client.do('PUT', url, body_json=body_json) + + def _create_instance(self): + url = self.api_endpoint + url_params = dict() + if self.target: + url_params['target'] = self.target + if self.project: + url_params['project'] = self.project + if url_params: + url = '{0}?{1}'.format(url, urlencode(url_params)) + config = self.config.copy() + config['name'] = self.name + if self.type not in self.api_endpoint: + config['type'] = self.type + if not self.module.check_mode: + self.client.do('POST', url, config, wait_for_container=self.wait_for_container) + self.actions.append('create') + + def _start_instance(self): + self._change_state('start') + self.actions.append('start') + + def _stop_instance(self): + self._change_state('stop', self.force_stop) + self.actions.append('stop') + + def _restart_instance(self): + self._change_state('restart', self.force_stop) + self.actions.append('restart') + + def _delete_instance(self): + url = '{0}/{1}'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + if not self.module.check_mode: + self.client.do('DELETE', url) + self.actions.append('delete') + + def _freeze_instance(self): + self._change_state('freeze') + self.actions.append('freeze') + + def _unfreeze_instance(self): + self._change_state('unfreeze') + self.actions.append('unfreeze') + + def _instance_ipv4_addresses(self, ignore_devices=None): + ignore_devices = ['lo'] if ignore_devices is None else ignore_devices + data = (self._get_instance_state_json() or {}).get('metadata', None) or {} + network = { + k: v + for k, v in (data.get('network') or {}).items() + if k not in ignore_devices + } + addresses = { + k: [a['address'] for a in v['addresses'] if a['family'] == 'inet'] + for k, v in network.items() + } + return addresses + + @staticmethod + def _has_all_ipv4_addresses(addresses): + return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values()) + + def _get_addresses(self): + try: + due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout) + while datetime.datetime.now() < due: + time.sleep(1) + addresses = self._instance_ipv4_addresses() + if self._has_all_ipv4_addresses(addresses) or self.module.check_mode: + self.addresses = addresses + return + except LXDClientException as e: + e.msg = 'timeout for getting IPv4 addresses' + raise + + def _started(self): + if self.old_state == 'absent': + self._create_instance() + self._start_instance() + else: + if self.old_state == 'frozen': + self._unfreeze_instance() + elif self.old_state == 'stopped': + self._start_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _stopped(self): + if self.old_state == 'absent': + self._create_instance() + else: + if self.old_state == 'stopped': + if self._needs_to_apply_instance_configs(): + self._start_instance() + self._apply_instance_configs() + self._stop_instance() + else: + if self.old_state == 'frozen': + self._unfreeze_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + self._stop_instance() + + def _restarted(self): + if self.old_state == 'absent': + self._create_instance() + 
self._start_instance() + else: + if self.old_state == 'frozen': + self._unfreeze_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + self._restart_instance() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _destroyed(self): + if self.old_state != 'absent': + if self.old_state == 'frozen': + self._unfreeze_instance() + if self.old_state != 'stopped': + self._stop_instance() + self._delete_instance() + + def _frozen(self): + if self.old_state == 'absent': + self._create_instance() + self._start_instance() + self._freeze_instance() + else: + if self.old_state == 'stopped': + self._start_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + self._freeze_instance() + + def _needs_to_change_instance_config(self, key): + if key not in self.config: + return False + + if key == 'config': + # self.old_sections is already filtered for volatile keys if necessary + old_configs = dict(self.old_sections.get(key, None) or {}) + for k, v in self.config['config'].items(): + if k not in old_configs: + return True + if old_configs[k] != v: + return True + return False + else: + old_configs = self.old_sections.get(key, {}) + return self.config[key] != old_configs + + def _needs_to_apply_instance_configs(self): + for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): + if self._needs_to_change_instance_config(param): + return True + return False + + def _apply_instance_configs(self): + old_metadata = copy.deepcopy(self.old_instance_json).get('metadata', None) or {} + body_json = {} + for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): + if param in old_metadata: + body_json[param] = old_metadata[param] + + if self._needs_to_change_instance_config(param): + if param == 'config': + body_json['config'] = body_json.get('config', None) or {} + for k, v in self.config['config'].items(): + body_json['config'][k] = v + else: + body_json[param] = self.config[param] + self.diff['after']['instance'] = body_json + url = '{0}/{1}'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + if not self.module.check_mode: + self.client.do('PUT', url, body_json=body_json) + self.actions.append('apply_instance_configs') + + def run(self): + """Run the main method.""" + + def adjust_content(content): + return content if not isinstance(content, dict) else { + k: v for k, v in content.items() if not (self.ignore_volatile_options and k.startswith('volatile.')) + } + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + self.ignore_volatile_options = self.module.params.get('ignore_volatile_options') + + self.old_instance_json = self._get_instance_json() + self.old_sections = { + section: adjust_content(content) + for section, content in (self.old_instance_json.get('metadata') or {}).items() + if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) + } + + self.diff['before']['instance'] = self.old_sections + # preliminary, will be overwritten in _apply_instance_configs() if called + self.diff['after']['instance'] = self.config + + self.old_state = self._instance_json_to_module_state(self.old_instance_json) + self.diff['before']['state'] = self.old_state + self.diff['after']['state'] = self.state + + action = getattr(self, LXD_ANSIBLE_STATES[self.state]) + action() + + state_changed = len(self.actions) > 0 + result_json = { + 'log_verbosity': self.module._verbosity, + 'changed': state_changed, + 
'old_state': self.old_state, + 'actions': self.actions, + 'diff': self.diff, + } + if self.client.debug: + result_json['logs'] = self.client.logs + if self.addresses is not None: + result_json['addresses'] = self.addresses + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions, + 'diff': self.diff, + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True, + ), + project=dict( + type='str', + ), + architecture=dict( + type='str', + ), + config=dict( + type='dict', + ), + ignore_volatile_options=dict( + type='bool', + default=False, + ), + devices=dict( + type='dict', + ), + ephemeral=dict( + type='bool', + ), + profiles=dict( + type='list', + elements='str', + ), + source=dict( + type='dict', + ), + state=dict( + choices=list(LXD_ANSIBLE_STATES.keys()), + default='started', + ), + target=dict( + type='str', + ), + timeout=dict( + type='int', + default=30 + ), + type=dict( + type='str', + default='container', + choices=['container', 'virtual-machine'], + ), + wait_for_container=dict( + type='bool', + default=False, + ), + wait_for_ipv4_addresses=dict( + type='bool', + default=False, + ), + force_stop=dict( + type='bool', + default=False, + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL, + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket', + ), + client_key=dict( + type='path', + aliases=['key_file'], + ), + client_cert=dict( + type='path', + aliases=['cert_file'], + ), + trust_password=dict(type='str', no_log=True), + ), + supports_check_mode=True, + ) + + lxd_manage = LXDContainerManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py deleted file mode 120000 index 07179686c7..0000000000 --- a/plugins/modules/lxd_profile.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/lxd/lxd_profile.py \ No newline at end of file diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py new file mode 100644 index 0000000000..8a6fd19aa6 --- /dev/null +++ b/plugins/modules/lxd_profile.py @@ -0,0 +1,551 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Hiroaki Nakamura +# Copyright (c) 2020, Frank Dornheim +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: lxd_profile +short_description: Manage LXD profiles +description: + - Management of LXD profiles. +author: "Hiroaki Nakamura (@hnakamur)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of a profile. + required: true + type: str + project: + description: + - Project of a profile. See U(https://documentation.ubuntu.com/lxd/en/latest/projects/). + type: str + required: false + version_added: 4.8.0 + description: + description: + - Description of the profile. + type: str + config: + description: + - 'The config for the instance (for example V({"limits.memory": "4GB"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get). 
+ - If the profile already exists and its C(config) value in the metadata obtained from GET /1.0/profiles/ + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get) + is different, this module tries to apply the configuration U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put). + - Not every config value can be applied to an existing profile; in some cases you need to delete and recreate the profile instead. + required: false + type: dict + devices: + description: + - 'The devices for the profile (for example V({"rootfs": {"path": "/dev/kvm", "type": "unix-char"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get). + required: false + type: dict + new_name: + description: + - A new name for the profile. + - If this parameter is specified, the profile is renamed to this name. + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post). + required: false + type: str + merge_profile: + description: + - Merge the configuration of the present profile with the new desired configuration, instead of replacing it. + required: false + default: false + type: bool + version_added: 2.1.0 + state: + choices: + - present + - absent + description: + - Define the state of a profile. + required: false + default: present + type: str + url: + description: + - The Unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The Unix domain socket path when LXD is installed by the snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password <some random password>). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If O(trust_password) is set, this module sends a request for authentication before sending any requests. + required: false + type: str +notes: + - Profiles must have a unique name. If you attempt to create a profile with a name that already exists in the user's namespace, + the module simply returns as "unchanged".
+""" + +EXAMPLES = r""" +# An example for creating a profile +- hosts: localhost + connection: local + tasks: + - name: Create a profile + community.general.lxd_profile: + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for creating a profile in project mytestproject +- hosts: localhost + connection: local + tasks: + - name: Create a profile + community.general.lxd_profile: + name: testprofile + project: mytestproject + state: present + config: {} + description: test profile in project mytestproject + devices: {} + +# An example for creating a profile via http connection +- hosts: localhost + connection: local + tasks: + - name: Create macvlan profile + community.general.lxd_profile: + url: https://127.0.0.1:8443 + # These client_cert and client_key values are equal to the default values. + # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for modify/merge a profile +- hosts: localhost + connection: local + tasks: + - name: Merge a profile + community.general.lxd_profile: + merge_profile: true + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for deleting a profile +- hosts: localhost + connection: local + tasks: + - name: Delete a profile + community.general.lxd_profile: + name: macvlan + state: absent + +# An example for renaming a profile +- hosts: localhost + connection: local + tasks: + - name: Rename a profile + community.general.lxd_profile: + name: macvlan + new_name: macvlan2 + state: present +""" + +RETURN = r""" +old_state: + description: The old state of the profile. + returned: success + type: str + sample: "absent" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the profile. + returned: success + type: list + sample: ["create"] +""" + +import os +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# PROFILE_STATES is a list for states supported +PROFILES_STATES = [ + 'present', 'absent' +] + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'config', 'description', 'devices' +] + + +class LXDProfileManagement(object): + def __init__(self, module): + """Management of LXC profiles via Ansible. + + :param module: Processed Ansible Module. 
+ :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self.project = self.module.params['project'] + self._build_config() + self.state = self.module.params['state'] + self.new_name = self.module.params.get('new_name', None) + + self.key_file = self.module.params.get('client_key') + if self.key_file is None: + self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME']) + self.cert_file = self.module.params.get('client_cert') + if self.cert_file is None: + self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME']) + self.debug = self.module._verbosity >= 4 + + try: + if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params['url'] + elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): + self.url = self.module.params['snap_url'] + else: + self.url = self.module.params['url'] + except Exception as e: + self.module.fail_json(msg=e.msg) + + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_profile_json(self): + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + return self.client.do('GET', url, ok_error_codes=[404]) + + @staticmethod + def _profile_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return 'present' + + def _update_profile(self): + if self.state == 'present': + if self.old_state == 'absent': + if self.new_name is None: + self._create_profile() + else: + self.module.fail_json( + msg='new_name must not be set when the profile does not exist and the state is present', + changed=False) + else: + if self.new_name is not None and self.new_name != self.name: + self._rename_profile() + if self._needs_to_apply_profile_configs(): + self._apply_profile_configs() + elif self.state == 'absent': + if self.old_state == 'present': + if self.new_name is None: + self._delete_profile() + else: + self.module.fail_json( + msg='new_name must not be set when the profile exists and the specified state is absent', + changed=False) + + def _create_profile(self): + url = '/1.0/profiles' + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + config = self.config.copy() + config['name'] = self.name + self.client.do('POST', url, config) + self.actions.append('create') + + def _rename_profile(self): + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + config = {'name': self.new_name} + self.client.do('POST', url, config) + self.actions.append('rename') + self.name = self.new_name + + def _needs_to_change_profile_config(self, key): + if key not in self.config: + return False + old_configs = self.old_profile_json['metadata'].get(key, None) + return self.config[key] != old_configs + + def _needs_to_apply_profile_configs(self): + return ( + self._needs_to_change_profile_config('config') or + self._needs_to_change_profile_config('description') or + self._needs_to_change_profile_config('devices') + ) + + def _merge_dicts(self, source, 
destination): + """Merge two dictionaries recursively. + + Values from the source dict overwrite matching keys in the + destination dict; nested dicts are merged key by key. + + Args: + dict(source): source dict + dict(destination): destination dict + Kwargs: + None + Raises: + None + Returns: + dict(destination): merged dict""" + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = destination.setdefault(key, {}) + self._merge_dicts(value, node) + else: + destination[key] = value + return destination + + def _merge_config(self, config): + """ merge profile + + Merge the configuration of the present profile with the newly desired config items + + Args: + dict(config): Dict with the old config in 'metadata' and new config in 'config' + Kwargs: + None + Raises: + None + Returns: + dict(config): new config""" + # merge or copy the sections from the existing profile to 'config' + for item in ['config', 'description', 'devices', 'name', 'used_by']: + if item in config: + config[item] = self._merge_dicts(config['metadata'][item], config[item]) + else: + config[item] = config['metadata'][item] + # merge or copy the sections from the ansible-task to 'config' + return self._merge_dicts(self.config, config) + + def _generate_new_config(self, config): + """ rebuild profile + + Rebuild the profile from the configuration provided in the play. + Existing configurations are discarded. + + This is the default behavior. + + Args: + dict(config): Dict with the old config in 'metadata' and new config in 'config' + Kwargs: + None + Raises: + None + Returns: + dict(config): new config""" + for k, v in self.config.items(): + config[k] = v + return config + + def _apply_profile_configs(self): + """ Selection of the procedure: rebuild or merge + + The standard behavior is that all information not contained + in the play is discarded. + + If "merge_profile" is provided in the play and set to "True", then existing + configurations from the profile and the newly defined ones are merged.
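+ The merge itself is delegated to _merge_config(), which walks nested keys recursively via _merge_dicts().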
+ + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + config = self.old_profile_json.copy() + if self.module.params['merge_profile']: + config = self._merge_config(config) + else: + config = self._generate_new_config(config) + + # upload config to lxd + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + self.client.do('PUT', url, config) + self.actions.append('apply_profile_configs') + + def _delete_profile(self): + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + self.client.do('DELETE', url) + self.actions.append('delete') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_profile_json = self._get_profile_json() + self.old_state = self._profile_json_to_module_state(self.old_profile_json) + self._update_profile() + + state_changed = len(self.actions) > 0 + result_json = { + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + project=dict( + type='str', + ), + new_name=dict( + type='str', + ), + config=dict( + type='dict', + ), + description=dict( + type='str', + ), + devices=dict( + type='dict', + ), + merge_profile=dict( + type='bool', + default=False + ), + state=dict( + choices=PROFILES_STATES, + default='present' + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket' + ), + client_key=dict( + type='path', + aliases=['key_file'] + ), + client_cert=dict( + type='path', + aliases=['cert_file'] + ), + trust_password=dict(type='str', no_log=True) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDProfileManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lxd_project.py b/plugins/modules/lxd_project.py new file mode 100644 index 0000000000..a0bd710547 --- /dev/null +++ b/plugins/modules/lxd_project.py @@ -0,0 +1,452 @@ +#!/usr/bin/python +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: lxd_project +short_description: Manage LXD projects +version_added: 4.8.0 +description: + - Management of LXD projects. +author: "Raymond Chang (@we10710aa)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the project. + required: true + type: str + description: + description: + - Description of the project. + type: str + config: + description: + - 'The config for the project (for example V({"features.profiles": "true"})).' 
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get). + - If the project already exists and its "config" value in the metadata obtained from C(GET /1.0/projects/) + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get) + is different, this module tries to apply the configuration U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put). + type: dict + new_name: + description: + - A new name for the project. + - If this parameter is specified, the project is renamed to this name. + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post). + required: false + type: str + merge_project: + description: + - Merge the configuration of the present project with the new desired configuration, instead of replacing it. If the configuration + is the same after merging, no change is made. + required: false + default: false + type: bool + state: + choices: + - present + - absent + description: + - Define the state of a project. + required: false + default: present + type: str + url: + description: + - The Unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The Unix domain socket path when LXD is installed by the snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password <some random password>). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If O(trust_password) is set, this module sends a request for authentication before sending any requests. + required: false + type: str +notes: + - Projects must have a unique name. If you attempt to create a project with a name that already exists in the user's namespace, + the module simply returns as "unchanged". +""" + +EXAMPLES = r""" +# An example for creating a project +- hosts: localhost + connection: local + tasks: + - name: Create a project + community.general.lxd_project: + name: ansible-test-project + state: present + config: {} + description: my new project + +# An example for renaming a project +- hosts: localhost + connection: local + tasks: + - name: Rename ansible-test-project to ansible-test-project-new-name + community.general.lxd_project: + name: ansible-test-project + new_name: ansible-test-project-new-name + state: present + config: {} + description: my new project +""" + +RETURN = r""" +old_state: + description: The old state of the project. + returned: success + type: str + sample: "absent" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + elements: dict + contains: + type: + description: Type of actions performed, currently only V(sent request). + type: str + sample: "sent request" + request: + description: HTTP request sent to LXD server. + type: dict + contains: + method: + description: Method of HTTP request.
+ type: str + sample: "GET" + url: + description: URL path of HTTP request. + type: str + sample: "/1.0/projects/test-project" + json: + description: JSON body of HTTP request. + type: str + sample: "(too long to be placed here)" + timeout: + description: Timeout of HTTP request, V(null) if unset. + type: int + sample: null + response: + description: HTTP response received from LXD server. + type: dict + contains: + json: + description: JSON of HTTP response. + type: str + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the project. + returned: success + type: list + elements: str + sample: ["create"] +""" + +from ansible_collections.community.general.plugins.module_utils.lxd import ( + LXDClient, LXDClientException, default_key_file, default_cert_file +) +from ansible.module_utils.basic import AnsibleModule +import os + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# PROJECTS_STATES is a list for states supported +PROJECTS_STATES = [ + 'present', 'absent' +] + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'config', 'description' +] + + +class LXDProjectManagement(object): + def __init__(self, module): + """Management of LXC projects via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self._build_config() + self.state = self.module.params['state'] + self.new_name = self.module.params.get('new_name', None) + + self.key_file = self.module.params.get('client_key') + if self.key_file is None: + self.key_file = default_key_file() + self.cert_file = self.module.params.get('client_cert') + if self.cert_file is None: + self.cert_file = default_cert_file() + self.debug = self.module._verbosity >= 4 + + try: + if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params['url'] + elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): + self.url = self.module.params['snap_url'] + else: + self.url = self.module.params['url'] + except Exception as e: + self.module.fail_json(msg=e.msg) + + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_project_json(self): + return self.client.do( + 'GET', '/1.0/projects/{0}'.format(self.name), + ok_error_codes=[404] + ) + + @staticmethod + def _project_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return 'present' + + def _update_project(self): + if self.state == 'present': + if self.old_state == 'absent': + if self.new_name is None: + self._create_project() + else: + self.module.fail_json( + msg='new_name must not be set when the project does not exist and the state is present', + changed=False) + else: + if self.new_name is not None and self.new_name != self.name: + self._rename_project() + if self._needs_to_apply_project_configs(): + self._apply_project_configs() + elif self.state == 'absent': + if self.old_state == 'present': + if self.new_name is None: + self._delete_project() + else: + 
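# new_name combined with state=absent is ambiguous, so fail early instead of guessing +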
self.module.fail_json( + msg='new_name must not be set when the project exists and the specified state is absent', + changed=False) + + def _create_project(self): + config = self.config.copy() + config['name'] = self.name + self.client.do('POST', '/1.0/projects', config) + self.actions.append('create') + + def _rename_project(self): + config = {'name': self.new_name} + self.client.do('POST', '/1.0/projects/{0}'.format(self.name), config) + self.actions.append('rename') + self.name = self.new_name + + def _needs_to_change_project_config(self, key): + if key not in self.config: + return False + old_configs = self.old_project_json['metadata'].get(key, None) + return self.config[key] != old_configs + + def _needs_to_apply_project_configs(self): + return ( + self._needs_to_change_project_config('config') or + self._needs_to_change_project_config('description') + ) + + def _merge_dicts(self, source, destination): + """ Return a new dict that merges two dicts, + with values in the source dict overwriting those in the destination dict + + Args: + dict(source): source dict + dict(destination): destination dict + Kwargs: + None + Raises: + None + Returns: + dict(destination): merged dict""" + result = destination.copy() + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = result.setdefault(key, {}) + # assign the merged copy back; otherwise the nested merge would be lost + result[key] = self._merge_dicts(value, node) + else: + result[key] = value + return result + + def _apply_project_configs(self): + """ Selection of the procedure: rebuild or merge + + The standard behavior is that all information not contained + in the play is discarded. + + If "merge_project" is provided in the play and set to "True", then existing + configurations from the project and the newly defined ones are merged. + + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + old_config = dict() + old_metadata = self.old_project_json['metadata'].copy() + for attr in CONFIG_PARAMS: + old_config[attr] = old_metadata[attr] + + if self.module.params['merge_project']: + config = self._merge_dicts(self.config, old_config) + if config == old_config: + # no need to call the API if the merged config is the same + # as the old config + return + else: + config = self.config.copy() + # upload config to lxd + self.client.do('PUT', '/1.0/projects/{0}'.format(self.name), config) + self.actions.append('apply_projects_configs') + + def _delete_project(self): + self.client.do('DELETE', '/1.0/projects/{0}'.format(self.name)) + self.actions.append('delete') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_project_json = self._get_project_json() + self.old_state = self._project_json_to_module_state( + self.old_project_json) + self._update_project() + + state_changed = len(self.actions) > 0 + result_json = { + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + new_name=dict( + type='str', + ), + config=dict( + type='dict', + ), + description=dict( + type='str', + ), +
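# merge_project switches _apply_project_configs() from replace to merge semantics +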
merge_project=dict( + type='bool', + default=False + ), + state=dict( + choices=PROJECTS_STATES, + default='present' + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket' + ), + client_key=dict( + type='path', + aliases=['key_file'] + ), + client_cert=dict( + type='path', + aliases=['cert_file'] + ), + trust_password=dict(type='str', no_log=True) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDProjectManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py deleted file mode 120000 index 4f54f8ba19..0000000000 --- a/plugins/modules/macports.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/macports.py \ No newline at end of file diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py new file mode 100644 index 0000000000..7e9e3c0b57 --- /dev/null +++ b/plugins/modules/macports.py @@ -0,0 +1,324 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Jimmy Tang +# Based on okpg (Patrick Pelletier ), pacman +# (Afterburn) and pkgin (Shaun Zinck) modules +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: macports +author: "Jimmy Tang (@jcftang)" +short_description: Package manager for MacPorts +description: + - Manages MacPorts packages (ports). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - A list of port names. + aliases: ['port'] + type: list + elements: str + selfupdate: + description: + - Update Macports and the ports tree, either prior to installing ports or as a separate step. + - Equivalent to running C(port selfupdate). + aliases: ['update_cache', 'update_ports'] + default: false + type: bool + state: + description: + - Indicates the desired state of the port. + choices: ['present', 'absent', 'active', 'inactive', 'installed', 'removed'] + default: present + type: str + upgrade: + description: + - Upgrade all outdated ports, either prior to installing ports or as a separate step. + - Equivalent to running C(port upgrade outdated). + default: false + type: bool + variant: + description: + - A port variant specification. + - O(variant) is only supported with O(state=installed) and O(state=present). 
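+ - For example, V(+universal+x11) enables the C(universal) and C(x11) variants (see the examples below).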
+ aliases: ['variants'] + type: str +""" + +EXAMPLES = r""" +- name: Install the foo port + community.general.macports: + name: foo + +- name: Install the universal, x11 variant of the foo port + community.general.macports: + name: foo + variant: +universal+x11 + +- name: Install a list of ports + community.general.macports: + name: "{{ ports }}" + vars: + ports: + - foo + - foo-tools + +- name: Update Macports and the ports tree, then upgrade all outdated ports + community.general.macports: + selfupdate: true + upgrade: true + +- name: Update Macports and the ports tree, then install the foo port + community.general.macports: + name: foo + selfupdate: true + +- name: Remove the foo port + community.general.macports: + name: foo + state: absent + +- name: Activate the foo port + community.general.macports: + name: foo + state: active + +- name: Deactivate the foo port + community.general.macports: + name: foo + state: inactive +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def selfupdate(module, port_path): + """ Update Macports and the ports tree. """ + + rc, out, err = module.run_command([port_path, "-v", "selfupdate"]) + + if rc == 0: + updated = any( + re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or + re.search(r'Installing new Macports release', s.strip()) + for s in out.split('\n') + if s + ) + if updated: + changed = True + msg = "Macports updated successfully" + else: + changed = False + msg = "Macports already up-to-date" + + return (changed, msg, out, err) + else: + module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err) + + +def upgrade(module, port_path): + """ Upgrade outdated ports. """ + + rc, out, err = module.run_command([port_path, "upgrade", "outdated"]) + + # rc is 1 when nothing to upgrade so check stdout first. + if out.strip() == "Nothing to upgrade.": + changed = False + msg = "Ports already upgraded" + return (changed, msg, out, err) + elif rc == 0: + changed = True + msg = "Outdated ports upgraded successfully" + return (changed, msg, out, err) + else: + module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err) + + +def query_port(module, port_path, name, state="present"): + """ Returns whether a port is installed or not. """ + + if state == "present": + + rc, out, err = module.run_command([port_path, "-q", "installed", name]) + + if rc == 0 and out.strip().startswith(name + " "): + return True + + return False + + elif state == "active": + + rc, out, err = module.run_command([port_path, "-q", "installed", name]) + + if rc == 0 and "(active)" in out: + return True + + return False + + +def remove_ports(module, port_path, ports, stdout, stderr): + """ Uninstalls one or more ports if installed. 
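+ Each port is queried first, so ports that are already absent are skipped and the module stays idempotent. +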
""" + + remove_c = 0 + # Using a for loop in case of error, we can report the port that failed + for port in ports: + # Query the port first, to see if we even need to remove + if not query_port(module, port_path, port): + continue + + rc, out, err = module.run_command([port_path, "uninstall", port]) + stdout += out + stderr += err + if query_port(module, port_path, port): + module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr) + + +def install_ports(module, port_path, ports, variant, stdout, stderr): + """ Installs one or more ports if not already installed. """ + + install_c = 0 + + for port in ports: + if query_port(module, port_path, port): + continue + + rc, out, err = module.run_command([port_path, "install", port, variant]) + stdout += out + stderr += err + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr) + + +def activate_ports(module, port_path, ports, stdout, stderr): + """ Activate a port if it is inactive. """ + + activate_c = 0 + + for port in ports: + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr) + + if query_port(module, port_path, port, state="active"): + continue + + rc, out, err = module.run_command([port_path, "activate", port]) + stdout += out + stderr += err + + if not query_port(module, port_path, port, state="active"): + module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + activate_c += 1 + + if activate_c > 0: + module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr) + + +def deactivate_ports(module, port_path, ports, stdout, stderr): + """ Deactivate a port if it is active. 
""" + + deactivated_c = 0 + + for port in ports: + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr) + + if not query_port(module, port_path, port, state="active"): + continue + + rc, out, err = module.run_command([port_path, "deactivate", port]) + stdout += out + stderr += err + if query_port(module, port_path, port, state="active"): + module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + deactivated_c += 1 + + if deactivated_c > 0: + module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', aliases=["port"]), + selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), + state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), + upgrade=dict(default=False, type='bool'), + variant=dict(aliases=["variants"], type='str') + ) + ) + + stdout = "" + stderr = "" + + port_path = module.get_bin_path('port', True, ['/opt/local/bin']) + + p = module.params + + if p["selfupdate"]: + (changed, msg, out, err) = selfupdate(module, port_path) + stdout += out + stderr += err + if not (p["name"] or p["upgrade"]): + module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr) + + if p["upgrade"]: + (changed, msg, out, err) = upgrade(module, port_path) + stdout += out + stderr += err + if not p["name"]: + module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr) + + pkgs = p["name"] + + variant = p["variant"] + + if p["state"] in ["present", "installed"]: + install_ports(module, port_path, pkgs, variant, stdout, stderr) + + elif p["state"] in ["absent", "removed"]: + remove_ports(module, port_path, pkgs, stdout, stderr) + + elif p["state"] == "active": + activate_ports(module, port_path, pkgs, stdout, stderr) + + elif p["state"] == "inactive": + deactivate_ports(module, port_path, pkgs, stdout, stderr) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py deleted file mode 120000 index 8ddaedfc04..0000000000 --- a/plugins/modules/mail.py +++ /dev/null @@ -1 +0,0 @@ -./notification/mail.py \ No newline at end of file diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py new file mode 100644 index 0000000000..4365f56a33 --- /dev/null +++ b/plugins/modules/mail.py @@ -0,0 +1,413 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Dag Wieers (@dagwieers) +module: mail +short_description: Send an email +description: + - This module is useful for sending emails from playbooks. + - One may wonder why automate sending emails? In complex environments there are from time to time processes that cannot + be automated, either because you lack the authority to make it so, or because not everyone agrees to a common approach. 
+ - If you cannot automate a specific step, but the step is non-blocking, sending out an email to the responsible party to + make them perform their part of the bargain is an elegant way to put the responsibility in someone else's lap. + - Of course sending out a mail can be equally useful as a way to notify one or more people in a team that a specific action + has been (successfully) taken. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + sender: + description: + - The email-address the mail is sent from. May contain address and phrase. + type: str + default: root + aliases: [from] + to: + description: + - The email-address(es) the mail is being sent to. + - This is a list, which may contain address and phrase portions. + type: list + elements: str + default: root + aliases: [recipients] + cc: + description: + - The email-address(es) the mail is being copied to. + - This is a list, which may contain address and phrase portions. + type: list + elements: str + default: [] + bcc: + description: + - The email-address(es) the mail is being 'blind' copied to. + - This is a list, which may contain address and phrase portions. + type: list + elements: str + default: [] + subject: + description: + - The subject of the email being sent. + required: true + type: str + aliases: [msg] + body: + description: + - The body of the email being sent. + type: str + username: + description: + - The username to use for SMTP authentication, if the server requires it. + type: str + password: + description: + - The password to use for SMTP authentication, if the server requires it. + type: str + host: + description: + - The mail server. + type: str + default: localhost + port: + description: + - The mail server port. + - This must be a valid integer between V(1) and V(65534). + type: int + default: 25 + attach: + description: + - A list of pathnames of files to attach to the message. + - Attached files have their content-type set to C(application/octet-stream). + type: list + elements: path + default: [] + headers: + description: + - A list of headers which should be added to the message. + - Each individual header is specified as V(header=value) (see example below). + type: list + elements: str + default: [] + charset: + description: + - The character set of the email being sent. + type: str + default: utf-8 + subtype: + description: + - The minor MIME type; can be either V(plain) or V(html). + - The major type is always V(text). + type: str + choices: [html, plain] + default: plain + secure: + description: + - If V(always), the connection only sends the email if the connection is encrypted. If the server does not accept the encrypted + connection, it fails. + - If V(try), the connection attempts to set up a secure SSL/TLS session before trying to send. + - If V(never), the connection does not attempt to set up a secure SSL/TLS session before sending. + - If V(starttls), the connection tries to upgrade to a secure SSL/TLS connection before sending. If it is unable to + do so, it fails. + type: str + choices: [always, never, starttls, try] + default: try + timeout: + description: + - Sets the timeout in seconds for connection attempts. + type: int + default: 20 + ehlohost: + description: + - Allows for manual specification of the host for EHLO. + type: str + version_added: 3.8.0 + message_id_domain: + description: + - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID).
+ type: str + default: ansible + version_added: 8.2.0 +""" + +EXAMPLES = r""" +- name: Example playbook sending mail to root + community.general.mail: + subject: System {{ ansible_hostname }} has been successfully provisioned. + delegate_to: localhost + +- name: Sending an e-mail using Gmail SMTP servers + community.general.mail: + host: smtp.gmail.com + port: 587 + username: username@gmail.com + password: mysecret + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + delegate_to: localhost + +- name: Send e-mail to a bunch of users, attaching files + community.general.mail: + host: 127.0.0.1 + port: 2025 + subject: Ansible-report + body: Hello, this is an e-mail. I hope you like it ;-) + from: jane@example.net (Jane Jolie) + to: + - John Doe + - Suzie Something + cc: Charlie Root + attach: + - /etc/group + - /tmp/avatar2.png + headers: + - Reply-To=john@example.com + - X-Special="Something or other" + charset: us-ascii + delegate_to: localhost + +- name: Sending an e-mail using the remote machine, not the Ansible controller node + community.general.mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + +- name: Sending an e-mail using Legacy SSL to the remote machine + community.general.mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + secure: always + +- name: Sending an e-mail using StartTLS to the remote machine + community.general.mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + secure: starttls + +- name: Sending an e-mail using StartTLS, remote server, custom EHLO, and timeout of 10 seconds + community.general.mail: + host: some.smtp.host.tld + port: 25 + timeout: 10 + ehlohost: my-resolvable-hostname.tld + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. 
+ secure: starttls +""" + +import os +import smtplib +import ssl +import traceback +from email import encoders +from email.utils import parseaddr, formataddr, formatdate, make_msgid +from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from email.header import Header + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + username=dict(type='str'), + password=dict(type='str', no_log=True), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=25), + ehlohost=dict(type='str'), + sender=dict(type='str', default='root', aliases=['from']), + to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), + cc=dict(type='list', elements='str', default=[]), + bcc=dict(type='list', elements='str', default=[]), + subject=dict(type='str', required=True, aliases=['msg']), + body=dict(type='str'), + attach=dict(type='list', elements='path', default=[]), + headers=dict(type='list', elements='str', default=[]), + charset=dict(type='str', default='utf-8'), + subtype=dict(type='str', default='plain', choices=['html', 'plain']), + secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), + timeout=dict(type='int', default=20), + message_id_domain=dict(type='str', default='ansible'), + ), + required_together=[['password', 'username']], + ) + + username = module.params.get('username') + password = module.params.get('password') + host = module.params.get('host') + port = module.params.get('port') + local_hostname = module.params.get('ehlohost') + sender = module.params.get('sender') + recipients = module.params.get('to') + copies = module.params.get('cc') + blindcopies = module.params.get('bcc') + subject = module.params.get('subject') + body = module.params.get('body') + attach_files = module.params.get('attach') + headers = module.params.get('headers') + charset = module.params.get('charset') + subtype = module.params.get('subtype') + secure = module.params.get('secure') + timeout = module.params.get('timeout') + message_id_domain = module.params['message_id_domain'] + + code = 0 + secure_state = False + sender_phrase, sender_addr = parseaddr(sender) + + if not body: + body = subject + + try: + if secure != 'never': + try: + smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) + code, smtpmessage = smtp.connect(host, port) + secure_state = True + except ssl.SSLError as e: + if secure == 'always': + module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' % + (host, port, to_native(e)), exception=traceback.format_exc()) + except Exception: + pass + + if not secure_state: + smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout) + code, smtpmessage = smtp.connect(host, port) + + except smtplib.SMTPException as e: + module.fail_json(rc=1, msg='Unable to Connect %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) + + try: + smtp.ehlo() + except smtplib.SMTPException as e: + module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) + + if int(code) > 0: + if not secure_state and secure in ('starttls', 'try'): + if smtp.has_extn('STARTTLS'): + try: + smtp.starttls() + secure_state = True + except smtplib.SMTPException as e: + module.fail_json(rc=1, 
msg='Unable to start an encrypted session to %s:%s: %s' % + (host, port, to_native(e)), exception=traceback.format_exc()) + try: + smtp.ehlo() + except smtplib.SMTPException as e: + module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) + else: + if secure == 'starttls': + module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port)) + + if username and password: + if smtp.has_extn('AUTH'): + try: + smtp.login(username, password) + except smtplib.SMTPAuthenticationError: + module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port)) + except smtplib.SMTPException: + module.fail_json(rc=1, msg='No Suitable authentication method was found on %s:%s' % (host, port)) + else: + module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port)) + + if not secure_state and (username and password): + module.warn('Username and Password was sent without encryption') + + msg = MIMEMultipart(_charset=charset) + msg['From'] = formataddr((sender_phrase, sender_addr)) + msg['Date'] = formatdate(localtime=True) + msg['Subject'] = Header(subject, charset) + msg['Message-ID'] = make_msgid(domain=message_id_domain) + msg.preamble = "Multipart message" + + for header in headers: + # NOTE: Backward compatible with old syntax using '|' as delimiter + for hdr in [x.strip() for x in header.split('|')]: + try: + h_key, h_val = hdr.split('=', 1) + h_val = to_native(Header(h_val, charset)) + msg.add_header(h_key, h_val) + except Exception: + module.warn("Skipping header '%s', unable to parse" % hdr) + + if 'X-Mailer' not in msg: + msg.add_header('X-Mailer', 'Ansible mail module') + + addr_list = [] + for addr in [x.strip() for x in blindcopies]: + addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase + + to_list = [] + for addr in [x.strip() for x in recipients]: + to_list.append(formataddr(parseaddr(addr))) + addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase + msg['To'] = ", ".join(to_list) + + cc_list = [] + for addr in [x.strip() for x in copies]: + cc_list.append(formataddr(parseaddr(addr))) + addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase + msg['Cc'] = ", ".join(cc_list) + + part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset) + msg.attach(part) + + # NOTE: Backward compatibility with old syntax using space as delimiter is not retained + # This breaks files with spaces in it :-( + for filename in attach_files: + try: + part = MIMEBase('application', 'octet-stream') + with open(filename, 'rb') as fp: + part.set_payload(fp.read()) + encoders.encode_base64(part) + part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename)) + msg.attach(part) + except Exception as e: + module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" % + (filename, to_native(e)), exception=traceback.format_exc()) + + composed = msg.as_string() + + try: + result = smtp.sendmail(sender_addr, set(addr_list), composed) + except Exception as e: + module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" % + (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc()) + + smtp.quit() + + if result: + for key in result: + module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1])) + module.exit_json(msg='Failed to send mail to at least one recipient', result=result) + + module.exit_json(msg='Mail sent 
successfully', result=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/make.py b/plugins/modules/make.py deleted file mode 120000 index 7ecfcd0af7..0000000000 --- a/plugins/modules/make.py +++ /dev/null @@ -1 +0,0 @@ -./system/make.py \ No newline at end of file diff --git a/plugins/modules/make.py b/plugins/modules/make.py new file mode 100644 index 0000000000..141dd2df4d --- /dev/null +++ b/plugins/modules/make.py @@ -0,0 +1,261 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Linus Unnebäck +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: make +short_description: Run targets in a Makefile +requirements: + - make +author: Linus Unnebäck (@LinusU) +description: + - Run targets in a Makefile. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + chdir: + description: + - Change to this directory before running make. + type: path + required: true + file: + description: + - Use a custom Makefile. + type: path + jobs: + description: + - Set the number of make jobs to run concurrently. + - Typically if set, this would be the number of processors and/or threads available to the machine. + - This is not supported by all make implementations. + type: int + version_added: 2.0.0 + make: + description: + - Use a specific make binary. + type: path + version_added: '0.2.0' + params: + description: + - Any extra parameters to pass to make. + - If the value is empty, only the key is used. For example, V(FOO:) produces V(FOO), not V(FOO=). + type: dict + target: + description: + - The target to run. + - Typically this would be something like V(install), V(test), or V(all). + - O(target) and O(targets) are mutually exclusive. + type: str + targets: + description: + - The list of targets to run. + - Typically this would be something like V(install), V(test), or V(all). + - O(target) and O(targets) are mutually exclusive. + type: list + elements: str + version_added: 7.2.0 +""" + +EXAMPLES = r""" +- name: Build the default target + community.general.make: + chdir: /home/ubuntu/cool-project + +- name: Run 'install' target as root + community.general.make: + chdir: /home/ubuntu/cool-project + target: install + become: true + +- name: Build 'all' target with extra arguments + community.general.make: + chdir: /home/ubuntu/cool-project + target: all + params: + NUM_THREADS: 4 + BACKEND: lapack + +- name: Build 'all' target with a custom Makefile + community.general.make: + chdir: /home/ubuntu/cool-project + target: all + file: /some-project/Makefile + +- name: build arm64 kernel on FreeBSD, with 16 parallel jobs + community.general.make: + chdir: /usr/src + jobs: 16 + target: buildkernel + params: + # This adds -DWITH_FDT to the command line: + -DWITH_FDT: + # The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line: + TARGET: arm64 + TARGET_ARCH: aarch64 +""" + +RETURN = r""" +chdir: + description: + - The value of the module parameter O(chdir). + type: str + returned: success +command: + description: + - The command built and executed by the module. + type: str + returned: success + version_added: 6.5.0 +file: + description: + - The value of the module parameter O(file). + type: str + returned: success +jobs: + description: + - The value of the module parameter O(jobs). 
+ type: int + returned: success +params: + description: + - The value of the module parameter O(params). + type: dict + returned: success +target: + description: + - The value of the module parameter O(target). + type: str + returned: success +targets: + description: + - The value of the module parameter O(targets). + type: str + returned: success + version_added: 7.2.0 +""" + +from shlex import quote as shlex_quote +from ansible.module_utils.basic import AnsibleModule + + +def run_command(command, module, check_rc=True): + """ + Run a command using the module, return + the result code and std{err,out} content. + + :param command: list of command arguments + :param module: Ansible make module instance + :return: return code, stdout content, stderr content + """ + rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir']) + return rc, sanitize_output(out), sanitize_output(err) + + +def sanitize_output(output): + """ + Sanitize the output string before we + pass it to module.fail_json. Defaults + the string to empty if it is None, else + strips trailing newlines. + + :param output: output to sanitize + :return: sanitized output + """ + if output is None: + return '' + else: + return output.rstrip("\r\n") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + target=dict(type='str'), + targets=dict(type='list', elements='str'), + params=dict(type='dict'), + chdir=dict(type='path', required=True), + file=dict(type='path'), + make=dict(type='path'), + jobs=dict(type='int'), + ), + mutually_exclusive=[('target', 'targets')], + supports_check_mode=True, + ) + + make_path = module.params['make'] + if make_path is None: + # Build up the invocation of `make` we are going to use + # For non-Linux OSes, prefer gmake (GNU make) over make + make_path = module.get_bin_path('gmake', required=False) + if not make_path: + # Fall back to system make + make_path = module.get_bin_path('make', required=True) + if module.params['params'] is not None: + make_parameters = [k + (('=' + str(v)) if v is not None else '') for k, v in module.params['params'].items()] + else: + make_parameters = [] + + # build command: + # handle any make specific arguments included in params + base_command = [make_path] + if module.params['jobs'] is not None: + jobs = str(module.params['jobs']) + base_command.extend(["-j", jobs]) + if module.params['file'] is not None: + base_command.extend(["-f", module.params['file']]) + + # add make target + if module.params['target']: + base_command.append(module.params['target']) + elif module.params['targets']: + base_command.extend(module.params['targets']) + + # add makefile parameters + base_command.extend(make_parameters) + + # Check if the target is already up to date + rc, out, err = run_command(base_command + ['-q'], module, check_rc=False) + if module.check_mode: + # If we've been asked to do a dry run, we only need + # to report whether or not the target is up to date + changed = (rc != 0) + else: + if rc == 0: + # The target is up to date, so we don't have to + # do anything + changed = False + else: + # The target isn't up to date, so we need to run it + rc, out, err = run_command(base_command, module, check_rc=True) + changed = True + + # We don't report the return code, as if this module failed + # we would be calling fail_json from run_command, so even if + # we had a non-zero return code, we did not fail. 
However, if + # we report a non-zero return code here, we will be marked as + # failed regardless of what we signal using the failed= kwarg. + module.exit_json( + changed=changed, + failed=False, + stdout=out, + stderr=err, + target=module.params['target'], + targets=module.params['targets'], + params=module.params['params'], + chdir=module.params['chdir'], + file=module.params['file'], + jobs=module.params['jobs'], + command=' '.join([shlex_quote(part) for part in base_command]), + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/manageiq_alert_profiles.py b/plugins/modules/manageiq_alert_profiles.py deleted file mode 120000 index 20850f1cea..0000000000 --- a/plugins/modules/manageiq_alert_profiles.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/manageiq/manageiq_alert_profiles.py \ No newline at end of file diff --git a/plugins/modules/manageiq_alert_profiles.py b/plugins/modules/manageiq_alert_profiles.py new file mode 100644 index 0000000000..6f04309eff --- /dev/null +++ b/plugins/modules/manageiq_alert_profiles.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# Copyright (c) 2017 Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_alert_profiles + +short_description: Configuration of alert profiles for ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Elad Alfassa (@elad661) +description: + - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - V(absent) - alert profile should not exist, + - V(present) - alert profile should exist. + choices: ['absent', 'present'] + default: 'present' + name: + type: str + description: + - The unique alert profile name in ManageIQ. + required: true + resource_type: + type: str + description: + - The resource type for the alert profile in ManageIQ. Required when O(state=present). + choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer'] + alerts: + type: list + elements: str + description: + - List of alert descriptions to assign to this profile. + - Required if O(state=present). + notes: + type: str + description: + - Optional notes for this profile. +""" + +EXAMPLES = r""" +- name: Add an alert profile to ManageIQ + community.general.manageiq_alert_profiles: + state: present + name: Test profile + resource_type: ContainerNode + alerts: + - Test Alert 01 + - Test Alert 02 + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! + +- name: Delete an alert profile from ManageIQ + community.general.manageiq_alert_profiles: + state: absent + name: Test profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQAlertProfiles(object): + """ Object to execute alert profile management operations in manageiq. 
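+
+        A minimal usage sketch (hypothetical variable names; assumes an
+        authenticated ManageIQ wrapper like the one built in main() below):
+
+            profiles = ManageIQAlertProfiles(manageiq)
+            existing = profiles.get_profiles()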
+ """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url) + + def get_profiles(self): + """ Get all alert profiles from ManageIQ + """ + try: + response = self.client.get(self.url + '?expand=alert_definitions,resources') + except Exception as e: + self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e)) + return response.get('resources') or [] + + def get_alerts(self, alert_descriptions): + """ Get a list of alert hrefs from a list of alert descriptions + """ + alerts = [] + for alert_description in alert_descriptions: + alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", description=alert_description) + alerts.append(alert['href']) + + return alerts + + def add_profile(self, profile): + """ Add a new alert profile to ManageIQ + """ + # find all alerts to add to the profile + # we do this first to fail early if one is missing. + alerts = self.get_alerts(profile['alerts']) + + # build the profile dict to send to the server + + profile_dict = dict(name=profile['name'], + description=profile['name'], + mode=profile['resource_type']) + if profile['notes']: + profile_dict['set_data'] = dict(notes=profile['notes']) + + # send it to the server + try: + result = self.client.post(self.url, resource=profile_dict, action="create") + except Exception as e: + self.module.fail_json(msg="Creating profile failed {error}".format(error=e)) + + # now that it has been created, we can assign the alerts + self.assign_or_unassign(result['results'][0], alerts, "assign") + + msg = "Profile {name} created successfully" + msg = msg.format(name=profile['name']) + return dict(changed=True, msg=msg) + + def delete_profile(self, profile): + """ Delete an alert profile from ManageIQ + """ + try: + self.client.post(profile['href'], action="delete") + except Exception as e: + self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e)) + + msg = "Successfully deleted profile {name}".format(name=profile['name']) + return dict(changed=True, msg=msg) + + def get_alert_href(self, alert): + """ Get an absolute href for an alert + """ + return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id']) + + def assign_or_unassign(self, profile, resources, action): + """ Assign or unassign alerts to profile, and validate the result. 
+ """ + alerts = [dict(href=href) for href in resources] + + subcollection_url = profile['href'] + '/alert_definitions' + try: + result = self.client.post(subcollection_url, resources=alerts, action=action) + if len(result['results']) != len(alerts): + msg = "Failed to {action} alerts to profile '{name}'," +\ + "expected {expected} alerts to be {action}ed," +\ + "but only {changed} were {action}ed" + msg = msg.format(action=action, + name=profile['name'], + expected=len(alerts), + changed=result['results']) + self.module.fail_json(msg=msg) + except Exception as e: + msg = "Failed to {action} alerts to profile '{name}': {error}" + msg = msg.format(action=action, name=profile['name'], error=e) + self.module.fail_json(msg=msg) + + return result['results'] + + def update_profile(self, old_profile, desired_profile): + """ Update alert profile in ManageIQ + """ + changed = False + # we need to use client.get to query the alert definitions + old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions') + + # figure out which alerts we need to assign / unassign + # alerts listed by the user: + desired_alerts = set(self.get_alerts(desired_profile['alerts'])) + + # alert which currently exist in the profile + if 'alert_definitions' in old_profile: + # we use get_alert_href to have a direct href to the alert + existing_alerts = set(self.get_alert_href(alert) for alert in old_profile['alert_definitions']) + else: + # no alerts in this profile + existing_alerts = set() + + to_add = list(desired_alerts - existing_alerts) + to_remove = list(existing_alerts - desired_alerts) + + # assign / unassign the alerts, if needed + + if to_remove: + self.assign_or_unassign(old_profile, to_remove, "unassign") + changed = True + if to_add: + self.assign_or_unassign(old_profile, to_add, "assign") + changed = True + + # update other properties + profile_dict = dict() + + if old_profile['mode'] != desired_profile['resource_type']: + # mode needs to be updated + profile_dict['mode'] = desired_profile['resource_type'] + + # check if notes need to be updated + old_notes = old_profile.get('set_data', {}).get('notes') + + if desired_profile['notes'] != old_notes: + profile_dict['set_data'] = dict(notes=desired_profile['notes']) + + if profile_dict: + # if we have any updated values + changed = True + try: + result = self.client.post(old_profile['href'], + resource=profile_dict, + action="edit") + except Exception as e: + msg = "Updating profile '{name}' failed: {error}" + msg = msg.format(name=old_profile['name'], error=e) + self.module.fail_json(msg=msg) + + if changed: + msg = "Profile {name} updated successfully".format(name=desired_profile['name']) + else: + msg = "No update needed for profile {name}".format(name=desired_profile['name']) + return dict(changed=changed, msg=msg) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + resource_type=dict(type='str', choices=['Vm', + 'ContainerNode', + 'MiqServer', + 'Host', + 'Storage', + 'EmsCluster', + 'ExtManagementSystem', + 'MiddlewareServer']), + alerts=dict(type='list', elements='str'), + notes=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule(argument_spec=argument_spec, + required_if=[('state', 'present', ['resource_type', 'alerts'])]) + + state = module.params['state'] + name = module.params['name'] + + manageiq = ManageIQ(module) + 
+    manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
+
+    existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", name=name)
+
+    # we need to add or update the alert profile
+    if state == "present":
+        if not existing_profile:
+            # a profile with this name doesn't exist yet, let's create it
+            res_args = manageiq_alert_profiles.add_profile(module.params)
+        else:
+            # a profile with this name exists, we might need to update it
+            res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
+
+    # this alert profile should not exist
+    if state == "absent":
+        # if we have an alert profile with this name, delete it
+        if existing_profile:
+            res_args = manageiq_alert_profiles.delete_profile(existing_profile)
+        else:
+            # This alert profile does not exist in ManageIQ, and that's okay
+            msg = "Alert profile '{name}' does not exist in ManageIQ"
+            msg = msg.format(name=name)
+            res_args = dict(changed=False, msg=msg)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py
deleted file mode 120000
index 6b61c978c2..0000000000
--- a/plugins/modules/manageiq_alerts.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/manageiq/manageiq_alerts.py
\ No newline at end of file
diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py
new file mode 100644
index 0000000000..dec3dfad57
--- /dev/null
+++ b/plugins/modules/manageiq_alerts.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: manageiq_alerts
+
+short_description: Configuration of alerts in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Elad Alfassa (@elad661)
+description:
+  - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - V(absent) - alert should not exist,
+      - V(present) - alert should exist.
+    required: false
+    choices: ['absent', 'present']
+    default: 'present'
+  description:
+    type: str
+    description:
+      - The unique alert description in ManageIQ.
+      - Required when state is "absent" or "present".
+  resource_type:
+    type: str
+    description:
+      - The entity type for the alert in ManageIQ. Required when O(state=present).
+    choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer']
+  expression_type:
+    type: str
+    description:
+      - Expression type.
+    default: hash
+    choices: ["hash", "miq"]
+  expression:
+    type: dict
+    description:
+      - The alert expression for ManageIQ.
+      - Can either be in the "Miq Expression" format or the "Hash Expression" format.
+      - Required if O(state=present).
+  enabled:
+    description:
+      - Enable or disable the alert. Required if O(state=present).
+    type: bool
+  options:
+    type: dict
+    description:
+      - Additional alert options, such as notification type and frequency.
+""" + +EXAMPLES = r""" +- name: Add an alert with a "hash expression" to ManageIQ + community.general.manageiq_alerts: + state: present + description: Test Alert 01 + options: + notifications: + email: + to: ["example@example.com"] + from: "example@example.com" + resource_type: ContainerNode + expression: + eval_method: hostd_log_threshold + mode: internal + options: {} + enabled: true + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! + +- name: Add an alert with a "miq expression" to ManageIQ + community.general.manageiq_alerts: + state: present + description: Test Alert 02 + options: + notifications: + email: + to: ["example@example.com"] + from: "example@example.com" + resource_type: Vm + expression_type: miq + expression: + and: + - CONTAINS: + tag: Vm.managed-environment + value: prod + - not: + CONTAINS: + tag: Vm.host.managed-environment + value: prod + enabled: true + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! + +- name: Delete an alert from ManageIQ + community.general.manageiq_alerts: + state: absent + description: Test Alert 01 + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQAlert(object): + """ Represent a ManageIQ alert. Can be initialized with both the format + we receive from the server and the format we get from the user. + """ + def __init__(self, alert): + self.description = alert['description'] + self.db = alert['db'] + self.enabled = alert['enabled'] + self.options = alert['options'] + self.hash_expression = None + self.miq_expressipn = None + + if 'hash_expression' in alert: + self.hash_expression = alert['hash_expression'] + if 'miq_expression' in alert: + self.miq_expression = alert['miq_expression'] + if 'exp' in self.miq_expression: + # miq_expression is a field that needs a special case, because + # it is returned surrounded by a dict named exp even though we don't + # send it with that dict. + self.miq_expression = self.miq_expression['exp'] + + def __eq__(self, other): + """ Compare two ManageIQAlert objects + """ + return self.__dict__ == other.__dict__ + + +class ManageIQAlerts(object): + """ Object to execute alert management operations in manageiq. 
+ """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url) + + def get_alerts(self): + """ Get all alerts from ManageIQ + """ + try: + response = self.client.get(self.alerts_url + '?expand=resources') + except Exception as e: + self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e)) + return response.get('resources', []) + + def validate_hash_expression(self, expression): + """ Validate a 'hash expression' alert definition + """ + # hash expressions must have the following fields + for key in ['options', 'eval_method', 'mode']: + if key not in expression: + msg = "Hash expression is missing required field {key}".format(key=key) + self.module.fail_json(msg) + + def create_alert_dict(self, params): + """ Create a dict representing an alert + """ + if params['expression_type'] == 'hash': + # hash expression supports depends on https://github.com/ManageIQ/manageiq-api/pull/76 + self.validate_hash_expression(params['expression']) + expression_type = 'hash_expression' + else: + # actually miq_expression, but we call it "expression" for backwards-compatibility + expression_type = 'expression' + + # build the alret + alert = dict(description=params['description'], + db=params['resource_type'], + options=params['options'], + enabled=params['enabled']) + + # add the actual expression. + alert.update({expression_type: params['expression']}) + + return alert + + def add_alert(self, alert): + """ Add a new alert to ManageIQ + """ + try: + result = self.client.post(self.alerts_url, action='create', resource=alert) + + msg = "Alert {description} created successfully: {details}" + msg = msg.format(description=alert['description'], details=result) + return dict(changed=True, msg=msg) + except Exception as e: + msg = "Creating alert {description} failed: {error}" + if "Resource expression needs be specified" in str(e): + # Running on an older version of ManageIQ and trying to create a hash expression + msg = msg.format(description=alert['description'], + error="Your version of ManageIQ does not support hash_expression") + else: + msg = msg.format(description=alert['description'], error=e) + self.module.fail_json(msg=msg) + + def delete_alert(self, alert): + """ Delete an alert + """ + try: + result = self.client.post('{url}/{id}'.format(url=self.alerts_url, + id=alert['id']), + action="delete") + msg = "Alert {description} deleted: {details}" + msg = msg.format(description=alert['description'], details=result) + return dict(changed=True, msg=msg) + except Exception as e: + msg = "Deleting alert {description} failed: {error}" + msg = msg.format(description=alert['description'], error=e) + self.module.fail_json(msg=msg) + + def update_alert(self, existing_alert, new_alert): + """ Update an existing alert with the values from `new_alert` + """ + new_alert_obj = ManageIQAlert(new_alert) + if new_alert_obj == ManageIQAlert(existing_alert): + # no change needed - alerts are identical + return dict(changed=False, msg="No update needed") + else: + try: + url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id']) + result = self.client.post(url, action="edit", resource=new_alert) + + # make sure that the update was indeed successful by comparing + # the result to the expected result. + if new_alert_obj == ManageIQAlert(result): + # success! 
+ msg = "Alert {description} updated successfully: {details}" + msg = msg.format(description=existing_alert['description'], details=result) + + return dict(changed=True, msg=msg) + else: + # unexpected result + msg = "Updating alert {description} failed, unexpected result {details}" + msg = msg.format(description=existing_alert['description'], details=result) + + self.module.fail_json(msg=msg) + + except Exception as e: + msg = "Updating alert {description} failed: {error}" + if "Resource expression needs be specified" in str(e): + # Running on an older version of ManageIQ and trying to update a hash expression + msg = msg.format(description=existing_alert['description'], + error="Your version of ManageIQ does not support hash_expression") + else: + msg = msg.format(description=existing_alert['description'], error=e) + self.module.fail_json(msg=msg) + + +def main(): + argument_spec = dict( + description=dict(type='str'), + resource_type=dict(type='str', choices=['Vm', + 'ContainerNode', + 'MiqServer', + 'Host', + 'Storage', + 'EmsCluster', + 'ExtManagementSystem', + 'MiddlewareServer']), + expression_type=dict(type='str', default='hash', choices=['miq', 'hash']), + expression=dict(type='dict'), + options=dict(type='dict'), + enabled=dict(type='bool'), + state=dict(default='present', + choices=['present', 'absent']), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule(argument_spec=argument_spec, + required_if=[('state', 'present', ['description', + 'resource_type', + 'expression', + 'enabled', + 'options']), + ('state', 'absent', ['description'])]) + + state = module.params['state'] + description = module.params['description'] + + manageiq = ManageIQ(module) + manageiq_alerts = ManageIQAlerts(manageiq) + + existing_alert = manageiq.find_collection_resource_by("alert_definitions", + description=description) + + # we need to add or update the alert + if state == "present": + alert = manageiq_alerts.create_alert_dict(module.params) + + if not existing_alert: + # an alert with this description doesn't exist yet, let's create it + res_args = manageiq_alerts.add_alert(alert) + else: + # an alert with this description exists, we might need to update it + res_args = manageiq_alerts.update_alert(existing_alert, alert) + + # this alert should not exist + elif state == "absent": + # if we have an alert with this description, delete it + if existing_alert: + res_args = manageiq_alerts.delete_alert(existing_alert) + else: + # it doesn't exist, and that's okay + msg = "Alert '{description}' does not exist in ManageIQ" + msg = msg.format(description=description) + res_args = dict(changed=False, msg=msg) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py deleted file mode 120000 index 5a04df529f..0000000000 --- a/plugins/modules/manageiq_group.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/manageiq/manageiq_group.py \ No newline at end of file diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py new file mode 100644 index 0000000000..39bc641967 --- /dev/null +++ b/plugins/modules/manageiq_group.py @@ -0,0 +1,638 @@ +#!/usr/bin/python +# Copyright (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from 
__future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: manageiq_group
+
+short_description: Management of groups in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Evert Mulder (@evertmulder)
+description:
+  - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+  - manageiq-client
+
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - V(absent) - group should not exist,
+      - V(present) - group should exist.
+    choices: ['absent', 'present']
+    default: 'present'
+  description:
+    type: str
+    description:
+      - The group description.
+    required: true
+  role_id:
+    type: int
+    description:
+      - The group role ID.
+    required: false
+  role:
+    type: str
+    description:
+      - The group role name.
+      - The O(role_id) has precedence over the O(role) when supplied.
+    required: false
+    default:
+  tenant_id:
+    type: int
+    description:
+      - The tenant for the group identified by the tenant ID.
+    required: false
+    default:
+  tenant:
+    type: str
+    description:
+      - The tenant for the group identified by the tenant name.
+      - The O(tenant_id) has precedence over the O(tenant) when supplied.
+      - Tenant names are case sensitive.
+    required: false
+    default:
+  managed_filters:
+    description: The tag values per category.
+    type: dict
+    required: false
+    default:
+  managed_filters_merge_mode:
+    type: str
+    description:
+      - In V(merge) mode existing categories are kept or updated, new categories are added.
+      - In V(replace) mode all categories are replaced with the supplied O(managed_filters).
+    choices: [merge, replace]
+    default: replace
+  belongsto_filters:
+    description: A list of strings with a reference to the allowed host, cluster or folder.
+    type: list
+    elements: str
+    required: false
+    default:
+  belongsto_filters_merge_mode:
+    type: str
+    description:
+      - In V(merge) mode existing settings are merged with the supplied O(belongsto_filters).
+      - In V(replace) mode current values are replaced with the supplied O(belongsto_filters).
+    choices: [merge, replace]
+    default: replace
+"""
+
+EXAMPLES = r"""
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+  community.general.manageiq_group:
+    description: 'MyGroup-user'
+    role: 'EvmRole-user'
+    tenant: 'my_tenant'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false  # only do this when you trust the network!
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+  community.general.manageiq_group:
+    description: 'MyGroup-user'
+    role: 'EvmRole-user'
+    tenant_id: 4
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false  # only do this when you trust the network!
+
+- name:
+    - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
+    - Apply 3 prov_max_cpu and 2 department tags to the group.
+    - Limit access to a cluster for the group.
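+  # managed_filters maps tag categories to lists of tag values; with
+  # managed_filters_merge_mode 'replace' they overwrite the tags currently
+  # set on the group, while 'merge' keeps or updates existing categories.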
+  community.general.manageiq_group:
+    description: 'MyGroup-user'
+    role: 'EvmRole-user'
+    tenant: my_tenant
+    managed_filters:
+      prov_max_cpu:
+        - '1'
+        - '2'
+        - '4'
+      department:
+        - defense
+        - engineering
+    managed_filters_merge_mode: replace
+    belongsto_filters:
+      - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+    belongsto_filters_merge_mode: merge
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false  # only do this when you trust the network!
+
+- name: Delete a group in ManageIQ
+  community.general.manageiq_group:
+    state: 'absent'
+    description: 'MyGroup-user'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+
+- name: Delete a group in ManageIQ using a token
+  community.general.manageiq_group:
+    state: 'absent'
+    description: 'MyGroup-user'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+"""
+
+RETURN = r"""
+group:
+  description: The group.
+  returned: success
+  type: complex
+  contains:
+    description:
+      description: The group description.
+      returned: success
+      type: str
+    id:
+      description: The group ID.
+      returned: success
+      type: int
+    group_type:
+      description: The group type, system or user.
+      returned: success
+      type: str
+    role:
+      description: The group role name.
+      returned: success
+      type: str
+    tenant:
+      description: The group tenant name.
+      returned: success
+      type: str
+    managed_filters:
+      description: The tag values per category.
+      returned: success
+      type: dict
+    belongsto_filters:
+      description: A list of strings with a reference to the allowed host, cluster or folder.
+      returned: success
+      type: list
+    created_on:
+      description: Group creation date.
+      returned: success
+      type: str
+      sample: "2018-08-12T08:37:55+00:00"
+    updated_on:
+      description: Group update date.
+      returned: success
+      type: str
+      sample: "2018-08-12T08:37:55+00:00"
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQgroup(object):
+    """
+    Object to execute group management operations in manageiq.
+    """
+
+    def __init__(self, manageiq):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+
+    def group(self, description):
+        """ Search for group object by description.
+        Returns:
+            the group, or None if group was not found.
+ """ + groups = self.client.collections.groups.find_by(description=description) + if len(groups) == 0: + return None + else: + return groups[0] + + def tenant(self, tenant_id, tenant_name): + """ Search for tenant entity by name or id + Returns: + the tenant entity, None if no id or name was supplied + """ + + if tenant_id: + tenant = self.client.get_entity('tenants', tenant_id) + if not tenant: + self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id)) + return tenant + else: + if tenant_name: + tenant_res = self.client.collections.tenants.find_by(name=tenant_name) + if not tenant_res: + self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name) + if len(tenant_res) > 1: + self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s'" % tenant_name) + tenant = tenant_res[0] + return tenant + else: + # No tenant name or tenant id supplied + return None + + def role(self, role_id, role_name): + """ Search for a role object by name or id. + Returns: + the role entity, None no id or name was supplied + + the role, or send a module Fail signal if role not found. + """ + if role_id: + role = self.client.get_entity('roles', role_id) + if not role: + self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id)) + return role + else: + if role_name: + role_res = self.client.collections.roles.find_by(name=role_name) + if not role_res: + self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name) + if len(role_res) > 1: + self.module.fail_json(msg="Multiple roles found in manageiq with name '%s'" % role_name) + return role_res[0] + else: + # No role name or role id supplied + return None + + @staticmethod + def merge_dict_values(norm_current_values, norm_updated_values): + """ Create an merged update object for manageiq group filters. + + The input dict contain the tag values per category. + If the new values contain the category, all tags for that category are replaced + If the new values do not contain the category, the existing tags are kept + + Returns: + the nested array with the merged values, used in the update post body + """ + + # If no updated values are supplied, in merge mode, the original values must be returned + # otherwise the existing tag filters will be removed. + if norm_current_values and (not norm_updated_values): + return norm_current_values + + # If no existing tag filters exist, use the user supplied values + if (not norm_current_values) and norm_updated_values: + return norm_updated_values + + # start with norm_current_values's keys and values + res = norm_current_values.copy() + # replace res with norm_updated_values's keys and values + res.update(norm_updated_values) + return res + + def delete_group(self, group): + """ Deletes a group from manageiq. + + Returns: + a dict of: + changed: boolean indicating if the entity was updated. + msg: a short message describing the operation executed. 
+ """ + try: + url = '%s/groups/%s' % (self.api_url, group['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e))) + + if result['success'] is False: + self.module.fail_json(msg=result['message']) + + return dict( + changed=True, + msg="deleted group %s with id %s" % (group['description'], group['id'])) + + def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode): + """ Edit a manageiq group. + + Returns: + a dict of: + changed: boolean indicating if the entity was updated. + msg: a short message describing the operation executed. + """ + + if role or norm_managed_filters or belongsto_filters: + group.reload(attributes=['miq_user_role_name', 'entitlement']) + + try: + current_role = group['miq_user_role_name'] + except AttributeError: + current_role = None + + changed = False + resource = {} + + if description and group['description'] != description: + resource['description'] = description + changed = True + + if tenant and group['tenant_id'] != tenant['id']: + resource['tenant'] = dict(id=tenant['id']) + changed = True + + if role and current_role != role['name']: + resource['role'] = dict(id=role['id']) + changed = True + + if norm_managed_filters or belongsto_filters: + + # Only compare if filters are supplied + entitlement = group['entitlement'] + + if 'filters' not in entitlement: + # No existing filters exist, use supplied filters + managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + changed = True + else: + current_filters = entitlement['filters'] + new_filters = self.edit_group_edit_filters(current_filters, + norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode) + if new_filters: + resource['filters'] = new_filters + changed = True + + if not changed: + return dict( + changed=False, + msg="group %s is not changed." % group['description']) + + # try to update group + try: + self.client.post(group['href'], action='edit', resource=resource) + changed = True + except Exception as e: + self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e))) + + return dict( + changed=changed, + msg="successfully updated the group %s with id %s" % (group['description'], group['id'])) + + def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode): + """ Edit a manageiq group filters. 
+
+        Returns:
+            None if the group's filters were not updated;
+            otherwise the filters part of the post body for updating the group
+        """
+        filters_updated = False
+        new_filters_resource = {}
+
+        current_belongsto_set = current_filters.get('belongsto', set())
+
+        if belongsto_filters:
+            new_belongsto_set = set(belongsto_filters)
+        else:
+            new_belongsto_set = set()
+
+        if current_belongsto_set == new_belongsto_set:
+            new_filters_resource['belongsto'] = current_filters['belongsto']
+        else:
+            if belongsto_filters_merge_mode == 'merge':
+                current_belongsto_set.update(new_belongsto_set)
+                new_filters_resource['belongsto'] = list(current_belongsto_set)
+            else:
+                new_filters_resource['belongsto'] = list(new_belongsto_set)
+            filters_updated = True
+
+        # Process the managed filter tags
+        # The user input is a dict whose keys are the categories and whose values are arrays of tags.
+        # ManageIQ (current_filters['managed']) uses an array of arrays, one array per category.
+        # We normalize the user input from a dict with arrays to a dict of sorted arrays
+        # We normalize the current manageiq array of arrays also to a dict of sorted arrays so we can compare
+        norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+        if norm_current_filters == norm_managed_filters:
+            if 'managed' in current_filters:
+                new_filters_resource['managed'] = current_filters['managed']
+        else:
+            if managed_filters_merge_mode == 'merge':
+                merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+                new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+            else:
+                new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+            filters_updated = True
+
+        if not filters_updated:
+            return None
+
+        return new_filters_resource
+
+    def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+        """ Creates the group in manageiq.
+
+        Returns:
+            a dict with the changed flag, a message, and the created group's id.
+        """
+        # check for required arguments
+        for key, value in dict(description=description).items():
+            if value in (None, ''):
+                self.module.fail_json(msg="missing required argument: %s" % key)
+
+        url = '%s/groups' % self.api_url
+
+        resource = {'description': description}
+
+        if role is not None:
+            resource['role'] = dict(id=role['id'])
+
+        if tenant is not None:
+            resource['tenant'] = dict(id=tenant['id'])
+
+        if norm_managed_filters or belongsto_filters:
+            managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+            resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+
+        try:
+            result = self.client.post(url, action='create', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully created group %s" % description,
+            group_id=result['results'][0]['id']
+        )
+
+    @staticmethod
+    def normalized_managed_tag_filters_to_miq(norm_managed_filters):
+        if not norm_managed_filters:
+            return None
+
+        return list(norm_managed_filters.values())
+
+    @staticmethod
+    def manageiq_filters_to_sorted_dict(current_filters):
+        current_managed_filters = current_filters.get('managed')
+        if not current_managed_filters:
+            return None
+
+        res = {}
+        for tag_list in current_managed_filters:
+            tag_list.sort()
+            key = tag_list[0].split('/')[2]
+            res[key] = tag_list
+
+        return res
+
+    @staticmethod
+    def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
+        if not managed_filters:
+            return None
+
+        res = {}
+        for cat_key in managed_filters:
+            cat_array = []
+            if not isinstance(managed_filters[cat_key], list):
+                module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
+            for tags in managed_filters[cat_key]:
+                miq_managed_tag = "/managed/" + cat_key + "/" + tags
+                cat_array.append(miq_managed_tag)
+            # Do not add empty categories.
ManageIQ will remove all categories that are not supplied + if cat_array: + cat_array.sort() + res[cat_key] = cat_array + return res + + @staticmethod + def create_result_group(group): + """ Creates the ansible result object from a manageiq group entity + + Returns: + a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on + """ + try: + role_name = group['miq_user_role_name'] + except AttributeError: + role_name = None + + managed_filters = None + belongsto_filters = None + if 'filters' in group['entitlement']: + filters = group['entitlement']['filters'] + belongsto_filters = filters.get('belongsto') + group_managed_filters = filters.get('managed') + if group_managed_filters: + managed_filters = {} + for tag_list in group_managed_filters: + key = tag_list[0].split('/')[2] + tags = [] + for t in tag_list: + tags.append(t.split('/')[3]) + managed_filters[key] = tags + + return dict( + id=group['id'], + description=group['description'], + role=role_name, + tenant=group['tenant']['name'], + managed_filters=managed_filters, + belongsto_filters=belongsto_filters, + group_type=group['group_type'], + created_on=group['created_on'], + updated_on=group['updated_on'], + ) + + +def main(): + argument_spec = dict( + description=dict(required=True, type='str'), + state=dict(choices=['absent', 'present'], default='present'), + role_id=dict(type='int'), + role=dict(type='str'), + tenant_id=dict(type='int'), + tenant=dict(type='str'), + managed_filters=dict(type='dict'), + managed_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), + belongsto_filters=dict(type='list', elements='str'), + belongsto_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + description = module.params['description'] + state = module.params['state'] + role_id = module.params['role_id'] + role_name = module.params['role'] + tenant_id = module.params['tenant_id'] + tenant_name = module.params['tenant'] + managed_filters = module.params['managed_filters'] + managed_filters_merge_mode = module.params['managed_filters_merge_mode'] + belongsto_filters = module.params['belongsto_filters'] + belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode'] + + manageiq = ManageIQ(module) + manageiq_group = ManageIQgroup(manageiq) + + group = manageiq_group.group(description) + + # group should not exist + if state == "absent": + # if we have a group, delete it + if group: + res_args = manageiq_group.delete_group(group) + # if we do not have a group, nothing to do + else: + res_args = dict( + changed=False, + msg="group '%s' does not exist in manageiq" % description) + + # group should exist + if state == "present": + + tenant = manageiq_group.tenant(tenant_id, tenant_name) + role = manageiq_group.role(role_id, role_name) + norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module) + # if we have a group, edit it + if group: + res_args = manageiq_group.edit_group(group, description, role, tenant, + norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode) + + # if we do not have a group, create it + else: + res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters) + group = manageiq.client.get_entity('groups', res_args['group_id']) 
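+    # Whether the group was just created or already existed, reload it with
+    # the attributes that create_result_group() reads (miq_user_role_name,
+    # tenant, entitlement) before building the result below.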
+ + group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement']) + res_args['group'] = manageiq_group.create_result_group(group) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py deleted file mode 120000 index 959ec93c78..0000000000 --- a/plugins/modules/manageiq_policies.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/manageiq/manageiq_policies.py \ No newline at end of file diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py new file mode 100644 index 0000000000..a5539724dc --- /dev/null +++ b/plugins/modules/manageiq_policies.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_policies + +short_description: Management of resource policy_profiles in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - V(absent) - policy_profiles should not exist, + - V(present) - policy_profiles should exist. + choices: ['absent', 'present'] + default: 'present' + policy_profiles: + type: list + elements: dict + description: + - List of dictionaries, each includes the policy_profile V(name) key. + - Required if O(state) is V(present) or V(absent). + resource_type: + type: str + description: + - The type of the resource to which the profile should be [un]assigned. + required: true + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user + resource_name: + type: str + description: + - The name of the resource to which the profile should be [un]assigned. + - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. + resource_id: + type: int + description: + - The ID of the resource to which the profile should be [un]assigned. + - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. + version_added: 2.2.0 +""" + +EXAMPLES = r""" +- name: Assign new policy_profile for a provider in ManageIQ + community.general.manageiq_policies: + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! + +- name: Unassign a policy_profile for a provider in ManageIQ + community.general.manageiq_policies: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! +""" + +RETURN = r""" +manageiq_policies: + description: + - List current policy_profile and policies for a provider in ManageIQ. 
+  returned: always
+  type: dict
+  sample:
+    {
+      "changed": false,
+      "profiles": [
+        {
+          "policies": [
+            {
+              "active": true,
+              "description": "OpenSCAP",
+              "name": "openscap policy"
+            },
+            {
+              "active": true,
+              "description": "Analyse incoming container images",
+              "name": "analyse incoming container images"
+            },
+            {
+              "active": true,
+              "description": "Schedule compliance after smart state analysis",
+              "name": "schedule compliance after smart state analysis"
+            }
+          ],
+          "profile_description": "OpenSCAP profile",
+          "profile_name": "openscap profile"
+        }
+      ]
+    }
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def main():
+    actions = {'present': 'assign', 'absent': 'unassign'}
+    argument_spec = dict(
+        policy_profiles=dict(type='list', elements='dict'),
+        resource_id=dict(type='int'),
+        resource_name=dict(type='str'),
+        resource_type=dict(required=True, type='str',
+                           choices=list(manageiq_entities().keys())),
+        state=dict(type='str',
+                   choices=['present', 'absent'], default='present'),
+    )
+    # add the manageiq connection arguments to the arguments
+    argument_spec.update(manageiq_argument_spec())
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[["resource_id", "resource_name"]],
+        required_one_of=[["resource_id", "resource_name"]],
+        required_if=[
+            ('state', 'present', ['policy_profiles']),
+            ('state', 'absent', ['policy_profiles'])
+        ],
+    )
+
+    policy_profiles = module.params['policy_profiles']
+    resource_id = module.params['resource_id']
+    resource_type_key = module.params['resource_type']
+    resource_name = module.params['resource_name']
+    state = module.params['state']
+
+    # get the action and resource type
+    action = actions[state]
+    resource_type = manageiq_entities()[resource_type_key]
+
+    manageiq = ManageIQ(module)
+    manageiq_policies = manageiq.policies(resource_id, resource_type, resource_name)
+
+    # assign or unassign the profiles
+    res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py
new file mode 100644
index 0000000000..bf96679e29
--- /dev/null
+++ b/plugins/modules/manageiq_policies_info.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# Copyright (c) 2022, Alexei Znamensky
+# Copyright (c) 2017, Daniel Korn
+# Copyright (c) 2017, Yaacov Zamir
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: manageiq_policies_info
+version_added: 5.8.0
+
+short_description: Listing of resource policy_profiles in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+author: Alexei Znamensky (@russoz)
+description:
+  - The manageiq_policies_info module supports listing policy_profiles in ManageIQ.
+options:
+  resource_type:
+    type: str
+    description:
+      - The type of the resource to obtain the profile for.
+    required: true
+    choices:
+      - provider
+      - host
+      - vm
+      - blueprint
+      - category
+      - cluster
+      - data store
+      - group
+      - resource pool
+      - service
+      - service template
+      - template
+      - tenant
+      - user
+  resource_name:
+    type: str
+    description:
+      - The name of the resource to obtain the profile for.
+      - Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
+  resource_id:
+    type: int
+    description:
+      - The ID of the resource to obtain the profile for.
+      - Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
+"""
+
+EXAMPLES = r"""
+- name: List current policy_profile and policies for a provider in ManageIQ
+  community.general.manageiq_policies_info:
+    resource_name: 'EngLab'
+    resource_type: 'provider'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+  register: result
+"""
+
+RETURN = r"""
+profiles:
+  description:
+    - List current policy_profile and policies for a provider in ManageIQ.
+  returned: always
+  type: list
+  elements: dict
+  sample:
+    - policies:
+        - active: true
+          description: OpenSCAP
+          name: openscap policy
+        - active: true
+          description: Analyse incoming container images
+          name: analyse incoming container images
+        - active: true
+          description: Schedule compliance after smart state analysis
+          name: schedule compliance after smart state analysis
+      profile_description: OpenSCAP profile
+      profile_name: openscap profile
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def main():
+    argument_spec = dict(
+        resource_id=dict(type='int'),
+        resource_name=dict(type='str'),
+        resource_type=dict(required=True, type='str',
+                           choices=list(manageiq_entities().keys())),
+    )
+    # add the manageiq connection arguments to the arguments
+    argument_spec.update(manageiq_argument_spec())
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[["resource_id", "resource_name"]],
+        required_one_of=[["resource_id", "resource_name"]],
+        supports_check_mode=True,
+    )
+
+    resource_id = module.params['resource_id']
+    resource_type_key = module.params['resource_type']
+    resource_name = module.params['resource_name']
+
+    # get the resource type
+    resource_type = manageiq_entities()[resource_type_key]
+
+    manageiq_policies = ManageIQ(module).policies(resource_id, resource_type, resource_name)
+
+    # return a list of current profiles for this object
+    current_profiles = manageiq_policies.query_resource_profiles()
+    res_args = dict(changed=False, profiles=current_profiles)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py
deleted file mode 120000
index a183e182aa..0000000000
--- a/plugins/modules/manageiq_provider.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/manageiq/manageiq_provider.py
\ No newline at end of file
diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py
new file mode 100644
index 0000000000..0268baa5c8
--- /dev/null
+++ b/plugins/modules/manageiq_provider.py
@@ -0,0 +1,912 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Daniel Korn
+# Copyright (c) 2017, Yaacov Zamir
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import
annotations
+
+
+DOCUMENTATION = r"""
+module: manageiq_provider
+short_description: Management of providers in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+  - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - V(absent) - provider should not exist,
+      - V(present) - provider should be present,
+      - V(refresh) - provider is refreshed.
+    choices: ['absent', 'present', 'refresh']
+    default: 'present'
+  name:
+    type: str
+    description: The provider's name.
+    required: true
+  type:
+    type: str
+    description: The provider's type.
+    choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
+  zone:
+    type: str
+    description: The ManageIQ zone name that manages the provider.
+    default: 'default'
+  provider_region:
+    type: str
+    description: The provider region name to connect to (for example AWS region for Amazon).
+  host_default_vnc_port_start:
+    type: str
+    description: The first port in the host VNC range.
+  host_default_vnc_port_end:
+    type: str
+    description: The last port in the host VNC range.
+  subscription:
+    type: str
+    description: Microsoft Azure subscription ID.
+  project:
+    type: str
+    description: Google Compute Engine Project ID.
+  azure_tenant_id:
+    type: str
+    description: Tenant ID. Defaults to V(null).
+    aliases: [keystone_v3_domain_id]
+  tenant_mapping_enabled:
+    type: bool
+    default: false
+    description: Whether to enable mapping of existing tenants.
+  api_version:
+    type: str
+    description: The OpenStack Keystone API version.
+    choices: ['v2', 'v3']
+
+  provider:
+    description: Default endpoint connection information, required if O(state=present).
+    type: dict
+    suboptions:
+      hostname:
+        type: str
+        description: The provider's API hostname.
+        required: true
+      port:
+        type: int
+        description: The provider's API port.
+      userid:
+        type: str
+        description: Provider's API endpoint authentication userid.
+      password:
+        type: str
+        description: Provider's API endpoint authentication password.
+      auth_key:
+        type: str
+        description: Provider's API endpoint authentication bearer token.
+      validate_certs:
+        description: Whether SSL certificates should be verified for HTTPS requests (deprecated).
+        type: bool
+        default: true
+        aliases: [verify_ssl]
+      security_protocol:
+        type: str
+        description: How SSL certificates should be used for HTTPS requests.
+        choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl']
+      certificate_authority:
+        type: str
+        description: The CA bundle string with custom certificates.
+      path:
+        type: str
+        description:
+          - TODO needs documentation.
+      project:
+        type: str
+        description:
+          - TODO needs documentation.
+      role:
+        type: str
+        description:
+          - TODO needs documentation.
+      subscription:
+        type: str
+        description:
+          - TODO needs documentation.
+      uid_ems:
+        type: str
+        description:
+          - TODO needs documentation.
+  metrics:
+    description: Metrics endpoint connection information.
+    type: dict
+    suboptions:
+      hostname:
+        type: str
+        description: The provider's API hostname.
+        required: true
+      port:
+        type: int
+        description: The provider's API port.
+      userid:
+        type: str
+        description: Provider's API endpoint authentication userid.
+      password:
+        type: str
+        description: Provider's API endpoint authentication password.
+ auth_key: + type: str + description: Provider's API endpoint authentication bearer token. + validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). + type: bool + default: true + aliases: [verify_ssl] + security_protocol: + type: str + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: How SSL certificates should be used for HTTPS requests. + certificate_authority: + type: str + description: The CA bundle string with custom certificates. + path: + type: str + description: Database name for oVirt metrics. Defaults to V(ovirt_engine_history). + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. + alerts: + description: Alerts endpoint connection information. + type: dict + suboptions: + hostname: + type: str + description: The provider's API hostname. + required: true + port: + type: int + description: The provider's API port. + userid: + type: str + description: Provider's API endpoint authentication userid. Defaults to V(null). + password: + type: str + description: Provider's API endpoint authentication password. Defaults to V(null). + auth_key: + type: str + description: Provider's API endpoint authentication bearer token. Defaults to V(null). + validate_certs: + type: bool + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to V(true). + default: true + aliases: [verify_ssl] + security_protocol: + type: str + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: How SSL certificates should be used for HTTPS requests. Defaults to V(null). + certificate_authority: + type: str + description: The CA bundle string with custom certificates. Defaults to V(null). + path: + type: str + description: + - TODO needs documentation. + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. + ssh_keypair: + description: SSH key pair used for SSH connections to all hosts in this provider. + type: dict + suboptions: + hostname: + type: str + description: Director hostname. + required: true + userid: + type: str + description: SSH username. + auth_key: + type: str + description: SSH private key. + validate_certs: + description: + - Whether certificates should be verified for connections. + type: bool + default: true + aliases: [verify_ssl] + security_protocol: + type: str + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: + - TODO needs documentation. + certificate_authority: + type: str + description: + - TODO needs documentation. + password: + type: str + description: + - TODO needs documentation. + path: + type: str + description: + - TODO needs documentation. + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. 
+      port:
+        type: int
+        description:
+          - TODO needs documentation.
+"""
+
+EXAMPLES = r"""
+- name: Create a new provider in ManageIQ ('Hawkular' metrics)
+  community.general.manageiq_provider:
+    name: 'EngLab'
+    type: 'Openshift'
+    state: 'present'
+    provider:
+      auth_key: 'topSecret'
+      hostname: 'example.com'
+      port: 8443
+      validate_certs: true
+      security_protocol: 'ssl-with-validation-custom-ca'
+      certificate_authority: |
+        -----BEGIN CERTIFICATE-----
+        FAKECERTsdKgAwI...
+        -----END CERTIFICATE-----
+    metrics:
+      auth_key: 'topSecret'
+      role: 'hawkular'
+      hostname: 'example.com'
+      port: 443
+      validate_certs: true
+      security_protocol: 'ssl-with-validation-custom-ca'
+      certificate_authority: |
+        -----BEGIN CERTIFICATE-----
+        FAKECERTsdKgAwI...
+        -----END CERTIFICATE-----
+    manageiq_connection:
+      url: 'https://127.0.0.1:80'
+      username: 'admin'
+      password: 'password'
+      validate_certs: true
+
+
+- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
+  community.general.manageiq_provider:
+    name: 'EngLab'
+    type: 'Openshift'
+    state: 'present'
+    provider:
+      auth_key: 'topSecret'
+      hostname: 'next.example.com'
+      port: 8443
+      validate_certs: true
+      security_protocol: 'ssl-with-validation-custom-ca'
+      certificate_authority: |
+        -----BEGIN CERTIFICATE-----
+        FAKECERTsdKgAwI...
+        -----END CERTIFICATE-----
+    metrics:
+      auth_key: 'topSecret'
+      hostname: 'next.example.com'
+      port: 443
+      validate_certs: true
+      security_protocol: 'ssl-with-validation-custom-ca'
+      certificate_authority: |
+        -----BEGIN CERTIFICATE-----
+        FAKECERTsdKgAwI...
+        -----END CERTIFICATE-----
+    manageiq_connection:
+      url: 'https://127.0.0.1'
+      username: 'admin'
+      password: 'password'
+      validate_certs: true
+
+
+- name: Delete a provider in ManageIQ
+  community.general.manageiq_provider:
+    name: 'EngLab'
+    type: 'Openshift'
+    state: 'absent'
+    manageiq_connection:
+      url: 'https://127.0.0.1'
+      username: 'admin'
+      password: 'password'
+      validate_certs: true
+
+
+- name: Create a new Amazon provider in ManageIQ using token authentication
+  community.general.manageiq_provider:
+    name: 'EngAmazon'
+    type: 'Amazon'
+    state: 'present'
+    provider:
+      hostname: 'amazon.example.com'
+      userid: 'hello'
+      password: 'world'
+    manageiq_connection:
+      url: 'https://127.0.0.1'
+      token: 'VeryLongToken'
+      validate_certs: true
+
+
+- name: Create a new oVirt provider in ManageIQ
+  community.general.manageiq_provider:
+    name: 'RHEV'
+    type: 'oVirt'
+    state: 'present'
+    provider:
+      hostname: 'rhev01.example.com'
+      userid: 'admin@internal'
+      password: 'password'
+      validate_certs: true
+      certificate_authority: |
+        -----BEGIN CERTIFICATE-----
+        FAKECERTsdKgAwI...
+        -----END CERTIFICATE-----
+    metrics:
+      hostname: 'metrics.example.com'
+      path: 'ovirt_engine_history'
+      userid: 'user_id_metrics'
+      password: 'password_metrics'
+      validate_certs: true
+      certificate_authority: |
+        -----BEGIN CERTIFICATE-----
+        FAKECERTsdKgAwI...
+ -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 'password' + validate_certs: true + +- name: Create a new VMware provider in ManageIQ + community.general.manageiq_provider: + name: 'EngVMware' + type: 'VMware' + state: 'present' + provider: + hostname: 'vcenter.example.com' + host_default_vnc_port_start: 5800 + host_default_vnc_port_end: 5801 + userid: 'root' + password: 'password' + manageiq_connection: + url: 'https://127.0.0.1' + token: 'VeryLongToken' + validate_certs: true + +- name: Create a new Azure provider in ManageIQ + community.general.manageiq_provider: + name: 'EngAzure' + type: 'Azure' + provider_region: 'northeurope' + subscription: 'e272bd74-f661-484f-b223-88dd128a4049' + azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048' + state: 'present' + provider: + hostname: 'azure.example.com' + userid: 'e272bd74-f661-484f-b223-88dd128a4049' + password: 'password' + manageiq_connection: + url: 'https://cf-6af0.rhpds.opentlc.com' + username: 'admin' + password: 'password' + validate_certs: true + +- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair + community.general.manageiq_provider: + name: 'EngDirector' + type: 'Director' + api_version: 'v3' + state: 'present' + provider: + hostname: 'director.example.com' + userid: 'admin' + password: 'password' + security_protocol: 'ssl-with-validation' + validate_certs: 'true' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwI... + -----END CERTIFICATE----- + ssh_keypair: + hostname: director.example.com + userid: heat-admin + auth_key: 'SecretSSHPrivateKey' + +- name: Create a new OpenStack provider in ManageIQ with amqp metrics + community.general.manageiq_provider: + name: 'EngOpenStack' + type: 'OpenStack' + api_version: 'v3' + state: 'present' + provider_region: 'europe' + tenant_mapping_enabled: 'False' + keystone_v3_domain_id: 'mydomain' + provider: + hostname: 'openstack.example.com' + userid: 'admin' + password: 'password' + security_protocol: 'ssl-with-validation' + validate_certs: 'true' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwI... 
+ -----END CERTIFICATE----- + metrics: + role: amqp + hostname: 'amqp.example.com' + security_protocol: 'non-ssl' + port: 5666 + userid: admin + password: password + + +- name: Create a new GCE provider in ManageIQ + community.general.manageiq_provider: + name: 'EngGoogle' + type: 'GCE' + provider_region: 'europe-west1' + project: 'project1' + state: 'present' + provider: + hostname: 'gce.example.com' + auth_key: 'google_json_key' + validate_certs: 'false' +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +def supported_providers(): + return dict( + Openshift=dict( + class_name='ManageIQ::Providers::Openshift::ContainerManager', + authtype='bearer', + default_role='default', + metrics_role='prometheus', + alerts_role='prometheus_alerts', + ), + Amazon=dict( + class_name='ManageIQ::Providers::Amazon::CloudManager', + ), + oVirt=dict( + class_name='ManageIQ::Providers::Redhat::InfraManager', + default_role='default', + metrics_role='metrics', + ), + VMware=dict( + class_name='ManageIQ::Providers::Vmware::InfraManager', + ), + Azure=dict( + class_name='ManageIQ::Providers::Azure::CloudManager', + ), + Director=dict( + class_name='ManageIQ::Providers::Openstack::InfraManager', + ssh_keypair_role="ssh_keypair" + ), + OpenStack=dict( + class_name='ManageIQ::Providers::Openstack::CloudManager', + ), + GCE=dict( + class_name='ManageIQ::Providers::Google::CloudManager', + ), + ) + + +def endpoint_list_spec(): + return dict( + provider=dict(type='dict', options=endpoint_argument_spec()), + metrics=dict(type='dict', options=endpoint_argument_spec()), + alerts=dict(type='dict', options=endpoint_argument_spec()), + ssh_keypair=dict(type='dict', options=endpoint_argument_spec(), no_log=False), + ) + + +def endpoint_argument_spec(): + return dict( + role=dict(), + hostname=dict(required=True), + port=dict(type='int'), + validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), + certificate_authority=dict(), + security_protocol=dict( + choices=[ + 'ssl-with-validation', + 'ssl-with-validation-custom-ca', + 'ssl-without-validation', + 'non-ssl', + ], + ), + userid=dict(), + password=dict(no_log=True), + auth_key=dict(no_log=True), + subscription=dict(no_log=True), + project=dict(), + uid_ems=dict(), + path=dict(), + ) + + +def delete_nulls(h): + """ Remove null entries from a hash + + Returns: + a hash without nulls + """ + if isinstance(h, list): + return [delete_nulls(i) for i in h] + if isinstance(h, dict): + return {k: delete_nulls(v) for k, v in h.items() if v is not None} + + return h + + +class ManageIQProvider(object): + """ + Object to execute provider management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def class_name_to_type(self, class_name): + """ Convert class_name to type + + Returns: + the type + """ + out = [k for k, v in supported_providers().items() if v['class_name'] == class_name] + if len(out) == 1: + return out[0] + + return None + + def zone_id(self, name): + """ Search for zone id by zone name. + + Returns: + the zone id, or send a module Fail signal if zone not found. 
+ """ + zone = self.manageiq.find_collection_resource_by('zones', name=name) + if not zone: # zone doesn't exist + self.module.fail_json( + msg="zone %s does not exist in manageiq" % (name)) + + return zone['id'] + + def provider(self, name): + """ Search for provider object by name. + + Returns: + the provider, or None if provider not found. + """ + return self.manageiq.find_collection_resource_by('providers', name=name) + + def build_connection_configurations(self, provider_type, endpoints): + """ Build "connection_configurations" objects from + requested endpoints provided by user + + Returns: + the user requested provider endpoints list + """ + connection_configurations = [] + endpoint_keys = endpoint_list_spec().keys() + provider_defaults = supported_providers().get(provider_type, {}) + + # get endpoint defaults + endpoint = endpoints.get('provider') + default_auth_key = endpoint.get('auth_key') + + # build a connection_configuration object for each endpoint + for endpoint_key in endpoint_keys: + endpoint = endpoints.get(endpoint_key) + if endpoint: + # get role and authtype + role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default') + if role == 'default': + authtype = provider_defaults.get('authtype') or role + else: + authtype = role + + # set a connection_configuration + connection_configurations.append({ + 'endpoint': { + 'role': role, + 'hostname': endpoint.get('hostname'), + 'port': endpoint.get('port'), + 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)], + 'security_protocol': endpoint.get('security_protocol'), + 'certificate_authority': endpoint.get('certificate_authority'), + 'path': endpoint.get('path'), + }, + 'authentication': { + 'authtype': authtype, + 'userid': endpoint.get('userid'), + 'password': endpoint.get('password'), + 'auth_key': endpoint.get('auth_key') or default_auth_key, + } + }) + + return connection_configurations + + def delete_provider(self, provider): + """ Deletes a provider from manageiq. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/providers/%s' % (self.api_url, provider['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e))) + + return dict(changed=True, msg=result['message']) + + def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version): + """ Edit a provider from manageiq. + + Returns: + a short message describing the operation executed. + """ + url = '%s/providers/%s' % (self.api_url, provider['id']) + + resource = dict( + name=name, + zone={'id': zone_id}, + provider_region=provider_region, + connection_configurations=endpoints, + host_default_vnc_port_start=host_default_vnc_port_start, + host_default_vnc_port_end=host_default_vnc_port_end, + subscription=subscription, + project=project, + uid_ems=uid_ems, + tenant_mapping_enabled=tenant_mapping_enabled, + api_version=api_version, + ) + + # NOTE: we do not check for diff's between requested and current + # provider, we always submit endpoints with password or auth_keys, + # since we can not compare with current password or auth_key, + # every edit request is sent to ManageIQ API without comparing + # it to current state. 
+ + # clean nulls, we do not send nulls to the api + resource = delete_nulls(resource) + + # try to update provider + try: + result = self.client.post(url, action='edit', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e))) + + return dict( + changed=True, + msg="successfully updated the provider %s: %s" % (provider['name'], result)) + + def create_provider(self, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version): + """ Creates the provider in manageiq. + + Returns: + a short message describing the operation executed. + """ + resource = dict( + name=name, + zone={'id': zone_id}, + provider_region=provider_region, + host_default_vnc_port_start=host_default_vnc_port_start, + host_default_vnc_port_end=host_default_vnc_port_end, + subscription=subscription, + project=project, + uid_ems=uid_ems, + tenant_mapping_enabled=tenant_mapping_enabled, + api_version=api_version, + connection_configurations=endpoints, + ) + + # clean nulls, we do not send nulls to the api + resource = delete_nulls(resource) + + # try to create a new provider + try: + url = '%s/providers' % (self.api_url) + result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource) + except Exception as e: + self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="successfully created the provider %s: %s" % (name, result['results'])) + + def refresh(self, provider, name): + """ Trigger provider refresh. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/providers/%s' % (self.api_url, provider['id']) + result = self.client.post(url, action='refresh') + except Exception as e: + self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="refreshing provider %s" % name) + + +def main(): + zone_id = None + endpoints = [] + argument_spec = dict( + state=dict(choices=['absent', 'present', 'refresh'], default='present'), + name=dict(required=True), + zone=dict(default='default'), + provider_region=dict(), + host_default_vnc_port_start=dict(), + host_default_vnc_port_end=dict(), + subscription=dict(), + project=dict(), + azure_tenant_id=dict(aliases=['keystone_v3_domain_id']), + tenant_mapping_enabled=dict(default=False, type='bool'), + api_version=dict(choices=['v2', 'v3']), + type=dict(choices=list(supported_providers().keys())), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + # add the endpoint arguments to the arguments + argument_spec.update(endpoint_list_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['provider']), + ('state', 'refresh', ['name'])], + required_together=[ + ['host_default_vnc_port_start', 'host_default_vnc_port_end'] + ], + ) + + name = module.params['name'] + zone_name = module.params['zone'] + provider_type = module.params['type'] + raw_endpoints = module.params + provider_region = module.params['provider_region'] + host_default_vnc_port_start = module.params['host_default_vnc_port_start'] + host_default_vnc_port_end = module.params['host_default_vnc_port_end'] + subscription = module.params['subscription'] + uid_ems = module.params['azure_tenant_id'] + project = 
module.params['project'] + tenant_mapping_enabled = module.params['tenant_mapping_enabled'] + api_version = module.params['api_version'] + state = module.params['state'] + + manageiq = ManageIQ(module) + manageiq_provider = ManageIQProvider(manageiq) + + provider = manageiq_provider.provider(name) + + # provider should not exist + if state == "absent": + # if we have a provider, delete it + if provider: + res_args = manageiq_provider.delete_provider(provider) + # if we do not have a provider, nothing to do + else: + res_args = dict( + changed=False, + msg="provider %s: does not exist in manageiq" % (name)) + + # provider should exist + if state == "present": + # get data user did not explicitly give + if zone_name: + zone_id = manageiq_provider.zone_id(zone_name) + + # if we do not have a provider_type, use the current provider_type + if provider and not provider_type: + provider_type = manageiq_provider.class_name_to_type(provider['type']) + + # check supported_providers types + if not provider_type: + manageiq_provider.module.fail_json( + msg="missing required argument: provider_type") + + # check supported_providers types + if provider_type not in supported_providers().keys(): + manageiq_provider.module.fail_json( + msg="provider_type %s is not supported" % (provider_type)) + + # build "connection_configurations" objects from user requested endpoints + # "provider" is a required endpoint, if we have it, we have endpoints + if raw_endpoints.get("provider"): + endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints) + + # if we have a provider, edit it + if provider: + res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version) + # if we do not have a provider, create it + else: + res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version) + + # refresh provider (trigger sync) + if state == "refresh": + if provider: + res_args = manageiq_provider.refresh(provider, name) + else: + res_args = dict( + changed=False, + msg="provider %s: does not exist in manageiq" % (name)) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py deleted file mode 120000 index e01535a75e..0000000000 --- a/plugins/modules/manageiq_tags.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/manageiq/manageiq_tags.py \ No newline at end of file diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py new file mode 100644 index 0000000000..7715a04288 --- /dev/null +++ b/plugins/modules/manageiq_tags.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_tags + +short_description: Management of resource tags in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_tags module supports adding, updating and 
deleting tags in ManageIQ.
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - V(absent) - tags should not exist,
+      - V(present) - tags should exist.
+    choices: ['absent', 'present']
+    default: 'present'
+  tags:
+    type: list
+    elements: dict
+    description:
+      - V(tags) - list of dictionaries, each includes C(name) and C(category) keys.
+      - Required if O(state) is V(present) or V(absent).
+  resource_type:
+    type: str
+    description:
+      - The relevant resource type in ManageIQ.
+    required: true
+    choices:
+      - provider
+      - host
+      - vm
+      - blueprint
+      - category
+      - cluster
+      - data store
+      - group
+      - resource pool
+      - service
+      - service template
+      - template
+      - tenant
+      - user
+  resource_name:
+    type: str
+    description:
+      - The name of the resource at which tags are controlled.
+      - Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
+  resource_id:
+    description:
+      - The ID of the resource at which tags are controlled.
+      - Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
+    type: int
+    version_added: 2.2.0
+"""
+
+EXAMPLES = r"""
+- name: Create new tags for a provider in ManageIQ.
+  community.general.manageiq_tags:
+    resource_name: 'EngLab'
+    resource_type: 'provider'
+    tags:
+      - category: environment
+        name: prod
+      - category: owner
+        name: prod_ops
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when connecting to localhost!
+
+- name: Create new tags for a provider in ManageIQ.
+  community.general.manageiq_tags:
+    resource_id: 23000000790497
+    resource_type: 'provider'
+    tags:
+      - category: environment
+        name: prod
+      - category: owner
+        name: prod_ops
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when connecting to localhost!
+
+- name: Remove tags for a provider in ManageIQ.
+  community.general.manageiq_tags:
+    state: absent
+    resource_name: 'EngLab'
+    resource_type: 'provider'
+    tags:
+      - category: environment
+        name: prod
+      - category: owner
+        name: prod_ops
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when connecting to localhost!
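+
+# Illustrative extra example: the manageiq_connection fragment also accepts
+# a token instead of username/password, as the manageiq_user and
+# manageiq_provider examples in this patch show. Values are placeholders.
+- name: Create new tags for a provider in ManageIQ using a token.
+  community.general.manageiq_tags:
+    resource_name: 'EngLab'
+    resource_type: 'provider'
+    tags:
+      - category: environment
+        name: prod
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: false # only do this when connecting to localhost!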
+""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities +) + + +def main(): + actions = {'present': 'assign', 'absent': 'unassign'} + argument_spec = dict( + tags=dict(type='list', elements='dict'), + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + state=dict(type='str', + choices=['present', 'absent'], default='present'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + required_if=[ + ('state', 'present', ['tags']), + ('state', 'absent', ['tags']) + ], + ) + + tags = module.params['tags'] + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + state = module.params['state'] + + # get the action and resource type + action = actions[state] + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + + # query resource id, fail if resource does not exist + if resource_id is None: + resource_id = manageiq.query_resource_id(resource_type, resource_name) + + manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) + + # assign or unassign the tags + res_args = manageiq_tags.assign_or_unassign_tags(tags, action) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py new file mode 100644 index 0000000000..eeb2e74685 --- /dev/null +++ b/plugins/modules/manageiq_tags_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_tags_info +version_added: 5.8.0 +short_description: Retrieve resource tags in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + - community.general.attributes.info_module + +author: Alexei Znamensky (@russoz) +description: + - This module supports retrieving resource tags from ManageIQ. +options: + resource_type: + type: str + description: + - The relevant resource type in ManageIQ. + required: true + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user + resource_name: + type: str + description: + - The name of the resource at which tags are controlled. + - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. + resource_id: + description: + - The ID of the resource at which tags are controlled. + - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. + type: int +""" + +EXAMPLES = r""" +- name: List current tags for a provider in ManageIQ. 
+  community.general.manageiq_tags_info:
+    resource_name: 'EngLab'
+    resource_type: 'provider'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+  register: result
+"""
+
+RETURN = r"""
+tags:
+  description: List of tags associated with the resource.
+  returned: on success
+  type: list
+  elements: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import (
+    ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities
+)
+
+
+def main():
+    argument_spec = dict(
+        resource_id=dict(type='int'),
+        resource_name=dict(type='str'),
+        resource_type=dict(required=True, type='str',
+                           choices=list(manageiq_entities().keys())),
+    )
+    # add the manageiq connection arguments to the arguments
+    argument_spec.update(manageiq_argument_spec())
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[["resource_id", "resource_name"]],
+        required_one_of=[["resource_id", "resource_name"]],
+        supports_check_mode=True,
+    )
+
+    resource_id = module.params['resource_id']
+    resource_type_key = module.params['resource_type']
+    resource_name = module.params['resource_name']
+
+    # get the action and resource type
+    resource_type = manageiq_entities()[resource_type_key]
+
+    manageiq = ManageIQ(module)
+
+    # query resource id, fail if resource does not exist
+    if resource_id is None:
+        resource_id = manageiq.query_resource_id(resource_type, resource_name)
+
+    manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+    # return a list of current tags for this object
+    current_tags = manageiq_tags.query_resource_tags()
+    res_args = dict(changed=False, tags=current_tags)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py
deleted file mode 120000
index 93a7ecb2e9..0000000000
--- a/plugins/modules/manageiq_tenant.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/manageiq/manageiq_tenant.py
\ No newline at end of file
diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py
new file mode 100644
index 0000000000..0ba54bbc91
--- /dev/null
+++ b/plugins/modules/manageiq_tenant.py
@@ -0,0 +1,548 @@
+#!/usr/bin/python
+# Copyright (c) 2018, Evert Mulder (based on manageiq_user.py by Daniel Korn )
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Evert Mulder (@evertmulder)
+description:
+  - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+  - manageiq-client
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    type: str
+    description:
+      - V(absent) - tenant should not exist,
+      - V(present) - tenant should be present.
+    choices: ['absent', 'present']
+    default: 'present'
+  name:
+    type: str
+    description:
+      - The tenant name.
+    required: true
+    default:
+  description:
+    type: str
+    description:
+      - The tenant description.
+    required: true
+    default:
+  parent_id:
+    type: int
+    description:
+      - The ID of the parent tenant. If not supplied the root tenant is used.
+      - The O(parent_id) takes precedence over O(parent) when supplied.
+    required: false
+    default:
+  parent:
+    type: str
+    description:
+      - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used.
+    required: false
+    default:
+  quotas:
+    type: dict
+    description:
+      - The tenant quotas.
+      - All parameters are case sensitive.
+      - 'Valid attributes are:'
+      - '- V(cpu_allocated) (int): use null to remove the quota.'
+      - '- V(mem_allocated) (GB): use null to remove the quota.'
+      - '- V(storage_allocated) (GB): use null to remove the quota.'
+      - '- V(vms_allocated) (int): use null to remove the quota.'
+      - '- V(templates_allocated) (int): use null to remove the quota.'
+    required: false
+    default: {}
+"""
+
+EXAMPLES = r"""
+- name: Update the root tenant in ManageIQ
+  community.general.manageiq_tenant:
+    name: 'My Company'
+    description: 'My company name'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!
+
+- name: Create a tenant in ManageIQ
+  community.general.manageiq_tenant:
+    name: 'Dep1'
+    description: 'Manufacturing department'
+    parent_id: 1
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!
+
+- name: Delete a tenant in ManageIQ
+  community.general.manageiq_tenant:
+    state: 'absent'
+    name: 'Dep1'
+    parent_id: 1
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+  community.general.manageiq_tenant:
+    name: 'Dep1'
+    parent_id: 1
+    quotas:
+      cpu_allocated: 100
+      mem_allocated: 50
+      vms_allocated:
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!
+
+
+- name: Delete a tenant in ManageIQ using a token
+  community.general.manageiq_tenant:
+    state: 'absent'
+    name: 'Dep1'
+    parent_id: 1
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: false # only do this when you trust the network!
+"""
+
+RETURN = r"""
+tenant:
+  description: The tenant.
+  returned: success
+  type: complex
+  contains:
+    id:
+      description: The tenant ID.
+      returned: success
+      type: int
+    name:
+      description: The tenant name.
+      returned: success
+      type: str
+    description:
+      description: The tenant description.
+      returned: success
+      type: str
+    parent_id:
+      description: The ID of the parent tenant.
+      returned: success
+      type: int
+    quotas:
+      description: List of tenant quotas.
+      returned: success
+      type: list
+      sample:
+        cpu_allocated: 100
+        mem_allocated: 50
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+    """
+    Object to execute tenant management operations in manageiq.
+    """
+
+    def __init__(self, manageiq):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+
+    def tenant(self, name, parent_id, parent):
+        """ Search for tenant object by name and parent_id or parent
+            or the root tenant if no parent or parent_id is supplied.
+        Returns:
+            the parent tenant, None for the root tenant
+            the tenant or None if tenant was not found.
+        """
+
+        if parent_id:
+            parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+            if not parent_tenant_res:
+                self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+            parent_tenant = parent_tenant_res[0]
+            tenants = self.client.collections.tenants.find_by(name=name)
+
+            for tenant in tenants:
+                try:
+                    ancestry = tenant['ancestry']
+                except AttributeError:
+                    ancestry = None
+
+                if ancestry:
+                    tenant_parent_id = int(ancestry.split("/")[-1])
+                    if int(tenant_parent_id) == parent_id:
+                        return parent_tenant, tenant
+
+            return parent_tenant, None
+        else:
+            if parent:
+                parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+                if not parent_tenant_res:
+                    self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+                if len(parent_tenant_res) > 1:
+                    self.module.fail_json(msg="Multiple parent tenants found in manageiq with name '%s'" % parent)
+
+                parent_tenant = parent_tenant_res[0]
+                parent_id = int(parent_tenant['id'])
+                tenants = self.client.collections.tenants.find_by(name=name)
+
+                for tenant in tenants:
+                    try:
+                        ancestry = tenant['ancestry']
+                    except AttributeError:
+                        ancestry = None
+
+                    if ancestry:
+                        tenant_parent_id = int(ancestry.split("/")[-1])
+                        if tenant_parent_id == parent_id:
+                            return parent_tenant, tenant
+
+                return parent_tenant, None
+            else:
+                # No parent or parent_id supplied, so we select the root tenant
+                return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+    def compare_tenant(self, tenant, name, description):
+        """ Compare tenant fields with new field values.
+
+        Returns:
+            false if tenant fields have some difference from new fields, true otherwise.
+        """
+        found_difference = (
+            (name and tenant['name'] != name) or
+            (description and tenant['description'] != description)
+        )
+
+        return not found_difference
+
+    def delete_tenant(self, tenant):
+        """ Deletes a tenant from manageiq.
+
+        Returns:
+            dict with `msg` and `changed`
+        """
+        try:
+            url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+            result = self.client.post(url, action='delete')
+        except Exception as e:
+            self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+        if result['success'] is False:
+            self.module.fail_json(msg=result['message'])
+
+        return dict(changed=True, msg=result['message'])
+
+    def edit_tenant(self, tenant, name, description):
+        """ Edit a manageiq tenant.
+
+        Returns:
+            dict with `msg` and `changed`
+        """
+        resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+        # check if we need to update (compare_tenant is true if no difference found)
+        if self.compare_tenant(tenant, name, description):
+            return dict(
+                changed=False,
+                msg="tenant %s is not changed." % tenant['name'],
+                tenant=tenant['_data'])
+
+        # try to update tenant
+        try:
+            result = self.client.post(tenant['href'], action='edit', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+    def create_tenant(self, name, description, parent_tenant):
+        """ Creates the tenant in manageiq.
+
+        Returns:
+            dict with `msg`, `changed` and `tenant_id`
+        """
+        parent_id = parent_tenant['id']
+        # check for required arguments
+        for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+            if value in (None, ''):
+                self.module.fail_json(msg="missing required argument: %s" % key)
+
+        url = '%s/tenants' % self.api_url
+
+        resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+        try:
+            result = self.client.post(url, action='create', resource=resource)
+            tenant_id = result['results'][0]['id']
+        except Exception as e:
+            self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+            tenant_id=tenant_id)
+
+    def tenant_quota(self, tenant, quota_key):
+        """ Search for tenant quota object by tenant and quota_key.
+        Returns:
+            the quota for the tenant, or None if the tenant quota was not found.
+        """
+
+        tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+        return tenant_quotas['resources']
+
+    def tenant_quotas(self, tenant):
+        """ Search for tenant quotas object by tenant.
+        Returns:
+            the quotas for the tenant, or None if no tenant quotas were found.
+        """
+
+        tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+        return tenant_quotas['resources']
+
+    def update_tenant_quotas(self, tenant, quotas):
+        """ Creates, updates or deletes the tenant quotas in manageiq.
+
+        Returns:
+            dict with `msg` and `changed`
+        """
+
+        changed = False
+        messages = []
+        for quota_key, quota_value in quotas.items():
+            current_quota_filtered = self.tenant_quota(tenant, quota_key)
+            if current_quota_filtered:
+                current_quota = current_quota_filtered[0]
+            else:
+                current_quota = None
+
+            if quota_value:
+                # Convert the GB values to bytes
+                if quota_key in ['storage_allocated', 'mem_allocated']:
+                    quota_value_int = int(quota_value) * 1024 * 1024 * 1024
+                else:
+                    quota_value_int = int(quota_value)
+                if current_quota:
+                    res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+                else:
+                    res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+            else:
+                if current_quota:
+                    res = self.delete_tenant_quota(tenant, current_quota)
+                else:
+                    res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+            if res['changed']:
+                changed = True
+
+            messages.append(res['msg'])
+
+        return dict(
+            changed=changed,
+            msg=', '.join(messages))
+
+    def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+        """ Updates a tenant quota in manageiq.
+
+        Returns:
+            result
+        """
+
+        if current_quota['value'] == quota_value:
+            return dict(
+                changed=False,
+                msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+        else:
+
+            url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+            resource = {'value': quota_value}
+            try:
+                self.client.post(url, action='edit', resource=resource)
+            except Exception as e:
+                self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+            return dict(
+                changed=True,
+                msg="successfully updated tenant quota %s" % quota_key)
+
+    def create_tenant_quota(self, tenant, quota_key, quota_value):
+        """ Creates a tenant quota in manageiq.
+ + Returns: + result + """ + url = '%s/quotas' % (tenant['href']) + resource = {'name': quota_key, 'value': quota_value} + try: + self.client.post(url, action='create', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e))) + + return dict( + changed=True, + msg="successfully created tenant quota %s" % quota_key) + + def delete_tenant_quota(self, tenant, quota): + """ deletes the tenant quotas in manageiq. + + Returns: + result + """ + try: + result = self.client.post(quota['href'], action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e))) + + return dict(changed=True, msg=result['message']) + + def create_tenant_response(self, tenant, parent_tenant): + """ Creates the ansible result object from a manageiq tenant entity + + Returns: + a dict with the tenant id, name, description, parent id, + quota's + """ + tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas']) + + try: + ancestry = tenant['ancestry'] + tenant_parent_id = ancestry.split("/")[-1] + except AttributeError: + # The root tenant does not return the ancestry attribute + tenant_parent_id = None + + return dict( + id=tenant['id'], + name=tenant['name'], + description=tenant['description'], + parent_id=tenant_parent_id, + quotas=tenant_quotas + ) + + @staticmethod + def create_tenant_quotas_response(tenant_quotas): + """ Creates the ansible result object from a manageiq tenant_quotas entity + + Returns: + a dict with the applied quotas, name and value + """ + + if not tenant_quotas: + return {} + + result = {} + for quota in tenant_quotas: + if quota['unit'] == 'bytes': + value = float(quota['value']) / (1024 * 1024 * 1024) + else: + value = quota['value'] + result[quota['name']] = value + return result + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + description=dict(required=True, type='str'), + parent_id=dict(type='int'), + parent=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + quotas=dict(type='dict', default={}) + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + name = module.params['name'] + description = module.params['description'] + parent_id = module.params['parent_id'] + parent = module.params['parent'] + state = module.params['state'] + quotas = module.params['quotas'] + + manageiq = ManageIQ(module) + manageiq_tenant = ManageIQTenant(manageiq) + + parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent) + + # tenant should not exist + if state == "absent": + # if we have a tenant, delete it + if tenant: + res_args = manageiq_tenant.delete_tenant(tenant) + # if we do not have a tenant, nothing to do + else: + if parent_id: + msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id) + else: + msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent) + + res_args = dict( + changed=False, + msg=msg) + + # tenant should exist + if state == "present": + # if we have a tenant, edit it + if tenant: + res_args = manageiq_tenant.edit_tenant(tenant, name, description) + + # if we do not have a tenant, create it + else: + res_args = manageiq_tenant.create_tenant(name, description, parent_tenant) + tenant = manageiq.client.get_entity('tenants', res_args['tenant_id']) + + # quotas as supplied and 
we have a tenant
+        if quotas:
+            tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+            if tenant_quotas_res['changed']:
+                res_args['changed'] = True
+                res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+        tenant.reload(expand='resources', attributes=['tenant_quotas'])
+        res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py
deleted file mode 120000
index a8ab9071f6..0000000000
--- a/plugins/modules/manageiq_user.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/manageiq/manageiq_user.py
\ No newline at end of file
diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py
new file mode 100644
index 0000000000..c116387e65
--- /dev/null
+++ b/plugins/modules/manageiq_user.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Daniel Korn
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: manageiq_user
+
+short_description: Management of users in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+  - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - V(absent) - user should not exist,
+      - V(present) - user should be present.
+    choices: ['absent', 'present']
+    default: 'present'
+  userid:
+    type: str
+    description:
+      - The unique userid in ManageIQ, often referred to as the username.
+    required: true
+  name:
+    type: str
+    description:
+      - The user's full name.
+  password:
+    type: str
+    description:
+      - The user's password.
+  group:
+    type: str
+    description:
+      - The name of the group to which the user belongs.
+  email:
+    type: str
+    description:
+      - The user's E-mail address.
+  update_password:
+    type: str
+    default: always
+    choices: ['always', 'on_create']
+    description:
+      - V(always) updates passwords unconditionally.
+      - V(on_create) only sets the password for a newly created user.
+"""

+EXAMPLES = r"""
+- name: Create a new user in ManageIQ
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    email: 'jdoe@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!
+
+- name: Create a new user in ManageIQ using a token
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    email: 'jdoe@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: false # only do this when you trust the network!
+
+- name: Delete a user in ManageIQ
+  community.general.manageiq_user:
+    state: 'absent'
+    userid: 'jdoe'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!
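+
+# Illustrative extra example: only the given fields are sent in an edit, and
+# the module compares them against the existing user before posting; the
+# values below reuse the surrounding examples.
+- name: Update group of user in ManageIQ
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    group: 'EvmGroup-user'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false # only do this when you trust the network!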
+ +- name: Delete a user in ManageIQ using a token + community.general.manageiq_user: + state: 'absent' + userid: 'jdoe' + manageiq_connection: + url: 'http://127.0.0.1:3000' + token: 'sometoken' + validate_certs: false # only do this when you trust the network! + +- name: Update email of user in ManageIQ + community.general.manageiq_user: + userid: 'jdoe' + email: 'jaustine@example.com' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! + +- name: Update email of user in ManageIQ using a token + community.general.manageiq_user: + userid: 'jdoe' + email: 'jaustine@example.com' + manageiq_connection: + url: 'http://127.0.0.1:3000' + token: 'sometoken' + validate_certs: false # only do this when you trust the network! +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQUser(object): + """ + Object to execute user management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def group_id(self, description): + """ Search for group id by group description. + + Returns: + the group id, or send a module Fail signal if group not found. + """ + group = self.manageiq.find_collection_resource_by('groups', description=description) + if not group: # group doesn't exist + self.module.fail_json( + msg="group %s does not exist in manageiq" % (description)) + + return group['id'] + + def user(self, userid): + """ Search for user object by userid. + + Returns: + the user, or None if user not found. + """ + return self.manageiq.find_collection_resource_by('users', userid=userid) + + def compare_user(self, user, name, group_id, password, email): + """ Compare user fields with new field values. + + Returns: + false if user fields have some difference from new fields, true o/w. + """ + found_difference = ( + (name and user['name'] != name) or + (password is not None) or + (email and user['email'] != email) or + (group_id and user['current_group_id'] != group_id) + ) + + return not found_difference + + def delete_user(self, user): + """ Deletes a user from manageiq. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/users/%s' % (self.api_url, user['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e))) + + return dict(changed=True, msg=result['message']) + + def edit_user(self, user, name, group, password, email): + """ Edit a user from manageiq. + + Returns: + a short message describing the operation executed. 
+        """
+        group_id = None
+        url = '%s/users/%s' % (self.api_url, user['id'])
+
+        resource = dict(userid=user['userid'])
+        if group is not None:
+            group_id = self.group_id(group)
+            resource['group'] = dict(id=group_id)
+        if name is not None:
+            resource['name'] = name
+        if email is not None:
+            resource['email'] = email
+
+        # if there is a password param, but 'update_password' is 'on_create'
+        # then discard the password (since we're editing an existing user)
+        if self.module.params['update_password'] == 'on_create':
+            password = None
+        if password is not None:
+            resource['password'] = password
+
+        # check if we need to update (compare_user is true if no difference found)
+        if self.compare_user(user, name, group_id, password, email):
+            return dict(
+                changed=False,
+                msg="user %s is not changed." % (user['userid']))
+
+        # try to update user
+        try:
+            result = self.client.post(url, action='edit', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+    def create_user(self, userid, name, group, password, email):
+        """ Creates the user in manageiq.
+
+        Returns:
+            the created user id, name, created_on timestamp,
+            updated_on timestamp, userid and current_group_id.
+        """
+        # check for required arguments
+        for key, value in dict(name=name, group=group, password=password).items():
+            if value in (None, ''):
+                self.module.fail_json(msg="missing required argument: %s" % (key))
+
+        group_id = self.group_id(group)
+        url = '%s/users' % (self.api_url)
+
+        resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
+        if email is not None:
+            resource['email'] = email
+
+        # try to create a new user
+        try:
+            result = self.client.post(url, action='create', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully created the user %s: %s" % (userid, result['results']))
+
+
+def main():
+    argument_spec = dict(
+        userid=dict(required=True, type='str'),
+        name=dict(),
+        password=dict(no_log=True),
+        group=dict(),
+        email=dict(),
+        state=dict(choices=['absent', 'present'], default='present'),
+        update_password=dict(choices=['always', 'on_create'],
+                             default='always'),
+    )
+    # add the manageiq connection arguments to the arguments
+    argument_spec.update(manageiq_argument_spec())
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    userid = module.params['userid']
+    name = module.params['name']
+    password = module.params['password']
+    group = module.params['group']
+    email = module.params['email']
+    state = module.params['state']
+
+    manageiq = ManageIQ(module)
+    manageiq_user = ManageIQUser(manageiq)
+
+    user = manageiq_user.user(userid)
+
+    # user should not exist
+    if state == "absent":
+        # if we have a user, delete it
+        if user:
+            res_args = manageiq_user.delete_user(user)
+        # if we do not have a user, nothing to do
+        else:
+            res_args = dict(
+                changed=False,
+                msg="user %s: does not exist in manageiq" % (userid))
+
+    # user should exist
+    if state == "present":
+        # if we have a user, edit it
+        if user:
+            res_args = manageiq_user.edit_user(user, name, group, password, email)
+        # if we do not have a user, create it
+        else:
+            res_args = manageiq_user.create_user(userid, name, group, password, email)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
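
For reference, the ManageIQ modules above share one request-building pattern: assemble a `resource` dict from the module parameters, strip `None` entries, then POST it to the API. A minimal standalone sketch of the null-stripping step, mirroring the `delete_nulls()` helper defined in `manageiq_provider.py` above (the sample resource values are illustrative, not from the patch):

```python
def delete_nulls(h):
    """Recursively drop None entries from dicts and lists, as in manageiq_provider.py."""
    if isinstance(h, list):
        return [delete_nulls(i) for i in h]
    if isinstance(h, dict):
        return {k: delete_nulls(v) for k, v in h.items() if v is not None}
    return h


# A provider 'resource' as built by edit_provider()/create_provider():
resource = {
    'name': 'EngLab',
    'provider_region': None,  # unset option: must not be sent to the API
    'zone': {'id': 1},
    'connection_configurations': [
        {'endpoint': {'hostname': 'example.com', 'port': None}},
    ],
}

print(delete_nulls(resource))
# -> {'name': 'EngLab', 'zone': {'id': 1},
#     'connection_configurations': [{'endpoint': {'hostname': 'example.com'}}]}
```
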
diff --git a/plugins/modules/mas.py b/plugins/modules/mas.py deleted file mode 120000 index 91139e61af..0000000000 --- a/plugins/modules/mas.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/mas.py \ No newline at end of file diff --git a/plugins/modules/mas.py b/plugins/modules/mas.py new file mode 100644 index 0000000000..2e851f9ab6 --- /dev/null +++ b/plugins/modules/mas.py @@ -0,0 +1,307 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Lukas Bestle +# Copyright (c) 2017, Michael Heap +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: mas +short_description: Manage Mac App Store applications with mas-cli +description: + - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). +version_added: '0.2.0' +author: + - Michael Heap (@mheap) + - Lukas Bestle (@lukasbestle) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The Mac App Store identifier of the app(s) you want to manage. + - This can be found by running C(mas search APP_NAME) on your machine. + type: list + elements: int + state: + description: + - Desired state of the app installation. + - The V(absent) value requires root permissions, also see the examples. + type: str + choices: + - absent + - latest + - present + default: present + upgrade_all: + description: + - Upgrade all installed Mac App Store apps. + type: bool + default: false + aliases: ["upgrade"] +requirements: + - macOS 10.11 or higher. + - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" + - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). + - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+. + - Users need to sign in to the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417). 
+""" + +EXAMPLES = r""" +- name: Install Keynote + community.general.mas: + id: 409183694 + state: present + +- name: Install Divvy with command mas installed in /usr/local/bin + community.general.mas: + id: 413857545 + state: present + environment: + PATH: /usr/local/bin:{{ ansible_facts.env.PATH }} + +- name: Install a list of apps + community.general.mas: + id: + - 409183694 # Keynote + - 413857545 # Divvy + state: present + +- name: Ensure the latest Keynote version is installed + community.general.mas: + id: 409183694 + state: latest + +- name: Upgrade all installed Mac App Store apps + community.general.mas: + upgrade_all: true + +- name: Install specific apps and also upgrade all others + community.general.mas: + id: + - 409183694 # Keynote + - 413857545 # Divvy + state: present + upgrade_all: true + +- name: Uninstall Divvy + community.general.mas: + id: 413857545 + state: absent + become: true # Uninstallation requires root permissions +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule +import os + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +import platform +NOT_WORKING_MAC_VERSION_MAS_ACCOUNT = '12.0' + + +class Mas(object): + + def __init__(self, module): + self.module = module + + # Initialize data properties + self.mas_path = self.module.get_bin_path('mas') + self._checked_signin = False + self._mac_version = platform.mac_ver()[0] or '0.0' + self._installed = None # Populated only if needed + self._outdated = None # Populated only if needed + self.count_install = 0 + self.count_upgrade = 0 + self.count_uninstall = 0 + self.result = { + 'changed': False + } + + self.check_mas_tool() + + def app_command(self, command, id): + ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' ''' + + if not self.module.check_mode: + if command != 'uninstall': + self.check_signin() + + rc, out, err = self.run([command, str(id)]) + if rc != 0: + self.module.fail_json( + msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip()) + ) + + # No error or dry run + self.__dict__['count_' + command] += 1 + + def check_mas_tool(self): + ''' Verifies that the `mas` tool is available in a recent version ''' + + # Is the `mas` tool available at all? + if not self.mas_path: + self.module.fail_json(msg='Required `mas` tool is not installed') + + # Is the version recent enough? 
+ rc, out, err = self.run(['version']) + if rc != 0 or not out.strip() or LooseVersion(out.strip()) < LooseVersion('1.5.0'): + self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip()) + + def check_signin(self): + ''' Verifies that the user is signed in to the Mac App Store ''' + # Only check this once per execution + if self._checked_signin: + return + if LooseVersion(self._mac_version) >= LooseVersion(NOT_WORKING_MAC_VERSION_MAS_ACCOUNT): + # Checking if user is signed-in is disabled due to https://github.com/mas-cli/mas/issues/417 + self.module.log('WARNING: You must be signed in via the Mac App Store GUI beforehand else error will occur') + else: + rc, out, err = self.run(['account']) + if out.split("\n", 1)[0].rstrip() == 'Not signed in': + self.module.fail_json(msg='You must be signed in to the Mac App Store') + + self._checked_signin = True + + def exit(self): + ''' Exit with the data we have collected over time ''' + + msgs = [] + if self.count_install > 0: + msgs.append('Installed {0} app(s)'.format(self.count_install)) + if self.count_upgrade > 0: + msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade)) + if self.count_uninstall > 0: + msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall)) + + if msgs: + self.result['changed'] = True + self.result['msg'] = ', '.join(msgs) + + self.module.exit_json(**self.result) + + def get_current_state(self, command): + ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' ''' + + rc, raw_apps, err = self.run([command]) + rows = raw_apps.split("\n") + if rows[0] == "No installed apps found": + rows = [] + apps = [] + for r in rows: + # Format: "123456789 App Name" + r = r.split(' ', 1) + if len(r) == 2: + apps.append(int(r[0])) + + return apps + + def installed(self): + ''' Returns the list of installed apps ''' + + # Populate cache if not already done + if self._installed is None: + self._installed = self.get_current_state('list') + + return self._installed + + def is_installed(self, id): + ''' Checks whether the given app is installed ''' + + return int(id) in self.installed() + + def is_outdated(self, id): + ''' Checks whether the given app is installed, but outdated ''' + + return int(id) in self.outdated() + + def outdated(self): + ''' Returns the list of installed, but outdated apps ''' + + # Populate cache if not already done + if self._outdated is None: + self._outdated = self.get_current_state('outdated') + + return self._outdated + + def run(self, cmd): + ''' Runs a command of the `mas` tool ''' + + cmd.insert(0, self.mas_path) + return self.module.run_command(cmd, False) + + def upgrade_all(self): + ''' Upgrades all installed apps and sets the correct result data ''' + + outdated = self.outdated() + + if not self.module.check_mode: + self.check_signin() + + rc, out, err = self.run(['upgrade']) + if rc != 0: + self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip()) + + self.count_upgrade += len(outdated) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='list', elements='int'), + state=dict(type='str', default='present', choices=['absent', 'latest', 'present']), + upgrade_all=dict(type='bool', default=False, aliases=['upgrade']), + ), + supports_check_mode=True + ) + mas = Mas(module) + + if module.params['id']: + apps = module.params['id'] + else: + apps = [] + + state = module.params['state'] + upgrade = module.params['upgrade_all'] + + # Run operations on the given app IDs + for app in sorted(set(apps)): + if 
state == 'present': + if not mas.is_installed(app): + mas.app_command('install', app) + + elif state == 'absent': + if mas.is_installed(app): + # Ensure we are root + if os.getuid() != 0: + module.fail_json(msg="Uninstalling apps requires root permissions ('become: true')") + + mas.app_command('uninstall', app) + + elif state == 'latest': + if not mas.is_installed(app): + mas.app_command('install', app) + elif mas.is_outdated(app): + mas.app_command('upgrade', app) + + # Upgrade all apps if requested + mas._outdated = None # Clear cache + if upgrade and mas.outdated(): + mas.upgrade_all() + + # Exit with the collected data + mas.exit() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/matrix.py b/plugins/modules/matrix.py deleted file mode 120000 index e6c9e29a45..0000000000 --- a/plugins/modules/matrix.py +++ /dev/null @@ -1 +0,0 @@ -./notification/matrix.py \ No newline at end of file diff --git a/plugins/modules/matrix.py b/plugins/modules/matrix.py new file mode 100644 index 0000000000..5b643357f5 --- /dev/null +++ b/plugins/modules/matrix.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# coding: utf-8 + +# Copyright (c) 2018, Jan Christian Grünhage +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: "Jan Christian Grünhage (@jcgruenhage)" +module: matrix +short_description: Send notifications to matrix +description: + - This module sends HTML formatted notifications to matrix rooms. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + msg_plain: + type: str + description: + - Plain text form of the message to send to matrix, usually markdown. + required: true + msg_html: + type: str + description: + - HTML form of the message to send to matrix. + required: true + room_id: + type: str + description: + - ID of the room to send the notification to. + required: true + hs_url: + type: str + description: + - URL of the homeserver, where the CS-API is reachable. + required: true + token: + type: str + description: + - Authentication token for the API call. If provided, O(user_id) and O(password) are not required. + user_id: + type: str + description: + - The user ID of the user. + password: + type: str + description: + - The password to log in with. 
+requirements:
+  - matrix-client (Python library)
+"""
+
+EXAMPLES = r"""
+- name: Send matrix notification with token
+  community.general.matrix:
+    msg_plain: "**hello world**"
+    msg_html: "<b>hello world</b>"
+    room_id: "!12345678:server.tld"
+    hs_url: "https://matrix.org"
+    token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+  community.general.matrix:
+    msg_plain: "**hello world**"
+    msg_html: "<b>hello world</b>"
+    room_id: "!12345678:server.tld"
+    hs_url: "https://matrix.org"
+    user_id: "ansible_notification_bot"
+    password: "{{ matrix_auth_password }}"
+"""
+
+RETURN = r"""
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+    from matrix_client.client import MatrixClient
+except ImportError:
+    MATRIX_IMP_ERR = traceback.format_exc()
+    matrix_found = False
+else:
+    matrix_found = True
+
+
+def run_module():
+    module_args = dict(
+        msg_plain=dict(type='str', required=True),
+        msg_html=dict(type='str', required=True),
+        room_id=dict(type='str', required=True),
+        hs_url=dict(type='str', required=True),
+        token=dict(type='str', no_log=True),
+        user_id=dict(type='str'),
+        password=dict(type='str', no_log=True),
+    )
+
+    result = dict(
+        changed=False,
+        message=''
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        mutually_exclusive=[['password', 'token']],
+        required_one_of=[['password', 'token']],
+        required_together=[['user_id', 'password']],
+        supports_check_mode=True
+    )
+
+    if not matrix_found:
+        module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+    if module.check_mode:
+        # nothing is sent in check mode; report the unchanged result instead of returning silently.
+        module.exit_json(**result)
+
+    # create a client object
+    client = MatrixClient(module.params['hs_url'])
+    if module.params['token'] is not None:
+        client.api.token = module.params['token']
+    else:
+        client.login(module.params['user_id'], module.params['password'], sync=False)
+
+    # make sure we are in a given room and return a room object for it
+    room = client.join_room(module.params['room_id'])
+    # send an HTML formatted message
+    room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py
deleted file mode 120000
index 87b7844716..0000000000
--- a/plugins/modules/mattermost.py
+++ /dev/null
@@ -1 +0,0 @@
-./notification/mattermost.py
\ No newline at end of file
diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py
new file mode 100644
index 0000000000..7739d62851
--- /dev/null
+++ b/plugins/modules/mattermost.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+
+# Copyright (c) Benjamin Jolivot
+# Inspired by slack module:
+# # Copyright (c) 2017, Steve Pletcher
+# # Copyright (c) 2016, René Moser
+# # Copyright (c) 2015, Stefan Berggren
+# # Copyright (c) 2014, Ramon de la Fuente )
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+  - Sends notifications to U(http://your.mattermost.url) using the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + url: + type: str + description: + - Mattermost URL (for example V(http://mattermost.yourcompany.com)). + required: true + api_key: + type: str + description: + - Mattermost webhook API key. Log into your Mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming + Webhook. This gives you a full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY). + required: true + text: + type: str + description: + - Text to send. Note that the module does not handle escaping characters. + - Required when O(attachments) is not set. + attachments: + type: list + elements: dict + description: + - Define a list of attachments. + - For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/). + - Required when O(text) is not set. + version_added: 4.3.0 + channel: + type: str + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the O(api_key). + username: + type: str + description: + - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc). + default: Ansible + icon_url: + type: str + description: + - URL for the message sender's icon. + default: https://docs.ansible.com/favicon.ico + priority: + type: str + description: + - Set a priority for the message. + choices: [important, urgent] + version_added: 10.0.0 + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + default: true + type: bool +""" + +EXAMPLES = r""" +- name: Send notification message via Mattermost + community.general.mattermost: + url: http://mattermost.example.com + api_key: my_api_key + text: '{{ inventory_hostname }} completed' + +- name: Send notification message via Mattermost all options + community.general.mattermost: + url: http://mattermost.example.com + api_key: my_api_key + text: '{{ inventory_hostname }} completed' + channel: notifications + username: 'Ansible on {{ inventory_hostname }}' + icon_url: http://www.example.com/some-image-file.png + priority: important + +- name: Send attachments message via Mattermost + community.general.mattermost: + url: http://mattermost.example.com + api_key: my_api_key + attachments: + - text: Display my system load on host A and B + color: '#ff00dd' + title: System load + fields: + - title: System A + value: "load average: 0,74, 0,66, 0,63" + short: true + - title: System B + value: 'load average: 5,16, 4,64, 2,43' + short: true +""" + +RETURN = r""" +payload: + description: Mattermost payload. + returned: success + type: str +webhook_url: + description: URL the webhook is sent to. 
+  returned: success
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+    module = AnsibleModule(
+        supports_check_mode=True,
+        argument_spec=dict(
+            url=dict(type='str', required=True),
+            api_key=dict(type='str', required=True, no_log=True),
+            text=dict(type='str'),
+            channel=dict(type='str'),
+            username=dict(type='str', default='Ansible'),
+            icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
+            priority=dict(type='str', choices=['important', 'urgent']),
+            validate_certs=dict(default=True, type='bool'),
+            attachments=dict(type='list', elements='dict'),
+        ),
+        required_one_of=[
+            ('text', 'attachments'),
+        ],
+    )
+    # init return dict
+    result = dict(changed=False, msg="OK")
+
+    # define webhook
+    webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+    result['webhook_url'] = webhook_url
+
+    # define payload
+    payload = {}
+    for param in ['text', 'channel', 'username', 'icon_url', 'attachments']:
+        if module.params[param] is not None:
+            payload[param] = module.params[param]
+    if module.params['priority'] is not None:
+        payload['priority'] = {'priority': module.params['priority']}
+
+    payload = module.jsonify(payload)
+    result['payload'] = payload
+
+    # http headers
+    headers = {
+        'Content-Type': 'application/json',
+        'Accept': 'application/json',
+    }
+
+    # Note: nothing is sent in check mode, and since the webhook cannot be
+    # validated without actually posting to it, the task also passes even if
+    # the server is down or the API key is invalid.
+
+    # send request if not in check mode
+    if not module.check_mode:
+        response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+        # fail if Mattermost did not accept the payload
+        if info['status'] != 200:
+            result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+            module.fail_json(**result)
+
+    # Looks good
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py
deleted file mode 120000
index 0d147a73a4..0000000000
--- a/plugins/modules/maven_artifact.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/language/maven_artifact.py
\ No newline at end of file
diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py
new file mode 100644
index 0000000000..9b0d787a3e
--- /dev/null
+++ b/plugins/modules/maven_artifact.py
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+
+# Copyright (c) 2014, Chris Schmidt
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+  - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+  - Can retrieve snapshots or release versions of the artifact, and resolves the latest available version if none is specified.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+  - lxml
+  - boto3 if using an S3 repository (V(s3://...))
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  group_id:
+    type: str
+    description:
+      - The Maven groupId coordinate.
+    required: true
+  artifact_id:
+    type: str
+    description:
+      - The maven artifactId coordinate.
+    required: true
+  version:
+    type: str
+    description:
+      - The maven version coordinate.
+      - Mutually exclusive with O(version_by_spec).
+  version_by_spec:
+    type: str
+    description:
+      - The maven dependency version ranges.
+      - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution).
+      - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported.
+      - Mutually exclusive with O(version).
+    version_added: '0.2.0'
+  classifier:
+    type: str
+    description:
+      - The maven classifier coordinate.
+    default: ''
+  extension:
+    type: str
+    description:
+      - The maven type/extension coordinate.
+    default: jar
+  repository_url:
+    type: str
+    description:
+      - The URL of the Maven Repository to download from.
+      - Use V(s3://...) if the repository is hosted on Amazon S3.
+      - Use V(file://...) if the repository is local.
+    default: https://repo1.maven.org/maven2
+  username:
+    type: str
+    description:
+      - The username to authenticate as to the Maven Repository. Use the AWS access key ID if the repository is hosted on S3.
+    aliases: ["aws_secret_key"]
+  password:
+    type: str
+    description:
+      - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted
+        on S3.
+    aliases: ["aws_secret_access_key"]
+  headers:
+    description:
+      - Add custom HTTP headers to a request in hash/dict format.
+    type: dict
+  force_basic_auth:
+    description:
+      - C(httplib2), the library used by the URI module, only sends authentication information when a webservice responds to
+        an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins fail. This
+        option forces the sending of the Basic authentication header upon initial request.
+    default: false
+    type: bool
+    version_added: '0.2.0'
+  dest:
+    type: path
+    description:
+      - The path where the artifact should be written to.
+      - If file mode or ownerships are specified and the destination path already exists, they affect the downloaded file.
+    required: true
+  state:
+    type: str
+    description:
+      - The desired state of the artifact.
+    default: present
+    choices: [present, absent]
+  timeout:
+    type: int
+    description:
+      - Specifies a timeout in seconds for the connection attempt.
+    default: 10
+  validate_certs:
+    description:
+      - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists.
+    type: bool
+    default: true
+  client_cert:
+    description:
+      - PEM formatted certificate chain file to be used for SSL client authentication.
+    - This file can also include the key as well, and if the key is included, O(client_key) is not required.
+    type: path
+    version_added: '1.3.0'
+  client_key:
+    description:
+      - PEM formatted file that contains your private key to be used for SSL client authentication.
+      - If O(client_cert) contains both the certificate and key, this option is not required.
+    type: path
+    version_added: '1.3.0'
+  keep_name:
+    description:
+      - If V(true), the downloaded artifact's name is preserved, in other words the version number remains part of it.
+      - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) is
+        defined.
+    type: bool
+    default: false
+  verify_checksum:
+    type: str
+    description:
+      - If V(never), the MD5/SHA1 checksum is never downloaded and verified.
+      - If V(download), the MD5/SHA1 checksum is downloaded and verified only after artifact download. This is the default.
+      - If V(change), the MD5/SHA1 checksum is downloaded and verified if the destination already exists, to verify if they
+        are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact,
+        and since some repository software, when acting as a proxy/cache, returns a 404 error if the artifact has not been
+        cached yet, it may fail unexpectedly. If you still need it, you should consider using V(always) instead - if you deal
+        with a checksum, it is better to use it to verify integrity after download.
+      - V(always) combines V(download) and V(change).
+    required: false
+    default: 'download'
+    choices: ['never', 'download', 'change', 'always']
+  checksum_alg:
+    type: str
+    description:
+      - If V(md5), checksums use the MD5 algorithm. This is the default.
+      - If V(sha1), checksums use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms,
+        since MD5 is blocked on such systems.
+    default: 'md5'
+    choices: ['md5', 'sha1']
+    version_added: 3.2.0
+  unredirected_headers:
+    type: list
+    elements: str
+    version_added: 5.2.0
+    description:
+      - A list of headers that should not be included in the redirection. These headers are sent to the C(fetch_url) function.
+      - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]).
+      - Useful if the redirection URL does not need to have sensitive headers in the request.
+      - Requires ansible-core version 2.12 or later.
+  directory_mode:
+    type: str
+    description:
+      - Filesystem permission mode applied recursively to O(dest) when it is a directory.
+extends_documentation_fragment:
+  - ansible.builtin.files
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+  community.general.maven_artifact:
+    group_id: junit
+    artifact_id: junit
+    dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+  community.general.maven_artifact:
+    group_id: junit
+    artifact_id: junit
+    version: 4.11
+    dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: library-name
+    repository_url: 'https://repo.company.com/maven'
+    username: user
+    password: pass
+    dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: library-name
+    repository_url: 'https://repo.company.com/maven'
+    client_cert: /path/to/cert.pem
+    client_key: /path/to/key.pem
+    dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: web-app
+    extension: war
+    repository_url: 'https://repo.company.com/maven'
+    dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e.
retain the version + community.general.maven_artifact: + version: latest + artifact_id: spring-core + group_id: org.springframework + dest: /tmp/ + keep_name: true + +- name: Download the latest version of the JUnit framework artifact from Maven local + community.general.maven_artifact: + group_id: junit + artifact_id: junit + dest: /tmp/junit-latest.jar + repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository" + +- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central + community.general.maven_artifact: + group_id: junit + artifact_id: junit + version_by_spec: "[3.8,4.0)" + dest: /tmp/ +""" + +import hashlib +import os +import posixpath +import shutil +import io +import tempfile +import traceback +import re + +from ansible.module_utils.ansible_release import __version__ as ansible_version +from re import match +from urllib.parse import urlparse + +LXML_ETREE_IMP_ERR = None +try: + from lxml import etree + HAS_LXML_ETREE = True +except ImportError: + LXML_ETREE_IMP_ERR = traceback.format_exc() + HAS_LXML_ETREE = False + +BOTO_IMP_ERR = None +try: + import boto3 + HAS_BOTO = True +except ImportError: + BOTO_IMP_ERR = traceback.format_exc() + HAS_BOTO = False + +SEMANTIC_VERSION_IMP_ERR = None +try: + from semantic_version import Version, Spec + HAS_SEMANTIC_VERSION = True +except ImportError: + SEMANTIC_VERSION_IMP_ERR = traceback.format_exc() + HAS_SEMANTIC_VERSION = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text + + +def split_pre_existing_dir(dirname): + ''' + Return the first pre-existing directory and a list of the new directories that will be created. 
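+    For example, if only /a exists, splitting /a/b/c returns ('/a', ['b', 'c']).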
+ ''' + head, tail = os.path.split(dirname) + b_head = to_bytes(head, errors='surrogate_or_strict') + if not os.path.exists(b_head): + if head == dirname: + return None, [head] + else: + (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) + else: + return head, [tail] + new_directory_list.append(tail) + return pre_existing_dir, new_directory_list + + +def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): + ''' + Walk the new directories list and make sure that permissions are as we would expect + ''' + if new_directory_list: + first_sub_dir = new_directory_list.pop(0) + if not pre_existing_dir: + working_dir = first_sub_dir + else: + working_dir = os.path.join(pre_existing_dir, first_sub_dir) + directory_args['path'] = working_dir + changed = module.set_fs_attributes_if_different(directory_args, changed) + changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) + return changed + + +class Artifact(object): + def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'): + if not group_id: + raise ValueError("group_id must be set") + if not artifact_id: + raise ValueError("artifact_id must be set") + + self.group_id = group_id + self.artifact_id = artifact_id + self.version = version + self.version_by_spec = version_by_spec + self.classifier = classifier + + if not extension: + self.extension = "jar" + else: + self.extension = extension + + def is_snapshot(self): + return self.version and self.version.endswith("SNAPSHOT") + + def path(self, with_version=True): + base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id) + if with_version and self.version: + timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version) + if timestamp_version_match: + base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT") + else: + base = posixpath.join(base, self.version) + return base + + def _generate_filename(self): + filename = self.artifact_id + "-" + self.classifier + "." + self.extension + if not self.classifier: + filename = self.artifact_id + "." 
 + self.extension
+        return filename
+
+    def get_filename(self, filename=None):
+        if not filename:
+            filename = self._generate_filename()
+        elif os.path.isdir(filename):
+            filename = os.path.join(filename, self._generate_filename())
+        return filename
+
+    def __str__(self):
+        result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+        if self.classifier:
+            result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+        elif self.extension != "jar":
+            result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+        return result
+
+    @staticmethod
+    def parse(input):
+        parts = input.split(":")
+        if len(parts) >= 3:
+            g = parts[0]
+            a = parts[1]
+            v = parts[-1]
+            t = None
+            c = None
+            if len(parts) == 4:
+                t = parts[2]
+            if len(parts) == 5:
+                t = parts[2]
+                c = parts[3]
+            return Artifact(g, a, v, c, t)
+        else:
+            return None
+
+
+class MavenDownloader:
+    def __init__(self, module, base, local=False, headers=None):
+        self.module = module
+        if base.endswith("/"):
+            base = base.rstrip("/")
+        self.base = base
+        self.local = local
+        self.headers = headers
+        self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+        self.latest_version_found = None
+        self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+    def find_version_by_spec(self, artifact):
+        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+        xml = etree.fromstring(content)
+        original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+        versions = []
+        for version in original_versions:
+            try:
+                versions.append(Version.coerce(version))
+            except ValueError:
+                # the version string is not valid semantic versioning; skip it
+                pass
+
+        parse_versions_syntax = {
+            # example -> (,1.0]
+            r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+            # example -> 1.0
+            r"^(?P<version>[0-9.]*)$": "~={version}",
+            # example -> [1.0]
+            r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+            # example -> [1.2, 1.3]
+            r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+            # example -> [1.2, 1.3)
+            r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+            # example -> [1.5,)
+            r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+        }
+
+        for regex, spec_format in parse_versions_syntax.items():
+            regex_result = match(regex, artifact.version_by_spec)
+            if regex_result:
+                spec = Spec(spec_format.format(**regex_result.groupdict()))
+                selected_version = spec.select(versions)
+
+                if not selected_version:
+                    raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+                # Deal with repositories that omit the patch number on the first build (e.g. 3.8 instead of 3.8.0)
+                if str(selected_version) not in original_versions:
+                    selected_version.patch = None
+
+                return str(selected_version)
+
+        raise ValueError("The spec version {0} is not supported!".format(artifact.version_by_spec))
+
+    def find_latest_version_available(self, artifact):
+        if self.latest_version_found:
+            return self.latest_version_found
+        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+        xml = etree.fromstring(content)
+        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+        if v:
+            self.latest_version_found = v[0]
+            return v[0]
+
+    def find_uri_for_artifact(self, artifact):
+        if artifact.version_by_spec:
+            artifact.version = self.find_version_by_spec(artifact)
+
+        if artifact.version == "latest":
+            artifact.version = self.find_latest_version_available(artifact)
+
+        if artifact.is_snapshot():
+            if self.local:
+                return self._uri_for_artifact(artifact, artifact.version)
+            path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+            content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+            xml = etree.fromstring(content)
+
+            for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+                classifier = snapshotArtifact.xpath("classifier/text()")
+                artifact_classifier = classifier[0] if classifier else ''
+                extension = snapshotArtifact.xpath("extension/text()")
+                artifact_extension = extension[0] if extension else ''
+                if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+                    return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+            timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+            if timestamp_xmlpath:
+                timestamp = timestamp_xmlpath[0]
+                build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+                return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+        return self._uri_for_artifact(artifact, artifact.version)
+
+    def _uri_for_artifact(self, artifact, version=None):
+        if artifact.is_snapshot() and not version:
+            raise ValueError("Expected a unique version for snapshot artifact " + str(artifact))
+        elif not artifact.is_snapshot():
+            version = artifact.version
+        if artifact.classifier:
+            return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+        return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+    # for small files, directly get the full content
+    def _getContent(self, url, failmsg, force=True):
+        if self.local:
+            parsed_url = urlparse(url)
+            if os.path.isfile(parsed_url.path):
+                with io.open(parsed_url.path, 'rb') as f:
+                    return f.read()
+            if force:
+                raise ValueError(failmsg + " because it cannot find file: " + url)
+            return None
+        response = self._request(url, failmsg, force)
+        if response:
+            return response.read()
+        return None
+
+    # only for HTTP request
+    def _request(self, url, failmsg, force=True):
+        url_to_use = url
+        parsed_url = urlparse(url)
+
+        if parsed_url.scheme == 's3':
+            parsed_url = urlparse(url)
+            bucket_name = parsed_url.netloc
+            key_name = parsed_url.path[1:]
+            client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+            url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+        req_timeout = self.module.params.get('timeout')
+
+        # Hack to add parameters in the way that fetch_url expects
+        self.module.params['url_username'] = self.module.params.get('username', '')
+        self.module.params['url_password'] = self.module.params.get('password', '')
+        self.module.params['http_agent'] = self.user_agent
+
+        kwargs = {}
+        if self.module.params['unredirected_headers']:
+            kwargs['unredirected_headers'] = self.module.params['unredirected_headers']
+
+        response, info = fetch_url(
+            self.module,
+            url_to_use,
+            timeout=req_timeout,
+            headers=self.headers,
+            **kwargs
+        )
+
+        if info['status'] == 200:
+            return response
+        if force:
+            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
+        return None
+
+    def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'):
+        if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+            artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+                                artifact.classifier, artifact.extension)
+        url = self.find_uri_for_artifact(artifact)
+        tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+        try:
+            # copy to temp file
+            if self.local:
+                parsed_url = urlparse(url)
+                if os.path.isfile(parsed_url.path):
+                    shutil.copy2(parsed_url.path, tempname)
+                else:
+                    return "Cannot find local file: " + parsed_url.path
+            else:
+                response = self._request(url, "Failed to download artifact " + str(artifact))
+                with os.fdopen(tempfd, 'wb') as f:
+                    shutil.copyfileobj(response, f)
+
+            if verify_download:
+                invalid_checksum = self.is_invalid_checksum(tempname, url, checksum_alg)
+                if invalid_checksum:
+                    # if verify_change was set, the previous file would be deleted
+                    os.remove(tempname)
+                    return invalid_checksum
+        except Exception as e:
+            os.remove(tempname)
+            raise e
+
+        # all good, now copy temp file to target
+        shutil.move(tempname, artifact.get_filename(filename))
+        return None
+
+    def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'):
+        if os.path.exists(file):
+            local_checksum = self._local_checksum(checksum_alg, file)
+            if self.local:
+                parsed_url = urlparse(remote_url)
+                remote_checksum = self._local_checksum(checksum_alg, parsed_url.path)
+            else:
+                try:
+                    remote_checksum = to_text(self._getContent(remote_url + '.'
+ checksum_alg, "Failed to retrieve checksum", False), errors='strict') + except UnicodeError as e: + return "Cannot retrieve a valid %s checksum from %s: %s" % (checksum_alg, remote_url, to_native(e)) + if not remote_checksum: + return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) + try: + # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename + _remote_checksum = remote_checksum.split(None, 1)[0] + remote_checksum = _remote_checksum + # remote_checksum is empty so we continue and keep original checksum string + # This should not happen since we check for remote_checksum before + except IndexError: + pass + if local_checksum.lower() == remote_checksum.lower(): + return None + else: + return "Checksum does not match: we computed " + local_checksum + " but the repository states " + remote_checksum + + return "Path does not exist: " + file + + def _local_checksum(self, checksum_alg, file): + if checksum_alg.lower() == 'md5': + hash = hashlib.md5() + elif checksum_alg.lower() == 'sha1': + hash = hashlib.sha1() + else: + raise ValueError("Unknown checksum_alg %s" % checksum_alg) + with io.open(file, 'rb') as f: + for chunk in iter(lambda: f.read(8192), b''): + hash.update(chunk) + return hash.hexdigest() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + group_id=dict(required=True), + artifact_id=dict(required=True), + version=dict(), + version_by_spec=dict(), + classifier=dict(default=''), + extension=dict(default='jar'), + repository_url=dict(default='https://repo1.maven.org/maven2'), + username=dict(aliases=['aws_secret_key']), + password=dict(no_log=True, aliases=['aws_secret_access_key']), + headers=dict(type='dict'), + force_basic_auth=dict(default=False, type='bool'), + state=dict(default="present", choices=["present", "absent"]), + timeout=dict(default=10, type='int'), + dest=dict(type="path", required=True), + validate_certs=dict(default=True, type='bool'), + client_cert=dict(type="path"), + client_key=dict(type="path"), + keep_name=dict(default=False, type='bool'), + verify_checksum=dict(default='download', choices=['never', 'download', 'change', 'always']), + checksum_alg=dict(default='md5', choices=['md5', 'sha1']), + unredirected_headers=dict(type='list', elements='str'), + directory_mode=dict(type='str'), + ), + add_file_common_args=True, + mutually_exclusive=([('version', 'version_by_spec')]) + ) + + if module.params['unredirected_headers'] is None: + # if the user did not supply unredirected params, we use the default + module.params['unredirected_headers'] = ['Authorization', 'Cookie'] + + if not HAS_LXML_ETREE: + module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) + + if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION: + module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR) + + repository_url = module.params["repository_url"] + if not repository_url: + repository_url = "https://repo1.maven.org/maven2" + try: + parsed_url = urlparse(repository_url) + except AttributeError as e: + module.fail_json(msg='url parsing went wrong %s' % e) + + local = parsed_url.scheme == "file" + + if parsed_url.scheme == 's3' and not HAS_BOTO: + module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'), + exception=BOTO_IMP_ERR) + + group_id = module.params["group_id"] + artifact_id = module.params["artifact_id"] + version = module.params["version"] + version_by_spec = module.params["version_by_spec"] + classifier = 
module.params["classifier"] + extension = module.params["extension"] + headers = module.params['headers'] + state = module.params["state"] + dest = module.params["dest"] + b_dest = to_bytes(dest, errors='surrogate_or_strict') + keep_name = module.params["keep_name"] + verify_checksum = module.params["verify_checksum"] + verify_download = verify_checksum in ['download', 'always'] + verify_change = verify_checksum in ['change', 'always'] + checksum_alg = module.params["checksum_alg"] + + downloader = MavenDownloader(module, repository_url, local, headers) + + if not version_by_spec and not version: + version = "latest" + + try: + artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + changed = False + prev_state = "absent" + + if dest.endswith(os.sep): + b_dest = to_bytes(dest, errors='surrogate_or_strict') + if not os.path.exists(b_dest): + (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest) + os.makedirs(b_dest) + directory_args = module.load_file_common_arguments(module.params) + directory_mode = module.params["directory_mode"] + if directory_mode is not None: + directory_args['mode'] = directory_mode + else: + directory_args['mode'] = None + changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) + + if os.path.isdir(b_dest): + version_part = version + if version == 'latest': + version_part = downloader.find_latest_version_available(artifact) + elif version_by_spec: + version_part = downloader.find_version_by_spec(artifact) + + filename = "{artifact_id}{version_part}{classifier}.{extension}".format( + artifact_id=artifact_id, + version_part="-{0}".format(version_part) if keep_name else "", + classifier="-{0}".format(classifier) if classifier else "", + extension=extension + ) + dest = posixpath.join(dest, filename) + + b_dest = to_bytes(dest, errors='surrogate_or_strict') + + if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)): + prev_state = "present" + + if prev_state == "absent": + try: + download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest, checksum_alg) + if download_error is None: + changed = True + else: + module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + file_args = module.load_file_common_arguments(module.params, path=dest) + changed = module.set_fs_attributes_if_different(file_args, changed) + if changed: + module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, + extension=extension, repository_url=repository_url, changed=changed) + else: + module.exit_json(state=state, dest=dest, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py deleted file mode 120000 index bc0c5166d1..0000000000 --- a/plugins/modules/memset_dns_reload.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/memset/memset_dns_reload.py \ No newline at end of file diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py new file mode 100644 index 0000000000..e7c9c70ea4 --- /dev/null +++ b/plugins/modules/memset_dns_reload.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Simon Weald +# GNU General 
Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure
+notes:
+  - DNS reload requests are a best-effort service provided by Memset; these generally happen every 15 minutes by default,
+    however you can request an immediate reload if later tasks rely on the records being created. An API key generated using
+    the Memset customer control panel is required with the following minimum scope - C(dns.reload). If you wish to poll the
+    job status to wait until the reload has completed, then C(job.status) is also required.
+description:
+  - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  api_key:
+    required: true
+    type: str
+    description:
+      - The API key obtained from the Memset control panel.
+  poll:
+    default: false
+    type: bool
+    description:
+      - If V(true), it polls the reload job's status and returns when the job has completed (unless the 30 second timeout
+        is reached first). If the timeout is reached then the task does not return as failed, but stderr indicates that
+        the polling failed.
+"""

+EXAMPLES = r"""
+- name: Submit DNS reload and poll
+  community.general.memset_dns_reload:
+    api_key: 5eb86c9196ab03919abcf03857163741
+    poll: true
+  delegate_to: localhost
+"""
+
+RETURN = r"""
+memset_api:
+  description: Raw response from the Memset API.
+  returned: always
+  type: complex
+  contains:
+    error:
+      description: Whether the job ended in error state.
+      returned: always
+      type: bool
+      sample: true
+    finished:
+      description: Whether the job completed before the result was returned.
+      returned: always
+      type: bool
+      sample: true
+    id:
+      description: Job ID.
+      returned: always
+      type: str
+      sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+    status:
+      description: Job status.
+      returned: always
+      type: str
+      sample: "DONE"
+    type:
+      description: Job type.
+      returned: always
+      type: str
+      sample: "dns"
+"""
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+    '''
+    We poll the `job.status` endpoint every 5 seconds up to a
+    maximum of 6 times. This is a relatively arbitrary choice of
+    timeout; however, requests rarely take longer than 15 seconds
+    to complete.
+    '''
+    memset_api, stderr, msg = None, None, None
+    payload['id'] = job_id
+
+    api_method = 'job.status'
+    _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+    # poll until the job finishes or the retry budget is exhausted, matching the docstring above.
+    counter = 0
+    while not response.json()['finished'] and counter < 6:
+        sleep(5)
+        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+        counter += 1
+    if response.json()['error']:
+        # the reload job was submitted but polling failed. Don't return this as an overall task failure.
+        stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
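+        # the stderr value is handed back to reload_dns() and surfaced in the module's return data.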
+ else: + memset_api = response.json() + msg = None + + return memset_api, msg, stderr + + +def reload_dns(args=None): + ''' + DNS reloads are a single API call and therefore there's not much + which can go wrong outside of auth errors. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + memset_api, msg, stderr = None, None, None + + api_method = 'dns.reload' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + if response.status_code is not None: + retvals['memset_api'] = response.json() + else: + retvals['stderr'] = response.stderr + retvals['msg'] = msg + return retvals + + # set changed to true if the reload request was accepted. + has_changed = True + memset_api = msg + # empty msg var as we don't want to return the API's json response twice. + msg = None + + if args['poll']: + # hand off to the poll function. + job_id = response.json()['id'] + memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload) + + # assemble return variables. + retvals['failed'] = has_failed + retvals['changed'] = has_changed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + poll=dict(default=False, type='bool') + ), + supports_check_mode=False + ) + + # populate the dict with the user-provided vars. + args = dict(module.params) + + retvals = reload_dns(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py deleted file mode 120000 index 6d7b55c02a..0000000000 --- a/plugins/modules/memset_memstore_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/memset/memset_memstore_info.py \ No newline at end of file diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py new file mode 100644 index 0000000000..bda8cf0435 --- /dev/null +++ b/plugins/modules/memset_memstore_info.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: memset_memstore_info +author: "Simon Weald (@glitchcrab)" +short_description: Retrieve Memstore product usage information +notes: + - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(memstore.usage). +description: + - Retrieve Memstore product usage information. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The Memstore product name (that is, V(mstestyaa1)). 
+""" + +EXAMPLES = r""" +- name: Get usage for mstestyaa1 + community.general.memset_memstore_info: + name: mstestyaa1 + api_key: 5eb86c9896ab03919abcf03857163741 + delegate_to: localhost +""" + +RETURN = r""" +memset_api: + description: Info from the Memset API. + returned: always + type: complex + contains: + cdn_bandwidth: + description: Dictionary of CDN bandwidth facts. + returned: always + type: complex + contains: + bytes_out: + description: Outbound CDN bandwidth for the last 24 hours in bytes. + returned: always + type: int + sample: 1000 + requests: + description: Number of requests in the last 24 hours. + returned: always + type: int + sample: 10 + bytes_in: + description: Inbound CDN bandwidth for the last 24 hours in bytes. + returned: always + type: int + sample: 1000 + containers: + description: Number of containers. + returned: always + type: int + sample: 10 + bytes: + description: Space used in bytes. + returned: always + type: int + sample: 3860997965 + objs: + description: Number of objects. + returned: always + type: int + sample: 1000 + bandwidth: + description: Dictionary of CDN bandwidth facts. + returned: always + type: complex + contains: + bytes_out: + description: Outbound bandwidth for the last 24 hours in bytes. + returned: always + type: int + sample: 1000 + requests: + description: Number of requests in the last 24 hours. + returned: always + type: int + sample: 10 + bytes_in: + description: Inbound bandwidth for the last 24 hours in bytes. + returned: always + type: int + sample: 1000 +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def get_facts(args=None): + ''' + Performs a simple API call and returns a JSON blob. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + payload['name'] = args['name'] + + api_method = 'memstore.usage' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = "{0}" . format(response.stderr) + return retvals + + # we don't want to return the same thing twice + msg = None + memset_api = response.json() + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + # populate the dict with the user-provided vars. 
+ args = dict(module.params) + + retvals = get_facts(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py deleted file mode 120000 index 578a13ecdd..0000000000 --- a/plugins/modules/memset_server_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/memset/memset_server_info.py \ No newline at end of file diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py new file mode 100644 index 0000000000..3869edb98a --- /dev/null +++ b/plugins/modules/memset_server_info.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: memset_server_info +author: "Simon Weald (@glitchcrab)" +short_description: Retrieve server information +notes: + - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(server.info). +description: + - Retrieve server information. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The server product name (that is, C(testyaa1)). +""" + +EXAMPLES = r""" +- name: Get details for testyaa1 + community.general.memset_server_info: + name: testyaa1 + api_key: 5eb86c9896ab03919abcf03857163741 + delegate_to: localhost +""" + +RETURN = r""" +memset_api: + description: Info from the Memset API. + returned: always + type: complex + contains: + backups: + description: Whether this server has a backup service. + returned: always + type: bool + sample: true + control_panel: + description: Whether the server has a control panel (for example cPanel). + returned: always + type: str + sample: 'cpanel' + data_zone: + description: The data zone the server is in. + returned: always + type: str + sample: 'Memset Public Cloud' + expiry_date: + description: Current expiry date of the server. + returned: always + type: str + sample: '2018-08-10' + firewall_rule_group: + description: Details about the firewall group this server is in. + returned: always + type: dict + sample: + { + "default_outbound_policy": "RETURN", + "name": "testyaa-fw1", + "nickname": "testyaa cPanel rules", + "notes": "", + "public": false, + "rules": { + "51d7db54d39c3544ef7c48baa0b9944f": { + "action": "ACCEPT", + "comment": "", + "dest_ip6s": "any", + "dest_ips": "any", + "dest_ports": "any", + "direction": "Inbound", + "ip_version": "any", + "ordering": 2, + "protocols": "icmp", + "rule_group_name": "testyaa-fw1", + "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", + "source_ip6s": "any", + "source_ips": "any", + "source_ports": "any" + } + } + } + firewall_type: + description: The type of firewall the server has (for example self-managed, managed). + returned: always + type: str + sample: 'managed' + host_name: + description: The server's hostname. 
+ returned: always + type: str + sample: 'testyaa1.miniserver.com' + ignore_monitoring_off: + description: When true, Memset does not remind the customer that monitoring is disabled. + returned: always + type: bool + sample: true + ips: + description: List of dictionaries of all IP addresses assigned to the server. + returned: always + type: list + sample: + [ + { + "address": "1.2.3.4", + "bytes_in_today": 1000.0, + "bytes_in_yesterday": 2000.0, + "bytes_out_today": 1000.0, + "bytes_out_yesterday": 2000.0 + } + ] + monitor: + description: Whether the server has monitoring enabled. + returned: always + type: bool + sample: true + monitoring_level: + description: The server's monitoring level (for example V(basic)). + returned: always + type: str + sample: 'basic' + name: + description: Server name (same as the service name). + returned: always + type: str + sample: 'testyaa1' + network_zones: + description: The network zone(s) the server is in. + returned: always + type: list + sample: ["reading"] + nickname: + description: Customer-set nickname for the server. + returned: always + type: str + sample: 'database server' + no_auto_reboot: + description: Whether or not to reboot the server if monitoring detects it down. + returned: always + type: bool + sample: true + no_nrpe: + description: Whether Memset should use NRPE to monitor this server. + returned: always + type: bool + sample: true + os: + description: The server's Operating System. + returned: always + type: str + sample: 'debian_stretch_64' + penetration_patrol: + description: Intrusion detection support level for this server. + returned: always + type: str + sample: 'managed' + penetration_patrol_alert_level: + description: The alert level at which notifications are sent. + returned: always + type: int + sample: 10 + primary_ip: + description: Server's primary IP. + returned: always + type: str + sample: '1.2.3.4' + renewal_price_amount: + description: Renewal cost for the server. + returned: always + type: str + sample: '30.00' + renewal_price_currency: + description: Currency for renewal payments. + returned: always + type: str + sample: 'GBP' + renewal_price_vat: + description: VAT rate for renewal payments. + returned: always + type: str + sample: '20' + start_date: + description: Server's start date. + returned: always + type: str + sample: '2013-04-10' + status: + description: Current status of the server (for example live, onhold). + returned: always + type: str + sample: 'LIVE' + support_level: + description: Support level included with the server. + returned: always + type: str + sample: 'managed' + type: + description: What this server is (for example V(dedicated)). + returned: always + type: str + sample: 'miniserver' + vlans: + description: Dictionary of tagged and untagged VLANs this server is in. + returned: always + type: dict + sample: + { + "tagged": [], + "untagged": [ + "testyaa-vlan1", + "testyaa-vlan2" + ] + } + vulnscan: + description: Vulnerability scanning level. + returned: always + type: str + sample: 'basic' +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def get_facts(args=None): + ''' + Performs a simple API call and returns a JSON blob. 
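+    The raw response from the server.info call is returned under the 'memset_api' key.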
+ ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + payload['name'] = args['name'] + + api_method = 'server.info' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = "{0}" . format(response.stderr) + return retvals + + # we don't want to return the same thing twice + msg = None + memset_api = response.json() + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + # populate the dict with the user-provided vars. + args = dict(module.params) + + retvals = get_facts(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py deleted file mode 120000 index 24109bcc4d..0000000000 --- a/plugins/modules/memset_zone.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/memset/memset_zone.py \ No newline at end of file diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py new file mode 100644 index 0000000000..3255e07a61 --- /dev/null +++ b/plugins/modules/memset_zone.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: memset_zone +author: "Simon Weald (@glitchcrab)" +short_description: Creates and deletes Memset DNS zones +notes: + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point + to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope + - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). +description: + - Manage DNS zones in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + required: true + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + name: + required: true + description: + - The zone nickname; usually the same as the main domain. Ensure this value has at most 250 characters. + type: str + aliases: [nickname] + ttl: + description: + - The default TTL for all records created in the zone. This must be a valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). 
+ type: int + default: 0 + choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400] + force: + required: false + default: false + type: bool + description: + - Forces deletion of a zone and all zone domains/zone records it contains. +""" + +EXAMPLES = r""" +# Create the zone 'test' +- name: Create zone + community.general.memset_zone: + name: test + state: present + api_key: 5eb86c9196ab03919abcf03857163741 + ttl: 300 + delegate_to: localhost + +# Force zone deletion +- name: Force delete zone + community.general.memset_zone: + name: test + state: absent + api_key: 5eb86c9196ab03919abcf03857163741 + force: true + delegate_to: localhost +""" + +RETURN = r""" +memset_api: + description: Zone info from the Memset API. + returned: when state == present + type: complex + contains: + domains: + description: List of domains in this zone. + returned: always + type: list + sample: [] + id: + description: Zone ID. + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" + nickname: + description: Zone name. + returned: always + type: str + sample: "example.com" + records: + description: List of DNS records for domains in this zone. + returned: always + type: list + sample: [] + ttl: + description: Default TTL for domains in this zone. + returned: always + type: int + sample: 300 +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import check_zone +from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def api_validation(args=None): + ''' + Perform some validation which will be enforced by Memset's API (see: + https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) + ''' + # zone domain length must be less than 250 chars. + if len(args['name']) > 250: + stderr = 'Zone name must be less than 250 characters in length.' + module.fail_json(failed=True, msg=stderr, stderr=stderr) + + +def check(args=None): + ''' + Support for running with check mode. + ''' + retvals = dict() + + api_method = 'dns.zone_list' + has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + zone_exists, counter = check_zone(data=response, name=args['name']) + + # set changed to true if the operation would cause a change. + has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present')) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + + return retvals + + +def create_zone(args=None, zone_exists=None, payload=None): + ''' + At this point we already know whether the zone exists, so we + just need to make the API reflect the desired state. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + if not zone_exists: + payload['ttl'] = args['ttl'] + payload['nickname'] = args['name'] + api_method = 'dns.zone_create' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + else: + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + for zone in response.json(): + if zone['nickname'] == args['name']: + break + if zone['ttl'] != args['ttl']: + # update the zone if the desired TTL is different. 
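+            # Illustrative sketch (not part of the original logic): given an
+            # existing zone {'id': 'abc123', 'nickname': 'test', 'ttl': 300}
+            # and args {'name': 'test', 'ttl': 600}, the comparison above is
+            # true and dns.zone_update is issued below; equal TTLs skip the
+            # update and the module reports changed=false.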
+ payload['id'] = zone['id'] + payload['ttl'] = args['ttl'] + api_method = 'dns.zone_update' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + + # populate return var with zone info. + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + + if zone_exists: + payload = dict() + payload['id'] = zone_id + api_method = 'dns.zone_info' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + memset_api = response.json() + + return has_failed, has_changed, memset_api, msg + + +def delete_zone(args=None, zone_exists=None, payload=None): + ''' + Deletion requires extra sanity checking as the zone cannot be + deleted if it contains domains or records. Setting force=true + will override this behaviour. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + if zone_exists: + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + counter = 0 + for zone in response.json(): + if zone['nickname'] == args['name']: + counter += 1 + if counter == 1: + for zone in response.json(): + if zone['nickname'] == args['name']: + zone_id = zone['id'] + domain_count = len(zone['domains']) + record_count = len(zone['records']) + if (domain_count > 0 or record_count > 0) and args['force'] is False: + # we need to fail out if force was not explicitly set. + stderr = 'Zone contains domains or records and force was not used.' + has_failed = True + has_changed = False + module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1) + api_method = 'dns.zone_delete' + payload['id'] = zone_id + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice. + memset_api = msg + msg = None + else: + # zone names are not unique, so we cannot safely delete the requested + # zone at this time. + has_failed = True + has_changed = False + msg = 'Unable to delete zone as multiple zones with the same name exist.' + else: + has_failed, has_changed = False, False + + return has_failed, has_changed, memset_api, msg + + +def create_or_delete(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete. + ''' + retvals, payload = dict(), dict() + has_failed, has_changed = False, False + msg, memset_api, stderr = None, None, None + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + if _has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. 
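+        # Illustrative early return (hypothetical values): a bad api_key
+        # surfaces here as {'failed': True, 'msg': '...', 'stderr': '...'},
+        # which main() passes straight to module.fail_json().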
+ retvals['failed'] = _has_failed + retvals['msg'] = _msg + + if response.stderr is not None: + retvals['stderr'] = response.stderr + + return retvals + + zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + + if args['state'] == 'present': + has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload) + + elif args['state'] == 'absent': + has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload) + + retvals['failed'] = has_failed + retvals['changed'] = has_changed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, aliases=['nickname'], type='str'), + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + force=dict(default=False, type='bool') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. + args = dict(module.params) + args['check_mode'] = module.check_mode + + # validate some API-specific limitations. + api_validation(args=args) + + if module.check_mode: + retvals = check(args) + else: + retvals = create_or_delete(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py deleted file mode 120000 index d4e037aab3..0000000000 --- a/plugins/modules/memset_zone_domain.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/memset/memset_zone_domain.py \ No newline at end of file diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py new file mode 100644 index 0000000000..d8b8618862 --- /dev/null +++ b/plugins/modules/memset_zone_domain.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: memset_zone_domain +author: "Simon Weald (@glitchcrab)" +short_description: Create and delete domains in Memset DNS zones +notes: + - Zone domains can be thought of as a collection of domains, all of which share the same DNS records (in other words, they + point to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum + scope - C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list). + - Currently this module can only create one domain at a time. Multiple domains should be created using C(loop). +description: + - Manage DNS zone domains in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + domain: + required: true + description: + - The zone domain name. Ensure this value has at most 250 characters. 
+ type: str + aliases: ['name'] + zone: + required: true + description: + - The zone to add the domain to (this must already exist). + type: str +""" + +EXAMPLES = r""" +# Create the zone domain 'test.com' +- name: Create zone domain + community.general.memset_zone_domain: + domain: test.com + zone: testzone + state: present + api_key: 5eb86c9196ab03919abcf03857163741 + delegate_to: localhost +""" + +RETURN = r""" +memset_api: + description: Domain info from the Memset API. + returned: when changed or state == present + type: complex + contains: + domain: + description: Domain name. + returned: always + type: str + sample: "example.com" + id: + description: Domain ID. + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id +from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def api_validation(args=None): + ''' + Perform some validation which will be enforced by Memset's API (see: + https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create) + ''' + # zone domain length must be less than 250 chars + if len(args['domain']) > 250: + stderr = 'Zone domain must be less than 250 characters in length.' + module.fail_json(failed=True, msg=stderr) + + +def check(args=None): + ''' + Support for running with check mode. + ''' + retvals = dict() + has_changed = False + + api_method = 'dns.zone_domain_list' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + domain_exists = check_zone_domain(data=response, domain=args['domain']) + + # set changed to true if the operation would cause a change. + has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present')) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + + return retvals + + +def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): + ''' + At this point we already know whether the containing zone exists, + so we just need to create the domain (or exit if it already exists). + ''' + has_changed, has_failed = False, False + msg = None + + api_method = 'dns.zone_domain_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + for zone_domain in response.json(): + if zone_domain['domain'] == args['domain']: + # zone domain already exists, nothing to change. + has_changed = False + break + else: + # we need to create the domain + api_method = 'dns.zone_domain_create' + payload['domain'] = args['domain'] + payload['zone_id'] = zone_id + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + + return has_failed, has_changed, msg + + +def delete_zone_domain(args=None, payload=None): + ''' + Deletion is pretty simple, domains are always unique so we + we don't need to do any sanity checking to avoid deleting the + wrong thing. 
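+
+    Illustrative flow (hypothetical domain): dns.zone_domain_list confirms
+    'test.com' exists, then dns.zone_domain_delete is called with payload
+    {'domain': 'test.com'}.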
+ ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + api_method = 'dns.zone_domain_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + domain_exists = check_zone_domain(data=response, domain=args['domain']) + + if domain_exists: + api_method = 'dns.zone_domain_delete' + payload['domain'] = args['domain'] + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = response.json() + # unset msg as we don't want to return unnecessary info to the user. + msg = None + + return has_failed, has_changed, memset_api, msg + + +def create_or_delete_domain(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = response.stderr + return retvals + + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + + if not zone_exists: + # the zone needs to be unique - this isn't a requirement of Memset's API but it + # makes sense in the context of this module. + has_failed = True + if counter == 0: + stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone']) + elif counter > 1: + stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone']) + + retvals['failed'] = has_failed + retvals['msg'] = stderr + return retvals + + if args['state'] == 'present': + has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) + + if args['state'] == 'absent': + has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + domain=dict(required=True, aliases=['name'], type='str'), + zone=dict(required=True, type='str') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. + args = dict(module.params) + args['check_mode'] = module.check_mode + + # validate some API-specific limitations. + api_validation(args=args) + + if module.check_mode: + retvals = check(args) + else: + retvals = create_or_delete_domain(args) + + # we would need to populate the return values with the API's response + # in several places so it is easier to do it at the end instead. 
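+    # Sketch of the lookup below (hypothetical values): for
+    # args = {'domain': 'test.com', 'state': 'present'}, dns.zone_domain_info
+    # is called with payload {'domain': 'test.com'} and its JSON becomes
+    # retvals['memset_api'].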
+    if not retvals['failed']:
+        if args['state'] == 'present' and not module.check_mode:
+            payload = dict()
+            payload['domain'] = args['domain']
+            api_method = 'dns.zone_domain_info'
+            _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+            retvals['memset_api'] = response.json()
+
+    if retvals['failed']:
+        module.fail_json(**retvals)
+    else:
+        module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py
deleted file mode 120000
index 7f2c9920ac..0000000000
--- a/plugins/modules/memset_zone_record.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/memset/memset_zone_record.py
\ No newline at end of file
diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py
new file mode 100644
index 0000000000..71d7b841c9
--- /dev/null
+++ b/plugins/modules/memset_zone_record.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Simon Weald
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones
+notes:
+  - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point
+    to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope
+    - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
+  - Currently this module can only create one DNS record at a time. Multiple records should be created using C(loop).
+description:
+  - Manage DNS records in a Memset account.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    default: present
+    description:
+      - Indicates desired state of resource.
+    type: str
+    choices: [absent, present]
+  api_key:
+    required: true
+    description:
+      - The API key obtained from the Memset control panel.
+    type: str
+  address:
+    required: true
+    description:
+      - The address for this record (can be an IP or text string depending on record type).
+    type: str
+    aliases: [ip, data]
+  priority:
+    description:
+      - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
+    type: int
+    default: 0
+  record:
+    required: false
+    description:
+      - The subdomain to create.
+    type: str
+    default: ''
+  type:
+    required: true
+    description:
+      - The type of DNS record to create.
+    choices: [A, AAAA, CNAME, MX, NS, SRV, TXT]
+    type: str
+  relative:
+    type: bool
+    default: false
+    description:
+      - If set, the current domain is added onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) record types.
+  ttl:
+    description:
+      - The record's TTL in seconds (it inherits the zone's TTL if not explicitly set). This must be a valid int from
+        U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+    default: 0
+    choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400]
+    type: int
+  zone:
+    required: true
+    description:
+      - The name of the zone to which to add the record.
+    type: str
+"""
+
+EXAMPLES = r"""
+# Create DNS record for www.domain.com
+- name: Create DNS record
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: A
+    record: www
+    address: 1.2.3.4
+    ttl: 300
+    relative: false
+  delegate_to: localhost
+
+# Create an SPF record for domain.com
+- name: Create SPF record for domain.com
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: TXT
+    address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
+  delegate_to: localhost
+
+# Create multiple DNS records
+- name: Create multiple DNS records
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    zone: "{{ item.zone }}"
+    type: "{{ item.type }}"
+    record: "{{ item.record }}"
+    address: "{{ item.address }}"
+  delegate_to: localhost
+  with_items:
+    - {'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4'}
+    - {'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1'}
+"""
+
+RETURN = r"""
+memset_api:
+  description: Record info from the Memset API.
+  returned: when state == present
+  type: complex
+  contains:
+    address:
+      description: Record content (may be an IP, string or blank depending on record type).
+      returned: always
+      type: str
+      sample: 1.1.1.1
+    id:
+      description: Record ID.
+      returned: always
+      type: str
+      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+    priority:
+      description: Priority for C(MX) and C(SRV) records.
+      returned: always
+      type: int
+      sample: 10
+    record:
+      description: Name of record.
+      returned: always
+      type: str
+      sample: "www"
+    relative:
+      description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+      returned: always
+      type: bool
+      sample: false
+    ttl:
+      description: Record TTL.
+      returned: always
+      type: int
+      sample: 10
+    type:
+      description: Record type.
+      returned: always
+      type: str
+      sample: AAAA
+    zone_id:
+      description: Zone ID.
+      returned: always
+      type: str
+      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+    '''
+    Perform some validation which will be enforced by Memset's API (see:
+    https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+    '''
+    failed_validation = False
+    error = None
+
+    # priority can only be an integer from 0 to 999
+    if not 0 <= args['priority'] <= 999:
+        failed_validation = True
+        error = 'Priority must be in the range 0 to 999 (inclusive).'
+    # data value must be max 250 chars
+    if len(args['address']) > 250:
+        failed_validation = True
+        error = "Address must be less than 250 characters in length."
+    # record value must be max 63 chars
+    if args['record']:
+        if len(args['record']) > 63:
+            failed_validation = True
+            error = "Record must be less than 63 characters in length."
+    # relative isn't used for all record types
+    if args['relative']:
+        if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+            failed_validation = True
+            error = "Relative is only valid for CNAME, MX, NS and SRV record types."
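+    # Illustrative failure (hypothetical args): {'priority': 1500,
+    # 'address': '1.2.3.4', 'record': 'www', 'relative': False, 'type': 'A'}
+    # trips the priority check above, so the module fails with
+    # 'Priority must be in the range 0 to 999 (inclusive).'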
+ # if any of the above failed then fail early + if failed_validation: + module.fail_json(failed=True, msg=error) + + +def create_zone_record(args=None, zone_id=None, records=None, payload=None): + ''' + Sanity checking has already occurred prior to this function being + called, so we can go ahead and either create or update the record. + As defaults are defined for all values in the argument_spec, this + may cause some changes to occur as the defaults are enforced (if + the user has only configured required variables). + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + # assemble the new record. + new_record = dict() + new_record['zone_id'] = zone_id + for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']: + new_record[arg] = args[arg] + + # if we have any matches, update them. + if records: + for zone_record in records: + # record exists, add ID to payload. + new_record['id'] = zone_record['id'] + if zone_record == new_record: + # nothing to do; record is already correct so we populate + # the return var with the existing record's details. + memset_api = zone_record + return has_changed, has_failed, memset_api, msg + else: + # merge dicts ensuring we change any updated values + payload = zone_record.copy() + payload.update(new_record) + api_method = 'dns.zone_record_update' + if args['check_mode']: + has_changed = True + # return the new record to the user in the returned var. + memset_api = new_record + return has_changed, has_failed, memset_api, msg + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = new_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + else: + # no record found, so we need to create it + api_method = 'dns.zone_record_create' + payload = new_record + if args['check_mode']: + has_changed = True + # populate the return var with the new record's details. + memset_api = new_record + return has_changed, has_failed, memset_api, msg + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = new_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + + return has_changed, has_failed, memset_api, msg + + +def delete_zone_record(args=None, records=None, payload=None): + ''' + Matching records can be cleanly deleted without affecting other + resource types, so this is pretty simple to achieve. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + # if we have any matches, delete them. + if records: + for zone_record in records: + if args['check_mode']: + has_changed = True + return has_changed, has_failed, memset_api, msg + payload['id'] = zone_record['id'] + api_method = 'dns.zone_record_delete' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = zone_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + + return has_changed, has_failed, memset_api, msg + + +def create_or_delete(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete functions. + Check mode is integrated into the create or delete functions. 
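+
+    Illustrative match (hypothetical values): an existing record with
+    zone_id='abc123', record='www' and type='A' is picked up by the list
+    comprehension below and handed to create_zone_record() for an update.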
+    '''
+    has_failed, has_changed = False, False
+    msg, memset_api, stderr = None, None, None
+    retvals, payload = dict(), dict()
+
+    # get the zones and check if the relevant zone exists.
+    api_method = 'dns.zone_list'
+    _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+    if _has_failed:
+        # this is the first time the API is called; incorrect credentials will
+        # manifest themselves at this point so we need to ensure the user is
+        # informed of the reason.
+        retvals['failed'] = _has_failed
+        retvals['msg'] = msg
+        if response.status_code is not None:
+            retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+        else:
+            retvals['stderr'] = response.stderr
+        return retvals
+
+    zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+    if not zone_exists:
+        has_failed = True
+        if counter == 0:
+            stderr = "DNS zone {0} does not exist.".format(args['zone'])
+        elif counter > 1:
+            stderr = "{0} matches multiple zones.".format(args['zone'])
+        retvals['failed'] = has_failed
+        retvals['msg'] = stderr
+        retvals['stderr'] = stderr
+        return retvals
+
+    # get a list of all records (as we can't limit records by zone)
+    api_method = 'dns.zone_record_list'
+    _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+    # find any matching records
+    records = [record for record in response.json() if record['zone_id'] == zone_id
+               and record['record'] == args['record'] and record['type'] == args['type']]
+
+    if args['state'] == 'present':
+        has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
+
+    if args['state'] == 'absent':
+        has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
+
+    retvals['changed'] = has_changed
+    retvals['failed'] = has_failed
+    # only return the optional values that were actually populated.
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+    return retvals
+
+
+def main():
+    global module
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            api_key=dict(required=True, type='str', no_log=True),
+            zone=dict(required=True, type='str'),
+            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
+            address=dict(required=True, aliases=['ip', 'data'], type='str'),
+            record=dict(default='', type='str'),
+            ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+            priority=dict(default=0, type='int'),
+            relative=dict(default=False, type='bool')
+        ),
+        supports_check_mode=True
+    )
+
+    # populate the dict with the user-provided vars.
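+    # (args also gains 'check_mode' below, so the worker functions can honour
+    # check mode without touching the global module object.)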
+ args = dict(module.params) + args['check_mode'] = module.check_mode + + # perform some Memset API-specific validation + api_validation(args=args) + + retvals = create_or_delete(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py deleted file mode 120000 index d869d65732..0000000000 --- a/plugins/modules/mksysb.py +++ /dev/null @@ -1 +0,0 @@ -./system/mksysb.py \ No newline at end of file diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py new file mode 100644 index 0000000000..c9a7eb7b60 --- /dev/null +++ b/plugins/modules/mksysb.py @@ -0,0 +1,168 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Alexei Znamensky (@russoz) +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: Kairo Araujo (@kairoaraujo) +module: mksysb +short_description: Generates AIX mksysb rootvg backups +description: + - This module manages a basic AIX mksysb (image) of rootvg. +seealso: + - name: C(mksysb) command manual page + description: Manual page for the command. + link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command + +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + backup_crypt_files: + description: + - Backup encrypted files. + type: bool + default: true + backup_dmapi_fs: + description: + - Back up DMAPI filesystem files. + type: bool + default: true + create_map_files: + description: + - Creates a new MAP files. + type: bool + default: false + exclude_files: + description: + - Excludes files using C(/etc/rootvg.exclude). + type: bool + default: false + exclude_wpar_files: + description: + - Excludes WPAR files. + type: bool + default: false + extended_attrs: + description: + - Backup extended attributes. + type: bool + default: true + name: + type: str + description: + - Backup name. + required: true + new_image_data: + description: + - Creates a new file data. + type: bool + default: true + software_packing: + description: + - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). + type: bool + default: false + storage_path: + type: str + description: + - Storage path where the mksysb backup is stored. + required: true + use_snapshot: + description: + - Creates backup using snapshots. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Running a backup image mksysb + community.general.mksysb: + name: myserver + storage_path: /repository/images + exclude_files: true + exclude_wpar_files: true +""" + +RETURN = r""" +msg: + description: Return message regarding the action. 
+ returned: always + type: str +""" + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class MkSysB(ModuleHelper): + module = dict( + argument_spec=dict( + backup_crypt_files=dict(type='bool', default=True), + backup_dmapi_fs=dict(type='bool', default=True), + create_map_files=dict(type='bool', default=False), + exclude_files=dict(type='bool', default=False), + exclude_wpar_files=dict(type='bool', default=False), + extended_attrs=dict(type='bool', default=True), + name=dict(type='str', required=True), + new_image_data=dict(type='bool', default=True), + software_packing=dict(type='bool', default=False), + storage_path=dict(type='str', required=True), + use_snapshot=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + command_args_formats = dict( + create_map_files=cmd_runner_fmt.as_bool("-m"), + use_snapshot=cmd_runner_fmt.as_bool("-T"), + exclude_files=cmd_runner_fmt.as_bool("-e"), + exclude_wpar_files=cmd_runner_fmt.as_bool("-G"), + new_image_data=cmd_runner_fmt.as_bool("-i"), + software_packing=cmd_runner_fmt.as_bool_not("-p"), + extended_attrs=cmd_runner_fmt.as_bool("-a"), + backup_crypt_files=cmd_runner_fmt.as_bool_not("-Z"), + backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"), + combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])), + ) + + def __init_module__(self): + if not os.path.isdir(self.vars.storage_path): + self.do_raise("Storage path %s is not valid." % self.vars.storage_path) + + def __run__(self): + def process(rc, out, err): + if rc != 0: + self.do_raise("mksysb failed: {0}".format(out)) + + runner = CmdRunner( + self.module, + ['mksysb', '-X'], + self.command_args_formats, + ) + with runner(['create_map_files', 'use_snapshot', 'exclude_files', 'exclude_wpar_files', 'software_packing', + 'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'], + output_process=process, check_mode_skip=True) as ctx: + ctx.run(combined_path=[self.vars.storage_path, self.vars.name]) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + self.changed = True + + +def main(): + MkSysB.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py deleted file mode 120000 index d922e97e64..0000000000 --- a/plugins/modules/modprobe.py +++ /dev/null @@ -1 +0,0 @@ -./system/modprobe.py \ No newline at end of file diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py new file mode 100644 index 0000000000..fddf0643bd --- /dev/null +++ b/plugins/modules/modprobe.py @@ -0,0 +1,326 @@ +#!/usr/bin/python + +# Copyright (c) 2013, David Stygstra +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: modprobe +short_description: Load or unload kernel modules +author: + - David Stygstra (@stygstra) + - Julien Dauphant (@jdauphant) + - Matt Jeffery (@mattjeffery) +description: + - Load or unload kernel modules. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + required: true + description: + - Name of kernel module to manage. 
+ state: + type: str + description: + - Whether the module should be present or absent. + choices: [absent, present] + default: present + params: + type: str + description: + - Modules parameters. + default: '' + persistent: + type: str + choices: [disabled, absent, present] + default: disabled + version_added: 7.0.0 + description: + - Persistency between reboots for configured module. + - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent + during reboots. + - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module is loaded + on next reboot. + - If V(absent), comments out module name from C(/etc/modules-load.d/) and comments out params from C(/etc/modprobe.d/) + so the module is not loaded on next reboot. + - If V(disabled), does not touch anything and leaves C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is. + - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar + triggers encoded in the kernel modules themselves instead of configuration like this. + - In fact, most modern kernel modules are prepared for automatic loading already. + - B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled). +""" + +EXAMPLES = r""" +- name: Add the 802.1q module + community.general.modprobe: + name: 8021q + state: present + +- name: Add the dummy module + community.general.modprobe: + name: dummy + state: present + params: 'numdummies=2' + +- name: Add the dummy module and make sure it is loaded after reboots + community.general.modprobe: + name: dummy + state: present + params: 'numdummies=2' + persistent: present +""" + +import os.path +import platform +import shlex +import traceback +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +RELEASE_VER = platform.release() +MODULES_LOAD_LOCATION = '/etc/modules-load.d' +PARAMETERS_FILES_LOCATION = '/etc/modprobe.d' + + +class Modprobe(object): + + def __init__(self, module): + self.module = module + self.modprobe_bin = module.get_bin_path('modprobe', True) + + self.check_mode = module.check_mode + self.desired_state = module.params['state'] + self.name = module.params['name'] + self.params = module.params['params'] + self.persistent = module.params['persistent'] + + self.changed = False + + self.re_find_module = re.compile(r'^ *{0} *(?:[#;].*)?\n?\Z'.format(self.name)) + self.re_find_params = re.compile(r'^options {0} \w+=\S+ *(?:[#;].*)?\n?\Z'.format(self.name)) + self.re_get_params_and_values = re.compile(r'^options {0} (\w+=\S+) *(?:[#;].*)?\n?\Z'.format(self.name)) + + def load_module(self): + command = [self.modprobe_bin] + if self.check_mode: + command.append('-n') + command.extend([self.name] + shlex.split(self.params)) + + rc, out, err = self.module.run_command(command) + + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + if self.check_mode or self.module_loaded(): + self.changed = True + else: + rc, stdout, stderr = self.module.run_command( + [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params) + ) + if rc != 0: + self.module.warn(stderr) + + @property + def module_is_loaded_persistently(self): + for module_file in self.modules_files: + with open(module_file) as file: + for line in file: + if self.re_find_module.match(line): + return 
True + + return False + + @property + def params_is_set(self): + desired_params = set(self.params.split()) + + return desired_params == self.permanent_params + + @property + def permanent_params(self): + params = set() + + for modprobe_file in self.modprobe_files: + with open(modprobe_file) as file: + for line in file: + match = self.re_get_params_and_values.match(line) + if match: + params.add(match.group(1)) + + return params + + def create_module_file(self): + file_path = os.path.join(MODULES_LOAD_LOCATION, + self.name + '.conf') + if not self.check_mode: + with open(file_path, 'w') as file: + file.write(self.name + '\n') + + @property + def module_options_file_content(self): + file_content = ['options {0} {1}'.format(self.name, param) + for param in self.params.split()] + return '\n'.join(file_content) + '\n' + + def create_module_options_file(self): + new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, + self.name + '.conf') + if not self.check_mode: + with open(new_file_path, 'w') as file: + file.write(self.module_options_file_content) + + def disable_old_params(self): + + for modprobe_file in self.modprobe_files: + with open(modprobe_file) as file: + file_content = file.readlines() + + content_changed = False + for index, line in enumerate(file_content): + if self.re_find_params.match(line): + file_content[index] = '#' + line + content_changed = True + + if not self.check_mode and content_changed: + with open(modprobe_file, 'w') as file: + file.write('\n'.join(file_content)) + + def disable_module_permanent(self): + + for module_file in self.modules_files: + with open(module_file) as file: + file_content = file.readlines() + + content_changed = False + for index, line in enumerate(file_content): + if self.re_find_module.match(line): + file_content[index] = '#' + line + content_changed = True + + if not self.check_mode and content_changed: + with open(module_file, 'w') as file: + file.write('\n'.join(file_content)) + + def load_module_permanent(self): + + if not self.module_is_loaded_persistently: + self.create_module_file() + self.changed = True + + if not self.params_is_set: + self.disable_old_params() + self.create_module_options_file() + self.changed = True + + def unload_module_permanent(self): + if self.module_is_loaded_persistently: + self.disable_module_permanent() + self.changed = True + + if self.permanent_params: + self.disable_old_params() + self.changed = True + + @property + def modules_files(self): + if not os.path.isdir(MODULES_LOAD_LOCATION): + return [] + modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path) + for path in os.listdir(MODULES_LOAD_LOCATION)] + return [path for path in modules_paths if os.path.isfile(path)] + + @property + def modprobe_files(self): + if not os.path.isdir(PARAMETERS_FILES_LOCATION): + return [] + modules_paths = [os.path.join(PARAMETERS_FILES_LOCATION, path) + for path in os.listdir(PARAMETERS_FILES_LOCATION)] + return [path for path in modules_paths if os.path.isfile(path)] + + def module_loaded(self): + is_loaded = False + try: + with open('/proc/modules') as modules: + module_name = self.name.replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + is_loaded = True + break + + if not is_loaded: + module_file = '/' + self.name + '.ko' + builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin') + with open(builtin_path) as builtins: + for line in builtins: + if line.rstrip().endswith(module_file): + is_loaded = True + break + except (IOError, OSError) as e: + 
self.module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **self.result) + + return is_loaded + + def unload_module(self): + command = [self.modprobe_bin, '-r', self.name] + if self.check_mode: + command.append('-n') + + rc, out, err = self.module.run_command(command) + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + self.changed = True + + @property + def result(self): + return { + 'changed': self.changed, + 'name': self.name, + 'params': self.params, + 'state': self.desired_state, + } + + +def build_module(): + return AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + persistent=dict(type='str', default='disabled', choices=['disabled', 'present', 'absent']), + ), + supports_check_mode=True, + ) + + +def main(): + module = build_module() + + modprobe = Modprobe(module) + + if modprobe.desired_state == 'present' and not modprobe.module_loaded(): + modprobe.load_module() + elif modprobe.desired_state == 'absent' and modprobe.module_loaded(): + modprobe.unload_module() + + if modprobe.persistent == 'present' and not (modprobe.module_is_loaded_persistently and modprobe.params_is_set): + modprobe.load_module_permanent() + elif modprobe.persistent == 'absent' and (modprobe.module_is_loaded_persistently or modprobe.permanent_params): + modprobe.unload_module_permanent() + + module.exit_json(**modprobe.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py deleted file mode 120000 index 136c007641..0000000000 --- a/plugins/modules/monit.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/monit.py \ No newline at end of file diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py new file mode 100644 index 0000000000..63c83741b5 --- /dev/null +++ b/plugins/modules/monit.py @@ -0,0 +1,347 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Darryl Stoflet +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: monit +short_description: Manage the state of a program monitored using Monit +description: + - Manage the state of a program monitored using Monit. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the C(monit) program/process to manage. + required: true + type: str + state: + description: + - The state of service. + required: true + choices: ["present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded"] + type: str + timeout: + description: + - If there are pending actions for the service monitored by monit, then it checks for up to this many seconds to verify + the requested action has been performed. The module sleeps for five seconds between each check. 
+ default: 300 + type: int +author: + - Darryl Stoflet (@dstoflet) + - Simon Kelly (@snopoke) +""" + +EXAMPLES = r""" +- name: Manage the state of program httpd to be in started state + community.general.monit: + name: httpd + state: started +""" + +import time +import re + +from collections import namedtuple + +from ansible.module_utils.basic import AnsibleModule + + +STATE_COMMAND_MAP = { + 'stopped': 'stop', + 'started': 'start', + 'monitored': 'monitor', + 'unmonitored': 'unmonitor', + 'restarted': 'restart' +} + +MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program', + 'Network'] + + +class StatusValue(namedtuple("Status", "value, is_pending")): + MISSING = 'missing' + OK = 'ok' + NOT_MONITORED = 'not_monitored' + INITIALIZING = 'initializing' + DOES_NOT_EXIST = 'does_not_exist' + EXECUTION_FAILED = 'execution_failed' + ALL_STATUS = [ + MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED + ] + + def __new__(cls, value, is_pending=False): + return super(StatusValue, cls).__new__(cls, value, is_pending) + + def pending(self): + return StatusValue(self.value, True) + + def __getattr__(self, item): + if item in ('is_%s' % status for status in self.ALL_STATUS): + return self.value == getattr(self, item[3:].upper()) + raise AttributeError(item) + + def __str__(self): + return "%s%s" % (self.value, " (pending)" if self.is_pending else "") + + +class Status(object): + MISSING = StatusValue(StatusValue.MISSING) + OK = StatusValue(StatusValue.OK) + RUNNING = StatusValue(StatusValue.OK) + NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED) + INITIALIZING = StatusValue(StatusValue.INITIALIZING) + DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST) + EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED) + + +class Monit(object): + def __init__(self, module, monit_bin_path, service_name, timeout): + self.module = module + self.monit_bin_path = monit_bin_path + self.process_name = service_name + self.timeout = timeout + + self._monit_version = None + self._raw_version = None + self._status_change_retry_count = 6 + + def monit_version(self): + if self._monit_version is None: + self._raw_version, version = self._get_monit_version() + # Use only major and minor even if there are more these should be enough + self._monit_version = version[0], version[1] + return self._monit_version + + def _get_monit_version(self): + rc, out, err = self.module.run_command([self.monit_bin_path, '-V'], check_rc=True) + version_line = out.split('\n')[0] + raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group() + return raw_version, tuple(map(int, raw_version.split('.'))) + + def exit_fail(self, msg, status=None, **kwargs): + kwargs.update({ + 'msg': msg, + 'monit_version': self._raw_version, + 'process_status': str(status) if status else None, + }) + self.module.fail_json(**kwargs) + + def exit_success(self, state): + self.module.exit_json(changed=True, name=self.process_name, state=state) + + @property + def command_args(self): + return ["-B"] if self.monit_version() > (5, 18) else [] + + def get_status(self, validate=False): + """Return the status of the process in monit. 
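+
+        Illustrative results (hypothetical): Status.OK for a running,
+        monitored process; Status.NOT_MONITORED after an unmonitor action;
+        a pending variant rendered as 'ok (pending)' while monit is
+        mid-change.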
+ + :@param validate: Force monit to re-check the status of the process + """ + monit_command = "validate" if validate else "status" + check_rc = False if validate else True # 'validate' always has rc = 1 + command = [self.monit_bin_path, monit_command] + self.command_args + [self.process_name] + rc, out, err = self.module.run_command(command, check_rc=check_rc) + return self._parse_status(out, err) + + def _parse_status(self, output, err): + escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES]) + pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name)) + if not re.search(pattern, output, re.IGNORECASE): + return Status.MISSING + + status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE) + if not status_val: + self.exit_fail("Unable to find process status", stdout=output, stderr=err) + + status_val = status_val[0].strip().upper() + if ' | ' in status_val: + status_val = status_val.split(' | ')[0] + if ' - ' not in status_val: + status_val = status_val.replace(' ', '_') + try: + return getattr(Status, status_val) + except AttributeError: + self.module.warn("Unknown monit status '%s', treating as execution failed" % status_val) + return Status.EXECUTION_FAILED + else: + status_val, substatus = status_val.split(' - ') + action, state = substatus.split() + if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']: + status = Status.OK + else: + status = Status.NOT_MONITORED + + if state == 'pending': + status = status.pending() + return status + + def is_process_present(self): + command = [self.monit_bin_path, 'summary'] + self.command_args + rc, out, err = self.module.run_command(command, check_rc=True) + return bool(re.findall(r'\b%s\b' % self.process_name, out)) + + def is_process_running(self): + return self.get_status().is_ok + + def run_command(self, command): + """Runs a monit command, and returns the new status.""" + return self.module.run_command([self.monit_bin_path, command, self.process_name], check_rc=True) + + def wait_for_status_change(self, current_status): + running_status = self.get_status() + if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED: + return running_status + + loop_count = 0 + while running_status.value == current_status.value: + if loop_count >= self._status_change_retry_count: + self.exit_fail('waited too long for monit to change state', running_status) + + loop_count += 1 + time.sleep(0.5) + validate = loop_count % 2 == 0 # force recheck of status every second try + running_status = self.get_status(validate) + return running_status + + def wait_for_monit_to_stop_pending(self, current_status=None): + """Fails this run if there is no status or it is pending/initializing for timeout""" + timeout_time = time.time() + self.timeout + + if not current_status: + current_status = self.get_status() + waiting_status = [ + StatusValue.MISSING, + StatusValue.INITIALIZING, + StatusValue.DOES_NOT_EXIST, + ] + while current_status.is_pending or (current_status.value in waiting_status): + if time.time() >= timeout_time: + self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status) + + time.sleep(5) + current_status = self.get_status(validate=True) + return current_status + + def reload(self): + rc, out, err = self.module.run_command([self.monit_bin_path, 'reload']) + if rc != 0: + self.exit_fail('monit reload failed', stdout=out, stderr=err) + self.exit_success(state='reloaded') + + def present(self): + 
self.run_command('reload') + + timeout_time = time.time() + self.timeout + while not self.is_process_present(): + if time.time() >= timeout_time: + self.exit_fail('waited too long for process to become "present"') + + time.sleep(5) + + self.exit_success(state='present') + + def change_state(self, state, expected_status, invert_expected=None): + current_status = self.get_status() + self.run_command(STATE_COMMAND_MAP[state]) + status = self.wait_for_status_change(current_status) + status = self.wait_for_monit_to_stop_pending(status) + status_match = status.value == expected_status.value + if invert_expected: + status_match = not status_match + if status_match: + self.exit_success(state=state) + self.exit_fail('%s process not %s' % (self.process_name, state), status) + + def stop(self): + self.change_state('stopped', Status.NOT_MONITORED) + + def unmonitor(self): + self.change_state('unmonitored', Status.NOT_MONITORED) + + def restart(self): + self.change_state('restarted', Status.OK) + + def start(self): + self.change_state('started', Status.OK) + + def monitor(self): + self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True) + + +def main(): + arg_spec = dict( + name=dict(required=True), + timeout=dict(default=300, type='int'), + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params['name'] + state = module.params['state'] + timeout = module.params['timeout'] + + monit = Monit(module, module.get_bin_path('monit', True), name, timeout) + + def exit_if_check_mode(): + if module.check_mode: + module.exit_json(changed=True) + + if state == 'reloaded': + exit_if_check_mode() + monit.reload() + + present = monit.is_process_present() + + if not present and not state == 'present': + module.fail_json(msg='%s process not presently configured with monit' % name, name=name) + + if state == 'present': + if present: + module.exit_json(changed=False, name=name, state=state) + exit_if_check_mode() + monit.present() + + monit.wait_for_monit_to_stop_pending() + running = monit.is_process_running() + + if running and state in ['started', 'monitored']: + module.exit_json(changed=False, name=name, state=state) + + if running and state == 'stopped': + exit_if_check_mode() + monit.stop() + + if running and state == 'unmonitored': + exit_if_check_mode() + monit.unmonitor() + + elif state == 'restarted': + exit_if_check_mode() + monit.restart() + + elif not running and state == 'started': + exit_if_check_mode() + monit.start() + + elif not running and state == 'monitored': + exit_if_check_mode() + monit.monitor() + + module.exit_json(changed=False, name=name, state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/airbrake_deployment.py b/plugins/modules/monitoring/airbrake_deployment.py deleted file mode 100644 index a7d7710a0a..0000000000 --- a/plugins/modules/monitoring/airbrake_deployment.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Bruce Pennypacker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: airbrake_deployment -author: -- "Bruce Pennypacker (@bpennypacker)" -- "Patrick Humpal (@phumpal)" -short_description: Notify airbrake about app deployments 
-description: - - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). -options: - project_id: - description: - - Airbrake PROJECT_ID - required: true - type: str - version_added: '0.2.0' - project_key: - description: - - Airbrake PROJECT_KEY. - required: true - type: str - version_added: '0.2.0' - environment: - description: - - The airbrake environment name, typically 'production', 'staging', etc. - required: true - type: str - user: - description: - - The username of the person doing the deployment - required: false - type: str - repo: - description: - - URL of the project repository - required: false - type: str - revision: - description: - - A hash, number, tag, or other identifier showing what revision from version control was deployed - required: false - type: str - version: - description: - - A string identifying what version was deployed - required: false - type: str - version_added: '1.0.0' - url: - description: - - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. - required: false - default: "https://api.airbrake.io/api/v4/projects/" - type: str - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - -requirements: [] -''' - -EXAMPLES = ''' -- name: Notify airbrake about an app deployment - community.general.airbrake_deployment: - project_id: '12345' - project_key: 'AAAAAA' - environment: staging - user: ansible - revision: '4.2' - -- name: Notify airbrake about an app deployment, using git hash as revision - community.general.airbrake_deployment: - project_id: '12345' - project_key: 'AAAAAA' - environment: staging - user: ansible - revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15' - version: '0.2.0' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - project_id=dict(required=True, no_log=True, type='str'), - project_key=dict(required=True, no_log=True, type='str'), - environment=dict(required=True, type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - version=dict(required=False, type='str'), - url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True, - ) - - # Build list of params - params = {} - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4 - if module.params["environment"]: - params["environment"] = module.params["environment"] - - if module.params["user"]: - params["username"] = module.params["user"] - - if module.params["repo"]: - params["repository"] = module.params["repo"] - - if module.params["revision"]: - params["revision"] = module.params["revision"] - - if module.params["version"]: - params["version"] = module.params["version"] - - # Build deploy url - url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"] - json_body = module.jsonify(params) - - # Build header - headers = {'Content-Type': 'application/json'} - - # Notify Airbrake of deploy - response, info = fetch_url(module, url, data=json_body, - headers=headers, method='POST') - - if info['status'] == 200 or info['status'] == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/monitoring/bigpanda.py deleted file mode 100644 index c5fe61cbf6..0000000000 --- a/plugins/modules/monitoring/bigpanda.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: bigpanda -author: "Hagai Kariti (@hkariti)" -short_description: Notify BigPanda about deployments -description: - - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. -options: - component: - type: str - description: - - "The name of the component being deployed. Ex: billing" - required: true - aliases: ['name'] - version: - type: str - description: - - The deployment version. - required: true - token: - type: str - description: - - API token. - required: true - state: - type: str - description: - - State of the deployment. - required: true - choices: ['started', 'finished', 'failed'] - hosts: - type: str - description: - - Name of affected host name. Can be a list. - - If not specified, it defaults to the remote system's hostname. - required: false - aliases: ['host'] - env: - type: str - description: - - The environment name, typically 'production', 'staging', etc. - required: false - owner: - type: str - description: - - The person responsible for the deployment. - required: false - description: - type: str - description: - - Free text description of the deployment. 
- required: false - url: - type: str - description: - - Base URL of the API server. - required: False - default: https://api.bigpanda.io - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - deployment_message: - type: str - description: - - Message about the deployment. - version_added: '0.2.0' - source_system: - type: str - description: - - Source system used in the requests to the API - default: ansible - -# informational: requirements for nodes -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: myapp - version: '1.3' - token: '{{ bigpanda_token }}' - state: started - -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: myapp - version: '1.3' - token: '{{ bigpanda_token }}' - state: finished - -# If outside servers aren't reachable from your machine, use delegate_to and override hosts: -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: myapp - version: '1.3' - token: '{{ bigpanda_token }}' - hosts: '{{ ansible_hostname }}' - state: started - delegate_to: localhost - register: deployment - -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: '{{ deployment.component }}' - version: '{{ deployment.version }}' - token: '{{ deployment.token }}' - state: finished - delegate_to: localhost -''' - -# =========================================== -# Module execution. -# -import json -import socket -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - component=dict(required=True, aliases=['name']), - version=dict(required=True), - token=dict(required=True, no_log=True), - state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - deployment_message=dict(required=False), - source_system=dict(required=False, default='ansible'), - validate_certs=dict(default=True, type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), - ), - supports_check_mode=True, - ) - - token = module.params['token'] - state = module.params['state'] - url = module.params['url'] - - # Build the common request body - body = dict() - for k in ('component', 'version', 'hosts'): - v = module.params[k] - if v is not None: - body[k] = v - if body.get('hosts') is None: - body['hosts'] = [socket.gethostname()] - - if not isinstance(body['hosts'], list): - body['hosts'] = [body['hosts']] - - # Insert state-specific attributes to body - if state == 'started': - for k in ('source_system', 'env', 'owner', 'description'): - v = module.params[k] - if v is not None: - body[k] = v - - request_url = url + '/data/events/deployments/start' - else: - message = module.params['deployment_message'] - if message is not None: - body['errorMessage'] = message - - if state == 'finished': - body['status'] = 'success' - else: - body['status'] = 'failure' - - request_url = url + '/data/events/deployments/end' - - # Build the deployment object we return - deployment = dict(token=token, url=url) - deployment.update(body) 
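# Note: the deployment dict assembled above (token/url plus the request body)
# is returned to the caller on every exit path; as the EXAMPLES section shows,
# a play can register the result of the state=started call and feed the same
# component/version/token back into a later state=finished or state=failed call.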
- if 'errorMessage' in deployment: - message = deployment.pop('errorMessage') - deployment['message'] = message - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True, **deployment) - - # Send the data to bigpanda - data = json.dumps(body) - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - try: - response, info = fetch_url(module, request_url, data=data, headers=headers) - if info['status'] == 200: - module.exit_json(changed=True, **deployment) - else: - module.fail_json(msg=json.dumps(info)) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/circonus_annotation.py b/plugins/modules/monitoring/circonus_annotation.py deleted file mode 100644 index 8543aa00fa..0000000000 --- a/plugins/modules/monitoring/circonus_annotation.py +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2014-2015, Epic Games, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: circonus_annotation -short_description: create an annotation in circonus -description: - - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided -author: "Nick Harring (@NickatEpic)" -requirements: - - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2) -notes: - - Check mode isn't supported. -options: - api_key: - type: str - description: - - Circonus API key - required: true - category: - type: str - description: - - Annotation Category - required: true - description: - type: str - description: - - Description of annotation - required: true - title: - type: str - description: - - Title of annotation - required: true - start: - type: int - description: - - Unix timestamp of event start - - If not specified, it defaults to I(now). - stop: - type: int - description: - - Unix timestamp of event end - - If not specified, it defaults to I(now) + I(duration). 
- duration: - type: int - description: - - Duration in seconds of annotation - default: 0 -''' -EXAMPLES = ''' -- name: Create a simple annotation event with a source, defaults to start and end time of now - community.general.circonus_annotation: - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - description: This is a detailed description of the config change - category: This category groups like annotations - -- name: Create an annotation with a duration of 5 minutes and a default start time of now - community.general.circonus_annotation: - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - description: This is a detailed description of the config change - category: This category groups like annotations - duration: 300 - -- name: Create an annotation with a start_time and end_time - community.general.circonus_annotation: - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - description: This is a detailed description of the config change - category: This category groups like annotations - start_time: 1395940006 - end_time: 1395954407 -''' - -RETURN = ''' -annotation: - description: details about the created annotation - returned: success - type: complex - contains: - _cid: - description: annotation identifier - returned: success - type: str - sample: /annotation/100000 - _created: - description: creation timestamp - returned: success - type: int - sample: 1502236928 - _last_modified: - description: last modification timestamp - returned: success - type: int - sample: 1502236928 - _last_modified_by: - description: last modified by - returned: success - type: str - sample: /user/1000 - category: - description: category of the created annotation - returned: success - type: str - sample: alerts - title: - description: title of the created annotation - returned: success - type: str - sample: WARNING - description: - description: description of the created annotation - returned: success - type: str - sample: Host is down. - start: - description: timestamp, since annotation applies - returned: success - type: int - sample: Host is down. - stop: - description: timestamp, since annotation ends - returned: success - type: str - sample: Host is down. - rel_metrics: - description: Array of metrics related to this annotation, each metrics is a string. - returned: success - type: list - sample: - - 54321_kbps -''' -import json -import time -import traceback -from distutils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests - HAS_REQUESTS = True -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - HAS_REQUESTS = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six import PY3 -from ansible.module_utils.common.text.converters import to_native - - -def check_requests_dep(module): - """Check if an adequate requests version is available""" - if not HAS_REQUESTS: - module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - else: - required_version = '2.0.0' if PY3 else '1.0.0' - if LooseVersion(requests.__version__) < LooseVersion(required_version): - module.fail_json(msg="'requests' library version should be >= %s, found: %s." 
% (required_version, requests.__version__)) - - -def post_annotation(annotation, api_key): - ''' Takes annotation dict and api_key string''' - base_url = 'https://api.circonus.com/v2' - anootate_post_endpoint = '/annotation' - resp = requests.post(base_url + anootate_post_endpoint, - headers=build_headers(api_key), data=json.dumps(annotation)) - resp.raise_for_status() - return resp - - -def create_annotation(module): - ''' Takes ansible module object ''' - annotation = {} - duration = module.params['duration'] - if module.params['start'] is not None: - start = module.params['start'] - else: - start = int(time.time()) - if module.params['stop'] is not None: - stop = module.params['stop'] - else: - stop = int(time.time()) + duration - annotation['start'] = start - annotation['stop'] = stop - annotation['category'] = module.params['category'] - annotation['description'] = module.params['description'] - annotation['title'] = module.params['title'] - return annotation - - -def build_headers(api_token): - '''Takes api token, returns headers with it included.''' - headers = {'X-Circonus-App-Name': 'ansible', - 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, - 'Accept': 'application/json'} - return headers - - -def main(): - '''Main function, dispatches logic''' - module = AnsibleModule( - argument_spec=dict( - start=dict(type='int'), - stop=dict(type='int'), - category=dict(required=True), - title=dict(required=True), - description=dict(required=True), - duration=dict(default=0, type='int'), - api_key=dict(required=True, no_log=True) - ) - ) - - check_requests_dep(module) - - annotation = create_annotation(module) - try: - resp = post_annotation(annotation, module.params['api_key']) - except requests.exceptions.RequestException as e: - module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=True, annotation=resp.json()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/datadog/datadog_downtime.py b/plugins/modules/monitoring/datadog/datadog_downtime.py deleted file mode 100644 index ef308bdabe..0000000000 --- a/plugins/modules/monitoring/datadog/datadog_downtime.py +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Datadog, Inc -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = """ ---- -module: datadog_downtime -short_description: Manages Datadog downtimes -version_added: 2.0.0 -description: - - Manages downtimes within Datadog. - - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/s). -author: - - Datadog (@Datadog) -requirements: - - datadog-api-client - - Python 3.6+ -options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. - - This value can also be set with the C(DATADOG_HOST) environment variable. - required: false - default: https://api.datadoghq.com - type: str - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the downtime. - required: false - choices: ["present", "absent"] - default: present - type: str - id: - description: - - The identifier of the downtime. - - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the C(state). 
- - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. - type: int - monitor_tags: - description: - - A list of monitor tags to which the downtime applies. - - The resulting downtime applies to monitors that match ALL provided monitor tags. - type: list - elements: str - scope: - description: - - A list of scopes to which the downtime applies. - - The resulting downtime applies to sources that matches ALL provided scopes. - type: list - elements: str - monitor_id: - description: - - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. - type: int - downtime_message: - description: - - A message to include with notifications for this downtime. - - Email notifications can be sent to specific users by using the same "@username" notation as events. - type: str - start: - type: int - description: - - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. - end: - type: int - description: - - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it. - timezone: - description: - - The timezone for the downtime. - type: str - rrule: - description: - - The C(RRULE) standard for defining recurring events. - - For example, to have a recurring event on the first day of each month, - select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1). - - Most common rrule options from the iCalendar Spec are supported. - - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)). - type: str -""" - -EXAMPLES = """ - - name: Create a downtime - register: downtime_var - community.general.datadog_downtime: - state: present - monitor_tags: - - "foo:bar" - downtime_message: "Downtime for foo:bar" - scope: "test" - api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created - id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" - - name: Save downtime id to file for later updates and idempotence - delegate_to: localhost - copy: - content: "{{ downtime.downtime.id }}" - dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" -""" - -RETURN = """ -# Returns the downtime JSON dictionary from the API response under the C(downtime) key. -# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details. -downtime: - description: The downtime returned by the API. 
- type: dict - returned: always - sample: { - "active": true, - "canceled": null, - "creator_id": 1445416, - "disabled": false, - "downtime_type": 2, - "end": null, - "id": 1055751000, - "message": "Downtime for foo:bar", - "monitor_id": null, - "monitor_tags": [ - "foo:bar" - ], - "parent_id": null, - "recurrence": null, - "scope": [ - "test" - ], - "start": 1607015009, - "timezone": "UTC", - "updater_id": null - } -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -# Import Datadog -from ansible.module_utils.common.text.converters import to_native - -DATADOG_IMP_ERR = None -HAS_DATADOG = True -try: - from datadog_api_client.v1 import Configuration, ApiClient, ApiException - from datadog_api_client.v1.api.downtimes_api import DowntimesApi - from datadog_api_client.v1.model.downtime import Downtime - from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence -except ImportError: - DATADOG_IMP_ERR = traceback.format_exc() - HAS_DATADOG = False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - api_host=dict(required=False, default="https://api.datadoghq.com"), - app_key=dict(required=True, no_log=True), - state=dict(required=False, choices=["present", "absent"], default="present"), - monitor_tags=dict(required=False, type="list", elements="str"), - scope=dict(required=False, type="list", elements="str"), - monitor_id=dict(required=False, type="int"), - downtime_message=dict(required=False, no_log=True), - start=dict(required=False, type="int"), - end=dict(required=False, type="int"), - timezone=dict(required=False, type="str"), - rrule=dict(required=False, type="str"), - id=dict(required=False, type="int"), - ) - ) - - # Prepare Datadog - if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR) - - configuration = Configuration( - host=module.params["api_host"], - api_key={ - "apiKeyAuth": module.params["api_key"], - "appKeyAuth": module.params["app_key"] - } - ) - with ApiClient(configuration) as api_client: - api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format( - api_client.user_agent - ) - api_instance = DowntimesApi(api_client) - - # Validate api and app keys - try: - api_instance.list_downtimes(current_only=True) - except ApiException as e: - module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key: {0}".format(e)) - - if module.params["state"] == "present": - schedule_downtime(module, api_client) - elif module.params["state"] == "absent": - cancel_downtime(module, api_client) - - -def _get_downtime(module, api_client): - api = DowntimesApi(api_client) - downtime = None - if module.params["id"]: - try: - downtime = api.get_downtime(module.params["id"]) - except ApiException as e: - module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e)) - return downtime - - -def build_downtime(module): - downtime = Downtime() - if module.params["monitor_tags"]: - downtime.monitor_tags = module.params["monitor_tags"] - if module.params["scope"]: - downtime.scope = module.params["scope"] - if module.params["monitor_id"]: - downtime.monitor_id = module.params["monitor_id"] - if module.params["downtime_message"]: - downtime.message = module.params["downtime_message"] - if module.params["start"]: - downtime.start = module.params["start"] - if module.params["end"]: - downtime.end = 
module.params["end"] - if module.params["timezone"]: - downtime.timezone = module.params["timezone"] - if module.params["rrule"]: - downtime.recurrence = DowntimeRecurrence( - rrule=module.params["rrule"] - ) - return downtime - - -def _post_downtime(module, api_client): - api = DowntimesApi(api_client) - downtime = build_downtime(module) - try: - resp = api.create_downtime(downtime) - module.params["id"] = resp.id - module.exit_json(changed=True, downtime=resp.to_dict()) - except ApiException as e: - module.fail_json(msg="Failed to create downtime: {0}".format(e)) - - -def _equal_dicts(a, b, ignore_keys): - ka = set(a).difference(ignore_keys) - kb = set(b).difference(ignore_keys) - return ka == kb and all(a[k] == b[k] for k in ka) - - -def _update_downtime(module, current_downtime, api_client): - api = DowntimesApi(api_client) - downtime = build_downtime(module) - try: - if current_downtime.disabled: - resp = api.create_downtime(downtime) - else: - resp = api.update_downtime(module.params["id"], downtime) - if _equal_dicts( - resp.to_dict(), - current_downtime.to_dict(), - ["active", "creator_id", "updater_id"] - ): - module.exit_json(changed=False, downtime=resp.to_dict()) - else: - module.exit_json(changed=True, downtime=resp.to_dict()) - except ApiException as e: - module.fail_json(msg="Failed to update downtime: {0}".format(e)) - - -def schedule_downtime(module, api_client): - downtime = _get_downtime(module, api_client) - if downtime is None: - _post_downtime(module, api_client) - else: - _update_downtime(module, downtime, api_client) - - -def cancel_downtime(module, api_client): - downtime = _get_downtime(module, api_client) - api = DowntimesApi(api_client) - if downtime is None: - module.exit_json(changed=False) - try: - api.cancel_downtime(downtime["id"]) - except ApiException as e: - module.fail_json(msg="Failed to create downtime: {0}".format(e)) - - module.exit_json(changed=True) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py deleted file mode 100644 index 6284b5bf23..0000000000 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Author: Artūras 'arturaz' Šlajus -# Author: Naoya Nakazawa -# -# This module is proudly sponsored by iGeolise (www.igeolise.com) and -# Tiny Lab Productions (www.tinylabproductions.com). -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: datadog_event -short_description: Posts events to Datadog service -description: -- "Allows to post events to Datadog (www.datadoghq.com) service." -- "Uses http://docs.datadoghq.com/api/#events API." -author: -- "Artūras `arturaz` Šlajus (@arturaz)" -- "Naoya Nakazawa (@n0ts)" -options: - api_key: - type: str - description: ["Your DataDog API key."] - required: true - app_key: - type: str - description: ["Your DataDog app key."] - required: true - title: - type: str - description: ["The event title."] - required: true - text: - type: str - description: ["The body of the event."] - required: true - date_happened: - type: int - description: - - POSIX timestamp of the event. - - Default value is now. 
- priority: - type: str - description: ["The priority of the event."] - default: normal - choices: [normal, low] - host: - type: str - description: - - Host name to associate with the event. - - If not specified, it defaults to the remote system's hostname. - api_host: - type: str - description: - - DataDog API endpoint URL. - version_added: '3.3.0' - tags: - type: list - elements: str - description: ["Comma separated list of tags to apply to the event."] - alert_type: - type: str - description: ["Type of alert."] - default: info - choices: ['error', 'warning', 'info', 'success'] - aggregation_key: - type: str - description: ["An arbitrary string to use for aggregation."] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' - -EXAMPLES = ''' -- name: Post an event with low priority - community.general.datadog_event: - title: Testing from ansible - text: Test - priority: low - api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 - app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN - -- name: Post an event with several tags - community.general.datadog_event: - title: Testing from ansible - text: Test - api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 - app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN - tags: 'aa,bb,#host:{{ inventory_hostname }}' - -- name: Post an event with several tags to another endpoint - community.general.datadog_event: - title: Testing from ansible - text: Test - api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 - app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN - api_host: 'https://example.datadoghq.eu' - tags: - - aa - - b - - '#host:{{ inventory_hostname }}' - -''' - -import platform -import traceback - -# Import Datadog -DATADOG_IMP_ERR = None -try: - from datadog import initialize, api - HAS_DATADOG = True -except Exception: - DATADOG_IMP_ERR = traceback.format_exc() - HAS_DATADOG = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - app_key=dict(required=True, no_log=True), - api_host=dict(type='str'), - title=dict(required=True), - text=dict(required=True), - date_happened=dict(type='int'), - priority=dict(default='normal', choices=['normal', 'low']), - host=dict(), - tags=dict(type='list', elements='str'), - alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), - aggregation_key=dict(no_log=False), - validate_certs=dict(default=True, type='bool'), - ) - ) - - # Prepare Datadog - if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) - - options = { - 'api_key': module.params['api_key'], - 'app_key': module.params['app_key'], - } - if module.params['api_host'] is not None: - options['api_host'] = module.params['api_host'] - - initialize(**options) - - _post_event(module) - - -def _post_event(module): - try: - if module.params['host'] is None: - module.params['host'] = platform.node().split('.')[0] - msg = api.Event.create(title=module.params['title'], - text=module.params['text'], - host=module.params['host'], - tags=module.params['tags'], - priority=module.params['priority'], - alert_type=module.params['alert_type'], - aggregation_key=module.params['aggregation_key'], - source_type_name='ansible') - if msg['status'] != 'ok': - 
module.fail_json(msg=msg) - - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py deleted file mode 100644 index ab25777ecd..0000000000 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ /dev/null @@ -1,411 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Sebastian Kornehl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: datadog_monitor -short_description: Manages Datadog monitors -description: - - Manages monitors within Datadog. - - Options as described on https://docs.datadoghq.com/api/. -author: Sebastian Kornehl (@skornehl) -requirements: [datadog] -options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. Default value is C(https://api.datadoghq.com). - - This value can also be set with the C(DATADOG_HOST) environment variable. - required: false - type: str - version_added: '0.2.0' - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the monitor. - required: true - choices: ['present', 'absent', 'mute', 'unmute'] - type: str - tags: - description: - - A list of tags to associate with your monitor when creating or updating. - - This can help you categorize and filter monitors. - type: list - elements: str - type: - description: - - The type of the monitor. - - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. - - The type C(composite) was added in community.general 3.4.0. - choices: - - metric alert - - service check - - event alert - - process alert - - log alert - - query alert - - trace-analytics alert - - rum alert - - composite - type: str - query: - description: - - The monitor query to notify on. - - Syntax varies depending on what type of monitor you are creating. - type: str - name: - description: - - The name of the alert. - required: true - type: str - notification_message: - description: - - A message to include with notifications for this monitor. - - Email notifications can be sent to specific users by using the same '@username' notation as events. - - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'. - type: str - silenced: - type: dict - description: - - Dictionary of scopes to silence, with timestamps or None. - - Each scope will be muted until the given POSIX timestamp or forever if the value is None. - default: "" - notify_no_data: - description: - - Whether this monitor will notify when data stops reporting. - type: bool - default: 'no' - no_data_timeframe: - description: - - The number of minutes before a monitor will notify when data stops reporting. - - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. - - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. - type: str - timeout_h: - description: - - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state. 
- type: str - renotify_interval: - description: - - The number of minutes after the last notification before a monitor will re-notify on the current status. - - It will only re-notify if it is not resolved. - type: str - escalation_message: - description: - - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. - - Not applicable if I(renotify_interval=None). - type: str - notify_audit: - description: - - Whether tagged users will be notified on changes to this monitor. - type: bool - default: 'no' - thresholds: - type: dict - description: - - A dictionary of thresholds by status. - - Only available for service checks and metric alerts. - - Because each of them can have multiple thresholds, we do not define them directly in the query. - - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})." - locked: - description: - - Whether changes to this monitor should be restricted to the creator or admins. - type: bool - default: 'no' - require_full_window: - description: - - Whether this monitor needs a full window of data before it gets evaluated. - - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped. - type: bool - new_host_delay: - description: - - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. - - This gives the host time to fully initialize. - type: str - evaluation_delay: - description: - - Time to delay evaluation (in seconds). - - Effective for sparse values. - type: str - id: - description: - - The ID of the alert. - - If set, will be used instead of the name to locate the alert. - type: str - include_tags: - description: - - Whether notifications from this monitor automatically inserts its triggering tags into the title. - type: bool - default: yes - version_added: 1.3.0 -''' - -EXAMPLES = ''' -- name: Create a metric monitor - community.general.datadog_monitor: - type: "metric alert" - name: "Test monitor" - state: "present" - query: "datadog.agent.up.over('host:host1').last(2).count_by_status()" - notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog." 
- api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Deletes a monitor - community.general.datadog_monitor: - name: "Test monitor" - state: "absent" - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Mutes a monitor - community.general.datadog_monitor: - name: "Test monitor" - state: "mute" - silenced: '{"*":None}' - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Unmutes a monitor - community.general.datadog_monitor: - name: "Test monitor" - state: "unmute" - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Use datadoghq.eu platform instead of datadoghq.com - community.general.datadog_monitor: - name: "Test monitor" - state: "absent" - api_host: https://api.datadoghq.eu - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" -''' -import traceback - -# Import Datadog -DATADOG_IMP_ERR = None -try: - from datadog import initialize, api - HAS_DATADOG = True -except Exception: - DATADOG_IMP_ERR = traceback.format_exc() - HAS_DATADOG = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - api_host=dict(), - app_key=dict(required=True, no_log=True), - state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), - type=dict(choices=['metric alert', 'service check', 'event alert', 'process alert', - 'log alert', 'query alert', 'trace-analytics alert', - 'rum alert', 'composite']), - name=dict(required=True), - query=dict(), - notification_message=dict(no_log=True), - silenced=dict(type='dict'), - notify_no_data=dict(default=False, type='bool'), - no_data_timeframe=dict(), - timeout_h=dict(), - renotify_interval=dict(), - escalation_message=dict(), - notify_audit=dict(default=False, type='bool'), - thresholds=dict(type='dict', default=None), - tags=dict(type='list', elements='str', default=None), - locked=dict(default=False, type='bool'), - require_full_window=dict(type='bool'), - new_host_delay=dict(), - evaluation_delay=dict(), - id=dict(), - include_tags=dict(required=False, default=True, type='bool'), - ) - ) - - # Prepare Datadog - if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) - - options = { - 'api_key': module.params['api_key'], - 'api_host': module.params['api_host'], - 'app_key': module.params['app_key'] - } - - initialize(**options) - - # Check if api_key and app_key is correct or not - # if not, then fail here. 
- response = api.Monitor.get_all() - if isinstance(response, dict): - msg = response.get('errors', None) - if msg: - module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0])) - - if module.params['state'] == 'present': - install_monitor(module) - elif module.params['state'] == 'absent': - delete_monitor(module) - elif module.params['state'] == 'mute': - mute_monitor(module) - elif module.params['state'] == 'unmute': - unmute_monitor(module) - - -def _fix_template_vars(message): - if message: - return message.replace('[[', '{{').replace(']]', '}}') - return message - - -def _get_monitor(module): - if module.params['id'] is not None: - monitor = api.Monitor.get(module.params['id']) - if 'errors' in monitor: - module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors']))) - return monitor - else: - monitors = api.Monitor.get_all() - for monitor in monitors: - if monitor['name'] == _fix_template_vars(module.params['name']): - return monitor - return {} - - -def _post_monitor(module, options): - try: - kwargs = dict(type=module.params['type'], query=module.params['query'], - name=_fix_template_vars(module.params['name']), - message=_fix_template_vars(module.params['notification_message']), - escalation_message=_fix_template_vars(module.params['escalation_message']), - options=options) - if module.params['tags'] is not None: - kwargs['tags'] = module.params['tags'] - msg = api.Monitor.create(**kwargs) - if 'errors' in msg: - module.fail_json(msg=str(msg['errors'])) - else: - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def _equal_dicts(a, b, ignore_keys): - ka = set(a).difference(ignore_keys) - kb = set(b).difference(ignore_keys) - return ka == kb and all(a[k] == b[k] for k in ka) - - -def _update_monitor(module, monitor, options): - try: - kwargs = dict(id=monitor['id'], query=module.params['query'], - name=_fix_template_vars(module.params['name']), - message=_fix_template_vars(module.params['notification_message']), - escalation_message=_fix_template_vars(module.params['escalation_message']), - options=options) - if module.params['tags'] is not None: - kwargs['tags'] = module.params['tags'] - msg = api.Monitor.update(**kwargs) - - if 'errors' in msg: - module.fail_json(msg=str(msg['errors'])) - elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']): - module.exit_json(changed=False, msg=msg) - else: - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def install_monitor(module): - options = { - "silenced": module.params['silenced'], - "notify_no_data": module.boolean(module.params['notify_no_data']), - "no_data_timeframe": module.params['no_data_timeframe'], - "timeout_h": module.params['timeout_h'], - "renotify_interval": module.params['renotify_interval'], - "escalation_message": module.params['escalation_message'], - "notify_audit": module.boolean(module.params['notify_audit']), - "locked": module.boolean(module.params['locked']), - "require_full_window": module.params['require_full_window'], - "new_host_delay": module.params['new_host_delay'], - "evaluation_delay": module.params['evaluation_delay'], - "include_tags": module.params['include_tags'], - } - - if module.params['type'] == "service check": - options["thresholds"] = 
module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1} - if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None: - options["thresholds"] = module.params['thresholds'] - - monitor = _get_monitor(module) - if not monitor: - _post_monitor(module, options) - else: - _update_monitor(module, monitor, options) - - -def delete_monitor(module): - monitor = _get_monitor(module) - if not monitor: - module.exit_json(changed=False) - try: - msg = api.Monitor.delete(monitor['id']) - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def mute_monitor(module): - monitor = _get_monitor(module) - if not monitor: - module.fail_json(msg="Monitor %s not found!" % module.params['name']) - elif monitor['options']['silenced']: - module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0): - module.exit_json(changed=False) - try: - if module.params['silenced'] is None or module.params['silenced'] == "": - msg = api.Monitor.mute(id=monitor['id']) - else: - msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def unmute_monitor(module): - monitor = _get_monitor(module) - if not monitor: - module.fail_json(msg="Monitor %s not found!" % module.params['name']) - elif not monitor['options']['silenced']: - module.exit_json(changed=False) - try: - msg = api.Monitor.unmute(monitor['id']) - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/honeybadger_deployment.py b/plugins/modules/monitoring/honeybadger_deployment.py deleted file mode 100644 index 2e2198e1a3..0000000000 --- a/plugins/modules/monitoring/honeybadger_deployment.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014 Benjamin Curtis -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: honeybadger_deployment -author: "Benjamin Curtis (@stympy)" -short_description: Notify Honeybadger.io about app deployments -description: - - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) -options: - token: - type: str - description: - - API token. - required: true - environment: - type: str - description: - - The environment name, typically 'production', 'staging', etc. - required: true - user: - type: str - description: - - The username of the person doing the deployment - repo: - type: str - description: - - URL of the project repository - revision: - type: str - description: - - A hash, number, tag, or other identifier showing what revision was deployed - url: - type: str - description: - - Optional URL to submit the notification to. 
- default: "https://api.honeybadger.io/v1/deploys" - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - -''' - -EXAMPLES = ''' -- name: Notify Honeybadger.io about an app deployment - community.general.honeybadger_deployment: - token: AAAAAA - environment: staging - user: ansible - revision: b6826b8 - repo: 'git@github.com:user/repo.git' -''' - -RETURN = '''# ''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - environment=dict(required=True), - user=dict(required=False), - repo=dict(required=False), - revision=dict(required=False), - url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - params = {} - - if module.params["environment"]: - params["deploy[environment]"] = module.params["environment"] - - if module.params["user"]: - params["deploy[local_username]"] = module.params["user"] - - if module.params["repo"]: - params["deploy[repository]"] = module.params["repo"] - - if module.params["revision"]: - params["deploy[revision]"] = module.params["revision"] - - params["api_key"] = module.params["token"] - - url = module.params.get('url') - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - try: - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - except Exception as e: - module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc()) - else: - if info['status'] == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/icinga2_feature.py b/plugins/modules/monitoring/icinga2_feature.py deleted file mode 100644 index b59c0e11e4..0000000000 --- a/plugins/modules/monitoring/icinga2_feature.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2016, Loic Blot -# Copyright (c) 2018, Ansible Project -# Sponsored by Infopro Digital. http://www.infopro-digital.com/ -# Sponsored by E.T.A.I. http://www.etai.fr/ -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: icinga2_feature - -short_description: Manage Icinga2 feature -description: - - This module can be used to enable or disable an Icinga2 feature. -author: "Loic Blot (@nerzhul)" -options: - name: - type: str - description: - - This is the feature name to enable or disable. - required: True - state: - type: str - description: - - If set to C(present) and feature is disabled, then feature is enabled. - - If set to C(present) and feature is already enabled, then nothing is changed. - - If set to C(absent) and feature is enabled, then feature is disabled. 
- - If set to C(absent) and feature is already disabled, then nothing is changed. - choices: [ "present", "absent" ] - default: present -''' - -EXAMPLES = ''' -- name: Enable ido-pgsql feature - community.general.icinga2_feature: - name: ido-pgsql - state: present - -- name: Disable api feature - community.general.icinga2_feature: - name: api - state: absent -''' - -RETURN = ''' -# -''' - -import re -from ansible.module_utils.basic import AnsibleModule - - -class Icinga2FeatureHelper: - def __init__(self, module): - self.module = module - self._icinga2 = module.get_bin_path('icinga2', True) - self.feature_name = self.module.params['name'] - self.state = self.module.params['state'] - - def _exec(self, args): - cmd = [self._icinga2, 'feature'] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return rc, out - - def manage(self): - rc, out = self._exec(["list"]) - if rc != 0: - self.module.fail_json(msg="Unable to list icinga2 features. " - "Ensure icinga2 is installed and present in binary path.") - - # If feature is already in good state, just exit - if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \ - (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"): - self.module.exit_json(changed=False) - - if self.module.check_mode: - self.module.exit_json(changed=True) - - feature_enable_str = "enable" if self.state == "present" else "disable" - - rc, out = self._exec([feature_enable_str, self.feature_name]) - - change_applied = False - if self.state == "present": - if rc != 0: - self.module.fail_json(msg="Failed to %s feature %s." - " icinga2 command returned %s" % (feature_enable_str, - self.feature_name, - out)) - - if re.search("already enabled", out) is None: - change_applied = True - else: - if rc == 0: - change_applied = True - # RC is not 0 for this already disabled feature, handle it as no change applied - elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out): - change_applied = False - else: - self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out) - - self.module.exit_json(changed=change_applied) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=["present", "absent"], default="present") - ), - supports_check_mode=True - ) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - Icinga2FeatureHelper(module).manage() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/icinga2_host.py b/plugins/modules/monitoring/icinga2_host.py deleted file mode 100644 index b4c4cdbcfb..0000000000 --- a/plugins/modules/monitoring/icinga2_host.py +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This module is proudly sponsored by CGI (www.cgi.com) and -# KPN (www.kpn.com). -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: icinga2_host -short_description: Manage a host in Icinga2 -description: - - "Add or remove a host to Icinga2 through the API." 
- - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" -author: "Jurgen Brand (@t794104)" -options: - url: - type: str - description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path - use_proxy: - description: - - If C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - url_username: - type: str - description: - - The username for use in HTTP basic authentication. - - This parameter can be used without C(url_password) for sites that allow empty passwords. - url_password: - type: str - description: - - The password for use in HTTP basic authentication. - - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used. - force_basic_auth: - description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. - type: bool - default: 'no' - client_cert: - type: path - description: - - PEM formatted certificate chain file to be used for SSL client - authentication. This file can also include the key as well, and if - the key is included, C(client_key) is not required. - client_key: - type: path - description: - - PEM formatted file that contains your private key to be used for SSL - client authentication. If C(client_cert) contains both the certificate - and key, this option is not required. - state: - type: str - description: - - Apply feature state. - choices: [ "present", "absent" ] - default: present - name: - type: str - description: - - Name used to create / delete the host. This does not need to be the FQDN, but does needs to be unique. - required: true - aliases: [host] - zone: - type: str - description: - - The zone from where this host should be polled. - template: - type: str - description: - - The template used to define the host. - - Template cannot be modified after object creation. - check_command: - type: str - description: - - The command used to check if the host is alive. - default: "hostalive" - display_name: - type: str - description: - - The name used to display the host. - - If not specified, it defaults to the value of the I(name) parameter. - ip: - type: str - description: - - The IP address of the host. - required: true - variables: - type: dict - description: - - Dictionary of variables. 
-extends_documentation_fragment: - - url -''' - -EXAMPLES = ''' -- name: Add host to icinga - community.general.icinga2_host: - url: "https://icinga2.example.com" - url_username: "ansible" - url_password: "a_secret" - state: present - name: "{{ ansible_fqdn }}" - ip: "{{ ansible_default_ipv4.address }}" - variables: - foo: "bar" - delegate_to: 127.0.0.1 -''' - -RETURN = ''' -name: - description: The name used to create, modify or delete the host - type: str - returned: always -data: - description: The data structure used for create, modify or delete of the host - type: dict - returned: always -''' - -import json -import os - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url, url_argument_spec - - -# =========================================== -# Icinga2 API class -# -class icinga2_api: - module = None - - def __init__(self, module): - self.module = module - - def call_url(self, path, data='', method='GET'): - headers = { - 'Accept': 'application/json', - 'X-HTTP-Method-Override': method, - } - url = self.module.params.get("url") + "/" + path - rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy']) - body = '' - if rsp: - body = json.loads(rsp.read()) - if info['status'] >= 400: - body = info['body'] - return {'code': info['status'], 'data': body} - - def check_connection(self): - ret = self.call_url('v1/status') - if ret['code'] == 200: - return True - return False - - def exists(self, hostname): - data = { - "filter": "match(\"" + hostname + "\", host.name)", - } - ret = self.call_url( - path="v1/objects/hosts", - data=self.module.jsonify(data) - ) - if ret['code'] == 200: - if len(ret['data']['results']) == 1: - return True - return False - - def create(self, hostname, data): - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - data=self.module.jsonify(data), - method="PUT" - ) - return ret - - def delete(self, hostname): - data = {"cascade": 1} - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - data=self.module.jsonify(data), - method="DELETE" - ) - return ret - - def modify(self, hostname, data): - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - data=self.module.jsonify(data), - method="POST" - ) - return ret - - def diff(self, hostname, data): - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - method="GET" - ) - changed = False - ic_data = ret['data']['results'][0] - for key in data['attrs']: - if key not in ic_data['attrs'].keys(): - changed = True - elif data['attrs'][key] != ic_data['attrs'][key]: - changed = True - return changed - - -# =========================================== -# Module execution. 
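
The icinga2_api class above relies on one Icinga2 API convention worth spelling out: the effective HTTP verb is tunnelled through the X-HTTP-Method-Override header while every request asks for JSON. A stripped-down sketch of the same call pattern with the standard library; the authentication, proxy, and TLS handling that fetch_url provides are deliberately omitted here:

import json
from urllib.request import Request, urlopen

def icinga2_call(base_url, path, payload=None, method='GET'):
    # Same header trick as icinga2_api.call_url(): ask for JSON, tunnel the verb.
    headers = {'Accept': 'application/json', 'X-HTTP-Method-Override': method}
    data = json.dumps(payload).encode('utf-8') if payload is not None else None
    req = Request('%s/%s' % (base_url.rstrip('/'), path), data=data,
                  headers=headers, method=method)
    with urlopen(req) as resp:
        return resp.status, json.loads(resp.read())

For example, icinga2_call(url, 'v1/objects/hosts/web01', {'cascade': 1}, method='DELETE') would mirror what the delete() method above sends (hostname hypothetical).
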
-# -def main(): - # use the predefined argument spec for url - argument_spec = url_argument_spec() - # add our own arguments - argument_spec.update( - state=dict(default="present", choices=["absent", "present"]), - name=dict(required=True, aliases=['host']), - zone=dict(), - template=dict(default=None), - check_command=dict(default="hostalive"), - display_name=dict(default=None), - ip=dict(required=True), - variables=dict(type='dict', default=None), - ) - - # Define the main module - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - state = module.params["state"] - name = module.params["name"] - zone = module.params["zone"] - template = [name] - if module.params["template"]: - template.append(module.params["template"]) - check_command = module.params["check_command"] - ip = module.params["ip"] - display_name = module.params["display_name"] - if not display_name: - display_name = name - variables = module.params["variables"] - - try: - icinga = icinga2_api(module=module) - icinga.check_connection() - except Exception as e: - module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e)) - - data = { - 'attrs': { - 'address': ip, - 'display_name': display_name, - 'check_command': check_command, - 'zone': zone, - 'vars': { - 'made_by': "ansible", - }, - 'templates': template, - } - } - - if variables: - data['attrs']['vars'].update(variables) - - changed = False - if icinga.exists(name): - if state == "absent": - if module.check_mode: - module.exit_json(changed=True, name=name, data=data) - else: - try: - ret = icinga.delete(name) - if ret['code'] == 200: - changed = True - else: - module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data'])) - except Exception as e: - module.fail_json(msg="exception deleting host: " + str(e)) - - elif icinga.diff(name, data): - if module.check_mode: - module.exit_json(changed=False, name=name, data=data) - - # Template attribute is not allowed in modification - del data['attrs']['templates'] - - ret = icinga.modify(name, data) - - if ret['code'] == 200: - changed = True - else: - module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data'])) - - else: - if state == "present": - if module.check_mode: - changed = True - else: - try: - ret = icinga.create(name, data) - if ret['code'] == 200: - changed = True - else: - module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data'])) - except Exception as e: - module.fail_json(msg="exception creating host: " + str(e)) - - module.exit_json(changed=changed, name=name, data=data) - - -# import module snippets -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/librato_annotation.py b/plugins/modules/monitoring/librato_annotation.py deleted file mode 100644 index 6fcabcf34e..0000000000 --- a/plugins/modules/monitoring/librato_annotation.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (C) Seth Edwards, 2014 -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: librato_annotation -short_description: create an annotation in librato -description: - - Create an annotation event on the given annotation stream :name. 
If the annotation stream does not exist, it will be created automatically -author: "Seth Edwards (@Sedward)" -requirements: [] -options: - user: - type: str - description: - - Librato account username - required: true - api_key: - type: str - description: - - Librato account api key - required: true - name: - type: str - description: - - The annotation stream name - - If the annotation stream does not exist, it will be created automatically - required: false - title: - type: str - description: - - The title of an annotation is a string and may contain spaces - - The title should be a short, high-level summary of the annotation e.g. v45 Deployment - required: true - source: - type: str - description: - - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population - required: false - description: - type: str - description: - - The description contains extra metadata about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! - required: false - start_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation started - required: false - end_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation ended - - For events that have a duration, this is a useful way to annotate the duration of the event - required: false - links: - type: list - elements: dict - description: - - See examples -''' - -EXAMPLES = ''' -- name: Create a simple annotation event with a source - community.general.librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - source: foo.bar - description: This is a detailed description of the config change - -- name: Create an annotation that includes a link - community.general.librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXXX - name: code.deploy - title: app code deploy - description: this is a detailed description of a deployment - links: - - rel: example - href: http://www.example.com/deploy - -- name: Create an annotation with a start_time and end_time - community.general.librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXXX - name: maintenance - title: Maintenance window - description: This is a detailed description of maintenance - start_time: 1395940006 - end_time: 1395954406 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def post_annotation(module): - user = module.params['user'] - api_key = module.params['api_key'] - name = module.params['name'] - title = module.params['title'] - - url = 'https://metrics-api.librato.com/v1/annotations/%s' % name - params = {} - params['title'] = title - - if module.params['source'] is not None: - params['source'] = module.params['source'] - if module.params['description'] is not None: - params['description'] = module.params['description'] - if module.params['start_time'] is not None: - params['start_time'] = module.params['start_time'] - if module.params['end_time'] is not None: - params['end_time'] = module.params['end_time'] - if module.params['links'] is not None: - params['links'] = module.params['links'] - - json_body = module.jsonify(params) - - headers = {} - headers['Content-Type'] = 'application/json' - - # Hack send parameters the way fetch_url wants them - 
module.params['url_username'] = user - module.params['url_password'] = api_key - response, info = fetch_url(module, url, data=json_body, headers=headers) - response_code = str(info['status']) - response_body = info['body'] - if info['status'] != 201: - if info['status'] >= 400: - module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body) - else: - module.fail_json(msg="Request Failed. Response code: " + response_code) - response = response.read() - module.exit_json(changed=True, annotation=response) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True), - api_key=dict(required=True, no_log=True), - name=dict(required=False), - title=dict(required=True), - source=dict(required=False), - description=dict(required=False), - start_time=dict(required=False, default=None, type='int'), - end_time=dict(required=False, default=None, type='int'), - links=dict(type='list', elements='dict') - ) - ) - - post_annotation(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/logentries.py b/plugins/modules/monitoring/logentries.py deleted file mode 100644 index 8f39fb5106..0000000000 --- a/plugins/modules/monitoring/logentries.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Ivan Vanderbyl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: logentries -author: "Ivan Vanderbyl (@ivanvanderbyl)" -short_description: Module for tracking logs via logentries.com -description: - - Sends logs to LogEntries in realtime -options: - path: - type: str - description: - - path to a log file - required: true - state: - type: str - description: - - following state of the log - choices: [ 'present', 'absent', 'followed', 'unfollowed' ] - required: false - default: present - name: - type: str - description: - - name of the log - required: false - logtype: - type: str - description: - - type of the log - required: false - aliases: [type] - -notes: - - Requires the LogEntries agent which can be installed following the instructions at logentries.com -''' -EXAMPLES = ''' -- name: Track nginx logs - community.general.logentries: - path: /var/log/nginx/access.log - state: present - name: nginx-access-log - -- name: Stop tracking nginx logs - community.general.logentries: - path: /var/log/nginx/error.log - state: absent -''' - -from ansible.module_utils.basic import AnsibleModule - - -def query_log_status(module, le_path, path, state="present"): - """ Returns whether a log is followed or not. """ - - if state == "present": - rc, out, err = module.run_command("%s followed %s" % (le_path, path)) - if rc == 0: - return True - - return False - - -def follow_log(module, le_path, logs, name=None, logtype=None): - """ Follows one or more logs if not already followed. 
""" - - followed_count = 0 - - for log in logs: - if query_log_status(module, le_path, log): - continue - - if module.check_mode: - module.exit_json(changed=True) - - cmd = [le_path, 'follow', log] - if name: - cmd.extend(['--name', name]) - if logtype: - cmd.extend(['--type', logtype]) - rc, out, err = module.run_command(' '.join(cmd)) - - if not query_log_status(module, le_path, log): - module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) - - followed_count += 1 - - if followed_count > 0: - module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) - - module.exit_json(changed=False, msg="logs(s) already followed") - - -def unfollow_log(module, le_path, logs): - """ Unfollows one or more logs if followed. """ - - removed_count = 0 - - # Using a for loop in case of error, we can report the package that failed - for log in logs: - # Query the log first, to see if we even need to remove. - if not query_log_status(module, le_path, log): - continue - - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'rm', log]) - - if query_log_status(module, le_path, log): - module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) - - removed_count += 1 - - if removed_count > 0: - module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) - - module.exit_json(changed=False, msg="logs(s) already unfollowed") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(required=True), - state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), - name=dict(required=False, default=None, type='str'), - logtype=dict(required=False, default=None, type='str', aliases=['type']) - ), - supports_check_mode=True - ) - - le_path = module.get_bin_path('le', True, ['/usr/local/bin']) - - p = module.params - - # Handle multiple log files - logs = p["path"].split(",") - logs = filter(None, logs) - - if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) - - elif p["state"] in ["absent", "unfollowed"]: - unfollow_log(module, le_path, logs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/logstash_plugin.py b/plugins/modules/monitoring/logstash_plugin.py deleted file mode 100644 index 5d1cd488ab..0000000000 --- a/plugins/modules/monitoring/logstash_plugin.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Loic Blot -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: logstash_plugin -short_description: Manage Logstash plugins -description: - - Manages Logstash plugins. -author: Loic Blot (@nerzhul) -options: - name: - type: str - description: - - Install plugin with that name. - required: True - state: - type: str - description: - - Apply plugin state. - choices: ["present", "absent"] - default: present - plugin_bin: - type: path - description: - - Specify logstash-plugin to use for plugin management. - default: /usr/share/logstash/bin/logstash-plugin - proxy_host: - type: str - description: - - Proxy host to use during plugin installation. - proxy_port: - type: str - description: - - Proxy port to use during plugin installation. - version: - type: str - description: - - Specify plugin Version of the plugin to install. 
- If plugin exists with previous version, it will NOT be updated. -''' - -EXAMPLES = ''' -- name: Install Logstash beats input plugin - community.general.logstash_plugin: - state: present - name: logstash-input-beats - -- name: Install specific version of a plugin - community.general.logstash_plugin: - state: present - name: logstash-input-syslog - version: '3.2.0' - -- name: Uninstall Logstash plugin - community.general.logstash_plugin: - state: absent - name: logstash-filter-multiline - -- name: Install Logstash plugin with alternate heap size - community.general.logstash_plugin: - state: present - name: logstash-input-beats - environment: - LS_JAVA_OPTS: "-Xms256m -Xmx256m" -''' - -from ansible.module_utils.basic import AnsibleModule - - -PACKAGE_STATE_MAP = dict( - present="install", - absent="remove" -) - - -def is_plugin_present(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, "list", plugin_name] - rc, out, err = module.run_command(" ".join(cmd_args)) - return rc == 0 - - -def parse_error(string): - reason = "reason: " - try: - return string[string.index(reason) + len(reason):].strip() - except ValueError: - return string - - -def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] - - if version: - cmd_args.append("--version %s" % version) - - if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, cmd, out, err - - -def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name] - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, cmd, out, err - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), - proxy_host=dict(), - proxy_port=dict(), - version=dict() - ), - supports_check_mode=True - ) - - name = module.params["name"] - state = module.params["state"] - plugin_bin = module.params["plugin_bin"] - proxy_host = module.params["proxy_host"] - proxy_port = module.params["proxy_port"] - version = module.params["version"] - - present = is_plugin_present(module, plugin_bin, name) - - # skip if the state is correct - if (present and state == "present") or (state == "absent" and not present): - module.exit_json(changed=False, name=name, state=state) - - if state == "present": - changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port) - elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name) - - module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/monit.py b/plugins/modules/monitoring/monit.py deleted file mode 100644 index 1dfe76d65f..0000000000 --- a/plugins/modules/monitoring/monit.py +++ /dev/null @@ -1,340 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Darryl 
Stoflet -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: monit -short_description: Manage the state of a program monitored via Monit -description: - - Manage the state of a program monitored via I(Monit). -options: - name: - description: - - The name of the I(monit) program/process to manage. - required: true - type: str - state: - description: - - The state of service. - required: true - choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] - type: str - timeout: - description: - - If there are pending actions for the service monitored by monit, then Ansible will check - for up to this many seconds to verify the requested action has been performed. - Ansible will sleep for five seconds between each check. - default: 300 - type: int -author: - - Darryl Stoflet (@dstoflet) - - Simon Kelly (@snopoke) -''' - -EXAMPLES = ''' -- name: Manage the state of program httpd to be in started state - community.general.monit: - name: httpd - state: started -''' - -import time -import re - -from collections import namedtuple - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import python_2_unicode_compatible - - -STATE_COMMAND_MAP = { - 'stopped': 'stop', - 'started': 'start', - 'monitored': 'monitor', - 'unmonitored': 'unmonitor', - 'restarted': 'restart' -} - -MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program', - 'Network'] - - -@python_2_unicode_compatible -class StatusValue(namedtuple("Status", "value, is_pending")): - MISSING = 'missing' - OK = 'ok' - NOT_MONITORED = 'not_monitored' - INITIALIZING = 'initializing' - DOES_NOT_EXIST = 'does_not_exist' - EXECUTION_FAILED = 'execution_failed' - ALL_STATUS = [ - MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED - ] - - def __new__(cls, value, is_pending=False): - return super(StatusValue, cls).__new__(cls, value, is_pending) - - def pending(self): - return StatusValue(self.value, True) - - def __getattr__(self, item): - if item in ('is_%s' % status for status in self.ALL_STATUS): - return self.value == getattr(self, item[3:].upper()) - raise AttributeError(item) - - def __str__(self): - return "%s%s" % (self.value, " (pending)" if self.is_pending else "") - - -class Status(object): - MISSING = StatusValue(StatusValue.MISSING) - OK = StatusValue(StatusValue.OK) - RUNNING = StatusValue(StatusValue.OK) - NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED) - INITIALIZING = StatusValue(StatusValue.INITIALIZING) - DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST) - EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED) - - -class Monit(object): - def __init__(self, module, monit_bin_path, service_name, timeout): - self.module = module - self.monit_bin_path = monit_bin_path - self.process_name = service_name - self.timeout = timeout - - self._monit_version = None - self._raw_version = None - self._status_change_retry_count = 6 - - def monit_version(self): - if self._monit_version is None: - self._raw_version, version = self._get_monit_version() - # Use only major and minor even if there are more these should be enough - self._monit_version = version[0], version[1] - return self._monit_version - - def _get_monit_version(self): - rc, out, err = self.module.run_command('%s -V' % self.monit_bin_path, check_rc=True) - 
version_line = out.split('\n')[0] - raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group() - return raw_version, tuple(map(int, raw_version.split('.'))) - - def exit_fail(self, msg, status=None, **kwargs): - kwargs.update({ - 'msg': msg, - 'monit_version': self._raw_version, - 'process_status': str(status) if status else None, - }) - self.module.fail_json(**kwargs) - - def exit_success(self, state): - self.module.exit_json(changed=True, name=self.process_name, state=state) - - @property - def command_args(self): - return "-B" if self.monit_version() > (5, 18) else "" - - def get_status(self, validate=False): - """Return the status of the process in monit. - - :@param validate: Force monit to re-check the status of the process - """ - monit_command = "validate" if validate else "status" - check_rc = False if validate else True # 'validate' always has rc = 1 - command = ' '.join([self.monit_bin_path, monit_command, self.command_args, self.process_name]) - rc, out, err = self.module.run_command(command, check_rc=check_rc) - return self._parse_status(out, err) - - def _parse_status(self, output, err): - escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES]) - pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name)) - if not re.search(pattern, output, re.IGNORECASE): - return Status.MISSING - - status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE) - if not status_val: - self.exit_fail("Unable to find process status", stdout=output, stderr=err) - - status_val = status_val[0].strip().upper() - if ' | ' in status_val: - status_val = status_val.split(' | ')[0] - if ' - ' not in status_val: - status_val = status_val.replace(' ', '_') - return getattr(Status, status_val) - else: - status_val, substatus = status_val.split(' - ') - action, state = substatus.split() - if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']: - status = Status.OK - else: - status = Status.NOT_MONITORED - - if state == 'pending': - status = status.pending() - return status - - def is_process_present(self): - rc, out, err = self.module.run_command('%s summary %s' % (self.monit_bin_path, self.command_args), check_rc=True) - return bool(re.findall(r'\b%s\b' % self.process_name, out)) - - def is_process_running(self): - return self.get_status().is_ok - - def run_command(self, command): - """Runs a monit command, and returns the new status.""" - return self.module.run_command('%s %s %s' % (self.monit_bin_path, command, self.process_name), check_rc=True) - - def wait_for_status_change(self, current_status): - running_status = self.get_status() - if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED: - return running_status - - loop_count = 0 - while running_status.value == current_status.value: - if loop_count >= self._status_change_retry_count: - self.exit_fail('waited too long for monit to change state', running_status) - - loop_count += 1 - time.sleep(0.5) - validate = loop_count % 2 == 0 # force recheck of status every second try - running_status = self.get_status(validate) - return running_status - - def wait_for_monit_to_stop_pending(self, current_status=None): - """Fails this run if there is no status or it's pending/initializing for timeout""" - timeout_time = time.time() + self.timeout - - if not current_status: - current_status = self.get_status() - waiting_status = [ - StatusValue.MISSING, - StatusValue.INITIALIZING, - StatusValue.DOES_NOT_EXIST, - ] - while current_status.is_pending 
or (current_status.value in waiting_status): - if time.time() >= timeout_time: - self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status) - - time.sleep(5) - current_status = self.get_status(validate=True) - return current_status - - def reload(self): - rc, out, err = self.module.run_command('%s reload' % self.monit_bin_path) - if rc != 0: - self.exit_fail('monit reload failed', stdout=out, stderr=err) - self.exit_success(state='reloaded') - - def present(self): - self.run_command('reload') - - timeout_time = time.time() + self.timeout - while not self.is_process_present(): - if time.time() >= timeout_time: - self.exit_fail('waited too long for process to become "present"') - - time.sleep(5) - - self.exit_success(state='present') - - def change_state(self, state, expected_status, invert_expected=None): - current_status = self.get_status() - self.run_command(STATE_COMMAND_MAP[state]) - status = self.wait_for_status_change(current_status) - status = self.wait_for_monit_to_stop_pending(status) - status_match = status.value == expected_status.value - if invert_expected: - status_match = not status_match - if status_match: - self.exit_success(state=state) - self.exit_fail('%s process not %s' % (self.process_name, state), status) - - def stop(self): - self.change_state('stopped', Status.NOT_MONITORED) - - def unmonitor(self): - self.change_state('unmonitored', Status.NOT_MONITORED) - - def restart(self): - self.change_state('restarted', Status.OK) - - def start(self): - self.change_state('started', Status.OK) - - def monitor(self): - self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True) - - -def main(): - arg_spec = dict( - name=dict(required=True), - timeout=dict(default=300, type='int'), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - state = module.params['state'] - timeout = module.params['timeout'] - - monit = Monit(module, module.get_bin_path('monit', True), name, timeout) - - def exit_if_check_mode(): - if module.check_mode: - module.exit_json(changed=True) - - if state == 'reloaded': - exit_if_check_mode() - monit.reload() - - present = monit.is_process_present() - - if not present and not state == 'present': - module.fail_json(msg='%s process not presently configured with monit' % name, name=name) - - if state == 'present': - if present: - module.exit_json(changed=False, name=name, state=state) - exit_if_check_mode() - monit.present() - - monit.wait_for_monit_to_stop_pending() - running = monit.is_process_running() - - if running and state in ['started', 'monitored']: - module.exit_json(changed=False, name=name, state=state) - - if running and state == 'stopped': - exit_if_check_mode() - monit.stop() - - if running and state == 'unmonitored': - exit_if_check_mode() - monit.unmonitor() - - elif state == 'restarted': - exit_if_check_mode() - monit.restart() - - elif not running and state == 'started': - exit_if_check_mode() - monit.start() - - elif not running and state == 'monitored': - exit_if_check_mode() - monit.monitor() - - module.exit_json(changed=False, name=name, state=state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/nagios.py b/plugins/modules/monitoring/nagios.py deleted file mode 100644 index 248fd1051d..0000000000 --- a/plugins/modules/monitoring/nagios.py +++ /dev/null @@ -1,1304 +0,0 
@@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# This file is largely copied from the Nagios module included in the -# Func project. Original copyright follows: -# -# func-nagios - Schedule downtime and enable/disable notifications -# Copyright 2011, Red Hat, Inc. -# Tim Bielawa -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: nagios -short_description: Perform common tasks in Nagios related to downtime and notifications. -description: - - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." - - The C(nagios) module is not idempotent. - - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer - to the host the playbook is currently running on. - - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet). - - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself), - e.g., C(service=host). This keyword may not be given with other services at the same time. - I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) - To schedule downtime for all services on a particular host use the keyword "all", e.g., C(service=all). -options: - action: - description: - - Action to take. - - servicegroup options were added in 2.0. - - delete_downtime options were added in 2.2. - - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0. - required: true - choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", - "servicegroup_host_downtime", "acknowledge", "forced_check" ] - type: str - host: - description: - - Host to operate on in Nagios. - type: str - cmdfile: - description: - - Path to the nagios I(command file) (FIFO pipe). - Only required if auto-detection fails. - type: str - author: - description: - - Author to leave downtime comments as. - Only usable with the C(downtime) and C(acknowledge) actions. - type: str - default: Ansible - comment: - description: - - Comment for the C(downtime) and C(acknowledge) actions. - type: str - default: Scheduling downtime - start: - description: - - When downtime should start, in time_t format (epoch seconds). - version_added: '0.2.0' - type: str - minutes: - description: - - Minutes to schedule downtime for. - - Only usable with the C(downtime) action. - type: int - default: 30 - services: - description: - - What to manage downtime/alerts for. Separate multiple services with commas. - C(service) is an alias for C(services). - B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions. - aliases: [ "service" ] - type: str - servicegroup: - description: - - The servicegroup we want to set downtimes/alerts for. - B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions. - type: str - command: - description: - - The raw command to send to nagios, which - should not include the submitted time header or the line-feed. - B(Required) option when using the C(command) action.
- type: str - -author: "Tim Bielawa (@tbielawa)" -''' - -EXAMPLES = ''' -- name: Set 30 minutes of apache downtime - community.general.nagios: - action: downtime - minutes: 30 - service: httpd - host: '{{ inventory_hostname }}' - -- name: Schedule an hour of HOST downtime - community.general.nagios: - action: downtime - minutes: 60 - service: host - host: '{{ inventory_hostname }}' - -- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00 - community.general.nagios: - action: downtime - start: 1555984800 - minutes: 60 - service: host - host: '{{ inventory_hostname }}' - -- name: Schedule an hour of HOST downtime, with a comment describing the reason - community.general.nagios: - action: downtime - minutes: 60 - service: host - host: '{{ inventory_hostname }}' - comment: Rebuilding machine - -- name: Schedule downtime for ALL services on HOST - community.general.nagios: - action: downtime - minutes: 45 - service: all - host: '{{ inventory_hostname }}' - -- name: Schedule downtime for a few services - community.general.nagios: - action: downtime - services: frob,foobar,qeuz - host: '{{ inventory_hostname }}' - -- name: Set 30 minutes downtime for all services in servicegroup foo - community.general.nagios: - action: servicegroup_service_downtime - minutes: 30 - servicegroup: foo - host: '{{ inventory_hostname }}' - -- name: Set 30 minutes downtime for all hosts in servicegroup foo - community.general.nagios: - action: servicegroup_host_downtime - minutes: 30 - servicegroup: foo - host: '{{ inventory_hostname }}' - -- name: Delete all downtime for a given host - community.general.nagios: - action: delete_downtime - host: '{{ inventory_hostname }}' - service: all - -- name: Delete all downtime for HOST with a particular comment - community.general.nagios: - action: delete_downtime - host: '{{ inventory_hostname }}' - service: host - comment: Planned maintenance - -- name: Acknowledge a HOST with a particular comment - community.general.nagios: - action: acknowledge - service: host - host: '{{ inventory_hostname }}' - comment: 'power outage - see casenr 12345' - -- name: Acknowledge an active service problem for the httpd service with a particular comment - community.general.nagios: - action: acknowledge - service: httpd - host: '{{ inventory_hostname }}' - comment: 'service crashed - see casenr 12345' - -- name: Reset a passive service check for snmp trap - community.general.nagios: - action: forced_check - service: snmp - host: '{{ inventory_hostname }}' - -- name: Force an active service check for the httpd service - community.general.nagios: - action: forced_check - service: httpd - host: '{{ inventory_hostname }}' - -- name: Force an active service check for all services of a particular host - community.general.nagios: - action: forced_check - service: all - host: '{{ inventory_hostname }}' - -- name: Force an active service check for a particular host - community.general.nagios: - action: forced_check - service: host - host: '{{ inventory_hostname }}' - -- name: Enable SMART disk alerts - community.general.nagios: - action: enable_alerts - service: smart - host: '{{ inventory_hostname }}' - -- name: Disable httpd and nfs alerts - community.general.nagios: - action: disable_alerts - service: httpd,nfs - host: '{{ inventory_hostname }}' - -- name: Disable HOST alerts - community.general.nagios: - action: disable_alerts - service: host - host: '{{ inventory_hostname }}' - -- name: Silence ALL alerts - community.general.nagios: - action: silence - host: '{{
inventory_hostname }}' - -- name: Unsilence all alerts - community.general.nagios: - action: unsilence - host: '{{ inventory_hostname }}' - -- name: Shut up nagios - community.general.nagios: - action: silence_nagios - -- name: Annoy me nagios - community.general.nagios: - action: unsilence_nagios - -- name: Command something - community.general.nagios: - action: command - command: DISABLE_FAILURE_PREDICTION -''' - -import time -import os.path -import stat - -from ansible.module_utils.basic import AnsibleModule - - -###################################################################### - -def which_cmdfile(): - locations = [ - # rhel - '/etc/nagios/nagios.cfg', - # debian - '/etc/nagios3/nagios.cfg', - # older debian - '/etc/nagios2/nagios.cfg', - # bsd, solaris - '/usr/local/etc/nagios/nagios.cfg', - # groundwork it monitoring - '/usr/local/groundwork/nagios/etc/nagios.cfg', - # open monitoring distribution - '/omd/sites/oppy/tmp/nagios/nagios.cfg', - # ??? - '/usr/local/nagios/etc/nagios.cfg', - '/usr/local/nagios/nagios.cfg', - '/opt/nagios/etc/nagios.cfg', - '/opt/nagios/nagios.cfg', - # icinga on debian/ubuntu - '/etc/icinga/icinga.cfg', - # icinga installed from source (default location) - '/usr/local/icinga/etc/icinga.cfg', - ] - - for path in locations: - if os.path.exists(path): - for line in open(path): - if line.startswith('command_file'): - return line.split('=')[1].strip() - - return None - -###################################################################### - - -def main(): - ACTION_CHOICES = [ - 'downtime', - 'delete_downtime', - 'silence', - 'unsilence', - 'enable_alerts', - 'disable_alerts', - 'silence_nagios', - 'unsilence_nagios', - 'command', - 'servicegroup_host_downtime', - 'servicegroup_service_downtime', - 'acknowledge', - 'forced_check', - ] - - module = AnsibleModule( - argument_spec=dict( - action=dict(required=True, choices=ACTION_CHOICES), - author=dict(default='Ansible'), - comment=dict(default='Scheduling downtime'), - host=dict(required=False, default=None), - servicegroup=dict(required=False, default=None), - start=dict(required=False, default=None), - minutes=dict(default=30, type='int'), - cmdfile=dict(default=which_cmdfile()), - services=dict(default=None, aliases=['service']), - command=dict(required=False, default=None), - ) - ) - - action = module.params['action'] - host = module.params['host'] - servicegroup = module.params['servicegroup'] - start = module.params['start'] - services = module.params['services'] - cmdfile = module.params['cmdfile'] - command = module.params['command'] - - ################################################################## - # Required args per action: - # downtime = (minutes, service, host) - # acknowledge = (service, host) - # (un)silence = (host) - # (enable/disable)_alerts = (service, host) - # command = command - # - # AnsibleModule will verify most stuff; we need to verify - # 'service' manually.
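For contrast with the hand-rolled checks that follow, the per-action requirements can be expressed as data; a newer module would likely reach for AnsibleModule's built-in required_if option, but a plain-Python sketch makes the table explicit. The helper name missing_args is hypothetical, and the mapping below is read off the fail_json() checks in this main():

```python
# Each action mapped to the parameters it requires; parameters with defaults
# (author, comment, minutes) never need checking.
REQUIRED_ARGS = {
    'downtime': ('host', 'services'),
    'delete_downtime': ('host', 'services'),
    'acknowledge': ('host', 'services'),
    'forced_check': ('host', 'services'),
    'enable_alerts': ('host', 'services'),
    'disable_alerts': ('host', 'services'),
    'servicegroup_service_downtime': ('host', 'servicegroup'),
    'servicegroup_host_downtime': ('host', 'servicegroup'),
    'command': ('command',),
    'silence': ('host',),
    'unsilence': ('host',),
    'silence_nagios': (),
    'unsilence_nagios': (),
}


def missing_args(params):
    """Return the parameters required by params['action'] that are unset."""
    return [arg for arg in REQUIRED_ARGS[params['action']] if not params.get(arg)]


# Example: downtime without a service selected reports the gap.
print(missing_args({'action': 'downtime', 'host': 'web01', 'services': None}))
# -> ['services']
```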
- - ################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios']: - if not host: - module.fail_json(msg='no host specified for action requiring one') - ###################################################################### - if action == 'downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - - ###################################################################### - if action == 'delete_downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - - ###################################################################### - - if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: - # Make sure there's an actual servicegroup selected - if not servicegroup: - module.fail_json(msg='no servicegroup selected to set downtime for') - - ################################################################## - if action in ['enable_alerts', 'disable_alerts']: - if not services: - module.fail_json(msg='a service is required when setting alerts') - - if action in ['command']: - if not command: - module.fail_json(msg='no command passed for command action') - ###################################################################### - if action == 'acknowledge': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to acknowledge') - - ################################################################## - if action == 'forced_check': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to check') - - ################################################################## - if not cmdfile: - module.fail_json(msg='unable to locate nagios.cfg') - - ################################################################## - ansible_nagios = Nagios(module, **module.params) - if module.check_mode: - module.exit_json(changed=True) - else: - ansible_nagios.act() - ################################################################## - - -###################################################################### -class Nagios(object): - """ - Perform common tasks in Nagios related to downtime and - notifications. - - The complete set of external commands Nagios handles is documented - on their website: - - http://old.nagios.org/developerinfo/externalcommands/commandlist.php - - Note that in the case of `schedule_svc_downtime`, - `enable_svc_notifications`, and `disable_svc_notifications`, the - service argument should be passed as a list. 
- """ - - def __init__(self, module, **kwargs): - self.module = module - self.action = kwargs['action'] - self.author = kwargs['author'] - self.comment = kwargs['comment'] - self.host = kwargs['host'] - self.servicegroup = kwargs['servicegroup'] - if kwargs['start'] is not None: - self.start = int(kwargs['start']) - else: - self.start = None - self.minutes = kwargs['minutes'] - self.cmdfile = kwargs['cmdfile'] - self.command = kwargs['command'] - - if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): - self.services = kwargs['services'] - else: - self.services = kwargs['services'].split(',') - - self.command_results = [] - - def _now(self): - """ - The time in seconds since 12:00:00AM Jan 1, 1970 - """ - - return int(time.time()) - - def _write_command(self, cmd): - """ - Write the given command to the Nagios command file - """ - - if not os.path.exists(self.cmdfile): - self.module.fail_json(msg='nagios command file does not exist', - cmdfile=self.cmdfile) - if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode): - self.module.fail_json(msg='nagios command file is not a fifo file', - cmdfile=self.cmdfile) - try: - fp = open(self.cmdfile, 'w') - fp.write(cmd) - fp.flush() - fp.close() - self.command_results.append(cmd.strip()) - except IOError: - self.module.fail_json(msg='unable to write to nagios command file', - cmdfile=self.cmdfile) - - def _fmt_dt_str(self, cmd, host, duration, author=None, - comment=None, start=None, - svc=None, fixed=1, trigger=0): - """ - Format an external-command downtime string. - - cmd - Nagios command ID - host - Host schedule downtime on - duration - Minutes to schedule downtime for - author - Name to file the downtime as - comment - Reason for running this command (upgrade, reboot, etc) - start - Start of downtime in seconds since 12:00AM Jan 1 1970 - Default is to use the entry time (now) - svc - Service to schedule downtime for, omit when for host downtime - fixed - Start now if 1, start when a problem is detected if 0 - trigger - Optional ID of event to start downtime from. Leave as 0 for - fixed downtime. - - Syntax: [submitted] COMMAND;;[] - ;;;;;; - - """ - - entry_time = self._now() - if start is None: - start = entry_time - - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - duration_s = (duration * 60) - end = start + duration_s - - if not author: - author = self.author - - if not comment: - comment = self.comment - - if svc is not None: - dt_args = [svc, str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] - else: - # Downtime for a host if no svc specified - dt_args = [str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] - - dt_arg_str = ";".join(dt_args) - dt_str = hdr + dt_arg_str + "\n" - - return dt_str - - def _fmt_ack_str(self, cmd, host, author=None, - comment=None, svc=None, sticky=0, notify=1, persistent=0): - """ - Format an external-command acknowledge string. 
- - cmd - Nagios command ID - host - Host schedule downtime on - author - Name to file the downtime as - comment - Reason for running this command (upgrade, reboot, etc) - svc - Service to schedule downtime for, omit when for host downtime - sticky - the acknowledgement will remain until the host returns to an UP state if set to 1 - notify - a notification will be sent out to contacts - persistent - survive across restarts of the Nagios process - - Syntax: [submitted] COMMAND;;[] - ;;;; - """ - - entry_time = self._now() - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - - if not author: - author = self.author - - if not comment: - comment = self.comment - - if svc is not None: - ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment] - else: - # Downtime for a host if no svc specified - ack_args = [str(sticky), str(notify), str(persistent), author, comment] - - ack_arg_str = ";".join(ack_args) - ack_str = hdr + ack_arg_str + "\n" - - return ack_str - - def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None): - """ - Format an external-command downtime deletion string. - - cmd - Nagios command ID - host - Host to remove scheduled downtime from - comment - Reason downtime was added (upgrade, reboot, etc) - start - Start of downtime in seconds since 12:00AM Jan 1 1970 - svc - Service to remove downtime for, omit to remove all downtime for the host - - Syntax: [submitted] COMMAND;; - [];[];[] - """ - - entry_time = self._now() - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - - if comment is None: - comment = self.comment - - dt_del_args = [] - if svc is not None: - dt_del_args.append(svc) - else: - dt_del_args.append('') - - if start is not None: - dt_del_args.append(str(start)) - else: - dt_del_args.append('') - - if comment is not None: - dt_del_args.append(comment) - else: - dt_del_args.append('') - - dt_del_arg_str = ";".join(dt_del_args) - dt_del_str = hdr + dt_del_arg_str + "\n" - - return dt_del_str - - def _fmt_chk_str(self, cmd, host, svc=None, start=None): - """ - Format an external-command forced host or service check string. - - cmd - Nagios command ID - host - Host to check service from - svc - Service to check - start - check time - - Syntax: [submitted] COMMAND;;[]; - """ - - entry_time = self._now() - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - - if start is None: - start = entry_time + 3 - - if svc is None: - chk_args = [str(start)] - else: - chk_args = [svc, str(start)] - - chk_arg_str = ";".join(chk_args) - chk_str = hdr + chk_arg_str + "\n" - - return chk_str - - def _fmt_notif_str(self, cmd, host=None, svc=None): - """ - Format an external-command notification string. - - cmd - Nagios command ID. - host - Host to en/disable notifications on.. A value is not required - for global downtime - svc - Service to schedule downtime for. A value is not required - for host downtime. - - Syntax: [submitted] COMMAND;[;] - """ - - entry_time = self._now() - notif_str = "[%s] %s" % (entry_time, cmd) - if host is not None: - notif_str += ";%s" % host - - if svc is not None: - notif_str += ";%s" % svc - - notif_str += "\n" - - return notif_str - - def schedule_svc_downtime(self, host, services=None, minutes=30, start=None): - """ - This command is used to schedule downtime for a particular - service. - - During the specified downtime, Nagios will not send - notifications out about the service. 
- - Syntax: SCHEDULE_SVC_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SVC_DOWNTIME" - - if services is None: - services = [] - - for service in services: - dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service) - self._write_command(dt_cmd_str) - - def schedule_host_downtime(self, host, minutes=30, start=None): - """ - This command is used to schedule downtime for a particular - host. - - During the specified downtime, Nagios will not send - notifications out about the host. - - Syntax: SCHEDULE_HOST_DOWNTIME;;;; - ;;;; - """ - - cmd = "SCHEDULE_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start) - self._write_command(dt_cmd_str) - - def acknowledge_svc_problem(self, host, services=None): - """ - This command is used to acknowledge a particular - service problem. - - By acknowledging the current problem, future notifications - for the same servicestate are disabled - - Syntax: ACKNOWLEDGE_SVC_PROBLEM;;; - ;;;; - """ - - cmd = "ACKNOWLEDGE_SVC_PROBLEM" - - if services is None: - services = [] - - for service in services: - ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service) - self._write_command(ack_cmd_str) - - def acknowledge_host_problem(self, host): - """ - This command is used to acknowledge a particular - host problem. - - By acknowledging the current problem, future notifications - for the same servicestate are disabled - - Syntax: ACKNOWLEDGE_HOST_PROBLEM;;;; - ;; - """ - - cmd = "ACKNOWLEDGE_HOST_PROBLEM" - ack_cmd_str = self._fmt_ack_str(cmd, host) - self._write_command(ack_cmd_str) - - def schedule_forced_host_check(self, host): - """ - This command schedules a forced active check for a particular host. - - Syntax: SCHEDULE_FORCED_HOST_CHECK;; - """ - - cmd = "SCHEDULE_FORCED_HOST_CHECK" - - chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None) - self._write_command(chk_cmd_str) - - def schedule_forced_host_svc_check(self, host): - """ - This command schedules a forced active check for all services - associated with a particular host. - - Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;; - """ - - cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS" - - chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None) - self._write_command(chk_cmd_str) - - def schedule_forced_svc_check(self, host, services=None): - """ - This command schedules a forced active check for a particular - service. - - Syntax: SCHEDULE_FORCED_SVC_CHECK;;; - """ - - cmd = "SCHEDULE_FORCED_SVC_CHECK" - - if services is None: - services = [] - - for service in services: - chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service) - self._write_command(chk_cmd_str) - - def schedule_host_svc_downtime(self, host, minutes=30, start=None): - """ - This command is used to schedule downtime for - all services associated with a particular host. - - During the specified downtime, Nagios will not send - notifications out about the host. - - SCHEDULE_HOST_SVC_DOWNTIME;;;; - ;;;; - """ - - cmd = "SCHEDULE_HOST_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start) - self._write_command(dt_cmd_str) - - def delete_host_downtime(self, host, services=None, comment=None): - """ - This command is used to remove scheduled downtime for a particular - host. 
- - Syntax: DEL_DOWNTIME_BY_HOST_NAME;; - [];[];[] - """ - - cmd = "DEL_DOWNTIME_BY_HOST_NAME" - - if services is None: - dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment) - self._write_command(dt_del_cmd_str) - else: - for service in services: - dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment) - self._write_command(dt_del_cmd_str) - - def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None): - """ - This command is used to schedule downtime for all hosts in a - particular hostgroup. - - During the specified downtime, Nagios will not send - notifications out about the hosts. - - Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;;; - ;;;;; - """ - - cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start) - self._write_command(dt_cmd_str) - - def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None): - """ - This command is used to schedule downtime for all services in - a particular hostgroup. - - During the specified downtime, Nagios will not send - notifications out about the services. - - Note that scheduling downtime for services does not - automatically schedule downtime for the hosts those services - are associated with. - - Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;;; - ;;;;; - """ - - cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start) - self._write_command(dt_cmd_str) - - def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None): - """ - This command is used to schedule downtime for all hosts in a - particular servicegroup. - - During the specified downtime, Nagios will not send - notifications out about the hosts. - - Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start) - self._write_command(dt_cmd_str) - - def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None): - """ - This command is used to schedule downtime for all services in - a particular servicegroup. - - During the specified downtime, Nagios will not send - notifications out about the services. - - Note that scheduling downtime for services does not - automatically schedule downtime for the hosts those services - are associated with. - - Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start) - self._write_command(dt_cmd_str) - - def disable_host_svc_notifications(self, host): - """ - This command is used to prevent notifications from being sent - out for all services on the specified host. - - Note that this command does not disable notifications from - being sent out about the host. - - Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOST_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def disable_host_notifications(self, host): - """ - This command is used to prevent notifications from being sent - out for the specified host. - - Note that this command does not disable notifications for - services associated with this host. 
- - Syntax: DISABLE_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def disable_svc_notifications(self, host, services=None): - """ - This command is used to prevent notifications from being sent - out for the specified service. - - Note that this command does not disable notifications from - being sent out about the host. - - Syntax: DISABLE_SVC_NOTIFICATIONS;; - """ - - cmd = "DISABLE_SVC_NOTIFICATIONS" - - if services is None: - services = [] - - for service in services: - notif_str = self._fmt_notif_str(cmd, host, svc=service) - self._write_command(notif_str) - - def disable_servicegroup_host_notifications(self, servicegroup): - """ - This command is used to prevent notifications from being sent - out for all hosts in the specified servicegroup. - - Note that this command does not disable notifications for - services associated with hosts in this service group. - - Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - self._write_command(notif_str) - - def disable_servicegroup_svc_notifications(self, servicegroup): - """ - This command is used to prevent notifications from being sent - out for all services in the specified servicegroup. - - Note that this does not prevent notifications from being sent - out about the hosts in this servicegroup. - - Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - self._write_command(notif_str) - - def disable_hostgroup_host_notifications(self, hostgroup): - """ - Disables notifications for all hosts in a particular - hostgroup. - - Note that this does not disable notifications for the services - associated with the hosts in the hostgroup - see the - DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that. - - Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - self._write_command(notif_str) - - def disable_hostgroup_svc_notifications(self, hostgroup): - """ - Disables notifications for all services associated with hosts - in a particular hostgroup. - - Note that this does not disable notifications for the hosts in - the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS - command for that. - - Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - self._write_command(notif_str) - - def enable_host_notifications(self, host): - """ - Enables notifications for a particular host. - - Note that this command does not enable notifications for - services associated with this host. - - Syntax: ENABLE_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def enable_host_svc_notifications(self, host): - """ - Enables notifications for all services on the specified host. - - Note that this does not enable notifications for the host. 
- - Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOST_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_svc_notifications(self, host, services=None): - """ - Enables notifications for a particular service. - - Note that this does not enable notifications for the host. - - Syntax: ENABLE_SVC_NOTIFICATIONS;; - """ - - cmd = "ENABLE_SVC_NOTIFICATIONS" - - if services is None: - services = [] - - nagios_return = True - return_str_list = [] - for service in services: - notif_str = self._fmt_notif_str(cmd, host, svc=service) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def enable_hostgroup_host_notifications(self, hostgroup): - """ - Enables notifications for all hosts in a particular hostgroup. - - Note that this command does not enable notifications for - services associated with the hosts in this hostgroup. - - Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_hostgroup_svc_notifications(self, hostgroup): - """ - Enables notifications for all services that are associated - with hosts in a particular hostgroup. - - Note that this does not enable notifications for the hosts in - this hostgroup. - - Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_servicegroup_host_notifications(self, servicegroup): - """ - Enables notifications for all hosts that have services that - are members of a particular servicegroup. - - Note that this command does not enable notifications for - services associated with the hosts in this servicegroup. - - Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_servicegroup_svc_notifications(self, servicegroup): - """ - Enables notifications for all services that are members of a - particular servicegroup. - - Note that this does not enable notifications for the hosts in - this servicegroup. - - Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def silence_host(self, host): - """ - This command is used to prevent notifications from being sent - out for the host and all services on the specified host. - - This is equivalent to calling disable_host_svc_notifications - and disable_host_notifications. 
- - Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name> - Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name> - """ - - cmd = [ - "DISABLE_HOST_SVC_NOTIFICATIONS", - "DISABLE_HOST_NOTIFICATIONS" - ] - nagios_return = True - return_str_list = [] - for c in cmd: - notif_str = self._fmt_notif_str(c, host) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def unsilence_host(self, host): - """ - This command is used to enable notifications for the host and - all services on the specified host. - - This is equivalent to calling enable_host_svc_notifications - and enable_host_notifications. - - Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> - Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name> - """ - - cmd = [ - "ENABLE_HOST_SVC_NOTIFICATIONS", - "ENABLE_HOST_NOTIFICATIONS" - ] - nagios_return = True - return_str_list = [] - for c in cmd: - notif_str = self._fmt_notif_str(c, host) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def silence_nagios(self): - """ - This command is used to disable notifications for all hosts and services - in nagios. - - This is a 'SHUT UP, NAGIOS' command - """ - cmd = 'DISABLE_NOTIFICATIONS' - self._write_command(self._fmt_notif_str(cmd)) - - def unsilence_nagios(self): - """ - This command is used to enable notifications for all hosts and services - in nagios. - - This is an 'OK, NAGIOS, GO' command - """ - cmd = 'ENABLE_NOTIFICATIONS' - self._write_command(self._fmt_notif_str(cmd)) - - def nagios_cmd(self, cmd): - """ - This sends an arbitrary command to nagios - - It prepends the submitted time and appends a \n - - You just have to provide a properly formatted command - """ - - pre = '[%s]' % int(time.time()) - - post = '\n' - cmdstr = '%s %s%s' % (pre, cmd, post) - self._write_command(cmdstr) - - def act(self): - """ - Figure out what action was requested from Ansible, and then - perform it. - """ - # host or service downtime?
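The nagios_cmd helper above spells out the wire format Nagios expects for external commands: a bracketed Unix timestamp, the semicolon-delimited command string, and a trailing newline, written into the daemon's command file. A minimal standalone sketch of that format, assuming a hypothetical command-file path (real deployments configure the location via command_file in nagios.cfg):

```python
import time

# Example path only; the actual location comes from the Nagios configuration.
CMD_FILE = '/usr/local/nagios/var/rw/nagios.cmd'


def fmt_external_command(cmd, *args):
    """Build '[<timestamp>] CMD;arg1;...' plus newline, as nagios_cmd describes."""
    fields = ';'.join([cmd] + [str(a) for a in args])
    return '[%s] %s\n' % (int(time.time()), fields)


def write_command(cmdstr, cmd_file=CMD_FILE):
    # The command file is a FIFO; Nagios consumes each newline-terminated line.
    with open(cmd_file, 'w') as fifo:
        fifo.write(cmdstr)


# e.g. disable notifications for one service on one host:
# write_command(fmt_external_command('DISABLE_SVC_NOTIFICATIONS', 'web01', 'https'))
```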
- if self.action == 'downtime': - if self.services == 'host': - self.schedule_host_downtime(self.host, minutes=self.minutes, - start=self.start) - elif self.services == 'all': - self.schedule_host_svc_downtime(self.host, minutes=self.minutes, - start=self.start) - else: - self.schedule_svc_downtime(self.host, - services=self.services, - minutes=self.minutes, - start=self.start) - - elif self.action == 'acknowledge': - if self.services == 'host': - self.acknowledge_host_problem(self.host) - else: - self.acknowledge_svc_problem(self.host, services=self.services) - - elif self.action == 'delete_downtime': - if self.services == 'host': - self.delete_host_downtime(self.host) - elif self.services == 'all': - self.delete_host_downtime(self.host, comment='') - else: - self.delete_host_downtime(self.host, services=self.services) - - elif self.action == 'forced_check': - if self.services == 'host': - self.schedule_forced_host_check(self.host) - elif self.services == 'all': - self.schedule_forced_host_svc_check(self.host) - else: - self.schedule_forced_svc_check(self.host, services=self.services) - - elif self.action == "servicegroup_host_downtime": - if self.servicegroup: - self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) - elif self.action == "servicegroup_service_downtime": - if self.servicegroup: - self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) - - # toggle the host AND service alerts - elif self.action == 'silence': - self.silence_host(self.host) - - elif self.action == 'unsilence': - self.unsilence_host(self.host) - - # toggle host/svc alerts - elif self.action == 'enable_alerts': - if self.services == 'host': - self.enable_host_notifications(self.host) - elif self.services == 'all': - self.enable_host_svc_notifications(self.host) - else: - self.enable_svc_notifications(self.host, - services=self.services) - - elif self.action == 'disable_alerts': - if self.services == 'host': - self.disable_host_notifications(self.host) - elif self.services == 'all': - self.disable_host_svc_notifications(self.host) - else: - self.disable_svc_notifications(self.host, - services=self.services) - elif self.action == 'silence_nagios': - self.silence_nagios() - - elif self.action == 'unsilence_nagios': - self.unsilence_nagios() - - elif self.action == 'command': - self.nagios_cmd(self.command) - - # wtf? 
- else: - self.module.fail_json(msg="unknown action specified: '%s'" % - self.action) - - self.module.exit_json(nagios_commands=self.command_results, - changed=True) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/newrelic_deployment.py b/plugins/modules/monitoring/newrelic_deployment.py deleted file mode 100644 index af953e0a75..0000000000 --- a/plugins/modules/monitoring/newrelic_deployment.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: newrelic_deployment -author: "Matt Coddington (@mcodd)" -short_description: Notify newrelic about app deployments -description: - - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) -options: - token: - type: str - description: - - API token, to place in the x-api-key header. - required: true - app_name: - type: str - description: - - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application - required: false - application_id: - type: str - description: - - (one of app_name or application_id is required) The application id, found in the URL when viewing the application in RPM - required: false - changelog: - type: str - description: - - A list of changes for this deployment - required: false - description: - type: str - description: - - Text annotation for the deployment - notes for you - required: false - revision: - type: str - description: - - A revision number (e.g., git commit SHA) - required: false - user: - type: str - description: - - The name of the user/process that triggered this deployment - required: false - appname: - type: str - description: - - Name of the application - required: false - environment: - type: str - description: - - The environment for this deployment - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - -requirements: [] -''' - -EXAMPLES = ''' -- name: Notify newrelic about an app deployment - community.general.newrelic_deployment: - token: AAAAAA - app_name: myapp - user: ansible deployment - revision: '1.0' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode - -# =========================================== -# Module execution.
-# - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - app_name=dict(required=False), - application_id=dict(required=False), - changelog=dict(required=False), - description=dict(required=False), - revision=dict(required=False), - user=dict(required=False), - appname=dict(required=False), - environment=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - required_one_of=[['app_name', 'application_id']], - supports_check_mode=True - ) - - # build list of params - params = {} - if module.params["app_name"] and module.params["application_id"]: - module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") - - if module.params["app_name"]: - params["app_name"] = module.params["app_name"] - elif module.params["application_id"]: - params["application_id"] = module.params["application_id"] - else: - module.fail_json(msg="you must set one of 'app_name' or 'application_id'") - - for item in ["changelog", "description", "revision", "user", "appname", "environment"]: - if module.params[item]: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to NewRelic - url = "https://rpm.newrelic.com/deployments.xml" - data = urlencode(params) - headers = { - 'x-api-key': module.params["token"], - } - response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] in (200, 201): - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % info['msg']) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/pagerduty.py b/plugins/modules/monitoring/pagerduty.py deleted file mode 100644 index dba931ab96..0000000000 --- a/plugins/modules/monitoring/pagerduty.py +++ /dev/null @@ -1,280 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pagerduty -short_description: Create PagerDuty maintenance windows -description: - - This module will let you create PagerDuty maintenance windows -author: - - "Andrew Newdigate (@suprememoocow)" - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" - - "Bruce Pennypacker (@bpennypacker)" -requirements: - - PagerDuty API access -options: - state: - type: str - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - choices: [ "running", "started", "ongoing", "absent" ] - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - user: - type: str - description: - - PagerDuty user ID. Obsolete. Please use I(token) for authorization. - token: - type: str - description: - - A pagerduty token, generated on the pagerduty site. It is used for authorization. - required: true - requester_id: - type: str - description: - - ID of user making the request. Only needed when creating a maintenance_window. - service: - type: list - elements: str - description: - - A comma separated list of PagerDuty service IDs. - aliases: [ services ] - window_id: - type: str - description: - - ID of maintenance window. Only needed when deleting a maintenance window (I(state=absent)). - hours: - type: str - description: - - Length of maintenance window in hours.
- default: '1' - minutes: - type: str - description: - - Maintenance window in minutes (this is added to the hours). - default: '0' - desc: - type: str - description: - - Short description of maintenance window. - default: Created by Ansible - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' - -EXAMPLES = ''' -- name: List ongoing maintenance windows using a token - community.general.pagerduty: - name: companyabc - token: xxxxxxxxxxxxxx - state: ongoing - -- name: Create a 1 hour maintenance window for service FOO123 - community.general.pagerduty: - name: companyabc - user: example@example.com - token: yourtoken - state: running - service: FOO123 - -- name: Create a 5 minute maintenance window for service FOO123 - community.general.pagerduty: - name: companyabc - token: xxxxxxxxxxxxxx - hours: 0 - minutes: 5 - state: running - service: FOO123 - - -- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment" - community.general.pagerduty: - name: companyabc - user: example@example.com - state: running - service: FOO123 - hours: 4 - desc: deployment - register: pd_window - -- name: Delete the previous maintenance window - community.general.pagerduty: - name: companyabc - user: example@example.com - state: absent - window_id: '{{ pd_window.result.maintenance_window.id }}' - -# Delete a maintenance window from a separate playbook than its creation, -# and if it is the only existing maintenance window -- name: Check - community.general.pagerduty: - requester_id: XXXXXXX - token: yourtoken - state: ongoing - register: pd_window - -- name: Delete - community.general.pagerduty: - requester_id: XXXXXXX - token: yourtoken - state: absent - window_id: "{{ pd_window.result.maintenance_windows[0].id }}" -''' - -import datetime -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -class PagerDutyRequest(object): - def __init__(self, module, name, user, token): - self.module = module - self.name = name - self.user = user - self.token = token - self.headers = { - 'Content-Type': 'application/json', - "Authorization": self._auth_header(), - 'Accept': 'application/vnd.pagerduty+json;version=2' - } - - def ongoing(self, http_call=fetch_url): - url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing" - headers = dict(self.headers) - - response, info = http_call(self.module, url, headers=headers) - if info['status'] != 200: - self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - - json_out = self._read_response(response) - - return False, json_out, False - - def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url): - if not requester_id: - self.module.fail_json(msg="requester_id is required when maintenance window should be created") - - url = 'https://api.pagerduty.com/maintenance_windows' - - headers = dict(self.headers) - headers.update({'From': requester_id}) - - start, end = self._compute_start_end_time(hours, minutes) - services = self._create_services_payload(service) - - request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}} - - data = json.dumps(request_data) - response, info = http_call(self.module, url, data=data, headers=headers, method='POST') - if info['status'] != 201: - 
self.module.fail_json(msg="failed to create the window: %s" % info['msg']) - - json_out = self._read_response(response) - - return False, json_out, True - - def _create_services_payload(self, service): - if (isinstance(service, list)): - return [{'id': s, 'type': 'service_reference'} for s in service] - else: - return [{'id': service, 'type': 'service_reference'}] - - def _compute_start_end_time(self, hours, minutes): - now = datetime.datetime.utcnow() - later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) - start = now.strftime("%Y-%m-%dT%H:%M:%SZ") - end = later.strftime("%Y-%m-%dT%H:%M:%SZ") - return start, end - - def absent(self, window_id, http_call=fetch_url): - url = "https://api.pagerduty.com/maintenance_windows/" + window_id - headers = dict(self.headers) - - response, info = http_call(self.module, url, headers=headers, method='DELETE') - if info['status'] != 204: - self.module.fail_json(msg="failed to delete the window: %s" % info['msg']) - - json_out = self._read_response(response) - - return False, json_out, True - - def _auth_header(self): - return "Token token=%s" % self.token - - def _read_response(self, response): - try: - return json.loads(response.read()) - except Exception: - return "" - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), - name=dict(required=False), - user=dict(required=False), - token=dict(required=True, no_log=True), - service=dict(required=False, type='list', elements='str', aliases=["services"]), - window_id=dict(required=False), - requester_id=dict(required=False), - hours=dict(default='1', required=False), # @TODO change to int? - minutes=dict(default='0', required=False), # @TODO change to int? 
- desc=dict(default='Created by Ansible', required=False), - validate_certs=dict(default=True, type='bool'), - ) - ) - - state = module.params['state'] - name = module.params['name'] - user = module.params['user'] - service = module.params['service'] - window_id = module.params['window_id'] - hours = module.params['hours'] - minutes = module.params['minutes'] - token = module.params['token'] - desc = module.params['desc'] - requester_id = module.params['requester_id'] - - pd = PagerDutyRequest(module, name, user, token) - - if state == "running" or state == "started": - if not service: - module.fail_json(msg="service not specified") - (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc) - if rc == 0: - changed = True - - if state == "ongoing": - (rc, out, changed) = pd.ongoing() - - if state == "absent": - (rc, out, changed) = pd.absent(window_id) - - if rc != 0: - module.fail_json(msg="failed", result=out) - - module.exit_json(msg="success", result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/pagerduty_alert.py b/plugins/modules/monitoring/pagerduty_alert.py deleted file mode 100644 index 58a1f260fb..0000000000 --- a/plugins/modules/monitoring/pagerduty_alert.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pagerduty_alert -short_description: Trigger, acknowledge or resolve PagerDuty incidents -description: - - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events -author: - - "Amanpreet Singh (@ApsOps)" -requirements: - - PagerDuty API access -options: - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - service_id: - type: str - description: - - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. - required: true - service_key: - type: str - description: - - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key). - integration_key: - type: str - description: - - The GUID of one of your "Generic API" services. - - This is the "integration key" listed on a "Integrations" tab of PagerDuty service. - state: - type: str - description: - - Type of event to be sent. - required: true - choices: - - 'triggered' - - 'acknowledged' - - 'resolved' - api_key: - type: str - description: - - The pagerduty API key (readonly access), generated on the pagerduty site. - required: true - desc: - type: str - description: - - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) - will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. - The maximum length is 1024 characters. - - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event. - required: false - default: Created via Ansible - incident_key: - type: str - description: - - Identifies the incident to which this I(state) should be applied. - - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. 
If there's already an - open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" - problem reports. - - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a - trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. - required: false - client: - type: str - description: - - The name of the monitoring client that is triggering this event. - required: false - client_url: - type: str - description: - - The URL of the monitoring client that is triggering this event. - required: false -''' - -EXAMPLES = ''' -- name: Trigger an incident with just the basic options - community.general.pagerduty_alert: - name: companyabc - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: triggered - desc: problem that led to this trigger - -- name: Trigger an incident with more options - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: triggered - desc: problem that led to this trigger - incident_key: somekey - client: Sample Monitoring Service - client_url: http://service.example.com - -- name: Acknowledge an incident based on incident_key - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: acknowledged - incident_key: somekey - desc: "some text for incident's log" - -- name: Resolve an incident based on incident_key - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: resolved - incident_key: somekey - desc: "some text for incident's log" -''' -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse - - -def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): - url = 'https://api.pagerduty.com/incidents' - headers = { - "Content-type": "application/json", - "Authorization": "Token token=%s" % api_key, - 'Accept': 'application/vnd.pagerduty+json;version=2' - } - - params = { - 'service_ids[]': service_id, - 'sort_by': 'incident_number:desc', - 'time_zone': 'UTC' - } - if incident_key: - params['incident_key'] = incident_key - - url_parts = list(urlparse(url)) - url_parts[4] = urlencode(params, True) - - url = urlunparse(url_parts) - - response, info = http_call(module, url, method='get', headers=headers) - - if info['status'] != 200: - module.fail_json(msg="failed to check current incident status." 
- "Reason: %s" % info['msg']) - - incidents = json.loads(response.read())["incidents"] - msg = "No corresponding incident" - - if len(incidents) == 0: - if state in ('acknowledged', 'resolved'): - return msg, False - return msg, True - elif state != incidents[0]["status"]: - return incidents[0], True - - return incidents[0], False - - -def send_event(module, service_key, event_type, desc, - incident_key=None, client=None, client_url=None): - url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" - headers = { - "Content-type": "application/json" - } - - data = { - "service_key": service_key, - "event_type": event_type, - "incident_key": incident_key, - "description": desc, - "client": client, - "client_url": client_url - } - - response, info = fetch_url(module, url, method='post', - headers=headers, data=json.dumps(data)) - if info['status'] != 200: - module.fail_json(msg="failed to %s. Reason: %s" % - (event_type, info['msg'])) - json_out = json.loads(response.read()) - return json_out - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=False), - service_id=dict(required=True), - service_key=dict(required=False, no_log=True), - integration_key=dict(required=False, no_log=True), - api_key=dict(required=True, no_log=True), - state=dict(required=True, - choices=['triggered', 'acknowledged', 'resolved']), - client=dict(required=False, default=None), - client_url=dict(required=False, default=None), - desc=dict(required=False, default='Created via Ansible'), - incident_key=dict(required=False, default=None, no_log=False) - ), - supports_check_mode=True - ) - - name = module.params['name'] - service_id = module.params['service_id'] - integration_key = module.params['integration_key'] - service_key = module.params['service_key'] - api_key = module.params['api_key'] - state = module.params['state'] - client = module.params['client'] - client_url = module.params['client_url'] - desc = module.params['desc'] - incident_key = module.params['incident_key'] - - if integration_key is None: - if service_key is not None: - integration_key = service_key - module.warn('"service_key" is obsolete parameter and will be removed.' 
- ' Please use "integration_key" instead') - else: - module.fail_json(msg="'integration_key' is a required parameter") - - state_event_dict = { - 'triggered': 'trigger', - 'acknowledged': 'acknowledge', - 'resolved': 'resolve' - } - - event_type = state_event_dict[state] - - if event_type != 'trigger' and incident_key is None: - module.fail_json(msg="incident_key is required for " - "acknowledge or resolve events") - - out, changed = check(module, name, state, service_id, - integration_key, api_key, incident_key) - - if not module.check_mode and changed is True: - out = send_event(module, integration_key, event_type, desc, - incident_key, client, client_url) - - module.exit_json(result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/pagerduty_change.py b/plugins/modules/monitoring/pagerduty_change.py deleted file mode 100644 index 358a69612e..0000000000 --- a/plugins/modules/monitoring/pagerduty_change.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: pagerduty_change -short_description: Track a code or infrastructure change as a PagerDuty change event -version_added: 1.3.0 -description: - - This module will let you create a PagerDuty change event each time the module is run. - - This is not an idempotent action and a new change event will be created each time it is run. -author: - - Adam Vaughan (@adamvaughan) -requirements: - - PagerDuty integration key -options: - integration_key: - description: - - The integration key that identifies the service the change was made to. - This can be found by adding an integration to a service in PagerDuty. - required: true - type: str - summary: - description: - - A short description of the change that occurred. - required: true - type: str - source: - description: - - The source of the change event. - default: Ansible - type: str - user: - description: - - The name of the user or process that triggered this deployment. - type: str - repo: - description: - - The URL of the project repository. - required: false - type: str - revision: - description: - - An identifier of the revision being deployed, typically a number or SHA from a version control system. - required: false - type: str - environment: - description: - - The environment name, typically C(production), C(staging), etc. - required: false - type: str - link_url: - description: - - A URL where more information about the deployment can be obtained. - required: false - type: str - link_text: - description: - - Descriptive text for a URL where more information about the deployment can be obtained. - required: false - type: str - url: - description: - - URL to submit the change event to. - required: false - default: https://events.pagerduty.com/v2/change/enqueue - type: str - validate_certs: - description: - - If C(no), SSL certificates for the target URL will not be validated. - This should only be used on personally controlled sites using self-signed certificates. - required: false - default: yes - type: bool -notes: - - Supports C(check_mode). Note that check mode simply does nothing except returning C(changed=true) in case the I(url) seems to be correct.
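The check-mode note above corresponds to a simple probe: POST an empty body to the events endpoint and treat an HTTP 400 (the endpoint is reachable and rejected the empty payload) as evidence that the URL is correct. A rough sketch of that probe, using only the Python standard library in place of Ansible's fetch_url:

```python
import urllib.error
import urllib.request

CHANGE_URL = 'https://events.pagerduty.com/v2/change/enqueue'


def url_seems_correct(url=CHANGE_URL):
    """Mimic check mode: an empty POST to a valid endpoint should yield HTTP 400."""
    req = urllib.request.Request(url, data=b'', method='POST',
                                 headers={'Content-Type': 'application/json'})
    try:
        urllib.request.urlopen(req)
    except urllib.error.HTTPError as e:
        return e.code == 400  # endpoint exists and parsed/rejected the empty payload
    except urllib.error.URLError:
        return False
    return False  # a 2xx for an empty payload would be unexpected here
```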
-''' - -EXAMPLES = ''' -- name: Track the deployment as a PagerDuty change event - community.general.pagerduty_change: - integration_key: abc123abc123abc123abc123abc123ab - summary: The application was deployed - -- name: Track the deployment as a PagerDuty change event with more details - community.general.pagerduty_change: - integration_key: abc123abc123abc123abc123abc123ab - summary: The application was deployed - source: Ansible Deploy - user: ansible - repo: github.com/ansible/ansible - revision: '4.2' - environment: production - link_url: https://github.com/ansible-collections/community.general/pull/1269 - link_text: View changes on GitHub -''' - -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.basic import AnsibleModule -from datetime import datetime - - -def main(): - module = AnsibleModule( - argument_spec=dict( - integration_key=dict(required=True, type='str', no_log=True), - summary=dict(required=True, type='str'), - source=dict(required=False, default='Ansible', type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - environment=dict(required=False, type='str'), - link_url=dict(required=False, type='str'), - link_text=dict(required=False, type='str'), - url=dict(required=False, - default='https://events.pagerduty.com/v2/change/enqueue', type='str'), - validate_certs=dict(default=True, type='bool') - ), - supports_check_mode=True - ) - - # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/ - - url = module.params['url'] - headers = {'Content-Type': 'application/json'} - - if module.check_mode: - _response, info = fetch_url( - module, url, headers=headers, method='POST') - - if info['status'] == 400: - module.exit_json(changed=True) - else: - module.fail_json( - msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status'])) - - custom_details = {} - - if module.params['user']: - custom_details['user'] = module.params['user'] - - if module.params['repo']: - custom_details['repo'] = module.params['repo'] - - if module.params['revision']: - custom_details['revision'] = module.params['revision'] - - if module.params['environment']: - custom_details['environment'] = module.params['environment'] - - now = datetime.utcnow() - timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - - payload = { - 'summary': module.params['summary'], - 'source': module.params['source'], - 'timestamp': timestamp, - 'custom_details': custom_details - } - - event = { - 'routing_key': module.params['integration_key'], - 'payload': payload - } - - if module.params['link_url']: - link = { - 'href': module.params['link_url'] - } - - if module.params['link_text']: - link['text'] = module.params['link_text'] - - event['links'] = [link] - - _response, info = fetch_url( - module, url, data=module.jsonify(event), headers=headers, method='POST') - - if info['status'] == 202: - module.exit_json(changed=True) - else: - module.fail_json( - msg='Creating PagerDuty change event failed with %d' % (info['status'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/pagerduty_user.py b/plugins/modules/monitoring/pagerduty_user.py deleted file mode 100644 index 4b20a32108..0000000000 --- a/plugins/modules/monitoring/pagerduty_user.py +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: pagerduty_user -short_description: Manage a user account on PagerDuty -description: - - This module manages the creation/removal of a user account on PagerDuty. -version_added: '1.3.0' -author: Zainab Alsaffar (@zanssa) -requirements: - - pdpyras python module = 4.1.1 - - PagerDuty API Access -options: - access_token: - description: - - An API access token to authenticate with the PagerDuty REST API. - required: true - type: str - pd_user: - description: - - Name of the user in PagerDuty. - required: true - type: str - pd_email: - description: - - The user's email address. - - I(pd_email) is the unique identifier used and cannot be updated using this module. - required: true - type: str - pd_role: - description: - - The user's role. - choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] - default: 'responder' - type: str - state: - description: - - State of the user. - - On C(present), it creates a user if the user doesn't exist. - - On C(absent), it removes a user if the account exists. - choices: ['present', 'absent'] - default: 'present' - type: str - pd_teams: - description: - - The teams to which the user belongs. - - Required if I(state=present). - type: list - elements: str -notes: - - Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Create a user account on PagerDuty - community.general.pagerduty_user: - access_token: 'Your_Access_token' - pd_user: user_full_name - pd_email: user_email - pd_role: user_pd_role - pd_teams: user_pd_teams - state: "present" - -- name: Remove a user account from PagerDuty - community.general.pagerduty_user: - access_token: 'Your_Access_token' - pd_user: user_full_name - pd_email: user_email - state: "absent" -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import traceback -from os import path - -try: - from pdpyras import APISession - HAS_PD_PY = True -except ImportError: - HAS_PD_PY = False - PD_IMPORT_ERR = traceback.format_exc() - -try: - from pdpyras import PDClientError - HAS_PD_CLIENT_ERR = True -except ImportError: - HAS_PD_CLIENT_ERR = False - PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc() - - -class PagerDutyUser(object): - def __init__(self, module, session): - self._module = module - self._apisession = session - - # check if the user exists - def does_user_exist(self, pd_email): - for user in self._apisession.iter_all('users'): - if user['email'] == pd_email: - return user['id'] - - # create a user account on PD - def add_pd_user(self, pd_name, pd_email, pd_role): - try: - user = self._apisession.persist('users', 'email', { - "name": pd_name, - "email": pd_email, - "type": "user", - "role": pd_role, - }) - return user - - except PDClientError as e: - if e.response.status_code == 400: - self._module.fail_json( - msg="Failed to add %s due to invalid argument" % (pd_name)) - if e.response.status_code == 401: - self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name)) - if e.response.status_code == 402: - self._module.fail_json( - msg="Failed to add %s due to inability to perform the action within the API token" % (pd_name)) - if e.response.status_code == 403: - self._module.fail_json( - msg="Failed to add %s due to inability to review the requested resource within the API token" % (pd_name)) - if e.response.status_code 
== 429: - self._module.fail_json( - msg="Failed to add %s due to reaching the limit of making requests" % (pd_name)) - - # delete a user account from PD - def delete_user(self, pd_user_id, pd_name): - try: - user_path = path.join('/users/', pd_user_id) - self._apisession.rdelete(user_path) - - except PDClientError as e: - if e.response.status_code == 404: - self._module.fail_json( - msg="Failed to remove %s as user was not found" % (pd_name)) - if e.response.status_code == 403: - self._module.fail_json( - msg="Failed to remove %s due to inability to review the requested resource within the API token" % (pd_name)) - if e.response.status_code == 401: - # print out the list of incidents - pd_incidents = self.get_incidents_assigned_to_user(pd_user_id) - self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents)) - if e.response.status_code == 429: - self._module.fail_json( - msg="Failed to remove %s due to reaching the limit of making requests" % (pd_name)) - - # get incidents assigned to a user - def get_incidents_assigned_to_user(self, pd_user_id): - incident_info = {} - incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]}) - - for incident in incidents: - incident_info = { - 'title': incident['title'], - 'key': incident['incident_key'], - 'status': incident['status'] - } - return incident_info - - # add a user to a team/teams - def add_user_to_teams(self, pd_user_id, pd_teams, pd_role): - updated_team = None - for team in pd_teams: - team_info = self._apisession.find('teams', team, attribute='name') - if team_info is not None: - try: - updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={ - 'role': pd_role - }) - except PDClientError: - updated_team = None - return updated_team - - -def main(): - module = AnsibleModule( - argument_spec=dict( - access_token=dict(type='str', required=True, no_log=True), - pd_user=dict(type='str', required=True), - pd_email=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - pd_role=dict(type='str', default='responder', - choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), - pd_teams=dict(type='list', elements='str', required=False)), - required_if=[['state', 'present', ['pd_teams']], ], - supports_check_mode=True, - ) - - if not HAS_PD_PY: - module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR) - - if not HAS_PD_CLIENT_ERR: - module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR) - - access_token = module.params['access_token'] - pd_user = module.params['pd_user'] - pd_email = module.params['pd_email'] - state = module.params['state'] - pd_role = module.params['pd_role'] - pd_teams = module.params['pd_teams'] - - if pd_role: - pd_role_gui_value = { - 'global_admin': 'admin', - 'manager': 'user', - 'responder': 'limited_user', - 'observer': 'observer', - 'stakeholder': 'read_only_user', - 'limited_stakeholder': 'read_only_limited_user', - 'restricted_access': 'restricted_access' - } - pd_role = pd_role_gui_value[pd_role] - - # authenticate with PD API - try: - session = APISession(access_token) - except PDClientError as e: - module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e) - - user = PagerDutyUser(module, session) - - user_exists = 
user.does_user_exist(pd_email) - - if user_exists: - if state == "absent": - # remove user - if not module.check_mode: - user.delete_user(user_exists, pd_user) - module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user) - else: - module.exit_json(changed=False, result="User %s already exists." % pd_user) - - # in case that the user does not exist - else: - if state == "absent": - module.exit_json(changed=False, result="User %s was not found." % pd_user) - - else: - # add user, adds user with the default notification rule and contact info (email) - if not module.check_mode: - user.add_pd_user(pd_user, pd_email, pd_role) - # get user's id - pd_user_id = user.does_user_exist(pd_email) - # add a user to the team/s - user.add_user_to_teams(pd_user_id, pd_teams, pd_role) - module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams)) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/monitoring/pingdom.py b/plugins/modules/monitoring/pingdom.py deleted file mode 100644 index 23ed254543..0000000000 --- a/plugins/modules/monitoring/pingdom.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pingdom -short_description: Pause/unpause Pingdom alerts -description: - - This module will let you pause/unpause Pingdom alerts -author: - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" -requirements: - - "This pingdom python library: https://github.com/mbabineau/pingdom-python" -options: - state: - type: str - description: - - Define whether or not the check should be running or paused. - required: true - choices: [ "running", "paused", "started", "stopped" ] - checkid: - type: str - description: - - Pingdom ID of the check. - required: true - uid: - type: str - description: - - Pingdom user ID. - required: true - passwd: - type: str - description: - - Pingdom user password. - required: true - key: - type: str - description: - - Pingdom API key. - required: true -notes: - - This module does not yet have support to add/remove checks. 
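For pingdom, the four accepted states collapse to one boolean: 'paused' and 'stopped' pause the check, while 'running' and 'started' unpause it. A compact sketch of that reduction, assuming the PingdomConnection / modify_check / get_check API of the pingdom-python library listed in the requirements:

```python
import pingdom  # https://github.com/mbabineau/pingdom-python


def set_check_paused(uid, passwd, key, checkid, paused):
    """Pause (True) or unpause (False) a check; return its name and status."""
    conn = pingdom.PingdomConnection(uid, passwd, key)
    conn.modify_check(checkid, paused=paused)
    check = conn.get_check(checkid)  # re-read so the reported status is current
    return check.name, check.status


# 'paused' and 'stopped' mean pause; 'running' and 'started' mean unpause:
# name, status = set_check_paused(uid, passwd, key, '12345',
#                                 state in ('paused', 'stopped'))
```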
-''' - -EXAMPLES = ''' -- name: Pause the check with the ID of 12345 - community.general.pingdom: - uid: example@example.com - passwd: password123 - key: apipassword123 - checkid: 12345 - state: paused - -- name: Unpause the check with the ID of 12345 - community.general.pingdom: - uid: example@example.com - passwd: password123 - key: apipassword123 - checkid: 12345 - state: running -''' - -import traceback - -PINGDOM_IMP_ERR = None -try: - import pingdom - HAS_PINGDOM = True -except Exception: - PINGDOM_IMP_ERR = traceback.format_exc() - HAS_PINGDOM = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def pause(checkid, uid, passwd, key): - - c = pingdom.PingdomConnection(uid, passwd, key) - c.modify_check(checkid, paused=True) - check = c.get_check(checkid) - name = check.name - result = check.status - # if result != "paused": # api output buggy - accept raw exception for now - # return (True, name, result) - return (False, name, result) - - -def unpause(checkid, uid, passwd, key): - - c = pingdom.PingdomConnection(uid, passwd, key) - c.modify_check(checkid, paused=False) - check = c.get_check(checkid) - name = check.name - result = check.status - # if result != "up": # api output buggy - accept raw exception for now - # return (True, name, result) - return (False, name, result) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), - checkid=dict(required=True), - uid=dict(required=True), - passwd=dict(required=True, no_log=True), - key=dict(required=True, no_log=True), - ) - ) - - if not HAS_PINGDOM: - module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR) - - checkid = module.params['checkid'] - state = module.params['state'] - uid = module.params['uid'] - passwd = module.params['passwd'] - key = module.params['key'] - - if (state == "paused" or state == "stopped"): - (rc, name, result) = pause(checkid, uid, passwd, key) - - if (state == "running" or state == "started"): - (rc, name, result) = unpause(checkid, uid, passwd, key) - - if rc != 0: - module.fail_json(checkid=checkid, name=name, status=result) - - module.exit_json(checkid=checkid, name=name, status=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/rollbar_deployment.py b/plugins/modules/monitoring/rollbar_deployment.py deleted file mode 100644 index cea3bfdf51..0000000000 --- a/plugins/modules/monitoring/rollbar_deployment.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014, Max Riveiro, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rollbar_deployment -author: "Max Riveiro (@kavu)" -short_description: Notify Rollbar about app deployments -description: - - Notify Rollbar about app deployments - (see https://rollbar.com/docs/deploys_other/) -options: - token: - type: str - description: - - Your project access token. - required: true - environment: - type: str - description: - - Name of the environment being deployed, e.g. 'production'. - required: true - revision: - type: str - description: - - Revision number/sha being deployed. - required: true - user: - type: str - description: - - User who deployed. - required: false - rollbar_user: - type: str - description: - - Rollbar username of the user who deployed. 
- required: false - comment: - type: str - description: - - Deploy comment (e.g. what is being deployed). - required: false - url: - type: str - description: - - Optional URL to submit the notification to. - required: false - default: 'https://api.rollbar.com/api/1/deploy/' - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. - This should only be used on personally controlled sites using - self-signed certificates. - required: false - default: 'yes' - type: bool -''' - -EXAMPLES = ''' - - name: Rollbar deployment notification - community.general.rollbar_deployment: - token: AAAAAA - environment: staging - user: ansible - revision: '4.2' - rollbar_user: admin - comment: Test Deploy - - - name: Notify rollbar about current git revision deployment by current user - community.general.rollbar_deployment: - token: "{{ rollbar_access_token }}" - environment: production - revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" - user: "{{ lookup('env', 'USER') }}" -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - environment=dict(required=True), - revision=dict(required=True), - user=dict(required=False), - rollbar_user=dict(required=False), - comment=dict(required=False), - url=dict( - required=False, - default='https://api.rollbar.com/api/1/deploy/' - ), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - if module.check_mode: - module.exit_json(changed=True) - - params = dict( - access_token=module.params['token'], - environment=module.params['environment'], - revision=module.params['revision'] - ) - - if module.params['user']: - params['local_username'] = module.params['user'] - - if module.params['rollbar_user']: - params['rollbar_username'] = module.params['rollbar_user'] - - if module.params['comment']: - params['comment'] = module.params['comment'] - - url = module.params.get('url') - - try: - data = urlencode(params) - response, info = fetch_url(module, url, data=data, method='POST') - except Exception as e: - module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc()) - else: - if info['status'] == 200: - module.exit_json(changed=True) - else: - module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/sensu/sensu_check.py b/plugins/modules/monitoring/sensu/sensu_check.py deleted file mode 100644 index ec43b60abe..0000000000 --- a/plugins/modules/monitoring/sensu/sensu_check.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sensu_check -short_description: Manage Sensu checks -description: - - Manage the checks that should be run on a machine by I(Sensu). - - Most options do not have a default and will not be added to the check definition unless specified. 
- - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module; - they are simply specified for your convenience. -options: - name: - type: str - description: - - The name of the check - - This is the key that is used to determine whether a check exists - required: true - state: - type: str - description: - - Whether the check should be present or not - choices: [ 'present', 'absent' ] - default: present - path: - type: str - description: - - Path to the json file of the check to be added/removed. - - Will be created if it does not exist (unless I(state=absent)). - - The parent folders need to exist when I(state=present), otherwise an error will be thrown - default: /etc/sensu/conf.d/checks.json - backup: - description: - - Create a backup file (if yes), including the timestamp information so - - you can get the original file back if you somehow clobbered it incorrectly. - type: bool - default: 'no' - command: - type: str - description: - - Path to the sensu check to run (not required when I(state=absent)) - handlers: - type: list - elements: str - description: - - List of handlers to notify when the check fails - default: [] - subscribers: - type: list - elements: str - description: - - List of subscribers/channels this check should run for - - See sensu_subscribers to subscribe a machine to a channel - default: [] - interval: - type: int - description: - - Check interval in seconds - timeout: - type: int - description: - - Timeout for the check - - If not specified, it defaults to 10. - ttl: - type: int - description: - - Time to live in seconds until the check is considered stale - handle: - description: - - Whether the check should be handled or not - - Default is C(false). - type: bool - subdue_begin: - type: str - description: - - When to disable handling of check failures - subdue_end: - type: str - description: - - When to enable handling of check failures - dependencies: - type: list - elements: str - description: - - Other checks this check depends on; if dependencies fail, handling of this check will be disabled - default: [] - metric: - description: - - Whether the check is a metric - type: bool - default: 'no' - standalone: - description: - - Whether the check should be scheduled by the sensu client or server - - This option obviates the need for specifying the I(subscribers) option - - Default is C(false). - type: bool - publish: - description: - - Whether the check should be scheduled at all. - - You can still issue it via the sensu api - - Default is C(false). - type: bool - occurrences: - type: int - description: - - Number of event occurrences before the handler should take action - - If not specified, defaults to 1. - refresh: - type: int - description: - - Number of seconds handlers should wait before taking a second action - aggregate: - description: - - Classifies the check as an aggregate check, - - making it available via the aggregate API - - Default is C(false). - type: bool - low_flap_threshold: - type: int - description: - - The low threshold for flap detection - high_flap_threshold: - type: int - description: - - The high threshold for flap detection - custom: - type: dict - description: - - A hash/dictionary of custom parameters for mixing into the configuration. - - You cannot overwrite other module parameters using this - default: {} - source: - type: str - description: - - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
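Every option above ultimately lands as a key of a single check entry inside one JSON document. A sketch of the on-disk shape the module maintains, using the default path and values borrowed from the module's examples:

```python
import json

# Default from the 'path' option; a single file can hold many named checks.
path = '/etc/sensu/conf.d/checks.json'

config = {
    'checks': {
        'nginx_running': {  # the 'name' option becomes the dict key
            'command': '/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid',
            'handlers': ['default'],
            'subscribers': ['nginx'],
            'interval': 60,
        }
    }
}

# On any change the module rewrites the whole document, pretty-printed.
with open(path, 'w') as stream:
    stream.write(json.dumps(config, indent=2) + '\n')
```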
-author: "Anders Ingemann (@andsens)" -''' - -EXAMPLES = ''' -# Fetch metrics about the CPU load every 60 seconds, -# the sensu server has a handler called 'relay' which forwards stats to graphite -- name: Get cpu metrics - community.general.sensu_check: - name: cpu_load - command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb - metric: yes - handlers: relay - subscribers: common - interval: 60 - -# Check whether nginx is running -- name: Check nginx process - community.general.sensu_check: - name: nginx_running - command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid - handlers: default - subscribers: nginx - interval: 60 - -# Stop monitoring the disk capacity. -# Note that the check will still show up in the sensu dashboard, -# to remove it completely you need to issue a DELETE request to the sensu api. -- name: Check disk - community.general.sensu_check: - name: check_disk_capacity - state: absent -''' - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def sensu_check(module, path, name, state='present', backup=False): - changed = False - reasons = [] - - stream = None - try: - try: - stream = open(path, 'r') - config = json.load(stream) - except IOError as e: - if e.errno == 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') - return changed, reasons - config = {} - else: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except ValueError: - msg = '{path} contains invalid JSON'.format(path=path) - module.fail_json(msg=msg) - finally: - if stream: - stream.close() - - if 'checks' not in config: - if state == 'absent': - reasons.append('`checks\' section did not exist and state is `absent\'') - return changed, reasons - config['checks'] = {} - changed = True - reasons.append('`checks\' section did not exist') - - if state == 'absent': - if name in config['checks']: - del config['checks'][name] - changed = True - reasons.append('check was present and state is `absent\'') - - if state == 'present': - if name not in config['checks']: - check = {} - config['checks'][name] = check - changed = True - reasons.append('check was absent and state is `present\'') - else: - check = config['checks'][name] - simple_opts = ['command', - 'handlers', - 'subscribers', - 'interval', - 'timeout', - 'ttl', - 'handle', - 'dependencies', - 'standalone', - 'publish', - 'occurrences', - 'refresh', - 'aggregate', - 'low_flap_threshold', - 'high_flap_threshold', - 'source', - ] - for opt in simple_opts: - if module.params[opt] is not None: - if opt not in check or check[opt] != module.params[opt]: - check[opt] = module.params[opt] - changed = True - reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) - else: - if opt in check: - del check[opt] - changed = True - reasons.append('`{opt}\' was removed'.format(opt=opt)) - - if module.params['custom']: - # Convert to json - custom_params = module.params['custom'] - overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']) - if overwrited_fields: - msg = 'You can\'t overwriting standard module parameters via "custom". 
You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields)) - module.fail_json(msg=msg) - - for k, v in custom_params.items(): - if k in config['checks'][name]: - if not config['checks'][name][k] == v: - changed = True - reasons.append('`custom param {opt}\' was changed'.format(opt=k)) - else: - changed = True - reasons.append('`custom param {opt}\' was added'.format(opt=k)) - check[k] = v - simple_opts += custom_params.keys() - - # Remove obsolete custom params - for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']): - changed = True - reasons.append('`custom param {opt}\' was deleted'.format(opt=opt)) - del check[opt] - - if module.params['metric']: - if 'type' not in check or check['type'] != 'metric': - check['type'] = 'metric' - changed = True - reasons.append('`type\' was not defined or not `metric\'') - if not module.params['metric'] and 'type' in check: - del check['type'] - changed = True - reasons.append('`type\' was defined') - - if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: - subdue = {'begin': module.params['subdue_begin'], - 'end': module.params['subdue_end'], - } - if 'subdue' not in check or check['subdue'] != subdue: - check['subdue'] = subdue - changed = True - reasons.append('`subdue\' did not exist or was different') - else: - if 'subdue' in check: - del check['subdue'] - changed = True - reasons.append('`subdue\' was removed') - - if changed and not module.check_mode: - if backup: - module.backup_local(path) - try: - try: - stream = open(path, 'w') - stream.write(json.dumps(config, indent=2) + '\n') - except IOError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - finally: - if stream: - stream.close() - - return changed, reasons - - -def main(): - - arg_spec = {'name': {'type': 'str', 'required': True}, - 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, - 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': 'no'}, - 'command': {'type': 'str'}, - 'handlers': {'type': 'list', 'elements': 'str'}, - 'subscribers': {'type': 'list', 'elements': 'str'}, - 'interval': {'type': 'int'}, - 'timeout': {'type': 'int'}, - 'ttl': {'type': 'int'}, - 'handle': {'type': 'bool'}, - 'subdue_begin': {'type': 'str'}, - 'subdue_end': {'type': 'str'}, - 'dependencies': {'type': 'list', 'elements': 'str'}, - 'metric': {'type': 'bool', 'default': 'no'}, - 'standalone': {'type': 'bool'}, - 'publish': {'type': 'bool'}, - 'occurrences': {'type': 'int'}, - 'refresh': {'type': 'int'}, - 'aggregate': {'type': 'bool'}, - 'low_flap_threshold': {'type': 'int'}, - 'high_flap_threshold': {'type': 'int'}, - 'custom': {'type': 'dict'}, - 'source': {'type': 'str'}, - } - - required_together = [['subdue_begin', 'subdue_end']] - - module = AnsibleModule(argument_spec=arg_spec, - required_together=required_together, - supports_check_mode=True) - if module.params['state'] != 'absent' and module.params['command'] is None: - module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) - - path = module.params['path'] - name = module.params['name'] - state = module.params['state'] - backup = module.params['backup'] - - changed, reasons = sensu_check(module, path, name, state, backup) - - module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) - - -if __name__ == '__main__': - main() diff --git
a/plugins/modules/monitoring/sensu/sensu_client.py b/plugins/modules/monitoring/sensu/sensu_client.py deleted file mode 100644 index 886c398e09..0000000000 --- a/plugins/modules/monitoring/sensu/sensu_client.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sensu_client -author: "David Moreau Simard (@dmsimard)" -short_description: Manages Sensu client configuration -description: - - Manages Sensu client configuration. - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)' -options: - state: - type: str - description: - - Whether the client should be present or not - choices: [ 'present', 'absent' ] - default: present - name: - type: str - description: - - A unique name for the client. The name cannot contain special characters or spaces. - - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu). - address: - type: str - description: - - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. - - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu). - subscriptions: - type: list - elements: str - description: - - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver). - - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. - - The subscriptions array items must be strings. - safe_mode: - description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. - type: bool - default: 'no' - redact: - type: list - elements: str - description: - - Client definition attributes to redact (values) when logging and sending client keepalives. - socket: - type: dict - description: - - The socket definition scope, used to configure the Sensu client socket. - keepalives: - description: - - If Sensu should monitor keepalives for this client. - type: bool - default: 'yes' - keepalive: - type: dict - description: - - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc). - registration: - type: dict - description: - - The registration definition scope, used to configure Sensu registration event handlers. - deregister: - description: - - If a deregistration event should be created upon Sensu client process stop. - - Default is C(false). - type: bool - deregistration: - type: dict - description: - - The deregistration definition scope, used to configure automated Sensu client de-registration. - ec2: - type: dict - description: - - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only). - chef: - type: dict - description: - - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only). - puppet: - type: dict - description: - - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only). 
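A note for readers of this removed module: sensu_client, like the sensu_check and sensu_handler modules around it, follows one idempotency pattern — build the desired JSON document from the module parameters, compare it with what is already on disk, and rewrite the file only when the two differ. A minimal standalone sketch of that pattern (the path and payload here are illustrative, not the module's exact code):

```python
import json
import os


def write_if_changed(path, desired):
    """Rewrite `path` only when its JSON content differs from `desired`."""
    current = None
    try:
        with open(path) as f:
            current = json.load(f)
    except (IOError, OSError, ValueError):
        pass  # missing file or invalid JSON: treat as "no current config"
    if current == desired:
        return False  # already up to date -> changed=False
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        f.write(json.dumps(desired, indent=4))
    return True  # caller reports changed=True


# Example: the sensu_client module writes {'client': {...}} to client.json
changed = write_if_changed('/tmp/client.json',
                           {'client': {'subscriptions': ['default']}})
```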
- servicenow: - type: dict - description: - - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). -notes: - - Check mode is supported -''' - -EXAMPLES = ''' -# Minimum possible configuration -- name: Configure Sensu client - community.general.sensu_client: - subscriptions: - - default - -# With customization -- name: Configure Sensu client - community.general.sensu_client: - name: "{{ ansible_fqdn }}" - address: "{{ ansible_default_ipv4['address'] }}" - subscriptions: - - default - - webserver - redact: - - password - socket: - bind: 127.0.0.1 - port: 3030 - keepalive: - thresholds: - warning: 180 - critical: 300 - handlers: - - email - custom: - - broadcast: irc - occurrences: 3 - register: client - notify: - - Restart sensu-client - -- name: Secure Sensu client configuration file - ansible.builtin.file: - path: "{{ client['file'] }}" - owner: "sensu" - group: "sensu" - mode: "0600" - -- name: Delete the Sensu client configuration - community.general.sensu_client: - state: "absent" -''' - -RETURN = ''' -config: - description: Effective client configuration, when state is present - returned: success - type: dict - sample: {'name': 'client', 'subscriptions': ['default']} -file: - description: Path to the client configuration file - returned: success - type: str - sample: "/etc/sensu/conf.d/client.json" -''' - -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(type='str', ), - address=dict(type='str', ), - subscriptions=dict(type='list', elements="str"), - safe_mode=dict(type='bool', default=False), - redact=dict(type='list', elements="str"), - socket=dict(type='dict'), - keepalives=dict(type='bool', default=True), - keepalive=dict(type='dict'), - registration=dict(type='dict'), - deregister=dict(type='bool'), - deregistration=dict(type='dict'), - ec2=dict(type='dict'), - chef=dict(type='dict'), - puppet=dict(type='dict'), - servicenow=dict(type='dict') - ), - required_if=[ - ['state', 'present', ['subscriptions']] - ] - ) - - state = module.params['state'] - path = "/etc/sensu/conf.d/client.json" - - if state == 'absent': - if os.path.exists(path): - if module.check_mode: - msg = '{path} would have been deleted'.format(path=path) - module.exit_json(msg=msg, changed=True) - else: - try: - os.remove(path) - msg = '{path} deleted successfully'.format(path=path) - module.exit_json(msg=msg, changed=True) - except OSError as e: - msg = 'Exception when trying to delete {path}: {exception}' - module.fail_json( - msg=msg.format(path=path, exception=str(e))) - else: - # Idempotency: it's okay if the file doesn't exist - msg = '{path} already does not exist'.format(path=path) - module.exit_json(msg=msg) - - # Build client configuration from module arguments - config = {'client': {}} - args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact', - 'socket', 'keepalives', 'keepalive', 'registration', 'deregister', - 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow'] - - for arg in args: - if arg in module.params and module.params[arg] is not None: - config['client'][arg] = module.params[arg] - - # Load the current config, if there is one, so we can compare - current_config = None - try: - current_config = json.load(open(path, 'r')) - except (IOError, ValueError): - # File either doesn't exist or it's invalid JSON 
- pass - - if current_config is not None and current_config == config: - # Config is the same, let's not change anything - module.exit_json(msg='Client configuration is already up to date', - config=config['client'], - file=path) - - # Validate that directory exists before trying to write to it - if not module.check_mode and not os.path.exists(os.path.dirname(path)): - try: - os.makedirs(os.path.dirname(path)) - except OSError as e: - module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), - str(e))) - - if module.check_mode: - module.exit_json(msg='Client configuration would have been updated', - changed=True, - config=config['client'], - file=path) - - try: - with open(path, 'w') as client: - client.write(json.dumps(config, indent=4)) - module.exit_json(msg='Client configuration updated', - changed=True, - config=config['client'], - file=path) - except (OSError, IOError) as e: - module.fail_json(msg='Unable to write file {0}: {1}'.format(path, - str(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/sensu/sensu_handler.py b/plugins/modules/monitoring/sensu/sensu_handler.py deleted file mode 100644 index 6511479899..0000000000 --- a/plugins/modules/monitoring/sensu/sensu_handler.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sensu_handler -author: "David Moreau Simard (@dmsimard)" -short_description: Manages Sensu handler configuration -description: - - Manages Sensu handler configuration - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' -options: - state: - type: str - description: - - Whether the handler should be present or not - choices: [ 'present', 'absent' ] - default: present - name: - type: str - description: - - A unique name for the handler. The name cannot contain special characters or spaces. - required: True - type: - type: str - description: - - The handler type - choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] - filter: - type: str - description: - - The Sensu event filter (name) to use when filtering events for the handler. - filters: - type: list - elements: str - description: - - An array of Sensu event filters (names) to use when filtering events for the handler. - - Each array item must be a string. - severities: - type: list - elements: str - description: - - An array of check result severities the handler will handle. - - 'NOTE: event resolution bypasses this filtering.' - - "Example: [ 'warning', 'critical', 'unknown' ]." - mutator: - type: str - description: - - The Sensu event mutator (name) to use to mutate event data for the handler. - timeout: - type: int - description: - - The handler execution duration timeout in seconds (hard stop). - - Only used by pipe and tcp handler types. - default: 10 - handle_silenced: - description: - - If events matching one or more silence entries should be handled. - type: bool - default: 'no' - handle_flapping: - description: - - If events in the flapping state should be handled. - type: bool - default: 'no' - command: - type: str - description: - - The handler command to be executed. - - The event data is passed to the process via STDIN. - - 'NOTE: the command attribute is only required for Pipe handlers (i.e. 
handlers configured with "type": "pipe").' - socket: - type: dict - description: - - The socket definition scope, used to configure the TCP/UDP handler socket. - - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").' - pipe: - type: dict - description: - - The pipe definition scope, used to configure the Sensu transport pipe. - - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").' - handlers: - type: list - elements: str - description: - - An array of Sensu event handlers (names) to use for events using the handler set. - - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' -notes: - - Check mode is supported -''' - -EXAMPLES = ''' -# Configure a handler that sends event data as STDIN (pipe) -- name: Configure IRC Sensu handler - community.general.sensu_handler: - name: "irc_handler" - type: "pipe" - command: "/usr/local/bin/notify-irc.sh" - severities: - - "ok" - - "critical" - - "warning" - - "unknown" - timeout: 15 - notify: - - Restart sensu-client - - Restart sensu-server - -# Delete a handler -- name: Delete IRC Sensu handler - community.general.sensu_handler: - name: "irc_handler" - state: "absent" - -# Example of a TCP handler -- name: Configure TCP Sensu handler - community.general.sensu_handler: - name: "tcp_handler" - type: "tcp" - timeout: 30 - socket: - host: "10.0.1.99" - port: 4444 - register: handler - notify: - - Restart sensu-client - - Restart sensu-server - -- name: Secure Sensu handler configuration file - ansible.builtin.file: - path: "{{ handler['file'] }}" - owner: "sensu" - group: "sensu" - mode: "0600" -''' - -RETURN = ''' -config: - description: Effective handler configuration, when state is present - returned: success - type: dict - sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} -file: - description: Path to the handler configuration file - returned: success - type: str - sample: "/etc/sensu/conf.d/handlers/irc.json" -name: - description: Name of the handler - returned: success - type: str - sample: "irc" -''' - -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(type='str', required=True), - type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']), - filter=dict(type='str'), - filters=dict(type='list', elements='str'), - severities=dict(type='list', elements='str'), - mutator=dict(type='str'), - timeout=dict(type='int', default=10), - handle_silenced=dict(type='bool', default=False), - handle_flapping=dict(type='bool', default=False), - command=dict(type='str'), - socket=dict(type='dict'), - pipe=dict(type='dict'), - handlers=dict(type='list', elements='str'), - ), - required_if=[ - ['state', 'present', ['type']], - ['type', 'pipe', ['command']], - ['type', 'tcp', ['socket']], - ['type', 'udp', ['socket']], - ['type', 'transport', ['pipe']], - ['type', 'set', ['handlers']] - ] - ) - - state = module.params['state'] - name = module.params['name'] - path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name) - - if state == 'absent': - if os.path.exists(path): - if module.check_mode: - msg = '{path} would have been deleted'.format(path=path) - module.exit_json(msg=msg, changed=True) - else: - try: - 
os.remove(path) - msg = '{path} deleted successfully'.format(path=path) - module.exit_json(msg=msg, changed=True) - except OSError as e: - msg = 'Exception when trying to delete {path}: {exception}' - module.fail_json( - msg=msg.format(path=path, exception=str(e))) - else: - # Idempotency: it's okay if the file doesn't exist - msg = '{path} already does not exist'.format(path=path) - module.exit_json(msg=msg) - - # Build handler configuration from module arguments - config = {'handlers': {name: {}}} - args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout', - 'handle_silenced', 'handle_flapping', 'command', 'socket', - 'pipe', 'handlers'] - - for arg in args: - if arg in module.params and module.params[arg] is not None: - config['handlers'][name][arg] = module.params[arg] - - # Load the current config, if there is one, so we can compare - current_config = None - try: - current_config = json.load(open(path, 'r')) - except (IOError, ValueError): - # File either doesn't exist or it's invalid JSON - pass - - if current_config is not None and current_config == config: - # Config is the same, let's not change anything - module.exit_json(msg='Handler configuration is already up to date', - config=config['handlers'][name], - file=path, - name=name) - - # Validate that directory exists before trying to write to it - if not module.check_mode and not os.path.exists(os.path.dirname(path)): - try: - os.makedirs(os.path.dirname(path)) - except OSError as e: - module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), - str(e))) - - if module.check_mode: - module.exit_json(msg='Handler configuration would have been updated', - changed=True, - config=config['handlers'][name], - file=path, - name=name) - - try: - with open(path, 'w') as handler: - handler.write(json.dumps(config, indent=4)) - module.exit_json(msg='Handler configuration updated', - changed=True, - config=config['handlers'][name], - file=path, - name=name) - except (OSError, IOError) as e: - module.fail_json(msg='Unable to write file {0}: {1}'.format(path, - str(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/sensu/sensu_silence.py b/plugins/modules/monitoring/sensu/sensu_silence.py deleted file mode 100644 index 80a5216711..0000000000 --- a/plugins/modules/monitoring/sensu/sensu_silence.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Steven Bambling -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sensu_silence -author: Steven Bambling (@smbambling) -short_description: Manage Sensu silence entries -description: - - Create and clear (delete) a silence entries via the Sensu API - for subscriptions and checks. -options: - check: - type: str - description: - - Specifies the check which the silence entry applies to. - creator: - type: str - description: - - Specifies the entity responsible for this entry. - expire: - type: int - description: - - If specified, the silence entry will be automatically cleared - after this number of seconds. - expire_on_resolve: - description: - - If specified as true, the silence entry will be automatically - cleared once the condition it is silencing is resolved. 
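Before the remaining options, a quick illustration: every request helper in this removed sensu_silence module builds a JSON body for the Sensu /silenced endpoints and strips parameters that were left unset before posting. A hedged, standalone sketch of that clean-up step (the function name is mine, not the module's):

```python
import json


def silence_payload(check=None, subscription=None, creator=None,
                    expire=None, expire_on_resolve=None, reason=None):
    """Build a /silenced request body, dropping keys that were not set."""
    data = {
        'check': check,
        'subscription': subscription,
        'creator': creator,
        'expire': expire,
        'expire_on_resolve': expire_on_resolve,
        'reason': reason,
    }
    return json.dumps({k: v for k, v in data.items() if v is not None})


print(silence_payload(subscription='client:server1.example.dev',
                      reason='Performing maintenance'))
```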
- type: bool - reason: - type: str - description: - - If specified, this free-form string is used to provide context or - rationale for the reason this silence entry was created. - state: - type: str - description: - - Specifies whether to create or clear (delete) a silence entry via the Sensu API. - default: present - choices: ['present', 'absent'] - subscription: - type: str - description: - - Specifies the subscription which the silence entry applies to. - - To create a silence entry for a client prepend C(client:) to the client name. - Example - C(client:server1.example.dev) - required: true - url: - type: str - description: - - Specifies the URL of the Sensu monitoring host server. - required: false - default: http://127.0.0.1:4567 -''' - -EXAMPLES = ''' -# Silence ALL checks for a given client -- name: Silence server1.example.dev - community.general.sensu_silence: - subscription: client:server1.example.dev - creator: "{{ ansible_user_id }}" - reason: Performing maintenance - -# Silence a specific check for a client -- name: Silence CPU_Usage check for server1.example.dev - community.general.sensu_silence: - subscription: client:server1.example.dev - check: CPU_Usage - creator: "{{ ansible_user_id }}" - reason: Investigating alert issue - -# Silence multiple clients from a dict - silence: - server1.example.dev: - reason: 'Deployment in progress' - server2.example.dev: - reason: 'Deployment in progress' - -- name: Silence several clients from a dict - community.general.sensu_silence: - subscription: "client:{{ item.key }}" - reason: "{{ item.value.reason }}" - creator: "{{ ansible_user_id }}" - with_dict: "{{ silence }}" -''' - -RETURN = ''' -''' - -import json - -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def query(module, url, check, subscription): - headers = { - 'Content-Type': 'application/json', - } - - url = url + '/silenced' - - request_data = { - 'check': check, - 'subscription': subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url( - module, url, method='GET', - headers=headers, data=json.dumps(request_data) - ) - - if info['status'] == 500: - module.fail_json( - msg="Failed to query silence %s. 
Reason: %s" % (subscription, info) - ) - - try: - json_out = json.loads(to_native(response.read())) - except Exception: - json_out = "" - - return False, json_out, False - - -def clear(module, url, check, subscription): - # Test if silence exists before clearing - (rc, out, changed) = query(module, url, check, subscription) - - d = dict((i['subscription'], i['check']) for i in out) - subscription_exists = subscription in d - if check and subscription_exists: - exists = (check == d[subscription]) - else: - exists = subscription_exists - - # If check/subscription doesn't exist - # exit with changed state of False - if not exists: - return False, out, changed - - # module.check_mode is inherited from the AnsibleModule class - if not module.check_mode: - headers = { - 'Content-Type': 'application/json', - } - - url = url + '/silenced/clear' - - request_data = { - 'check': check, - 'subscription': subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url( - module, url, method='POST', - headers=headers, data=json.dumps(request_data) - ) - - if info['status'] != 204: - module.fail_json( - msg="Failed to silence %s. Reason: %s" % (subscription, info) - ) - - try: - json_out = json.loads(to_native(response.read())) - except Exception: - json_out = "" - - return False, json_out, True - return False, out, True - - -def create( - module, url, check, creator, expire, - expire_on_resolve, reason, subscription): - (rc, out, changed) = query(module, url, check, subscription) - for i in out: - if (i['subscription'] == subscription): - if ( - (check is None or check == i['check']) and - ( - creator == '' or - creator == i['creator']) and - ( - reason == '' or - reason == i['reason']) and - ( - expire is None or expire == i['expire']) and - ( - expire_on_resolve is None or - expire_on_resolve == i['expire_on_resolve'] - ) - ): - return False, out, False - - # module.check_mode is inherited from the AnsibleModule class - if not module.check_mode: - headers = { - 'Content-Type': 'application/json', - } - - url = url + '/silenced' - - request_data = { - 'check': check, - 'creator': creator, - 'expire': expire, - 'expire_on_resolve': expire_on_resolve, - 'reason': reason, - 'subscription': subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url( - module, url, method='POST', - headers=headers, data=json.dumps(request_data) - ) - - if info['status'] != 201: - module.fail_json( - msg="Failed to silence %s. 
Reason: %s" % - (subscription, info['msg']) - ) - - try: - json_out = json.loads(to_native(response.read())) - except Exception: - json_out = "" - - return False, json_out, True - return False, out, True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - check=dict(required=False), - creator=dict(required=False), - expire=dict(type='int', required=False), - expire_on_resolve=dict(type='bool', required=False), - reason=dict(required=False), - state=dict(default='present', choices=['present', 'absent']), - subscription=dict(required=True), - url=dict(required=False, default='http://127.0.0.1:4567'), - ), - supports_check_mode=True - ) - - url = module.params['url'] - check = module.params['check'] - creator = module.params['creator'] - expire = module.params['expire'] - expire_on_resolve = module.params['expire_on_resolve'] - reason = module.params['reason'] - subscription = module.params['subscription'] - state = module.params['state'] - - if state == 'present': - (rc, out, changed) = create( - module, url, check, creator, - expire, expire_on_resolve, reason, subscription - ) - - if state == 'absent': - (rc, out, changed) = clear(module, url, check, subscription) - - if rc != 0: - module.fail_json(msg="failed", result=out) - module.exit_json(msg="success", result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/sensu/sensu_subscription.py b/plugins/modules/monitoring/sensu/sensu_subscription.py deleted file mode 100644 index 947c6e0de5..0000000000 --- a/plugins/modules/monitoring/sensu/sensu_subscription.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sensu_subscription -short_description: Manage Sensu subscriptions -description: - - Manage which I(Sensu channels) a machine should subscribe to -options: - name: - type: str - description: - - The name of the channel - required: true - state: - type: str - description: - - Whether the machine should subscribe or unsubscribe from the channel - choices: [ 'present', 'absent' ] - required: false - default: present - path: - type: str - description: - - Path to the subscriptions JSON file - required: false - default: /etc/sensu/conf.d/subscriptions.json - backup: - description: - - Create a backup file (if yes), including the timestamp information, so you can get the original file back if you somehow clobbered it incorrectly. 
- type: bool - required: false - default: no -requirements: [ ] -author: Anders Ingemann (@andsens) -''' - -RETURN = ''' -reasons: - description: the reasons why the module changed or did not change something - returned: success - type: list - sample: ["channel subscription was absent and state is `present'"] -''' - -EXAMPLES = ''' -# Subscribe to the nginx channel -- name: Subscribe to nginx checks - community.general.sensu_subscription: name=nginx - -# Unsubscribe from the common checks channel -- name: Unsubscribe from common checks - community.general.sensu_subscription: name=common state=absent -''' - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def sensu_subscription(module, path, name, state='present', backup=False): - changed = False - reasons = [] - - try: - config = json.load(open(path)) - except IOError as e: - if e.errno == 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') - return changed, reasons - config = {} - else: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except ValueError: - msg = '{path} contains invalid JSON'.format(path=path) - module.fail_json(msg=msg) - - if 'client' not in config: - if state == 'absent': - reasons.append('`client\' did not exist and state is `absent\'') - return changed, reasons - config['client'] = {} - changed = True - reasons.append('`client\' did not exist') - - if 'subscriptions' not in config['client']: - if state == 'absent': - reasons.append('`client.subscriptions\' did not exist and state is `absent\'') - return changed, reasons - config['client']['subscriptions'] = [] - changed = True - reasons.append('`client.subscriptions\' did not exist') - - if name not in config['client']['subscriptions']: - if state == 'absent': - reasons.append('channel subscription was absent') - return changed, reasons - config['client']['subscriptions'].append(name) - changed = True - reasons.append('channel subscription was absent and state is `present\'') - else: - if state == 'absent': - config['client']['subscriptions'].remove(name) - changed = True - reasons.append('channel subscription was present and state is `absent\'') - - if changed and not module.check_mode: - if backup: - module.backup_local(path) - try: - open(path, 'w').write(json.dumps(config, indent=2) + '\n') - except IOError as e: - module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)), - exception=traceback.format_exc()) - - return changed, reasons - - -def main(): - arg_spec = {'name': {'type': 'str', 'required': True}, - 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, - 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': 'no'}, - } - - module = AnsibleModule(argument_spec=arg_spec, - supports_check_mode=True) - - path = module.params['path'] - name = module.params['name'] - state = module.params['state'] - backup = module.params['backup'] - - changed, reasons = sensu_subscription(module, path, name, state, backup) - - module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/spectrum_device.py b/plugins/modules/monitoring/spectrum_device.py deleted file mode 100644 index 77e3b15390..0000000000 --- a/plugins/modules/monitoring/spectrum_device.py +++ /dev/null @@ 
-1,332 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Renato Orgito -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: spectrum_device -short_description: Creates/deletes devices in CA Spectrum. -description: - - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). - - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 -author: "Renato Orgito (@orgito)" -options: - device: - type: str - aliases: [ host, name ] - required: true - description: - - IP address of the device. - - If a hostname is given, it will be resolved to the IP address. - community: - type: str - description: - - SNMP community used for device discovery. - - Required when C(state=present). - required: true - landscape: - type: str - required: true - description: - - Landscape handle of the SpectroServer to which add or remove the device. - state: - type: str - required: false - description: - - On C(present) creates the device when it does not exist. - - On C(absent) removes the device when it exists. - choices: ['present', 'absent'] - default: 'present' - url: - type: str - aliases: [ oneclick_url ] - required: true - description: - - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port] - url_username: - type: str - aliases: [ oneclick_user ] - required: true - description: - - Oneclick user name. - url_password: - type: str - aliases: [ oneclick_password ] - required: true - description: - - Oneclick user password. - use_proxy: - required: false - description: - - if C(no), it will not use a proxy, even if one is defined in an environment - variable on the target hosts. - default: 'yes' - type: bool - validate_certs: - required: false - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: 'yes' - type: bool - agentport: - type: int - required: false - description: - - UDP port used for SNMP discovery. - default: 161 -notes: - - The devices will be created inside the I(Universe) container of the specified landscape. - - All the operations will be performed only on the specified landscape. 
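One implementation detail of this removed spectrum_device module deserves a callout: Spectrum model handles encode their landscape in the high bits, and the code recovers the landscape handle by rounding the model handle down to a 0x100000 boundary. Illustrative arithmetic only:

```python
def landscape_of(model_handle):
    """Derive the landscape handle from a Spectrum model handle string."""
    return "0x%x" % (int(model_handle, 16) // 0x100000 * 0x100000)


print(landscape_of("0x1007ab"))  # -> 0x100000
```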
-''' - -EXAMPLES = ''' -- name: Add device to CA Spectrum - local_action: - module: spectrum_device - device: '{{ ansible_host }}' - community: secret - landscape: '0x100000' - oneclick_url: http://oneclick.example.com:8080 - oneclick_user: username - oneclick_password: password - state: present - - -- name: Remove device from CA Spectrum - local_action: - module: spectrum_device - device: '{{ ansible_host }}' - landscape: '{{ landscape_handle }}' - oneclick_url: http://oneclick.example.com:8080 - oneclick_user: username - oneclick_password: password - use_proxy: no - state: absent -''' - -RETURN = ''' -device: - description: device data when state = present - returned: success - type: dict - sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'} -''' - -from socket import gethostbyname, gaierror -import xml.etree.ElementTree as ET - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def request(resource, xml=None, method=None): - headers = { - "Content-Type": "application/xml", - "Accept": "application/xml" - } - - url = module.params['oneclick_url'] + '/spectrum/restful/' + resource - - response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45) - - if info['status'] == 401: - module.fail_json(msg="failed to authenticate to Oneclick server") - - if info['status'] not in (200, 201, 204): - module.fail_json(msg=info['msg']) - - return response.read() - - -def post(resource, xml=None): - return request(resource, xml=xml, method='POST') - - -def delete(resource): - return request(resource, xml=None, method='DELETE') - - -def get_ip(): - try: - device_ip = gethostbyname(module.params.get('device')) - except gaierror: - module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device')) - - return device_ip - - -def get_device(device_ip): - """Query OneClick for the device using the IP Address""" - resource = '/models' - landscape_min = "0x%x" % int(module.params.get('landscape'), 16) - landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000) - - xml = """ - - - - - - - - - SearchManager - - - - {mh_min} - - - - - {mh_max} - - - - - FIND_DEV_MODELS_BY_IP - - {search_ip} - - - - - - - - """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max) - - result = post(resource, xml=xml) - - root = ET.fromstring(result) - - if root.get('total-models') == '0': - return None - - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - - # get the first device - model = root.find('ca:model-responses', namespace).find('ca:model', namespace) - - if model.get('error'): - module.fail_json(msg="error checking device: %s" % model.get('error')) - - # get the attributes - model_handle = model.get('mh') - - model_address = model.find('./*[@id="0x12d7f"]').text - - # derive the landscape handler from the model handler of the device - model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000) - - device = dict( - model_handle=model_handle, - address=model_address, - landscape=model_landscape) - - return device - - -def add_device(): - device_ip = get_ip() - device = get_device(device_ip) - - if device: - module.exit_json(changed=False, device=device) - - if module.check_mode: - device = dict( - model_handle=None, - address=device_ip, - landscape="0x%x" % int(module.params.get('landscape'), 16)) - module.exit_json(changed=True, device=device) - - resource = 'model?ipaddress=' + device_ip + 
'&commstring=' + module.params.get('community') - resource += '&landscapeid=' + module.params.get('landscape') - - if module.params.get('agentport', None): - resource += '&agentport=' + str(module.params.get('agentport', 161)) - - result = post(resource) - root = ET.fromstring(result) - - if root.get('error') != 'Success': - module.fail_json(msg=root.get('error-message')) - - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - model = root.find('ca:model', namespace) - - model_handle = model.get('mh') - model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000) - - device = dict( - model_handle=model_handle, - address=device_ip, - landscape=model_landscape, - ) - - module.exit_json(changed=True, device=device) - - -def remove_device(): - device_ip = get_ip() - device = get_device(device_ip) - - if device is None: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - resource = '/model/' + device['model_handle'] - result = delete(resource) - - root = ET.fromstring(result) - - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - error = root.find('ca:error', namespace).text - - if error != 'Success': - error_message = root.find('ca:error-message', namespace).text - module.fail_json(msg="%s %s" % (error, error_message)) - - module.exit_json(changed=True) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - device=dict(required=True, aliases=['host', 'name']), - landscape=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ? - agentport=dict(type='int', default=161), - url=dict(required=True, aliases=['oneclick_url']), - url_username=dict(required=True, aliases=['oneclick_user']), - url_password=dict(required=True, no_log=True, aliases=['oneclick_password']), - use_proxy=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - ), - required_if=[('state', 'present', ['community'])], - supports_check_mode=True - ) - - if module.params.get('state') == 'present': - add_device() - else: - remove_device() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/spectrum_model_attrs.py b/plugins/modules/monitoring/spectrum_model_attrs.py deleted file mode 100644 index 231352acd6..0000000000 --- a/plugins/modules/monitoring/spectrum_model_attrs.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2021, Tyler Gates -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: spectrum_model_attrs -short_description: Enforce a model's attributes in CA Spectrum. -description: - - This module can be used to enforce a model's attributes in CA Spectrum. -version_added: 2.5.0 -author: - - Tyler Gates (@tgates81) -notes: - - Tested on CA Spectrum version 10.4.2.0.189. - - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. -requirements: - - 'python >= 2.7' -options: - url: - description: - - URL of OneClick server. - type: str - required: true - url_username: - description: - - OneClick username. - type: str - required: true - aliases: [username] - url_password: - description: - - OneClick password. 
- type: str - required: true - aliases: [password] - use_proxy: - description: - - if C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - default: yes - required: false - type: bool - name: - description: - - Model name. - type: str - required: true - type: - description: - - Model type. - type: str - required: true - validate_certs: - description: - - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no - man-in-the-middle attack happening. - type: bool - default: yes - required: false - attributes: - description: - - A list of attribute names and values to enforce. - - All values and parameters are case sensitive and must be provided as strings only. - required: true - type: list - elements: dict - suboptions: - name: - description: - - Attribute name OR hex ID. - - 'Currently defined names are:' - - ' C(App_Manufacturer) (C(0x230683))' - - ' C(CollectionsModelNameString) (C(0x12adb))' - - ' C(Condition) (C(0x1000a))' - - ' C(Criticality) (C(0x1290c))' - - ' C(DeviceType) (C(0x23000e))' - - ' C(isManaged) (C(0x1295d))' - - ' C(Model_Class) (C(0x11ee8))' - - ' C(Model_Handle) (C(0x129fa))' - - ' C(Model_Name) (C(0x1006e))' - - ' C(Modeltype_Handle) (C(0x10001))' - - ' C(Modeltype_Name) (C(0x10000))' - - ' C(Network_Address) (C(0x12d7f))' - - ' C(Notes) (C(0x11564))' - - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' - - ' C(TopologyModelNameString) (C(0x129e7))' - - ' C(sysDescr) (C(0x10052))' - - ' C(sysName) (C(0x10b5b))' - - ' C(Vendor_Name) (C(0x11570))' - - ' C(Description) (C(0x230017))' - - Hex IDs are the direct identifiers in Spectrum and will always work. - - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' - type: str - required: true - value: - description: - - Attribute value. Empty strings should be C("") or C(null). - type: str - required: true -''' - -EXAMPLES = r''' -- name: Enforce maintenance mode for modelxyz01 with a note about why - community.general.spectrum_model_attrs: - url: "http://oneclick.url.com" - username: "{{ oneclick_username }}" - password: "{{ oneclick_password }}" - name: "modelxyz01" - type: "Host_Device" - validate_certs: true - attributes: - - name: "isManaged" - value: "false" - - name: "Notes" - value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" - delegate_to: localhost - register: spectrum_model_attrs_status -''' - -RETURN = r''' -msg: - description: Informational message on the job result. - type: str - returned: always - sample: 'Success' -changed_attrs: - description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. - type: dict - returned: always - sample: { - "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", - "isManaged": "true" - } -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import quote -import json -import re -import xml.etree.ElementTree as ET - - -class spectrum_model_attrs: - def __init__(self, module): - self.module = module - self.url = module.params['url'] - # If the user did not define a full path to the restul space in url: - # params, add what we believe it to be. 
- if not re.search('\\/.+', self.url.split('://')[1]): - self.url = "%s/spectrum/restful" % self.url.rstrip('/') - # Align these with what is defined in OneClick's UI under: - # Locator -> Devices -> By Model Name -> -> - # Attributes tab. - self.attr_map = dict(App_Manufacturer=hex(0x230683), - CollectionsModelNameString=hex(0x12adb), - Condition=hex(0x1000a), - Criticality=hex(0x1290c), - DeviceType=hex(0x23000e), - isManaged=hex(0x1295d), - Model_Class=hex(0x11ee8), - Model_Handle=hex(0x129fa), - Model_Name=hex(0x1006e), - Modeltype_Handle=hex(0x10001), - Modeltype_Name=hex(0x10000), - Network_Address=hex(0x12d7f), - Notes=hex(0x11564), - ServiceDesk_Asset_ID=hex(0x12db9), - TopologyModelNameString=hex(0x129e7), - sysDescr=hex(0x10052), - sysName=hex(0x10b5b), - Vendor_Name=hex(0x11570), - Description=hex(0x230017)) - self.search_qualifiers = [ - "and", "or", "not", "greater-than", "greater-than-or-equals", - "less-than", "less-than-or-equals", "equals", "equals-ignore-case", - "does-not-equal", "does-not-equal-ignore-case", "has-prefix", - "does-not-have-prefix", "has-prefix-ignore-case", - "does-not-have-prefix-ignore-case", "has-substring", - "does-not-have-substring", "has-substring-ignore-case", - "does-not-have-substring-ignore-case", "has-suffix", - "does-not-have-suffix", "has-suffix-ignore-case", - "does-not-have-suffix-ignore-case", "has-pcre", - "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case", - "is-derived-from", "not-is-derived-from"] - - self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") - - self.result = dict(msg="", changed_attrs=dict()) - self.success_msg = "Success" - - def build_url(self, path): - """ - Build a sane Spectrum restful API URL - :param path: The path to append to the restful base - :type path: str - :returns: Complete restful API URL - :rtype: str - """ - - return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/')) - - def attr_id(self, name): - """ - Get attribute hex ID - :param name: The name of the attribute to retrieve the hex ID for - :type name: str - :returns: Translated hex ID of name, or None if no translation found - :rtype: str or None - """ - - try: - return self.attr_map[name] - except KeyError: - return None - - def attr_name(self, _id): - """ - Get attribute name from hex ID - :param _id: The hex ID to lookup a name for - :type _id: str - :returns: Translated name of hex ID, or None if no translation found - :rtype: str or None - """ - - for name, m_id in list(self.attr_map.items()): - if _id == m_id: - return name - return None - - def urlencode(self, string): - """ - URL Encode a string - :param: string: The string to URL encode - :type string: str - :returns: URL encode version of supplied string - :rtype: str - """ - - return quote(string, "<>%-_.!*'():?#/@&+,;=") - - def update_model(self, model_handle, attrs): - """ - Update a model's attributes - :param model_handle: The model's handle ID - :type model_handle: str - :param attrs: Model's attributes to update. {'': ''} - :type attrs: dict - :returns: Nothing; exits on error or updates self.results - :rtype: None - """ - - # Build the update URL - update_url = self.build_url("/model/%s?" % model_handle) - for name, val in list(attrs.items()): - if val is None: - # None values should be converted to empty strings - val = "" - val = self.urlencode(str(val)) - if not update_url.endswith('?'): - update_url += "&" - - update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val) - - # POST to /model to update the attributes, or fail. 
- resp, info = fetch_url(self.module, update_url, method="PUT", - headers={"Content-Type": "application/json", - "Accept": "application/json"}, - use_proxy=self.module.params['use_proxy']) - status_code = info["status"] - if status_code >= 400: - body = info['body'] - else: - body = "" if resp is None else resp.read() - if status_code != 200: - self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body) - self.module.fail_json(**self.result) - - # Load and parse the JSON response and either fail or set results. - json_resp = json.loads(body) - """ - Example success response: - {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}" - Example failure response: - {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}} - """ # noqa - model_resp = json_resp['model-update-response-list']['model-responses']['model'] - if model_resp['@error'] != "Success": - # I'm not 100% confident on the expected failure structure so just - # dump all of ['attribute']. - self.result['msg'] = str(model_resp['attribute']) - self.module.fail_json(**self.result) - - # Should be OK if we get to here, set results. - self.result['msg'] = self.success_msg - self.result['changed_attrs'].update(attrs) - self.result['changed'] = True - - def find_model(self, search_criteria, ret_attrs=None): - """ - Search for a model in /models - :param search_criteria: The XML - :type search_criteria: str - :param ret_attrs: List of attributes by name or ID to return back - (default is Model_Handle) - :type ret_attrs: list - returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val} - rtype: dict - """ - - # If no return attributes were asked for, return Model_Handle. - if ret_attrs is None: - ret_attrs = ['Model_Handle'] - - # Set the XML > tags. If no hex ID - # is found for the name, assume it is already in hex. {name: hex ID} - rqstd_attrs = "" - for ra in ret_attrs: - _id = self.attr_id(ra) or ra - rqstd_attrs += '' % (self.attr_id(ra) or ra) - - # Build the complete XML search query for HTTP POST. - xml = """ - - - - - {0} - - - - {1} - -""".format(search_criteria, rqstd_attrs) - - # POST to /models and fail on errors. - url = self.build_url("/models") - resp, info = fetch_url(self.module, url, data=xml, method="POST", - use_proxy=self.module.params['use_proxy'], - headers={"Content-Type": "application/xml", - "Accept": "application/xml"}) - status_code = info["status"] - if status_code >= 400: - body = info['body'] - else: - body = "" if resp is None else resp.read() - if status_code != 200: - self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body) - self.module.fail_json(**self.result) - - # Parse through the XML response and fail on any detected errors. 
- root = ET.fromstring(body) - total_models = int(root.attrib['total-models']) - error = root.attrib['error'] - model_responses = root.find('ca:model-responses', self.resp_namespace) - if total_models < 1: - self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria - self.module.fail_json(**self.result) - elif total_models > 1: - self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses, - encoding='unicode')) - self.module.fail_json(**self.result) - if error != "EndOfResults": - self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses, - encoding='unicode')) - self.module.fail_json(**self.result) - model = model_responses.find('ca:model', self.resp_namespace) - attrs = model.findall('ca:attribute', self.resp_namespace) - if not attrs: - self.result['msg'] = "No attributes returned." - self.module.fail_json(**self.result) - - # XML response should be successful. Iterate and set each returned - # attribute ID/name and value for return. - ret = dict() - for attr in attrs: - attr_id = attr.get('id') - attr_name = self.attr_name(attr_id) - # Note: all values except empty strings (None) are strings only! - attr_val = attr.text - key = attr_name if attr_name in ret_attrs else attr_id - ret[key] = attr_val - ret_attrs.remove(key) - return ret - - def find_model_by_name_type(self, mname, mtype, ret_attrs=None): - """ - Find a model by name and type - :param mname: Model name - :type mname: str - :param mtype: Model type - :type mtype: str - :param ret_attrs: List of attributes by name or ID to return back - (default is Model_Handle) - :type ret_attrs: list - returns: find_model(): Dictionary mapping of ret_attrs to values: - {ret_attr: ret_val} - rtype: dict - """ - - # If no return attributes were asked for, return Model_Handle. - if ret_attrs is None: - ret_attrs = ['Model_Handle'] - - """This is basically as follows: - - - - - ... - - - - - - - - """ - - # Parent filter tag - filtered_models = ET.Element('filtered-models') - # Logically and - _and = ET.SubElement(filtered_models, 'and') - - # Model Name - MN_equals = ET.SubElement(_and, 'equals') - Model_Name = ET.SubElement(MN_equals, 'attribute', - {'id': self.attr_map['Model_Name']}) - MN_value = ET.SubElement(Model_Name, 'value') - MN_value.text = mname - - # Model Type Name - MTN_equals = ET.SubElement(_and, 'equals') - Modeltype_Name = ET.SubElement(MTN_equals, 'attribute', - {'id': self.attr_map['Modeltype_Name']}) - MTN_value = ET.SubElement(Modeltype_Name, 'value') - MTN_value.text = mtype - - return self.find_model(ET.tostring(filtered_models, - encoding='unicode'), - ret_attrs) - - def ensure_model_attrs(self): - - # Get a list of all requested attribute names/IDs plus Model_Handle and - # use them to query the values currently set. Store finding in a - # dictionary. - req_attrs = [] - for attr in self.module.params['attributes']: - req_attrs.append(attr['name']) - if 'Model_Handle' not in req_attrs: - req_attrs.append('Model_Handle') - - # Survey attributes currently set and store in a dict. - cur_attrs = self.find_model_by_name_type(self.module.params['name'], - self.module.params['type'], - req_attrs) - - # Iterate through the requested attributes names/IDs values pair and - # compare with those currently set. If different, attempt to change. 
- Model_Handle = cur_attrs.pop("Model_Handle") - for attr in self.module.params['attributes']: - req_name = attr['name'] - req_val = attr['value'] - if req_val == "": - # The API will return None on empty string - req_val = None - if cur_attrs[req_name] != req_val: - if self.module.check_mode: - self.result['changed_attrs'][req_name] = req_val - self.result['msg'] = self.success_msg - self.result['changed'] = True - continue - resp = self.update_model(Model_Handle, {req_name: req_val}) - - self.module.exit_json(**self.result) - - -def run_module(): - argument_spec = dict( - url=dict(type='str', required=True), - url_username=dict(type='str', required=True, aliases=['username']), - url_password=dict(type='str', required=True, aliases=['password'], - no_log=True), - validate_certs=dict(type='bool', default=True), - use_proxy=dict(type='bool', default=True), - name=dict(type='str', required=True), - type=dict(type='str', required=True), - attributes=dict(type='list', - required=True, - elements='dict', - options=dict( - name=dict(type='str', required=True), - value=dict(type='str', required=True) - )), - ) - module = AnsibleModule( - supports_check_mode=True, - argument_spec=argument_spec, - ) - - try: - sm = spectrum_model_attrs(module) - sm.ensure_model_attrs() - except Exception as e: - module.fail_json(msg="Failed to ensure attribute(s) on `%s' with " - "exception: %s" % (module.params['name'], - to_native(e))) - - -def main(): - run_module() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py deleted file mode 100644 index fa6bacb951..0000000000 --- a/plugins/modules/monitoring/stackdriver.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: stackdriver -short_description: Send code deploy and annotation events to stackdriver -description: - - Send code deploy and annotation events to Stackdriver -author: "Ben Whaley (@bwhaley)" -options: - key: - type: str - description: - - API key. - required: true - event: - type: str - description: - - The type of event to send, either annotation or deploy - choices: ['annotation', 'deploy'] - required: true - revision_id: - type: str - description: - - The revision of the code that was deployed. Required for deploy events - deployed_by: - type: str - description: - - The person or robot responsible for deploying the code - default: "Ansible" - deployed_to: - type: str - description: - - "The environment code was deployed to. (ie: development, staging, production)" - repository: - type: str - description: - - The repository (or project) deployed - msg: - type: str - description: - - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. - annotated_by: - type: str - description: - - The person or robot who the annotation should be attributed to. - default: "Ansible" - level: - type: str - description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. 
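Both event types handled by this removed stackdriver module boil down to a small JSON POST with an API-key header. A hedged sketch of the request shape (the gateway URL is taken from the module source; `deploy_event` is an illustrative stand-in for the module's send_deploy_event/fetch_url pair):

```python
import json

DEPLOY_API = "https://event-gateway.stackdriver.com/v1/deployevent"


def deploy_event(key, revision_id, deployed_by="Ansible",
                 deployed_to=None, repository=None):
    """Build the URL, headers, and body for a deploy event POST."""
    params = {"revision_id": revision_id, "deployed_by": deployed_by,
              "deployed_to": deployed_to, "repository": repository}
    body = json.dumps({k: v for k, v in params.items() if v})
    headers = {"Content-Type": "application/json",
               "x-stackdriver-apikey": key}
    return DEPLOY_API, headers, body  # hand off to fetch_url(..., method='POST')


url, headers, body = deploy_event("AAAAAA", "abcd123", deployed_to="production")
print(url, body)
```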
- choices: ['INFO', 'WARN', 'ERROR'] - default: 'INFO' - instance_id: - type: str - description: - - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown - event_epoch: - type: str - description: - - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." -''' - -EXAMPLES = ''' -- name: Send a code deploy event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: deploy - deployed_to: production - deployed_by: leeroyjenkins - repository: MyWebApp - revision_id: abcd123 - -- name: Send an annotation event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: annotation - msg: Greetings from Ansible - annotated_by: leeroyjenkins - level: WARN - instance_id: i-abcd1234 -''' - -# =========================================== -# Stackdriver module specific support methods. -# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): - """Send a deploy event to Stackdriver""" - deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" - - params = {} - params['revision_id'] = revision_id - params['deployed_by'] = deployed_by - if deployed_to: - params['deployed_to'] = deployed_to - if repository: - params['repository'] = repository - - return do_send_request(module, deploy_api, params, key) - - -def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): - """Send an annotation event to Stackdriver""" - annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" - - params = {} - params['message'] = msg - if annotated_by: - params['annotated_by'] = annotated_by - if level: - params['level'] = level - if instance_id: - params['instance_id'] = instance_id - if event_epoch: - params['event_epoch'] = event_epoch - - return do_send_request(module, annotation_api, params, key) - - -def do_send_request(module, url, params, key): - data = json.dumps(params) - headers = { - 'Content-Type': 'application/json', - 'x-stackdriver-apikey': key - } - response, info = fetch_url(module, url, headers=headers, data=data, method='POST') - if info['status'] != 200: - module.fail_json(msg="Unable to send msg: %s" % info['msg']) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( # @TODO add types - key=dict(required=True, no_log=True), - event=dict(required=True, choices=['deploy', 'annotation']), - msg=dict(), - revision_id=dict(), - annotated_by=dict(default='Ansible'), - level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), - instance_id=dict(), - event_epoch=dict(), # @TODO int? 
- deployed_by=dict(default='Ansible'), - deployed_to=dict(), - repository=dict(), - ), - supports_check_mode=True - ) - - key = module.params["key"] - event = module.params["event"] - - # Annotation params - msg = module.params["msg"] - annotated_by = module.params["annotated_by"] - level = module.params["level"] - instance_id = module.params["instance_id"] - event_epoch = module.params["event_epoch"] - - # Deploy params - revision_id = module.params["revision_id"] - deployed_by = module.params["deployed_by"] - deployed_to = module.params["deployed_to"] - repository = module.params["repository"] - - ################################################################## - # deploy requires revision_id - # annotation requires msg - # We verify these manually - ################################################################## - - if event == 'deploy': - if not revision_id: - module.fail_json(msg="revision_id required for deploy events") - try: - send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) - except Exception as e: - module.fail_json(msg="unable to send deploy event: %s" % to_native(e), - exception=traceback.format_exc()) - - if event == 'annotation': - if not msg: - module.fail_json(msg="msg required for annotation events") - try: - send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) - except Exception as e: - module.fail_json(msg="unable to send annotation event: %s" % to_native(e), - exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, deployed_by=deployed_by) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/statsd.py b/plugins/modules/monitoring/statsd.py deleted file mode 100644 index b07851641b..0000000000 --- a/plugins/modules/monitoring/statsd.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: statsd -short_description: Send metrics to StatsD -version_added: 2.1.0 -description: - - The C(statsd) module sends metrics to StatsD. - - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/). - - Supported metric types are C(counter) and C(gauge). Currently unsupported metric types are C(timer), C(set), and C(gaugedelta). -author: "Mark Mercado (@mamercad)" -requirements: - - statsd -options: - state: - type: str - description: - - State of the check, only C(present) makes sense. - choices: ["present"] - default: present - host: - type: str - default: localhost - description: - - StatsD host (hostname or IP) to send metrics to. - port: - type: int - default: 8125 - description: - - The port on C(host) which StatsD is listening on. - protocol: - type: str - default: udp - choices: ["udp", "tcp"] - description: - - The transport protocol to send metrics over. - timeout: - type: float - default: 1.0 - description: - - Sender timeout, only applicable if C(protocol) is C(tcp). - metric: - type: str - required: true - description: - - The name of the metric. - metric_type: - type: str - required: true - choices: ["counter", "gauge"] - description: - - The type of metric. - metric_prefix: - type: str - description: - - The prefix to add to the metric. - value: - type: int - required: true - description: - - The value of the metric.
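The statsd module documented above is a thin wrapper over the `statsd` Python package. A minimal sketch of the client calls it ends up making; the daemon address is a placeholder:

```python
# Sketch of the underlying statsd client calls the module wraps.
# Assumes the `statsd` package and a daemon listening on localhost:8125.
from statsd import StatsClient, TCPStatsClient

udp = StatsClient(host='localhost', port=8125, prefix='myapp')
udp.incr('my_counter', 1)             # counter: increment by 1
udp.gauge('my_gauge', 7)              # gauge: set to 7
udp.gauge('my_gauge', 3, delta=True)  # gauge: change by +3

tcp = TCPStatsClient(host='localhost', port=8125, timeout=1.0)
tcp.incr('my_counter', 1)
tcp.close()  # TCP clients hold a socket open and should be closed
```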
- delta: - type: bool - default: false - description: - - If the metric is of type C(gauge), change the value by C(delta). -''' - -EXAMPLES = ''' -- name: Increment the metric my_counter by 1 - community.general.statsd: - host: localhost - port: 9125 - protocol: tcp - metric: my_counter - metric_type: counter - value: 1 - -- name: Set the gauge my_gauge to 7 - community.general.statsd: - host: localhost - port: 9125 - protocol: tcp - metric: my_gauge - metric_type: gauge - value: 7 -''' - - -from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) - -try: - from statsd import StatsClient, TCPStatsClient - HAS_STATSD = True -except ImportError: - HAS_STATSD = False - - -def udp_statsd_client(**client_params): - return StatsClient(**client_params) - - -def tcp_statsd_client(**client_params): - return TCPStatsClient(**client_params) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['present']), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8125), - protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), - timeout=dict(type='float', default=1.0), - metric=dict(type='str', required=True), - metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), - metric_prefix=dict(type='str', default=''), - value=dict(type='int', required=True), - delta=dict(type='bool', default=False), - ), - supports_check_mode=False - ) - - if not HAS_STATSD: - module.fail_json(msg=missing_required_lib('statsd')) - - host = module.params.get('host') - port = module.params.get('port') - protocol = module.params.get('protocol') - timeout = module.params.get('timeout') - metric = module.params.get('metric') - metric_type = module.params.get('metric_type') - metric_prefix = module.params.get('metric_prefix') - value = module.params.get('value') - delta = module.params.get('delta') - - if protocol == 'udp': - client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) - elif protocol == 'tcp': - client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) - - metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric - metric_display_value = '%s (delta=%s)' % (value, delta) if metric_type == 'gauge' else value - - try: - if metric_type == 'counter': - client.incr(metric, value) - elif metric_type == 'gauge': - client.gauge(metric, value, delta=delta) - - except Exception as exc: - module.fail_json(msg='Failed sending to StatsD %s' % str(exc)) - - finally: - if protocol == 'tcp': - client.close() - - module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/monitoring/statusio_maintenance.py deleted file mode 100644 index 10f733d4a8..0000000000 --- a/plugins/modules/monitoring/statusio_maintenance.py +++ /dev/null @@ -1,467 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Benjamin Copeland (@bhcopeland) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: statusio_maintenance -short_description: Create maintenance windows for your status.io dashboard -description: - - Creates a maintenance window for status.io - - Deletes 
a maintenance window for status.io -notes: - - You can use the apiary API url (http://docs.statusio.apiary.io/) to - capture API traffic - - Use start_date and start_time with minutes to set future maintenance window -author: Benjamin Copeland (@bhcopeland) -options: - title: - type: str - description: - - A descriptive title for the maintenance window - default: "A new maintenance window" - desc: - type: str - description: - - Message describing the maintenance window - default: "Created by Ansible" - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "absent"] - api_id: - type: str - description: - - Your unique API ID from status.io - required: true - api_key: - type: str - description: - - Your unique API Key from status.io - required: true - statuspage: - type: str - description: - - Your unique StatusPage ID from status.io - required: true - url: - type: str - description: - - Status.io API URL. A private apiary can be used instead. - default: "https://api.status.io" - components: - type: list - elements: str - description: - - The given name of your component (server name) - aliases: ['component'] - containers: - type: list - elements: str - description: - - The given name of your container (data center) - aliases: ['container'] - all_infrastructure_affected: - description: - - If it affects all components and containers - type: bool - default: 'no' - automation: - description: - - Automatically start and end the maintenance window - type: bool - default: 'no' - maintenance_notify_now: - description: - - Notify subscribers now - type: bool - default: 'no' - maintenance_notify_72_hr: - description: - - Notify subscribers 72 hours before maintenance start time - type: bool - default: 'no' - maintenance_notify_24_hr: - description: - - Notify subscribers 24 hours before maintenance start time - type: bool - default: 'no' - maintenance_notify_1_hr: - description: - - Notify subscribers 1 hour before maintenance start time - type: bool - default: 'no' - maintenance_id: - type: str - description: - - The maintenance id number when deleting a maintenance window - minutes: - type: int - description: - - The length of time in UTC that the maintenance will run - (starting from playbook runtime) - default: 10 - start_date: - type: str - description: - - Date maintenance is expected to start (Month/Day/Year) (UTC) - - End Date is worked out from start_date + minutes - start_time: - type: str - description: - - Time maintenance is expected to start (Hour:Minutes) (UTC) - - End Time is worked out from start_time + minutes -''' - -EXAMPLES = ''' -- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance - community.general.statusio_maintenance: - title: Router Upgrade from ansible - desc: Performing a Router Upgrade - components: server1.example.com - api_id: api_id - api_key: api_key - statuspage: statuspage_id - maintenance_notify_1_hr: True - automation: True - -- name: Create a maintenance window for 60 minutes on server1 and server2 - community.general.statusio_maintenance: - title: Routine maintenance - desc: Some security updates - components: - - server1.example.com - - server2.example.com - minutes: 60 - api_id: api_id - api_key: api_key - statuspage: statuspage_id - maintenance_notify_1_hr: True - automation: True - delegate_to: localhost - -- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center - community.general.statusio_maintenance: - 
title: Data center downtime - desc: Performing a Upgrade to our data center - components: Primary Data Center - api_id: api_id - api_key: api_key - statuspage: statuspage_id - start_date: 01/01/2016 - start_time: 12:00 - minutes: 1440 - -- name: Delete a maintenance window - community.general.statusio_maintenance: - title: Remove a maintenance window - maintenance_id: 561f90faf74bc94a4700087b - statuspage: statuspage_id - api_id: api_id - api_key: api_key - state: absent - -''' -# TODO: Add RETURN documentation. -RETURN = ''' # ''' - -import datetime -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import open_url - - -def get_api_auth_headers(api_id, api_key, url, statuspage): - - headers = { - "x-api-id": api_id, - "x-api-key": api_key, - "Content-Type": "application/json" - } - - try: - response = open_url( - url + "/v2/component/list/" + statuspage, headers=headers) - data = json.loads(response.read()) - if data['status']['message'] == 'Authentication failed': - return 1, None, None, "Authentication failed: " \ - "Check api_id/api_key and statuspage id." - else: - auth_headers = headers - auth_content = data - except Exception as e: - return 1, None, None, to_native(e) - return 0, auth_headers, auth_content, None - - -def get_component_ids(auth_content, components): - host_ids = [] - lower_components = [x.lower() for x in components] - for result in auth_content["result"]: - if result['name'].lower() in lower_components: - data = { - "component_id": result["_id"], - "container_id": result["containers"][0]["_id"] - } - host_ids.append(data) - lower_components.remove(result['name'].lower()) - if len(lower_components): - # items not found in the api - return 1, None, lower_components - return 0, host_ids, None - - -def get_container_ids(auth_content, containers): - host_ids = [] - lower_containers = [x.lower() for x in containers] - for result in auth_content["result"]: - if result["containers"][0]["name"].lower() in lower_containers: - data = { - "component_id": result["_id"], - "container_id": result["containers"][0]["_id"] - } - host_ids.append(data) - lower_containers.remove(result["containers"][0]["name"].lower()) - - if len(lower_containers): - # items not found in the api - return 1, None, lower_containers - return 0, host_ids, None - - -def get_date_time(start_date, start_time, minutes): - returned_date = [] - if start_date and start_time: - try: - datetime.datetime.strptime(start_date, '%m/%d/%Y') - returned_date.append(start_date) - except (NameError, ValueError): - return 1, None, "Not a valid start_date format." - try: - datetime.datetime.strptime(start_time, '%H:%M') - returned_date.append(start_time) - except (NameError, ValueError): - return 1, None, "Not a valid start_time format." 
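The remainder of `get_date_time()` below derives the end of the window by adding `minutes` to the validated start; the arithmetic is a plain strptime/timedelta round trip. A standalone sketch of that step:

```python
# Sketch of the end-of-window arithmetic performed below: parse the
# validated start, add `minutes`, format the end back out.
import datetime

def maintenance_window(start_date, start_time, minutes):
    start = datetime.datetime.strptime(start_time + start_date, '%H:%M%m/%d/%Y')
    end = start + datetime.timedelta(minutes=minutes)
    return [start_date, start_time, end.strftime('%m/%d/%Y'), end.strftime('%H:%M')]

# maintenance_window('01/01/2016', '12:00', 1440)
# -> ['01/01/2016', '12:00', '01/02/2016', '12:00']
```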
- try: - # Work out end date/time based on minutes - date_time_start = datetime.datetime.strptime( - start_time + start_date, '%H:%M%m/%d/%Y') - delta = date_time_start + datetime.timedelta(minutes=minutes) - returned_date.append(delta.strftime("%m/%d/%Y")) - returned_date.append(delta.strftime("%H:%M")) - except (NameError, ValueError): - return 1, None, "Couldn't work out a valid date" - else: - now = datetime.datetime.utcnow() - delta = now + datetime.timedelta(minutes=minutes) - # start_date - returned_date.append(now.strftime("%m/%d/%Y")) - returned_date.append(now.strftime("%H:%M")) - # end_date - returned_date.append(delta.strftime("%m/%d/%Y")) - returned_date.append(delta.strftime("%H:%M")) - return 0, returned_date, None - - -def create_maintenance(auth_headers, url, statuspage, host_ids, - all_infrastructure_affected, automation, title, desc, - returned_date, maintenance_notify_now, - maintenance_notify_72_hr, maintenance_notify_24_hr, - maintenance_notify_1_hr): - returned_dates = [[x] for x in returned_date] - component_id = [] - container_id = [] - for val in host_ids: - component_id.append(val['component_id']) - container_id.append(val['container_id']) - try: - values = json.dumps({ - "statuspage_id": statuspage, - "components": component_id, - "containers": container_id, - "all_infrastructure_affected": str(int(all_infrastructure_affected)), - "automation": str(int(automation)), - "maintenance_name": title, - "maintenance_details": desc, - "date_planned_start": returned_dates[0], - "time_planned_start": returned_dates[1], - "date_planned_end": returned_dates[2], - "time_planned_end": returned_dates[3], - "maintenance_notify_now": str(int(maintenance_notify_now)), - "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), - "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), - "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)) - }) - response = open_url( - url + "/v2/maintenance/schedule", data=values, - headers=auth_headers) - data = json.loads(response.read()) - - if data["status"]["error"] == "yes": - return 1, None, data["status"]["message"] - except Exception as e: - return 1, None, to_native(e) - return 0, None, None - - -def delete_maintenance(auth_headers, url, statuspage, maintenance_id): - try: - values = json.dumps({ - "statuspage_id": statuspage, - "maintenance_id": maintenance_id, - }) - response = open_url( - url=url + "/v2/maintenance/delete", - data=values, - headers=auth_headers) - data = json.loads(response.read()) - if data["status"]["error"] == "yes": - return 1, None, "Invalid maintenance_id" - except Exception as e: - return 1, None, to_native(e) - return 0, None, None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_id=dict(required=True), - api_key=dict(required=True, no_log=True), - statuspage=dict(required=True), - state=dict(required=False, default='present', - choices=['present', 'absent']), - url=dict(default='https://api.status.io', required=False), - components=dict(type='list', elements='str', required=False, default=None, - aliases=['component']), - containers=dict(type='list', elements='str', required=False, default=None, - aliases=['container']), - all_infrastructure_affected=dict(type='bool', default=False, - required=False), - automation=dict(type='bool', default=False, required=False), - title=dict(required=False, default='A new maintenance window'), - desc=dict(required=False, default='Created by Ansible'), - minutes=dict(type='int', required=False, default=10), - 
maintenance_notify_now=dict(type='bool', default=False, - required=False), - maintenance_notify_72_hr=dict(type='bool', default=False, - required=False), - maintenance_notify_24_hr=dict(type='bool', default=False, - required=False), - maintenance_notify_1_hr=dict(type='bool', default=False, - required=False), - maintenance_id=dict(required=False, default=None), - start_date=dict(default=None, required=False), - start_time=dict(default=None, required=False) - ), - supports_check_mode=True, - ) - - api_id = module.params['api_id'] - api_key = module.params['api_key'] - statuspage = module.params['statuspage'] - state = module.params['state'] - url = module.params['url'] - components = module.params['components'] - containers = module.params['containers'] - all_infrastructure_affected = module.params['all_infrastructure_affected'] - automation = module.params['automation'] - title = module.params['title'] - desc = module.params['desc'] - minutes = module.params['minutes'] - maintenance_notify_now = module.params['maintenance_notify_now'] - maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] - maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] - maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] - maintenance_id = module.params['maintenance_id'] - start_date = module.params['start_date'] - start_time = module.params['start_time'] - - if state == "present": - - if api_id and api_key: - (rc, auth_headers, auth_content, error) = \ - get_api_auth_headers(api_id, api_key, url, statuspage) - if rc != 0: - module.fail_json(msg="Failed to get auth keys: %s" % error) - else: - auth_headers = {} - auth_content = {} - - if minutes or start_time and start_date: - (rc, returned_date, error) = get_date_time( - start_date, start_time, minutes) - if rc != 0: - module.fail_json(msg="Failed to set date/time: %s" % error) - - if not components and not containers: - return module.fail_json(msg="A Component or Container must be " - "defined") - elif components and containers: - return module.fail_json(msg="Components and containers cannot " - "be used together") - else: - if components: - (rc, host_ids, error) = get_component_ids(auth_content, - components) - if rc != 0: - module.fail_json(msg="Failed to find component %s" % error) - - if containers: - (rc, host_ids, error) = get_container_ids(auth_content, - containers) - if rc != 0: - module.fail_json(msg="Failed to find container %s" % error) - - if module.check_mode: - module.exit_json(changed=True) - else: - (rc, dummy, error) = create_maintenance( - auth_headers, url, statuspage, host_ids, - all_infrastructure_affected, automation, - title, desc, returned_date, maintenance_notify_now, - maintenance_notify_72_hr, maintenance_notify_24_hr, - maintenance_notify_1_hr) - if rc == 0: - module.exit_json(changed=True, result="Successfully created " - "maintenance") - else: - module.fail_json(msg="Failed to create maintenance: %s" - % error) - - if state == "absent": - - if api_id and api_key: - (rc, auth_headers, auth_content, error) = \ - get_api_auth_headers(api_id, api_key, url, statuspage) - if rc != 0: - module.fail_json(msg="Failed to get auth keys: %s" % error) - else: - auth_headers = {} - - if module.check_mode: - module.exit_json(changed=True) - else: - (rc, dummy, error) = delete_maintenance( - auth_headers, url, statuspage, maintenance_id) - if rc == 0: - module.exit_json( - changed=True, - result="Successfully deleted maintenance" - ) - else: - module.fail_json( - msg="Failed to delete maintenance: %s" 
% error) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/uptimerobot.py b/plugins/modules/monitoring/uptimerobot.py deleted file mode 100644 index 833a7f191e..0000000000 --- a/plugins/modules/monitoring/uptimerobot.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: uptimerobot -short_description: Pause and start Uptime Robot monitoring -description: - - This module will let you start and pause Uptime Robot Monitoring -author: "Nate Kingsley (@nate-kingsley)" -requirements: - - Valid Uptime Robot API Key -options: - state: - type: str - description: - - Define whether or not the monitor should be running or paused. - required: true - choices: [ "started", "paused" ] - monitorid: - type: str - description: - - ID of the monitor to check. - required: true - apikey: - type: str - description: - - Uptime Robot API key. - required: true -notes: - - Support for adding and removing monitors and alert contacts has not yet been implemented. -''' - -EXAMPLES = ''' -- name: Pause the monitor with an ID of 12345 - community.general.uptimerobot: - monitorid: 12345 - apikey: 12345-1234512345 - state: paused - -- name: Start the monitor with an ID of 12345 - community.general.uptimerobot: - monitorid: 12345 - apikey: 12345-1234512345 - state: started -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_text - - -API_BASE = "https://api.uptimerobot.com/" - -API_ACTIONS = dict( - status='getMonitors?', - editMonitor='editMonitor?' 
-) - -API_FORMAT = 'json' -API_NOJSONCALLBACK = 1 -CHANGED_STATE = False -SUPPORTS_CHECK_MODE = False - - -def checkID(module, params): - - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['status'] + data - req, info = fetch_url(module, full_uri) - result = to_text(req.read()) - jsonresult = json.loads(result) - req.close() - return jsonresult - - -def startMonitor(module, params): - - params['monitorStatus'] = 1 - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['editMonitor'] + data - req, info = fetch_url(module, full_uri) - result = to_text(req.read()) - jsonresult = json.loads(result) - req.close() - return jsonresult['stat'] - - -def pauseMonitor(module, params): - - params['monitorStatus'] = 0 - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['editMonitor'] + data - req, info = fetch_url(module, full_uri) - result = to_text(req.read()) - jsonresult = json.loads(result) - req.close() - return jsonresult['stat'] - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['started', 'paused']), - apikey=dict(required=True, no_log=True), - monitorid=dict(required=True) - ), - supports_check_mode=SUPPORTS_CHECK_MODE - ) - - params = dict( - apiKey=module.params['apikey'], - monitors=module.params['monitorid'], - monitorID=module.params['monitorid'], - format=API_FORMAT, - noJsonCallback=API_NOJSONCALLBACK - ) - - check_result = checkID(module, params) - - if check_result['stat'] != "ok": - module.fail_json( - msg="failed", - result=check_result['message'] - ) - - if module.params['state'] == 'started': - monitor_result = startMonitor(module, params) - else: - monitor_result = pauseMonitor(module, params) - - module.exit_json( - msg="success", - result=monitor_result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py deleted file mode 120000 index b6afc9e147..0000000000 --- a/plugins/modules/mqtt.py +++ /dev/null @@ -1 +0,0 @@ -./notification/mqtt.py \ No newline at end of file diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py new file mode 100644 index 0000000000..ab1fe59cdc --- /dev/null +++ b/plugins/modules/mqtt.py @@ -0,0 +1,246 @@ +#!/usr/bin/python + +# Copyright (c) 2013, 2014, Jan-Piet Mens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: mqtt +short_description: Publish a message on an MQTT topic for the IoT +description: + - Publish a message on an MQTT topic. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + server: + type: str + description: + - MQTT broker address/name. + default: localhost + port: + type: int + description: + - MQTT broker port number. + default: 1883 + username: + type: str + description: + - Username to authenticate against the broker. + password: + type: str + description: + - Password for O(username) to authenticate against the broker. + client_id: + type: str + description: + - MQTT client identifier. + - If not specified, it uses a value C(hostname + pid). + topic: + type: str + description: + - MQTT topic name. + required: true + payload: + type: str + description: + - Payload. 
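Stepping back to the uptimerobot module above: `checkID()`, `startMonitor()`, and `pauseMonitor()` all share one request shape, URL-encoding the parameters into a GET query and decoding the JSON body. A hedged sketch of that shared shape; `api_call` is a hypothetical helper and `module` an AnsibleModule instance:

```python
# Sketch of the request shape shared by checkID/startMonitor/pauseMonitor.
# fetch_url honors the module's proxy and TLS settings.
import json

from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url


def api_call(module, action, params):
    full_uri = "https://api.uptimerobot.com/" + action + "?" + urlencode(params)
    response, info = fetch_url(module, full_uri)
    if info['status'] != 200:
        module.fail_json(msg="request failed: %s" % info['msg'])
    result = json.loads(to_text(response.read()))
    response.close()
    return result
```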
The special string V("None") may be used to send a NULL (that is, empty) payload, which is useful to simply + notify with the O(topic) or to clear previously retained messages. + required: true + qos: + type: str + description: + - QoS (Quality of Service). + default: "0" + choices: ["0", "1", "2"] + retain: + description: + - Setting this flag causes the broker to retain (in other words keep) the message so that applications that subsequently + subscribe to the topic can receive the last retained message immediately. + type: bool + default: false + ca_cert: + type: path + description: + - The path to the Certificate Authority certificate files that are to be treated as trusted by this client. If this + is the only option given then the client operates in a similar manner to a web browser. That is to say it requires + the broker to have a certificate signed by the Certificate Authorities in ca_certs and communicates using TLS v1, + but does not attempt any form of authentication. This provides basic network encryption but may not be sufficient + depending on how the broker is configured. + aliases: [ca_certs] + client_cert: + type: path + description: + - The path pointing to the PEM encoded client certificate. If this is set it is used as client information for TLS based + authentication. Support for this feature is broker dependent. + aliases: [certfile] + client_key: + type: path + description: + - The path pointing to the PEM encoded client private key. If this is set it is used as client information for TLS based + authentication. Support for this feature is broker dependent. + aliases: [keyfile] + tls_version: + description: + - Specifies the version of the SSL/TLS protocol to be used. + - By default (if the python version supports it) the highest TLS version is detected. If unavailable, TLS v1 is used. + type: str + choices: + - tlsv1.1 + - tlsv1.2 +requirements: [mosquitto] +notes: + - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt) + Python client (U(https://pypi.org/project/paho-mqtt/)). +author: "Jan-Piet Mens (@jpmens)" +""" + +EXAMPLES = r""" +- name: Publish a message on an MQTT topic + community.general.mqtt: + topic: 'service/ansible/{{ ansible_hostname }}' + payload: 'Hello at {{ ansible_date_time.iso8601 }}' + qos: 0 + retain: false + client_id: ans001 + delegate_to: localhost +""" + +# =========================================== +# MQTT module support methods.
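The publish the mqtt module performs below boils down to paho-mqtt's one-shot helper. A minimal sketch of the same call outside Ansible; broker address and client id are placeholders:

```python
# Sketch: the one-shot paho-mqtt publish the module wraps.
# Assumes the paho-mqtt package and a broker at localhost:1883.
import paho.mqtt.publish as publish

publish.single(
    'service/ansible/demo',         # topic
    payload='Hello from a sketch',  # None sends an empty (NULL) payload
    qos=0,
    retain=False,
    hostname='localhost',
    port=1883,
    client_id='ans001',
    auth=None,  # or {'username': '...', 'password': '...'}
    tls=None,   # or a dict with ca_certs/certfile/keyfile/tls_version
)
```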
+# + +import os +import ssl +import traceback +import platform + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +HAS_PAHOMQTT = True +PAHOMQTT_IMP_ERR = None +try: + import socket + import paho.mqtt.publish as mqtt +except ImportError: + PAHOMQTT_IMP_ERR = traceback.format_exc() + HAS_PAHOMQTT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +# =========================================== +# Main +# + +def main(): + tls_map = {} + + try: + tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2 + except AttributeError: + pass + + try: + tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1 + except AttributeError: + pass + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='localhost'), + port=dict(default=1883, type='int'), + topic=dict(required=True), + payload=dict(required=True), + client_id=dict(), + qos=dict(default="0", choices=["0", "1", "2"]), + retain=dict(default=False, type='bool'), + username=dict(), + password=dict(no_log=True), + ca_cert=dict(type='path', aliases=['ca_certs']), + client_cert=dict(type='path', aliases=['certfile']), + client_key=dict(type='path', aliases=['keyfile']), + tls_version=dict(choices=['tlsv1.1', 'tlsv1.2']) + ), + supports_check_mode=True + ) + + if not HAS_PAHOMQTT: + module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR) + + server = module.params.get("server", 'localhost') + port = module.params.get("port", 1883) + topic = module.params.get("topic") + payload = module.params.get("payload") + client_id = module.params.get("client_id", '') + qos = int(module.params.get("qos", 0)) + retain = module.params.get("retain") + username = module.params.get("username", None) + password = module.params.get("password", None) + ca_certs = module.params.get("ca_cert", None) + certfile = module.params.get("client_cert", None) + keyfile = module.params.get("client_key", None) + tls_version = module.params.get("tls_version", None) + + if client_id is None: + client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) + + if payload and payload == 'None': + payload = None + + auth = None + if username is not None: + auth = {'username': username, 'password': password} + + tls = None + if ca_certs is not None: + if tls_version: + tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23) + else: + if LooseVersion(platform.python_version()) <= LooseVersion("3.5.2"): + # Specifying `None` on later versions of python seems sufficient to + # instruct python to autonegotiate the SSL/TLS connection. On versions + # 3.5.2 and lower though we need to specify the version. + # + # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was + # not available until 3.5.3. 
+ tls_version = ssl.PROTOCOL_SSLv23 + + tls = { + 'ca_certs': ca_certs, + 'certfile': certfile, + 'keyfile': keyfile, + 'tls_version': tls_version, + } + + try: + mqtt.single( + topic, + payload, + qos=qos, + retain=retain, + client_id=client_id, + hostname=server, + port=port, + auth=auth, + tls=tls + ) + except Exception as e: + module.fail_json( + msg="unable to publish to MQTT broker %s" % to_native(e), + exception=traceback.format_exc() + ) + + module.exit_json(changed=False, topic=topic) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py deleted file mode 120000 index 35d017debb..0000000000 --- a/plugins/modules/mssql_db.py +++ /dev/null @@ -1 +0,0 @@ -./database/mssql/mssql_db.py \ No newline at end of file diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py new file mode 100644 index 0000000000..767cb2f554 --- /dev/null +++ b/plugins/modules/mssql_db.py @@ -0,0 +1,238 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Vedit Firat Arig +# Outline and parts are reused from Mark Theunissen's mysql_db module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: mssql_db +short_description: Add or remove MSSQL databases from a remote host +description: + - Add or remove MSSQL databases from a remote host. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the database to add or remove. + required: true + aliases: [db] + type: str + login_user: + description: + - The username used to authenticate with. + type: str + default: '' + login_password: + description: + - The password used to authenticate with. + type: str + default: '' + login_host: + description: + - Host running the database. + type: str + required: true + login_port: + description: + - Port of the MSSQL server. Requires login_host to be defined as other than localhost if login_port is used. + default: '1433' + type: str + state: + description: + - The database state. + default: present + choices: ["present", "absent", "import"] + type: str + target: + description: + - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL files (C(.sql)) are + supported. + type: str + autocommit: + description: + - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since + some content cannot be changed within a transaction. + type: bool + default: false +notes: + - Requires the pymssql Python package on the remote host. For Ubuntu, this is as easy as pip install pymssql (See M(ansible.builtin.pip)).
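Per the note above, the module depends on `pymssql`; the connection it opens further down against the `master` database amounts to the following, with host, port, and credentials as placeholders:

```python
# Sketch of the pymssql connection mssql_db opens. Placeholder credentials.
import pymssql

login_host, login_port = 'db.example.com', '14330'
login_querystring = login_host
if login_port != '1433':
    # pymssql takes a non-default port as "host:port"
    login_querystring = '%s:%s' % (login_host, login_port)

conn = pymssql.connect(user='sa', password='secret',
                       host=login_querystring, database='master')
cursor = conn.cursor()
cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", 'model')
print(bool(cursor.rowcount))
conn.close()
```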
+requirements: + - pymssql +author: Vedit Firat Arig (@vedit) +""" + +EXAMPLES = r""" +- name: Create a new database with name 'jackdata' + community.general.mssql_db: + name: jackdata + state: present + +# Copy database dump file to remote host and restore it to database 'my_db' +- name: Copy database dump file to remote host + ansible.builtin.copy: + src: dump.sql + dest: /tmp + +- name: Restore the dump file to database 'my_db' + community.general.mssql_db: + name: my_db + state: import + target: /tmp/dump.sql +""" + +RETURN = r""" +# +""" + +import os +import traceback + +PYMSSQL_IMP_ERR = None +try: + import pymssql +except ImportError: + PYMSSQL_IMP_ERR = traceback.format_exc() + mssql_found = False +else: + mssql_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def db_exists(conn, cursor, db): + cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db) + conn.commit() + return bool(cursor.rowcount) + + +def db_create(conn, cursor, db): + cursor.execute("CREATE DATABASE [%s]" % db) + return db_exists(conn, cursor, db) + + +def db_delete(conn, cursor, db): + try: + cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db) + except Exception: + pass + cursor.execute("DROP DATABASE [%s]" % db) + return not db_exists(conn, cursor, db) + + +def db_import(conn, cursor, module, db, target): + if os.path.isfile(target): + with open(target, 'r') as backup: + sqlQuery = "USE [%s]\n" % db + for line in backup: + if line is None: + break + elif line.startswith('GO'): + cursor.execute(sqlQuery) + sqlQuery = "USE [%s]\n" % db + else: + sqlQuery += line + cursor.execute(sqlQuery) + conn.commit() + return 0, "import successful", "" + else: + return 1, "cannot find target file", "cannot find target file" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['db']), + login_user=dict(default=''), + login_password=dict(default='', no_log=True), + login_host=dict(required=True), + login_port=dict(default='1433'), + target=dict(), + autocommit=dict(type='bool', default=False), + state=dict( + default='present', choices=['present', 'absent', 'import']) + ) + ) + + if not mssql_found: + module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR) + + db = module.params['name'] + state = module.params['state'] + autocommit = module.params['autocommit'] + target = module.params["target"] + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + + login_querystring = login_host + if login_port != "1433": + login_querystring = "%s:%s" % (login_host, login_port) + + if login_user != "" and login_password == "": + module.fail_json(msg="when supplying login_user arguments login_password must be provided") + + try: + conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') + cursor = conn.cursor() + except Exception as e: + if "Unknown database" in str(e): + errno, errstr = e.args + module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) + else: + module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " + "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + + conn.autocommit(True) + changed = False + + if db_exists(conn, cursor, db): + if state == "absent": + try: + changed = db_delete(conn, cursor, db) + except Exception 
as e: + module.fail_json(msg="error deleting database: " + str(e)) + elif state == "import": + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg=stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + else: + if state == "present": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + elif state == "import": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg=stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + + module.exit_json(changed=changed, db=db) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py deleted file mode 120000 index 9df43f1eb0..0000000000 --- a/plugins/modules/mssql_script.py +++ /dev/null @@ -1 +0,0 @@ -./database/mssql/mssql_script.py \ No newline at end of file diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py new file mode 100644 index 0000000000..ab367203c9 --- /dev/null +++ b/plugins/modules/mssql_script.py @@ -0,0 +1,410 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Kris Budde 0: + queries.append(''.join(current_batch)) + + result['changed'] = True + if module.check_mode: + module.exit_json(**result) + + query_results = [] + for query in queries: + # Catch and exit on any bad query errors + try: + cursor.execute(query, sql_params) + qry_result = [] + rows = cursor.fetchall() + while rows: + qry_result.append(rows) + rows = cursor.fetchall() + query_results.append(qry_result) + except Exception as e: + # We know we executed the statement so this error just means we have no resultset + # which is ok (eg UPDATE/INSERT) + if ( + type(e).__name__ == 'OperationalError' and + str(e) == 'Statement not executed or executed statement has no resultset' + ): + query_results.append([]) + else: + # Rollback transaction before failing the module in case of error + if transaction: + conn.rollback() + error_msg = '%s: %s' % (type(e).__name__, str(e)) + module.fail_json(msg="query failed", query=query, error=error_msg, **result) + + # Commit transaction before exiting the module in case of no error + if transaction: + conn.commit() + + # ensure that the result is json serializable + qry_results = json.loads(json.dumps(query_results, default=clean_output)) + + result[query_results_key] = qry_results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py deleted file mode 120000 index 9ad9accd44..0000000000 --- a/plugins/modules/nagios.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/nagios.py \ No newline at end of file diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py new file mode 100644 index 0000000000..ecf15d764a --- /dev/null +++ b/plugins/modules/nagios.py @@ -0,0 +1,1266 @@ +#!/usr/bin/python +# +# This file is largely copied from the Nagios module included in the +# Func project. Original copyright follows: +# +# func-nagios - Schedule downtime and enables/disable notifications +# Copyright 2011, Red Hat, Inc. 
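Most of the new mssql_script module is elided from this hunk, but the visible tail accumulates statements into `current_batch` and flushes completed batches into `queries`, which matches the usual GO-separator convention of T-SQL scripts. A simplified sketch of that batching; the module's actual splitter may differ:

```python
# Simplified sketch: split a T-SQL script into GO-separated batches,
# flushing the accumulated batch whenever a lone GO line is seen.
def split_batches(script):
    queries, current_batch = [], []
    for line in script.splitlines(keepends=True):
        if line.strip().upper() == 'GO':  # separator, never sent to the server
            if current_batch:
                queries.append(''.join(current_batch))
            current_batch = []
        else:
            current_batch.append(line)
    if len(current_batch) > 0:  # trailing batch without a final GO
        queries.append(''.join(current_batch))
    return queries

# split_batches("SELECT 1\nGO\nSELECT 2\n") -> ['SELECT 1\n', 'SELECT 2\n']
```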
+# Tim Bielawa +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: nagios +short_description: Perform common tasks in Nagios related to downtime and notifications +description: + - 'The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts.' + - The C(nagios) module is not idempotent. + - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) + variable to refer to the host the playbook is currently running on. + - The module executes commands and needs to be run directly on the Nagios server + with a user that has appropriate access rights. It does not use Nagios' HTTP API. + - Searches for a I(nagios.cfg) in I(/etc/nagios), I(/etc/nagios2), I(/etc/nagios3), I(/usr/local/etc/nagios), + I(/usr/local/groundwork/nagios/etc), I(/omd/sites/oppy/tmp/nagios), I(/usr/local/nagios/etc), + I(/usr/local/nagios), I(/opt/nagios/etc), and I(/opt/nagios), + or a I(icinga.cfg) in I(/etc/icinga) and I(/usr/local/icinga/etc). + (The Nagios configuration file should be readable by the Ansible user.) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + action: + description: + - Action to take. + - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0. + required: true + choices: + - downtime + - delete_downtime + - enable_alerts + - disable_alerts + - silence + - unsilence + - silence_nagios + - unsilence_nagios + - command + - servicegroup_service_downtime + - servicegroup_host_downtime + - acknowledge + - forced_check + type: str + host: + description: + - Host to operate on in Nagios. + type: str + cmdfile: + description: + - Path to the nagios I(command file) (FIFO pipe). Only required if auto-detection fails. + type: str + author: + description: + - Author to leave downtime comments as. Only used when O(action) is V(downtime) or V(acknowledge). + type: str + default: Ansible + comment: + description: + - Comment when O(action) is V(downtime) or V(acknowledge). + type: str + default: Scheduling downtime + start: + description: + - When downtime should start, in C(time_t) format (epoch seconds). + version_added: '0.2.0' + type: str + minutes: + description: + - Minutes to schedule downtime for. + - Only usable with O(action=downtime). + type: int + default: 30 + services: + description: + - What to manage downtime/alerts for. Separate multiple services with commas. + - 'B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts).' + - When specifying what O(services) to handle there is a special service value, V(host), which handles alerts/downtime/acknowledge + for the I(host itself), for example O(services=host). This keyword may not be given with other services at the same + time. B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the + services running on it.) To schedule downtime for all O(services) on particular host use keyword V(all), for example + O(services=all). + - Before community.general 11.2.0, one could specify multiple services at once by separating them with commas, for example + O(services=httpd,nfs,puppet). 
Since community.general 11.2.0, there can be spaces around the commas, and an actual + list can be provided. + aliases: ["service"] + type: list + elements: str + servicegroup: + description: + - The Servicegroup we want to set downtimes/alerts for. + - B(Required) option when using the V(servicegroup_service_downtime) and V(servicegroup_host_downtime) O(action). + type: str + command: + description: + - The raw command to send to Nagios, which should not include the submitted time header or the line-feed. + - B(Required) option when O(action=command). + type: str + +author: "Tim Bielawa (@tbielawa)" +""" + +EXAMPLES = r""" +- name: Set 30 minutes of apache downtime + community.general.nagios: + action: downtime + minutes: 30 + service: httpd + host: '{{ inventory_hostname }}' + +- name: Schedule an hour of HOST downtime + community.general.nagios: + action: downtime + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + +- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00 + community.general.nagios: + action: downtime + start: 1555984800 + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + +- name: Schedule an hour of HOST downtime, with a comment describing the reason + community.general.nagios: + action: downtime + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + comment: Rebuilding machine + +- name: Schedule downtime for ALL services on HOST + community.general.nagios: + action: downtime + minutes: 45 + service: all + host: '{{ inventory_hostname }}' + +- name: Schedule downtime for a few services + community.general.nagios: + action: downtime + services: frob,foobar,qeuz + host: '{{ inventory_hostname }}' + +- name: Set 30 minutes downtime for all services in servicegroup foo + community.general.nagios: + action: servicegroup_service_downtime + minutes: 30 + servicegroup: foo + host: '{{ inventory_hostname }}' + +- name: Set 30 minutes downtime for all hosts in servicegroup foo + community.general.nagios: + action: servicegroup_host_downtime + minutes: 30 + servicegroup: foo + host: '{{ inventory_hostname }}' + +- name: Delete all downtime for a given host + community.general.nagios: + action: delete_downtime + host: '{{ inventory_hostname }}' + service: all + +- name: Delete all downtime for HOST with a particular comment + community.general.nagios: + action: delete_downtime + host: '{{ inventory_hostname }}' + service: host + comment: Planned maintenance + +- name: Acknowledge a HOST with a particular comment + community.general.nagios: + action: acknowledge + service: host + host: '{{ inventory_hostname }}' + comment: 'power outage - see casenr 12345' + +- name: Acknowledge an active service problem for the httpd service with a particular comment + community.general.nagios: + action: acknowledge + service: httpd + host: '{{ inventory_hostname }}' + comment: 'service crashed - see casenr 12345' + +- name: Reset a passive service check for snmp trap + community.general.nagios: + action: forced_check + service: snmp + host: '{{ inventory_hostname }}' + +- name: Force an active service check for the httpd service + community.general.nagios: + action: forced_check + service: httpd + host: '{{ inventory_hostname }}' + +- name: Force an active service check for all services of a particular host + community.general.nagios: + action: forced_check + service: all + host: '{{ inventory_hostname }}' + +- name: Force an active service check for a particular host + community.general.nagios: + action: forced_check + service:
host + host: '{{ inventory_hostname }}' + +- name: Enable SMART disk alerts + community.general.nagios: + action: enable_alerts + service: smart + host: '{{ inventory_hostname }}' + +- name: Disable httpd and nfs alerts + community.general.nagios: + action: disable_alerts + service: + - httpd + - nfs + host: '{{ inventory_hostname }}' + +- name: Disable HOST alerts + community.general.nagios: + action: disable_alerts + service: host + host: '{{ inventory_hostname }}' + +- name: Silence ALL alerts + community.general.nagios: + action: silence + host: '{{ inventory_hostname }}' + +- name: Unsilence all alerts + community.general.nagios: + action: unsilence + host: '{{ inventory_hostname }}' + +- name: Shut up nagios + community.general.nagios: + action: silence_nagios + +- name: Annoy me nagios + community.general.nagios: + action: unsilence_nagios + +- name: Command something + community.general.nagios: + action: command + command: DISABLE_FAILURE_PREDICTION +""" + +import time +import os.path +import stat + +from ansible.module_utils.basic import AnsibleModule + + +def which_cmdfile(): + locations = [ + # rhel + '/etc/nagios/nagios.cfg', + # debian + '/etc/nagios3/nagios.cfg', + # older debian + '/etc/nagios2/nagios.cfg', + # bsd, solaris + '/usr/local/etc/nagios/nagios.cfg', + # groundwork it monitoring + '/usr/local/groundwork/nagios/etc/nagios.cfg', + # open monitoring distribution + '/omd/sites/oppy/tmp/nagios/nagios.cfg', + # ??? + '/usr/local/nagios/etc/nagios.cfg', + '/usr/local/nagios/nagios.cfg', + '/opt/nagios/etc/nagios.cfg', + '/opt/nagios/nagios.cfg', + # icinga on debian/ubuntu + '/etc/icinga/icinga.cfg', + # icinga installed from source (default location) + '/usr/local/icinga/etc/icinga.cfg', + ] + + for path in locations: + if os.path.exists(path): + for line in open(path): + if line.startswith('command_file'): + return line.split('=')[1].strip() + + return None + + +def main(): + ACTION_CHOICES = [ + 'downtime', + 'delete_downtime', + 'silence', + 'unsilence', + 'enable_alerts', + 'disable_alerts', + 'silence_nagios', + 'unsilence_nagios', + 'command', + 'servicegroup_host_downtime', + 'servicegroup_service_downtime', + 'acknowledge', + 'forced_check', + ] + + module = AnsibleModule( + argument_spec=dict( + action=dict(type='str', required=True, choices=ACTION_CHOICES), + author=dict(type='str', default='Ansible'), + comment=dict(type='str', default='Scheduling downtime'), + host=dict(type='str'), + servicegroup=dict(type='str'), + start=dict(type='str'), + minutes=dict(type='int', default=30), + cmdfile=dict(type='str', default=which_cmdfile()), + services=dict(type='list', elements='str', aliases=['service']), + command=dict(type='str'), + ), + required_if=[ + ('action', 'downtime', ['host', 'services']), + ('action', 'delete_downtime', ['host', 'services']), + ('action', 'silence', ['host']), + ('action', 'unsilence', ['host']), + ('action', 'enable_alerts', ['host', 'services']), + ('action', 'disable_alerts', ['host', 'services']), + ('action', 'command', ['command']), + ('action', 'servicegroup_host_downtime', ['host', 'servicegroup']), + ('action', 'servicegroup_service_downtime', ['host', 'servicegroup']), + ('action', 'acknowledge', ['host', 'services']), + ('action', 'forced_check', ['host', 'services']), + ], + ) + + if not module.params['cmdfile']: + module.fail_json(msg='unable to locate nagios.cfg') + + ansible_nagios = Nagios(module, **module.params) + if module.check_mode: + module.exit_json(changed=True) + else: + ansible_nagios.act() + + +class
Nagios(object): + """ + Perform common tasks in Nagios related to downtime and + notifications. + + The complete set of external commands Nagios handles is documented + on their website: + + http://old.nagios.org/developerinfo/externalcommands/commandlist.php + + Note that in the case of `schedule_svc_downtime`, + `enable_svc_notifications`, and `disable_svc_notifications`, the + service argument should be passed as a list. + """ + + def __init__(self, module, **kwargs): + self.module = module + self.action = kwargs['action'] + self.author = kwargs['author'] + self.comment = kwargs['comment'] + self.host = kwargs['host'] + self.servicegroup = kwargs['servicegroup'] + if kwargs['start'] is not None: + self.start = int(kwargs['start']) + else: + self.start = None + self.minutes = kwargs['minutes'] + self.cmdfile = kwargs['cmdfile'] + self.command = kwargs['command'] + + if kwargs['services'] is None : + self.services = kwargs['services'] + elif len(kwargs['services']) == 1 and kwargs['services'][0] in ['host', 'all']: + self.services = kwargs['services'][0] + else: + self.services = kwargs['services'] + + self.command_results = [] + + def _now(self): + """ + The time in seconds since 12:00:00AM Jan 1, 1970 + """ + + return int(time.time()) + + def _write_command(self, cmd): + """ + Write the given command to the Nagios command file + """ + + if not os.path.exists(self.cmdfile): + self.module.fail_json(msg='nagios command file does not exist', + cmdfile=self.cmdfile) + if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode): + self.module.fail_json(msg='nagios command file is not a fifo file', + cmdfile=self.cmdfile) + try: + with open(self.cmdfile, 'w') as fp: + fp.write(cmd) + fp.flush() + self.command_results.append(cmd.strip()) + except IOError: + self.module.fail_json(msg='unable to write to nagios command file', + cmdfile=self.cmdfile) + + def _fmt_dt_str(self, cmd, host, duration, author=None, + comment=None, start=None, + svc=None, fixed=1, trigger=0): + """ + Format an external-command downtime string. + + cmd - Nagios command ID + host - Host schedule downtime on + duration - Minutes to schedule downtime for + author - Name to file the downtime as + comment - Reason for running this command (upgrade, reboot, etc) + start - Start of downtime in seconds since 12:00AM Jan 1 1970 + Default is to use the entry time (now) + svc - Service to schedule downtime for, omit when for host downtime + fixed - Start now if 1, start when a problem is detected if 0 + trigger - Optional ID of event to start downtime from. Leave as 0 for + fixed downtime. + + Syntax: [submitted] COMMAND;;[] + ;;;;;; + + """ + + entry_time = self._now() + if start is None: + start = entry_time + + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + duration_s = (duration * 60) + end = start + duration_s + + if not author: + author = self.author + + if not comment: + comment = self.comment + + if svc is not None: + dt_args = [svc, str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + else: + # Downtime for a host if no svc specified + dt_args = [str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + + dt_arg_str = ";".join(dt_args) + dt_str = hdr + dt_arg_str + "\n" + + return dt_str + + def _fmt_ack_str(self, cmd, host, author=None, + comment=None, svc=None, sticky=0, notify=1, persistent=0): + """ + Format an external-command acknowledge string. 
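`_fmt_dt_str()` above assembles the classic semicolon-delimited Nagios external-command line. For a concrete instance, here is a host downtime command rendered by hand in that layout; the hostname, comment, and timestamps are illustrative:

```python
# Sketch: a SCHEDULE_HOST_DOWNTIME line in the layout _fmt_dt_str() emits,
# i.e. [entry_time] CMD;host;start;end;fixed;trigger;duration;author;comment
import time

entry = int(time.time())
start = entry               # start now
duration_s = 60 * 60        # one hour, in seconds
end = start + duration_s
fixed, trigger = 1, 0       # fixed window, no triggering downtime ID

line = "[%d] SCHEDULE_HOST_DOWNTIME;web01;%d;%d;%d;%d;%d;Ansible;Rebuilding machine\n" % (
    entry, start, end, fixed, trigger, duration_s)
# _write_command() would then write `line` into the nagios.cmd FIFO, e.g.:
# [1555984800] SCHEDULE_HOST_DOWNTIME;web01;1555984800;1555988400;1;0;3600;Ansible;Rebuilding machine
```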
+ + cmd - Nagios command ID + host - Host schedule downtime on + author - Name to file the downtime as + comment - Reason for running this command (upgrade, reboot, etc) + svc - Service to schedule downtime for, omit when for host downtime + sticky - the acknowledgement will remain until the host returns to an UP state if set to 1 + notify - a notification will be sent out to contacts + persistent - survive across restarts of the Nagios process + + Syntax: [submitted] COMMAND;;[] + ;;;; + """ + + entry_time = self._now() + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + + if not author: + author = self.author + + if not comment: + comment = self.comment + + if svc is not None: + ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment] + else: + # Downtime for a host if no svc specified + ack_args = [str(sticky), str(notify), str(persistent), author, comment] + + ack_arg_str = ";".join(ack_args) + ack_str = hdr + ack_arg_str + "\n" + + return ack_str + + def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None): + """ + Format an external-command downtime deletion string. + + cmd - Nagios command ID + host - Host to remove scheduled downtime from + comment - Reason downtime was added (upgrade, reboot, etc) + start - Start of downtime in seconds since 12:00AM Jan 1 1970 + svc - Service to remove downtime for, omit to remove all downtime for the host + + Syntax: [submitted] COMMAND;; + [];[];[] + """ + + entry_time = self._now() + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + + if comment is None: + comment = self.comment + + dt_del_args = [] + if svc is not None: + dt_del_args.append(svc) + else: + dt_del_args.append('') + + if start is not None: + dt_del_args.append(str(start)) + else: + dt_del_args.append('') + + if comment is not None: + dt_del_args.append(comment) + else: + dt_del_args.append('') + + dt_del_arg_str = ";".join(dt_del_args) + dt_del_str = hdr + dt_del_arg_str + "\n" + + return dt_del_str + + def _fmt_chk_str(self, cmd, host, svc=None, start=None): + """ + Format an external-command forced host or service check string. + + cmd - Nagios command ID + host - Host to check service from + svc - Service to check + start - check time + + Syntax: [submitted] COMMAND;;[]; + """ + + entry_time = self._now() + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + + if start is None: + start = entry_time + 3 + + if svc is None: + chk_args = [str(start)] + else: + chk_args = [svc, str(start)] + + chk_arg_str = ";".join(chk_args) + chk_str = hdr + chk_arg_str + "\n" + + return chk_str + + def _fmt_notif_str(self, cmd, host=None, svc=None): + """ + Format an external-command notification string. + + cmd - Nagios command ID. + host - Host to en/disable notifications on.. A value is not required + for global downtime + svc - Service to schedule downtime for. A value is not required + for host downtime. + + Syntax: [submitted] COMMAND;[;] + """ + + entry_time = self._now() + notif_str = "[%s] %s" % (entry_time, cmd) + if host is not None: + notif_str += ";%s" % host + + if svc is not None: + notif_str += ";%s" % svc + + notif_str += "\n" + + return notif_str + + def schedule_svc_downtime(self, host, services=None, minutes=30, start=None): + """ + This command is used to schedule downtime for a particular + service. + + During the specified downtime, Nagios will not send + notifications out about the service. 
+    def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for a particular
+        service.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the service.
+
+        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+        <comment>
+        """
+
+        cmd = "SCHEDULE_SVC_DOWNTIME"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+            self._write_command(dt_cmd_str)
+
+    def schedule_host_downtime(self, host, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for a particular
+        host.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the host.
+
+        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+        <fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def acknowledge_svc_problem(self, host, services=None):
+        """
+        This command is used to acknowledge a particular
+        service problem.
+
+        By acknowledging the current problem, future notifications
+        for the same service state are disabled.
+
+        Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+        <sticky>;<notify>;<persistent>;<author>;<comment>
+        """
+
+        cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+            self._write_command(ack_cmd_str)
+
+    def acknowledge_host_problem(self, host):
+        """
+        This command is used to acknowledge a particular
+        host problem.
+
+        By acknowledging the current problem, future notifications
+        for the same host state are disabled.
+
+        Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+        <persistent>;<author>;<comment>
+        """
+
+        cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+        ack_cmd_str = self._fmt_ack_str(cmd, host)
+        self._write_command(ack_cmd_str)
+
+    def schedule_forced_host_check(self, host):
+        """
+        This command schedules a forced active check for a particular host.
+
+        Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+        """
+
+        cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+        chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+        self._write_command(chk_cmd_str)
+
+    def schedule_forced_host_svc_check(self, host):
+        """
+        This command schedules a forced active check for all services
+        associated with a particular host.
+
+        Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+        """
+
+        cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+        chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+        self._write_command(chk_cmd_str)
+
+    def schedule_forced_svc_check(self, host, services=None):
+        """
+        This command schedules a forced active check for a particular
+        service.
+
+        Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+        """
+
+        cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+            self._write_command(chk_cmd_str)
+
+    def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for
+        all services associated with a particular host.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Syntax: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+        <fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+        self._write_command(dt_cmd_str)
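Every helper above ultimately funnels its string into `_write_command`, which only accepts a FIFO. A self-contained sketch of that delivery path follows; the command-file path is an assumption and varies per Nagios install:

```python
import os
import stat
import time

CMDFILE = '/usr/local/nagios/var/rw/nagios.cmd'  # assumption: location varies

cmd = '[%d] SCHEDULE_FORCED_HOST_CHECK;web01;%d\n' % (time.time(), time.time() + 3)

# Same guard the module applies: the command file must exist and be a FIFO.
if os.path.exists(CMDFILE) and stat.S_ISFIFO(os.stat(CMDFILE).st_mode):
    with open(CMDFILE, 'w') as fp:
        fp.write(cmd)
```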
+    def delete_host_downtime(self, host, services=None, comment=None):
+        """
+        This command is used to remove scheduled downtime for a particular
+        host.
+
+        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+        [<service_description>];[<start_time>];[<comment>]
+        """
+
+        cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+        if services is None:
+            dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+            self._write_command(dt_del_cmd_str)
+        else:
+            for service in services:
+                dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+                self._write_command(dt_del_cmd_str)
+
+    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all hosts in a
+        particular hostgroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the hosts.
+
+        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all services in
+        a particular hostgroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Note that scheduling downtime for services does not
+        automatically schedule downtime for the hosts those services
+        are associated with.
+
+        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all hosts in a
+        particular servicegroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the hosts.
+
+        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+        <comment>
+        """
+
+        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all services in
+        a particular servicegroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Note that scheduling downtime for services does not
+        automatically schedule downtime for the hosts those services
+        are associated with.
+
+        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+        <comment>
+        """
+
+        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def disable_host_svc_notifications(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for all services on the specified host.
+
+        Note that this command does not disable notifications from
+        being sent out about the host.
+
+        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
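Because `_fmt_dt_del_str` always emits all three optional fields, omitted values appear as empty slots between semicolons. Two hand-built examples with invented names:

```python
import time

entry = int(time.time())
# All downtime for web01 (service, start and comment left empty):
print("[%d] DEL_DOWNTIME_BY_HOST_NAME;web01;;;" % entry)
# Only the httpd downtime that was filed with the comment "patching":
print("[%d] DEL_DOWNTIME_BY_HOST_NAME;web01;httpd;;patching" % entry)
```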
+    def disable_host_notifications(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for the specified host.
+
+        Note that this command does not disable notifications for
+        services associated with this host.
+
+        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "DISABLE_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
+
+    def disable_svc_notifications(self, host, services=None):
+        """
+        This command is used to prevent notifications from being sent
+        out for the specified service.
+
+        Note that this command does not disable notifications from
+        being sent out about the host.
+
+        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+        """
+
+        cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            notif_str = self._fmt_notif_str(cmd, host, svc=service)
+            self._write_command(notif_str)
+
+    def disable_servicegroup_host_notifications(self, servicegroup):
+        """
+        This command is used to prevent notifications from being sent
+        out for all hosts in the specified servicegroup.
+
+        Note that this command does not disable notifications for
+        services associated with hosts in this service group.
+
+        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        self._write_command(notif_str)
+
+    def disable_servicegroup_svc_notifications(self, servicegroup):
+        """
+        This command is used to prevent notifications from being sent
+        out for all services in the specified servicegroup.
+
+        Note that this does not prevent notifications from being sent
+        out about the hosts in this servicegroup.
+
+        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        self._write_command(notif_str)
+
+    def disable_hostgroup_host_notifications(self, hostgroup):
+        """
+        Disables notifications for all hosts in a particular
+        hostgroup.
+
+        Note that this does not disable notifications for the services
+        associated with the hosts in the hostgroup - see the
+        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        self._write_command(notif_str)
+
+    def disable_hostgroup_svc_notifications(self, hostgroup):
+        """
+        Disables notifications for all services associated with hosts
+        in a particular hostgroup.
+
+        Note that this does not disable notifications for the hosts in
+        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+        command for that.
+
+        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        self._write_command(notif_str)
+
+    def enable_host_notifications(self, host):
+        """
+        Enables notifications for a particular host.
+
+        Note that this command does not enable notifications for
+        services associated with this host.
+
+        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "ENABLE_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
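Note that `disable_svc_notifications` (like the other service-level helpers) writes one external command per service rather than batching; reduced to its core, with hypothetical names:

```python
# One command per service, as the service-level helpers do:
for svc in ['httpd', 'postfix']:
    print("DISABLE_SVC_NOTIFICATIONS;web01;%s" % svc)
```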
+    def enable_host_svc_notifications(self, host):
+        """
+        Enables notifications for all services on the specified host.
+
+        Note that this does not enable notifications for the host.
+
+        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_svc_notifications(self, host, services=None):
+        """
+        Enables notifications for a particular service.
+
+        Note that this does not enable notifications for the host.
+
+        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+        """
+
+        cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
+        nagios_return = True
+        return_str_list = []
+        for service in services:
+            notif_str = self._fmt_notif_str(cmd, host, svc=service)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_hostgroup_host_notifications(self, hostgroup):
+        """
+        Enables notifications for all hosts in a particular hostgroup.
+
+        Note that this command does not enable notifications for
+        services associated with the hosts in this hostgroup.
+
+        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_hostgroup_svc_notifications(self, hostgroup):
+        """
+        Enables notifications for all services that are associated
+        with hosts in a particular hostgroup.
+
+        Note that this does not enable notifications for the hosts in
+        this hostgroup.
+
+        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_servicegroup_host_notifications(self, servicegroup):
+        """
+        Enables notifications for all hosts that have services that
+        are members of a particular servicegroup.
+
+        Note that this command does not enable notifications for
+        services associated with the hosts in this servicegroup.
+
+        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_servicegroup_svc_notifications(self, servicegroup):
+        """
+        Enables notifications for all services that are members of a
+        particular servicegroup.
+
+        Note that this does not enable notifications for the hosts in
+        this servicegroup.
+
+        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
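One quirk worth flagging: `_write_command` has no return statement (failures end in `fail_json`), so on success it returns `None` and the `if nagios_return:` checks in the `enable_*` helpers above never take the truthy branch. A tiny demonstration of the falsy check:

```python
def write_command(cmd):
    pass  # succeeds, but returns None, like Nagios._write_command

nagios_return = write_command("ENABLE_HOST_NOTIFICATIONS;web01")
print(bool(nagios_return))  # False, so the "Fail: ..." branch is what gets returned
```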
+    def silence_host(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for the host and all services on the specified host.
+
+        This is equivalent to calling disable_host_svc_notifications
+        and disable_host_notifications.
+
+        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = [
+            "DISABLE_HOST_SVC_NOTIFICATIONS",
+            "DISABLE_HOST_NOTIFICATIONS"
+        ]
+        nagios_return = True
+        return_str_list = []
+        for c in cmd:
+            notif_str = self._fmt_notif_str(c, host)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def unsilence_host(self, host):
+        """
+        This command is used to enable notifications for the host and
+        all services on the specified host.
+
+        This is equivalent to calling enable_host_svc_notifications
+        and enable_host_notifications.
+
+        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = [
+            "ENABLE_HOST_SVC_NOTIFICATIONS",
+            "ENABLE_HOST_NOTIFICATIONS"
+        ]
+        nagios_return = True
+        return_str_list = []
+        for c in cmd:
+            notif_str = self._fmt_notif_str(c, host)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def silence_nagios(self):
+        """
+        This command is used to disable notifications for all hosts and services
+        in nagios.
+
+        This is a 'SHUT UP, NAGIOS' command
+        """
+        cmd = 'DISABLE_NOTIFICATIONS'
+        self._write_command(self._fmt_notif_str(cmd))
+
+    def unsilence_nagios(self):
+        """
+        This command is used to enable notifications for all hosts and services
+        in nagios.
+
+        This is an 'OK, NAGIOS, GO' command
+        """
+        cmd = 'ENABLE_NOTIFICATIONS'
+        self._write_command(self._fmt_notif_str(cmd))
+
+    def nagios_cmd(self, cmd):
+        """
+        This sends an arbitrary command to nagios.
+
+        It prepends the submitted time and appends a \n.
+
+        You just have to provide the properly formatted command.
+        """
+
+        pre = '[%s]' % int(time.time())
+
+        post = '\n'
+        cmdstr = '%s %s%s' % (pre, cmd, post)
+        self._write_command(cmdstr)
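`nagios_cmd` passes the body through untouched, adding only the timestamp prefix and trailing newline; a sketch submitting a hypothetical passive check result:

```python
import time

# nagios_cmd only adds the "[timestamp] " prefix and the trailing newline:
raw = "PROCESS_SERVICE_CHECK_RESULT;web01;httpd;0;OK - responding"
cmdstr = '[%d] %s\n' % (int(time.time()), raw)
print(cmdstr, end='')
```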
+    def act(self):
+        """
+        Figure out what you want to do from ansible, and then do the
+        needful (at the earliest).
+        """
+        # host or service downtime?
+        if self.action == 'downtime':
+            if self.services == 'host':
+                self.schedule_host_downtime(self.host, minutes=self.minutes,
+                                            start=self.start)
+            elif self.services == 'all':
+                self.schedule_host_svc_downtime(self.host, minutes=self.minutes,
+                                                start=self.start)
+            else:
+                self.schedule_svc_downtime(self.host,
+                                           services=self.services,
+                                           minutes=self.minutes,
+                                           start=self.start)
+
+        elif self.action == 'acknowledge':
+            if self.services == 'host':
+                self.acknowledge_host_problem(self.host)
+            else:
+                self.acknowledge_svc_problem(self.host, services=self.services)
+
+        elif self.action == 'delete_downtime':
+            if self.services == 'host':
+                self.delete_host_downtime(self.host)
+            elif self.services == 'all':
+                self.delete_host_downtime(self.host, comment='')
+            else:
+                self.delete_host_downtime(self.host, services=self.services)
+
+        elif self.action == 'forced_check':
+            if self.services == 'host':
+                self.schedule_forced_host_check(self.host)
+            elif self.services == 'all':
+                self.schedule_forced_host_svc_check(self.host)
+            else:
+                self.schedule_forced_svc_check(self.host, services=self.services)
+
+        elif self.action == "servicegroup_host_downtime":
+            if self.servicegroup:
+                self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+        elif self.action == "servicegroup_service_downtime":
+            if self.servicegroup:
+                self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+
+        # toggle the host AND service alerts
+        elif self.action == 'silence':
+            self.silence_host(self.host)
+
+        elif self.action == 'unsilence':
+            self.unsilence_host(self.host)
+
+        # toggle host/svc alerts
+        elif self.action == 'enable_alerts':
+            if self.services == 'host':
+                self.enable_host_notifications(self.host)
+            elif self.services == 'all':
+                self.enable_host_svc_notifications(self.host)
+            else:
+                self.enable_svc_notifications(self.host,
+                                              services=self.services)
+
+        elif self.action == 'disable_alerts':
+            if self.services == 'host':
+                self.disable_host_notifications(self.host)
+            elif self.services == 'all':
+                self.disable_host_svc_notifications(self.host)
+            else:
+                self.disable_svc_notifications(self.host,
+                                               services=self.services)
+        elif self.action == 'silence_nagios':
+            self.silence_nagios()
+
+        elif self.action == 'unsilence_nagios':
+            self.unsilence_nagios()
+
+        else:  # self.action == 'command'
+            self.nagios_cmd(self.command)
+
+        self.module.exit_json(nagios_commands=self.command_results,
+                              changed=True)
+
+
+if __name__ == '__main__':
+    main()
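The `if`/`elif` chain in `act` is a straightforward action dispatch. For illustration only (this is not how the module is written), the same routing could be expressed as a table, sketched here for a few representative actions:

```python
def dispatch(n, action):
    # n is a Nagios instance; 'command' is the fall-through, as in act() itself.
    handlers = {
        'silence': lambda: n.silence_host(n.host),
        'unsilence': lambda: n.unsilence_host(n.host),
        'silence_nagios': n.silence_nagios,
        'unsilence_nagios': n.unsilence_nagios,
    }
    handlers.get(action, lambda: n.nagios_cmd(n.command))()
```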
- - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. - type: str - required: false - version_added: '0.2.0' - account_api_key: - description: - - Account API key. - - Required for api keys authentication. - - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - type: str - required: false - aliases: [ account_api_token ] - account_email: - description: - - Account email. Required for API keys authentication. - type: str - required: false - algorithm: - description: - - Algorithm number. - - Required for C(type=DS) and C(type=SSHFP) when C(state=present). - type: int - cert_usage: - description: - - Certificate usage number. - - Required for C(type=TLSA) when C(state=present). - type: int - choices: [ 0, 1, 2, 3 ] - hash_type: - description: - - Hash type number. - - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present). - type: int - choices: [ 1, 2 ] - key_tag: - description: - - DNSSEC key tag. - - Needed for C(type=DS) when C(state=present). - type: int - port: - description: - - Service port. - - Required for C(type=SRV) and C(type=TLSA). - type: int - priority: - description: - - Record priority. - - Required for C(type=MX) and C(type=SRV) - default: 1 - type: int - proto: - description: - - Service protocol. Required for C(type=SRV) and C(type=TLSA). - - Common values are TCP and UDP. - - Before Ansible 2.6 only TCP and UDP were available. - type: str - proxied: - description: - - Proxy through Cloudflare network or just use DNS. - type: bool - default: no - record: - description: - - Record to add. - - Required if C(state=present). - - Default is C(@) (e.g. the zone name). - type: str - default: '@' - aliases: [ name ] - selector: - description: - - Selector number. - - Required for C(type=TLSA) when C(state=present). - choices: [ 0, 1 ] - type: int - service: - description: - - Record service. - - Required for I(type=SRV). - type: str - solo: - description: - - Whether the record should be the only one for that record type and record name. - - Only use with C(state=present). - - This will delete all other records with the same record name and type. - type: bool - state: - description: - - Whether the record(s) should exist or not. - type: str - choices: [ absent, present ] - default: present - timeout: - description: - - Timeout for Cloudflare API calls. - type: int - default: 30 - ttl: - description: - - The TTL to give the new record. - - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. - type: int - default: 1 - type: - description: - - The type of DNS record to create. Required if C(state=present). - - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7. - type: str - choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ] - value: - description: - - The record value. - - Required for C(state=present). - type: str - aliases: [ content ] - weight: - description: - - Service weight. - - Required for C(type=SRV). - type: int - default: 1 - zone: - description: - - The name of the Zone to work with (e.g. "example.com"). - - The Zone must already exist. 
- type: str - required: true - aliases: [ domain ] -''' - -EXAMPLES = r''' -- name: Create a test.example.net A record to point to 127.0.0.1 - community.general.cloudflare_dns: - zone: example.net - record: test - type: A - value: 127.0.0.1 - account_email: test@example.com - account_api_key: dummyapitoken - register: record - -- name: Create a record using api token - community.general.cloudflare_dns: - zone: example.net - record: test - type: A - value: 127.0.0.1 - api_token: dummyapitoken - -- name: Create a example.net CNAME record to example.com - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -- name: Change its TTL - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - ttl: 600 - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -- name: Delete the record - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - account_email: test@example.com - account_api_key: dummyapitoken - state: absent - -- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - proxied: yes - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -# This deletes all other TXT records named "test.example.net" -- name: Create TXT record "test.example.net" with value "unique value" - community.general.cloudflare_dns: - domain: example.net - record: test - type: TXT - value: unique value - solo: true - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -- name: Create an SRV record _foo._tcp.example.net - community.general.cloudflare_dns: - domain: example.net - service: foo - proto: tcp - port: 3500 - priority: 10 - weight: 20 - type: SRV - value: fooserver.example.net - -- name: Create a SSHFP record login.example.com - community.general.cloudflare_dns: - zone: example.com - record: login - type: SSHFP - algorithm: 4 - hash_type: 2 - value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1 - -- name: Create a TLSA record _25._tcp.mail.example.com - community.general.cloudflare_dns: - zone: example.com - record: mail - port: 25 - proto: tcp - type: TLSA - cert_usage: 3 - selector: 1 - hash_type: 1 - value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 - -- name: Create a DS record for subdomain.example.com - community.general.cloudflare_dns: - zone: example.com - record: subdomain - type: DS - key_tag: 5464 - algorithm: 8 - hash_type: 2 - value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB -''' - -RETURN = r''' -record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: complex - contains: - content: - description: The record content (details depend on record type). - returned: success - type: str - sample: 192.0.2.91 - created_on: - description: The record creation date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - data: - description: Additional record data. - returned: success, if type is SRV, DS, SSHFP or TLSA - type: dict - sample: { - name: "jabber", - port: 8080, - priority: 10, - proto: "_tcp", - service: "_xmpp", - target: "jabberhost.sample.com", - weight: 5, - } - id: - description: The record ID. 
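For SRV and TLSA records the module assembles the final record name itself by underscore-prefixing the service or port and the protocol (see the normalization in `__init__` further down). The two examples above therefore resolve to:

```python
# How the TLSA record name from the example above is assembled:
port, proto, record = 25, 'tcp', 'mail.example.com'
print('_%d._%s.%s' % (port, proto, record))  # _25._tcp.mail.example.com

# SRV names get the same treatment with service and proto:
service, proto, record = 'foo', 'tcp', 'example.net'
print('_%s._%s.%s' % (service, proto, record))  # _foo._tcp.example.net
```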
-
-RETURN = r'''
-record:
-    description: A dictionary containing the record data.
-    returned: success, except on record deletion
-    type: complex
-    contains:
-        content:
-            description: The record content (details depend on record type).
-            returned: success
-            type: str
-            sample: 192.0.2.91
-        created_on:
-            description: The record creation date.
-            returned: success
-            type: str
-            sample: "2016-03-25T19:09:42.516553Z"
-        data:
-            description: Additional record data.
-            returned: success, if type is SRV, DS, SSHFP or TLSA
-            type: dict
-            sample: {
-                name: "jabber",
-                port: 8080,
-                priority: 10,
-                proto: "_tcp",
-                service: "_xmpp",
-                target: "jabberhost.sample.com",
-                weight: 5,
-            }
-        id:
-            description: The record ID.
-            returned: success
-            type: str
-            sample: f9efb0549e96abcb750de63b38c9576e
-        locked:
-            description: No documentation available.
-            returned: success
-            type: bool
-            sample: False
-        meta:
-            description: No documentation available.
-            returned: success
-            type: dict
-            sample: { auto_added: false }
-        modified_on:
-            description: Record modification date.
-            returned: success
-            type: str
-            sample: "2016-03-25T19:09:42.516553Z"
-        name:
-            description: The record name as FQDN (including _service and _proto for SRV).
-            returned: success
-            type: str
-            sample: www.sample.com
-        priority:
-            description: Priority of the MX record.
-            returned: success, if type is MX
-            type: int
-            sample: 10
-        proxiable:
-            description: Whether this record can be proxied through Cloudflare.
-            returned: success
-            type: bool
-            sample: False
-        proxied:
-            description: Whether the record is proxied through Cloudflare.
-            returned: success
-            type: bool
-            sample: False
-        ttl:
-            description: The time-to-live for the record.
-            returned: success
-            type: int
-            sample: 300
-        type:
-            description: The record type.
-            returned: success
-            type: str
-            sample: A
-        zone_id:
-            description: The ID of the zone containing the record.
-            returned: success
-            type: str
-            sample: abcede0bf9f0066f94029d2e6b73856a
-        zone_name:
-            description: The name of the zone containing the record.
-            returned: success
-            type: str
-            sample: sample.com
-'''
-
-import json
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.urls import fetch_url
-
-
-def lowercase_string(param):
-    if not isinstance(param, str):
-        return param
-    return param.lower()
-
-
-class CloudflareAPI(object):
-
-    cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
-    changed = False
-
-    def __init__(self, module):
-        self.module = module
-        self.api_token = module.params['api_token']
-        self.account_api_key = module.params['account_api_key']
-        self.account_email = module.params['account_email']
-        self.algorithm = module.params['algorithm']
-        self.cert_usage = module.params['cert_usage']
-        self.hash_type = module.params['hash_type']
-        self.key_tag = module.params['key_tag']
-        self.port = module.params['port']
-        self.priority = module.params['priority']
-        self.proto = lowercase_string(module.params['proto'])
-        self.proxied = module.params['proxied']
-        self.selector = module.params['selector']
-        self.record = lowercase_string(module.params['record'])
-        self.service = lowercase_string(module.params['service'])
-        self.is_solo = module.params['solo']
-        self.state = module.params['state']
-        self.timeout = module.params['timeout']
-        self.ttl = module.params['ttl']
-        self.type = module.params['type']
-        self.value = module.params['value']
-        self.weight = module.params['weight']
-        self.zone = lowercase_string(module.params['zone'])
-
-        if self.record == '@':
-            self.record = self.zone
-
-        if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
-            self.value = self.value.rstrip('.').lower()
-
-        if (self.type == 'AAAA') and (self.value is not None):
-            self.value = self.value.lower()
-
-        if (self.type == 'SRV'):
-            if (self.proto is not None) and (not self.proto.startswith('_')):
-                self.proto = '_' + self.proto
-            if (self.service is not None) and (not self.service.startswith('_')):
-                self.service = '_' + self.service
-
-        if (self.type == 'TLSA'):
-            if (self.proto is not None) and (not self.proto.startswith('_')):
-                self.proto = '_' + self.proto
-            if (self.port is not None):
-                self.port = '_' + str(self.port)
-
-        if not self.record.endswith(self.zone):
-            self.record = self.record + '.' + self.zone
-
-        if (self.type == 'DS'):
-            if self.record == self.zone:
-                self.module.fail_json(msg="DS records only apply to subdomains.")
-
-    def _cf_simple_api_call(self, api_call, method='GET', payload=None):
-        if self.api_token:
-            headers = {
-                'Authorization': 'Bearer ' + self.api_token,
-                'Content-Type': 'application/json',
-            }
-        else:
-            headers = {
-                'X-Auth-Email': self.account_email,
-                'X-Auth-Key': self.account_api_key,
-                'Content-Type': 'application/json',
-            }
-        data = None
-        if payload:
-            try:
-                data = json.dumps(payload)
-            except Exception as e:
-                self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
-
-        resp, info = fetch_url(self.module,
-                               self.cf_api_endpoint + api_call,
-                               headers=headers,
-                               data=data,
-                               method=method,
-                               timeout=self.timeout)
-
-        if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
-            self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
-
-        error_msg = ''
-        if info['status'] == 401:
-            # Unauthorized
-            error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
-        elif info['status'] == 403:
-            # Forbidden
-            error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
-        elif info['status'] == 429:
-            # Too many requests
-            error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
-        elif info['status'] == 405:
-            # Method not allowed
-            error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
-        elif info['status'] == 415:
-            # Unsupported Media Type
-            error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
-        elif info['status'] == 400:
-            # Bad Request
-            error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
-
-        result = None
-        try:
-            content = resp.read()
-        except AttributeError:
-            if info['body']:
-                content = info['body']
-            else:
-                error_msg += "; The API response was empty"
-
-        if content:
-            try:
-                result = json.loads(to_text(content, errors='surrogate_or_strict'))
-            except (getattr(json, 'JSONDecodeError', ValueError)) as e:
-                error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
-
-        # Without a valid/parsed JSON response no more error processing can be done
-        if result is None:
-            self.module.fail_json(msg=error_msg)
-
-        if 'success' not in result:
-            error_msg += "; Unexpected error details: {0}".format(result.get('error'))
-            self.module.fail_json(msg=error_msg)
-
-        if not result['success']:
-            error_msg += "; Error details: "
-            for error in result['errors']:
-                error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
-                if 'error_chain' in error:
-                    for chain_error in error['error_chain']:
-                        error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
-            self.module.fail_json(msg=error_msg)
-
-        return result, info['status']
-
-    def _cf_api_call(self, api_call, method='GET', payload=None):
-        result, status = self._cf_simple_api_call(api_call, method, payload)
-
-        data = result['result']
-
-        if 'result_info' in result:
-            pagination = result['result_info']
-            if pagination['total_pages'] > 1:
-                next_page = int(pagination['page']) + 1
-                parameters = ['page={0}'.format(next_page)]
-                # strip "page" parameter from call parameters (if there are any)
-                if '?' in api_call:
-                    raw_api_call, query = api_call.split('?', 1)
-                    parameters += [param for param in query.split('&') if not param.startswith('page')]
-                else:
-                    raw_api_call = api_call
-                while next_page <= pagination['total_pages']:
-                    raw_api_call += '?' + '&'.join(parameters)
-                    result, status = self._cf_simple_api_call(raw_api_call, method, payload)
-                    data += result['result']
-                    next_page += 1
-
-        return data, status
-
-    def _get_zone_id(self, zone=None):
-        if not zone:
-            zone = self.zone
-
-        zones = self.get_zones(zone)
-        if len(zones) > 1:
-            self.module.fail_json(msg="More than one zone matches {0}".format(zone))
-
-        if len(zones) < 1:
-            self.module.fail_json(msg="No zone found with name {0}".format(zone))
-
-        return zones[0]['id']
-
-    def get_zones(self, name=None):
-        if not name:
-            name = self.zone
-        param = ''
-        if name:
-            param = '?' + urlencode({'name': name})
-        zones, status = self._cf_api_call('/zones' + param)
-        return zones
-
-    def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
-        if not zone_name:
-            zone_name = self.zone
-        if not type:
-            type = self.type
-        if not record:
-            record = self.record
-        # necessary because None as value means to override user
-        # set module value
-        if (not value) and (value is not None):
-            value = self.value
-
-        zone_id = self._get_zone_id()
-        api_call = '/zones/{0}/dns_records'.format(zone_id)
-        query = {}
-        if type:
-            query['type'] = type
-        if record:
-            query['name'] = record
-        if value:
-            query['content'] = value
-        if query:
-            api_call += '?' + urlencode(query)
-
-        records, status = self._cf_api_call(api_call)
-        return records
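`_cf_api_call` flattens paginated responses by re-issuing the request for each remaining page and concatenating the `result` payloads. Reduced to its core, with a hypothetical `fetch(page)` callable returning `(data, result_info)`:

```python
def fetch_all(fetch):
    data, info = fetch(1)
    for page in range(2, info['total_pages'] + 1):
        page_data, info = fetch(page)
        data += page_data
    return data

# fetch_all(lambda page: ([page], {'total_pages': 3}))  ->  [1, 2, 3]
```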
-
-    def delete_dns_records(self, **kwargs):
-        params = {}
-        for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
-                      'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
-            if param in kwargs:
-                params[param] = kwargs[param]
-            else:
-                params[param] = getattr(self, param)
-
-        records = []
-        content = params['value']
-        search_record = params['record']
-        if params['type'] == 'SRV':
-            if not (params['value'] is None or params['value'] == ''):
-                content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
-            search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
-        elif params['type'] == 'DS':
-            if not (params['value'] is None or params['value'] == ''):
-                content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
-        elif params['type'] == 'SSHFP':
-            if not (params['value'] is None or params['value'] == ''):
-                content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
-        elif params['type'] == 'TLSA':
-            if not (params['value'] is None or params['value'] == ''):
-                content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
-            search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
-        if params['solo']:
-            search_value = None
-        else:
-            search_value = content
-
-        records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
-
-        for rr in records:
-            if params['solo']:
-                if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
-                    self.changed = True
-                    if not self.module.check_mode:
-                        result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
-            else:
-                self.changed = True
-                if not self.module.check_mode:
-                    result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
-        return self.changed
-
-    def ensure_dns_record(self, **kwargs):
-        params = {}
-        for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
-                      'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
-            if param in kwargs:
-                params[param] = kwargs[param]
-            else:
-                params[param] = getattr(self, param)
-
-        search_value = params['value']
-        search_record = params['record']
-        new_record = None
-        if (params['type'] is None) or (params['record'] is None):
-            self.module.fail_json(msg="You must provide a type and a record to create a new record")
-
-        if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
-            if not params['value']:
-                self.module.fail_json(msg="You must provide a non-empty value to create this record type")
-
-            # there can only be one CNAME per record
-            # ignoring the value when searching for existing
-            # CNAME records allows us to update the value if it
-            # changes
-            if params['type'] == 'CNAME':
-                search_value = None
-
-            new_record = {
-                "type": params['type'],
-                "name": params['record'],
-                "content": params['value'],
-                "ttl": params['ttl']
-            }
-
-        if (params['type'] in ['A', 'AAAA', 'CNAME']):
-            new_record["proxied"] = params["proxied"]
-
-        if params['type'] == 'MX':
-            for attr in [params['priority'], params['value']]:
-                if (attr is None) or (attr == ''):
-                    self.module.fail_json(msg="You must provide priority and a value to create this record type")
-            new_record = {
-                "type": params['type'],
-                "name": params['record'],
-                "content": params['value'],
-                "priority": params['priority'],
-                "ttl": params['ttl']
-            }
-
-        if params['type'] == 'SRV':
-            for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
-                if (attr is None) or (attr == ''):
-                    self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
-            srv_data = {
-                "target": params['value'],
-                "port": params['port'],
-                "weight": params['weight'],
-                "priority": params['priority'],
-                "name": params['record'][:-len('.' + params['zone'])],
-                "proto": params['proto'],
-                "service": params['service']
-            }
-            new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
-            search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
-            search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
-
-        if params['type'] == 'DS':
-            for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
-                if (attr is None) or (attr == ''):
-                    self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
-            ds_data = {
-                "key_tag": params['key_tag'],
-                "algorithm": params['algorithm'],
-                "digest_type": params['hash_type'],
-                "digest": params['value'],
-            }
-            new_record = {
-                "type": params['type'],
-                "name": params['record'],
-                'data': ds_data,
-                "ttl": params['ttl'],
-            }
-            search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
-
-        if params['type'] == 'SSHFP':
-            for attr in [params['algorithm'], params['hash_type'], params['value']]:
-                if (attr is None) or (attr == ''):
-                    self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
-            sshfp_data = {
-                "fingerprint": params['value'],
-                "type": params['hash_type'],
-                "algorithm": params['algorithm'],
-            }
-            new_record = {
-                "type": params['type'],
-                "name": params['record'],
-                'data': sshfp_data,
-                "ttl": params['ttl'],
-            }
-            search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
-
-        if params['type'] == 'TLSA':
-            for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
-                if (attr is None) or (attr == ''):
-                    self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
-            search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
-            tlsa_data = {
-                "usage": params['cert_usage'],
-                "selector": params['selector'],
-                "matching_type": params['hash_type'],
-                "certificate": params['value'],
-            }
-            new_record = {
-                "type": params['type'],
-                "name": search_record,
-                'data': tlsa_data,
-                "ttl": params['ttl'],
-            }
-            search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
-
-        zone_id = self._get_zone_id(params['zone'])
-        records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
-        # in theory this should be impossible as cloudflare does not allow
-        # the creation of duplicate records but lets cover it anyways
-        if len(records) > 1:
-            self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
-        # record already exists, check if it must be updated
-        if len(records) == 1:
-            cur_record = records[0]
-            do_update = False
-            if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
-                do_update = True
-            if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
-                do_update = True
-            if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
-                do_update = True
-            if ('data' in new_record) and ('data' in cur_record):
-                if (cur_record['data'] != new_record['data']):
-                    do_update = True
-            if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
-                do_update = True
-            if do_update:
-                if self.module.check_mode:
-                    result = new_record
-                else:
-                    result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
-                self.changed = True
-                return result, self.changed
-            else:
-                return records, self.changed
-        if self.module.check_mode:
-            result = new_record
-        else:
-            result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
-        self.changed = True
-        return result, self.changed
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            api_token=dict(
-                type="str",
-                required=False,
-                no_log=True,
-                fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
-            ),
-            account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
-            account_email=dict(type='str', required=False),
-            algorithm=dict(type='int'),
-            cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
-            hash_type=dict(type='int', choices=[1, 2]),
-            key_tag=dict(type='int', no_log=False),
-            port=dict(type='int'),
-            priority=dict(type='int', default=1),
-            proto=dict(type='str'),
-            proxied=dict(type='bool', default=False),
-            record=dict(type='str', default='@', aliases=['name']),
-            selector=dict(type='int', choices=[0, 1]),
-            service=dict(type='str'),
-            solo=dict(type='bool'),
-            state=dict(type='str', default='present', choices=['absent', 'present']),
-            timeout=dict(type='int', default=30),
-            ttl=dict(type='int', default=1),
-            type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
-            value=dict(type='str', aliases=['content']),
-            weight=dict(type='int', default=1),
-            zone=dict(type='str', required=True, aliases=['domain']),
-        ),
-        supports_check_mode=True,
-        required_if=[
-            ('state', 'present', ['record', 'type', 'value']),
-            ('state', 'absent', ['record']),
-            ('type', 'SRV', ['proto', 'service']),
-            ('type', 'TLSA', ['proto', 'port']),
-        ],
-    )
-
-    if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
-        module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
-    if module.params['type'] == 'SRV':
-        if not ((module.params['weight'] is not None and module.params['port'] is not None
-                 and not (module.params['value'] is None or module.params['value'] == ''))
-                or (module.params['weight'] is None and module.params['port'] is None
-                    and (module.params['value'] is None or module.params['value'] == ''))):
-            module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
-
-    if module.params['type'] == 'SSHFP':
-        if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
-                 and not (module.params['value'] is None or module.params['value'] == ''))
-                or (module.params['algorithm'] is None and module.params['hash_type'] is None
-                    and (module.params['value'] is None or module.params['value'] == ''))):
-            module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
-
-    if module.params['type'] == 'TLSA':
-        if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
-                 and not (module.params['value'] is None or module.params['value'] == ''))
-                or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
-                    and (module.params['value'] is None or module.params['value'] == ''))):
-            module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
-
-    if module.params['type'] == 'DS':
-        if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
-                 and not (module.params['value'] is None or module.params['value'] == ''))
-                or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
-                    and (module.params['value'] is None or module.params['value'] == ''))):
-            module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
-
-    changed = False
-    cf_api = CloudflareAPI(module)
-
-    # sanity checks
-    if cf_api.is_solo and cf_api.state == 'absent':
-        module.fail_json(msg="solo=true can only be used with state=present")
-
-    # perform add, delete or update (only the TTL can be updated) of one or
-    # more records
-    if cf_api.state == 'present':
-        # delete all records matching record name + type
-        if cf_api.is_solo:
-            changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
-        result, changed = cf_api.ensure_dns_record()
-        if isinstance(result, list):
-            module.exit_json(changed=changed, result={'record': result[0]})
-
-        module.exit_json(changed=changed, result={'record': result})
-    else:
-        # force solo to False, just to be sure
-        changed = cf_api.delete_dns_records(solo=False)
-        module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
-    main()
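The four record-type checks in `main()` above all enforce the same all-or-none rule on their parameter groups; the repeated boolean expressions amount to a helper like this (hypothetical, not part of the module):

```python
def all_or_none(*params):
    provided = [p is not None and p != '' for p in params]
    return all(provided) or not any(provided)

print(all_or_none(10, 8080, 'target.example.net'))  # True  - all given
print(all_or_none(None, None, None))                # True  - none given
print(all_or_none(10, None, 'target.example.net'))  # False - rejected
```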
diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py
deleted file mode 100644
index ac1b2a1416..0000000000
--- a/plugins/modules/net_tools/dnsimple.py
+++ /dev/null
@@ -1,516 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Ansible Project
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: dnsimple
-short_description: Interface with dnsimple.com (a DNS hosting service)
-description:
-   - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
-options:
-  account_email:
-    description:
-      - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
-      - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
-      - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0"
-    type: str
-  account_api_token:
-    description:
-      - Account API token. See I(account_email) for more information.
-    type: str
-  domain:
-    description:
-      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
-      - If omitted, a list of domains will be returned.
-      - If domain is present but the domain doesn't exist, it will be created.
-    type: str
-  record:
-    description:
-      - Record to add, if blank a record for the domain will be created, supports the wildcard (*).
-    type: str
-  record_ids:
-    description:
-      - List of records to ensure they either exist or do not exist.
-    type: list
-    elements: str
-  type:
-    description:
-      - The type of DNS record to create.
-    choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ]
-    type: str
-  ttl:
-    description:
-      - The TTL to give the new record in seconds.
-    default: 3600
-    type: int
-  value:
-    description:
-      - Record value.
-      - Must be specified when trying to ensure a record exists.
-    type: str
-  priority:
-    description:
-      - Record priority.
-    type: int
-  state:
-    description:
-      - whether the record should exist or not.
-    choices: [ 'present', 'absent' ]
-    default: present
-    type: str
-  solo:
-    description:
-      - Whether the record should be the only one for that record type and record name.
-      - Only use with C(state) is set to C(present) on a record.
-    type: 'bool'
-    default: no
-  sandbox:
-    description:
-      - Use the DNSimple sandbox environment.
-      - Requires a dedicated account in the dnsimple sandbox environment.
-      - Check U(https://developer.dnsimple.com/sandbox/) for more information.
-    type: 'bool'
-    default: no
-    version_added: 3.5.0
-requirements:
-  - "dnsimple >= 1.0.0"
-notes:
-  - "Support for C(dnsimple < 2) is deprecated and will be removed in community.general 5.0.0."
-author: "Alex Coomans (@drcapulet)"
-'''
-
-EXAMPLES = '''
-- name: Authenticate using email and API token and fetch all domains
-  community.general.dnsimple:
-    account_email: test@example.com
-    account_api_token: dummyapitoken
-  delegate_to: localhost
-
-- name: Delete a domain
-  community.general.dnsimple:
-    domain: my.com
-    state: absent
-  delegate_to: localhost
-
-- name: Create a test.my.com A record to point to 127.0.0.1
-  community.general.dnsimple:
-    domain: my.com
-    record: test
-    type: A
-    value: 127.0.0.1
-  delegate_to: localhost
-  register: record
-
-- name: Delete record using record_ids
-  community.general.dnsimple:
-    domain: my.com
-    record_ids: '{{ record["id"] }}'
-    state: absent
-  delegate_to: localhost
-
-- name: Create a my.com CNAME record to example.com
-  community.general.dnsimple:
-    domain: my.com
-    record: ''
-    type: CNAME
-    value: example.com
-    state: present
-  delegate_to: localhost
-
-- name: Change TTL value for a record
-  community.general.dnsimple:
-    domain: my.com
-    record: ''
-    type: CNAME
-    value: example.com
-    ttl: 600
-    state: present
-  delegate_to: localhost
-
-- name: Delete the record
-  community.general.dnsimple:
-    domain: my.com
-    record: ''
-    type: CNAME
-    value: example.com
-    state: absent
-  delegate_to: localhost
-'''
-
-RETURN = r"""# """
-
-import traceback
-from distutils.version import LooseVersion
-import re
-
-
-class DNSimpleV1():
-    """class which uses dnsimple-python < 2"""
-
-    def __init__(self, account_email, account_api_token, sandbox, module):
-        """init"""
-        self.module = module
-        self.account_email = account_email
-        self.account_api_token = account_api_token
-        self.sandbox = sandbox
-        self.dnsimple_client()
-
-    def dnsimple_client(self):
-        """creates a dnsimple client object"""
-        if self.account_email and self.account_api_token:
-            self.client = DNSimple(sandbox=self.sandbox, email=self.account_email, api_token=self.account_api_token)
-        else:
-            self.client = DNSimple(sandbox=self.sandbox)
-
-    def get_all_domains(self):
-        """returns a list of all domains"""
-        domain_list = self.client.domains()
-        return [d['domain'] for d in domain_list]
-
-    def get_domain(self, domain):
-        """returns a single domain by name or id"""
-        try:
-            dr = self.client.domain(domain)['domain']
-        except DNSimpleException as e:
-            exception_string = str(e.args[0]['message'])
-            if re.match(r"^Domain .+ not found$", exception_string):
-                dr = None
-            else:
-                raise
-        return dr
-
-    def create_domain(self, domain):
-        """create a single domain"""
-        return self.client.add_domain(domain)['domain']
-
-    def delete_domain(self, domain):
-        """delete a single domain"""
-        self.client.delete(domain)
-
-    def get_records(self, domain, dnsimple_filter=None):
-        """return dns resource records which match a specified filter"""
-        return [r['record'] for r in self.client.records(str(domain), params=dnsimple_filter)]
-
-    def delete_record(self, domain, rid):
-        """delete a single dns resource record"""
-        self.client.delete_record(str(domain), rid)
-
-    def update_record(self, domain, rid, ttl=None, priority=None):
-        """update a single dns resource record"""
-        data = {}
-        if ttl:
-            data['ttl'] = ttl
-        if priority:
-            data['priority'] = priority
-        return self.client.update_record(str(domain), str(rid), data)['record']
-
-    def create_record(self, domain, name, record_type, content, ttl=None, priority=None):
-        """create a single dns resource record"""
-        data = {
-            'name': name,
-            'type': record_type,
-            'content': content,
-        }
-        if ttl:
-            data['ttl'] = ttl
-        if priority:
-            data['priority'] = priority
-        return self.client.add_record(str(domain), data)['record']
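Which of the two client classes gets instantiated is decided in `main()` further down, from the major version of the installed `dnsimple` library; the selection logic alone looks like this:

```python
from distutils.version import LooseVersion

def pick_client(version_string):
    # Mirrors the DNSIMPLE_MAJOR_VERSION check in main().
    major = LooseVersion(version_string).version[0]
    return 'DNSimpleV2' if major > 1 else 'DNSimpleV1'

print(pick_client('1.0.3'))  # DNSimpleV1
print(pick_client('2.1.0'))  # DNSimpleV2
```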
\ - "See https://support.dnsimple.com/articles/api-access-token/" - raise DNSimpleException(msg) - account = accounts[0] - self.account = account - - def get_all_domains(self): - """returns a list of all domains""" - domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id) - return [d.__dict__ for d in domain_list] - - def get_domain(self, domain): - """returns a single domain by name or id""" - try: - dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__ - except DNSimpleException as e: - exception_string = str(e.message) - if re.match(r"^Domain .+ not found$", exception_string): - dr = None - else: - raise - return dr - - def create_domain(self, domain): - """create a single domain""" - return self.client.domains.create_domain(self.account.id, domain).data.__dict__ - - def delete_domain(self, domain): - """delete a single domain""" - self.client.domains.delete_domain(self.account.id, domain) - - def get_records(self, zone, dnsimple_filter=None): - """return dns ressource records which match a specified filter""" - records_list = self._get_paginated_result(self.client.zones.list_records, - account_id=self.account.id, - zone=zone, filter=dnsimple_filter) - return [d.__dict__ for d in records_list] - - def delete_record(self, domain, rid): - """delete a single dns ressource record""" - self.client.zones.delete_record(self.account.id, domain, rid) - - def update_record(self, domain, rid, ttl=None, priority=None): - """update a single dns ressource record""" - zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) - result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ - return result - - def create_record(self, domain, name, record_type, content, ttl=None, priority=None): - """create a single dns ressource record""" - zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) - return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ - - def _get_paginated_result(self, operation, **options): - """return all results of a paginated api response""" - records_pagination = operation(per_page=self.pagination_per_page, **options).pagination - result_list = [] - for page in range(1, records_pagination.total_pages + 1): - page_data = operation(per_page=self.pagination_per_page, page=page, **options).data - result_list.extend(page_data) - return result_list - - -DNSIMPLE_IMP_ERR = [] -HAS_DNSIMPLE = False -try: - # try to import dnsimple >= 2.0.0 - from dnsimple import Client, DNSimpleException - from dnsimple.service import Accounts - from dnsimple.version import version as dnsimple_version - from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput - HAS_DNSIMPLE = True -except ImportError: - DNSIMPLE_IMP_ERR.append(traceback.format_exc()) - -if not HAS_DNSIMPLE: - # try to import dnsimple < 2.0.0 - try: - from dnsimple.dnsimple import __version__ as dnsimple_version - from dnsimple import DNSimple - from dnsimple.dnsimple import DNSimpleException - HAS_DNSIMPLE = True - except ImportError: - DNSIMPLE_IMP_ERR.append(traceback.format_exc()) - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback - - -def main(): - module = AnsibleModule( - argument_spec=dict( - account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), - account_api_token=dict(type='str', - no_log=True, - fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), - domain=dict(type='str'), - 
record=dict(type='str'), - record_ids=dict(type='list', elements='str'), - type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', - 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', - 'PTR', 'AAAA', 'SSHFP', 'HINFO', - 'POOL', 'CAA']), - ttl=dict(type='int', default=3600), - value=dict(type='str'), - priority=dict(type='int'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - solo=dict(type='bool', default=False), - sandbox=dict(type='bool', default=False), - ), - required_together=[ - ['record', 'value'] - ], - supports_check_mode=True, - ) - - if not HAS_DNSIMPLE: - module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) - - account_email = module.params.get('account_email') - account_api_token = module.params.get('account_api_token') - domain = module.params.get('domain') - record = module.params.get('record') - record_ids = module.params.get('record_ids') - record_type = module.params.get('type') - ttl = module.params.get('ttl') - value = module.params.get('value') - priority = module.params.get('priority') - state = module.params.get('state') - is_solo = module.params.get('solo') - sandbox = module.params.get('sandbox') - - DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] - - try: - if DNSIMPLE_MAJOR_VERSION > 1: - ds = DNSimpleV2(account_email, account_api_token, sandbox, module) - else: - module.deprecate( - 'Support for python-dnsimple < 2 is deprecated. ' - 'Update python-dnsimple to version >= 2.0.0', - version='5.0.0', collection_name='community.general' - ) - ds = DNSimpleV1(account_email, account_api_token, sandbox, module) - # Let's figure out what operation we want to do - # No domain, return a list - if not domain: - all_domains = ds.get_all_domains() - module.exit_json(changed=False, result=all_domains) - - # Domain & No record - if record is None and not record_ids: - if domain.isdigit(): - typed_domain = int(domain) - else: - typed_domain = str(domain) - dr = ds.get_domain(typed_domain) - # domain does not exist - if state == 'present': - if dr: - module.exit_json(changed=False, result=dr) - else: - if module.check_mode: - module.exit_json(changed=True) - else: - response = ds.create_domain(domain) - module.exit_json(changed=True, result=response) - # state is absent - else: - if dr: - if not module.check_mode: - ds.delete_domain(domain) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - # need the not none check since record could be an empty string - if record is not None: - if not record_type: - module.fail_json(msg="Missing the record type") - if not value: - module.fail_json(msg="Missing the record value") - - records_list = ds.get_records(domain, dnsimple_filter={'name': record}) - rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) - if state == 'present': - changed = False - if is_solo: - # delete any records that have the same name and record type - same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] - if rr: - same_type = [rid for rid in same_type if rid != rr['id']] - if same_type: - if not module.check_mode: - for rid in same_type: - ds.delete_record(domain, rid) - changed = True - if rr: - # check if we need to update - if rr['ttl'] != ttl or rr['priority'] != priority: - if module.check_mode: - module.exit_json(changed=True) - else: - response = ds.update_record(domain, rr['id'], ttl, priority) - module.exit_json(changed=True, result=response) - 
else: - module.exit_json(changed=changed, result=rr) - else: - # create it - if module.check_mode: - module.exit_json(changed=True) - else: - response = ds.create_record(domain, record, record_type, value, ttl, priority) - module.exit_json(changed=True, result=response) - # state is absent - else: - if rr: - if not module.check_mode: - ds.delete_record(domain, rr['id']) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - # Make sure these record_ids either all exist or none - if record_ids: - current_records = ds.get_records(domain, dnsimple_filter=None) - current_record_ids = [str(d['id']) for d in current_records] - wanted_record_ids = [str(r) for r in record_ids] - if state == 'present': - difference = list(set(wanted_record_ids) - set(current_record_ids)) - if difference: - module.fail_json(msg="Missing the following records: %s" % difference) - else: - module.exit_json(changed=False) - # state is absent - else: - difference = list(set(wanted_record_ids) & set(current_record_ids)) - if difference: - if not module.check_mode: - for rid in difference: - ds.delete_record(domain, rid) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - except DNSimpleException as e: - if DNSIMPLE_MAJOR_VERSION > 1: - module.fail_json(msg="DNSimple exception: %s" % e.message) - else: - module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message'])) - module.fail_json(msg="Unknown what you wanted me to do") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/dnsmadeeasy.py b/plugins/modules/net_tools/dnsmadeeasy.py deleted file mode 100644 index 75135c8277..0000000000 --- a/plugins/modules/net_tools/dnsmadeeasy.py +++ /dev/null @@ -1,717 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: dnsmadeeasy -short_description: Interface with dnsmadeeasy.com (a DNS hosting service). -description: - - > - Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or - monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) -options: - account_key: - description: - - Account API Key. - required: true - type: str - - account_secret: - description: - - Account Secret Key. - required: true - type: str - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster - resolution - required: true - type: str - - sandbox: - description: - - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used. - type: bool - default: 'no' - - record_name: - description: - - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless - of the state argument. - type: str - - record_type: - description: - - Record type. - choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] - type: str - - record_value: - description: - - > - Record value. 
HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, - SRV: <priority> <weight> <port> <target name>, TXT: <text value> - - > - If record_value is not specified, no changes will be made and the record will be returned in 'result' - (in other words, this module can be used to fetch a record's current id, type, and ttl) - type: str - - record_ttl: - description: - - Record's "Time to live". Number of seconds the record remains cached in DNS servers. - default: 1800 - type: int - - state: - description: - - Whether the record should exist or not. - required: true - choices: [ 'present', 'absent' ] - type: str - - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - - monitor: - description: - - If C(yes), add or change the monitor. This is applicable only for A records. - type: bool - default: 'no' - - systemDescription: - description: - - Description used by the monitor. - default: '' - type: str - - maxEmails: - description: - - Number of emails sent to the contact list by the monitor. - default: 1 - type: int - - protocol: - description: - - Protocol used by the monitor. - default: 'HTTP' - choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS'] - type: str - - port: - description: - - Port used by the monitor. - default: 80 - type: int - - sensitivity: - description: - - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3. - default: 'Medium' - choices: ['Low', 'Medium', 'High'] - type: str - - contactList: - description: - - Name or id of the contact list that the monitor will notify. - - The default C('') means the Account Owner. - default: '' - type: str - - httpFqdn: - description: - - The fully qualified domain name used by the monitor. - type: str - - httpFile: - description: - - The file at the FQDN that the monitor queries for HTTP or HTTPS. - type: str - - httpQueryString: - description: - - The string in the httpFile that the monitor queries for HTTP or HTTPS. - type: str - - failover: - description: - - If C(yes), add or change the failover. This is applicable only for A records. - type: bool - default: 'no' - - autoFailover: - description: - - If true, fallback to the primary IP address is manual after a failover. - - If false, fallback to the primary IP address is automatic after a failover. - type: bool - default: 'no' - - ip1: - description: - - Primary IP address for the failover. - - Required if adding or changing the monitor or failover. - type: str - - ip2: - description: - - Secondary IP address for the failover. - - Required if adding or changing the failover. - type: str - - ip3: - description: - - Tertiary IP address for the failover. - type: str - - ip4: - description: - - Quaternary IP address for the failover. - type: str - - ip5: - description: - - Quinary IP address for the failover. - type: str - -notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few - seconds of actual time by using NTP. - - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'. - These values can be registered and used in your playbooks. - - Only A records can have a monitor or failover. - - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required. - - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
- - The monitor and the failover will share 'port', 'protocol', and 'ip1' options. - -requirements: [ hashlib, hmac ] -author: "Brice Burgess (@briceburg)" -''' - -EXAMPLES = ''' -- name: Fetch my.com domain records - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - register: response - -- name: Create a record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - -- name: Update the previously created record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_value: 192.0.2.23 - -- name: Fetch a specific record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - register: response - -- name: Delete a record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - record_type: A - state: absent - record_name: test - -- name: Add a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - -- name: Add a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - ip3: 127.0.0.4 - ip4: 127.0.0.5 - ip5: 127.0.0.6 - -- name: Add a monitor - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: yes - ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default - maxEmails: 1 - systemDescription: Monitor Test A record - contactList: my contact list - -- name: Add a monitor with http options - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: yes - ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default - maxEmails: 1 - systemDescription: Monitor Test A record - contactList: 1174 # contact list id - httpFqdn: http://my.com - httpFile: example - httpQueryString: some string - -- name: Add a monitor and a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - monitor: yes - protocol: HTTPS - port: 443 - maxEmails: 1 - systemDescription: monitoring my.com status - contactList: emergencycontacts - -- name: Remove a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: no - -- name: Remove a monitor - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: no -''' - -# ============================================ -# DNSMadeEasy module specific support methods. 
-# - -import json -import hashlib -import hmac -import locale -from time import strftime, gmtime - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six import string_types - - -class DME2(object): - - def __init__(self, apikey, secret, domain, sandbox, module): - self.module = module - - self.api = apikey - self.secret = secret - - if sandbox: - self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' - self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl) - else: - self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' - - self.domain = str(domain) - self.domain_map = None # ["domain_name"] => ID - self.record_map = None # ["record_name"] => ID - self.records = None # ["record_ID"] => - self.all_records = None - self.contactList_map = None # ["contactList_name"] => ID - - # Lookup the domain ID if passed as a domain name vs. ID - if not self.domain.isdigit(): - self.domain = self.getDomainByName(self.domain)['id'] - - self.record_url = 'dns/managed/' + str(self.domain) + '/records' - self.monitor_url = 'monitor' - self.contactList_url = 'contactList' - - def _headers(self): - currTime = self._get_date() - hashstring = self._create_hash(currTime) - headers = {'x-dnsme-apiKey': self.api, - 'x-dnsme-hmac': hashstring, - 'x-dnsme-requestDate': currTime, - 'content-type': 'application/json'} - return headers - - def _get_date(self): - locale.setlocale(locale.LC_TIME, 'C') - return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) - - def _create_hash(self, rightnow): - return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() - - def query(self, resource, method, data=None): - url = self.baseurl + resource - if data and not isinstance(data, string_types): - data = urlencode(data) - - response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) - if info['status'] not in (200, 201, 204): - self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) - - try: - return json.load(response) - except Exception: - return {} - - def getDomain(self, domain_id): - if not self.domain_map: - self._instMap('domain') - - return self.domains.get(domain_id, False) - - def getDomainByName(self, domain_name): - if not self.domain_map: - self._instMap('domain') - - return self.getDomain(self.domain_map.get(domain_name, 0)) - - def getDomains(self): - return self.query('dns/managed', 'GET')['data'] - - def getRecord(self, record_id): - if not self.record_map: - self._instMap('record') - - return self.records.get(record_id, False) - - # Try to find a single record matching this one. - # How we do this depends on the type of record. For instance, there - # can be several MX records for a single record_name while there can - # only be a single CNAME for a particular record_name. Note also that - # there can be several records with different types for a single name. 
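For context, the _headers() and _create_hash() methods above implement DNS Made Easy's request signing: every request carries an x-dnsme-hmac header holding an HMAC-SHA1 of the request date string, keyed with the account secret. A minimal standalone sketch of that scheme, assuming only the standard library (the function name and the key/secret values are placeholders, not part of the module):

    import hashlib
    import hmac
    from time import gmtime, strftime

    def dme_headers(api_key, secret):
        # DNS Made Easy expects the date in RFC 1123 format, in GMT; the module
        # also pins LC_TIME to the C locale so day/month names render in English.
        request_date = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime())
        signature = hmac.new(secret.encode(), request_date.encode(), hashlib.sha1).hexdigest()
        return {
            'x-dnsme-apiKey': api_key,
            'x-dnsme-hmac': signature,
            'x-dnsme-requestDate': request_date,
            'content-type': 'application/json',
        }

A clock that drifts more than a few seconds breaks this signature check, which is why the module notes insist on NTP.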
- def getMatchingRecord(self, record_name, record_type, record_value): - # Get all the records if not already cached - if not self.all_records: - self.all_records = self.getRecords() - - if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]: - for result in self.all_records: - if result['name'] == record_name and result['type'] == record_type: - return result - return False - elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]: - for result in self.all_records: - if record_type == "MX": - value = record_value.split(" ")[1] - # Note that TXT records are surrounded by quotes in the API response. - elif record_type == "TXT": - value = '"{0}"'.format(record_value) - elif record_type == "SRV": - value = record_value.split(" ")[3] - else: - value = record_value - if result['name'] == record_name and result['type'] == record_type and result['value'] == value: - return result - return False - else: - raise Exception('record_type not yet supported') - - def getRecords(self): - return self.query(self.record_url, 'GET')['data'] - - def _instMap(self, type): - # @TODO cache this call so it's executed only once per ansible execution - map = {} - results = {} - - # iterate over e.g. self.getDomains() || self.getRecords() - for result in getattr(self, 'get' + type.title() + 's')(): - - map[result['name']] = result['id'] - results[result['id']] = result - - # e.g. self.domain_map || self.record_map - setattr(self, type + '_map', map) - setattr(self, type + 's', results) # e.g. self.domains || self.records - - def prepareRecord(self, data): - return json.dumps(data, separators=(',', ':')) - - def createRecord(self, data): - # @TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url, 'POST', data) - - def updateRecord(self, record_id, data): - # @TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url + '/' + str(record_id), 'PUT', data) - - def deleteRecord(self, record_id): - # @TODO remove record from the cache when implemented - return self.query(self.record_url + '/' + str(record_id), 'DELETE') - - def getMonitor(self, record_id): - return self.query(self.monitor_url + '/' + str(record_id), 'GET') - - def updateMonitor(self, record_id, data): - return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data) - - def prepareMonitor(self, data): - return json.dumps(data, separators=(',', ':')) - - def getContactList(self, contact_list_id): - if not self.contactList_map: - self._instMap('contactList') - - return self.contactLists.get(contact_list_id, False) - - def getContactlists(self): - return self.query(self.contactList_url, 'GET')['data'] - - def getContactListByName(self, name): - if not self.contactList_map: - self._instMap('contactList') - - return self.getContactList(self.contactList_map.get(name, 0)) - -# =========================================== -# Module execution.
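getMatchingRecord() above has to normalize the requested value before it can compare it against what the API returns; condensed into a sketch (the helper name is illustrative, not part of the module):

    def normalize_value(record_type, record_value):
        # MX values arrive as '<priority> <target>'; the record itself stores
        # only the target, the priority lives in a separate field.
        if record_type == 'MX':
            return record_value.split(' ')[1]
        # SRV values are '<priority> <weight> <port> <target>'; match on target.
        if record_type == 'SRV':
            return record_value.split(' ')[3]
        # The API returns TXT values wrapped in double quotes.
        if record_type == 'TXT':
            return '"{0}"'.format(record_value)
        return record_value

Singleton types such as CNAME, ANAME, HTTPRED, and PTR skip value matching entirely, since only one record of that type can exist per name.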
-# - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_key=dict(required=True, no_log=True), - account_secret=dict(required=True, no_log=True), - domain=dict(required=True), - sandbox=dict(default=False, type='bool'), - state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - record_type=dict(required=False, choices=[ - 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), - monitor=dict(default=False, type='bool'), - systemDescription=dict(default=''), - maxEmails=dict(default=1, type='int'), - protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), - port=dict(default=80, type='int'), - sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), - contactList=dict(default=None), - httpFqdn=dict(required=False), - httpFile=dict(required=False), - httpQueryString=dict(required=False), - failover=dict(default=False, type='bool'), - autoFailover=dict(default=False, type='bool'), - ip1=dict(required=False), - ip2=dict(required=False), - ip3=dict(required=False), - ip4=dict(required=False), - ip5=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - required_together=[ - ['record_value', 'record_ttl', 'record_type'] - ], - required_if=[ - ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], - ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] - ] - ) - - protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) - sensitivities = dict(Low=8, Medium=5, High=3) - - DME = DME2(module.params["account_key"], module.params[ - "account_secret"], module.params["domain"], module.params["sandbox"], module) - state = module.params["state"] - record_name = module.params["record_name"] - record_type = module.params["record_type"] - record_value = module.params["record_value"] - - # Follow Keyword Controlled Behavior - if record_name is None: - domain_records = DME.getRecords() - if not domain_records: - module.fail_json( - msg="The requested domain name is not accessible with this api_key; try using its ID if known.") - module.exit_json(changed=False, result=domain_records) - - # Fetch existing record + Build new one - current_record = DME.getMatchingRecord(record_name, record_type, record_value) - new_record = {'name': record_name} - for i in ["record_value", "record_type", "record_ttl"]: - if not module.params[i] is None: - new_record[i[len("record_"):]] = module.params[i] - # Special handling for mx record - if new_record["type"] == "MX": - new_record["mxLevel"] = new_record["value"].split(" ")[0] - new_record["value"] = new_record["value"].split(" ")[1] - - # Special handling for SRV records - if new_record["type"] == "SRV": - new_record["priority"] = new_record["value"].split(" ")[0] - new_record["weight"] = new_record["value"].split(" ")[1] - new_record["port"] = new_record["value"].split(" ")[2] - new_record["value"] = new_record["value"].split(" ")[3] - - # Fetch existing monitor if the A record indicates it should exist and build the new monitor - current_monitor = dict() - new_monitor = dict() - if current_record and current_record['type'] == 'A': - current_monitor = DME.getMonitor(current_record['id']) - - # Build the new monitor - for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', - 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString', - 'failover', 
'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: - if module.params[i] is not None: - if i == 'protocol': - # The API requires protocol to be a numeric in the range 1-6 - new_monitor['protocolId'] = protocols[module.params[i]] - elif i == 'sensitivity': - # The API requires sensitivity to be a numeric of 8, 5, or 3 - new_monitor[i] = sensitivities[module.params[i]] - elif i == 'contactList': - # The module accepts either the name or the id of the contact list - contact_list_id = module.params[i] - if not contact_list_id.isdigit() and contact_list_id != '': - contact_list = DME.getContactListByName(contact_list_id) - if not contact_list: - module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id)) - contact_list_id = contact_list.get('id', '') - new_monitor['contactListId'] = contact_list_id - else: - # The module option names match the API field names - new_monitor[i] = module.params[i] - - # Compare new record against existing one - record_changed = False - if current_record: - for i in new_record: - # Remove leading and trailing quote character from values because TXT records - # are surrounded by quotes. - if str(current_record[i]).strip('"') != str(new_record[i]): - record_changed = True - new_record['id'] = str(current_record['id']) - - monitor_changed = False - if current_monitor: - for i in new_monitor: - if str(current_monitor.get(i)) != str(new_monitor[i]): - monitor_changed = True - - # Follow Keyword Controlled Behavior - if state == 'present': - # return the record if no value is specified - if "value" not in new_record: - if not current_record: - module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) - module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - - # create record and monitor as the record does not exist - if not current_record: - record = DME.createRecord(DME.prepareRecord(new_record)) - if new_monitor.get('monitor') and record_type == "A": - monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) - module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) - else: - module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor)) - - # update the record - updated = False - if record_changed: - DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) - updated = True - if monitor_changed: - DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) - updated = True - if updated: - module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor)) - - # return the record (no changes) - module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - - elif state == 'absent': - changed = False - # delete the record (and the monitor/failover) if it exists - if current_record: - DME.deleteRecord(current_record['id']) - module.exit_json(changed=True) - - # record does not exist, return w/o change. 
- module.exit_json(changed=changed) - - else: - module.fail_json( - msg="'%s' is an unknown value for the state argument" % state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/gandi_livedns.py b/plugins/modules/net_tools/gandi_livedns.py deleted file mode 100644 index 6124288511..0000000000 --- a/plugins/modules/net_tools/gandi_livedns.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019 Gregory Thiemonge -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gandi_livedns -author: -- Gregory Thiemonge (@gthiemonge) -version_added: "2.3.0" -short_description: Manage Gandi LiveDNS records -description: -- "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)." -options: - api_key: - description: - - Account API token. - type: str - required: true - record: - description: - - Record to add. - type: str - required: true - state: - description: - - Whether the record(s) should exist or not. - type: str - choices: [ absent, present ] - default: present - ttl: - description: - - The TTL to give the new record. - - Required when I(state=present). - type: int - type: - description: - - The type of DNS record to create. - type: str - required: true - values: - description: - - The record values. - - Required when I(state=present). - type: list - elements: str - domain: - description: - - The name of the Domain to work with (for example, "example.com"). - required: true - type: str -notes: -- Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Create a test A record to point to 127.0.0.1 in the my.com domain - community.general.gandi_livedns: - domain: my.com - record: test - type: A - values: - - 127.0.0.1 - ttl: 7200 - api_key: dummyapitoken - register: record - -- name: Create a mail CNAME record to www.my.com domain - community.general.gandi_livedns: - domain: my.com - type: CNAME - record: mail - values: - - www - ttl: 7200 - api_key: dummyapitoken - state: present - -- name: Change its TTL - community.general.gandi_livedns: - domain: my.com - type: CNAME - record: mail - values: - - www - ttl: 10800 - api_key: dummyapitoken - state: present - -- name: Delete the record - community.general.gandi_livedns: - domain: my.com - type: CNAME - record: mail - api_key: dummyapitoken - state: absent -''' - -RETURN = r''' -record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: dict - contains: - values: - description: The record content (details depend on record type). - returned: success - type: list - elements: str - sample: - - 192.0.2.91 - - 192.0.2.92 - record: - description: The record name. - returned: success - type: str - sample: www - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - domain: - description: The domain associated with the record. 
- returned: success - type: str - sample: my.com -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), - record=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - ttl=dict(type='int'), - type=dict(type='str', required=True), - values=dict(type='list', elements='str'), - domain=dict(type='str', required=True), - ), - supports_check_mode=True, - required_if=[ - ('state', 'present', ['values', 'ttl']), - ], - ) - - gandi_api = GandiLiveDNSAPI(module) - - if module.params['state'] == 'present': - ret, changed = gandi_api.ensure_dns_record(module.params['record'], - module.params['type'], - module.params['ttl'], - module.params['values'], - module.params['domain']) - else: - ret, changed = gandi_api.delete_dns_record(module.params['record'], - module.params['type'], - module.params['values'], - module.params['domain']) - - result = dict( - changed=changed, - ) - if ret: - result['record'] = gandi_api.build_result(ret, - module.params['domain']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py deleted file mode 100644 index f736036671..0000000000 --- a/plugins/modules/net_tools/haproxy.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Ravi Bhure -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: haproxy -short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands -author: -- Ravi Bhure (@ravibhure) -description: - - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. -notes: - - Enable, disable and drain commands are restricted and can only be issued on - sockets configured for level 'admin'. For example, you can add the line - 'stats socket /var/run/haproxy.sock level admin' to the general section of - haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). - - Depends on netcat (nc) being available; you need to install the appropriate - package for your operating system before this module can be used. -options: - backend: - description: - - Name of the HAProxy backend pool. - - If this parameter is unset, it will be auto-detected. - type: str - drain: - description: - - Wait until the server has no active connections or until the timeout - determined by wait_interval and wait_retries is reached. - - Continue only after the status changes to 'MAINT'. - - This overrides the shutdown_sessions option. - type: bool - default: false - host: - description: - - Name of the backend host to change. - type: str - required: true - shutdown_sessions: - description: - - When disabling a server, immediately terminate all the sessions attached - to the specified server. - - This can be used to terminate long-running sessions after a server is put - into maintenance mode. Overridden by the drain option. - type: bool - default: no - socket: - description: - - Path to the HAProxy socket file. 
- type: path - default: /var/run/haproxy.sock - state: - description: - - Desired state of the provided backend host. - - Note that C(drain) state was added in version 2.4. - It is supported only by HAProxy version 1.5 or later. - When used on versions < 1.5, it will be ignored. - type: str - required: true - choices: [ disabled, drain, enabled ] - agent: - description: - - Disable/enable agent checks (depending on I(state) value). - type: bool - default: no - version_added: 1.0.0 - health: - description: - - Disable/enable health checks (depending on I(state) value). - type: bool - default: no - version_added: "1.0.0" - fail_on_not_found: - description: - - Fail whenever trying to enable/disable a backend host that does not exist. - type: bool - default: no - wait: - description: - - Wait until the server reports a status of 'UP' when C(state=enabled), - status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain). - type: bool - default: no - wait_interval: - description: - - Number of seconds to wait between retries. - type: int - default: 5 - wait_retries: - description: - - Number of times to check for status after changing the state. - type: int - default: 25 - weight: - description: - - The weight value to set for the backend server. - - If the value ends with the `%` sign, then the new weight will be - relative to the initially configured weight. - - Relative weights are only permitted between 0 and 100% and absolute - weights are permitted between 0 and 256. - type: str -''' - -EXAMPLES = r''' -- name: Disable server in 'www' backend pool - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - backend: www - -- name: Disable server in 'www' backend pool, also stop health/agent checks - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - health: yes - agent: yes - -- name: Disable server without backend pool name (apply to all available backend pool) - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - -- name: Disable server, provide socket file - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www - -- name: Disable server, provide socket file, wait until status reports in maintenance - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www - wait: yes - -# Place server in drain mode, providing a socket file. Then check the server's -# status every minute for up to an hour; continue once it reports maintenance -# mode, and fail otherwise.
-- community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www - wait: yes - drain: yes - wait_interval: 60 - wait_retries: 60 - -- name: Disable backend server in 'www' backend pool and drop open sessions to it - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - backend: www - socket: /var/run/haproxy.sock - shutdown_sessions: yes - -- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - fail_on_not_found: yes - -- name: Enable server in 'www' backend pool - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - backend: www - -- name: Enable server in 'www' backend pool wait until healthy - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - backend: www - wait: yes - -- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - backend: www - wait: yes - wait_retries: 10 - wait_interval: 5 - -- name: Enable server in 'www' backend pool with change server(s) weight - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - weight: 10 - backend: www - -- name: Set the server in 'www' backend pool to drain mode - community.general.haproxy: - state: drain - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www -''' - -import csv -import socket -import time -from string import Template - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" -RECV_SIZE = 1024 -ACTION_CHOICES = ['enabled', 'disabled', 'drain'] -WAIT_RETRIES = 25 -WAIT_INTERVAL = 5 - - -###################################################################### -class TimeoutException(Exception): - pass - - -class HAProxy(object): - """ - Used for communicating with HAProxy through its local UNIX socket interface. - Perform common tasks in Haproxy related to enable server and - disable server. - - The complete set of external commands Haproxy handles is documented - on their website: - - http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands - """ - - def __init__(self, module): - self.module = module - - self.state = self.module.params['state'] - self.host = self.module.params['host'] - self.backend = self.module.params['backend'] - self.weight = self.module.params['weight'] - self.socket = self.module.params['socket'] - self.shutdown_sessions = self.module.params['shutdown_sessions'] - self.fail_on_not_found = self.module.params['fail_on_not_found'] - self.agent = self.module.params['agent'] - self.health = self.module.params['health'] - self.wait = self.module.params['wait'] - self.wait_retries = self.module.params['wait_retries'] - self.wait_interval = self.module.params['wait_interval'] - self._drain = self.module.params['drain'] - self.command_results = {} - - def execute(self, cmd, timeout=200, capture_output=True): - """ - Executes a HAProxy command by sending a message to a HAProxy's local - UNIX socket and waiting up to 'timeout' milliseconds for the response. 
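The socket exchange that execute() performs in the body below can be reproduced outside the module in a few lines; a hedged sketch, assuming the module's default socket path (the command strings are examples only):

    import socket

    def haproxy_cmd(cmd, socket_path='/var/run/haproxy.sock'):
        # Send one newline-terminated command, then read until HAProxy
        # closes its end of the connection.
        client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        client.connect(socket_path)
        client.sendall(('%s\n' % cmd).encode())
        chunks = []
        buf = client.recv(1024)
        while buf:
            chunks.append(buf)
            buf = client.recv(1024)
        client.close()
        return b''.join(chunks).decode()

    # e.g. haproxy_cmd('show stat') or haproxy_cmd('disable server www/web01')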
- """ - self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.client.connect(self.socket) - self.client.sendall(to_bytes('%s\n' % cmd)) - - result = b'' - buf = b'' - buf = self.client.recv(RECV_SIZE) - while buf: - result += buf - buf = self.client.recv(RECV_SIZE) - result = to_text(result, errors='surrogate_or_strict') - - if capture_output: - self.capture_command_output(cmd, result.strip()) - self.client.close() - return result - - def capture_command_output(self, cmd, output): - """ - Capture the output for a command - """ - if 'command' not in self.command_results: - self.command_results['command'] = [] - self.command_results['command'].append(cmd) - if 'output' not in self.command_results: - self.command_results['output'] = [] - self.command_results['output'].append(output) - - def discover_all_backends(self): - """ - Discover all entries with svname = 'BACKEND' and return a list of their corresponding - pxnames - """ - data = self.execute('show stat', 200, False).lstrip('# ') - r = csv.DictReader(data.splitlines()) - return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) - - def discover_version(self): - """ - Attempt to extract the haproxy version. - Return a tuple containing major and minor version. - """ - data = self.execute('show info', 200, False) - lines = data.splitlines() - line = [x for x in lines if 'Version:' in x] - try: - version_values = line[0].partition(':')[2].strip().split('.', 3) - version = (int(version_values[0]), int(version_values[1])) - except (ValueError, TypeError, IndexError): - version = None - - return version - - def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None): - """ - Run some command on the specified backends. If no backends are provided they will - be discovered automatically (all backends) - """ - # Discover backends if none are given - if pxname is None: - backends = self.discover_all_backends() - else: - backends = [pxname] - - # Run the command for each requested backend - for backend in backends: - # Fail when backends were not found - state = self.get_state_for(backend, svname) - if (self.fail_on_not_found) and state is None: - self.module.fail_json( - msg="The specified backend '%s/%s' was not found!" % (backend, svname)) - - if state is not None: - self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) - if self.wait: - self.wait_until_status(backend, svname, wait_for_status) - - def get_state_for(self, pxname, svname): - """ - Find the state of specific services. When pxname is not set, get all backends for a specific host. - Returns a list of dictionaries containing the status and weight for those services. - """ - data = self.execute('show stat', 200, False).lstrip('# ') - r = csv.DictReader(data.splitlines()) - state = tuple( - map( - lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']}, - filter(lambda d: (pxname is None or d['pxname'] - == pxname) and d['svname'] == svname, r) - ) - ) - return state or None - - def wait_until_status(self, pxname, svname, status): - """ - Wait for a service to reach the specified status. Try RETRIES times - with INTERVAL seconds of sleep in between. If the service has not reached - the expected status in that time, the module will fail. If the service was - not found, the module will fail. 
- """ - for i in range(1, self.wait_retries): - state = self.get_state_for(pxname, svname) - - # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here - # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching - if status in state[0]['status']: - if not self._drain or state[0]['scur'] == '0': - return True - time.sleep(self.wait_interval) - - self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % - (pxname, svname, status, self.wait_retries)) - - def enabled(self, host, backend, weight): - """ - Enabled action, marks server to UP and checks are re-enabled, - also supports to get current weight for server (default) and - set the weight for haproxy backend server when provides. - """ - cmd = "get weight $pxname/$svname; enable server $pxname/$svname" - if self.agent: - cmd += "; enable agent $pxname/$svname" - if self.health: - cmd += "; enable health $pxname/$svname" - if weight: - cmd += "; set weight $pxname/$svname %s" % weight - self.execute_for_backends(cmd, backend, host, 'UP') - - def disabled(self, host, backend, shutdown_sessions): - """ - Disabled action, marks server to DOWN for maintenance. In this mode, no more checks will be - performed on the server until it leaves maintenance, - also it shutdown sessions while disabling backend host server. - """ - cmd = "get weight $pxname/$svname" - if self.agent: - cmd += "; disable agent $pxname/$svname" - if self.health: - cmd += "; disable health $pxname/$svname" - cmd += "; disable server $pxname/$svname" - if shutdown_sessions: - cmd += "; shutdown sessions server $pxname/$svname" - self.execute_for_backends(cmd, backend, host, 'MAINT') - - def drain(self, host, backend, status='DRAIN'): - """ - Drain action, sets the server to DRAIN mode. - In this mode, the server will not accept any new connections - other than those that are accepted via persistence. - """ - haproxy_version = self.discover_version() - - # check if haproxy version supports DRAIN state (starting with 1.5) - if haproxy_version and (1, 5) <= haproxy_version: - cmd = "set server $pxname/$svname state drain" - self.execute_for_backends(cmd, backend, host, "DRAIN") - if status == "MAINT": - self.disabled(host, backend, self.shutdown_sessions) - - def act(self): - """ - Figure out what you want to do from ansible, and then do it. 
- """ - # Get the state before the run - self.command_results['state_before'] = self.get_state_for(self.backend, self.host) - - # toggle enable/disable server - if self.state == 'enabled': - self.enabled(self.host, self.backend, self.weight) - elif self.state == 'disabled' and self._drain: - self.drain(self.host, self.backend, status='MAINT') - elif self.state == 'disabled': - self.disabled(self.host, self.backend, self.shutdown_sessions) - elif self.state == 'drain': - self.drain(self.host, self.backend) - else: - self.module.fail_json(msg="unknown state specified: '%s'" % self.state) - - # Get the state after the run - self.command_results['state_after'] = self.get_state_for(self.backend, self.host) - - # Report change status - self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after']) - - self.module.exit_json(**self.command_results) - - -def main(): - - # load ansible module object - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', required=True, choices=ACTION_CHOICES), - host=dict(type='str', required=True), - backend=dict(type='str'), - weight=dict(type='str'), - socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION), - shutdown_sessions=dict(type='bool', default=False), - fail_on_not_found=dict(type='bool', default=False), - health=dict(type='bool', default=False), - agent=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_retries=dict(type='int', default=WAIT_RETRIES), - wait_interval=dict(type='int', default=WAIT_INTERVAL), - drain=dict(type='bool', default=False), - ), - ) - - if not socket: - module.fail_json(msg="unable to locate haproxy socket") - - ansible_haproxy = HAProxy(module) - ansible_haproxy.act() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/infinity/infinity.py b/plugins/modules/net_tools/infinity/infinity.py deleted file mode 100644 index ab41f680c4..0000000000 --- a/plugins/modules/net_tools/infinity/infinity.py +++ /dev/null @@ -1,565 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' -module: infinity -short_description: Manage Infinity IPAM using Rest API -description: - - Manage Infinity IPAM using REST API. -author: - - Meirong Liu (@MeganLiu) -options: - server_ip: - description: - - Infinity server_ip with IP address. - type: str - required: true - username: - description: - - Username to access Infinity. - - The user must have REST API privileges. - type: str - required: true - password: - description: - - Infinity password. - type: str - required: true - action: - description: - - Action to perform - type: str - required: true - choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ] - network_id: - description: - - Network ID. - type: str - default: '' - ip_address: - description: - - IP Address for a reservation or a release. - type: str - default: '' - network_address: - description: - - Network address with CIDR format (e.g., 192.168.310.0). - type: str - default: '' - network_size: - description: - - Network bitmask (e.g. 255.255.255.220) or CIDR format (e.g., /26). - type: str - default: '' - network_name: - description: - - The name of a network. 
- type: str - default: '' - network_location: - description: - - The parent network id for a given network. - type: int - default: -1 - network_type: - description: - - Network type defined by Infinity - type: str - choices: [ lan, shared_lan, supernet ] - default: lan - network_family: - description: - - Network family defined by Infinity, e.g. IPv4, IPv6 and Dual stack - type: str - choices: [ 4, 6, dual ] - default: 4 -''' - -EXAMPLES = r''' ---- -- hosts: localhost - connection: local - strategy: debug - tasks: - - name: Reserve network into Infinity IPAM - community.general.infinity: - server_ip: 80.75.107.12 - username: username - password: password - action: reserve_network - network_name: reserve_new_ansible_network - network_family: 4 - network_type: lan - network_id: 1201 - network_size: /28 - register: infinity -''' - -RETURN = r''' -network_id: - description: id for a given network - returned: success - type: str - sample: '1501' -ip_info: - description: when reserve next available ip address from a network, the ip address info ) is returned. - returned: success - type: str - sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' -network_info: - description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned. - returned: success - type: str - sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102, - "network_size": null,"description": null,"network_location": "3085", - "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, - "network_type": "lan","network_name": "'reserve_new_ansible_network'"} -''' - - -from ansible.module_utils.basic import AnsibleModule, json -from ansible.module_utils.urls import open_url - - -class Infinity(object): - """ - Class for manage REST API calls with the Infinity. - """ - - def __init__(self, module, server_ip, username, password): - self.module = module - self.auth_user = username - self.auth_pass = password - self.base_url = "https://%s/rest/v1/" % (str(server_ip)) - - def _get_api_call_ansible_handler( - self, - method='get', - resource_url='', - stat_codes=None, - params=None, - payload_data=None): - """ - Perform the HTTPS request by using ansible get/delete method - """ - stat_codes = [200] if stat_codes is None else stat_codes - request_url = str(self.base_url) + str(resource_url) - response = None - headers = {'Content-Type': 'application/json'} - if not request_url: - self.module.exit_json( - msg="When sending Rest api call , the resource URL is empty, please check.") - if payload_data and not isinstance(payload_data, str): - payload_data = json.dumps(payload_data) - response_raw = open_url( - str(request_url), - method=method, - timeout=20, - headers=headers, - url_username=self.auth_user, - url_password=self.auth_pass, - validate_certs=False, - force_basic_auth=True, - data=payload_data) - - response = response_raw.read() - payload = '' - if response_raw.code not in stat_codes: - self.module.exit_json( - changed=False, - meta=" openurl response_raw.code show error and error code is %r" % - (response_raw.code)) - else: - if isinstance(response, str) and len(response) > 0: - payload = response - elif method.lower() == 'delete' and response_raw.code == 204: - payload = 'Delete is done.' 
- if isinstance(payload, dict) and "text" in payload: - self.module.exit_json( - changed=False, - meta="When calling the REST API, the returned data is not JSON") - raise Exception(payload["text"]) - return payload - - # --------------------------------------------------------------------------- - # get_network() - # --------------------------------------------------------------------------- - def get_network(self, network_id, network_name, limit=-1): - """ - Search network_name inside Infinity by using rest api - Network id or network_name needs to be provided - return the details of the network with the given network_id or name - """ - if network_name is None and network_id is None: - self.module.exit_json( - msg="You must specify one of the options 'network_name' or 'network_id'.") - method = "get" - resource_url = '' - params = {} - response = None - if network_id: - resource_url = "networks/" + str(network_id) - response = self._get_api_call_ansible_handler(method, resource_url) - if network_id is None and network_name: - method = "get" - resource_url = "search" - params = {"query": json.dumps( - {"name": network_name, "type": "network"})} - response = self._get_api_call_ansible_handler( - method, resource_url, payload_data=json.dumps(params)) - if response and isinstance(response, str): - response = json.loads(response) - if response and isinstance(response, list) and len( - response) > 1 and limit == 1: - response = response[0] - response = json.dumps(response) - return response - - # --------------------------------------------------------------------------- - # get_network_id() - # --------------------------------------------------------------------------- - def get_network_id(self, network_name="", network_type='lan'): - """ - query network_id from Infinity via rest api based on given network_name - """ - method = 'get' - resource_url = 'search' - response = None - if network_name is None: - self.module.exit_json( - msg="You must specify the option 'network_name'") - params = {"query": json.dumps( - {"name": network_name, "type": "network"})} - response = self._get_api_call_ansible_handler( - method, resource_url, payload_data=json.dumps(params)) - network_id = "" - if response and isinstance(response, str): - response = json.loads(response) - if response and isinstance(response, list): - response = response[0] - network_id = response['id'] - return network_id - - # --------------------------------------------------------------------------- - # reserve_next_available_ip() - # --------------------------------------------------------------------------- - def reserve_next_available_ip(self, network_id=""): - """ - Reserve ip address via Infinity by using rest api - network_id: the id of the network to reserve the next available ip address from - return the next available ip address from that given network - """ - method = "post" - resource_url = '' - response = None - ip_info = '' - if not network_id: - self.module.exit_json( - msg="You must specify the option 'network_id'.") - if network_id: - resource_url = "networks/" + str(network_id) + "/reserve_ip" - response = self._get_api_call_ansible_handler(method, resource_url) - if response and response.find( - "[") >= 0 and response.find("]") >= 0: - start_pos = response.find("{") - end_pos = response.find("}") - ip_info = response[start_pos: (end_pos + 1)] - return ip_info - - # ------------------------- - # release_ip() - # ------------------------- - def release_ip(self, network_id="", ip_address=""): - """ - Release an ip address via Infinity by
using rest api - """ - method = "get" - resource_url = '' - response = None - if ip_address is None or network_id is None: - self.module.exit_json( - msg="You must specify those two options: 'network_id' and 'ip_address'.") - - resource_url = "networks/" + str(network_id) + "/children" - response = self._get_api_call_ansible_handler(method, resource_url) - if not response: - self.module.exit_json( - msg="There is an error in release ip %s from network %s." % - (ip_address, network_id)) - - ip_list = json.loads(response) - ip_idlist = [] - for ip_item in ip_list: - ip_id = ip_item['id'] - ip_idlist.append(ip_id) - deleted_ip_id = '' - for ip_id in ip_idlist: - ip_response = '' - resource_url = "ip_addresses/" + str(ip_id) - ip_response = self._get_api_call_ansible_handler( - method, - resource_url, - stat_codes=[200]) - if ip_response and json.loads( - ip_response)['address'] == str(ip_address): - deleted_ip_id = ip_id - break - if deleted_ip_id: - method = 'delete' - resource_url = "ip_addresses/" + str(deleted_ip_id) - response = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[204]) - else: - self.module.exit_json( - msg=" When release ip, could not find the ip address %r from the given network %r' ." % - (ip_address, network_id)) - - return response - - # ------------------- - # delete_network() - # ------------------- - def delete_network(self, network_id="", network_name=""): - """ - delete network from Infinity by using rest api - """ - method = 'delete' - resource_url = '' - response = None - if network_id is None and network_name is None: - self.module.exit_json( - msg="You must specify one of those options: 'network_id','network_name' .") - if network_id is None and network_name: - network_id = self.get_network_id(network_name=network_name) - if network_id: - resource_url = "networks/" + str(network_id) - response = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[204]) - return response - - # reserve_network() - # --------------------------------------------------------------------------- - def reserve_network(self, network_id="", - reserved_network_name="", reserved_network_description="", - reserved_network_size="", reserved_network_family='4', - reserved_network_type='lan', reserved_network_address="",): - """ - Reserves the first available network of specified size from a given supernet -
network_name (required)
Name of the network
-
description (optional)
Free-form description
-
network_family (required)
Address family of the network. One of '4', '6' or 'dual' (the values accepted by the module's network_family option)
-
network_address (optional)
Address of the new network. If not given, the first available network will be created.
-
network_size (required)
Size of the new network in /<prefix> notation.
-
network_type (required)
Type of network. One of 'supernet', 'lan', 'shared_lan'
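-
-        Illustrative example (the network id and names below are hypothetical):
-        reserve_network(network_id=42, reserved_network_name='lan-01',
-        reserved_network_size='/24') sends a POST to
-        'networks/42/reserve_network' with a payload built from the arguments.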
- - """ - method = 'post' - resource_url = '' - network_info = None - if network_id is None or reserved_network_name is None or reserved_network_size is None: - self.module.exit_json( - msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'") - if network_id: - resource_url = "networks/" + str(network_id) + "/reserve_network" - if not reserved_network_family: - reserved_network_family = '4' - if not reserved_network_type: - reserved_network_type = 'lan' - payload_data = { - "network_name": reserved_network_name, - 'description': reserved_network_description, - 'network_size': reserved_network_size, - 'network_family': reserved_network_family, - 'network_type': reserved_network_type, - 'network_location': int(network_id)} - if reserved_network_address: - payload_data.update({'network_address': reserved_network_address}) - - network_info = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[200, 201], payload_data=payload_data) - - return network_info - - # --------------------------------------------------------------------------- - # release_network() - # --------------------------------------------------------------------------- - def release_network( - self, - network_id="", - released_network_name="", - released_network_type='lan'): - """ - Release the network with name 'released_network_name' from the given supernet network_id - """ - method = 'get' - response = None - if network_id is None or released_network_name is None: - self.module.exit_json( - msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'") - matched_network_id = "" - resource_url = "networks/" + str(network_id) + "/children" - response = self._get_api_call_ansible_handler(method, resource_url) - if not response: - self.module.exit_json( - msg=" there is an error in releasing network %r from network %s." 
-                (released_network_name, network_id))
-        if response:
-            response = json.loads(response)
-            for child_net in response:
-                if child_net['network'] and child_net['network']['network_name'] == released_network_name:
-                    matched_network_id = child_net['network']['network_id']
-                    break
-        response = None
-        if matched_network_id:
-            method = 'delete'
-            resource_url = "networks/" + str(matched_network_id)
-            response = self._get_api_call_ansible_handler(
-                method, resource_url, stat_codes=[204])
-        else:
-            self.module.exit_json(
-                msg="When releasing a network, could not find the network %r in the given supernet %r." %
-                (released_network_name, network_id))
-
-        return response
-
-    # ---------------------------------------------------------------------------
-    # add_network()
-    # ---------------------------------------------------------------------------
-    def add_network(
-            self, network_name="", network_address="",
-            network_size="", network_family='4',
-            network_type='lan', network_location=-1):
-        """
-        Add a new LAN network into a given supernet on Fusionlayer Infinity via the rest api, or into the default supernet
-        required fields=['network_name', 'network_family', 'network_type', 'network_address', 'network_size']
-        """
-        method = 'post'
-        resource_url = 'networks'
-        response = None
-        if network_name is None or network_address is None or network_size is None:
-            self.module.exit_json(
-                msg="You must specify these options: 'network_name', 'network_address' and 'network_size'.")
-
-        if not network_family:
-            network_family = '4'
-        if not network_type:
-            network_type = 'lan'
-        if not network_location:
-            network_location = -1
-        payload_data = {
-            "network_name": network_name,
-            'network_address': network_address,
-            'network_size': network_size,
-            'network_family': network_family,
-            'network_type': network_type,
-            'network_location': network_location}
-        response = self._get_api_call_ansible_handler(
-            method='post', resource_url=resource_url,
-            stat_codes=[200], payload_data=payload_data)
-        return response
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            server_ip=dict(type='str', required=True),
-            username=dict(type='str', required=True),
-            password=dict(type='str', required=True, no_log=True),
-            network_id=dict(type='str'),
-            ip_address=dict(type='str'),
-            network_name=dict(type='str'),
-            network_location=dict(type='int', default=-1),
-            network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
-            network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
-            network_address=dict(type='str'),
-            network_size=dict(type='str'),
-            action=dict(type='str', required=True, choices=[
-                'add_network',
-                'delete_network',
-                'get_network',
-                'get_network_id',
-                'release_ip',
-                'release_network',
-                'reserve_network',
-                'reserve_next_available_ip',
-            ],),
-        ),
-        required_together=(
-            ['username', 'password'],
-        ),
-    )
-    server_ip = module.params["server_ip"]
-    username = module.params["username"]
-    password = module.params["password"]
-    action = module.params["action"]
-    network_id = module.params["network_id"]
-    released_ip = module.params["ip_address"]
-    network_name = module.params["network_name"]
-    network_family = module.params["network_family"]
-    network_type = module.params["network_type"]
-    network_address = module.params["network_address"]
-    network_size = module.params["network_size"]
-    network_location = module.params["network_location"]
-    my_infinity = Infinity(module, server_ip, username, password)
-    result = ''
-    if action == "reserve_next_available_ip":
-        if 
network_id: - result = my_infinity.reserve_next_available_ip(network_id) - if not result: - result = 'There is an error in calling method of reserve_next_available_ip' - module.exit_json(changed=False, meta=result) - module.exit_json(changed=True, meta=result) - elif action == "release_ip": - if network_id and released_ip: - result = my_infinity.release_ip( - network_id=network_id, ip_address=released_ip) - module.exit_json(changed=True, meta=result) - elif action == "delete_network": - result = my_infinity.delete_network( - network_id=network_id, network_name=network_name) - module.exit_json(changed=True, meta=result) - - elif action == "get_network_id": - result = my_infinity.get_network_id( - network_name=network_name, network_type=network_type) - module.exit_json(changed=True, meta=result) - elif action == "get_network": - result = my_infinity.get_network( - network_id=network_id, network_name=network_name) - module.exit_json(changed=True, meta=result) - elif action == "reserve_network": - result = my_infinity.reserve_network( - network_id=network_id, - reserved_network_name=network_name, - reserved_network_size=network_size, - reserved_network_family=network_family, - reserved_network_type=network_type, - reserved_network_address=network_address) - module.exit_json(changed=True, meta=result) - elif action == "release_network": - result = my_infinity.release_network( - network_id=network_id, - released_network_name=network_name, - released_network_type=network_type) - module.exit_json(changed=True, meta=result) - - elif action == "add_network": - result = my_infinity.add_network( - network_name=network_name, - network_location=network_location, - network_address=network_address, - network_size=network_size, - network_family=network_family, - network_type=network_type) - - module.exit_json(changed=True, meta=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/net_tools/ip_netns.py deleted file mode 100644 index 700f0a17bd..0000000000 --- a/plugins/modules/net_tools/ip_netns.py +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Arie Bregman -# -# This file is a module for Ansible that interacts with Network Manager -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ip_netns -author: "Arie Bregman (@bregman-arie)" -short_description: Manage network namespaces -requirements: [ ip ] -description: - - Create or delete network namespaces using the ip command. 
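-    - Under the hood the module runs C(ip netns add) and C(ip netns del).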
-options:
-    name:
-        required: false
-        description:
-            - Name of the namespace.
-        type: str
-    state:
-        required: false
-        default: "present"
-        choices: [ present, absent ]
-        description:
-            - Whether the namespace should exist.
-        type: str
-'''
-
-EXAMPLES = '''
-- name: Create a namespace named mario
-  community.general.ip_netns:
-    name: mario
-    state: present
-
-- name: Delete a namespace named luigi
-  community.general.ip_netns:
-    name: luigi
-    state: absent
-'''
-
-RETURN = '''
-# Default return values
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-
-
-class Namespace(object):
-    """Interface to network namespaces. """
-
-    def __init__(self, module):
-        self.module = module
-        self.name = module.params['name']
-        self.state = module.params['state']
-
-    def _netns(self, command):
-        '''Run an ip netns command'''
-        return self.module.run_command(['ip', 'netns'] + command)
-
-    def exists(self):
-        '''Check if the namespace already exists'''
-        rc, out, err = self.module.run_command('ip netns list')
-        if rc != 0:
-            self.module.fail_json(msg=to_text(err))
-        return self.name in out
-
-    def add(self):
-        '''Create network namespace'''
-        rtc, out, err = self._netns(['add', self.name])
-
-        if rtc != 0:
-            self.module.fail_json(msg=err)
-
-    def delete(self):
-        '''Delete network namespace'''
-        rtc, out, err = self._netns(['del', self.name])
-        if rtc != 0:
-            self.module.fail_json(msg=err)
-
-    def check(self):
-        '''Run check mode'''
-        changed = False
-
-        if self.state == 'absent' and self.exists():
-            changed = True
-        elif self.state == 'present' and not self.exists():
-            changed = True
-
-        self.module.exit_json(changed=changed)
-
-    def run(self):
-        '''Make the necessary changes'''
-        changed = False
-
-        if self.state == 'absent':
-            if self.exists():
-                self.delete()
-                changed = True
-        elif self.state == 'present':
-            if not self.exists():
-                self.add()
-                changed = True
-
-        self.module.exit_json(changed=changed)
-
-
-def main():
-    """Entry point."""
-    module = AnsibleModule(
-        argument_spec={
-            'name': {'default': None},
-            'state': {'default': 'present', 'choices': ['present', 'absent']},
-        },
-        supports_check_mode=True,
-    )
-
-    network_namespace = Namespace(module)
-    if module.check_mode:
-        network_namespace.check()
-    else:
-        network_namespace.run()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/net_tools/ipify_facts.py b/plugins/modules/net_tools/ipify_facts.py
deleted file mode 100644
index 2ae0348cb1..0000000000
--- a/plugins/modules/net_tools/ipify_facts.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2015, René Moser
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: ipify_facts
-short_description: Retrieve the public IP of your internet gateway
-description:
-  - Useful if you are behind NAT and need to know the public IP of your internet gateway.
-author:
-- René Moser (@resmo)
-options:
-  api_url:
-    description:
-      - URL of the ipify.org API service.
-      - C(?format=json) will be appended per default.
-    type: str
-    default: https://api.ipify.org/
-  timeout:
-    description:
-      - HTTP connection timeout in seconds.
-    type: int
-    default: 10
-  validate_certs:
-    description:
-      - When set to C(NO), SSL certificates will not be validated.
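-      - The option is consumed by the C(fetch_url) helper when the request is made.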
- type: bool - default: yes -notes: - - Visit https://www.ipify.org to get more information. -''' - -EXAMPLES = r''' -# Gather IP facts from ipify.org -- name: Get my public IP - community.general.ipify_facts: - -# Gather IP facts from your own ipify service endpoint with a custom timeout -- name: Get my public IP - community.general.ipify_facts: - api_url: http://api.example.com/ipify - timeout: 20 -''' - -RETURN = r''' ---- -ipify_public_ip: - description: Public IP of the internet gateway. - returned: success - type: str - sample: 1.2.3.4 -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_text - - -class IpifyFacts(object): - - def __init__(self): - self.api_url = module.params.get('api_url') - self.timeout = module.params.get('timeout') - - def run(self): - result = { - 'ipify_public_ip': None - } - (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout) - - if not response: - module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout)) - - data = json.loads(to_text(response.read())) - result['ipify_public_ip'] = data.get('ip') - return result - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_url=dict(type='str', default='https://api.ipify.org/'), - timeout=dict(type='int', default=10), - validate_certs=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - - ipify_facts = IpifyFacts().run() - ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts) - module.exit_json(**ipify_facts_result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ipinfoio_facts.py b/plugins/modules/net_tools/ipinfoio_facts.py deleted file mode 100644 index ee1d49f3ac..0000000000 --- a/plugins/modules/net_tools/ipinfoio_facts.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Aleksei Kostiuk -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ipinfoio_facts -short_description: "Retrieve IP geolocation facts of a host's IP address" -description: - - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" -author: "Aleksei Kostiuk (@akostyuk)" -options: - timeout: - description: - - HTTP connection timeout in seconds - required: false - default: 10 - type: int - http_agent: - description: - - Set http user agent - required: false - default: "ansible-ipinfoio-module/0.0.1" - type: str -notes: - - "Check http://ipinfo.io/ for more information" -''' - -EXAMPLES = ''' -# Retrieve geolocation data of a host's IP address -- name: Get IP geolocation data - community.general.ipinfoio_facts: -''' - -RETURN = ''' -ansible_facts: - description: "Dictionary of ip geolocation facts for a host's IP address" - returned: changed - type: complex - contains: - ip: - description: "Public IP address of a host" - type: str - sample: "8.8.8.8" - hostname: - description: Domain name - type: str - sample: "google-public-dns-a.google.com" - country: - description: ISO 3166-1 alpha-2 country code - type: str - sample: "US" - region: - description: State or province name - type: str - sample: "California" - city: - description: City name - type: str - sample: 
"Mountain View" - loc: - description: Latitude and Longitude of the location - type: str - sample: "37.3860,-122.0838" - org: - description: "organization's name" - type: str - sample: "AS3356 Level 3 Communications, Inc." - postal: - description: Postal code - type: str - sample: "94035" -''' -from ansible.module_utils.basic import AnsibleModule - -from ansible.module_utils.urls import fetch_url - - -USER_AGENT = 'ansible-ipinfoio-module/0.0.1' - - -class IpinfoioFacts(object): - - def __init__(self, module): - self.url = 'https://ipinfo.io/json' - self.timeout = module.params.get('timeout') - self.module = module - - def get_geo_data(self): - response, info = fetch_url(self.module, self.url, force=True, # NOQA - timeout=self.timeout) - try: - info['status'] == 200 - except AssertionError: - self.module.fail_json(msg='Could not get {0} page, ' - 'check for connectivity!'.format(self.url)) - else: - try: - content = response.read() - result = self.module.from_json(content.decode('utf8')) - except ValueError: - self.module.fail_json( - msg='Failed to parse the ipinfo.io response: ' - '{0} {1}'.format(self.url, content)) - else: - return result - - -def main(): - module = AnsibleModule( # NOQA - argument_spec=dict( - http_agent=dict(default=USER_AGENT), - timeout=dict(type='int', default=10), - ), - supports_check_mode=True, - ) - - ipinfoio = IpinfoioFacts(module) - ipinfoio_result = dict( - changed=False, ansible_facts=ipinfoio.get_geo_data()) - module.exit_json(**ipinfoio_result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ipwcli_dns.py b/plugins/modules/net_tools/ipwcli_dns.py deleted file mode 100644 index 8a6122edff..0000000000 --- a/plugins/modules/net_tools/ipwcli_dns.py +++ /dev/null @@ -1,349 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Christian Wollinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ipwcli_dns - -short_description: Manage DNS Records for Ericsson IPWorks via ipwcli - -version_added: '0.2.0' - -description: - - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records." - -requirements: - - ipwcli (installed on Ericsson IPWorks) - -notes: - - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. - -options: - dnsname: - description: - - Name of the record. - required: true - type: str - type: - description: - - Type of the record. - required: true - type: str - choices: [ NAPTR, SRV, A, AAAA ] - container: - description: - - Sets the container zone for the record. - required: true - type: str - address: - description: - - The IP address for the A or AAAA record. - - Required for C(type=A) or C(type=AAAA) - type: str - ttl: - description: - - Sets the TTL of the record. - type: int - default: 3600 - state: - description: - - Whether the record should exist or not. - type: str - choices: [ absent, present ] - default: present - priority: - description: - - Sets the priority of the SRV record. - type: int - default: 10 - weight: - description: - - Sets the weight of the SRV record. - type: int - default: 10 - port: - description: - - Sets the port of the SRV record. - - Required for C(type=SRV) - type: int - target: - description: - - Sets the target of the SRV record. 
- - Required for C(type=SRV) - type: str - order: - description: - - Sets the order of the NAPTR record. - - Required for C(type=NAPTR) - type: int - preference: - description: - - Sets the preference of the NAPTR record. - - Required for C(type=NAPTR) - type: int - flags: - description: - - Sets one of the possible flags of NAPTR record. - - Required for C(type=NAPTR) - type: str - choices: ['S', 'A', 'U', 'P'] - service: - description: - - Sets the service of the NAPTR record. - - Required for C(type=NAPTR) - type: str - replacement: - description: - - Sets the replacement of the NAPTR record. - - Required for C(type=NAPTR) - type: str - username: - description: - - Username to login on ipwcli. - type: str - required: true - password: - description: - - Password to login on ipwcli. - type: str - required: true - -author: - - Christian Wollinger (@cwollinger) -''' - -EXAMPLES = ''' -- name: Create A record - community.general.ipwcli_dns: - dnsname: example.com - type: A - container: ZoneOne - address: 127.0.0.1 - -- name: Remove SRV record if exists - community.general.ipwcli_dns: - dnsname: _sip._tcp.test.example.com - type: SRV - container: ZoneOne - ttl: 100 - state: absent - target: example.com - port: 5060 - -- name: Create NAPTR record - community.general.ipwcli_dns: - dnsname: test.example.com - type: NAPTR - preference: 10 - container: ZoneOne - ttl: 100 - order: 10 - service: 'SIP+D2T' - replacement: '_sip._tcp.test.example.com.' - flags: S -''' - -RETURN = ''' -record: - description: The created record from the input params - type: str - returned: always -''' - -from ansible.module_utils.basic import AnsibleModule -import os - - -class ResourceRecord(object): - - def __init__(self, module): - self.module = module - self.dnsname = module.params['dnsname'] - self.dnstype = module.params['type'] - self.container = module.params['container'] - self.address = module.params['address'] - self.ttl = module.params['ttl'] - self.state = module.params['state'] - self.priority = module.params['priority'] - self.weight = module.params['weight'] - self.port = module.params['port'] - self.target = module.params['target'] - self.order = module.params['order'] - self.preference = module.params['preference'] - self.flags = module.params['flags'] - self.service = module.params['service'] - self.replacement = module.params['replacement'] - self.user = module.params['username'] - self.password = module.params['password'] - - def create_naptrrecord(self): - # create NAPTR record with the given params - record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"' - % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement)) - return record - - def create_srvrecord(self): - # create SRV record with the given params - record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s' - % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target)) - return record - - def create_arecord(self): - # create A record with the given params - if self.dnstype == 'AAAA': - record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) - else: - record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) - - return record - - def list_record(self, record): - # check if the record exists via list on ipwcli - search = 'list %s' % (record.replace(';', 
'&&').replace('set', 'where')) - cmd = [ - self.module.get_bin_path('ipwcli', True), - '-user=%s' % self.user, - '-password=%s' % self.password, - ] - rc, out, err = self.module.run_command(cmd, data=search) - - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') - - if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or - ('NAPTRRecord %s' % self.dnsname in out and rc == 0)): - return True, rc, out, err - - return False, rc, out, err - - def deploy_record(self, record): - # check what happens if create fails on ipworks - stdin = 'create %s' % (record) - cmd = [ - self.module.get_bin_path('ipwcli', True), - '-user=%s' % self.user, - '-password=%s' % self.password, - ] - rc, out, err = self.module.run_command(cmd, data=stdin) - - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') - - if '1 object(s) created.' in out: - return rc, out, err - else: - self.module.fail_json(msg='record creation failed', stderr=out) - - def delete_record(self, record): - # check what happens if create fails on ipworks - stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where')) - cmd = [ - self.module.get_bin_path('ipwcli', True), - '-user=%s' % self.user, - '-password=%s' % self.password, - ] - rc, out, err = self.module.run_command(cmd, data=stdin) - - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') - - if '1 object(s) were updated.' in out: - return rc, out, err - else: - self.module.fail_json(msg='record deletion failed', stderr=out) - - -def run_module(): - # define available arguments/parameters a user can pass to the module - module_args = dict( - dnsname=dict(type='str', required=True), - type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), - container=dict(type='str', required=True), - address=dict(type='str', required=False), - ttl=dict(type='int', required=False, default=3600), - state=dict(type='str', default='present', choices=['absent', 'present']), - priority=dict(type='int', required=False, default=10), - weight=dict(type='int', required=False, default=10), - port=dict(type='int', required=False), - target=dict(type='str', required=False), - order=dict(type='int', required=False), - preference=dict(type='int', required=False), - flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']), - service=dict(type='str', required=False), - replacement=dict(type='str', required=False), - username=dict(type='str', required=True), - password=dict(type='str', required=True, no_log=True) - ) - - # define result - result = dict( - changed=False, - stdout='', - stderr='', - rc=0, - record='' - ) - - # supports check mode - module = AnsibleModule( - argument_spec=module_args, - required_if=[ - ['type', 'A', ['address']], - ['type', 'AAAA', ['address']], - ['type', 'SRV', ['port', 'target']], - ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']], - ], - supports_check_mode=True - ) - - user = ResourceRecord(module) - - if user.dnstype == 'NAPTR': - record = user.create_naptrrecord() - elif user.dnstype == 'SRV': - record = user.create_srvrecord() - elif user.dnstype == 'A' or user.dnstype == 'AAAA': - record = user.create_arecord() - - found, rc, out, err = user.list_record(record) - - if found and user.state == 'absent': - if module.check_mode: - 
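-            # Check mode: the record exists and would be deleted; report the
-            # pending change without calling ipwcli.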
module.exit_json(changed=True) - rc, out, err = user.delete_record(record) - result['changed'] = True - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - elif not found and user.state == 'present': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = user.deploy_record(record) - result['changed'] = True - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - else: - result['changed'] = False - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ldap/ldap_attrs.py b/plugins/modules/net_tools/ldap/ldap_attrs.py deleted file mode 100644 index c357a83087..0000000000 --- a/plugins/modules/net_tools/ldap/ldap_attrs.py +++ /dev/null @@ -1,318 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Maciej Delmanowski -# Copyright: (c) 2017, Alexander Korinek -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ldap_attrs -short_description: Add or remove multiple LDAP attribute values -description: - - Add or remove multiple LDAP attribute values. -notes: - - This only deals with attributes on existing entries. To add or remove - whole entries, see M(community.general.ldap_entry). - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). - - For I(state=present) and I(state=absent), all value comparisons are - performed on the server for maximum accuracy. For I(state=exact), values - have to be compared in Python, which obviously ignores LDAP matching - rules. This should work out in most cases, but it is theoretically - possible to see spurious changes when target and actual values are - semantically identical but lexically distinct. -version_added: '0.2.0' -author: - - Jiri Tyr (@jtyr) - - Alexander Korinek (@noles) - - Maciej Delmanowski (@drybjed) -requirements: - - python-ldap -options: - state: - required: false - type: str - choices: [present, absent, exact] - default: present - description: - - The state of the attribute values. If C(present), all given attribute - values will be added if they're missing. If C(absent), all given - attribute values will be removed if present. If C(exact), the set of - attribute values will be forced to exactly those provided and no others. - If I(state=exact) and the attribute I(value) is empty, all values for - this attribute will be removed. - attributes: - required: true - type: dict - description: - - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass - a list of strings (see examples). - ordered: - required: false - type: bool - default: 'no' - description: - - If C(yes), prepend list values with X-ORDERED index numbers in all - attributes specified in the current task. 
This is useful mostly with - I(olcAccess) attribute to easily manage LDAP Access Control Lists. -extends_documentation_fragment: -- community.general.ldap.documentation - -''' - - -EXAMPLES = r''' -- name: Configure directory number 1 for example.com - community.general.ldap_attrs: - dn: olcDatabase={1}hdb,cn=config - attributes: - olcSuffix: dc=example,dc=com - state: exact - -# The complex argument format is required here to pass a list of ACL strings. -- name: Set up the ACL - community.general.ldap_attrs: - dn: olcDatabase={1}hdb,cn=config - attributes: - olcAccess: - - >- - {0}to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - {1}to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read - state: exact - -# An alternative approach with automatic X-ORDERED numbering -- name: Set up the ACL - community.general.ldap_attrs: - dn: olcDatabase={1}hdb,cn=config - attributes: - olcAccess: - - >- - to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read - ordered: yes - state: exact - -- name: Declare some indexes - community.general.ldap_attrs: - dn: olcDatabase={1}hdb,cn=config - attributes: - olcDbIndex: - - objectClass eq - - uid eq - -- name: Set up a root user, which we can use later to bootstrap the directory - community.general.ldap_attrs: - dn: olcDatabase={1}hdb,cn=config - attributes: - olcRootDN: cn=root,dc=example,dc=com - olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" - state: exact - -- name: Remove an attribute with a specific value - community.general.ldap_attrs: - dn: uid=jdoe,ou=people,dc=example,dc=com - attributes: - description: "An example user account" - state: absent - server_uri: ldap://localhost/ - bind_dn: cn=admin,dc=example,dc=com - bind_pw: password - -- name: Remove specified attribute(s) from an entry - community.general.ldap_attrs: - dn: uid=jdoe,ou=people,dc=example,dc=com - attributes: - description: [] - state: exact - server_uri: ldap://localhost/ - bind_dn: cn=admin,dc=example,dc=com - bind_pw: password -''' - - -RETURN = r''' -modlist: - description: list of modified parameters - returned: success - type: list - sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native, to_bytes -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs -import re - -LDAP_IMP_ERR = None -try: - import ldap - - HAS_LDAP = True -except ImportError: - LDAP_IMP_ERR = traceback.format_exc() - HAS_LDAP = False - - -class LdapAttrs(LdapGeneric): - def __init__(self, module): - LdapGeneric.__init__(self, module) - - # Shortcuts - self.attrs = self.module.params['attributes'] - self.state = self.module.params['state'] - self.ordered = self.module.params['ordered'] - - def _order_values(self, values): - """ Preprend X-ORDERED index numbers to attribute's values. """ - ordered_values = [] - - if isinstance(values, list): - for index, value in enumerate(values): - cleaned_value = re.sub(r'^\{\d+\}', '', value) - ordered_values.append('{' + str(index) + '}' + cleaned_value) - - return ordered_values - - def _normalize_values(self, values): - """ Normalize attribute's values. 
""" - norm_values = [] - - if isinstance(values, list): - if self.ordered: - norm_values = list(map(to_bytes, - self._order_values(list(map(str, - values))))) - else: - norm_values = list(map(to_bytes, values)) - else: - norm_values = [to_bytes(str(values))] - - return norm_values - - def add(self): - modlist = [] - for name, values in self.module.params['attributes'].items(): - norm_values = self._normalize_values(values) - for value in norm_values: - if self._is_value_absent(name, value): - modlist.append((ldap.MOD_ADD, name, value)) - - return modlist - - def delete(self): - modlist = [] - for name, values in self.module.params['attributes'].items(): - norm_values = self._normalize_values(values) - for value in norm_values: - if self._is_value_present(name, value): - modlist.append((ldap.MOD_DELETE, name, value)) - - return modlist - - def exact(self): - modlist = [] - for name, values in self.module.params['attributes'].items(): - norm_values = self._normalize_values(values) - try: - results = self.connection.search_s( - self.dn, ldap.SCOPE_BASE, attrlist=[name]) - except ldap.LDAPError as e: - self.fail("Cannot search for attribute %s" % name, e) - - current = results[0][1].get(name, []) - - if frozenset(norm_values) != frozenset(current): - if len(current) == 0: - modlist.append((ldap.MOD_ADD, name, norm_values)) - elif len(norm_values) == 0: - modlist.append((ldap.MOD_DELETE, name, None)) - else: - modlist.append((ldap.MOD_REPLACE, name, norm_values)) - - return modlist - - def _is_value_present(self, name, value): - """ True if the target attribute has the given value. """ - try: - is_present = bool( - self.connection.compare_s(self.dn, name, value)) - except ldap.NO_SUCH_ATTRIBUTE: - is_present = False - - return is_present - - def _is_value_absent(self, name, value): - """ True if the target attribute doesn't have the given value. 
""" - return not self._is_value_present(name, value) - - -def main(): - module = AnsibleModule( - argument_spec=gen_specs( - attributes=dict(type='dict', required=True), - ordered=dict(type='bool', default=False, required=False), - state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), - ), - supports_check_mode=True, - ) - - if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) - - # Instantiate the LdapAttr object - ldap = LdapAttrs(module) - - state = module.params['state'] - - # Perform action - if state == 'present': - modlist = ldap.add() - elif state == 'absent': - modlist = ldap.delete() - elif state == 'exact': - modlist = ldap.exact() - - changed = False - - if len(modlist) > 0: - changed = True - - if not module.check_mode: - try: - ldap.connection.modify_s(ldap.dn, modlist) - except Exception as e: - module.fail_json(msg="Attribute action failed.", details=to_native(e)) - - module.exit_json(changed=changed, modlist=modlist) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/net_tools/ldap/ldap_entry.py deleted file mode 100644 index 2ef06b9693..0000000000 --- a/plugins/modules/net_tools/ldap/ldap_entry.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ldap_entry -short_description: Add or remove LDAP entries. -description: - - Add or remove LDAP entries. This module only asserts the existence or - non-existence of an LDAP entry, not its attributes. To assert the - attribute values of an entry, see M(community.general.ldap_attrs). -notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). -author: - - Jiri Tyr (@jtyr) -requirements: - - python-ldap -options: - attributes: - description: - - If I(state=present), attributes necessary to create an entry. Existing - entries are never modified. To assert specific attribute values on an - existing entry, use M(community.general.ldap_attrs) module instead. - type: dict - objectClass: - description: - - If I(state=present), value or list of values to use when creating - the entry. It can either be a string or an actual list of - strings. - type: list - elements: str - state: - description: - - The target state of the entry. 
- choices: [present, absent] - default: present - type: str -extends_documentation_fragment: -- community.general.ldap.documentation - -''' - - -EXAMPLES = """ -- name: Make sure we have a parent entry for users - community.general.ldap_entry: - dn: ou=users,dc=example,dc=com - objectClass: organizationalUnit - -- name: Make sure we have an admin user - community.general.ldap_entry: - dn: cn=admin,dc=example,dc=com - objectClass: - - simpleSecurityObject - - organizationalRole - attributes: - description: An LDAP administrator - userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" - -- name: Get rid of an old entry - community.general.ldap_entry: - dn: ou=stuff,dc=example,dc=com - state: absent - server_uri: ldap://localhost/ - bind_dn: cn=admin,dc=example,dc=com - bind_pw: password - -# -# The same as in the previous example but with the authentication details -# stored in the ldap_auth variable: -# -# ldap_auth: -# server_uri: ldap://localhost/ -# bind_dn: cn=admin,dc=example,dc=com -# bind_pw: password -# -# In the example below, 'args' is a task keyword, passed at the same level as the module -- name: Get rid of an old entry - community.general.ldap_entry: - dn: ou=stuff,dc=example,dc=com - state: absent - args: "{{ ldap_auth }}" -""" - - -RETURN = """ -# Default return values -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native, to_bytes -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs - -LDAP_IMP_ERR = None -try: - import ldap.modlist - - HAS_LDAP = True -except ImportError: - LDAP_IMP_ERR = traceback.format_exc() - HAS_LDAP = False - - -class LdapEntry(LdapGeneric): - def __init__(self, module): - LdapGeneric.__init__(self, module) - - # Shortcuts - self.state = self.module.params['state'] - - # Add the objectClass into the list of attributes - self.module.params['attributes']['objectClass'] = ( - self.module.params['objectClass']) - - # Load attributes - if self.state == 'present': - self.attrs = self._load_attrs() - - def _load_attrs(self): - """ Turn attribute's value to array. """ - attrs = {} - - for name, value in self.module.params['attributes'].items(): - if isinstance(value, list): - attrs[name] = list(map(to_bytes, value)) - else: - attrs[name] = [to_bytes(value)] - - return attrs - - def add(self): - """ If self.dn does not exist, returns a callable that will add it. """ - def _add(): - self.connection.add_s(self.dn, modlist) - - if not self._is_entry_present(): - modlist = ldap.modlist.addModlist(self.attrs) - action = _add - else: - action = None - - return action - - def delete(self): - """ If self.dn exists, returns a callable that will delete it. 
""" - def _delete(): - self.connection.delete_s(self.dn) - - if self._is_entry_present(): - action = _delete - else: - action = None - - return action - - def _is_entry_present(self): - try: - self.connection.search_s(self.dn, ldap.SCOPE_BASE) - except ldap.NO_SUCH_OBJECT: - is_present = False - else: - is_present = True - - return is_present - - -def main(): - module = AnsibleModule( - argument_spec=gen_specs( - attributes=dict(default={}, type='dict'), - objectClass=dict(type='list', elements='str'), - state=dict(default='present', choices=['present', 'absent']), - ), - required_if=[('state', 'present', ['objectClass'])], - supports_check_mode=True, - ) - - if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) - - state = module.params['state'] - - # Instantiate the LdapEntry object - ldap = LdapEntry(module) - - # Get the action function - if state == 'present': - action = ldap.add() - elif state == 'absent': - action = ldap.delete() - - # Perform the action - if action is not None and not module.check_mode: - try: - action() - except Exception as e: - module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=(action is not None)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ldap/ldap_passwd.py b/plugins/modules/net_tools/ldap/ldap_passwd.py deleted file mode 100644 index 8d86ee93fc..0000000000 --- a/plugins/modules/net_tools/ldap/ldap_passwd.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017-2018, Keller Fuchs -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ldap_passwd -short_description: Set passwords in LDAP. -description: - - Set a password for an LDAP entry. This module only asserts that - a given password is valid for a given entry. To assert the - existence of an entry, see M(community.general.ldap_entry). -notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). -author: - - Keller Fuchs (@KellerFuchs) -requirements: - - python-ldap -options: - passwd: - description: - - The (plaintext) password to be set for I(dn). - type: str -extends_documentation_fragment: -- community.general.ldap.documentation - -''' - -EXAMPLES = """ -- name: Set a password for the admin user - community.general.ldap_passwd: - dn: cn=admin,dc=example,dc=com - passwd: "{{ vault_secret }}" - -- name: Setting passwords in bulk - community.general.ldap_passwd: - dn: "{{ item.key }}" - passwd: "{{ item.value }}" - with_dict: - alice: alice123123 - bob: "|30b!" 
- admin: "{{ vault_secret }}" -""" - -RETURN = """ -modlist: - description: list of modified parameters - returned: success - type: list - sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs - -LDAP_IMP_ERR = None -try: - import ldap - - HAS_LDAP = True -except ImportError: - LDAP_IMP_ERR = traceback.format_exc() - HAS_LDAP = False - - -class LdapPasswd(LdapGeneric): - def __init__(self, module): - LdapGeneric.__init__(self, module) - - # Shortcuts - self.passwd = self.module.params['passwd'] - - def passwd_check(self): - try: - tmp_con = ldap.initialize(self.server_uri) - except ldap.LDAPError as e: - self.fail("Cannot initialize LDAP connection", e) - - if self.start_tls: - try: - tmp_con.start_tls_s() - except ldap.LDAPError as e: - self.fail("Cannot start TLS.", e) - - try: - tmp_con.simple_bind_s(self.dn, self.passwd) - except ldap.INVALID_CREDENTIALS: - return True - except ldap.LDAPError as e: - self.fail("Cannot bind to the server.", e) - else: - return False - finally: - tmp_con.unbind() - - def passwd_set(self): - # Exit early if the password is already valid - if not self.passwd_check(): - return False - - # Change the password (or throw an exception) - try: - self.connection.passwd_s(self.dn, None, self.passwd) - except ldap.LDAPError as e: - self.fail("Unable to set password", e) - - # Password successfully changed - return True - - -def main(): - module = AnsibleModule( - argument_spec=gen_specs(passwd=dict(no_log=True)), - supports_check_mode=True, - ) - - if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) - - ldap = LdapPasswd(module) - - if module.check_mode: - module.exit_json(changed=ldap.passwd_check()) - - module.exit_json(changed=ldap.passwd_set()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ldap/ldap_search.py b/plugins/modules/net_tools/ldap/ldap_search.py deleted file mode 100644 index 6b83321ff9..0000000000 --- a/plugins/modules/net_tools/ldap/ldap_search.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2020, Sebastian Pfahl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" ---- -module: ldap_search -version_added: '0.2.0' -short_description: Search for entries in a LDAP server -description: - - Return the results of an LDAP search. -notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). -author: - - Sebastian Pfahl (@eryx12o45) -requirements: - - python-ldap -options: - dn: - required: true - type: str - description: - - The LDAP DN to search in. - scope: - choices: [base, onelevel, subordinate, children] - default: base - type: str - description: - - The LDAP scope to use. - filter: - default: '(objectClass=*)' - type: str - description: - - Used for filtering the LDAP search result. 
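-      - For example, C((uid=jdoe)) limits the result to entries whose C(uid) is C(jdoe).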
- attrs: - type: list - elements: str - description: - - A list of attributes for limiting the result. Use an - actual list or a comma-separated string. - schema: - default: false - type: bool - description: - - Set to C(true) to return the full attribute schema of entries, not - their attribute values. Overrides I(attrs) when provided. -extends_documentation_fragment: - - community.general.ldap.documentation -""" - -EXAMPLES = r""" -- name: Return all entries within the 'groups' organizational unit. - community.general.ldap_search: - dn: "ou=groups,dc=example,dc=com" - register: ldap_groups - -- name: Return GIDs for all groups - community.general.ldap_search: - dn: "ou=groups,dc=example,dc=com" - scope: "onelevel" - attrs: - - "gidNumber" - register: ldap_group_gids -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs - -LDAP_IMP_ERR = None -try: - import ldap - - HAS_LDAP = True -except ImportError: - LDAP_IMP_ERR = traceback.format_exc() - HAS_LDAP = False - - -def main(): - module = AnsibleModule( - argument_spec=gen_specs( - dn=dict(type='str', required=True), - scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']), - filter=dict(type='str', default='(objectClass=*)'), - attrs=dict(type='list', elements='str'), - schema=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - - if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) - - if not module.check_mode: - try: - LdapSearch(module).main() - except Exception as exception: - module.fail_json(msg="Attribute action failed.", details=to_native(exception)) - - module.exit_json(changed=False) - - -def _extract_entry(dn, attrs): - extracted = {'dn': dn} - for attr, val in list(attrs.items()): - if len(val) == 1: - extracted[attr] = val[0] - else: - extracted[attr] = val - return extracted - - -class LdapSearch(LdapGeneric): - def __init__(self, module): - LdapGeneric.__init__(self, module) - - self.dn = self.module.params['dn'] - self.filterstr = self.module.params['filter'] - self.attrlist = [] - self._load_scope() - self._load_attrs() - self._load_schema() - - def _load_schema(self): - self.schema = self.module.boolean(self.module.params['schema']) - if self.schema: - self.attrsonly = 1 - else: - self.attrsonly = 0 - - def _load_scope(self): - spec = dict( - base=ldap.SCOPE_BASE, - onelevel=ldap.SCOPE_ONELEVEL, - subordinate=ldap.SCOPE_SUBORDINATE, - children=ldap.SCOPE_SUBTREE, - ) - self.scope = spec[self.module.params['scope']] - - def _load_attrs(self): - self.attrlist = self.module.params['attrs'] or None - - def main(self): - results = self.perform_search() - self.module.exit_json(changed=False, results=results) - - def perform_search(self): - try: - results = self.connection.search_s( - self.dn, - self.scope, - filterstr=self.filterstr, - attrlist=self.attrlist, - attrsonly=self.attrsonly - ) - ldap_entries = [] - for result in results: - if isinstance(result[1], dict): - if self.schema: - ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys()))) - else: - ldap_entries.append(_extract_entry(result[0], result[1])) - return ldap_entries - except ldap.NO_SUCH_OBJECT: - self.module.fail_json(msg="Base not found: {0}".format(self.dn)) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/net_tools/lldp.py b/plugins/modules/net_tools/lldp.py deleted file mode 100644 index 1b8fa9eb06..0000000000 --- a/plugins/modules/net_tools/lldp.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lldp -requirements: [ lldpctl ] -short_description: get details reported by lldp -description: - - Reads data out of lldpctl -options: {} -author: "Andy Hill (@andyhky)" -notes: - - Requires lldpd running and lldp enabled on switches -''' - -EXAMPLES = ''' -# Retrieve switch/port information - - name: Gather information from lldp - community.general.lldp: - - - name: Print each switch/port - ansible.builtin.debug: - msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}" - with_items: "{{ lldp.keys() }}" - -# TASK: [Print each switch/port] *********************************************************** -# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} -# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} -# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} - -''' - -from ansible.module_utils.basic import AnsibleModule - - -def gather_lldp(module): - cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue'] - rc, output, err = module.run_command(cmd) - if output: - output_dict = {} - current_dict = {} - lldp_entries = output.split("\n") - - for entry in lldp_entries: - if entry.startswith('lldp'): - path, value = entry.strip().split("=", 1) - path = path.split(".") - path_components, final = path[:-1], path[-1] - else: - value = current_dict[final] + '\n' + entry - - current_dict = output_dict - for path_component in path_components: - current_dict[path_component] = current_dict.get(path_component, {}) - current_dict = current_dict[path_component] - current_dict[final] = value - return output_dict - - -def main(): - module = AnsibleModule({}) - - lldp_output = gather_lldp(module) - try: - data = {'lldp': lldp_output['lldp']} - module.exit_json(ansible_facts=data) - except TypeError: - module.fail_json(msg="lldpctl command failed. 
is lldpd running?") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/netcup_dns.py b/plugins/modules/net_tools/netcup_dns.py deleted file mode 100644 index 5ec5cbb246..0000000000 --- a/plugins/modules/net_tools/netcup_dns.py +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2018 Nicolai Buchwitz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: netcup_dns -notes: [] -short_description: manage Netcup DNS records -description: - - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)" -options: - api_key: - description: - - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net)) - required: True - type: str - api_password: - description: - - API password for authentication, must be obtained via the netcup CCP (https://ccp.netcup.net) - required: True - type: str - customer_id: - description: - - Netcup customer id - required: True - type: int - domain: - description: - - Domainname the records should be added / removed - required: True - type: str - record: - description: - - Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name) - default: "@" - aliases: [ name ] - type: str - type: - description: - - Record type - choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS'] - required: True - type: str - value: - description: - - Record value - required: true - type: str - solo: - type: bool - default: False - description: - - Whether the record should be the only one for that record type and record name. Only use with C(state=present) - - This will delete all other records with the same record name and type. - priority: - description: - - Record priority. Required for C(type=MX) - required: False - type: int - state: - description: - - Whether the record should exist or not - required: False - default: present - choices: [ 'present', 'absent' ] - type: str -requirements: - - "nc-dnsapi >= 0.1.3" -author: "Nicolai Buchwitz (@nbuchwitz)" - -''' - -EXAMPLES = ''' -- name: Create a record of type A - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - name: "mail" - type: "A" - value: "127.0.0.1" - -- name: Delete that record - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - name: "mail" - type: "A" - value: "127.0.0.1" - state: absent - -- name: Create a wildcard record - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - name: "*" - type: "A" - value: "127.0.1.1" - -- name: Set the MX record for example.com - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - type: "MX" - value: "mail.example.com" - -- name: Set a record and ensure that this is the only one - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." 
- name: "demo" - domain: "example.com" - type: "AAAA" - value: "::1" - solo: true -''' - -RETURN = ''' -records: - description: list containing all records - returned: success - type: complex - contains: - name: - description: the record name - returned: success - type: str - sample: fancy-hostname - type: - description: the record type - returned: succcess - type: str - sample: A - value: - description: the record destination - returned: success - type: str - sample: 127.0.0.1 - priority: - description: the record priority (only relevant if type=MX) - returned: success - type: int - sample: 0 - id: - description: internal id of the record - returned: success - type: int - sample: 12345 -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -NCDNSAPI_IMP_ERR = None -try: - import nc_dnsapi - from nc_dnsapi import DNSRecord - - HAS_NCDNSAPI = True -except ImportError: - NCDNSAPI_IMP_ERR = traceback.format_exc() - HAS_NCDNSAPI = False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - api_password=dict(required=True, no_log=True), - customer_id=dict(required=True, type='int'), - - domain=dict(required=True), - record=dict(required=False, default='@', aliases=['name']), - type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']), - value=dict(required=True), - priority=dict(required=False, type='int'), - solo=dict(required=False, type='bool', default=False), - state=dict(required=False, choices=['present', 'absent'], default='present'), - - ), - supports_check_mode=True - ) - - if not HAS_NCDNSAPI: - module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR) - - api_key = module.params.get('api_key') - api_password = module.params.get('api_password') - customer_id = module.params.get('customer_id') - domain = module.params.get('domain') - record_type = module.params.get('type') - record = module.params.get('record') - value = module.params.get('value') - priority = module.params.get('priority') - solo = module.params.get('solo') - state = module.params.get('state') - - if record_type == 'MX' and not priority: - module.fail_json(msg="record type MX required the 'priority' argument") - - has_changed = False - all_records = [] - try: - with nc_dnsapi.Client(customer_id, api_key, api_password) as api: - all_records = api.dns_records(domain) - record = DNSRecord(record, record_type, value, priority=priority) - - # try to get existing record - record_exists = False - for r in all_records: - if r == record: - record_exists = True - record = r - - break - - if state == 'present': - if solo: - obsolete_records = [r for r in all_records if - r.hostname == record.hostname - and r.type == record.type - and not r.destination == record.destination] - - if obsolete_records: - if not module.check_mode: - all_records = api.delete_dns_records(domain, obsolete_records) - - has_changed = True - - if not record_exists: - if not module.check_mode: - all_records = api.add_dns_record(domain, record) - - has_changed = True - elif state == 'absent' and record_exists: - if not module.check_mode: - all_records = api.delete_dns_record(domain, record) - - has_changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]}) - - -def record_data(r): - return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": 
r.priority, "id": r.id} - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py deleted file mode 100644 index 83202f6ebc..0000000000 --- a/plugins/modules/net_tools/nmcli.py +++ /dev/null @@ -1,1916 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Chris Long -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: nmcli -author: -- Chris Long (@alcamie101) -short_description: Manage Networking -requirements: -- nmcli -description: - - 'Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.' - - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.' - - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.' - - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager' - - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.' -options: - state: - description: - - Whether the device should exist or not, taking action if the state is different from what is stated. - type: str - required: true - choices: [ absent, present ] - autoconnect: - description: - - Whether the connection should start on boot. - - Whether the connection profile can be automatically activated - type: bool - default: yes - conn_name: - description: - - The name used to call the connection. Pattern is [-][-]. - type: str - required: true - ifname: - description: - - The interface to bind the connection to. - - The connection will only be applicable to this interface name. - - A special value of C('*') can be used for interface-independent connections. - - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. - - This parameter defaults to C(conn_name) when left unset. - type: str - type: - description: - - This is the type of device or network connection that you wish to create or modify. - - Type C(dummy) is added in community.general 3.5.0. - - Type C(generic) is added in Ansible 2.5. - - Type C(infiniband) is added in community.general 2.0.0. - - Type C(gsm) is added in community.general 3.7.0. - type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm ] - mode: - description: - - This is the type of device or network connection that you wish to create for a bond or bridge. - type: str - choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ] - default: balance-rr - master: - description: - - Master ] STP forwarding delay, in seconds. - type: int - default: 15 - hellotime: - description: - - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds. - type: int - default: 2 - maxage: - description: - - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds. - type: int - default: 20 - ageingtime: - description: - - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. 
- type: int - default: 300 - mac: - description: - - MAC address of the connection. - - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. - type: str - slavepriority: - description: - - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave. - type: int - default: 32 - path_cost: - description: - - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave. - type: int - default: 100 - hairpin: - description: - - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the - frame was received on. - type: bool - default: yes - runner: - description: - - This is the type of device or network connection that you wish to create for a team. - type: str - choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ] - default: roundrobin - version_added: 3.4.0 - runner_hwaddr_policy: - description: - - This defines the policy of how hardware addresses of team device and port devices - should be set during the team lifetime. - type: str - choices: [ same_all, by_active, only_active ] - version_added: 3.4.0 - vlanid: - description: - - This is only used with VLAN - VLAN ID in range <0-4095>. - type: int - vlandev: - description: - - This is only used with VLAN - parent device this VLAN is on, can use ifname. - type: str - flags: - description: - - This is only used with VLAN - flags. - type: str - ingress: - description: - - This is only used with VLAN - VLAN ingress priority mapping. - type: str - egress: - description: - - This is only used with VLAN - VLAN egress priority mapping. - type: str - vxlan_id: - description: - - This is only used with VXLAN - VXLAN ID. - type: int - vxlan_remote: - description: - - This is only used with VXLAN - VXLAN destination IP address. - type: str - vxlan_local: - description: - - This is only used with VXLAN - VXLAN local IP address. - type: str - ip_tunnel_dev: - description: - - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. - type: str - ip_tunnel_remote: - description: - - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. - type: str - ip_tunnel_local: - description: - - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. - type: str - ip_tunnel_input_key: - description: - - The key used for tunnel input packets. - - Only used when I(type=gre). - type: str - version_added: 3.6.0 - ip_tunnel_output_key: - description: - - The key used for tunnel output packets. - - Only used when I(type=gre). - type: str - version_added: 3.6.0 - zone: - description: - - The trust level of the connection. - - When updating this property on a currently activated connection, the change takes effect immediately. - type: str - version_added: 2.0.0 - wifi_sec: - description: - - The security configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' - - 'For instance to use common WPA-PSK auth with a password: - C({key-mgmt: wpa-psk, psk: my_password}).' - type: dict - suboptions: - auth-alg: - description: - - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here. 
- - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP. - - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties - must be specified. - type: str - choices: [ open, shared, leap ] - fils: - description: - - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. - - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3) - (enable FILS and fail if not supported). - - When set to C(0) and no global default is set, FILS will be optionally enabled. - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - group: - description: - - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in - the list. - - For maximum compatibility leave this property empty. - type: list - elements: str - choices: [ wep40, wep104, tkip, ccmp ] - key-mgmt: - description: - - Key management used for the connection. - - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2 - + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only). - - This property must be set for any Wi-Fi connection that uses security. - type: str - choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ] - leap-password-flags: - description: Flags indicating how to handle the I(leap-password) property. - type: list - elements: int - leap-password: - description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). - type: str - leap-username: - description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). - type: str - pairwise: - description: - - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the - list. - - For maximum compatibility leave this property empty. - type: list - elements: str - choices: [ tkip, ccmp ] - pmf: - description: - - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. - - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3) - (enable PMF and fail if not supported). - - When set to C(0) and no global default is set, PMF will be optionally enabled. - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - proto: - description: - - List of strings specifying the allowed WPA protocol versions to use. - - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN). - - If not specified, both WPA and RSN connections are allowed. - type: list - elements: str - choices: [ wpa, rsn ] - psk-flags: - description: Flags indicating how to handle the I(psk) property. - type: list - elements: int - psk: - description: - - Pre-Shared-Key for WPA networks. - - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the - actual key, or the key in form of 64 hexadecimal character. - - The WPA3-Personal networks use a passphrase of any length for SAE authentication. 
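As an aside on the two C(psk) forms just described: the module passes the value straight through to nmcli rather than validating it, but the two accepted formats are easy to check standalone. A small illustrative sketch (the helper name is hypothetical, not part of the module):

```python
import re

def looks_like_valid_wpa_psk(psk):
    """Illustrative check for the two WPA-PSK forms: a 64-digit hex key,
    or an 8-63 character printable ASCII passphrase."""
    if re.fullmatch(r'[0-9a-fA-F]{64}', psk):
        return True  # raw 256-bit key given as hexadecimal characters
    return 8 <= len(psk) <= 63 and all(32 <= ord(c) <= 126 for c in psk)

assert looks_like_valid_wpa_psk('my_password')
assert looks_like_valid_wpa_psk('ab' * 32)    # 64 hex characters
assert not looks_like_valid_wpa_psk('short')  # too short for a passphrase
```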
- type: str - wep-key-flags: - description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties. - type: list - elements: int - wep-key-type: - description: - - Controls the interpretation of WEP keys. - - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII - password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the - actual WEP key. - type: int - choices: [ 1, 2 ] - wep-key0: - description: - - Index 0 WEP key. This is the WEP key used in most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key1: - description: - - Index 1 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key2: - description: - - Index 2 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key3: - description: - - Index 3 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-tx-keyidx: - description: - - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here. - - Valid values are C(0) (default key) through C(3). - - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4). - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - wps-method: - description: - - Flags indicating which mode of WPS is to be used if any. - - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS - enrollment from the Access Point capabilities. - - WPS can be disabled by setting this property to a value of C(1). - type: int - default: 0 - version_added: 3.0.0 - ssid: - description: - - Name of the Wireless router or the access point. - type: str - version_added: 3.0.0 - wifi: - description: - - The configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - - 'For instance to create a hidden AP mode WiFi connection: - C({hidden: true, mode: ap}).' - type: dict - suboptions: - ap-isolation: - description: - - Configures AP isolation, which prevents communication between wireless devices connected to this AP. - - This property can be set to a value different from C(-1) only when the interface is configured in AP mode. - - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks - from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file - shares, printers, etc. - - If set to C(0), devices can talk to each other. - - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0). - type: int - choices: [ -1, 0, 1 ] - default: -1 - assigned-mac-address: - description: - - The new field for the cloned MAC address. 
- - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or - C(stable). - - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses. - - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address). - type: str - band: - description: - - 802.11 frequency band of the network. - - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11. - - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not - associate with the same network in the 2.4GHz band even if the network's settings are compatible. - - This setting depends on specific driver capability and may not work with all drivers. - type: str - choices: [ a, bg ] - bssid: - description: - - If specified, directs the device to only associate with the given access point. - - This capability is highly driver dependent and not supported by all devices. - - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. - type: str - channel: - description: - - Wireless channel to use for the Wi-Fi connection. - - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel. - - Because channel numbers overlap between bands, this property also requires the I(band) property to be set. - type: int - default: 0 - cloned-mac-address: - description: - - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like - C(random). - - For libnm and nmcli, this field is called I(cloned-mac-address). - type: str - generate-mac-address-mask: - description: - - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a - locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed. - - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address. - - If the property is C(null), it is eligible to be overwritten by a default connection setting. - - If the value is still c(null) or an empty string, the default is to create a locally-administered, unicast MAC address. - - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC - address of the device, while the unset bits are subject to randomization. - - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the - C(random) or C(stable) algorithm. - - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits - that shall not be randomized. - - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are - randomized. - - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address. - - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, - C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally - administered. 
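The masking rule described above is easier to see in code. A simplified sketch of the behavior NetworkManager documents (it ignores the locally-administered bit handling and the multi-address variants, and the helper name is hypothetical):

```python
import random

def scramble_mac(current_mac, mask_mac):
    """Keep the bits set in mask_mac from current_mac, randomize the rest,
    and clear the least significant bit of the first octet (unicast)."""
    cur = [int(octet, 16) for octet in current_mac.split(':')]
    mask = [int(octet, 16) for octet in mask_mac.split(':')]
    out = []
    for i, (c, m) in enumerate(zip(cur, mask)):
        b = (c & m) | (random.randrange(256) & ~m & 0xFF)
        if i == 0:
            b &= 0xFE  # unicast: least significant bit of first octet unset
        out.append(b)
    return ':'.join('%02X' % b for b in out)

# Preserve the OUI of the current address, randomize the lower 3 bytes:
print(scramble_mac('68:F7:28:12:34:56', 'FF:FF:FF:00:00:00'))
```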
- type: str - hidden: - description: - - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode. - - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID. - However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with - caution. - - In AP mode, the created network does not broadcast its SSID. - - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the - explicit probe-scans are distinctly recognizable on the air. - type: bool - default: false - mac-address-blacklist: - description: - - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. - - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)). - type: list - elements: str - mac-address-randomization: - description: - - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1) - (never randomize the MAC address), or C(2) (always randomize the MAC address). - - This property is deprecated for I(cloned-mac-address). - type: int - default: 0 - choices: [ 0, 1, 2 ] - mac-address: - description: - - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches. - - This property does not change the MAC address of the device (for example for MAC spoofing). - type: str - mode: - description: Wi-Fi network mode. If blank, C(infrastructure) is assumed. - type: str - choices: [ infrastructure, mesh, adhoc, ap ] - default: infrastructure - mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. - type: int - default: 0 - powersave: - description: - - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch currently configure setting) or C(0) (use - the globally configured value). - - All other values are reserved. - type: int - default: 0 - choices: [ 0, 1, 2, 3 ] - rate: - description: - - If non-zero, directs the device to only use the specified bitrate for communication with the access point. - - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s. - - This property is highly driver dependent and not all devices support setting a static bitrate. - type: int - default: 0 - tx-power: - description: - - If non-zero, directs the device to use the specified transmit power. - - Units are dBm. - - This property is highly driver dependent and not all devices support setting a static transmit power. - type: int - default: 0 - wake-on-wlan: - description: - - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. 
- - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values - C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager). - - Note the option values' sum must be specified in order to combine multiple options. - type: int - default: 1 - version_added: 3.5.0 - ignore_unsupported_suboptions: - description: - - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. - - Only I(wifi) and I(wifi_sec) options are currently affected. - type: bool - default: false - version_added: 3.6.0 - gsm: - description: - - The configuration of the GSM connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' - - 'For instance to use apn, pin, username and password: - C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).' - type: dict - version_added: 3.7.0 - suboptions: - apn: - description: - - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. - - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or - just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan. - - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. - type: str - auto-config: - description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network - the modem will register to in the Mobile Broadband Provider database. - type: bool - default: false - device-id: - description: - - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - If given, the connection will only apply to the specified device. - type: str - home-only: - description: - - When C(true), only connections to the home network will be allowed. - - Connections to roaming networks will not be made. - type: bool - default: false - mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. - type: int - default: 0 - network-id: - description: - - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. - - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network. - - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible. - type: str - number: - description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. - type: str - password: - description: - - The password used to authenticate with the network, if required. 
- - Many providers do not require a password, or accept any password. - - But if a password is required, it is specified here. - type: str - password-flags: - description: - - NMSettingSecretFlags indicating how to handle the I(password) property. - - 'Following choices are allowed: - C(0) B(NONE): The system is responsible for providing and storing this secret (default), - C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be - asked to retrieve it - C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed - C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required - (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.' - type: int - choices: [ 0, 1, 2 , 4 ] - default: 0 - pin: - description: - - If the SIM is locked with a PIN it must be unlocked before any other operations are requested. - - Specify the PIN here to allow operation of the device. - type: str - pin-flags: - description: - - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property. - - See I(gsm.password-flags) for NMSettingSecretFlags choices. - type: int - choices: [ 0, 1, 2 , 4 ] - default: 0 - sim-id: - description: - - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching - the given identifier.' - type: str - sim-operator-id: - description: - - A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to. - - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card - provisioned by the given operator.' - type: str - username: - description: - - The username used to authenticate with the network, if required. - - Many providers do not require a username, or accept any username. - - But if a username is required, it is specified here. 
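The I(wifi), I(wifi_sec) and I(gsm) dicts above are all consumed the same way: every suboption is flattened into a dotted nmcli property, as the module's C(connection_options()) further down shows. A condensed sketch of that pattern, with sample values taken from the option docs:

```python
# Condensed sketch of the suboption flattening the module performs in
# connection_options(): every key in the dict becomes "<setting>.<key>".
def flatten(setting, suboptions):
    return {'%s.%s' % (setting, name): value for name, value in suboptions.items()}

options = {}
options.update(flatten('802-11-wireless', {'hidden': True, 'mode': 'ap'}))
options.update(flatten('802-11-wireless-security', {'key-mgmt': 'wpa-psk', 'psk': 'my_password'}))
options.update(flatten('gsm', {'apn': 'provider.apn', 'pin': '1234'}))

print(options['802-11-wireless-security.key-mgmt'])  # wpa-psk
```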
-''' - -EXAMPLES = r''' -# These examples are using the following inventory: -# -# ## Directory layout: -# -# |_/inventory/cloud-hosts -# | /group_vars/openstack-stage.yml -# | /host_vars/controller-01.openstack.host.com -# | /host_vars/controller-02.openstack.host.com -# |_/playbook/library/nmcli.py -# | /playbook-add.yml -# | /playbook-del.yml -# ``` -# -# ## inventory examples -# ### groups_vars -# ```yml -# --- -# #devops_os_define_network -# storage_gw: "192.0.2.254" -# external_gw: "198.51.100.254" -# tenant_gw: "203.0.113.254" -# -# #Team vars -# nmcli_team: -# - conn_name: tenant -# ip4: '{{ tenant_ip }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: external -# ip4: '{{ external_ip }}' -# gw4: '{{ external_gw }}' -# - conn_name: storage -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# nmcli_team_slave: -# - conn_name: em1 -# ifname: em1 -# master: tenant -# - conn_name: em2 -# ifname: em2 -# master: tenant -# - conn_name: p2p1 -# ifname: p2p1 -# master: storage -# - conn_name: p2p2 -# ifname: p2p2 -# master: external -# -# #bond vars -# nmcli_bond: -# - conn_name: tenant -# ip4: '{{ tenant_ip }}' -# gw4: '' -# mode: balance-rr -# - conn_name: external -# ip4: '{{ external_ip }}' -# gw4: '' -# mode: balance-rr -# - conn_name: storage -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# mode: balance-rr -# nmcli_bond_slave: -# - conn_name: em1 -# ifname: em1 -# master: tenant -# - conn_name: em2 -# ifname: em2 -# master: tenant -# - conn_name: p2p1 -# ifname: p2p1 -# master: storage -# - conn_name: p2p2 -# ifname: p2p2 -# master: external -# -# #ethernet vars -# nmcli_ethernet: -# - conn_name: em1 -# ifname: em1 -# ip4: '{{ tenant_ip }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: em2 -# ifname: em2 -# ip4: '{{ tenant_ip1 }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: p2p1 -# ifname: p2p1 -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# - conn_name: p2p2 -# ifname: p2p2 -# ip4: '{{ external_ip }}' -# gw4: '{{ external_gw }}' -# ``` -# -# ### host_vars -# ```yml -# --- -# storage_ip: "192.0.2.91/23" -# external_ip: "198.51.100.23/21" -# tenant_ip: "203.0.113.77/23" -# ``` - - - -## playbook-add.yml example - ---- -- hosts: openstack-stage - remote_user: root - tasks: - - - name: Install needed network manager libs - ansible.builtin.package: - name: - - NetworkManager-libnm - - nm-connection-editor - - libsemanage-python - - policycoreutils-python - state: present - -##### Working with all cloud nodes - Teaming - - name: Try nmcli add team - conn_name only & ip4 gw4 - community.general.nmcli: - type: team - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - state: present - with_items: - - '{{ nmcli_team }}' - - - name: Try nmcli add teams-slave - community.general.nmcli: - type: team-slave - conn_name: '{{ item.conn_name }}' - ifname: '{{ item.ifname }}' - master: '{{ item.master }}' - state: present - with_items: - - '{{ nmcli_team_slave }}' - -###### Working with all cloud nodes - Bonding - - name: Try nmcli add bond - conn_name only & ip4 gw4 mode - community.general.nmcli: - type: bond - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - mode: '{{ item.mode }}' - state: present - with_items: - - '{{ nmcli_bond }}' - - - name: Try nmcli add bond-slave - community.general.nmcli: - type: bond-slave - conn_name: '{{ item.conn_name }}' - ifname: '{{ item.ifname }}' - master: '{{ item.master }}' - state: present - with_items: - - '{{ nmcli_bond_slave }}' - -##### Working with all cloud nodes - Ethernet - - 
name: Try nmcli add Ethernet - conn_name only & ip4 gw4 - community.general.nmcli: - type: ethernet - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - state: present - with_items: - - '{{ nmcli_ethernet }}' - -## playbook-del.yml example -- hosts: openstack-stage - remote_user: root - tasks: - - - name: Try nmcli del team - multiple - community.general.nmcli: - conn_name: '{{ item.conn_name }}' - state: absent - with_items: - - conn_name: em1 - - conn_name: em2 - - conn_name: p1p1 - - conn_name: p1p2 - - conn_name: p2p1 - - conn_name: p2p2 - - conn_name: tenant - - conn_name: storage - - conn_name: external - - conn_name: team-em1 - - conn_name: team-em2 - - conn_name: team-p1p1 - - conn_name: team-p1p2 - - conn_name: team-p2p1 - - conn_name: team-p2p2 - - - name: Add an Ethernet connection with static IP configuration - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - state: present - - - name: Add an Team connection with static IP configuration - community.general.nmcli: - conn_name: my-team1 - ifname: my-team1 - type: team - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - state: present - autoconnect: yes - - - name: Optionally, at the same time specify IPv6 addresses for the device - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - ip6: 2001:db8::cafe - gw6: 2001:db8::1 - state: present - - - name: Add two IPv4 DNS server addresses - community.general.nmcli: - conn_name: my-eth1 - type: ethernet - dns4: - - 192.0.2.53 - - 198.51.100.53 - state: present - - - name: Make a profile usable for all compatible Ethernet interfaces - community.general.nmcli: - ctype: ethernet - name: my-eth1 - ifname: '*' - state: present - - - name: Change the property of a setting e.g. MTU - community.general.nmcli: - conn_name: my-eth1 - mtu: 9000 - type: ethernet - state: present - - - name: Add VxLan - community.general.nmcli: - type: vxlan - conn_name: vxlan_test1 - vxlan_id: 16 - vxlan_local: 192.168.1.2 - vxlan_remote: 192.168.1.5 - - - name: Add gre - community.general.nmcli: - type: gre - conn_name: gre_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add ipip - community.general.nmcli: - type: ipip - conn_name: ipip_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add sit - community.general.nmcli: - type: sit - conn_name: sit_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add zone - community.general.nmcli: - type: ethernet - conn_name: my-eth1 - zone: external - state: present - -# nmcli exits with status 0 if it succeeds and exits with a status greater -# than zero when there is a failure. The following list of status codes may be -# returned: -# -# - 0 Success - indicates the operation succeeded -# - 1 Unknown or unspecified error -# - 2 Invalid user input, wrong nmcli invocation -# - 3 Timeout expired (see --wait option) -# - 4 Connection activation failed -# - 5 Connection deactivation failed -# - 6 Disconnecting device failed -# - 7 Connection deletion failed -# - 8 NetworkManager is not running -# - 9 nmcli and NetworkManager versions mismatch -# - 10 Connection, device, or access point does not exist. 
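A small standalone sketch of how those exit codes can be interpreted when driving nmcli directly; the module itself only distinguishes C(rc == 0) from non-zero via C(module.run_command()):

```python
import subprocess

# Exit codes as documented in the comment block above.
NMCLI_RC = {
    0: 'Success',
    1: 'Unknown or unspecified error',
    2: 'Invalid user input, wrong nmcli invocation',
    3: 'Timeout expired',
    4: 'Connection activation failed',
    5: 'Connection deactivation failed',
    6: 'Disconnecting device failed',
    7: 'Connection deletion failed',
    8: 'NetworkManager is not running',
    9: 'nmcli and NetworkManager versions mismatch',
    10: 'Connection, device, or access point does not exist',
}

proc = subprocess.run(['nmcli', 'con', 'show'], capture_output=True, text=True)
print(proc.returncode, NMCLI_RC.get(proc.returncode, 'Unrecognized exit status'))
```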
- -- name: Create the wifi connection - community.general.nmcli: - type: wifi - conn_name: Brittany - ifname: wlp4s0 - ssid: Brittany - wifi_sec: - key-mgmt: wpa-psk - psk: my_password - autoconnect: true - state: present - -- name: Create a hidden AP mode wifi connection - community.general.nmcli: - type: wifi - conn_name: ChocoMaster - ifname: wlo1 - ssid: ChocoMaster - wifi: - hidden: true - mode: ap - autoconnect: true - state: present - -- name: Create a gsm connection - community.general.nmcli: - type: gsm - conn_name: my-gsm-provider - ifname: cdc-wdm0 - gsm: - apn: my.provider.apn - username: my-provider-username - password: my-provider-password - pin: my-sim-pin - autoconnect: true - state: present - -''' - -RETURN = r"""# -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -import re - - -class NmcliModuleError(Exception): - pass - - -class Nmcli(object): - """ - This is the generic nmcli manipulation class that is subclassed based on platform. - A subclass may wish to override the following action methods:- - - create_connection() - - delete_connection() - - edit_connection() - - modify_connection() - - show_connection() - - up_connection() - - down_connection() - All subclasses MUST define platform and distribution (which may be None). - """ - - platform = 'Generic' - distribution = None - - SECRET_OPTIONS = ( - '802-11-wireless-security.leap-password', - '802-11-wireless-security.psk', - '802-11-wireless-security.wep-key0', - '802-11-wireless-security.wep-key1', - '802-11-wireless-security.wep-key2', - '802-11-wireless-security.wep-key3' - ) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] - self.autoconnect = module.params['autoconnect'] - self.conn_name = module.params['conn_name'] - self.master = module.params['master'] - self.ifname = module.params['ifname'] - self.type = module.params['type'] - self.ip4 = module.params['ip4'] - self.gw4 = module.params['gw4'] - self.gw4_ignore_auto = module.params['gw4_ignore_auto'] - self.routes4 = module.params['routes4'] - self.route_metric4 = module.params['route_metric4'] - self.routing_rules4 = module.params['routing_rules4'] - self.never_default4 = module.params['never_default4'] - self.dns4 = module.params['dns4'] - self.dns4_search = module.params['dns4_search'] - self.dns4_ignore_auto = module.params['dns4_ignore_auto'] - self.method4 = module.params['method4'] - self.may_fail4 = module.params['may_fail4'] - self.ip6 = module.params['ip6'] - self.gw6 = module.params['gw6'] - self.gw6_ignore_auto = module.params['gw6_ignore_auto'] - self.dns6 = module.params['dns6'] - self.dns6_search = module.params['dns6_search'] - self.dns6_ignore_auto = module.params['dns6_ignore_auto'] - self.method6 = module.params['method6'] - self.mtu = module.params['mtu'] - self.stp = module.params['stp'] - self.priority = module.params['priority'] - self.mode = module.params['mode'] - self.miimon = module.params['miimon'] - self.primary = module.params['primary'] - self.downdelay = module.params['downdelay'] - self.updelay = module.params['updelay'] - self.arp_interval = module.params['arp_interval'] - self.arp_ip_target = module.params['arp_ip_target'] - self.slavepriority = module.params['slavepriority'] - self.forwarddelay = module.params['forwarddelay'] - self.hellotime = module.params['hellotime'] - self.maxage = module.params['maxage'] - 
self.ageingtime = module.params['ageingtime'] - self.hairpin = module.params['hairpin'] - self.path_cost = module.params['path_cost'] - self.mac = module.params['mac'] - self.runner = module.params['runner'] - self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] - self.vlanid = module.params['vlanid'] - self.vlandev = module.params['vlandev'] - self.flags = module.params['flags'] - self.ingress = module.params['ingress'] - self.egress = module.params['egress'] - self.vxlan_id = module.params['vxlan_id'] - self.vxlan_local = module.params['vxlan_local'] - self.vxlan_remote = module.params['vxlan_remote'] - self.ip_tunnel_dev = module.params['ip_tunnel_dev'] - self.ip_tunnel_local = module.params['ip_tunnel_local'] - self.ip_tunnel_remote = module.params['ip_tunnel_remote'] - self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] - self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] - self.nmcli_bin = self.module.get_bin_path('nmcli', True) - self.dhcp_client_id = module.params['dhcp_client_id'] - self.zone = module.params['zone'] - self.ssid = module.params['ssid'] - self.wifi = module.params['wifi'] - self.wifi_sec = module.params['wifi_sec'] - self.gsm = module.params['gsm'] - - if self.method4: - self.ipv4_method = self.method4 - elif self.type == 'dummy' and not self.ip4: - self.ipv4_method = 'disabled' - elif self.ip4: - self.ipv4_method = 'manual' - else: - self.ipv4_method = None - - if self.method6: - self.ipv6_method = self.method6 - elif self.type == 'dummy' and not self.ip6: - self.ipv6_method = 'disabled' - elif self.ip6: - self.ipv6_method = 'manual' - else: - self.ipv6_method = None - - self.edit_commands = [] - - def execute_command(self, cmd, use_unsafe_shell=False, data=None): - if isinstance(cmd, list): - cmd = [to_text(item) for item in cmd] - else: - cmd = to_text(cmd) - return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) - - def execute_edit_commands(self, commands, arguments): - arguments = arguments or [] - cmd = [self.nmcli_bin, 'con', 'edit'] + arguments - data = "\n".join(commands) - return self.execute_command(cmd, data=data) - - def connection_options(self, detect_change=False): - # Options common to multiple connection types. - options = { - 'connection.autoconnect': self.autoconnect, - 'connection.zone': self.zone, - } - - # IP address options. - if self.ip_conn_type and not self.master: - options.update({ - 'ipv4.addresses': self.ip4, - 'ipv4.dhcp-client-id': self.dhcp_client_id, - 'ipv4.dns': self.dns4, - 'ipv4.dns-search': self.dns4_search, - 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, - 'ipv4.gateway': self.gw4, - 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, - 'ipv4.routes': self.routes4, - 'ipv4.route-metric': self.route_metric4, - 'ipv4.routing-rules': self.routing_rules4, - 'ipv4.never-default': self.never_default4, - 'ipv4.method': self.ipv4_method, - 'ipv4.may-fail': self.may_fail4, - 'ipv6.addresses': self.ip6, - 'ipv6.dns': self.dns6, - 'ipv6.dns-search': self.dns6_search, - 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, - 'ipv6.gateway': self.gw6, - 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, - 'ipv6.method': self.ipv6_method, - }) - - # Layer 2 options. - if self.mac: - options.update({self.mac_setting: self.mac}) - - if self.mtu_conn_type: - options.update({self.mtu_setting: self.mtu}) - - # Connections that can have a master. - if self.slave_conn_type: - options.update({ - 'connection.master': self.master, - }) - - # Options specific to a connection type. 
- if self.type == 'bond': - options.update({ - 'arp-interval': self.arp_interval, - 'arp-ip-target': self.arp_ip_target, - 'downdelay': self.downdelay, - 'miimon': self.miimon, - 'mode': self.mode, - 'primary': self.primary, - 'updelay': self.updelay, - }) - elif self.type == 'bond-slave': - options.update({ - 'connection.slave-type': 'bond', - }) - elif self.type == 'bridge': - options.update({ - 'bridge.ageing-time': self.ageingtime, - 'bridge.forward-delay': self.forwarddelay, - 'bridge.hello-time': self.hellotime, - 'bridge.max-age': self.maxage, - 'bridge.priority': self.priority, - 'bridge.stp': self.stp, - }) - elif self.type == 'team': - options.update({ - 'team.runner': self.runner, - 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, - }) - elif self.type == 'bridge-slave': - options.update({ - 'connection.slave-type': 'bridge', - 'bridge-port.path-cost': self.path_cost, - 'bridge-port.hairpin-mode': self.hairpin, - 'bridge-port.priority': self.slavepriority, - }) - elif self.type == 'team-slave': - options.update({ - 'connection.slave-type': 'team', - }) - elif self.tunnel_conn_type: - options.update({ - 'ip-tunnel.local': self.ip_tunnel_local, - 'ip-tunnel.mode': self.type, - 'ip-tunnel.parent': self.ip_tunnel_dev, - 'ip-tunnel.remote': self.ip_tunnel_remote, - }) - if self.type == 'gre': - options.update({ - 'ip-tunnel.input-key': self.ip_tunnel_input_key, - 'ip-tunnel.output-key': self.ip_tunnel_output_key - }) - elif self.type == 'vlan': - options.update({ - 'vlan.id': self.vlanid, - 'vlan.parent': self.vlandev, - }) - elif self.type == 'vxlan': - options.update({ - 'vxlan.id': self.vxlan_id, - 'vxlan.local': self.vxlan_local, - 'vxlan.remote': self.vxlan_remote, - }) - elif self.type == 'wifi': - options.update({ - '802-11-wireless.ssid': self.ssid, - 'connection.slave-type': 'bond' if self.master else None, - }) - if self.wifi: - for name, value in self.wifi.items(): - options.update({ - '802-11-wireless.%s' % name: value - }) - if self.wifi_sec: - for name, value in self.wifi_sec.items(): - options.update({ - '802-11-wireless-security.%s' % name: value - }) - elif self.type == 'gsm': - if self.gsm: - for name, value in self.gsm.items(): - options.update({ - 'gsm.%s' % name: value, - }) - # Convert settings values based on the situation. - for setting, value in options.items(): - setting_type = self.settings_type(setting) - convert_func = None - if setting_type is bool: - # Convert all bool options to yes/no. - convert_func = self.bool_to_string - if detect_change: - if setting in ('vlan.id', 'vxlan.id'): - # Convert VLAN/VXLAN IDs to text when detecting changes. - convert_func = to_text - elif setting == self.mtu_setting: - # MTU is 'auto' by default when detecting changes. - convert_func = self.mtu_to_string - elif setting_type is list: - # Convert lists to strings for nmcli create/modify commands. 
- convert_func = self.list_to_string - - if callable(convert_func): - options[setting] = convert_func(options[setting]) - - return options - - @property - def ip_conn_type(self): - return self.type in ( - 'bond', - 'bridge', - 'dummy', - 'ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'vlan', - 'wifi', - 'gsm', - ) - - @property - def mac_setting(self): - if self.type == 'bridge': - return 'bridge.mac-address' - else: - return '802-3-ethernet.cloned-mac-address' - - @property - def mtu_conn_type(self): - return self.type in ( - 'dummy', - 'ethernet', - 'team-slave', - ) - - @property - def mtu_setting(self): - return '802-3-ethernet.mtu' - - @staticmethod - def mtu_to_string(mtu): - if not mtu: - return 'auto' - else: - return to_text(mtu) - - @property - def slave_conn_type(self): - return self.type in ( - 'bond-slave', - 'bridge-slave', - 'team-slave', - 'wifi', - ) - - @property - def tunnel_conn_type(self): - return self.type in ( - 'gre', - 'ipip', - 'sit', - ) - - @staticmethod - def bool_to_string(boolean): - if boolean: - return "yes" - else: - return "no" - - @staticmethod - def list_to_string(lst): - return ",".join(lst or [""]) - - @staticmethod - def settings_type(setting): - if setting in ('bridge.stp', - 'bridge-port.hairpin-mode', - 'connection.autoconnect', - 'ipv4.never-default', - 'ipv4.ignore-auto-dns', - 'ipv4.ignore-auto-routes', - 'ipv4.may-fail', - 'ipv6.ignore-auto-dns', - 'ipv6.ignore-auto-routes', - '802-11-wireless.hidden'): - return bool - elif setting in ('ipv4.dns', - 'ipv4.dns-search', - 'ipv4.routes', - 'ipv4.routing-rules', - 'ipv6.dns', - 'ipv6.dns-search', - '802-11-wireless-security.group', - '802-11-wireless-security.leap-password-flags', - '802-11-wireless-security.pairwise', - '802-11-wireless-security.proto', - '802-11-wireless-security.psk-flags', - '802-11-wireless-security.wep-key-flags', - '802-11-wireless.mac-address-blacklist'): - return list - return str - - def list_connection_info(self): - cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show'] - (rc, out, err) = self.execute_command(cmd) - if rc != 0: - raise NmcliModuleError(err) - return out.splitlines() - - def connection_exists(self): - return self.conn_name in self.list_connection_info() - - def down_connection(self): - cmd = [self.nmcli_bin, 'con', 'down', self.conn_name] - return self.execute_command(cmd) - - def up_connection(self): - cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] - return self.execute_command(cmd) - - def connection_update(self, nmcli_command): - if nmcli_command == 'create': - cmd = [self.nmcli_bin, 'con', 'add', 'type'] - if self.tunnel_conn_type: - cmd.append('ip-tunnel') - else: - cmd.append(self.type) - cmd.append('con-name') - elif nmcli_command == 'modify': - cmd = [self.nmcli_bin, 'con', 'modify'] - else: - self.module.fail_json(msg="Invalid nmcli command.") - cmd.append(self.conn_name) - - # Use connection name as default for interface name on creation. - if nmcli_command == 'create' and self.ifname is None: - ifname = self.conn_name - else: - ifname = self.ifname - - options = { - 'connection.interface-name': ifname, - } - - options.update(self.connection_options()) - - # Constructing the command. 
- for key, value in options.items(): - if value is not None: - if key in self.SECRET_OPTIONS: - self.edit_commands += ['set %s %s' % (key, value)] - continue - cmd.extend([key, value]) - - return self.execute_command(cmd) - - def create_connection(self): - status = self.connection_update('create') - if status[0] == 0 and self.edit_commands: - status = self.edit_connection() - if self.create_connection_up: - status = self.up_connection() - return status - - @property - def create_connection_up(self): - if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): - if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): - return True - elif self.type == 'team': - if (self.dns4 is not None) or (self.dns6 is not None): - return True - return False - - def remove_connection(self): - # self.down_connection() - cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] - return self.execute_command(cmd) - - def modify_connection(self): - status = self.connection_update('modify') - if status[0] == 0 and self.edit_commands: - status = self.edit_connection() - return status - - def edit_connection(self): - commands = self.edit_commands + ['save', 'quit'] - return self.execute_edit_commands(commands, arguments=[self.conn_name]) - - def show_connection(self): - cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] - - (rc, out, err) = self.execute_command(cmd) - - if rc != 0: - raise NmcliModuleError(err) - - p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') - - conn_info = dict() - for line in out.splitlines(): - pair = line.split(':', 1) - key = pair[0].strip() - key_type = self.settings_type(key) - if key and len(pair) > 1: - raw_value = pair[1].lstrip() - if raw_value == '--': - conn_info[key] = None - elif key == 'bond.options': - # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. - opts = raw_value.split(',') - for opt in opts: - alias_pair = opt.split('=', 1) - if len(alias_pair) > 1: - alias_key = alias_pair[0] - alias_value = alias_pair[1] - conn_info[alias_key] = alias_value - elif key == 'ipv4.routes': - conn_info[key] = [s.strip() for s in raw_value.split(';')] - elif key_type == list: - conn_info[key] = [s.strip() for s in raw_value.split(',')] - else: - m_enum = p_enum_value.match(raw_value) - if m_enum is not None: - value = m_enum.group(1) - else: - value = raw_value - conn_info[key] = value - - return conn_info - - def get_supported_properties(self, setting): - properties = [] - - if setting == '802-11-wireless-security': - set_property = 'psk' - set_value = 'FAKEVALUE' - commands = ['set %s.%s %s' % (setting, set_property, set_value)] - else: - commands = [] - - commands += ['print %s' % setting, 'quit', 'yes'] - - (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) - - if rc != 0: - raise NmcliModuleError(err) - - for line in out.splitlines(): - prefix = '%s.' 
% setting - if (line.startswith(prefix)): - pair = line.split(':', 1) - property = pair[0].strip().replace(prefix, '') - properties.append(property) - - return properties - - def check_for_unsupported_properties(self, setting): - if setting == '802-11-wireless': - setting_key = 'wifi' - elif setting == '802-11-wireless-security': - setting_key = 'wifi_sec' - else: - setting_key = setting - - supported_properties = self.get_supported_properties(setting) - unsupported_properties = [] - - for property, value in getattr(self, setting_key).items(): - if property not in supported_properties: - unsupported_properties.append(property) - - if unsupported_properties: - msg_options = [] - for property in unsupported_properties: - msg_options.append('%s.%s' % (setting_key, property)) - - msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) - if self.ignore_unsupported_suboptions: - self.module.warn(msg) - else: - self.module.fail_json(msg=msg) - - return unsupported_properties - - def _compare_conn_params(self, conn_info, options): - changed = False - diff_before = dict() - diff_after = dict() - - for key, value in options.items(): - if not value: - continue - - if key in conn_info: - current_value = conn_info[key] - if key == 'ipv4.routes' and current_value is not None: - # ipv4.routes do not have same options and show_connection() format - # options: ['10.11.0.0/24 10.10.0.2', '10.12.0.0/24 10.10.0.2 200'] - # show_connection(): ['{ ip = 10.11.0.0/24, nh = 10.10.0.2 }', '{ ip = 10.12.0.0/24, nh = 10.10.0.2, mt = 200 }'] - # Need to convert in order to compare both - current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+),\s*mt\s*=\s*([^} ]+)\s*}', r'\1 \2 \3', - route) for route in current_value] - current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+)\s*}', r'\1 \2', route) for route in current_value] - if key == self.mac_setting: - # MAC addresses are case insensitive, nmcli always reports them in uppercase - value = value.upper() - # ensure current_value is also converted to uppercase in case nmcli changes behaviour - current_value = current_value.upper() - if key == 'gsm.apn': - # Depending on version nmcli adds double-qoutes to gsm.apn - # Need to strip them in order to compare both - current_value = current_value.strip('"') - else: - # parameter does not exist - current_value = None - - if isinstance(current_value, list) and isinstance(value, list): - # compare values between two lists - if sorted(current_value) != sorted(value): - changed = True - else: - if current_value != to_text(value): - changed = True - - diff_before[key] = current_value - diff_after[key] = value - - diff = { - 'before': diff_before, - 'after': diff_after, - } - return (changed, diff) - - def is_connection_changed(self): - options = { - 'connection.interface-name': self.ifname, - } - options.update(self.connection_options(detect_change=True)) - return self._compare_conn_params(self.show_connection(), options) - - -def main(): - # Parsing argument file - module = AnsibleModule( - argument_spec=dict( - ignore_unsupported_suboptions=dict(type='bool', default=False), - autoconnect=dict(type='bool', default=True), - state=dict(type='str', required=True, choices=['absent', 'present']), - conn_name=dict(type='str', required=True), - master=dict(type='str'), - ifname=dict(type='str'), - type=dict(type='str', - choices=[ - 'bond', - 'bond-slave', - 'bridge', - 'bridge-slave', - 'dummy', - 'ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 
'team-slave', - 'vlan', - 'vxlan', - 'wifi', - 'gsm', - ]), - ip4=dict(type='str'), - gw4=dict(type='str'), - gw4_ignore_auto=dict(type='bool', default=False), - routes4=dict(type='list', elements='str'), - route_metric4=dict(type='int'), - routing_rules4=dict(type='list', elements='str'), - never_default4=dict(type='bool', default=False), - dns4=dict(type='list', elements='str'), - dns4_search=dict(type='list', elements='str'), - dns4_ignore_auto=dict(type='bool', default=False), - method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), - may_fail4=dict(type='bool', default=True), - dhcp_client_id=dict(type='str'), - ip6=dict(type='str'), - gw6=dict(type='str'), - gw6_ignore_auto=dict(type='bool', default=False), - dns6=dict(type='list', elements='str'), - dns6_search=dict(type='list', elements='str'), - dns6_ignore_auto=dict(type='bool', default=False), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), - # Bond Specific vars - mode=dict(type='str', default='balance-rr', - choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), - miimon=dict(type='int'), - downdelay=dict(type='int'), - updelay=dict(type='int'), - arp_interval=dict(type='int'), - arp_ip_target=dict(type='str'), - primary=dict(type='str'), - # general usage - mtu=dict(type='int'), - mac=dict(type='str'), - zone=dict(type='str'), - # bridge specific vars - stp=dict(type='bool', default=True), - priority=dict(type='int', default=128), - slavepriority=dict(type='int', default=32), - forwarddelay=dict(type='int', default=15), - hellotime=dict(type='int', default=2), - maxage=dict(type='int', default=20), - ageingtime=dict(type='int', default=300), - hairpin=dict(type='bool', default=True), - path_cost=dict(type='int', default=100), - # team specific vars - runner=dict(type='str', default='roundrobin', - choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), - # team active-backup runner specific options - runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), - # vlan specific vars - vlanid=dict(type='int'), - vlandev=dict(type='str'), - flags=dict(type='str'), - ingress=dict(type='str'), - egress=dict(type='str'), - # vxlan specific vars - vxlan_id=dict(type='int'), - vxlan_local=dict(type='str'), - vxlan_remote=dict(type='str'), - # ip-tunnel specific vars - ip_tunnel_dev=dict(type='str'), - ip_tunnel_local=dict(type='str'), - ip_tunnel_remote=dict(type='str'), - # ip-tunnel type gre specific vars - ip_tunnel_input_key=dict(type='str', no_log=True), - ip_tunnel_output_key=dict(type='str', no_log=True), - # 802-11-wireless* specific vars - ssid=dict(type='str'), - wifi=dict(type='dict'), - wifi_sec=dict(type='dict', no_log=True), - gsm=dict(type='dict'), - ), - mutually_exclusive=[['never_default4', 'gw4']], - required_if=[("type", "wifi", [("ssid")])], - supports_check_mode=True, - ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - nmcli = Nmcli(module) - - (rc, out, err) = (None, '', '') - result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} - - # check for issues - if nmcli.conn_name is None: - nmcli.module.fail_json(msg="Please specify a name for the connection") - # team checks - if nmcli.type == "team": - if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": - nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner 
activebackup") - # team-slave checks - if nmcli.type == 'team-slave': - if nmcli.master is None: - nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) - if nmcli.ifname is None: - nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) - if nmcli.type == 'wifi': - unsupported_properties = {} - if nmcli.wifi: - if 'ssid' in nmcli.wifi: - module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") - del nmcli.wifi['ssid'] - unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') - if nmcli.wifi_sec: - unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') - if nmcli.ignore_unsupported_suboptions and unsupported_properties: - for setting_key, properties in unsupported_properties.items(): - for property in properties: - del getattr(nmcli, setting_key)[property] - - try: - if nmcli.state == 'absent': - if nmcli.connection_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = nmcli.down_connection() - (rc, out, err) = nmcli.remove_connection() - if rc != 0: - module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) - - elif nmcli.state == 'present': - if nmcli.connection_exists(): - changed, diff = nmcli.is_connection_changed() - if module._diff: - result['diff'] = diff - - if changed: - # modify connection (note: this function is check mode aware) - # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) - result['Exists'] = 'Connections do exist so we are modifying them' - if module.check_mode: - module.exit_json(changed=True, **result) - (rc, out, err) = nmcli.modify_connection() - else: - result['Exists'] = 'Connections already exist and no changes made' - if module.check_mode: - module.exit_json(changed=False, **result) - if not nmcli.connection_exists(): - result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type)) - if module.check_mode: - module.exit_json(changed=True, **result) - (rc, out, err) = nmcli.create_connection() - if rc is not None and rc != 0: - module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) - except NmcliModuleError as e: - module.fail_json(name=nmcli.conn_name, msg=str(e)) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py deleted file mode 100644 index fc0d5e1c46..0000000000 --- a/plugins/modules/net_tools/nsupdate.py +++ /dev/null @@ -1,483 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Marcin Skarbek -# (c) 2016, Andreas Olsson -# (c) 2017, Loic Blot -# -# This module was ported from https://github.com/mskarbek/ansible-nsupdate -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: nsupdate - -short_description: Manage DNS records. -description: - - Create, update and remove DNS records using DDNS updates -requirements: - - dnspython -author: "Loic Blot (@nerzhul)" -options: - state: - description: - - Manage DNS record. 
-        choices: ['present', 'absent']
-        default: 'present'
-        type: str
-    server:
-        description:
-            - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
-        required: true
-        type: str
-    port:
-        description:
-            - Use this TCP port when connecting to C(server).
-        default: 53
-        type: int
-    key_name:
-        description:
-            - Use TSIG key name to authenticate against DNS C(server).
-        type: str
-    key_secret:
-        description:
-            - Use TSIG key secret, associated with C(key_name), to authenticate against C(server).
-        type: str
-    key_algorithm:
-        description:
-            - Specify key algorithm used by C(key_secret).
-        choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
-                  'hmac-sha512']
-        default: 'hmac-md5'
-        type: str
-    zone:
-        description:
-            - DNS record will be modified on this C(zone).
-            - When omitted DNS will be queried to attempt finding the correct zone.
-            - Starting with Ansible 2.7 this parameter is optional.
-        type: str
-    record:
-        description:
-            - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
-        required: true
-        type: str
-    type:
-        description:
-            - Sets the record type.
-        default: 'A'
-        type: str
-    ttl:
-        description:
-            - Sets the record TTL.
-        default: 3600
-        type: int
-    value:
-        description:
-            - Sets the record value.
-        type: list
-        elements: str
-    protocol:
-        description:
-            - Sets the transport protocol (TCP or UDP). TCP is recommended as the more robust option.
-        default: 'tcp'
-        choices: ['tcp', 'udp']
-        type: str
-'''
-
-EXAMPLES = '''
-- name: Add or modify ansible.example.org A to 192.168.1.1
-  community.general.nsupdate:
-    key_name: "nsupdate"
-    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
-    server: "10.1.1.1"
-    zone: "example.org"
-    record: "ansible"
-    value: "192.168.1.1"
-
-- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
-  community.general.nsupdate:
-    key_name: "nsupdate"
-    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
-    server: "10.1.1.1"
-    zone: "example.org"
-    record: "ansible"
-    value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
-
-- name: Remove puppet.example.org CNAME
-  community.general.nsupdate:
-    key_name: "nsupdate"
-    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
-    server: "10.1.1.1"
-    zone: "example.org"
-    record: "puppet"
-    type: "CNAME"
-    state: absent
-
-- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
-  community.general.nsupdate:
-    key_name: "nsupdate"
-    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
-    server: "10.1.1.1"
-    record: "1.1.168.192.in-addr.arpa."
-    type: "PTR"
-    value: "ansible.example.org."
-    state: present
-
-- name: Remove 1.1.168.192.in-addr.arpa. PTR
-  community.general.nsupdate:
-    key_name: "nsupdate"
-    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
-    server: "10.1.1.1"
-    record: "1.1.168.192.in-addr.arpa."
-    type: "PTR"
-    state: absent
-'''
-
-RETURN = '''
-changed:
-    description: If the module has modified the record
-    returned: success
-    type: bool
-record:
-    description: DNS record
-    returned: success
-    type: str
-    sample: 'ansible'
-ttl:
-    description: DNS record TTL
-    returned: success
-    type: int
-    sample: 86400
-type:
-    description: DNS record type
-    returned: success
-    type: str
-    sample: 'CNAME'
-value:
-    description: DNS record value(s)
-    returned: success
-    type: list
-    sample: '192.168.1.1'
-zone:
-    description: DNS record zone
-    returned: success
-    type: str
-    sample: 'example.org.'
-dns_rc: - description: dnspython return code - returned: always - type: int - sample: 4 -dns_rc_str: - description: dnspython return code (string representation) - returned: always - type: str - sample: 'REFUSED' -''' - -import traceback - -from binascii import Error as binascii_error -from socket import error as socket_error - -DNSPYTHON_IMP_ERR = None -try: - import dns.update - import dns.query - import dns.tsigkeyring - import dns.message - import dns.resolver - - HAVE_DNSPYTHON = True -except ImportError: - DNSPYTHON_IMP_ERR = traceback.format_exc() - HAVE_DNSPYTHON = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class RecordManager(object): - def __init__(self, module): - self.module = module - - if module.params['key_name']: - try: - self.keyring = dns.tsigkeyring.from_text({ - module.params['key_name']: module.params['key_secret'] - }) - except TypeError: - module.fail_json(msg='Missing key_secret') - except binascii_error as e: - module.fail_json(msg='TSIG key error: %s' % to_native(e)) - else: - self.keyring = None - - if module.params['key_algorithm'] == 'hmac-md5': - self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT' - else: - self.algorithm = module.params['key_algorithm'] - - if module.params['zone'] is None: - if module.params['record'][-1] != '.': - self.module.fail_json(msg='record must be absolute when omitting zone parameter') - self.zone = self.lookup_zone() - else: - self.zone = module.params['zone'] - - if self.zone[-1] != '.': - self.zone += '.' - - if module.params['record'][-1] != '.': - self.fqdn = module.params['record'] + '.' + self.zone - else: - self.fqdn = module.params['record'] - - if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None: - self.value = list(map(self.txt_helper, self.module.params['value'])) - else: - self.value = self.module.params['value'] - - self.dns_rc = 0 - - def txt_helper(self, entry): - if entry[0] == '"' and entry[-1] == '"': - return entry - return '"{text}"'.format(text=entry) - - def lookup_zone(self): - name = dns.name.from_text(self.module.params['record']) - while True: - query = dns.message.make_query(name, dns.rdatatype.SOA) - if self.keyring: - query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) - try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) - except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: - self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % ( - self.module.params['server'], self.module.params['record'])) - try: - zone = lookup.authority[0].name - if zone == name: - return zone.to_text() - except IndexError: - pass - try: - name = name.parent() - except dns.name.NoParent: - self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' 
                                      % (self.module.params['record']))
-
-    def __do_update(self, update):
-        response = None
-        try:
-            if self.module.params['protocol'] == 'tcp':
-                response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
-            else:
-                response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
-        except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
-            self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
-        except (socket_error, dns.exception.Timeout) as e:
-            self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
-        return response
-
-    def create_or_update_record(self):
-        result = {'changed': False, 'failed': False}
-
-        exists = self.record_exists()
-        if exists in [0, 2]:
-            if self.module.check_mode:
-                self.module.exit_json(changed=True)
-
-            if exists == 0:
-                self.dns_rc = self.create_record()
-                if self.dns_rc != 0:
-                    result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
-
-            elif exists == 2:
-                self.dns_rc = self.modify_record()
-                if self.dns_rc != 0:
-                    result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
-
-            if self.dns_rc != 0:
-                result['failed'] = True
-            else:
-                result['changed'] = True
-
-        else:
-            result['changed'] = False
-
-        return result
-
-    def create_record(self):
-        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
-        for entry in self.value:
-            try:
-                update.add(self.module.params['record'],
-                           self.module.params['ttl'],
-                           self.module.params['type'],
-                           entry)
-            except AttributeError:
-                self.module.fail_json(msg='value needed when state=present')
-            except dns.exception.SyntaxError:
-                self.module.fail_json(msg='Invalid/malformed value')
-
-        response = self.__do_update(update)
-        return dns.message.Message.rcode(response)
-
-    def modify_record(self):
-        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
-        update.delete(self.module.params['record'], self.module.params['type'])
-        for entry in self.value:
-            try:
-                update.add(self.module.params['record'],
-                           self.module.params['ttl'],
-                           self.module.params['type'],
-                           entry)
-            except AttributeError:
-                self.module.fail_json(msg='value needed when state=present')
-            except dns.exception.SyntaxError:
-                self.module.fail_json(msg='Invalid/malformed value')
-        response = self.__do_update(update)
-
-        return dns.message.Message.rcode(response)
-
-    def remove_record(self):
-        result = {'changed': False, 'failed': False}
-
-        if self.record_exists() == 0:
-            return result
-
-        # The record exists; in check mode, report a change without applying it
-        # (see the editor's sketch just below).
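# --- Editor's example (not part of the original module) --------------------
# A minimal sketch of the dispatch contract used by create_or_update_record()
# above: record_exists() returns 0 (record absent), 1 (present and up to
# date) or 2 (present but different), and in check mode the module exits
# early with changed=True instead of touching DNS. Illustrative only.
def planned_action(exists_state, check_mode):
    if exists_state in (0, 2):
        if check_mode:
            return 'report changed=True without updating DNS'
        return 'create' if exists_state == 0 else 'modify'
    return 'no change'

assert planned_action(0, check_mode=False) == 'create'
assert planned_action(2, check_mode=False) == 'modify'
assert planned_action(1, check_mode=False) == 'no change'
# ----------------------------------------------------------------------------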
- if self.module.check_mode: - self.module.exit_json(changed=True) - - update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - update.delete(self.module.params['record'], self.module.params['type']) - - response = self.__do_update(update) - self.dns_rc = dns.message.Message.rcode(response) - - if self.dns_rc != 0: - result['failed'] = True - result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc - else: - result['changed'] = True - - return result - - def record_exists(self): - update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - try: - update.present(self.module.params['record'], self.module.params['type']) - except dns.rdatatype.UnknownRdatatype as e: - self.module.fail_json(msg='Record error: {0}'.format(to_native(e))) - - response = self.__do_update(update) - self.dns_rc = dns.message.Message.rcode(response) - if self.dns_rc == 0: - if self.module.params['state'] == 'absent': - return 1 - for entry in self.value: - try: - update.present(self.module.params['record'], self.module.params['type'], entry) - except AttributeError: - self.module.fail_json(msg='value needed when state=present') - except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') - response = self.__do_update(update) - self.dns_rc = dns.message.Message.rcode(response) - if self.dns_rc == 0: - if self.ttl_changed(): - return 2 - else: - return 1 - else: - return 2 - else: - return 0 - - def ttl_changed(self): - query = dns.message.make_query(self.fqdn, self.module.params['type']) - if self.keyring: - query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) - - try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) - except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - - if lookup.rcode() != dns.rcode.NOERROR: - self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') - - current_ttl = lookup.answer[0].ttl - return current_ttl != self.module.params['ttl'] - - -def main(): - tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', - 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), - server=dict(required=True, type='str'), - port=dict(required=False, default=53, type='int'), - key_name=dict(required=False, type='str'), - key_secret=dict(required=False, type='str', no_log=True), - key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'), - zone=dict(required=False, default=None, type='str'), - record=dict(required=True, type='str'), - type=dict(required=False, default='A', type='str'), - ttl=dict(required=False, default=3600, type='int'), - value=dict(required=False, default=None, type='list', elements='str'), - protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') - ), - supports_check_mode=True - ) - - if not HAVE_DNSPYTHON: - module.fail_json(msg=missing_required_lib('dnspython'), 
                        exception=DNSPYTHON_IMP_ERR)
-
-    if len(module.params["record"]) == 0:
-        module.fail_json(msg='record cannot be empty.')
-
-    record = RecordManager(module)
-    result = {}
-    if module.params["state"] == 'absent':
-        result = record.remove_record()
-    elif module.params["state"] == 'present':
-        result = record.create_or_update_record()
-
-    result['dns_rc'] = record.dns_rc
-    result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
-    if result['failed']:
-        module.fail_json(**result)
-    else:
-        result['record'] = dict(zone=record.zone,
-                                record=module.params['record'],
-                                type=module.params['type'],
-                                ttl=module.params['ttl'],
-                                value=record.value)
-
-        module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/net_tools/omapi_host.py b/plugins/modules/net_tools/omapi_host.py
deleted file mode 100644
index 4d65fcb95d..0000000000
--- a/plugins/modules/net_tools/omapi_host.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# copyright: (c) 2016, Loic Blot
-# Sponsored by Infopro Digital. http://www.infopro-digital.com/
-# Sponsored by E.T.A.I. http://www.etai.fr/
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: omapi_host
-short_description: Set up OMAPI hosts.
-description: Manage OMAPI hosts on compatible DHCPd servers.
-requirements:
-  - pypureomapi
-author:
-- Loic Blot (@nerzhul)
-options:
-    state:
-        description:
-            - Create or remove OMAPI host.
-        type: str
-        required: true
-        choices: [ absent, present ]
-    hostname:
-        description:
-            - Sets the host lease hostname (mandatory if state=present).
-        type: str
-        aliases: [ name ]
-    host:
-        description:
-            - Sets OMAPI server host to interact with.
-        type: str
-        default: localhost
-    port:
-        description:
-            - Sets the OMAPI server port to interact with.
-        type: int
-        default: 7911
-    key_name:
-        description:
-            - Sets the TSIG key name for authenticating against OMAPI server.
-        type: str
-        required: true
-    key:
-        description:
-            - Sets the TSIG key content for authenticating against OMAPI server.
-        type: str
-        required: true
-    macaddr:
-        description:
-            - Sets the lease host MAC address.
-        type: str
-        required: true
-    ip:
-        description:
-            - Sets the lease host IP address.
-        type: str
-    statements:
-        description:
-            - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
-        type: list
-        elements: str
-        default: []
-    ddns:
-        description:
-            - Enable dynamic DNS updates for this host.
-        type: bool
-        default: no
-
-'''
-EXAMPLES = r'''
-- name: Add a host using OMAPI
-  community.general.omapi_host:
-    key_name: defomapi
-    key: +bFQtBCta6j2vWkjPkNFtgA==
-    host: 10.98.4.55
-    macaddr: 44:dd:ab:dd:11:44
-    name: server01
-    ip: 192.168.88.99
-    ddns: yes
-    statements:
-    - filename "pxelinux.0"
-    - next-server 1.1.1.1
-    state: present
-
-- name: Remove a host using OMAPI
-  community.general.omapi_host:
-    key_name: defomapi
-    key: +bFQtBCta6j2vWkjPkNFtgA==
-    host: 10.1.1.1
-    macaddr: 00:66:ab:dd:11:44
-    state: absent
-'''
-
-RETURN = r'''
-lease:
-    description: dictionary containing host information
-    returned: success
-    type: complex
-    contains:
-        ip-address:
-            description: IP address, if one is assigned.
-            returned: success
-            type: str
-            sample: '192.168.1.5'
-        hardware-address:
-            description: MAC address
-            returned: success
-            type: str
-            sample: '00:11:22:33:44:55'
-        hardware-type:
-            description: hardware type, generally '1'
-            returned: success
-            type: int
-            sample: 1
-        name:
-            description: hostname
-            returned: success
-            type: str
-            sample: 'mydesktop'
-'''
-
-import binascii
-import socket
-import struct
-import traceback
-
-PUREOMAPI_IMP_ERR = None
-try:
-    from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
-    from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
-    from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
-    pureomapi_found = True
-except ImportError:
-    PUREOMAPI_IMP_ERR = traceback.format_exc()
-    pureomapi_found = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_bytes, to_native
-
-
-class OmapiHostManager:
-    def __init__(self, module):
-        self.module = module
-        self.omapi = None
-        self.connect()
-
-    def connect(self):
-        try:
-            self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']),
-                               self.module.params['key'])
-        except binascii.Error:
-            self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
-        except OmapiError as e:
-            self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
-                                      "are valid. Exception was: %s" % to_native(e))
-        except socket.error as e:
-            self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
-
-    def get_host(self, macaddr):
-        msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
-        msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
-        msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
-        response = self.omapi.query_server(msg)
-        if response.opcode != OMAPI_OP_UPDATE:
-            return None
-        return response
-
-    @staticmethod
-    def unpack_facts(obj):
-        result = dict(obj)
-        if 'hardware-address' in result:
-            result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')]))
-
-        if 'ip-address' in result:
-            result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')]))
-
-        if 'hardware-type' in result:
-            result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')])
-
-        return result
-
-    def setup_host(self):
-        if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
-            self.module.fail_json(msg="name attribute cannot be empty when adding or modifying a host.")
-
-        msg = None
-        host_response = self.get_host(self.module.params['macaddr'])
-        # If host was not found using macaddr, add create message
-        if host_response is None:
-            msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
-            msg.message.append((to_bytes('create'), struct.pack('!I', 1)))
-            msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1)))
-            msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr'])))
-            msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1)))
-            msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname'])))
-            if self.module.params['ip'] is not None:
-                msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
-
-            stmt_join = ""
-            if self.module.params['ddns']:
-                stmt_join +=
'ddns-hostname "{0}"; '.format(self.module.params['hostname']) - - try: - if len(self.module.params['statements']) > 0: - stmt_join += "; ".join(self.module.params['statements']) - stmt_join += "; " - except TypeError as e: - self.module.fail_json(msg="Invalid statements found: %s" % to_native(e)) - - if len(stmt_join) > 0: - msg.obj.append((to_bytes('statements'), to_bytes(stmt_join))) - - try: - response = self.omapi.query_server(msg) - if response.opcode != OMAPI_OP_UPDATE: - self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters " - "are valid.") - self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj)) - except OmapiError as e: - self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) - # Forge update message - else: - response_obj = self.unpack_facts(host_response.obj) - fields_to_update = {} - - if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \ - unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']: - fields_to_update['ip-address'] = pack_ip(self.module.params['ip']) - - # Name cannot be changed - if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']: - self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. " - "Please delete host and add new." % - (response_obj['name'], self.module.params['hostname'])) - - """ - # It seems statements are not returned by OMAPI, then we cannot modify them at this moment. - if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \ - response_obj['statements'] != self.module.params['statements']: - with open('/tmp/omapi', 'w') as fb: - for (k,v) in iteritems(response_obj): - fb.writelines('statements: %s %s\n' % (k, v)) - """ - if len(fields_to_update) == 0: - self.module.exit_json(changed=False, lease=response_obj) - else: - msg = OmapiMessage.update(host_response.handle) - msg.update_object(fields_to_update) - - try: - response = self.omapi.query_server(msg) - if response.opcode != OMAPI_OP_STATUS: - self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters " - "are valid.") - self.module.exit_json(changed=True) - except OmapiError as e: - self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) - - def remove_host(self): - try: - self.omapi.del_host(self.module.params['macaddr']) - self.module.exit_json(changed=True) - except OmapiErrorNotFound: - self.module.exit_json() - except OmapiError as e: - self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', required=True, choices=['absent', 'present']), - host=dict(type='str', default="localhost"), - port=dict(type='int', default=7911), - key_name=dict(type='str', required=True), - key=dict(type='str', required=True, no_log=True), - macaddr=dict(type='str', required=True), - hostname=dict(type='str', aliases=['name']), - ip=dict(type='str'), - ddns=dict(type='bool', default=False), - statements=dict(type='list', elements='str', default=[]), - ), - supports_check_mode=False, - ) - - if not pureomapi_found: - module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR) - - if module.params['key'] is None or len(module.params["key"]) == 0: - module.fail_json(msg="'key' parameter cannot be empty.") - - if module.params['key_name'] is None or len(module.params["key_name"]) == 0: - module.fail_json(msg="'key_name' parameter 
cannot be empty.") - - host_manager = OmapiHostManager(module) - try: - if module.params['state'] == 'present': - host_manager.setup_host() - elif module.params['state'] == 'absent': - host_manager.remove_host() - except ValueError as e: - module.fail_json(msg="OMAPI input value error: %s" % to_native(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/pritunl/pritunl_org.py b/plugins/modules/net_tools/pritunl/pritunl_org.py deleted file mode 100644 index 35796ae361..0000000000 --- a/plugins/modules/net_tools/pritunl/pritunl_org.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = """ ---- -module: pritunl_org -author: Florian Dambrine (@Lowess) -version_added: 2.5.0 -short_description: Manages Pritunl Organizations using the Pritunl API -description: - - A module to manage Pritunl organizations using the Pritunl API. -extends_documentation_fragment: - - community.general.pritunl -options: - name: - type: str - required: true - aliases: - - org - description: - - The name of the organization to manage in Pritunl. - - force: - type: bool - default: false - description: - - If I(force) is C(true) and I(state) is C(absent), the module - will delete the organization, no matter if it contains users - or not. By default I(force) is C(false), which will cause the - module to fail the deletion of the organization when it contains - users. - - state: - type: str - default: 'present' - choices: - - present - - absent - description: - - If C(present), the module adds organization I(name) to - Pritunl. If C(absent), attempt to delete the organization - from Pritunl (please read about I(force) usage). -""" - -EXAMPLES = """ -- name: Ensure the organization named MyOrg exists - community.general.pritunl_org: - state: present - name: MyOrg - -- name: Ensure the organization named MyOrg does not exist - community.general.pritunl_org: - state: absent - name: MyOrg -""" - -RETURN = """ -response: - description: JSON representation of a Pritunl Organization. 
- returned: success - type: dict - sample: - { - "auth_api": False, - "name": "Foo", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "csftwlu6uhralzi2dpmhekz3", - } -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common.dict_transformations import dict_merge -from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( - PritunlException, - delete_pritunl_organization, - post_pritunl_organization, - list_pritunl_organizations, - get_pritunl_settings, - pritunl_argument_spec, -) - - -def add_pritunl_organization(module): - result = {} - - org_name = module.params.get("name") - - org_obj_list = list_pritunl_organizations( - **dict_merge( - get_pritunl_settings(module), - {"filters": {"name": org_name}}, - ) - ) - - # If the organization already exists - if len(org_obj_list) > 0: - result["changed"] = False - result["response"] = org_obj_list[0] - else: - # Otherwise create it - response = post_pritunl_organization( - **dict_merge( - get_pritunl_settings(module), - {"organization_name": org_name}, - ) - ) - result["changed"] = True - result["response"] = response - - module.exit_json(**result) - - -def remove_pritunl_organization(module): - result = {} - - org_name = module.params.get("name") - force = module.params.get("force") - - org_obj_list = [] - - org_obj_list = list_pritunl_organizations( - **dict_merge( - get_pritunl_settings(module), - { - "filters": {"name": org_name}, - }, - ) - ) - - # No organization found - if len(org_obj_list) == 0: - result["changed"] = False - result["response"] = {} - - else: - # Otherwise attempt to delete it - org = org_obj_list[0] - - # Only accept deletion under specific conditions - if force or org["user_count"] == 0: - response = delete_pritunl_organization( - **dict_merge( - get_pritunl_settings(module), - {"organization_id": org["id"]}, - ) - ) - result["changed"] = True - result["response"] = response - else: - module.fail_json( - msg=( - "Can not remove organization '%s' with %d attached users. 
" - "Either set 'force' option to true or remove active users " - "from the organization" - ) - % (org_name, org["user_count"]) - ) - - module.exit_json(**result) - - -def main(): - argument_spec = pritunl_argument_spec() - - argument_spec.update( - dict( - name=dict(required=True, type="str", aliases=["org"]), - force=dict(required=False, type="bool", default=False), - state=dict( - required=False, choices=["present", "absent"], default="present" - ), - ) - ), - - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get("state") - - try: - if state == "present": - add_pritunl_organization(module) - elif state == "absent": - remove_pritunl_organization(module) - except PritunlException as e: - module.fail_json(msg=to_native(e)) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/net_tools/pritunl/pritunl_org_info.py b/plugins/modules/net_tools/pritunl/pritunl_org_info.py deleted file mode 100644 index a7e65c80d1..0000000000 --- a/plugins/modules/net_tools/pritunl/pritunl_org_info.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = """ ---- -module: pritunl_org_info -author: Florian Dambrine (@Lowess) -version_added: 2.5.0 -short_description: List Pritunl Organizations using the Pritunl API -description: - - A module to list Pritunl organizations using the Pritunl API. -extends_documentation_fragment: - - community.general.pritunl -options: - organization: - type: str - required: false - aliases: - - org - default: null - description: - - Name of the Pritunl organization to search for. - If none provided, the module will return all Pritunl - organizations. -""" - -EXAMPLES = """ -- name: List all existing Pritunl organizations - community.general.pritunl_org_info: - -- name: Search for an organization named MyOrg - community.general.pritunl_user_info: - organization: MyOrg -""" - -RETURN = """ -organizations: - description: List of Pritunl organizations. 
- returned: success - type: list - elements: dict - sample: - [ - { - "auth_api": False, - "name": "FooOrg", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "csftwlu6uhralzi2dpmhekz3", - }, - { - "auth_api": False, - "name": "MyOrg", - "auth_token": None, - "user_count": 3, - "auth_secret": None, - "id": "58070daee63f3b2e6e472c36", - }, - { - "auth_api": False, - "name": "BarOrg", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "v1sncsxxybnsylc8gpqg85pg", - } - ] -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common.dict_transformations import dict_merge -from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( - PritunlException, - get_pritunl_settings, - list_pritunl_organizations, - pritunl_argument_spec, -) - - -def get_pritunl_organizations(module): - org_name = module.params.get("organization") - - organizations = [] - - organizations = list_pritunl_organizations( - **dict_merge( - get_pritunl_settings(module), - {"filters": {"name": org_name} if org_name else None}, - ) - ) - - if org_name and len(organizations) == 0: - # When an org_name is provided but no organization match return an error - module.fail_json(msg="Organization '%s' does not exist" % org_name) - - result = {} - result["changed"] = False - result["organizations"] = organizations - - module.exit_json(**result) - - -def main(): - argument_spec = pritunl_argument_spec() - - argument_spec.update( - dict( - organization=dict(required=False, type="str", default=None, aliases=["org"]) - ) - ), - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - - try: - get_pritunl_organizations(module) - except PritunlException as e: - module.fail_json(msg=to_native(e)) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/net_tools/pritunl/pritunl_user.py b/plugins/modules/net_tools/pritunl/pritunl_user.py deleted file mode 100644 index 7ea4f18a44..0000000000 --- a/plugins/modules/net_tools/pritunl/pritunl_user.py +++ /dev/null @@ -1,343 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = """ ---- -module: pritunl_user -author: "Florian Dambrine (@Lowess)" -version_added: 2.3.0 -short_description: Manage Pritunl Users using the Pritunl API -description: - - A module to manage Pritunl users using the Pritunl API. -extends_documentation_fragment: - - community.general.pritunl -options: - organization: - type: str - required: true - aliases: - - org - description: - - The name of the organization the user is part of. - - state: - type: str - default: 'present' - choices: - - present - - absent - description: - - If C(present), the module adds user I(user_name) to - the Pritunl I(organization). If C(absent), removes the user - I(user_name) from the Pritunl I(organization). - - user_name: - type: str - required: true - default: null - description: - - Name of the user to create or delete from Pritunl. - - user_email: - type: str - required: false - default: null - description: - - Email address associated with the user I(user_name). 
-
-    user_type:
-        type: str
-        required: false
-        default: client
-        choices:
-            - client
-            - server
-        description:
-            - Type of the user I(user_name).
-
-    user_groups:
-        type: list
-        elements: str
-        required: false
-        default: null
-        description:
-            - List of groups associated with the user I(user_name).
-
-    user_disabled:
-        type: bool
-        required: false
-        default: null
-        description:
-            - Enable/Disable the user I(user_name).
-
-    user_gravatar:
-        type: bool
-        required: false
-        default: null
-        description:
-            - Enable/Disable Gravatar usage for the user I(user_name).
-"""
-
-EXAMPLES = """
-- name: Create the user Foo with email address foo@bar.com in MyOrg
-  community.general.pritunl_user:
-    state: present
-    organization: MyOrg
-    user_name: Foo
-    user_email: foo@bar.com
-
-- name: Disable the user Foo but keep it in Pritunl
-  community.general.pritunl_user:
-    state: present
-    organization: MyOrg
-    user_name: Foo
-    user_email: foo@bar.com
-    user_disabled: yes
-
-- name: Make sure the user Foo is not part of MyOrg anymore
-  community.general.pritunl_user:
-    state: absent
-    organization: MyOrg
-    user_name: Foo
-"""
-
-RETURN = """
-response:
-    description: JSON representation of Pritunl Users.
-    returned: success
-    type: dict
-    sample:
-        {
-            "audit": false,
-            "auth_type": "google",
-            "bypass_secondary": false,
-            "client_to_client": false,
-            "disabled": false,
-            "dns_mapping": null,
-            "dns_servers": null,
-            "dns_suffix": null,
-            "email": "foo@bar.com",
-            "gravatar": true,
-            "groups": [
-                "foo", "bar"
-            ],
-            "id": "5d070dafe63q3b2e6s472c3b",
-            "name": "foo@acme.com",
-            "network_links": [],
-            "organization": "58070daee6sf342e6e4s2c36",
-            "organization_name": "Acme",
-            "otp_auth": true,
-            "otp_secret": "35H5EJA3XB2$4CWG",
-            "pin": false,
-            "port_forwarding": [],
-            "servers": [],
-        }
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
-    PritunlException,
-    delete_pritunl_user,
-    get_pritunl_settings,
-    list_pritunl_organizations,
-    list_pritunl_users,
-    post_pritunl_user,
-    pritunl_argument_spec,
-)
-
-
-def add_or_update_pritunl_user(module):
-    result = {}
-
-    org_name = module.params.get("organization")
-    user_name = module.params.get("user_name")
-
-    user_params = {
-        "name": user_name,
-        "email": module.params.get("user_email"),
-        "groups": module.params.get("user_groups"),
-        "disabled": module.params.get("user_disabled"),
-        "gravatar": module.params.get("user_gravatar"),
-        "type": module.params.get("user_type"),
-    }
-
-    org_obj_list = list_pritunl_organizations(
-        **dict_merge(
-            get_pritunl_settings(module),
-            {"filters": {"name": org_name}},
-        )
-    )
-
-    if len(org_obj_list) == 0:
-        module.fail_json(
-            msg="Cannot add user to organization '%s', which does not exist" % org_name
-        )
-
-    org_id = org_obj_list[0]["id"]
-
-    # Grab existing users from this org
-    users = list_pritunl_users(
-        **dict_merge(
-            get_pritunl_settings(module),
-            {
-                "organization_id": org_id,
-                "filters": {"name": user_name},
-            },
-        )
-    )
-
-    # Check if the pritunl user already exists
-    if len(users) > 0:
-        # Compare remote user params with local user_params and trigger update if needed
-        user_params_changed = False
-        for key in user_params.keys():
-            # When a param is not specified grab existing ones to prevent from changing it with the PUT request
-            if user_params[key] is None:
-                user_params[key] =
users[0][key] - - # 'groups' is a list comparison - if key == "groups": - if set(users[0][key]) != set(user_params[key]): - user_params_changed = True - - # otherwise it is either a boolean or a string - else: - if users[0][key] != user_params[key]: - user_params_changed = True - - # Trigger a PUT on the API to update the current user if settings have changed - if user_params_changed: - response = post_pritunl_user( - **dict_merge( - get_pritunl_settings(module), - { - "organization_id": org_id, - "user_id": users[0]["id"], - "user_data": user_params, - }, - ) - ) - - result["changed"] = True - result["response"] = response - else: - result["changed"] = False - result["response"] = users - else: - response = post_pritunl_user( - **dict_merge( - get_pritunl_settings(module), - { - "organization_id": org_id, - "user_data": user_params, - }, - ) - ) - result["changed"] = True - result["response"] = response - - module.exit_json(**result) - - -def remove_pritunl_user(module): - result = {} - - org_name = module.params.get("organization") - user_name = module.params.get("user_name") - - org_obj_list = [] - - org_obj_list = list_pritunl_organizations( - **dict_merge( - get_pritunl_settings(module), - { - "filters": {"name": org_name}, - }, - ) - ) - - if len(org_obj_list) == 0: - module.fail_json( - msg="Can not remove user '%s' from a non existing organization '%s'" - % (user_name, org_name) - ) - - org_id = org_obj_list[0]["id"] - - # Grab existing users from this org - users = list_pritunl_users( - **dict_merge( - get_pritunl_settings(module), - { - "organization_id": org_id, - "filters": {"name": user_name}, - }, - ) - ) - - # Check if the pritunl user exists, if not, do nothing - if len(users) == 0: - result["changed"] = False - result["response"] = {} - - # Otherwise remove the org from Pritunl - else: - response = delete_pritunl_user( - **dict_merge( - get_pritunl_settings(module), - { - "organization_id": org_id, - "user_id": users[0]["id"], - }, - ) - ) - result["changed"] = True - result["response"] = response - - module.exit_json(**result) - - -def main(): - argument_spec = pritunl_argument_spec() - - argument_spec.update( - dict( - organization=dict(required=True, type="str", aliases=["org"]), - state=dict( - required=False, choices=["present", "absent"], default="present" - ), - user_name=dict(required=True, type="str"), - user_type=dict( - required=False, choices=["client", "server"], default="client" - ), - user_email=dict(required=False, type="str", default=None), - user_groups=dict(required=False, type="list", elements="str", default=None), - user_disabled=dict(required=False, type="bool", default=None), - user_gravatar=dict(required=False, type="bool", default=None), - ) - ), - - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get("state") - - try: - if state == "present": - add_or_update_pritunl_user(module) - elif state == "absent": - remove_pritunl_user(module) - except PritunlException as e: - module.fail_json(msg=to_native(e)) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/net_tools/pritunl/pritunl_user_info.py b/plugins/modules/net_tools/pritunl/pritunl_user_info.py deleted file mode 100644 index e8cf5e2955..0000000000 --- a/plugins/modules/net_tools/pritunl/pritunl_user_info.py +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import 
absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-module: pritunl_user_info
-author: "Florian Dambrine (@Lowess)"
-version_added: 2.3.0
-short_description: List Pritunl Users using the Pritunl API
-description:
-    - A module to list Pritunl users using the Pritunl API.
-extends_documentation_fragment:
-    - community.general.pritunl
-options:
-    organization:
-        type: str
-        required: true
-        aliases:
-            - org
-        description:
-            - The name of the organization the user is part of.
-
-    user_name:
-        type: str
-        required: false
-        description:
-            - Name of the user to filter on Pritunl.
-
-    user_type:
-        type: str
-        required: false
-        default: client
-        choices:
-            - client
-            - server
-        description:
-            - Type of the user I(user_name).
-"""
-
-EXAMPLES = """
-- name: List all existing users part of the organization MyOrg
-  community.general.pritunl_user_info:
-    organization: MyOrg
-
-- name: Search for the user named Florian part of the organization MyOrg
-  community.general.pritunl_user_info:
-    organization: MyOrg
-    user_name: Florian
-"""
-
-RETURN = """
-users:
-    description: List of Pritunl users.
-    returned: success
-    type: list
-    elements: dict
-    sample:
-        [
-            {
-                "audit": false,
-                "auth_type": "google",
-                "bypass_secondary": false,
-                "client_to_client": false,
-                "disabled": false,
-                "dns_mapping": null,
-                "dns_servers": null,
-                "dns_suffix": null,
-                "email": "foo@bar.com",
-                "gravatar": true,
-                "groups": [
-                    "foo", "bar"
-                ],
-                "id": "5d070dafe63q3b2e6s472c3b",
-                "name": "foo@acme.com",
-                "network_links": [],
-                "organization": "58070daee6sf342e6e4s2c36",
-                "organization_name": "Acme",
-                "otp_auth": true,
-                "otp_secret": "35H5EJA3XB2$4CWG",
-                "pin": false,
-                "port_forwarding": [],
-                "servers": [],
-            }
-        ]
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
-    PritunlException,
-    get_pritunl_settings,
-    list_pritunl_organizations,
-    list_pritunl_users,
-    pritunl_argument_spec,
-)
-
-
-def get_pritunl_user(module):
-    user_name = module.params.get("user_name")
-    user_type = module.params.get("user_type")
-    org_name = module.params.get("organization")
-
-    org_obj_list = []
-
-    org_obj_list = list_pritunl_organizations(
-        **dict_merge(get_pritunl_settings(module), {"filters": {"name": org_name}})
-    )
-
-    if len(org_obj_list) == 0:
-        module.fail_json(
-            msg="Cannot list users from the organization '%s', which does not exist"
-            % org_name
-        )
-
-    org_id = org_obj_list[0]["id"]
-
-    users = list_pritunl_users(
-        **dict_merge(
-            get_pritunl_settings(module),
-            {
-                "organization_id": org_id,
-                "filters": (
-                    {"type": user_type}
-                    if user_name is None
-                    else {"name": user_name, "type": user_type}
-                ),
-            },
-        )
-    )
-
-    result = {}
-    result["changed"] = False
-    result["users"] = users
-
-    module.exit_json(**result)
-
-
-def main():
-    argument_spec = pritunl_argument_spec()
-
-    argument_spec.update(
-        dict(
-            organization=dict(required=True, type="str", aliases=["org"]),
-            user_name=dict(required=False, type="str", default=None),
-            user_type=dict(
-                required=False,
-                choices=["client", "server"],
-                default="client",
-            ),
-        )
-    )
-
-    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
-
-    try:
-        get_pritunl_user(module)
-    except PritunlException as e:
-        module.fail_json(msg=to_native(e))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py
deleted file mode 100644
index e9d0ebc94c..0000000000
--- a/plugins/modules/net_tools/snmp_facts.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# This file is part of Networklore's snmp library for Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: snmp_facts
-author:
-- Patrick Ogenstad (@ogenstad)
-short_description: Retrieve facts for a device using SNMP
-description:
-    - Retrieve facts for a device using SNMP; the facts will be
-      inserted into the ansible_facts key.
-requirements:
-    - pysnmp
-options:
-    host:
-        description:
-            - Set to target SNMP server (normally C({{ inventory_hostname }})).
-        type: str
-        required: true
-    version:
-        description:
-            - SNMP Version to use, C(v2), C(v2c) or C(v3).
-        type: str
-        required: true
-        choices: [ v2, v2c, v3 ]
-    community:
-        description:
-            - The SNMP community string, required if I(version) is C(v2) or C(v2c).
-        type: str
-    level:
-        description:
-            - Authentication level.
-            - Required if I(version) is C(v3).
-        type: str
-        choices: [ authNoPriv, authPriv ]
-    username:
-        description:
-            - Username for SNMPv3.
-            - Required if I(version) is C(v3).
-        type: str
-    integrity:
-        description:
-            - Hashing algorithm.
-            - Required if I(version) is C(v3).
-        type: str
-        choices: [ md5, sha ]
-    authkey:
-        description:
-            - Authentication key.
-            - Required if I(version) is C(v3).
-        type: str
-    privacy:
-        description:
-            - Encryption algorithm.
-            - Required if I(level) is C(authPriv).
-        type: str
-        choices: [ aes, des ]
-    privkey:
-        description:
-            - Encryption key.
-            - Required if I(level) is C(authPriv).
-        type: str
-    timeout:
-        description:
-            - Response timeout in seconds.
-        type: int
-        version_added: 2.3.0
-    retries:
-        description:
-            - Maximum number of request retries; 0 retries means just a single request.
-        type: int
-        version_added: 2.3.0
-'''
-
-EXAMPLES = r'''
-- name: Gather facts with SNMP version 2
-  community.general.snmp_facts:
-    host: '{{ inventory_hostname }}'
-    version: v2c
-    community: public
-  delegate_to: localhost
-
-- name: Gather facts using SNMP version 3
-  community.general.snmp_facts:
-    host: '{{ inventory_hostname }}'
-    version: v3
-    level: authPriv
-    integrity: sha
-    privacy: aes
-    username: snmp-user
-    authkey: abc12345
-    privkey: def6789
-  delegate_to: localhost
-'''
-
-RETURN = r'''
-ansible_sysdescr:
-  description: A textual description of the entity.
-  returned: success
-  type: str
-  sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
-ansible_sysobjectid:
-  description: The vendor's authoritative identification of the network management subsystem contained in the entity.
-  returned: success
-  type: str
-  sample: 1.3.6.1.4.1.8072.3.2.10
-ansible_sysuptime:
-  description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
-  returned: success
-  type: int
-  sample: 42388
-ansible_syscontact:
-  description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
-  returned: success
-  type: str
-  sample: Me
-ansible_sysname:
-  description: An administratively-assigned name for this managed node.
- returned: success - type: str - sample: ubuntu-user -ansible_syslocation: - description: The physical location of this node (e.g., `telephone closet, 3rd floor'). - returned: success - type: str - sample: Sitting on the Dock of the Bay -ansible_all_ipv4_addresses: - description: List of all IPv4 addresses. - returned: success - type: list - sample: ["127.0.0.1", "172.17.0.1"] -ansible_interfaces: - description: Dictionary of each network interface and its metadata. - returned: success - type: dict - sample: { - "1": { - "adminstatus": "up", - "description": "", - "ifindex": "1", - "ipv4": [ - { - "address": "127.0.0.1", - "netmask": "255.0.0.0" - } - ], - "mac": "", - "mtu": "65536", - "name": "lo", - "operstatus": "up", - "speed": "65536" - }, - "2": { - "adminstatus": "up", - "description": "", - "ifindex": "2", - "ipv4": [ - { - "address": "192.168.213.128", - "netmask": "255.255.255.0" - } - ], - "mac": "000a305a52a1", - "mtu": "1500", - "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", - "operstatus": "up", - "speed": "1500" - } - } -''' - -import binascii -import traceback -from collections import defaultdict - -PYSNMP_IMP_ERR = None -try: - from pysnmp.entity.rfc3413.oneliner import cmdgen - from pysnmp.proto.rfc1905 import EndOfMibView - HAS_PYSNMP = True -except Exception: - PYSNMP_IMP_ERR = traceback.format_exc() - HAS_PYSNMP = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_text - - -class DefineOid(object): - - def __init__(self, dotprefix=False): - if dotprefix: - dp = "." - else: - dp = "" - - # From SNMPv2-MIB - self.sysDescr = dp + "1.3.6.1.2.1.1.1.0" - self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0" - self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0" - self.sysContact = dp + "1.3.6.1.2.1.1.4.0" - self.sysName = dp + "1.3.6.1.2.1.1.5.0" - self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" - - # From IF-MIB - self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" - self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" - self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4" - self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5" - self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6" - self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7" - self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8" - self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" - - # From IP-MIB - self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" - self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" - self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" - - -def decode_hex(hexstring): - - if len(hexstring) < 3: - return hexstring - if hexstring[:2] == "0x": - return to_text(binascii.unhexlify(hexstring[2:])) - return hexstring - - -def decode_mac(hexstring): - - if len(hexstring) != 14: - return hexstring - if hexstring[:2] == "0x": - return hexstring[2:] - return hexstring - - -def lookup_adminstatus(int_adminstatus): - adminstatus_options = { - 1: 'up', - 2: 'down', - 3: 'testing' - } - if int_adminstatus in adminstatus_options: - return adminstatus_options[int_adminstatus] - return "" - - -def lookup_operstatus(int_operstatus): - operstatus_options = { - 1: 'up', - 2: 'down', - 3: 'testing', - 4: 'unknown', - 5: 'dormant', - 6: 'notPresent', - 7: 'lowerLayerDown' - } - if int_operstatus in operstatus_options: - return operstatus_options[int_operstatus] - return "" - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', required=True), - version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']), - community=dict(type='str'), - 
username=dict(type='str'), - level=dict(type='str', choices=['authNoPriv', 'authPriv']), - integrity=dict(type='str', choices=['md5', 'sha']), - privacy=dict(type='str', choices=['aes', 'des']), - authkey=dict(type='str', no_log=True), - privkey=dict(type='str', no_log=True), - timeout=dict(type='int'), - retries=dict(type='int'), - ), - required_together=( - ['username', 'level', 'integrity', 'authkey'], - ['privacy', 'privkey'], - ), - supports_check_mode=True, - ) - - m_args = module.params - - if not HAS_PYSNMP: - module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR) - - cmdGen = cmdgen.CommandGenerator() - transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) - - # Verify that we receive a community when using snmp v2 - if m_args['version'] in ("v2", "v2c"): - if m_args['community'] is None: - module.fail_json(msg='Community not set when using snmp version 2') - - if m_args['version'] == "v3": - if m_args['username'] is None: - module.fail_json(msg='Username not set when using snmp version 3') - - if m_args['level'] == "authPriv" and m_args['privacy'] is None: - module.fail_json(msg='Privacy algorithm not set when using authPriv') - - if m_args['integrity'] == "sha": - integrity_proto = cmdgen.usmHMACSHAAuthProtocol - elif m_args['integrity'] == "md5": - integrity_proto = cmdgen.usmHMACMD5AuthProtocol - - if m_args['privacy'] == "aes": - privacy_proto = cmdgen.usmAesCfb128Protocol - elif m_args['privacy'] == "des": - privacy_proto = cmdgen.usmDESPrivProtocol - - # Use SNMP Version 2 - if m_args['version'] in ("v2", "v2c"): - snmp_auth = cmdgen.CommunityData(m_args['community']) - - # Use SNMP Version 3 with authNoPriv - elif m_args['level'] == "authNoPriv": - snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto) - - # Use SNMP Version 3 with authPriv - else: - snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, - privProtocol=privacy_proto) - - # Use p to prefix OIDs with a dot for polling - p = DefineOid(dotprefix=True) - # Use v without a prefix to use with return values - v = DefineOid(dotprefix=False) - - def Tree(): - return defaultdict(Tree) - - results = Tree() - - errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( - snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), - cmdgen.MibVariable(p.sysDescr,), - cmdgen.MibVariable(p.sysObjectId,), - cmdgen.MibVariable(p.sysUpTime,), - cmdgen.MibVariable(p.sysContact,), - cmdgen.MibVariable(p.sysName,), - cmdgen.MibVariable(p.sysLocation,), - lookupMib=False - ) - - if errorIndication: - module.fail_json(msg=str(errorIndication)) - - for oid, val in varBinds: - current_oid = oid.prettyPrint() - current_val = val.prettyPrint() - if current_oid == v.sysDescr: - results['ansible_sysdescr'] = decode_hex(current_val) - elif current_oid == v.sysObjectId: - results['ansible_sysobjectid'] = current_val - elif current_oid == v.sysUpTime: - results['ansible_sysuptime'] = current_val - elif current_oid == v.sysContact: - results['ansible_syscontact'] = current_val - elif current_oid == v.sysName: - results['ansible_sysname'] = current_val - elif current_oid == v.sysLocation: - results['ansible_syslocation'] = current_val - - errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( - snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), - 
cmdgen.MibVariable(p.ifIndex,), - cmdgen.MibVariable(p.ifDescr,), - cmdgen.MibVariable(p.ifMtu,), - cmdgen.MibVariable(p.ifSpeed,), - cmdgen.MibVariable(p.ifPhysAddress,), - cmdgen.MibVariable(p.ifAdminStatus,), - cmdgen.MibVariable(p.ifOperStatus,), - cmdgen.MibVariable(p.ipAdEntAddr,), - cmdgen.MibVariable(p.ipAdEntIfIndex,), - cmdgen.MibVariable(p.ipAdEntNetMask,), - - cmdgen.MibVariable(p.ifAlias,), - lookupMib=False - ) - - if errorIndication: - module.fail_json(msg=str(errorIndication)) - - interface_indexes = [] - - all_ipv4_addresses = [] - ipv4_networks = Tree() - - for varBinds in varTable: - for oid, val in varBinds: - if isinstance(val, EndOfMibView): - continue - current_oid = oid.prettyPrint() - current_val = val.prettyPrint() - if v.ifIndex in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['ifindex'] = current_val - interface_indexes.append(ifIndex) - if v.ifDescr in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['name'] = current_val - if v.ifMtu in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['mtu'] = current_val - if v.ifSpeed in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['speed'] = current_val - if v.ifPhysAddress in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val) - if v.ifAdminStatus in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val)) - if v.ifOperStatus in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val)) - if v.ipAdEntAddr in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] - curIP = ".".join(curIPList) - ipv4_networks[curIP]['address'] = current_val - all_ipv4_addresses.append(current_val) - if v.ipAdEntIfIndex in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] - curIP = ".".join(curIPList) - ipv4_networks[curIP]['interface'] = current_val - if v.ipAdEntNetMask in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] - curIP = ".".join(curIPList) - ipv4_networks[curIP]['netmask'] = current_val - - if v.ifAlias in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['description'] = current_val - - interface_to_ipv4 = {} - for ipv4_network in ipv4_networks: - current_interface = ipv4_networks[ipv4_network]['interface'] - current_network = { - 'address': ipv4_networks[ipv4_network]['address'], - 'netmask': ipv4_networks[ipv4_network]['netmask'] - } - if current_interface not in interface_to_ipv4: - interface_to_ipv4[current_interface] = [] - interface_to_ipv4[current_interface].append(current_network) - else: - interface_to_ipv4[current_interface].append(current_network) - - for interface in interface_to_ipv4: - results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] - - results['ansible_all_ipv4_addresses'] = all_ipv4_addresses - - module.exit_json(ansible_facts=results) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py deleted file mode 120000 index d020221d87..0000000000 --- a/plugins/modules/netcup_dns.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/netcup_dns.py \ No newline at end of 
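The index extraction in the walk above relies on the OID layout: IF-MIB table columns end in the integer ifIndex, while ipAdEntAddr rows end in the four octets of the IPv4 address itself. A minimal sketch:

# ifIndex column, row 42
if_oid = '1.3.6.1.2.1.2.2.1.1.42'
# ipAdEntAddr row keyed by the address
ip_oid = '1.3.6.1.2.1.4.20.1.1.192.168.213.128'

if_index = int(if_oid.rsplit('.', 1)[-1])
ip_addr = '.'.join(ip_oid.rsplit('.', 4)[-4:])

assert if_index == 42
assert ip_addr == '192.168.213.128'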
file diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py new file mode 100644 index 0000000000..52ec6c1915 --- /dev/null +++ b/plugins/modules/netcup_dns.py @@ -0,0 +1,297 @@ +#!/usr/bin/python + +# Copyright (c) 2018 Nicolai Buchwitz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: netcup_dns +notes: [] +short_description: Manage Netcup DNS records +description: + - Manages DNS records using the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + description: + - API key for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)). + required: true + type: str + api_password: + description: + - API password for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)). + required: true + type: str + customer_id: + description: + - Netcup customer ID. + required: true + type: int + domain: + description: + - Domainname the records should be added / removed. + required: true + type: str + record: + description: + - Record to add or delete, supports wildcard (V(*)). Default is V(@) (that is, the zone name). + default: "@" + aliases: [name] + type: str + type: + description: + - Record type. + - Support for V(OPENPGPKEY), V(SMIMEA) and V(SSHFP) was added in community.general 8.1.0. + - Record types V(OPENPGPKEY) and V(SMIMEA) require nc-dnsapi >= 0.1.5. + - Record type V(SSHFP) requires nc-dnsapi >= 0.1.6. + choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', 'SSHFP'] + required: true + type: str + value: + description: + - Record value. + required: true + type: str + solo: + type: bool + default: false + description: + - Whether the record should be the only one for that record type and record name. Only use with O(state=present). + - This deletes all other records with the same record name and type. + priority: + description: + - Record priority. Required for O(type=MX). + required: false + type: int + state: + description: + - Whether the record should exist or not. + required: false + default: present + choices: ['present', 'absent'] + type: str + timeout: + description: + - HTTP(S) connection timeout in seconds. + default: 5 + type: int + version_added: 5.7.0 +requirements: + - "nc-dnsapi >= 0.1.3" +author: "Nicolai Buchwitz (@nbuchwitz)" +""" + +EXAMPLES = r""" +- name: Create a record of type A + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + +- name: Delete that record + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + state: absent + +- name: Create a wildcard record + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "*" + type: "A" + value: "127.0.1.1" + +- name: Set the MX record for example.com + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." 
+ domain: "example.com" + type: "MX" + value: "mail.example.com" + +- name: Set a record and ensure that this is the only one + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + name: "demo" + domain: "example.com" + type: "AAAA" + value: "::1" + solo: true + +- name: Increase the connection timeout to avoid problems with an unstable connection + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + timeout: 30 +""" + +RETURN = r""" +records: + description: List containing all records. + returned: success + type: list + elements: dict + contains: + name: + description: The record name. + returned: success + type: str + sample: fancy-hostname + type: + description: The record type. + returned: success + type: str + sample: A + value: + description: The record destination. + returned: success + type: str + sample: 127.0.0.1 + priority: + description: The record priority (only relevant if RV(records[].type=MX)). + returned: success + type: int + sample: 0 + id: + description: Internal ID of the record. + returned: success + type: int + sample: 12345 +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +NCDNSAPI_IMP_ERR = None +try: + import nc_dnsapi + from nc_dnsapi import DNSRecord + + HAS_NCDNSAPI = True +except ImportError: + NCDNSAPI_IMP_ERR = traceback.format_exc() + HAS_NCDNSAPI = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_password=dict(required=True, no_log=True), + customer_id=dict(required=True, type='int'), + + domain=dict(required=True), + record=dict(default='@', aliases=['name']), + type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', + 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', + 'SSHFP']), + value=dict(required=True), + priority=dict(type='int'), + solo=dict(type='bool', default=False), + state=dict(choices=['present', 'absent'], default='present'), + timeout=dict(type='int', default=5), + + ), + supports_check_mode=True + ) + + if not HAS_NCDNSAPI: + module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR) + + api_key = module.params.get('api_key') + api_password = module.params.get('api_password') + customer_id = module.params.get('customer_id') + domain = module.params.get('domain') + record_type = module.params.get('type') + record = module.params.get('record') + value = module.params.get('value') + priority = module.params.get('priority') + solo = module.params.get('solo') + state = module.params.get('state') + timeout = module.params.get('timeout') + + if record_type == 'MX' and not priority: + module.fail_json(msg="record type MX required the 'priority' argument") + + has_changed = False + all_records = [] + try: + with nc_dnsapi.Client(customer_id, api_key, api_password, timeout) as api: + all_records = api.dns_records(domain) + record = DNSRecord(record, record_type, value, priority=priority) + + # try to get existing record + record_exists = False + for r in all_records: + if r == record: + record_exists = True + record = r + + break + + if state == 'present': + if solo: + obsolete_records = [r for r in all_records if + r.hostname == record.hostname + and r.type == record.type + and not r.destination == record.destination] + + if obsolete_records: + if not module.check_mode: + all_records = api.delete_dns_records(domain, 
obsolete_records) + + has_changed = True + + if not record_exists: + if not module.check_mode: + all_records = api.add_dns_record(domain, record) + + has_changed = True + elif state == 'absent' and record_exists: + if not module.check_mode: + all_records = api.delete_dns_record(domain, record) + + has_changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]}) + + +def record_data(r): + return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id} + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py deleted file mode 120000 index 724d09dace..0000000000 --- a/plugins/modules/newrelic_deployment.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/newrelic_deployment.py \ No newline at end of file diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py new file mode 100644 index 0000000000..63495d9e7f --- /dev/null +++ b/plugins/modules/newrelic_deployment.py @@ -0,0 +1,185 @@ +#!/usr/bin/python + +# Copyright 2013 Matt Coddington +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: newrelic_deployment +author: "Matt Coddington (@mcodd)" +short_description: Notify New Relic about app deployments +description: + - Notify New Relic about app deployments (see U(https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - API token to place in the Api-Key header. + required: true + app_name: + type: str + description: + - The value of C(app_name) in the C(newrelic.yml) file used by the application. + - One of O(app_name) or O(application_id) is required. + required: false + application_id: + type: str + description: + - The application ID found in the metadata of the application in APM. + - One of O(app_name) or O(application_id) is required. + required: false + changelog: + type: str + description: + - A list of changes for this deployment. + required: false + description: + type: str + description: + - Text annotation for the deployment - notes for you. + required: false + revision: + type: str + description: + - A revision number (for example, git commit SHA). + required: true + user: + type: str + description: + - The name of the user/process that triggered this deployment. + required: false + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + required: false + default: true + type: bool + app_name_exact_match: + type: bool + description: + - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match. If set to + V(false) it returns the first result. 
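A hedged sketch of the O(solo) semantics documented for netcup_dns above: any record with the same hostname and type but a different destination is considered obsolete and deleted before the desired record is ensured (DNSRecord comes from nc-dnsapi; the values are illustrative):

from nc_dnsapi import DNSRecord

desired = DNSRecord('demo', 'AAAA', '::1')
existing = [
    DNSRecord('demo', 'AAAA', '::1'),          # the desired record, kept
    DNSRecord('demo', 'AAAA', '2001:db8::2'),  # same name and type, deleted
    DNSRecord('demo', 'A', '192.0.2.1'),       # different type, kept
]

obsolete = [r for r in existing
            if r.hostname == desired.hostname
            and r.type == desired.type
            and r.destination != desired.destination]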
+ required: false + default: false + version_added: 7.5.0 +requirements: [] +""" + +EXAMPLES = r""" +- name: Notify New Relic about an app deployment + community.general.newrelic_deployment: + token: AAAAAA + app_name: myapp + user: ansible deployment + revision: '1.0' +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from urllib.parse import quote +import json + +# =========================================== +# Module execution. +# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + app_name=dict(), + application_id=dict(), + changelog=dict(), + description=dict(), + revision=dict(required=True), + user=dict(), + validate_certs=dict(default=True, type='bool'), + app_name_exact_match=dict(type='bool', default=False), + ), + required_one_of=[['app_name', 'application_id']], + required_if=[('app_name_exact_match', True, ['app_name'])], + supports_check_mode=True + ) + + # build list of params + params = {} + if module.params["app_name"] and module.params["application_id"]: + module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") + app_id = None + if module.params["app_name"]: + app_id = get_application_id(module) + elif module.params["application_id"]: + app_id = module.params["application_id"] + else: + module.fail_json(msg="you must set one of 'app_name' or 'application_id'") + + if app_id is None: + module.fail_json(msg="No application with name %s is found in NewRelic" % module.params["app_name"]) + + for item in ["changelog", "description", "revision", "user"]: + if module.params[item]: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # Send the data to New Relic + url = "https://api.newrelic.com/v2/applications/%s/deployments.json" % quote(str(app_id), safe='') + data = { + 'deployment': params + } + headers = { + 'Api-Key': module.params["token"], + 'Content-Type': 'application/json', + } + response, info = fetch_url(module, url, data=module.jsonify(data), headers=headers, method="POST") + if info['status'] in (200, 201): + module.exit_json(changed=True) + else: + module.fail_json(msg="Unable to insert deployment marker: %s" % info['msg']) + + +def get_application_id(module): + url = "https://api.newrelic.com/v2/applications.json" + data = "filter[name]=%s" % module.params["app_name"] + application_id = None + headers = { + 'Api-Key': module.params["token"], + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] not in (200, 201): + module.fail_json(msg="Unable to get application: %s" % info['msg']) + + result = json.loads(response.read()) + if result is None or len(result.get("applications", "")) == 0: + module.fail_json(msg='No application found with name "%s"' % module.params["app_name"]) + + if module.params["app_name_exact_match"]: + for item in result["applications"]: + if item["name"] == module.params["app_name"]: + application_id = item["id"] + break + if application_id is None: + module.fail_json(msg='No application found with exact name "%s"' % module.params["app_name"]) + else: + application_id = result["applications"][0]["id"] + + return application_id + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py deleted file mode 120000 index 9b13d8203c..0000000000 --- a/plugins/modules/nexmo.py +++ /dev/null @@ -1 +0,0 @@ 
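For orientation, a standalone sketch of the REST call the newrelic_deployment module performs against the v2 deployments endpoint; the token, application ID and payload are placeholders, and plain urllib stands in for the module's fetch_url:

import json
from urllib.request import Request, urlopen

app_id = '12345678'
payload = {'deployment': {'revision': '1.0', 'user': 'ansible deployment'}}

req = Request(
    'https://api.newrelic.com/v2/applications/%s/deployments.json' % app_id,
    data=json.dumps(payload).encode(),
    headers={'Api-Key': 'AAAAAA', 'Content-Type': 'application/json'},
    method='POST',
)
# The module treats HTTP 200/201 as a successfully recorded deployment marker.
with urlopen(req) as resp:
    print(resp.status)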
-./notification/nexmo.py \ No newline at end of file diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py new file mode 100644 index 0000000000..ee65bdda57 --- /dev/null +++ b/plugins/modules/nexmo.py @@ -0,0 +1,140 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Matt Martz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: nexmo +short_description: Send an SMS using nexmo +description: + - Send an SMS message using nexmo. +author: "Matt Martz (@sivel)" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + type: str + description: + - Nexmo API Key. + required: true + api_secret: + type: str + description: + - Nexmo API Secret. + required: true + src: + type: int + description: + - Nexmo Number to send from. + required: true + dest: + type: list + elements: int + description: + - Phone number(s) to send SMS message to. + required: true + msg: + type: str + description: + - Message text to send. Messages longer than 160 characters are split into multiple messages. + required: true + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Send notification message via Nexmo + community.general.nexmo: + api_key: 640c8a53 + api_secret: 0ce239a6 + src: 12345678901 + dest: + - 10987654321 + - 16789012345 + msg: '{{ inventory_hostname }} completed' + delegate_to: localhost +""" +import json +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, url_argument_spec + + +NEXMO_API = 'https://rest.nexmo.com/sms/json' + + +def send_msg(module): + failed = list() + responses = dict() + msg = { + 'api_key': module.params.get('api_key'), + 'api_secret': module.params.get('api_secret'), + 'from': module.params.get('src'), + 'text': module.params.get('msg') + } + for number in module.params.get('dest'): + msg['to'] = number + url = "%s?%s" % (NEXMO_API, urlencode(msg)) + + headers = dict(Accept='application/json') + response, info = fetch_url(module, url, headers=headers) + if info['status'] != 200: + failed.append(number) + responses[number] = dict(failed=True) + + try: + responses[number] = json.load(response) + except Exception: + failed.append(number) + responses[number] = dict(failed=True) + else: + for message in responses[number]['messages']: + if int(message['status']) != 0: + failed.append(number) + responses[number] = dict(failed=True, **responses[number]) + + if failed: + msg = 'One or more messages failed to send' + else: + msg = '' + + module.exit_json(failed=bool(failed), msg=msg, changed=False, + responses=responses) + + +def main(): + argument_spec = url_argument_spec() + argument_spec.update( + dict( + api_key=dict(required=True, no_log=True), + api_secret=dict(required=True, no_log=True), + src=dict(required=True, type='int'), + dest=dict(required=True, type='list', elements='int'), + msg=dict(required=True), + ), + ) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + send_msg(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nginx_status_info.py
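A short sketch of the per-destination request URL built in send_msg() above; the key, secret and numbers are the placeholder values from the example:

from urllib.parse import urlencode

NEXMO_API = 'https://rest.nexmo.com/sms/json'
msg = {
    'api_key': '640c8a53',
    'api_secret': '0ce239a6',
    'from': 12345678901,
    'text': 'deploy completed',
    'to': 10987654321,  # set per destination inside the loop
}
url = '%s?%s' % (NEXMO_API, urlencode(msg))
# Per the response handling above, each entry in the returned 'messages'
# list must report status "0", otherwise that destination counts as failed.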
b/plugins/modules/nginx_status_info.py deleted file mode 120000 index 550ee7ccf6..0000000000 --- a/plugins/modules/nginx_status_info.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/nginx_status_info.py \ No newline at end of file diff --git a/plugins/modules/nginx_status_info.py b/plugins/modules/nginx_status_info.py new file mode 100644 index 0000000000..31707e0688 --- /dev/null +++ b/plugins/modules/nginx_status_info.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: nginx_status_info +short_description: Retrieve information on nginx status +description: + - Gathers information from nginx from a URL having C(stub_status) enabled. +author: "René Moser (@resmo)" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + url: + type: str + description: + - URL of the nginx status. + required: true + timeout: + type: int + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + +notes: + - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information. +""" + +EXAMPLES = r""" +# Gather status info from nginx on localhost +- name: Get current http stats + community.general.nginx_status_info: + url: http://localhost/nginx_status + register: result + +# Gather status info from nginx on localhost with a custom timeout of 20 seconds +- name: Get current http stats + community.general.nginx_status_info: + url: http://localhost/nginx_status + timeout: 20 + register: result +""" + +RETURN = r""" +active_connections: + description: Active connections. + returned: success + type: int + sample: 2340 +accepts: + description: The total number of accepted client connections. + returned: success + type: int + sample: 81769947 +handled: + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some + resource limits have been reached. + returned: success + type: int + sample: 81769947 +requests: + description: The total number of client requests. + returned: success + type: int + sample: 144332345 +reading: + description: The current number of connections where nginx is reading the request header. + returned: success + type: int + sample: 0 +writing: + description: The current number of connections where nginx is writing the response back to the client. + returned: success + type: int + sample: 241 +waiting: + description: The current number of idle client connections waiting for a request. + returned: success + type: int + sample: 2092 +data: + description: HTTP response as is.
+ returned: success + type: str + sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_text + + +class NginxStatusInfo(object): + + def __init__(self): + self.url = module.params.get('url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'active_connections': None, + 'accepts': None, + 'handled': None, + 'requests': None, + 'reading': None, + 'writing': None, + 'waiting': None, + 'data': None, + } + (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout)) + + data = to_text(response.read(), errors='surrogate_or_strict') + if not data: + return result + + result['data'] = data + expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \ + r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)' + match = re.match(expr, data, re.S) + if match: + result['active_connections'] = int(match.group(1)) + result['accepts'] = int(match.group(2)) + result['handled'] = int(match.group(3)) + result['requests'] = int(match.group(4)) + result['reading'] = int(match.group(5)) + result['writing'] = int(match.group(6)) + result['waiting'] = int(match.group(7)) + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + url=dict(type='str', required=True), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + nginx_status_info = NginxStatusInfo().run() + module.exit_json(changed=False, **nginx_status_info) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py deleted file mode 120000 index 2b95fad653..0000000000 --- a/plugins/modules/nictagadm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/smartos/nictagadm.py \ No newline at end of file diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py new file mode 100644 index 0000000000..bd4f646bcf --- /dev/null +++ b/plugins/modules/nictagadm.py @@ -0,0 +1,229 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Bruce Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: nictagadm +short_description: Manage nic tags on SmartOS systems +description: + - Create or delete nic tags on SmartOS systems. +author: + - Bruce Smith (@SmithX10) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the nic tag. + required: true + type: str + mac: + description: + - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub). + - Parameters O(mac) and O(etherstub) are mutually exclusive. + type: str + etherstub: + description: + - Specifies that the nic tag is attached to a created O(etherstub). + - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac). + type: bool + default: false + mtu: + description: + - Specifies the size of the O(mtu) of the desired nic tag. 
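The stub_status payload has a fixed shape, so the regular expression used above can be exercised against the sample payload shown in the RETURN block:

import re

data = ('Active connections: 2340 \n'
        'server accepts handled requests\n'
        ' 81769947 81769947 144332345 \n'
        'Reading: 0 Writing: 241 Waiting: 2092 \n')

expr = (r'Active connections: ([0-9]+) \nserver accepts handled requests\n'
        r' ([0-9]+) ([0-9]+) ([0-9]+) \n'
        r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)')

match = re.match(expr, data, re.S)
assert match and int(match.group(1)) == 2340 and int(match.group(7)) == 2092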
+ - Parameters O(mtu) and O(etherstub) are mutually exclusive. + type: int + force: + description: + - When O(state=absent) this switch uses the C(-f) parameter and deletes the nic tag regardless of existing VMs. + type: bool + default: false + state: + description: + - Create or delete a SmartOS nic tag. + type: str + choices: [absent, present] + default: present +""" + +EXAMPLES = r""" +- name: Create 'storage0' on '00:1b:21:a3:f5:4d' + community.general.nictagadm: + name: storage0 + mac: 00:1b:21:a3:f5:4d + mtu: 9000 + state: present + +- name: Remove 'storage0' nic tag + community.general.nictagadm: + name: storage0 + state: absent +""" + +RETURN = r""" +name: + description: Nic tag name. + returned: always + type: str + sample: storage0 +mac: + description: MAC Address that the nic tag was attached to. + returned: always + type: str + sample: 00:1b:21:a3:f5:4d +etherstub: + description: Specifies if the nic tag was created and attached to an etherstub. + returned: always + type: bool + sample: false +mtu: + description: Specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. + returned: always + type: int + sample: 1500 +force: + description: Shows if C(-f) was used during the deletion of a nic tag. + returned: always + type: bool + sample: false +state: + description: State of the target. + returned: always + type: str + sample: present +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.network import is_mac + + +class NicTag(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.mac = module.params['mac'] + self.etherstub = module.params['etherstub'] + self.mtu = module.params['mtu'] + self.force = module.params['force'] + self.state = module.params['state'] + + self.nictagadm_bin = self.module.get_bin_path('nictagadm', True) + + def is_valid_mac(self): + return is_mac(self.mac.lower()) + + def nictag_exists(self): + cmd = [self.nictagadm_bin, 'exists', self.name] + (rc, dummy, dummy) = self.module.run_command(cmd) + + return rc == 0 + + def add_nictag(self): + cmd = [self.nictagadm_bin, '-v', 'add'] + + if self.etherstub: + cmd.append('-l') + + if self.mtu: + cmd.append('-p') + cmd.append('mtu=' + str(self.mtu)) + + if self.mac: + cmd.append('-p') + cmd.append('mac=' + str(self.mac)) + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_nictag(self): + cmd = [self.nictagadm_bin, '-v', 'delete'] + + if self.force: + cmd.append('-f') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + mac=dict(type='str'), + etherstub=dict(type='bool', default=False), + mtu=dict(type='int'), + force=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + mutually_exclusive=[ + ['etherstub', 'mac'], + ['etherstub', 'mtu'], + ], + required_if=[ + ['etherstub', False, ['name', 'mac']], + ['state', 'absent', ['name', 'force']], + ], + supports_check_mode=True + ) + + nictag = NicTag(module) + + rc = None + out = '' + err = '' + result = dict( + changed=False, + etherstub=nictag.etherstub, + force=nictag.force, + name=nictag.name, + mac=nictag.mac, + mtu=nictag.mtu, + state=nictag.state, + ) + + if nictag.mac and not nictag.is_valid_mac(): + module.fail_json(msg='Invalid MAC Address Value', + name=nictag.name, + mac=nictag.mac, + etherstub=nictag.etherstub) + + if
nictag.state == 'absent': + if nictag.nictag_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nictag.delete_nictag() + if rc != 0: + module.fail_json(name=nictag.name, msg=err, rc=rc) + elif nictag.state == 'present': + if not nictag.nictag_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nictag.add_nictag() + if rc is not None and rc != 0: + module.fail_json(name=nictag.name, msg=err, rc=rc) + + if rc is not None: + result['changed'] = True + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py deleted file mode 120000 index 57c096371a..0000000000 --- a/plugins/modules/nmcli.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/nmcli.py \ No newline at end of file diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py new file mode 100644 index 0000000000..e6edbbf1d2 --- /dev/null +++ b/plugins/modules/nmcli.py @@ -0,0 +1,2874 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Chris Long +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: nmcli +author: + - Chris Long (@alcamie101) +short_description: Manage Networking +requirements: + - nmcli +extends_documentation_fragment: + - community.general.attributes +description: + - Manage the network devices. Create, modify and manage various connection and device type, for example V(ethernet), V(team), + V(bond), V(vlan) and so on. + - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: C(NetworkManager).' + - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: C(NetworkManager-tui).' + - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: C(network-manager).' + - 'On openSUSE, the requirements can be met by installing the following packages: C(NetworkManager).' +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + - Using O(state=present) creates connection set to be brought up automatically. + - Using O(state=up) and O(state=down) does not modify connection with other parameters. These states have been added + in community.general 9.5.0. + type: str + required: true + choices: [absent, present, up, down] + autoconnect: + description: + - Whether the connection should start on boot. + - Whether the connection profile can be automatically activated. + type: bool + default: true + autoconnect_priority: + description: + - The priority of the connection profile for autoconnect. If set, connection profiles with higher priority are preferred. + type: int + version_added: 11.0.0 + autoconnect_retries: + description: + - The number of times to retry autoconnecting. + type: int + version_added: 11.0.0 + conn_name: + description: + - The name used to call the connection. Pattern is V([-][-]). + type: str + required: true + conn_reload: + description: + - Whether the connection should be reloaded if it was modified. 
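For illustration, the argv lists that add_nictag() and delete_nictag() above produce for the documented examples; the binary path is a placeholder for whatever get_bin_path() resolves:

nictagadm_bin = '/usr/sbin/nictagadm'  # placeholder for get_bin_path('nictagadm')

# 'storage0' with an MTU and a MAC, as in the EXAMPLES section:
add_cmd = [nictagadm_bin, '-v', 'add',
           '-p', 'mtu=9000',
           '-p', 'mac=00:1b:21:a3:f5:4d',
           'storage0']

# force=true adds -f, deleting the tag even if VMs still reference it:
delete_cmd = [nictagadm_bin, '-v', 'delete', '-f', 'storage0']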
+ type: bool + required: false + default: false + version_added: 9.5.0 + ifname: + description: + - The interface to bind the connection to. + - The connection is only applicable to this interface name. + - A special value of V(*) can be used for interface-independent connections. + - The O(ifname) argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. + - This parameter defaults to O(conn_name) when left unset for all connection types except vpn, which removes it. + type: str + type: + description: + - This is the type of device or network connection that you wish to create or modify. + - Type V(dummy) is added in community.general 3.5.0. + - Type V(gsm) is added in community.general 3.7.0. + - Type V(infiniband) is added in community.general 2.0.0. + - Type V(loopback) is added in community.general 8.1.0. + - Type V(macvlan) is added in community.general 6.6.0. + - Type V(ovs-bridge) is added in community.general 8.6.0. + - Type V(ovs-interface) is added in community.general 8.6.0. + - Type V(ovs-port) is added in community.general 8.6.0. + - Type V(wireguard) is added in community.general 4.3.0. + - Type V(vpn) is added in community.general 5.1.0. + - Type V(vrf) is added in community.general 10.4.0. + - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) + option. + - If you want to control a non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) + option. + type: str + choices: + - bond + - bond-slave + - bridge + - bridge-slave + - dummy + - ethernet + - generic + - gre + - infiniband + - ipip + - macvlan + - sit + - team + - team-slave + - vlan + - vxlan + - wifi + - gsm + - wireguard + - ovs-bridge + - ovs-port + - ovs-interface + - vpn + - vrf + - loopback + mode: + description: + - This is the type of device or network connection that you wish to create for a bond or bridge. + type: str + choices: [802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast] + default: balance-rr + transport_mode: + description: + - This option sets the connection type of Infiniband IPoIB devices. + type: str + choices: [datagram, connected] + version_added: 5.8.0 + infiniband_mac: + description: + - MAC address of the Infiniband IPoIB devices. + type: str + version_added: 10.6.0 + slave_type: + description: + - Type of the device of this slave's master connection (for example V(bond)). + - Type V(ovs-port) is added in community.general 8.6.0. + type: str + choices: ['bond', 'bridge', 'team', 'ovs-port', 'vrf'] + version_added: 7.0.0 + master: + description: + - Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile. + - Mandatory if O(slave_type) is defined. + type: str + forwarddelay: + description: + - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds. + type: int + default: 15 + hellotime: + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds. + type: int + default: 2 + maxage: + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds. + type: int + default: 20 + ageingtime: + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. + type: int + default: 300 + mac: + description: + - MAC address of the connection. + - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. + type: str + slavepriority: + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int + default: 32 + path_cost: + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations using this slave. + type: int + default: 100 + hairpin: + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through + the slave the frame was received on. + - The default change to V(false) in community.general 7.0.0. It used to be V(true) before. + type: bool + default: false + runner: + description: + - This is the type of device or network connection that you wish to create for a team. + type: str + choices: [broadcast, roundrobin, activebackup, loadbalance, lacp] + default: roundrobin + version_added: 3.4.0 + runner_hwaddr_policy: + description: + - This defines the policy of how hardware addresses of team device and port devices should be set during the team lifetime. + type: str + choices: [same_all, by_active, only_active] + version_added: 3.4.0 + runner_fast_rate: + description: + - Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets + are sent once per second. Otherwise they are sent every 30 seconds. + - Only allowed for O(runner=lacp). + type: bool + version_added: 6.5.0 + vlanid: + description: + - This is only used with VLAN - VLAN ID in range <0-4095>. + type: int + vlandev: + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname. + type: str + flags: + description: + - This is only used with VLAN - flags. + type: str + ingress: + description: + - This is only used with VLAN - VLAN ingress priority mapping. + type: str + egress: + description: + - This is only used with VLAN - VLAN egress priority mapping. + type: str + vxlan_id: + description: + - This is only used with VXLAN - VXLAN ID. + type: int + vxlan_remote: + description: + - This is only used with VXLAN - VXLAN destination IP address. + type: str + vxlan_local: + description: + - This is only used with VXLAN - VXLAN local IP address. + type: str + ip_tunnel_dev: + description: + - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. + type: str + ip_tunnel_remote: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. + type: str + ip_tunnel_local: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. + type: str + ip_tunnel_input_key: + description: + - The key used for tunnel input packets. + - Only used when O(type=gre). + type: str + version_added: 3.6.0 + ip_tunnel_output_key: + description: + - The key used for tunnel output packets. + - Only used when O(type=gre). + type: str + version_added: 3.6.0 + table: + description: + - This is only used with VRF - VRF table number. + type: int + version_added: 10.4.0 + zone: + description: + - The trust level of the connection. + - When updating this property on a currently activated connection, the change takes effect immediately. + type: str + version_added: 2.0.0 + wifi_sec: + description: + - The security configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' + - 'For instance to use common WPA-PSK auth with a password: V({key-mgmt: wpa-psk, psk: my_password}).' 
+ type: dict + suboptions: + auth-alg: + description: + - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11 authentication + algorithm required by the AP here. + - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP. + - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)) the O(wifi_sec.leap-username) + and O(wifi_sec.leap-password) properties must be specified. + type: str + choices: [open, shared, leap] + fils: + description: + - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. + - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access + point support it) or V(3) (enable FILS and fail if not supported). + - When set to V(0) and no global default is set, FILS is optionally enabled. + type: int + choices: [0, 1, 2, 3] + default: 0 + group: + description: + - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize + one of the algorithms in the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [wep40, wep104, tkip, ccmp] + key-mgmt: + description: + - Key management used for the connection. + - One of V(none) (WEP or no password protection), V(ieee8021x) (Dynamic WEP), V(owe) (Opportunistic Wireless Encryption), + V(wpa-psk) (WPA2 + WPA3 personal), V(sae) (WPA3 personal only), V(wpa-eap) (WPA2 + WPA3 enterprise) or V(wpa-eap-suite-b-192) + (WPA3 enterprise only). + - This property must be set for any Wi-Fi connection that uses security. + type: str + choices: [none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192] + leap-password-flags: + description: Flags indicating how to handle the O(wifi_sec.leap-password) property. + type: list + elements: int + leap-password: + description: The login password for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)). + type: str + leap-username: + description: The login username for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)). + type: str + pairwise: + description: + - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one + of the algorithms in the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [tkip, ccmp] + pmf: + description: + - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. + - One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the supplicant and the access + point support it) or V(3) (enable PMF and fail if not supported). + - When set to V(0) and no global default is set, PMF is optionally enabled. + type: int + choices: [0, 1, 2, 3] + default: 0 + proto: + description: + - List of strings specifying the allowed WPA protocol versions to use. + - Each element may be V(wpa) (allow WPA) or V(rsn) (allow WPA2/RSN). + - If not specified, both WPA and RSN connections are allowed. + type: list + elements: str + choices: [wpa, rsn] + psk-flags: + description: Flags indicating how to handle the O(wifi_sec.psk) property. + type: list + elements: int + psk: + description: + - Pre-Shared-Key for WPA networks. 
+ - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) + hashed to derive the actual key, or the key in form of 64 hexadecimal character. + - The WPA3-Personal networks use a passphrase of any length for SAE authentication. + type: str + wep-key-flags: + description: + - Flags indicating how to handle the O(wifi_sec.wep-key0), O(wifi_sec.wep-key1), O(wifi_sec.wep-key2), and O(wifi_sec.wep-key3) + properties. + type: list + elements: int + wep-key-type: + description: + - Controls the interpretation of WEP keys. + - Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or + 13-character ASCII password; or V(2), in which case the passphrase is provided as a string and it is hashed using + the de-facto MD5 method to derive the actual WEP key. + type: int + choices: [1, 2] + wep-key0: + description: + - Index 0 WEP key. This is the WEP key used in most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key1: + description: + - Index 1 WEP key. This WEP index is not used by most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key2: + description: + - Index 2 WEP key. This WEP index is not used by most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key3: + description: + - Index 3 WEP key. This WEP index is not used by most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-tx-keyidx: + description: + - When static WEP is used (that is, if O(wifi_sec.key-mgmt=none)) and a non-default WEP key index is used by the + AP, put that WEP key index here. + - Valid values are V(0) (default key) through V(3). + - Note that some consumer access points (like the Linksys WRT54G) number the keys V(1) to V(4). + type: int + choices: [0, 1, 2, 3] + default: 0 + wps-method: + description: + - Flags indicating which mode of WPS is to be used if any. + - There is little point in changing the default setting as NetworkManager automatically determines whether it is + feasible to start WPS enrollment from the Access Point capabilities. + - WPS can be disabled by setting this property to a value of V(1). + type: int + default: 0 + version_added: 3.0.0 + ssid: + description: + - Name of the Wireless router or the access point. + type: str + version_added: 3.0.0 + wifi: + description: + - The configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' + - 'For instance to create a hidden AP mode WiFi connection: V({hidden: true, mode: ap}).' + type: dict + suboptions: + ap-isolation: + description: + - Configures AP isolation, which prevents communication between wireless devices connected to this AP. + - This property can be set to a value different from V(-1) only when the interface is configured in AP mode. + - If set to V(1), devices are not able to communicate with each other. This increases security because it protects + devices against attacks from other clients in the network. 
At the same time, it prevents devices from accessing resources + on the same wireless network, such as file shares, printers, and so on. + - If set to V(0), devices can talk to each other. + - When set to V(-1), the global default is used; in case the global default is unspecified it is assumed to be V(0). + type: int + choices: [-1, 0, 1] + default: -1 + assigned-mac-address: + description: + - The new field for the cloned MAC address. + - It can be either a hardware address in ASCII representation, or one of the special values V(preserve), V(permanent), + V(random) or V(stable). + - This field replaces the deprecated O(wifi.cloned-mac-address) on D-Bus, which can only contain explicit hardware + addresses. + - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property C(cloned-mac-address). + type: str + band: + description: + - 802.11 frequency band of the network. + - One of V(a) for 5GHz 802.11a or V(bg) for 2.4GHz 802.11. + - This locks associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device + does not associate with the same network in the 2.4GHz band even if the network's settings are compatible. + - This setting depends on specific driver capability and may not work with all drivers. + type: str + choices: [a, bg] + bssid: + description: + - If specified, directs the device to only associate with the given access point. + - This capability is highly driver dependent and not supported by all devices. + - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. + type: str + channel: + description: + - Wireless channel to use for the Wi-Fi connection. + - The device only joins (or creates for Ad-Hoc networks) a Wi-Fi network on the specified channel. + - Because channel numbers overlap between bands, this property also requires the O(wifi.band) property to be set. + type: int + default: 0 + cloned-mac-address: + description: + - This D-Bus field is deprecated in favor of O(wifi.assigned-mac-address) which is more flexible and allows specifying + special variants like V(random). + - For libnm and nmcli, this field is called C(cloned-mac-address). + type: str + generate-mac-address-mask: + description: + - With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled + and a locally-administered, unicast MAC address is created. This property allows specifying that certain bits + are fixed. + - Note that the least significant bit of the first MAC address is always unset to create a unicast MAC address. + - If the property is V(null), it is eligible to be overwritten by a default connection setting. + - If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC + address. + - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled + with the current MAC address of the device, while the unset bits are subject to randomization. + - Setting V(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower + 3 bytes using the V(random) or V(stable) algorithm. + - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC + address to fill the bits that shall not be randomized.
+ - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) sets the OUI of the MAC address to 68:F7:28, while + the lower bits are randomized. + - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) creates a fully scrambled globally-administered, burned-in MAC + address. + - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, V(02:00:00:00:00:00 + 00:00:00:00:00:00 02:00:00:00:00:00) creates a fully scrambled MAC address, randomly locally or globally administered. + type: str + hidden: + description: + - If V(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure + and AP mode. + - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as + probe-scanning the SSID. However, these workarounds expose inherent insecurities with hidden SSID networks, and + thus hidden SSID networks should be used with caution. + - In AP mode, the created network does not broadcast its SSID. + - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations + (in AP mode), as the explicit probe-scans are distinctly recognizable on the air. + type: bool + default: false + mac-address-blacklist: + description: + - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. + - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, V(00:11:22:33:44:55)). + type: list + elements: str + mac-address-randomization: + description: + - One of V(0) (never randomize unless the user has set a global default to randomize and the supplicant supports + randomization), V(1) (never randomize the MAC address), or V(2) (always randomize the MAC address). + - This property is deprecated for O(wifi.cloned-mac-address). + type: int + default: 0 + choices: [0, 1, 2] + mac-address: + description: + - If specified, this connection only applies to the Wi-Fi device whose permanent MAC address matches. + - This property does not change the MAC address of the device (for example for MAC spoofing). + type: str + mode: + description: Wi-Fi network mode. If blank, V(infrastructure) is assumed. + type: str + choices: [infrastructure, mesh, adhoc, ap] + default: infrastructure + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into + multiple Ethernet frames. + type: int + default: 0 + powersave: + description: + - One of V(2) (disable Wi-Fi power saving), V(3) (enable Wi-Fi power saving), V(1) (do not touch currently configure + setting) or V(0) (use the globally configured value). + - All other values are reserved. + type: int + default: 0 + choices: [0, 1, 2, 3] + rate: + description: + - If non-zero, directs the device to only use the specified bitrate for communication with the access point. + - Units are in Kb/s, so for example V(5500) = 5.5 Mbit/s. + - This property is highly driver dependent and not all devices support setting a static bitrate. + type: int + default: 0 + tx-power: + description: + - If non-zero, directs the device to use the specified transmit power. + - Units are dBm. + - This property is highly driver dependent and not all devices support setting a static transmit power. + type: int + default: 0 + wake-on-wlan: + description: + - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. 
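A hedged illustration of the mask semantics described above, not NetworkManager's actual implementation: set mask bits are kept from the current MAC, clear bits are randomized, and the least significant bit of the first byte is cleared so the result stays unicast:

import random

def apply_mask(current_mac, mask):
    cur = bytes.fromhex(current_mac.replace(':', ''))
    msk = bytes.fromhex(mask.replace(':', ''))
    rnd = bytes(random.randrange(256) for _ in cur)
    # keep masked bits from the current MAC, randomize the rest
    out = bytearray((c & m) | (r & ~m & 0xFF) for c, m, r in zip(cur, msk, rnd))
    out[0] &= 0xFE  # force a unicast address
    return ':'.join('%02X' % b for b in out)

# FE:FF:FF:00:00:00 keeps the OUI 68:F7:28 and scrambles the lower 3 bytes.
print(apply_mask('68:F7:28:12:34:56', 'FE:FF:FF:00:00:00'))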
+ - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (V(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) + (V(0x4)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (V(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) + (V(0x10)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (V(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) + (V(0x40)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (V(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) + (V(0x100)) or the special values V(0x1) (to use global settings) and V(0x8000) (to disable management of Wake-on-LAN + in NetworkManager). + - Note the option values' sum must be specified in order to combine multiple options. + type: int + default: 1 + version_added: 3.5.0 + ignore_unsupported_suboptions: + description: + - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. + - Only O(wifi) and O(wifi_sec) options are currently affected. + type: bool + default: false + version_added: 3.6.0 + gsm: + description: + - The configuration of the GSM connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' + - 'For instance to use apn, pin, username and password: V({apn: provider.apn, pin: 1234, username: apn.username, password: + apn.password}).' + type: dict + version_added: 3.7.0 + suboptions: + apn: + description: + - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. + - The APN often determines how the user is billed for their network usage and whether the user has access to the + Internet or just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile + broadband plan. + - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. + type: str + auto-config: + description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) default to values + that match the network the modem registers to in the Mobile Broadband Provider database. + type: bool + default: false + device-id: + description: + - The device unique identifier (as given by the V(WWAN) management service) which this connection applies to. + - If given, the connection only applies to the specified device. + type: str + home-only: + description: + - When V(true), only connections to the home network are allowed. + - Connections to roaming networks are not made. + type: bool + default: false + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into + multiple Ethernet frames. + type: int + default: 0 + network-id: + description: + - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. + - If the Network ID is specified, NetworkManager attempts to force the device to register only on the specified + network. + - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise + possible. + type: str + number: + description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. + type: str + password: + description: + - The password used to authenticate with the network, if required. 
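Because the option takes the sum of the flag values, combining options is a bitwise OR of the constants listed above; for example, waking on magic packet plus disconnect:

NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT = 0x4
NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC = 0x8

# 0x4 | 0x8 == 12; pass 12 as the wake-on-wlan option value.
wake_on_wlan = (NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT
                | NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC)
assert wake_on_wlan == 12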
+ - Many providers do not require a password, or accept any password.
+ - But if a password is required, it is specified here.
+ type: str
+ password-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the O(gsm.password) property.
+ - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret
+ (default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when
+ it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should
+ be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically
+ determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates
+ that the specific secret is not required.'
+ type: int
+ choices: [0, 1, 2, 4]
+ default: 0
+ pin:
+ description:
+ - If the SIM is locked with a PIN, it must be unlocked before any other operations are requested.
+ - Specify the PIN here to allow operation of the device.
+ type: str
+ pin-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the O(gsm.pin) property.
+ - See O(gsm.password-flags) for NMSettingSecretFlags choices.
+ type: int
+ choices: [0, 1, 2, 4]
+ default: 0
+ sim-id:
+ description:
+ - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
+ - If given, the connection applies to any device also allowed by O(gsm.device-id) which contains a SIM card matching
+ the given identifier.
+ type: str
+ sim-operator-id:
+ description:
+ - An MCC/MNC string like V(310260) or V(21601) identifying the specific mobile network operator which this connection
+ applies to.
+ - If given, the connection applies to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains
+ a SIM card provisioned by the given operator.
+ type: str
+ username:
+ description:
+ - The username used to authenticate with the network, if required.
+ - Many providers do not require a username, or accept any username.
+ - But if a username is required, it is specified here.
+ type: str
+ macvlan:
+ description:
+ - The configuration of the MAC VLAN connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on
+ the host.
+ - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).'
+ type: dict
+ version_added: 6.6.0
+ suboptions:
+ mode:
+ description:
+ - The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
+ - 'Following choices are allowed: V(1) B(vepa), V(2) B(bridge), V(3) B(private), V(4) B(passthru) and V(5) B(source).'
+ type: int
+ choices: [1, 2, 3, 4, 5]
+ required: true
+ parent:
+ description:
+ - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should
+ be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a
+ "mac-address" property.
+ type: str
+ required: true
+ promiscuous:
+ description:
+ - Whether the interface should be put in promiscuous mode.
+ type: bool
+ tap:
+ description:
+ - Whether the interface should be a MACVTAP.
+ type: bool
+ wireguard:
+ description:
+ - The configuration of the WireGuard connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on
+ the host.
+ - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).'
+ - 'For instance to configure a listen port: V({listen-port: 12345}).'
+ type: dict
+ version_added: 4.3.0
+ suboptions:
+ fwmark:
+ description:
+ - The 32-bit fwmark for outgoing packets.
+ - The use of fwmark is optional and is by default off. Setting it to 0 disables it.
+ - Note that enabling O(wireguard.ip4-auto-default-route) or O(wireguard.ip6-auto-default-route) implies automatically
+ choosing a fwmark.
+ type: int
+ ip4-auto-default-route:
+ description:
+ - Whether to enable special handling of the IPv4 default route.
+ - If enabled, the IPv4 default route from O(wireguard.peer-routes) is placed in a dedicated routing table and two
+ policy routing rules are added.
+ - The fwmark number is also used as the routing table for the default route, and if fwmark is zero, an unused fwmark/table
+ is chosen automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved
+ Rule-based Routing".
+ type: bool
+ ip6-auto-default-route:
+ description:
+ - Like O(wireguard.ip4-auto-default-route), but for the IPv6 default route.
+ type: bool
+ listen-port:
+ description: The WireGuard connection listen-port. If not specified, the port is chosen randomly when the interface
+ comes up.
+ type: int
+ mtu:
+ description:
+ - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple
+ fragments.
+ - If zero, a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the
+ current routes at the time of activation.
+ type: int
+ peer-routes:
+ description:
+ - Whether to automatically add routes for the AllowedIPs ranges of the peers.
+ - If V(true) (the default), NetworkManager automatically adds routes in the routing tables according to C(ipv4.route-table)
+ and C(ipv6.route-table). Usually you want this automatism enabled.
+ - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes
+ in C(ipv4.routes) and C(ipv6.routes), respectively.
+ - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
+ setting is enabled, the peer route for this peer is not added automatically.
+ type: bool
+ private-key:
+ description: The 256-bit private key in base64 encoding.
+ type: str
+ private-key-flags:
+ description: C(NMSettingSecretFlags) indicating how to handle the O(wireguard.private-key) property.
+ type: int
+ choices: [0, 1, 2]
+ vpn:
+ description:
+ - Configuration of a VPN connection (PPTP and L2TP).
+ - To use L2TP, make sure that C(network-manager-l2tp) is installed on the host, as well as C(network-manager-l2tp-gnome)
+ if the host has a UI.
+ type: dict
+ version_added: 5.1.0
+ suboptions:
+ permissions:
+ description: User that has permission to use the connection.
+ type: str
+ required: true
+ service-type:
+ description: This defines the service type of the connection.
+ type: str
+ required: true
+ gateway:
+ description: The gateway of the connection. It can be an IP address (for example V(192.0.2.1)) or an FQDN (for
+ example V(vpn.example.com)).
+ type: str
+ required: true
+ password-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the C(vpn.password) property.
+ - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret
+ (default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when
+ it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should
+ be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically
+ determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates
+ that the specific secret is not required.'
+ type: int
+ choices: [0, 1, 2, 4]
+ default: 0
+ user:
+ description: Username provided by the VPN administrator.
+ type: str
+ required: true
+ ipsec-enabled:
+ description:
+ - Enable or disable IPSec tunnel to L2TP host.
+ - This option is needed when O(vpn.service-type) is V(org.freedesktop.NetworkManager.l2tp).
+ type: bool
+ ipsec-psk:
+ description:
+ - The pre-shared key in base64 encoding.
+ - >
+ You can encode using this Ansible Jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
+ - This is only used when O(vpn.ipsec-enabled=true).
+ type: str
+ sriov:
+ description:
+ - Allows configuring SR-IOV settings.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.pages.freedesktop.org/NetworkManager/NetworkManager/settings-sriov.html).'
+ type: dict
+ version_added: 10.1.0
+ suboptions:
+ autoprobe-drivers:
+ description:
+ - Whether to autoprobe virtual functions by a compatible driver.
+ type: int
+ eswitch-encap-mode:
+ description:
+ - Select the eswitch encapsulation support.
+ type: int
+ eswitch-inline-mode:
+ description:
+ - Select the eswitch inline-mode of the device.
+ type: int
+ eswitch-mode:
+ description:
+ - Select the eswitch mode of the device.
+ type: int
+ total-vfs:
+ description: Number of virtual functions to create. Consult your NIC documentation for the maximum number of VFs supported.
+ type: int
+ vfs:
+ description:
+ - 'Virtual function descriptors in the form: V(INDEX [ATTR=VALUE[ ATTR=VALUE]...]).'
+ - Multiple VFs can be specified using a comma as separator, for example V(2 mac=00:11:22:33:44:55 spoof-check=true,3
+ vlans=100).
+ type: str
+"""
+
+EXAMPLES = r"""
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# ```
+# |_/inventory/cloud-hosts
+# | /group_vars/openstack-stage.yml
+# | /host_vars/controller-01.openstack.host.com
+# | /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# | /playbook-add.yml
+# | /playbook-del.yml
+# ```
+#
+# ## inventory examples
+# ### group_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #bond vars
+# nmcli_bond:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# mode: balance-rr
+# nmcli_bond_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+# - conn_name: em1
+# ifname: em1
+# ip4:
+# - '{{ tenant_ip }}'
+# - '{{ second_tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: em2
+# ifname: em2
+# ip4: '{{ tenant_ip1 }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: p2p1
+# ifname: p2p1
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# - conn_name: p2p2
+# ifname: p2p2
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# second_tenant_ip: "204.0.113.77/23"
+# ```
+
+
+## playbook-add.yml example
+
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
+
+##### Working with all cloud nodes - Teaming
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
+
+ - name: Try nmcli add team-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
+
+##### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
+
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+ - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: true
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Change the property of a setting e.g.
MTU and reload connection + community.general.nmcli: + conn_name: my-eth1 + mtu: 1500 + type: ethernet + state: present + conn_reload: true + + - name: Disable connection + community.general.nmcli: + conn_name: my-eth1 + state: down + + - name: Reload and enable connection + community.general.nmcli: + conn_name: my-eth1 + state: up + conn_reload: true + + - name: Add second ip4 address + community.general.nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip4: + - 192.0.2.100/24 + - 192.0.3.100/24 + state: present + + - name: Add second ip6 address + community.general.nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip6: + - 2001:db8::cafe + - 2002:db8::cafe + state: present + + - name: Add VxLan + community.general.nmcli: + type: vxlan + conn_name: vxlan_test1 + vxlan_id: 16 + vxlan_local: 192.168.1.2 + vxlan_remote: 192.168.1.5 + + - name: Add gre + community.general.nmcli: + type: gre + conn_name: gre_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + + - name: Add ipip + community.general.nmcli: + type: ipip + conn_name: ipip_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + + - name: Add sit + community.general.nmcli: + type: sit + conn_name: sit_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + + - name: Add zone + community.general.nmcli: + type: ethernet + conn_name: my-eth1 + zone: external + state: present + +# nmcli exits with status 0 if it succeeds and exits with a status greater +# than zero when there is a failure. The following list of status codes may be +# returned: +# +# - 0 Success - indicates the operation succeeded +# - 1 Unknown or unspecified error +# - 2 Invalid user input, wrong nmcli invocation +# - 3 Timeout expired (see --wait option) +# - 4 Connection activation failed +# - 5 Connection deactivation failed +# - 6 Disconnecting device failed +# - 7 Connection deletion failed +# - 8 NetworkManager is not running +# - 9 nmcli and NetworkManager versions mismatch +# - 10 Connection, device, or access point does not exist. 
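+
+# Illustrative sketch: wifi.wake-on-wlan is a bitmask, so combining options
+# means passing the sum of their values; here 0x4 (disconnect) + 0x8 (magic
+# packet) = 12. The connection, interface, and SSID names are assumptions.
+- name: Create a wifi connection that wakes on magic packet or disconnect
+ community.general.nmcli:
+ type: wifi
+ conn_name: wowlan-wifi
+ ifname: wlp4s0
+ ssid: ExampleNet
+ wifi:
+ wake-on-wlan: 12
+ state: present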
+
+- name: Create the wifi connection
+ community.general.nmcli:
+ type: wifi
+ conn_name: Brittany
+ ifname: wlp4s0
+ ssid: Brittany
+ wifi_sec:
+ key-mgmt: wpa-psk
+ psk: my_password
+ autoconnect: true
+ state: present
+
+- name: Create a hidden AP mode wifi connection
+ community.general.nmcli:
+ type: wifi
+ conn_name: ChocoMaster
+ ifname: wlo1
+ ssid: ChocoMaster
+ wifi:
+ hidden: true
+ mode: ap
+ autoconnect: true
+ state: present
+
+- name: Create a gsm connection
+ community.general.nmcli:
+ type: gsm
+ conn_name: my-gsm-provider
+ ifname: cdc-wdm0
+ gsm:
+ apn: my.provider.apn
+ username: my-provider-username
+ password: my-provider-password
+ pin: my-sim-pin
+ autoconnect: true
+ state: present
+
+- name: Create a macvlan connection
+ community.general.nmcli:
+ type: macvlan
+ conn_name: my-macvlan-connection
+ ifname: mymacvlan0
+ macvlan:
+ mode: 2
+ parent: eth1
+ autoconnect: true
+ state: present
+
+- name: Create a wireguard connection
+ community.general.nmcli:
+ type: wireguard
+ conn_name: my-wg-provider
+ ifname: mywg0
+ wireguard:
+ listen-port: 51820
+ private-key: my-private-key
+ autoconnect: true
+ state: present
+
+- name: >-
+ Create a VPN L2TP connection for ansible_user to connect to vpn.example.com,
+ authenticating as user 'brittany' with pre-shared key 'Brittany123'
+ community.general.nmcli:
+ type: vpn
+ conn_name: my-vpn-connection
+ vpn:
+ permissions: "{{ ansible_user }}"
+ service-type: org.freedesktop.NetworkManager.l2tp
+ gateway: vpn.example.com
+ password-flags: 2
+ user: brittany
+ ipsec-enabled: true
+ ipsec-psk: "0s{{ 'Brittany123' | ansible.builtin.b64encode }}"
+ autoconnect: false
+ state: present
+
+## Creating a bond attached to a bridge
+- name: Create bond attached to bridge
+ community.general.nmcli:
+ type: bond
+ conn_name: bond0
+ slave_type: bridge
+ master: br0
+ state: present
+
+- name: Create master bridge
+ community.general.nmcli:
+ type: bridge
+ conn_name: br0
+ method4: disabled
+ method6: disabled
+ state: present
+
+## Creating a vlan connection attached to a bridge
+- name: Create master bridge
+ community.general.nmcli:
+ type: bridge
+ conn_name: br0
+ state: present
+
+- name: Create VLAN 5
+ community.general.nmcli:
+ type: vlan
+ conn_name: eth0.5
+ slave_type: bridge
+ master: br0
+ vlandev: eth0
+ vlanid: 5
+ state: present
+
+## Creating VRF and adding VLAN interface to it
+- name: Create VRF
+ community.general.nmcli:
+ type: vrf
+ ifname: vrf10
+ table: 10
+ state: present
+ conn_name: vrf10
+ method4: disabled
+ method6: disabled
+
+- name: Create VLAN interface inside VRF
+ community.general.nmcli:
+ conn_name: "eth0.124"
+ type: vlan
+ vlanid: "124"
+ vlandev: "eth0"
+ master: "vrf10"
+ slave_type: vrf
+ state: "present"
+ ip4: '192.168.124.50'
+ gw4: '192.168.124.1'
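+
+## Configuring SR-IOV virtual functions (illustrative sketch only: the
+## interface name, VF count, and VF attributes below are assumptions, not
+## defaults; check your NIC documentation for supported values)
+- name: Create an Ethernet connection with two SR-IOV VFs
+ community.general.nmcli:
+ type: ethernet
+ conn_name: sriov-eth0
+ ifname: eth0
+ sriov:
+ total-vfs: 2
+ vfs: '0 spoof-check=true,1 vlans=100'
+ state: present
+
+## Defining ip rules while setting a static IP
+## table 'production' is set with id 200 in this example.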
+- name: Set Static ips for interface with ip rules and routes + community.general.nmcli: + type: ethernet + conn_name: 'eth0' + ip4: '192.168.1.50' + gw4: '192.168.1.1' + state: present + routes4_extended: + - ip: "0.0.0.0/0" + next_hop: "192.168.1.1" + table: "production" + routing_rules4: + - "priority 0 from 192.168.1.50 table 200" + +## Creating an OVS bridge and attaching a port +- name: Create OVS Bridge + community.general.nmcli: + conn_name: ovs-br-conn + ifname: ovs-br + type: ovs-bridge + state: present + +- name: Create OVS Port for OVS Bridge Interface + community.general.nmcli: + conn_name: ovs-br-interface-port-conn + ifname: ovs-br-interface-port + master: ovs-br + type: ovs-port + state: present + +## Adding an ethernet interface to an OVS bridge port +- name: Add Ethernet Interface to OVS Port + community.general.nmcli: + conn_name: eno1 + ifname: eno1 + master: ovs-br-interface-port + slave_type: ovs-port + type: ethernet + state: present +""" + +RETURN = r"""# +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text +import re + + +class NmcliModuleError(Exception): + pass + + +class Nmcli(object): + """ + This is the generic nmcli manipulation class that is subclassed based on platform. + A subclass may wish to override the following action methods:- + - create_connection() + - delete_connection() + - edit_connection() + - modify_connection() + - show_connection() + - up_connection() + - down_connection() + All subclasses MUST define platform and distribution (which may be None). + """ + + platform = 'Generic' + distribution = None + + SECRET_OPTIONS = ( + '802-11-wireless-security.leap-password', + '802-11-wireless-security.psk', + '802-11-wireless-security.wep-key0', + '802-11-wireless-security.wep-key1', + '802-11-wireless-security.wep-key2', + '802-11-wireless-security.wep-key3' + ) + + def __init__(self, module): + self.module = module + self.state = module.params['state'] + self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] + self.autoconnect = module.params['autoconnect'] + self.autoconnect_priority = module.params['autoconnect_priority'] + self.autoconnect_retries = module.params['autoconnect_retries'] + self.conn_name = module.params['conn_name'] + self.conn_reload = module.params['conn_reload'] + self.slave_type = module.params['slave_type'] + self.master = module.params['master'] + self.ifname = module.params['ifname'] + self.type = module.params['type'] + self.ip4 = module.params['ip4'] + self.gw4 = module.params['gw4'] + self.gw4_ignore_auto = module.params['gw4_ignore_auto'] + self.routes4 = module.params['routes4'] + self.routes4_extended = module.params['routes4_extended'] + self.route_metric4 = module.params['route_metric4'] + self.routing_rules4 = module.params['routing_rules4'] + self.never_default4 = module.params['never_default4'] + self.dns4 = module.params['dns4'] + self.dns4_search = module.params['dns4_search'] + self.dns4_options = module.params['dns4_options'] + self.dns4_ignore_auto = module.params['dns4_ignore_auto'] + self.method4 = module.params['method4'] + self.may_fail4 = module.params['may_fail4'] + self.ip6 = module.params['ip6'] + self.gw6 = module.params['gw6'] + self.gw6_ignore_auto = module.params['gw6_ignore_auto'] + self.routes6 = module.params['routes6'] + self.routes6_extended = module.params['routes6_extended'] + self.route_metric6 = module.params['route_metric6'] + self.dns6 = module.params['dns6'] + self.dns6_search = 
module.params['dns6_search'] + self.dns6_options = module.params['dns6_options'] + self.dns6_ignore_auto = module.params['dns6_ignore_auto'] + self.method6 = module.params['method6'] + self.ip_privacy6 = module.params['ip_privacy6'] + self.addr_gen_mode6 = module.params['addr_gen_mode6'] + self.mtu = module.params['mtu'] + self.stp = module.params['stp'] + self.priority = module.params['priority'] + self.mode = module.params['mode'] + self.miimon = module.params['miimon'] + self.primary = module.params['primary'] + self.downdelay = module.params['downdelay'] + self.updelay = module.params['updelay'] + self.xmit_hash_policy = module.params['xmit_hash_policy'] + self.fail_over_mac = module.params['fail_over_mac'] + self.arp_interval = module.params['arp_interval'] + self.arp_ip_target = module.params['arp_ip_target'] + self.slavepriority = module.params['slavepriority'] + self.forwarddelay = module.params['forwarddelay'] + self.hellotime = module.params['hellotime'] + self.maxage = module.params['maxage'] + self.ageingtime = module.params['ageingtime'] + self.hairpin = module.params['hairpin'] + self.path_cost = module.params['path_cost'] + self.mac = module.params['mac'] + self.runner = module.params['runner'] + self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] + self.runner_fast_rate = module.params['runner_fast_rate'] + self.vlanid = module.params['vlanid'] + self.vlandev = module.params['vlandev'] + self.flags = module.params['flags'] + self.ingress = module.params['ingress'] + self.egress = module.params['egress'] + self.vxlan_id = module.params['vxlan_id'] + self.vxlan_local = module.params['vxlan_local'] + self.vxlan_remote = module.params['vxlan_remote'] + self.ip_tunnel_dev = module.params['ip_tunnel_dev'] + self.ip_tunnel_local = module.params['ip_tunnel_local'] + self.ip_tunnel_remote = module.params['ip_tunnel_remote'] + self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] + self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] + self.nmcli_bin = self.module.get_bin_path('nmcli', True) + self.dhcp_client_id = module.params['dhcp_client_id'] + self.zone = module.params['zone'] + self.ssid = module.params['ssid'] + self.wifi = module.params['wifi'] + self.wifi_sec = module.params['wifi_sec'] + self.gsm = module.params['gsm'] + self.macvlan = module.params['macvlan'] + self.wireguard = module.params['wireguard'] + self.vpn = module.params['vpn'] + self.transport_mode = module.params['transport_mode'] + self.infiniband_mac = module.params['infiniband_mac'] + self.sriov = module.params['sriov'] + + if self.method4: + self.ipv4_method = self.method4 + elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4: + self.ipv4_method = 'disabled' + elif self.ip4: + self.ipv4_method = 'manual' + else: + self.ipv4_method = None + + if self.method6: + self.ipv6_method = self.method6 + elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6: + self.ipv6_method = 'disabled' + elif self.ip6: + self.ipv6_method = 'manual' + else: + self.ipv6_method = None + + if self.type == "vrf": + self.table = module.params['table'] + + self.edit_commands = [] + + self.extra_options_validation() + + def extra_options_validation(self): + """ Additional validation of options set passed to module that cannot be implemented in module's argspecs. 
""" + if self.type not in ("bridge-slave", "team-slave", "bond-slave"): + if self.master is None and self.slave_type is not None: + self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.") + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + cmd = [to_text(item) for item in cmd] + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def execute_edit_commands(self, commands, arguments): + arguments = arguments or [] + cmd = [self.nmcli_bin, 'con', 'edit'] + arguments + data = "\n".join(commands) + return self.execute_command(cmd, data=data) + + def connection_options(self, detect_change=False): + # Options common to multiple connection types. + options = { + 'connection.autoconnect': self.autoconnect, + 'connection.autoconnect-priority': self.autoconnect_priority, + 'connection.autoconnect-retries': self.autoconnect_retries, + 'connection.zone': self.zone, + } + + # IP address options. + # The ovs-interface type can be both ip_conn_type and have a master + # An interface that has a master but is of slave type vrf can have an IP address + if (self.ip_conn_type and (not self.master or self.slave_type == "vrf")) or self.type == "ovs-interface": + options.update({ + 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4), + 'ipv4.dhcp-client-id': self.dhcp_client_id, + 'ipv4.dns': self.dns4, + 'ipv4.dns-search': self.dns4_search, + 'ipv4.dns-options': self.dns4_options, + 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, + 'ipv4.gateway': self.gw4, + 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, + 'ipv4.routes': self.enforce_routes_format(self.routes4, self.routes4_extended), + 'ipv4.route-metric': self.route_metric4, + 'ipv4.routing-rules': self.routing_rules4, + 'ipv4.never-default': self.never_default4, + 'ipv4.method': self.ipv4_method, + 'ipv4.may-fail': self.may_fail4, + 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6), + 'ipv6.dns': self.dns6, + 'ipv6.dns-search': self.dns6_search, + 'ipv6.dns-options': self.dns6_options, + 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, + 'ipv6.gateway': self.gw6, + 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, + 'ipv6.routes': self.enforce_routes_format(self.routes6, self.routes6_extended), + 'ipv6.route-metric': self.route_metric6, + 'ipv6.method': self.ipv6_method, + 'ipv6.ip6-privacy': self.ip_privacy6, + 'ipv6.addr-gen-mode': self.addr_gen_mode6 + }) + # when 'method' is disabled the 'may_fail' no make sense but accepted by nmcli with keeping 'yes' + # force ignoring to save idempotency + if self.ipv4_method and self.ipv4_method != 'disabled': + options.update({'ipv4.may-fail': self.may_fail4}) + + # Layer 2 options. + if self.mac: + options.update({self.mac_setting: self.mac}) + + if self.mtu_conn_type: + options.update({self.mtu_setting: self.mtu}) + + # Connections that can have a master. + if self.slave_conn_type: + options.update({ + 'connection.master': self.master, + 'connection.slave-type': self.slave_type, + }) + + # Options specific to a connection type. 
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ 'xmit_hash_policy': self.xmit_hash_policy,
+ 'fail_over_mac': self.fail_over_mac,
+ })
+ elif self.type == 'bond-slave':
+ if self.slave_type and self.slave_type != 'bond':
+ self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
+ "Allowed slave-type for '%s' is 'bond'."
+ % (self.type, self.slave_type, self.type)
+ )
+ if not self.slave_type:
+ self.module.warn("Connection 'slave-type' property automatically set to 'bond' "
+ "because of using 'bond-slave' connection type.")
+ options.update({
+ 'connection.slave-type': 'bond',
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.priority': self.priority,
+ 'bridge.stp': self.stp,
+ })
+ # priority only makes sense when stp is enabled; otherwise nmcli keeps bridge-priority at 32768 regardless of input.
+ # Force ignoring it to preserve idempotency.
+ if self.stp:
+ options.update({'bridge.priority': self.priority})
+ elif self.type == 'team':
+ options.update({
+ 'team.runner': self.runner,
+ 'team.runner-hwaddr-policy': self.runner_hwaddr_policy,
+ })
+ if self.runner_fast_rate is not None:
+ options.update({
+ 'team.runner-fast-rate': self.runner_fast_rate,
+ })
+ elif self.type == 'bridge-slave':
+ if self.slave_type and self.slave_type != 'bridge':
+ self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
+ "Allowed slave-type for '%s' is 'bridge'."
+ % (self.type, self.slave_type, self.type)
+ )
+ if not self.slave_type:
+ self.module.warn("Connection 'slave-type' property automatically set to 'bridge' "
+ "because of using 'bridge-slave' connection type.")
+ options.update({'connection.slave-type': 'bridge'})
+ self.module.warn(
+ "Connection type as 'bridge-slave' implies 'ethernet' connection with 'bridge' slave-type. "
+ "Consider using slave_type='bridge' with necessary type."
+ )
+ options.update({
+ 'bridge-port.path-cost': self.path_cost,
+ 'bridge-port.hairpin-mode': self.hairpin,
+ 'bridge-port.priority': self.slavepriority,
+ })
+ elif self.type == 'team-slave':
+ if self.slave_type and self.slave_type != 'team':
+ self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
+ "Allowed slave-type for '%s' is 'team'."
+ % (self.type, self.slave_type, self.type) + ) + if not self.slave_type: + self.module.warn("Connection 'slave-type' property automatically set to 'team' " + "because of using 'team-slave' connection type.") + options.update({ + 'connection.slave-type': 'team', + }) + elif self.tunnel_conn_type: + options.update({ + 'ip-tunnel.local': self.ip_tunnel_local, + 'ip-tunnel.mode': self.type, + 'ip-tunnel.parent': self.ip_tunnel_dev, + 'ip-tunnel.remote': self.ip_tunnel_remote, + }) + if self.type == 'gre': + options.update({ + 'ip-tunnel.input-key': self.ip_tunnel_input_key, + 'ip-tunnel.output-key': self.ip_tunnel_output_key + }) + elif self.type == 'vlan': + options.update({ + 'vlan.id': self.vlanid, + 'vlan.parent': self.vlandev, + 'vlan.flags': self.flags, + 'vlan.ingress': self.ingress, + 'vlan.egress': self.egress, + }) + elif self.type == 'vxlan': + options.update({ + 'vxlan.id': self.vxlan_id, + 'vxlan.local': self.vxlan_local, + 'vxlan.remote': self.vxlan_remote, + }) + elif self.type == 'wifi': + options.update({ + '802-11-wireless.ssid': self.ssid, + 'connection.slave-type': ('bond' if self.slave_type is None else self.slave_type) if self.master else None, + }) + if self.wifi: + for name, value in self.wifi.items(): + options.update({ + '802-11-wireless.%s' % name: value + }) + if self.wifi_sec: + for name, value in self.wifi_sec.items(): + options.update({ + '802-11-wireless-security.%s' % name: value + }) + elif self.type == 'gsm': + if self.gsm: + for name, value in self.gsm.items(): + options.update({ + 'gsm.%s' % name: value, + }) + elif self.type == 'macvlan': + if self.macvlan: + for name, value in self.macvlan.items(): + options.update({ + 'macvlan.%s' % name: value, + }) + elif self.state == 'present': + raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan') + elif self.type == 'wireguard': + if self.wireguard: + for name, value in self.wireguard.items(): + options.update({ + 'wireguard.%s' % name: value, + }) + elif self.type == 'vpn': + if self.vpn: + vpn_data_values = '' + for name, value in self.vpn.items(): + if name == 'service-type': + options.update({ + 'vpn.service-type': value, + }) + elif name == 'permissions': + options.update({ + 'connection.permissions': value, + }) + else: + if vpn_data_values != '': + vpn_data_values += ', ' + + if isinstance(value, bool): + value = self.bool_to_string(value) + + vpn_data_values += '%s=%s' % (name, value) + options.update({ + 'vpn.data': vpn_data_values, + }) + elif self.type == 'infiniband': + options.update({ + 'infiniband.transport-mode': self.transport_mode, + }) + if self.infiniband_mac: + options['infiniband.mac-address'] = self.infiniband_mac + elif self.type == 'vrf': + options.update({ + 'table': self.table, + }) + + if self.type == 'ethernet': + if self.sriov: + for name, value in self.sriov.items(): + options.update({ + 'sriov.%s' % name: value, + }) + + # Convert settings values based on the situation. + for setting, value in options.items(): + setting_type = self.settings_type(setting) + convert_func = None + if setting_type is bool: + # Convert all bool options to yes/no. + convert_func = self.bool_to_string + if detect_change: + if setting in ('vlan.id', 'vxlan.id'): + # Convert VLAN/VXLAN IDs to text when detecting changes. + convert_func = to_text + elif setting == self.mtu_setting: + # MTU is 'auto' by default when detecting changes. 
+ convert_func = self.mtu_to_string + elif setting == 'ipv6.ip6-privacy': + convert_func = self.ip6_privacy_to_num + elif setting_type is list: + # Convert lists to strings for nmcli create/modify commands. + convert_func = self.list_to_string + + if callable(convert_func): + options[setting] = convert_func(value) + + return options + + @property + def ip_conn_type(self): + return self.type in ( + 'bond', + 'bridge', + 'dummy', + 'ethernet', + '802-3-ethernet', + 'generic', + 'gre', + 'infiniband', + 'ipip', + 'sit', + 'team', + 'vlan', + 'wifi', + '802-11-wireless', + 'gsm', + 'macvlan', + 'wireguard', + 'vpn', + 'loopback', + 'ovs-interface', + 'vrf' + ) + + @property + def mac_setting(self): + if self.type == 'bridge': + return 'bridge.mac-address' + else: + return '802-3-ethernet.cloned-mac-address' + + @property + def mtu_conn_type(self): + return self.type in ( + 'bond', + 'bond-slave', + 'dummy', + 'ethernet', + 'infiniband', + 'team-slave', + 'vlan', + ) + + @property + def mtu_setting(self): + if self.type == 'infiniband': + return 'infiniband.mtu' + else: + return '802-3-ethernet.mtu' + + @staticmethod + def mtu_to_string(mtu): + if not mtu: + return 'auto' + else: + return to_text(mtu) + + @staticmethod + def ip6_privacy_to_num(privacy): + ip6_privacy_values = { + 'disabled': '0', + 'prefer-public-addr': '1 (enabled, prefer public IP)', + 'prefer-temp-addr': '2 (enabled, prefer temporary IP)', + 'unknown': '-1', + } + + if privacy is None: + return None + + if privacy not in ip6_privacy_values: + raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy)) + + return ip6_privacy_values[privacy] + + @property + def slave_conn_type(self): + return self.type in ( + 'ethernet', + 'bridge', + 'bond', + 'vlan', + 'team', + 'wifi', + 'bond-slave', + 'bridge-slave', + 'team-slave', + 'wifi', + 'infiniband', + 'ovs-port', + 'ovs-interface', + ) + + @property + def tunnel_conn_type(self): + return self.type in ( + 'gre', + 'ipip', + 'sit', + ) + + @staticmethod + def enforce_ipv4_cidr_notation(ip4_addresses): + if ip4_addresses is None: + return None + return [address if '/' in address else address + '/32' for address in ip4_addresses] + + @staticmethod + def enforce_ipv6_cidr_notation(ip6_addresses): + if ip6_addresses is None: + return None + return [address if '/' in address else address + '/128' for address in ip6_addresses] + + def enforce_routes_format(self, routes, routes_extended): + if routes is not None: + return routes + elif routes_extended is not None: + return [self.route_to_string(route) for route in routes_extended] + else: + return None + + @staticmethod + def route_to_string(route): + result_str = '' + result_str += route['ip'] + if route.get('next_hop') is not None: + result_str += ' ' + route['next_hop'] + if route.get('metric') is not None: + result_str += ' ' + str(route['metric']) + + for attribute, value in sorted(route.items()): + if attribute not in ('ip', 'next_hop', 'metric') and value is not None: + result_str += ' {0}={1}'.format(attribute, str(value).lower()) + + return result_str + + @staticmethod + def bool_to_string(boolean): + if boolean: + return "yes" + else: + return "no" + + @staticmethod + def list_to_string(lst): + if lst is None: + return None + else: + return ",".join(lst) + + @staticmethod + def settings_type(setting): + if setting in {'bridge.stp', + 'bridge-port.hairpin-mode', + 'connection.autoconnect', + 'ipv4.never-default', + 'ipv4.ignore-auto-dns', + 'ipv4.ignore-auto-routes', + 'ipv4.may-fail', + 
'ipv6.ignore-auto-dns',
+ 'ipv6.ignore-auto-routes',
+ '802-11-wireless.hidden',
+ 'team.runner-fast-rate'}:
+ return bool
+ elif setting in {'ipv4.addresses',
+ 'ipv6.addresses',
+ 'ipv4.dns',
+ 'ipv4.dns-search',
+ 'ipv4.dns-options',
+ 'ipv4.routes',
+ 'ipv4.routing-rules',
+ 'ipv6.dns',
+ 'ipv6.dns-search',
+ 'ipv6.dns-options',
+ 'ipv6.routes',
+ '802-11-wireless-security.group',
+ '802-11-wireless-security.leap-password-flags',
+ '802-11-wireless-security.pairwise',
+ '802-11-wireless-security.proto',
+ '802-11-wireless-security.psk-flags',
+ '802-11-wireless-security.wep-key-flags',
+ '802-11-wireless.mac-address-blacklist'}:
+ return list
+ elif setting in {'connection.autoconnect-priority',
+ 'connection.autoconnect-retries'}:
+ return int
+ return str
+
+ def get_route_params(self, raw_values):
+ routes_params = []
+ for raw_value in raw_values:
+ route_params = {}
+ for parameter, value in re.findall(r'([\w-]*)\s?=\s?([^\s,}]*)', raw_value):
+ if parameter == 'nh':
+ route_params['next_hop'] = value
+ elif parameter == 'mt':
+ route_params['metric'] = value
+ else:
+ route_params[parameter] = value
+ routes_params.append(route_params)
+ return [self.route_to_string(route_params) for route_params in routes_params]
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def reload_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'reload']
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ # VPN does not need an interface, but if one is sent it must be a valid interface.
+ if self.type == 'vpn' and self.ifname is None:
+ del options['connection.interface-name']
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
+ for key, value in options.items(): + if value is not None: + if key in self.SECRET_OPTIONS: + self.edit_commands += ['set %s %s' % (key, value)] + continue + if key == 'xmit_hash_policy': + cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value]) + continue + if key == 'fail_over_mac': + cmd.extend(['+bond.options', 'fail_over_mac=%s' % value]) + continue + cmd.extend([key, value]) + + return self.execute_command(cmd) + + def create_connection(self): + status = self.connection_update('create') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + if self.create_connection_up: + status = self.up_connection() + return status + + @property + def create_connection_up(self): + if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + return True + elif self.type == 'team': + if (self.dns4 is not None) or (self.dns6 is not None): + return True + return False + + def remove_connection(self): + # self.down_connection() + cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] + return self.execute_command(cmd) + + def modify_connection(self): + status = self.connection_update('modify') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + return status + + def edit_connection(self): + commands = self.edit_commands + ['save', 'quit'] + return self.execute_edit_commands(commands, arguments=[self.conn_name]) + + def show_connection(self): + cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] + + (rc, out, err) = self.execute_command(cmd) + + if rc != 0: + raise NmcliModuleError(err) + + p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') + + conn_info = dict() + for line in out.splitlines(): + pair = line.split(':', 1) + key = pair[0].strip() + key_type = self.settings_type(key) + if key and len(pair) > 1: + raw_value = pair[1].lstrip() + if raw_value == '--': + if key_type == list: + conn_info[key] = [] + else: + conn_info[key] = None + elif key == 'bond.options': + # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. + opts = raw_value.split(',') + for opt in opts: + alias_pair = opt.split('=', 1) + if len(alias_pair) > 1: + alias_key = alias_pair[0] + alias_value = alias_pair[1] + conn_info[alias_key] = alias_value + elif key in ('ipv4.routes', 'ipv6.routes'): + conn_info[key] = [s.strip() for s in raw_value.split(';')] + elif key_type == list: + conn_info[key] = [s.strip() for s in raw_value.split(',')] + else: + m_enum = p_enum_value.match(raw_value) + if m_enum is not None: + value = m_enum.group(1) + else: + value = raw_value + conn_info[key] = value + + return conn_info + + def get_supported_properties(self, setting): + properties = [] + + if setting == '802-11-wireless-security': + set_property = 'psk' + set_value = 'FAKEVALUE' + commands = ['set %s.%s %s' % (setting, set_property, set_value)] + else: + commands = [] + + commands += ['print %s' % setting, 'quit', 'yes'] + + (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) + + if rc != 0: + raise NmcliModuleError(err) + + for line in out.splitlines(): + prefix = '%s.' 
% setting
+ if line.startswith(prefix):
+ pair = line.split(':', 1)
+ property = pair[0].strip().replace(prefix, '')
+ properties.append(property)
+
+ return properties
+
+ def check_for_unsupported_properties(self, setting):
+ if setting == '802-11-wireless':
+ setting_key = 'wifi'
+ elif setting == '802-11-wireless-security':
+ setting_key = 'wifi_sec'
+ else:
+ setting_key = setting
+
+ supported_properties = self.get_supported_properties(setting)
+ unsupported_properties = []
+
+ for property, value in getattr(self, setting_key).items():
+ if property not in supported_properties:
+ unsupported_properties.append(property)
+
+ if unsupported_properties:
+ msg_options = []
+ for property in unsupported_properties:
+ msg_options.append('%s.%s' % (setting_key, property))
+
+ msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options)
+ if self.ignore_unsupported_suboptions:
+ self.module.warn(msg)
+ else:
+ self.module.fail_json(msg=msg)
+
+ return unsupported_properties
+
+ def _compare_conn_params(self, conn_info, options):
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
+ # We can't just do `if not value` because then if there's a value
+ # of 0 specified as an integer it'll be interpreted as empty when
+ # it actually isn't.
+ if value not in (0, []) and not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ if key == '802-11-wireless.wake-on-wlan' and current_value is not None:
+ match = re.match('0x([0-9A-Fa-f]+)', current_value)
+ if match:
+ current_value = str(int(match.group(1), 16))
+ if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None:
+ current_value = self.get_route_params(current_value)
+ if key == self.mac_setting:
+ # MAC addresses are case-insensitive; nmcli always reports them in uppercase.
+ value = value.upper()
+ # Ensure current_value is also converted to uppercase in case nmcli changes behaviour.
+ if current_value:
+ current_value = current_value.upper()
+ if key == 'gsm.apn':
+ # Depending on the version, nmcli adds double-quotes around gsm.apn.
+ # Strip them in order to compare both values.
+ if current_value:
+ current_value = current_value.strip('"')
+ if key == self.mtu_setting and self.mtu is None:
+ self.mtu = 0
+ if key == 'vpn.data':
+ if current_value:
+ current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(','))
+ value = sorted(part.strip() for part in value.split(','))
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if key in ('ipv4.addresses', 'ipv6.addresses', 'ipv4.dns', 'ipv6.dns', 'ipv4.dns-search', 'ipv6.dns-search'):
+ # The order of IP addresses matters because the first one
+ # is the default source address for outbound connections.
+ # Similarly, the order of DNS nameservers and search
+ # suffixes is important.
+ changed |= current_value != value
+ else:
+ changed |= sorted(current_value) != sorted(value)
+ elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]):
+ value = None
+ else:
+ value = to_text(value)
+ if current_value != value:
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+
+ # VPN does not need an interface, but if one is sent it must be a valid interface.
+ if self.type == 'vpn' and self.ifname is None:
+ del options['connection.interface-name']
+
+ if not self.type:
+ current_con_type = self.show_connection().get('connection.type')
+ if current_con_type:
+ if current_con_type == '802-11-wireless':
+ current_con_type = 'wifi'
+ self.type = current_con_type
+
+ options.update(self.connection_options(detect_change=True))
+ return self._compare_conn_params(self.show_connection(), options)
+
+
+def main():
+ # Parse module arguments.
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_unsupported_suboptions=dict(type='bool', default=False),
+ autoconnect=dict(type='bool', default=True),
+ autoconnect_priority=dict(type='int'),
+ autoconnect_retries=dict(type='int'),
+ state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']),
+ conn_name=dict(type='str', required=True),
+ conn_reload=dict(type='bool', default=False),
+ master=dict(type='str'),
+ slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port', 'vrf']),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=[
+ 'bond',
+ 'bond-slave',
+ 'bridge',
+ 'bridge-slave',
+ 'dummy',
+ 'ethernet',
+ 'generic',
+ 'gre',
+ 'infiniband',
+ 'ipip',
+ 'sit',
+ 'team',
+ 'team-slave',
+ 'vlan',
+ 'vxlan',
+ 'wifi',
+ 'gsm',
+ 'macvlan',
+ 'wireguard',
+ 'vpn',
+ 'loopback',
+ 'ovs-interface',
+ 'ovs-bridge',
+ 'ovs-port',
+ 'vrf',
+ ]),
+ ip4=dict(type='list', elements='str'),
+ gw4=dict(type='str'),
+ gw4_ignore_auto=dict(type='bool', default=False),
+ routes4=dict(type='list', elements='str'),
+ routes4_extended=dict(type='list',
+ elements='dict',
+ options=dict(
+ ip=dict(type='str', required=True),
+ next_hop=dict(type='str'),
+ metric=dict(type='int'),
+ table=dict(type='int'),
+ tos=dict(type='int'),
+ cwnd=dict(type='int'),
+ mtu=dict(type='int'),
+ onlink=dict(type='bool')
+ )),
+ route_metric4=dict(type='int'),
+ routing_rules4=dict(type='list', elements='str'),
+ never_default4=dict(type='bool', default=False),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dns4_options=dict(type='list', elements='str'),
+ dns4_ignore_auto=dict(type='bool', default=False),
+ method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']),
+ may_fail4=dict(type='bool', default=True),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='list', elements='str'),
+ gw6=dict(type='str'),
+ gw6_ignore_auto=dict(type='bool', default=False),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ dns6_options=dict(type='list', elements='str'),
+ dns6_ignore_auto=dict(type='bool', default=False),
+ routes6=dict(type='list', elements='str'),
+ routes6_extended=dict(type='list',
+ elements='dict',
+ options=dict(
+ ip=dict(type='str', required=True),
+ next_hop=dict(type='str'),
+ metric=dict(type='int'),
+ table=dict(type='int'),
cwnd=dict(type='int'), + mtu=dict(type='int'), + onlink=dict(type='bool') + )), + route_metric6=dict(type='int'), + method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), + ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), + addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']), + # Bond Specific vars + mode=dict(type='str', default='balance-rr', + choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), + miimon=dict(type='int'), + downdelay=dict(type='int'), + updelay=dict(type='int'), + xmit_hash_policy=dict(type='str'), + fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']), + arp_interval=dict(type='int'), + arp_ip_target=dict(type='str'), + primary=dict(type='str'), + # general usage + mtu=dict(type='int'), + mac=dict(type='str'), + zone=dict(type='str'), + # bridge specific vars + stp=dict(type='bool', default=True), + priority=dict(type='int', default=128), + slavepriority=dict(type='int', default=32), + forwarddelay=dict(type='int', default=15), + hellotime=dict(type='int', default=2), + maxage=dict(type='int', default=20), + ageingtime=dict(type='int', default=300), + hairpin=dict(type='bool', default=False), + path_cost=dict(type='int', default=100), + # team specific vars + runner=dict(type='str', default='roundrobin', + choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + # team active-backup runner specific options + runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), + # team lacp runner specific options + runner_fast_rate=dict(type='bool'), + # vlan specific vars + vlanid=dict(type='int'), + vlandev=dict(type='str'), + flags=dict(type='str'), + ingress=dict(type='str'), + egress=dict(type='str'), + # vxlan specific vars + vxlan_id=dict(type='int'), + vxlan_local=dict(type='str'), + vxlan_remote=dict(type='str'), + # ip-tunnel specific vars + ip_tunnel_dev=dict(type='str'), + ip_tunnel_local=dict(type='str'), + ip_tunnel_remote=dict(type='str'), + # ip-tunnel type gre specific vars + ip_tunnel_input_key=dict(type='str', no_log=True), + ip_tunnel_output_key=dict(type='str', no_log=True), + # 802-11-wireless* specific vars + ssid=dict(type='str'), + wifi=dict(type='dict'), + wifi_sec=dict(type='dict', no_log=True), + gsm=dict(type='dict'), + macvlan=dict(type='dict', options=dict( + mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True), + parent=dict(type='str', required=True), + promiscuous=dict(type='bool'), + tap=dict(type='bool'))), + wireguard=dict(type='dict'), + vpn=dict(type='dict'), + sriov=dict(type='dict'), + table=dict(type='int'), + # infiniband specific vars + transport_mode=dict(type='str', choices=['datagram', 'connected']), + infiniband_mac=dict(type='str'), + + ), + mutually_exclusive=[['never_default4', 'gw4'], + ['routes4_extended', 'routes4'], + ['routes6_extended', 'routes6']], + required_if=[ + ("type", "wifi", ["ssid"]), + ("type", "team-slave", ["master", "ifname"]), + ("slave_type", "team", ["master", "ifname"]), + ], + supports_check_mode=True, + ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + nmcli = Nmcli(module) + + (rc, out, err) = (None, '', '') + result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} + + # team checks + if nmcli.type == "team": + if nmcli.runner_hwaddr_policy and not 
nmcli.runner == "activebackup": + nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") + if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp": + nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp") + if nmcli.type == 'wifi': + unsupported_properties = {} + if nmcli.wifi: + if 'ssid' in nmcli.wifi: + module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") + del nmcli.wifi['ssid'] + unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') + if nmcli.wifi_sec: + unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') + if nmcli.ignore_unsupported_suboptions and unsupported_properties: + for setting_key, properties in unsupported_properties.items(): + for property in properties: + del getattr(nmcli, setting_key)[property] + + try: + if nmcli.state == 'absent': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nmcli.down_connection() + (rc, out, err) = nmcli.remove_connection() + if rc != 0: + module.fail_json(name=('Error removing connection named %s' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'present': + if nmcli.connection_exists(): + changed, diff = nmcli.is_connection_changed() + if module._diff: + result['diff'] = diff + + if changed: + # modify connection (note: this function is check mode aware) + # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) + result['Exists'] = 'Connections do exist so we are modifying them' + if module.check_mode: + module.exit_json(changed=True, **result) + (rc, out, err) = nmcli.modify_connection() + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + else: + result['Exists'] = 'Connections already exist and no changes made' + if module.check_mode: + module.exit_json(changed=False, **result) + if not nmcli.connection_exists(): + result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type)) + if module.check_mode: + module.exit_json(changed=True, **result) + (rc, out, err) = nmcli.create_connection() + if rc is not None and rc != 0: + module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) + + elif nmcli.state == 'up': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.up_connection() + if rc != 0: + module.fail_json(name=('Error bringing up connection named %s' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'down': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.down_connection() + if rc != 0: + module.fail_json(name=('Error bringing down connection named %s' % nmcli.conn_name), msg=err, rc=rc) + + except NmcliModuleError as e: + module.fail_json(name=nmcli.conn_name, msg=str(e)) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nomad_job.py b/plugins/modules/nomad_job.py deleted file mode 120000 index 763b37d1b4..0000000000 --- a/plugins/modules/nomad_job.py +++ /dev/null @@ -1 +0,0 @@ -clustering/nomad/nomad_job.py \ No 
newline at end of file
diff --git a/plugins/modules/nomad_job.py b/plugins/modules/nomad_job.py
new file mode 100644
index 0000000000..d5ecec3107
--- /dev/null
+++ b/plugins/modules/nomad_job.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+
+# Copyright (c) 2020, FERREIRA Christophe
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+  - Launch a Nomad job.
+  - Stop a Nomad job.
+  - Force start a Nomad job.
+requirements:
+  - python-nomad
+extends_documentation_fragment:
+  - community.general.nomad
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the job to delete, stop, or start without providing its source.
+      - Either this or O(content) must be specified.
+    type: str
+  state:
+    description:
+      - Deploy or remove job.
+    choices: ["present", "absent"]
+    required: true
+    type: str
+  force_start:
+    description:
+      - Force the job to start.
+    type: bool
+    default: false
+  content:
+    description:
+      - Content of the Nomad job.
+      - Either this or O(name) must be specified.
+    type: str
+  content_format:
+    description:
+      - Type of content of the Nomad job.
+    choices: ["hcl", "json"]
+    default: hcl
+    type: str
+seealso:
+  - name: Nomad jobs documentation
+    description: Complete documentation for Nomad API jobs.
+    link: https://www.nomadproject.io/api-docs/jobs/
+"""
+
+EXAMPLES = r"""
+- name: Create job
+  community.general.nomad_job:
+    host: localhost
+    state: present
+    content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+    timeout: 120
+
+- name: Connect with port to create job
+  community.general.nomad_job:
+    host: localhost
+    port: 4645
+    state: present
+    content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+    timeout: 120
+
+- name: Stop job
+  community.general.nomad_job:
+    host: localhost
+    state: absent
+    name: api
+
+- name: Force job to start
+  community.general.nomad_job:
+    host: localhost
+    state: present
+    name: api
+    timeout: 120
+    force_start: true
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+import_nomad = None
+try:
+    import nomad
+    import_nomad = True
+except ImportError:
+    import_nomad = False
+
+
+def run():
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(required=True, type='str'),
+            port=dict(type='int', default=4646),
+            state=dict(required=True, choices=['present', 'absent']),
+            use_ssl=dict(type='bool', default=True),
+            timeout=dict(type='int', default=5),
+            validate_certs=dict(type='bool', default=True),
+            client_cert=dict(type='path'),
+            client_key=dict(type='path'),
+            namespace=dict(type='str'),
+            name=dict(type='str'),
+            content_format=dict(choices=['hcl', 'json'], default='hcl'),
+            content=dict(type='str'),
+            force_start=dict(type='bool', default=False),
+            token=dict(type='str', no_log=True)
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ["name", "content"]
+        ],
+        required_one_of=[
+            ['name', 'content']
+        ]
+    )
+
+    if not import_nomad:
+        module.fail_json(msg=missing_required_lib("python-nomad"))
+
+    certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
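+    # (client_cert, client_key) pair for mutual TLS; both entries are None
+    # when the corresponding options were not supplied.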
+ + nomad_client = nomad.Nomad( + host=module.params.get('host'), + port=module.params.get('port'), + secure=module.params.get('use_ssl'), + timeout=module.params.get('timeout'), + verify=module.params.get('validate_certs'), + cert=certificate_ssl, + namespace=module.params.get('namespace'), + token=module.params.get('token') + ) + + if module.params.get('state') == "present": + + if module.params.get('name') and not module.params.get('force_start'): + module.fail_json(msg='For start job with name, force_start is needed') + + changed = False + if module.params.get('content'): + + if module.params.get('content_format') == 'json': + + job_json = module.params.get('content') + try: + job_json = json.loads(job_json) + except ValueError as e: + module.fail_json(msg=to_native(e)) + job = dict() + job['job'] = job_json + try: + job_id = job_json.get('ID') + if job_id is None: + module.fail_json(msg="Cannot retrieve job with ID None") + plan = nomad_client.job.plan_job(job_id, job, diff=True) + if not plan['Diff'].get('Type') == "None": + changed = True + if not module.check_mode: + result = nomad_client.jobs.register_job(job) + else: + result = plan + else: + result = plan + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('content_format') == 'hcl': + + try: + job_hcl = module.params.get('content') + job_json = nomad_client.jobs.parse(job_hcl) + job = dict() + job['job'] = job_json + except nomad.api.exceptions.BadRequestNomadException as err: + msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text) + module.fail_json(msg=to_native(msg)) + try: + job_id = job_json.get('ID') + plan = nomad_client.job.plan_job(job_id, job, diff=True) + if not plan['Diff'].get('Type') == "None": + changed = True + if not module.check_mode: + result = nomad_client.jobs.register_job(job) + else: + result = plan + else: + result = plan + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('force_start'): + + try: + job = dict() + if module.params.get('name'): + job_name = module.params.get('name') + else: + job_name = job_json['Name'] + job_json = nomad_client.job.get_job(job_name) + if job_json['Status'] == 'running': + result = job_json + else: + job_json['Status'] = 'running' + job_json['Stop'] = False + job['job'] = job_json + if not module.check_mode: + result = nomad_client.jobs.register_job(job) + else: + result = nomad_client.validate.validate_job(job) + if not result.status_code == 200: + module.fail_json(msg=to_native(result.text)) + result = json.loads(result.text) + changed = True + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('state') == "absent": + + try: + if not module.params.get('name') is None: + job_name = module.params.get('name') + else: + if module.params.get('content_format') == 'hcl': + job_json = nomad_client.jobs.parse(module.params.get('content')) + job_name = job_json['Name'] + if module.params.get('content_format') == 'json': + job_json = module.params.get('content') + job_name = job_json['Name'] + job = nomad_client.job.get_job(job_name) + if job['Status'] == 'dead': + changed = False + result = job + else: + if not module.check_mode: + result = nomad_client.job.deregister_job(job_name) + else: + result = job + changed = True + except Exception as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(changed=changed, result=result) + + +def main(): + + run() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/nomad_job_info.py 
b/plugins/modules/nomad_job_info.py deleted file mode 120000 index 9749646dc4..0000000000 --- a/plugins/modules/nomad_job_info.py +++ /dev/null @@ -1 +0,0 @@ -clustering/nomad/nomad_job_info.py \ No newline at end of file diff --git a/plugins/modules/nomad_job_info.py b/plugins/modules/nomad_job_info.py new file mode 100644 index 0000000000..98cec59746 --- /dev/null +++ b/plugins/modules/nomad_job_info.py @@ -0,0 +1,339 @@ +#!/usr/bin/python + +# Copyright (c) 2020, FERREIRA Christophe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: nomad_job_info +author: FERREIRA Christophe (@chris93111) +version_added: "1.3.0" +short_description: Get Nomad Jobs info +description: + - Get info for one Nomad job. + - List Nomad jobs. +requirements: + - python-nomad +extends_documentation_fragment: + - community.general.nomad + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - Name of job for Get info. + - If not specified, lists all jobs. + type: str +seealso: + - name: Nomad jobs documentation + description: Complete documentation for Nomad API jobs. + link: https://www.nomadproject.io/api-docs/jobs/ +""" + +EXAMPLES = r""" +- name: Get info for job awx + community.general.nomad_job_info: + host: localhost + name: awx + register: result + +- name: List Nomad jobs + community.general.nomad_job_info: + host: localhost + register: result +""" + +RETURN = r""" +result: + description: List with dictionary contains jobs info. + returned: success + type: list + sample: + [ + { + "Affinities": null, + "AllAtOnce": false, + "Constraints": null, + "ConsulToken": "", + "CreateIndex": 13, + "Datacenters": [ + "dc1" + ], + "Dispatched": false, + "ID": "example", + "JobModifyIndex": 13, + "Meta": null, + "ModifyIndex": 13, + "Multiregion": null, + "Name": "example", + "Namespace": "default", + "NomadTokenID": "", + "ParameterizedJob": null, + "ParentID": "", + "Payload": null, + "Periodic": null, + "Priority": 50, + "Region": "global", + "Spreads": null, + "Stable": false, + "Status": "pending", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1602244370615307000, + "TaskGroups": [ + { + "Affinities": null, + "Constraints": null, + "Count": 1, + "EphemeralDisk": { + "Migrate": false, + "SizeMB": 300, + "Sticky": false + }, + "Meta": null, + "Migrate": { + "HealthCheck": "checks", + "HealthyDeadline": 300000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000 + }, + "Name": "cache", + "Networks": null, + "ReschedulePolicy": { + "Attempts": 0, + "Delay": 30000000000, + "DelayFunction": "exponential", + "Interval": 0, + "MaxDelay": 3600000000000, + "Unlimited": true + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Scaling": null, + "Services": null, + "ShutdownDelay": null, + "Spreads": null, + "StopAfterClientDisconnect": null, + "Tasks": [ + { + "Affinities": null, + "Artifacts": null, + "CSIPluginConfig": null, + "Config": { + "image": "redis:3.2", + "port_map": [ + { + "db": 6379.0 + } + ] + }, + "Constraints": null, + "DispatchPayload": null, + "Driver": "docker", + "Env": null, + "KillSignal": "", + "KillTimeout": 5000000000, + "Kind": "", + "Leader": false, + "Lifecycle": null, + "LogConfig": { + "MaxFileSizeMB": 10, + "MaxFiles": 10 + }, + "Meta": null, + "Name": "redis", + 
"Resources": { + "CPU": 500, + "Devices": null, + "DiskMB": 0, + "IOPS": 0, + "MemoryMB": 256, + "Networks": [ + { + "CIDR": "", + "DNS": null, + "Device": "", + "DynamicPorts": [ + { + "HostNetwork": "default", + "Label": "db", + "To": 0, + "Value": 0 + } + ], + "IP": "", + "MBits": 10, + "Mode": "", + "ReservedPorts": null + } + ] + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Services": [ + { + "AddressMode": "auto", + "CanaryMeta": null, + "CanaryTags": null, + "Checks": [ + { + "AddressMode": "", + "Args": null, + "CheckRestart": null, + "Command": "", + "Expose": false, + "FailuresBeforeCritical": 0, + "GRPCService": "", + "GRPCUseTLS": false, + "Header": null, + "InitialStatus": "", + "Interval": 10000000000, + "Method": "", + "Name": "alive", + "Path": "", + "PortLabel": "", + "Protocol": "", + "SuccessBeforePassing": 0, + "TLSSkipVerify": false, + "TaskName": "", + "Timeout": 2000000000, + "Type": "tcp" + } + ], + "Connect": null, + "EnableTagOverride": false, + "Meta": null, + "Name": "redis-cache", + "PortLabel": "db", + "Tags": [ + "global", + "cache" + ], + "TaskName": "" + } + ], + "ShutdownDelay": 0, + "Templates": null, + "User": "", + "Vault": null, + "VolumeMounts": null + } + ], + "Update": { + "AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "checks", + "HealthyDeadline": 180000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000, + "ProgressDeadline": 600000000000, + "Stagger": 30000000000 + }, + "Volumes": null + } + ], + "Type": "service", + "Update": { + "AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "", + "HealthyDeadline": 0, + "MaxParallel": 1, + "MinHealthyTime": 0, + "ProgressDeadline": 0, + "Stagger": 30000000000 + }, + "VaultNamespace": "", + "VaultToken": "", + "Version": 0 + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +import_nomad = None +try: + import nomad + import_nomad = True +except ImportError: + import_nomad = False + + +def run(): + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True, type='str'), + port=dict(type='int', default=4646), + use_ssl=dict(type='bool', default=True), + timeout=dict(type='int', default=5), + validate_certs=dict(type='bool', default=True), + client_cert=dict(type='path'), + client_key=dict(type='path'), + namespace=dict(type='str'), + name=dict(type='str'), + token=dict(type='str', no_log=True) + ), + supports_check_mode=True + ) + + if not import_nomad: + module.fail_json(msg=missing_required_lib("python-nomad")) + + certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + + nomad_client = nomad.Nomad( + host=module.params.get('host'), + port=module.params.get('port'), + secure=module.params.get('use_ssl'), + timeout=module.params.get('timeout'), + verify=module.params.get('validate_certs'), + cert=certificate_ssl, + namespace=module.params.get('namespace'), + token=module.params.get('token') + ) + + changed = False + result = list() + try: + job_list = nomad_client.jobs.get_jobs() + for job in job_list: + result.append(nomad_client.job.get_job(job.get('ID'))) + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('name'): + filter = list() + try: + for job in result: + if job.get('ID') == module.params.get('name'): + filter.append(job) + result = filter + if not filter: + 
module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name'))) + except Exception as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(changed=changed, result=result) + + +def main(): + + run() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/nomad_token.py b/plugins/modules/nomad_token.py new file mode 100644 index 0000000000..8484334f91 --- /dev/null +++ b/plugins/modules/nomad_token.py @@ -0,0 +1,299 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Pedro Nascimento +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: nomad_token +author: Pedro Nascimento (@apecnascimento) +version_added: "8.1.0" +short_description: Manage Nomad ACL tokens +description: + - This module allows to create Bootstrap tokens, create ACL tokens, update ACL tokens, and delete ACL tokens. +requirements: + - python-nomad +extends_documentation_fragment: + - community.general.nomad + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of ACL token to create. + type: str + token_type: + description: + - The type of the token can be V(client), V(management), or V(bootstrap). + choices: ["client", "management", "bootstrap"] + type: str + default: "client" + policies: + description: + - A list of the policies assigned to the token. + type: list + elements: str + default: [] + global_replicated: + description: + - Indicates whether or not the token was created with the C(--global). + type: bool + default: false + state: + description: + - Create or remove ACL token. + choices: ["present", "absent"] + required: true + type: str + +seealso: + - name: Nomad ACL documentation + description: Complete documentation for Nomad API ACL. + link: https://developer.hashicorp.com/nomad/api-docs/acl/tokens +""" + +EXAMPLES = r""" +- name: Create boostrap token + community.general.nomad_token: + host: localhost + token_type: bootstrap + state: present + +- name: Create ACL token + community.general.nomad_token: + host: localhost + name: "Dev token" + token_type: client + policies: + - readonly + global_replicated: false + state: absent + +- name: Update ACL token Dev token + community.general.nomad_token: + host: localhost + name: "Dev token" + token_type: client + policies: + - readonly + - devpolicy + global_replicated: false + state: absent + +- name: Delete ACL token + community.general.nomad_token: + host: localhost + name: "Dev token" + state: absent +""" + +RETURN = r""" +result: + description: Result returned by nomad. 
+  returned: always
+  type: dict
+  sample:
+    {
+      "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e",
+      "create_index": 14,
+      "create_time": "2023-11-12T18:48:34.248857001Z",
+      "expiration_time": null,
+      "expiration_ttl": "",
+      "global": true,
+      "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=",
+      "modify_index": 836,
+      "name": "devs",
+      "policies": [
+        "readonly"
+      ],
+      "roles": null,
+      "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea",
+      "type": "client"
+    }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+import_nomad = None
+
+try:
+    import nomad
+
+    import_nomad = True
+except ImportError:
+    import_nomad = False
+
+
+def get_token(name, nomad_client):
+    tokens = nomad_client.acl.get_tokens()
+    token = next((token for token in tokens
+                  if token.get('Name') == name), None)
+    return token
+
+
+def transform_response(nomad_response):
+    transformed_response = {
+        "accessor_id": nomad_response['AccessorID'],
+        "create_index": nomad_response['CreateIndex'],
+        "create_time": nomad_response['CreateTime'],
+        "expiration_ttl": nomad_response['ExpirationTTL'],
+        "expiration_time": nomad_response['ExpirationTime'],
+        "global": nomad_response['Global'],
+        "hash": nomad_response['Hash'],
+        "modify_index": nomad_response['ModifyIndex'],
+        "name": nomad_response['Name'],
+        "policies": nomad_response['Policies'],
+        "roles": nomad_response['Roles'],
+        "secret_id": nomad_response['SecretID'],
+        "type": nomad_response['Type']
+    }
+
+    return transformed_response
+
+
+argument_spec = dict(
+    host=dict(required=True, type='str'),
+    port=dict(type='int', default=4646),
+    state=dict(required=True, choices=['present', 'absent']),
+    use_ssl=dict(type='bool', default=True),
+    timeout=dict(type='int', default=5),
+    validate_certs=dict(type='bool', default=True),
+    client_cert=dict(type='path'),
+    client_key=dict(type='path'),
+    namespace=dict(type='str'),
+    token=dict(type='str', no_log=True),
+    name=dict(type='str'),
+    token_type=dict(choices=['client', 'management', 'bootstrap'], default='client'),
+    policies=dict(type='list', elements='str', default=[]),
+    global_replicated=dict(type='bool', default=False),
+)
+
+
+def setup_module_object():
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=False,
+        required_one_of=[
+            ['name', 'token_type']
+        ],
+        required_if=[
+            ('token_type', 'client', ('name',)),
+            ('token_type', 'management', ('name',)),
+        ],
+    )
+    return module
+
+
+def setup_nomad_client(module):
+    if not import_nomad:
+        module.fail_json(msg=missing_required_lib("python-nomad"))
+
+    certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+    nomad_client = nomad.Nomad(
+        host=module.params.get('host'),
+        port=module.params.get('port'),
+        secure=module.params.get('use_ssl'),
+        timeout=module.params.get('timeout'),
+        verify=module.params.get('validate_certs'),
+        cert=certificate_ssl,
+        namespace=module.params.get('namespace'),
+        token=module.params.get('token')
+    )
+
+    return nomad_client
+
+
+def run(module):
+    nomad_client = setup_nomad_client(module)
+
+    msg = ""
+    result = {}
+    changed = False
+    if module.params.get('state') == "present":
+
+        if module.params.get('token_type') == 'bootstrap':
+            try:
+                current_token = get_token('Bootstrap Token', nomad_client)
+                if current_token:
+                    msg = "ACL bootstrap token already exists."
+                else:
+                    nomad_result = nomad_client.acl.generate_bootstrap()
+                    msg = "Bootstrap token created."
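+                    # generate_bootstrap() creates the cluster's initial
+                    # management token and returns it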
+                    result = transform_response(nomad_result)
+                    changed = True
+
+            except nomad.api.exceptions.URLNotAuthorizedNomadException:
+                try:
+                    nomad_result = nomad_client.acl.generate_bootstrap()
+                    msg = "Bootstrap token created."
+                    result = transform_response(nomad_result)
+                    changed = True
+
+                except Exception as e:
+                    module.fail_json(msg=to_native(e))
+        else:
+            try:
+                token_info = {
+                    "Name": module.params.get('name'),
+                    "Type": module.params.get('token_type'),
+                    "Policies": module.params.get('policies'),
+                    "Global": module.params.get('global_replicated')
+                }
+
+                current_token = get_token(token_info['Name'], nomad_client)
+
+                if current_token:
+                    token_info['AccessorID'] = current_token['AccessorID']
+                    nomad_result = nomad_client.acl.update_token(current_token['AccessorID'], token_info)
+                    msg = "ACL token updated."
+                    result = transform_response(nomad_result)
+                    changed = True
+
+                else:
+                    nomad_result = nomad_client.acl.create_token(token_info)
+                    msg = "ACL token created."
+                    result = transform_response(nomad_result)
+                    changed = True
+
+            except Exception as e:
+                module.fail_json(msg=to_native(e))
+
+    if module.params.get('state') == "absent":
+
+        if not module.params.get('name'):
+            module.fail_json(msg="name is needed to delete a token.")
+
+        if module.params.get('token_type') == 'bootstrap' or module.params.get('name') == 'Bootstrap Token':
+            module.fail_json(msg="Deleting the ACL bootstrap token is not allowed.")
+
+        try:
+            token = get_token(module.params.get('name'), nomad_client)
+            if token:
+                nomad_client.acl.delete_token(token.get('AccessorID'))
+                msg = 'ACL token deleted.'
+                changed = True
+            else:
+                msg = "No token with name '{0}' found".format(module.params.get('name'))
+
+        except Exception as e:
+            module.fail_json(msg=to_native(e))
+
+    module.exit_json(changed=changed, msg=msg, result=result)
+
+
+def main():
+    module = setup_module_object()
+    run(module)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/nosh.py b/plugins/modules/nosh.py
deleted file mode 120000
index be8004a7eb..0000000000
--- a/plugins/modules/nosh.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/nosh.py
\ No newline at end of file
diff --git a/plugins/modules/nosh.py b/plugins/modules/nosh.py
new file mode 100644
index 0000000000..1befdad369
--- /dev/null
+++ b/plugins/modules/nosh.py
@@ -0,0 +1,552 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Thomas Caravia
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: nosh
+author:
+  - "Thomas Caravia (@tacatac)"
+short_description: Manage services with nosh
+description:
+  - Control running and enabled state for system-wide or user services.
+  - BSD and Linux systems are supported.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    required: true
+    description:
+      - Name of the service to manage.
+  state:
+    type: str
+    required: false
+    choices: [started, stopped, reset, restarted, reloaded]
+    description:
+      - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+      - V(restarted) always bounces the service.
+      - V(reloaded) sends a SIGHUP or starts the service.
+      - V(reset) starts or stops the service according to whether it is enabled or not.
+ enabled: + required: false + type: bool + description: + - Enable or disable the service, independently of C(*.preset) file preference or running state. Mutually exclusive with + O(preset). It takes effect prior to O(state=reset). + preset: + required: false + type: bool + description: + - Enable or disable the service according to local preferences in C(*.preset) files. Mutually exclusive with O(enabled). + Only has an effect if set to true. It takes effect prior to O(state=reset). + user: + required: false + default: false + type: bool + description: + - Run system-control talking to the calling user's service manager, rather than the system-wide service manager. +requirements: + - A system with an active nosh service manager, see Notes for further information. +notes: + - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/). +""" + +EXAMPLES = r""" +- name: Start dnscache if not running + community.general.nosh: + name: dnscache + state: started + +- name: Stop mpd, if running + community.general.nosh: + name: mpd + state: stopped + +- name: Restart unbound or start it if not already running + community.general.nosh: + name: unbound + state: restarted + +- name: Reload fail2ban or start it if not already running + community.general.nosh: + name: fail2ban + state: reloaded + +- name: Disable nsd + community.general.nosh: + name: nsd + enabled: false + +- name: For package installers, set nginx running state according to local enable settings, preset and reset + community.general.nosh: + name: nginx + preset: true + state: reset + +- name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is + community.general.nosh: + name: reboot + state: started + +- name: Using conditionals with the module facts + tasks: + - name: Obtain information on tinydns service + community.general.nosh: + name: tinydns + register: result + + - name: Fail if service not loaded + ansible.builtin.fail: + msg: "The {{ result.name }} service is not loaded" + when: not result.status + + - name: Fail if service is running + ansible.builtin.fail: + msg: "The {{ result.name }} service is running" + when: result.status and result.status['DaemontoolsEncoreState'] == "running" +""" + +RETURN = r""" +name: + description: Name used to find the service. + returned: success + type: str + sample: "sshd" +service_path: + description: Resolved path for the service. + returned: success + type: str + sample: "/var/sv/sshd" +enabled: + description: Whether the service is enabled at system bootstrap. + returned: success + type: bool + sample: true +preset: + description: Whether the enabled status reflects the one set in the relevant C(*.preset) file. + returned: success + type: bool + sample: 'False' +state: + description: Service process run state, V(none) if the service is not loaded and will not be started. + returned: if state option is used + type: str + sample: "reloaded" +status: + description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is + not loaded. 
+ returned: success + type: complex + contains: + After: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic", "../sshdgenkeys", "log"] + Before: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Conflicts: + description: [] # FIXME + returned: success + type: list + sample: [] + DaemontoolsEncoreState: + description: [] # FIXME + returned: success + type: str + sample: "running" + DaemontoolsState: + description: [] # FIXME + returned: success + type: str + sample: "up" + Enabled: + description: [] # FIXME + returned: success + type: bool + sample: true + LogService: + description: [] # FIXME + returned: success + type: str + sample: "../cyclog@sshd" + MainPID: + description: [] # FIXME + returned: success + type: int + sample: 661 + Paused: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + ReadyAfterRun: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + RemainAfterExit: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + Required-By: + description: [] # FIXME + returned: success + type: list + sample: [] + RestartExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + RestartExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + RestartTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + RestartUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + RunExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + RunExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + RunTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + RunUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + StartExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: 1 + StartExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + StartTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + StartUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + StopExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + StopExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + StopTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + StopUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + Stopped-By: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Timestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + UTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + Want: + description: [] # FIXME + returned: success + type: str + sample: "nothing" + Wanted-By: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/server", "/etc/service-bundles/targets/sockets"] + Wants: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic", "../sshdgenkeys"] +user: + description: Whether the 
user-level service manager is called. + returned: success + type: bool + sample: false +""" + + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.service import fail_if_missing +from ansible.module_utils.common.text.converters import to_native + + +def run_sys_ctl(module, args): + sys_ctl = [module.get_bin_path('system-control', required=True)] + if module.params['user']: + sys_ctl = sys_ctl + ['--user'] + return module.run_command(sys_ctl + args) + + +def get_service_path(module, service): + (rc, out, err) = run_sys_ctl(module, ['find', service]) + # fail if service not found + if rc != 0: + fail_if_missing(module, False, service, msg='host') + else: + return to_native(out).strip() + + +def service_is_enabled(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path]) + return rc == 0 + + +def service_is_preset_enabled(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path]) + return to_native(out).strip().startswith("enable") + + +def service_is_loaded(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path]) + return rc == 0 + + +def get_service_status(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['show-json', service_path]) + # will fail if not service is not loaded + if err is not None and err: + module.fail_json(msg=err) + else: + json_out = json.loads(to_native(out).strip()) + status = json_out[service_path] # descend past service path header + return status + + +def service_is_running(service_status): + return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running']) + + +def handle_enabled(module, result, service_path): + """Enable or disable a service as needed. + + - 'preset' will set the enabled state according to available preset file settings. + - 'enabled' will set the enabled state explicitly, independently of preset settings. + + These options are set to "mutually exclusive" but the explicit 'enabled' option will + have priority if the check is bypassed. + """ + + # computed prior in control flow + preset = result['preset'] + enabled = result['enabled'] + + # preset, effect only if option set to true (no reverse preset) + if module.params['preset']: + action = 'preset' + + # run preset if needed + if preset != module.params['preset']: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err)) + result['preset'] = not preset + result['enabled'] = not enabled + + # enabled/disabled state + if module.params['enabled'] is not None: + if module.params['enabled']: + action = 'enable' + else: + action = 'disable' + + # change enable/disable if needed + if enabled != module.params['enabled']: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err)) + result['enabled'] = not enabled + result['preset'] = not preset + + +def handle_state(module, result, service_path): + """Set service running state as needed. + + Takes into account the fact that a service may not be loaded (no supervise directory) in + which case it is 'stopped' as far as the service manager is concerned. No status information + can be obtained and the service can only be 'started'. 
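+    When the service is already running, 'restarted' maps to the condrestart
+    subcommand and 'reloaded' to hangup.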
+ """ + # default to desired state, no action + result['state'] = module.params['state'] + state = module.params['state'] + action = None + + # computed prior in control flow, possibly modified by handle_enabled() + enabled = result['enabled'] + + # service not loaded -> not started by manager, no status information + if not service_is_loaded(module, service_path): + if state in ['started', 'restarted', 'reloaded']: + action = 'start' + result['state'] = 'started' + elif state == 'reset': + if enabled: + action = 'start' + result['state'] = 'started' + else: + result['state'] = None + else: + result['state'] = None + + # service is loaded + else: + # get status information + result['status'] = get_service_status(module, service_path) + running = service_is_running(result['status']) + + if state == 'started': + if not running: + action = 'start' + elif state == 'stopped': + if running: + action = 'stop' + # reset = start/stop according to enabled status + elif state == 'reset': + if enabled is not running: + if running: + action = 'stop' + result['state'] = 'stopped' + else: + action = 'start' + result['state'] = 'started' + # start if not running, 'service' module constraint + elif state == 'restarted': + if not running: + action = 'start' + result['state'] = 'started' + else: + action = 'condrestart' + # start if not running, 'service' module constraint + elif state == 'reloaded': + if not running: + action = 'start' + result['state'] = 'started' + else: + action = 'hangup' + + # change state as needed + if action: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err)) + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']), + enabled=dict(type='bool'), + preset=dict(type='bool'), + user=dict(type='bool', default=False), + ), + supports_check_mode=True, + mutually_exclusive=[['enabled', 'preset']], + ) + + service = module.params['name'] + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + 'status': None, + } + + # check service can be found (or fail) and get path + service_path = get_service_path(module, service) + + # get preliminary service facts + result['service_path'] = service_path + result['user'] = module.params['user'] + result['enabled'] = service_is_enabled(module, service_path) + result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path) + + # set enabled state, service need not be loaded + if module.params['enabled'] is not None or module.params['preset']: + handle_enabled(module, result, service_path) + + # set service running state + if module.params['state'] is not None: + handle_state(module, result, service_path) + + # get final service status if possible + if service_is_loaded(module, service_path): + result['status'] = get_service_status(module, service_path) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/bearychat.py b/plugins/modules/notification/bearychat.py deleted file mode 100644 index 4c907ea6b7..0000000000 --- a/plugins/modules/notification/bearychat.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Jiangge Zhang -# -# This file is 
part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: bearychat -short_description: Send BearyChat notifications -description: - - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com) - via the Incoming Robot integration. -author: "Jiangge Zhang (@tonyseek)" -options: - url: - type: str - description: - - BearyChat WebHook URL. This authenticates you to the bearychat - service. It looks like - C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60). - required: true - text: - type: str - description: - - Message to send. - markdown: - description: - - If C(yes), text will be parsed as markdown. - default: 'yes' - type: bool - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the - default channel selected by the I(url). - attachments: - type: list - elements: dict - description: - - Define a list of attachments. For more information, see - https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments -''' - -EXAMPLES = """ -- name: Send notification message via BearyChat - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - -- name: Send notification message via BearyChat all options - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - markdown: no - channel: "#ansible" - attachments: - - title: "Ansible on {{ inventory_hostname }}" - text: "May the Force be with you." 
- color: "#ffffff" - images: - - http://example.com/index.png -""" - -RETURN = """ -msg: - description: execution result - returned: success - type: str - sample: "OK" -""" - -try: - from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse - HAS_URLPARSE = True -except Exception: - HAS_URLPARSE = False -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def build_payload_for_bearychat(module, text, markdown, channel, attachments): - payload = {} - if text is not None: - payload['text'] = text - if markdown is not None: - payload['markdown'] = markdown - if channel is not None: - payload['channel'] = channel - if attachments is not None: - payload.setdefault('attachments', []).extend( - build_payload_for_bearychat_attachment( - module, item.get('title'), item.get('text'), item.get('color'), - item.get('images')) - for item in attachments) - payload = 'payload=%s' % module.jsonify(payload) - return payload - - -def build_payload_for_bearychat_attachment(module, title, text, color, images): - attachment = {} - if title is not None: - attachment['title'] = title - if text is not None: - attachment['text'] = text - if color is not None: - attachment['color'] = color - if images is not None: - target_images = attachment.setdefault('images', []) - if not isinstance(images, (list, tuple)): - images = [images] - for image in images: - if isinstance(image, dict) and 'url' in image: - image = {'url': image['url']} - elif hasattr(image, 'startswith') and image.startswith('http'): - image = {'url': image} - else: - module.fail_json( - msg="BearyChat doesn't have support for this kind of " - "attachment image") - target_images.append(image) - return attachment - - -def do_notify_bearychat(module, url, payload): - response, info = fetch_url(module, url, data=payload) - if info['status'] != 200: - url_info = urlparse(url) - obscured_incoming_webhook = urlunparse( - (url_info.scheme, url_info.netloc, '[obscured]', '', '', '')) - module.fail_json( - msg=" failed to send %s to %s: %s" % ( - payload, obscured_incoming_webhook, info['msg'])) - - -def main(): - module = AnsibleModule(argument_spec={ - 'url': dict(type='str', required=True, no_log=True), - 'text': dict(type='str'), - 'markdown': dict(default=True, type='bool'), - 'channel': dict(type='str'), - 'attachments': dict(type='list', elements='dict'), - }) - - if not HAS_URLPARSE: - module.fail_json(msg='urlparse is not installed') - - url = module.params['url'] - text = module.params['text'] - markdown = module.params['markdown'] - channel = module.params['channel'] - attachments = module.params['attachments'] - - payload = build_payload_for_bearychat( - module, text, markdown, channel, attachments) - do_notify_bearychat(module, url, payload) - - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/campfire.py b/plugins/modules/notification/campfire.py deleted file mode 100644 index c684823889..0000000000 --- a/plugins/modules/notification/campfire.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: campfire -short_description: Send a message to Campfire -description: - - Send a message to Campfire. 
- - Messages with newlines will result in a "Paste" message being sent. -options: - subscription: - type: str - description: - - The subscription name to use. - required: true - token: - type: str - description: - - API token. - required: true - room: - type: str - description: - - Room number to which the message should be sent. - required: true - msg: - type: str - description: - - The message body. - required: true - notify: - type: str - description: - - Send a notification sound before the message. - required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", - "cottoneyejoe", "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", "greatjob", "greyjoy", - "guarantee", "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", "makeitso", "noooo", - "nyan", "ohmy", "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", "yodel"] - -# informational: requirements for nodes -requirements: [ ] -author: "Adam Garside (@fabulops)" -''' - -EXAMPLES = ''' -- name: Send a message to Campfire - community.general.campfire: - subscription: foo - token: 12345 - room: 123 - msg: Task completed. - -- name: Send a message to Campfire - community.general.campfire: - subscription: foo - token: 12345 - room: 123 - notify: loggins - msg: Task completed ... with feeling. -''' - -try: - from html import escape as html_escape -except ImportError: - # Python-3.2 or later - import cgi - - def html_escape(text, quote=True): - return cgi.escape(text, quote) - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - subscription=dict(required=True), - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", - "clowntown", "cottoneyejoe", - "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", - "greatjob", "greyjoy", "guarantee", - "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", - "makeitso", "noooo", "nyan", "ohmy", - "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", - "sexyback", "story", "tada", "tmyk", - "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", - "yodel"]), - ), - supports_check_mode=False - ) - - subscription = module.params["subscription"] - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - notify = module.params["notify"] - - URI = "https://%s.campfirenow.com" % subscription - NSTR = "SoundMessage%s" - MSTR = "%s" - AGENT = "Ansible/1.2" - - # Hack to add basic auth username and password the way fetch_url expects - module.params['url_username'] = token - module.params['url_password'] = 'X' - - target_url = '%s/room/%s/speak.xml' % (URI, room) - headers = {'Content-Type': 'application/xml', - 'User-agent': AGENT} - - # Send some audible notification if requested - if notify: - response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers) - if info['status'] not in [200, 201]: - module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (notify, info['status'])) - - # Send the message - response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers) - if info['status'] not in [200, 201]: - 
module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (msg, info['status'])) - - module.exit_json(changed=True, room=room, msg=msg, notify=notify) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/catapult.py b/plugins/modules/notification/catapult.py deleted file mode 100644 index 1383362068..0000000000 --- a/plugins/modules/notification/catapult.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Jonathan Mainguy -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# basis of code taken from the ansible twillio and nexmo modules - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: catapult -short_description: Send a sms / mms using the catapult bandwidth api -description: - - Allows notifications to be sent using sms / mms via the catapult bandwidth api. -options: - src: - type: str - description: - - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)). - required: true - dest: - type: list - elements: str - description: - - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)). - required: true - msg: - type: str - description: - - The contents of the text message (must be 2048 characters or less). - required: true - media: - type: str - description: - - For MMS messages, a media url to the location of the media to be sent with the message. - user_id: - type: str - description: - - User Id from Api account page. - required: true - api_token: - type: str - description: - - Api Token from Api account page. - required: true - api_secret: - type: str - description: - - Api Secret from Api account page. - required: true - -author: "Jonathan Mainguy (@Jmainguy)" -notes: - - Will return changed even if the media url is wrong. - - Will return changed if the destination number is invalid. - -''' - -EXAMPLES = ''' -- name: Send a mms to multiple users - community.general.catapult: - src: "+15035555555" - dest: - - "+12525089000" - - "+12018994225" - media: "http://example.com/foobar.jpg" - msg: "Task is complete" - user_id: "{{ user_id }}" - api_token: "{{ api_token }}" - api_secret: "{{ api_secret }}" - -- name: Send a sms to a single user - community.general.catapult: - src: "+15035555555" - dest: "+12018994225" - msg: "Consider yourself notified" - user_id: "{{ user_id }}" - api_token: "{{ api_token }}" - api_secret: "{{ api_secret }}" - -''' - -RETURN = ''' -changed: - description: Whether the api accepted the message. 
- returned: always - type: bool - sample: True -''' - - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def send(module, src, dest, msg, media, user_id, api_token, api_secret): - """ - Send the message - """ - AGENT = "Ansible" - URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id - data = {'from': src, 'to': dest, 'text': msg} - if media: - data['media'] = media - - headers = {'User-Agent': AGENT, 'Content-type': 'application/json'} - - # Hack module params to have the Basic auth params that fetch_url expects - module.params['url_username'] = api_token.replace('\n', '') - module.params['url_password'] = api_secret.replace('\n', '') - - return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - src=dict(required=True), - dest=dict(required=True, type='list', elements='str'), - msg=dict(required=True), - user_id=dict(required=True), - api_token=dict(required=True, no_log=True), - api_secret=dict(required=True, no_log=True), - media=dict(default=None, required=False), - ), - ) - - src = module.params['src'] - dest = module.params['dest'] - msg = module.params['msg'] - media = module.params['media'] - user_id = module.params['user_id'] - api_token = module.params['api_token'] - api_secret = module.params['api_secret'] - - for number in dest: - rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret) - if info["status"] != 201: - body = json.loads(info["body"]) - fail_msg = body["message"] - module.fail_json(msg=fail_msg) - - changed = True - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/cisco_spark.py b/plugins/modules/notification/cisco_spark.py deleted file mode 120000 index 6fe1011ffd..0000000000 --- a/plugins/modules/notification/cisco_spark.py +++ /dev/null @@ -1 +0,0 @@ -cisco_webex.py \ No newline at end of file diff --git a/plugins/modules/notification/cisco_webex.py b/plugins/modules/notification/cisco_webex.py deleted file mode 100644 index 8c1361fb14..0000000000 --- a/plugins/modules/notification/cisco_webex.py +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cisco_webex -short_description: Send a message to a Cisco Webex Teams Room or Individual -description: - - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting. -author: Drew Rusell (@drew-russell) -notes: - - The C(recipient_id) type must be valid for the supplied C(recipient_id). - - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics). - -options: - - recipient_type: - description: - - The request parameter you would like to send the message to. - - Messages can be sent to either a room or individual (by ID or E-Mail). - required: yes - choices: ['roomId', 'toPersonEmail', 'toPersonId'] - type: str - - recipient_id: - description: - - The unique identifier associated with the supplied C(recipient_type). - required: yes - type: str - - msg_type: - description: - - Specifies how you would like the message formatted. 
- default: text - choices: ['text', 'markdown'] - type: str - aliases: ['message_type'] - - personal_token: - description: - - Your personal access token required to validate the Webex Teams API. - required: yes - aliases: ['token'] - type: str - - msg: - description: - - The message you would like to send. - required: yes - type: str -''' - -EXAMPLES = """ -# Note: The following examples assume a variable file has been imported -# that contains the appropriate information. - -- name: Cisco Webex Teams - Markdown Message to a Room - community.general.cisco_webex: - recipient_type: roomId - recipient_id: "{{ room_id }}" - msg_type: markdown - personal_token: "{{ token }}" - msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**" - -- name: Cisco Webex Teams - Text Message to a Room - community.general.cisco_webex: - recipient_type: roomId - recipient_id: "{{ room_id }}" - msg_type: text - personal_token: "{{ token }}" - msg: "Cisco Webex Teams Ansible Module - Room Message in Text" - -- name: Cisco Webex Teams - Text Message by an Individuals ID - community.general.cisco_webex: - recipient_type: toPersonId - recipient_id: "{{ person_id}}" - msg_type: text - personal_token: "{{ token }}" - msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID" - -- name: Cisco Webex Teams - Text Message by an Individuals E-Mail Address - community.general.cisco_webex: - recipient_type: toPersonEmail - recipient_id: "{{ person_email }}" - msg_type: text - personal_token: "{{ token }}" - msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail" - -""" - -RETURN = """ -status_code: - description: - - The Response Code returned by the Webex Teams API. - - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). - returned: always - type: int - sample: 200 - -message: - description: - - The Response Message returned by the Webex Teams API. - - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). - returned: always - type: str - sample: OK (585 bytes) -""" -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def webex_msg(module): - """When check mode is specified, establish a read only connection, that does not return any user specific - data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual""" - - # Ansible Specific Variables - results = {} - ansible = module.params - - headers = { - 'Authorization': 'Bearer {0}'.format(ansible['personal_token']), - 'content-type': 'application/json' - } - - if module.check_mode: - url = "https://webexapis.com/v1/people/me" - payload = None - - else: - url = "https://webexapis.com/v1/messages" - - payload = { - ansible['recipient_type']: ansible['recipient_id'], - ansible['msg_type']: ansible['msg'] - } - - payload = module.jsonify(payload) - - response, info = fetch_url(module, url, data=payload, headers=headers) - - status_code = info['status'] - msg = info['msg'] - - # Module will fail if the response is not 200 - if status_code != 200: - results['failed'] = True - results['status_code'] = status_code - results['message'] = msg - else: - results['failed'] = False - results['status_code'] = status_code - - if module.check_mode: - results['message'] = 'Authentication Successful.' - else: - results['message'] = msg - - return results - - -def main(): - '''Ansible main. 
''' - module = AnsibleModule( - argument_spec=dict( - recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), - recipient_id=dict(required=True, no_log=True), - msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), - personal_token=dict(required=True, no_log=True, aliases=['token']), - msg=dict(required=True), - ), - - supports_check_mode=True - ) - - results = webex_msg(module) - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/notification/discord.py b/plugins/modules/notification/discord.py deleted file mode 100644 index 27dc6fc85c..0000000000 --- a/plugins/modules/notification/discord.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Christian Wollinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: discord -short_description: Send Discord messages -version_added: 3.1.0 -description: - - Sends a message to a Discord channel using the Discord webhook API. -author: Christian Wollinger (@cwollinger) -seealso: - - name: API documentation - description: Documentation for Discord API - link: https://discord.com/developers/docs/resources/webhook#execute-webhook -options: - webhook_id: - description: - - The webhook ID. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." - required: yes - type: str - webhook_token: - description: - - The webhook token. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." - required: yes - type: str - content: - description: - - Content of the message to the Discord channel. - - At least one of I(content) and I(embeds) must be specified. - type: str - username: - description: - - Overrides the default username of the webhook. - type: str - avatar_url: - description: - - Overrides the default avatar of the webhook. - type: str - tts: - description: - - Set this to C(true) if this is a TTS (Text to Speech) message. - type: bool - default: false - embeds: - description: - - Send messages as Embeds to the Discord channel. - - Embeds can have a colored border, embedded images, text fields and more. - - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)" - - At least one of I(content) and I(embeds) must be specified. 
- type: list - elements: dict -''' - -EXAMPLES = """ -- name: Send a message to the Discord channel - community.general.discord: - webhook_id: "00000" - webhook_token: "XXXYYY" - content: "This is a message from ansible" - -- name: Send a message to the Discord channel with specific username and avatar - community.general.discord: - webhook_id: "00000" - webhook_token: "XXXYYY" - content: "This is a message from ansible" - username: Ansible - avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" - -- name: Send a embedded message to the Discord channel - community.general.discord: - webhook_id: "00000" - webhook_token: "XXXYYY" - embeds: - - title: "Embedded message" - description: "This is an embedded message" - footer: - text: "Author: Ansible" - image: - url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" - -- name: Send two embedded messages - community.general.discord: - webhook_id: "00000" - webhook_token: "XXXYYY" - embeds: - - title: "First message" - description: "This is my first embedded message" - footer: - text: "Author: Ansible" - image: - url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" - - title: "Second message" - description: "This is my first second message" - footer: - text: "Author: Ansible" - icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" - fields: - - name: "Field 1" - value: "Value of my first field" - - name: "Field 2" - value: "Value of my second field" - timestamp: "{{ ansible_date_time.iso8601 }}" -""" - -RETURN = """ -http_code: - description: - - Response Code returned by Discord API. - returned: always - type: int - sample: 204 -""" - -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.basic import AnsibleModule - - -def discord_check_mode(module): - - webhook_id = module.params['webhook_id'] - webhook_token = module.params['webhook_token'] - - headers = { - 'content-type': 'application/json' - } - - url = "https://discord.com/api/webhooks/%s/%s" % ( - webhook_id, webhook_token) - - response, info = fetch_url(module, url, method='GET', headers=headers) - return response, info - - -def discord_text_msg(module): - - webhook_id = module.params['webhook_id'] - webhook_token = module.params['webhook_token'] - content = module.params['content'] - user = module.params['username'] - avatar_url = module.params['avatar_url'] - tts = module.params['tts'] - embeds = module.params['embeds'] - - headers = { - 'content-type': 'application/json' - } - - url = "https://discord.com/api/webhooks/%s/%s" % ( - webhook_id, webhook_token) - - payload = { - 'content': content, - 'username': user, - 'avatar_url': avatar_url, - 'tts': tts, - 'embeds': embeds, - } - - payload = module.jsonify(payload) - - response, info = fetch_url(module, url, data=payload, headers=headers, method='POST') - return response, info - - -def main(): - module = AnsibleModule( - argument_spec=dict( - webhook_id=dict(type='str', required=True), - webhook_token=dict(type='str', required=True, no_log=True), - content=dict(type='str'), - username=dict(type='str'), - avatar_url=dict(type='str'), - tts=dict(type='bool', default=False), - embeds=dict(type='list', elements='dict'), - ), - required_one_of=[['content', 'embeds']], - supports_check_mode=True - ) - - result = dict( - changed=False, - http_code='', - ) - - if module.check_mode: - response, info = discord_check_mode(module) - if info['status'] != 200: - try: - module.fail_json(http_code=info['status'], 
msg=info['msg'], response=module.from_json(info['body']), info=info) - except Exception: - module.fail_json(http_code=info['status'], msg=info['msg'], info=info) - else: - module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read())) - else: - response, info = discord_text_msg(module) - if info['status'] != 204: - try: - module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) - except Exception: - module.fail_json(http_code=info['status'], msg=info['msg'], info=info) - else: - module.exit_json(msg=info['msg'], changed=True, http_code=info['status']) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/notification/flowdock.py b/plugins/modules/notification/flowdock.py deleted file mode 100644 index a1842c5d16..0000000000 --- a/plugins/modules/notification/flowdock.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: flowdock -author: "Matt Coddington (@mcodd)" -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -options: - token: - type: str - description: - - API token. - required: true - type: - type: str - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - type: str - description: - - Content of the message - required: true - tags: - type: str - description: - - tags of the message, separated by commas - required: false - external_user_name: - type: str - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - type: str - description: - - (inbox only - required) Email address of the message sender - required: false - source: - type: str - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - type: str - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - type: str - description: - - (inbox only) Name of the message sender - required: false - reply_to: - type: str - description: - - (inbox only) Email address for replies - required: false - project: - type: str - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - type: str - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
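The inbox/chat split above determines which options are mandatory. A pure-Python sketch of the rules that the module's main(), further down, enforces (the function name and dict layout are illustrative):

```python
# Illustrative sketch of flowdock's type-dependent parameter rules.
def flowdock_url_and_missing(msg_type, params):
    kind = "team_inbox" if msg_type == "inbox" else "chat"
    url = "https://api.flowdock.com/v1/messages/%s/%s" % (kind, params["token"])
    # 'chat' needs external_user_name; 'inbox' needs these three fields.
    required = (["external_user_name"] if msg_type == "chat"
                else ["from_address", "source", "subject"])
    missing = [k for k in required if not params.get(k)]
    return url, missing

url, missing = flowdock_url_and_missing("chat", {"token": "AAAAAA"})
print(missing)  # ['external_user_name'] -> the module would fail_json here
```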
- required: false - default: 'yes' - type: bool - -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Send a message to a flowdock - community.general.flowdock: - type: inbox - token: AAAAAA - from_address: user@example.com - source: my cool app - msg: test from ansible - subject: test subject - -- name: Send a message to a flowdock - community.general.flowdock: - type: chat - token: AAAAAA - external_user_name: testuser - msg: test from ansible - tags: tag1,tag2,tag3 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - msg=dict(required=True), - type=dict(required=True, choices=["inbox", "chat"]), - external_user_name=dict(required=False), - from_address=dict(required=False), - source=dict(required=False), - subject=dict(required=False), - from_name=dict(required=False), - reply_to=dict(required=False), - project=dict(required=False), - tags=dict(required=False), - link=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - type = module.params["type"] - token = module.params["token"] - if type == 'inbox': - url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) - else: - url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) - - params = {} - - # required params - params['content'] = module.params["msg"] - - # required params for the 'chat' type - if module.params['external_user_name']: - if type == 'inbox': - module.fail_json(msg="external_user_name is not valid for the 'inbox' type") - else: - params['external_user_name'] = module.params["external_user_name"] - elif type == 'chat': - module.fail_json(msg="external_user_name is required for the 'chat' type") - - # required params for the 'inbox' type - for item in ['from_address', 'source', 'subject']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - elif type == 'inbox': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # optional params - if module.params["tags"]: - params['tags'] = module.params["tags"] - - # optional params for the 'inbox' type - for item in ['from_name', 'reply_to', 'project', 'link']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=False) - - # Send the data to Flowdock - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: %s" % info['msg']) - - module.exit_json(changed=True, msg=module.params["msg"]) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/grove.py b/plugins/modules/notification/grove.py deleted file mode 100644 index 12c910902e..0000000000 --- a/plugins/modules/notification/grove.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: grove -short_description: Sends a notification to a grove.io channel -description: - - The C(grove) module sends a message for a service to a Grove.io - channel. -options: - channel_token: - type: str - description: - - Token of the channel to post to. - required: true - service: - type: str - description: - - Name of the service (displayed as the "user" in the message) - required: false - default: ansible - message_content: - type: str - description: - - Message content. - - The alias I(message) is deprecated and will be removed in community.general 4.0.0. - required: true - url: - type: str - description: - - Service URL for the web client - required: false - icon_url: - type: str - description: - - Icon for the service - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: 'yes' - type: bool -author: "Jonas Pfenniger (@zimbatm)" -''' - -EXAMPLES = ''' -- name: Sends a notification to a grove.io channel - community.general.grove: - channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg - service: my-app - message: 'deployed {{ target }}' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -BASE_URL = 'https://grove.io/api/notice/%s/' - -# ============================================================== -# do_notify_grove - - -def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None): - my_url = BASE_URL % (channel_token,) - - my_data = dict(service=service, message=message) - if url is not None: - my_data['url'] = url - if icon_url is not None: - my_data['icon_url'] = icon_url - - data = urlencode(my_data) - response, info = fetch_url(module, my_url, data=data) - if info['status'] != 200: - module.fail_json(msg="failed to send notification: %s" % info['msg']) - -# ============================================================== -# main - - -def main(): - module = AnsibleModule( - argument_spec=dict( - channel_token=dict(type='str', required=True, no_log=True), - message_content=dict(type='str', required=True), - service=dict(type='str', default='ansible'), - url=dict(type='str', default=None), - icon_url=dict(type='str', default=None), - validate_certs=dict(default=True, type='bool'), - ) - ) - - channel_token = module.params['channel_token'] - service = module.params['service'] - message = module.params['message_content'] - url = module.params['url'] - icon_url = module.params['icon_url'] - - do_notify_grove(module, channel_token, service, message, url, icon_url) - - # Mission complete - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/hipchat.py b/plugins/modules/notification/hipchat.py deleted file mode 100644 index 76c1227af4..0000000000 --- a/plugins/modules/notification/hipchat.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: hipchat -short_description: Send a message to Hipchat. 
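An aside on the grove module above, before the hipchat documentation continues: unlike the JSON webhook modules, it submits classic form-encoded data. A minimal standalone sketch (the channel token is the placeholder value from the docs):

```python
# Standalone sketch of the form-encoded POST behind do_notify_grove().
from urllib.parse import urlencode
from urllib.request import urlopen

CHANNEL_TOKEN = "6Ph62VBBJOccmtTPZbubiPzdrhipZXtg"  # placeholder
url = "https://grove.io/api/notice/%s/" % CHANNEL_TOKEN

data = urlencode({"service": "my-app", "message": "deployed"}).encode("ascii")
with urlopen(url, data=data) as resp:  # supplying data makes this a POST
    print(resp.status)  # the module treats anything but 200 as a failure
```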
-description: - - Send a message to a Hipchat room, with options to control the formatting. -options: - token: - type: str - description: - - API token. - required: true - room: - type: str - description: - - ID or name of the room. - required: true - msg_from: - type: str - description: - - Name the message will appear to be sent from. Max length is 15 - characters - above this it will be truncated. - default: Ansible - aliases: [from] - msg: - type: str - description: - - The message body. - required: true - color: - type: str - description: - - Background color for the message. - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - type: str - description: - - Message format. - default: text - choices: [ "text", "html" ] - notify: - description: - - If true, a notification will be triggered for users in the room. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - api: - type: str - description: - - API url if using a self-hosted hipchat server. For Hipchat API version - 2 use the default URI with C(/v2) instead of C(/v1). - default: 'https://api.hipchat.com/v1' - -author: -- Shirou Wakayama (@shirou) -- Paul Bourdel (@pb8226) -''' - -EXAMPLES = ''' -- name: Send a message to a Hipchat room - community.general.hipchat: - room: notif - msg: Ansible task finished - -- name: Send a message to a Hipchat room using Hipchat API version 2 - community.general.hipchat: - api: https://api.hipchat.com/v2/ - token: OAUTH2_TOKEN - room: notify - msg: Ansible task finished -''' - -# =========================================== -# HipChat module specific support methods. 
-# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -DEFAULT_URI = "https://api.hipchat.com/v1" - -MSG_URI_V1 = "/rooms/message" - -NOTIFY_URI_V2 = "/room/{id_or_name}/notification" - - -def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V1): - '''sending message to hipchat v1 server''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - params['notify'] = int(notify) - - url = api + MSG_URI_V1 + "?auth_token=%s" % (token) - data = urlencode(params) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=NOTIFY_URI_V2): - '''sending message to hipchat v2 server''' - - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - - body = dict() - body['message'] = msg - body['color'] = color - body['message_format'] = msg_format - body['notify'] = notify - - POST_URL = api + NOTIFY_URI_V2 - - url = POST_URL.replace('{id_or_name}', pathname2url(room)) - data = json.dumps(body) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - - # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows - # 204 to be the expected result code. - if info['status'] in [200, 204]: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. 
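Side by side, the two request styles implemented above differ in where the token lives and how the body is encoded. A sketch with placeholder values (urllib's quote() stands in here for the pathname2url() call used in send_msg_v2()):

```python
# v1 vs v2 request construction, condensed from the functions above.
import json
from urllib.parse import urlencode, quote

token, room, msg = "SECRET", "notif", "hello"  # placeholders

# v1: token in the query string, form-encoded body
v1_url = "https://api.hipchat.com/v1/rooms/message?auth_token=%s" % token
v1_body = urlencode({"room_id": room, "message": msg, "from": "Ansible"[:15]})

# v2: Bearer header, JSON body, room name escaped into the URL path
v2_url = "https://api.hipchat.com/v2/room/%s/notification" % quote(room)
v2_headers = {"Authorization": "Bearer %s" % token,
              "Content-Type": "application/json"}
v2_body = json.dumps({"message": msg, "message_format": "text"})
```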
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs=dict(default=True, type='bool'), - api=dict(default=DEFAULT_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = str(module.params["room"]) - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - if api.find('/v2') != -1: - send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) - else: - send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception as e: - module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/irc.py b/plugins/modules/notification/irc.py deleted file mode 100644 index 9b1b91f586..0000000000 --- a/plugins/modules/notification/irc.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jan-Piet Mens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: irc -short_description: Send a message to an IRC channel or a nick -description: - - Send a message to an IRC channel or a nick. This is a very simplistic implementation. -options: - server: - type: str - description: - - IRC server name/address - default: localhost - port: - type: int - description: - - IRC server port number - default: 6667 - nick: - type: str - description: - - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting. - default: ansible - msg: - type: str - description: - - The message body. - required: true - topic: - type: str - description: - - Set the channel topic - color: - type: str - description: - - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). - Added 11 more colors in version 2.0. - default: "none" - choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", - "light_blue", "pink", "gray", "light_gray"] - aliases: [colour] - channel: - type: str - description: - - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. - nick_to: - type: list - elements: str - description: - - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them. 
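A quick aside on the color and style options documented above: they map onto mIRC control codes, which send_msg() below prepends to the message body. A minimal sketch with the lookup tables abbreviated:

```python
# How the styled IRC message body is composed (subset of the full tables).
COLORS = {"red": "04", "green": "03"}          # "none" omits the color code
STYLES = {"bold": "\x02", "underline": "\x1f"}

def compose(msg, color=None, style=None):
    styletext = STYLES.get(style, "")
    colortext = "\x03" + COLORS[color] if color in COLORS else ""
    return styletext + colortext + msg

print(repr(compose("Hello world", color="red", style="bold")))
# '\x02\x0304Hello world'
```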
- key: - type: str - description: - - Channel key - passwd: - type: str - description: - - Server password - timeout: - type: int - description: - - Timeout to use while waiting for successful registration and join - messages, this is to prevent an endless loop - default: 30 - use_ssl: - description: - - Designates whether TLS/SSL should be used when connecting to the IRC server - type: bool - default: 'no' - part: - description: - - Designates whether user should part from channel after sending message or not. - Useful for when using a faux bot and not wanting join/parts between messages. - type: bool - default: 'yes' - style: - type: str - description: - - Text style for the message. Note italic does not work on some clients - choices: [ "bold", "underline", "reverse", "italic", "none" ] - default: none - -# informational: requirements for nodes -requirements: [ socket ] -author: - - "Jan-Piet Mens (@jpmens)" - - "Matt Martz (@sivel)" -''' - -EXAMPLES = ''' -- name: Send a message to an IRC channel from nick ansible - community.general.irc: - server: irc.example.net - channel: #t1 - msg: Hello world - -- name: Send a message to an IRC channel - local_action: - module: irc - port: 6669 - server: irc.example.net - channel: #t1 - msg: 'All finished at {{ ansible_date_time.iso8601 }}' - color: red - nick: ansibleIRC - -- name: Send a message to an IRC channel - local_action: - module: irc - port: 6669 - server: irc.example.net - channel: #t1 - nick_to: - - nick1 - - nick2 - msg: 'All finished at {{ ansible_date_time.iso8601 }}' - color: red - nick: ansibleIRC -''' - -# =========================================== -# IRC module support methods. -# - -import re -import socket -import ssl -import time -import traceback - -from ansible.module_utils.common.text.converters import to_native, to_bytes -from ansible.module_utils.basic import AnsibleModule - - -def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None, - nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None): - '''send message to IRC''' - nick_to = [] if nick_to is None else nick_to - - colornumbers = { - 'white': "00", - 'black': "01", - 'blue': "02", - 'green': "03", - 'red': "04", - 'brown': "05", - 'purple': "06", - 'orange': "07", - 'yellow': "08", - 'light_green': "09", - 'teal': "10", - 'light_cyan': "11", - 'light_blue': "12", - 'pink': "13", - 'gray': "14", - 'light_gray': "15", - } - - stylechoices = { - 'bold': "\x02", - 'underline': "\x1F", - 'reverse': "\x16", - 'italic': "\x1D", - } - - try: - styletext = stylechoices[style] - except Exception: - styletext = "" - - try: - colornumber = colornumbers[color] - colortext = "\x03" + colornumber - except Exception: - colortext = "" - - message = styletext + colortext + msg - - irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if use_ssl: - irc = ssl.wrap_socket(irc) - irc.connect((server, int(port))) - - if passwd: - irc.send(to_bytes('PASS %s\r\n' % passwd)) - irc.send(to_bytes('NICK %s\r\n' % nick)) - irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))) - motd = '' - start = time.time() - while 1: - motd += to_native(irc.recv(1024)) - # The server might send back a shorter nick than we specified (due to NICKLEN), - # so grab that and use it from now on (assuming we find the 00[1-4] response). 
- match = re.search(r'^:\S+ 00[1-4] (?P\S+) :', motd, flags=re.M) - if match: - nick = match.group('nick') - break - elif time.time() - start > timeout: - raise Exception('Timeout waiting for IRC server welcome response') - time.sleep(0.5) - - if channel: - if key: - irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key))) - else: - irc.send(to_bytes('JOIN %s\r\n' % channel)) - - join = '' - start = time.time() - while 1: - join += to_native(irc.recv(1024)) - if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I): - break - elif time.time() - start > timeout: - raise Exception('Timeout waiting for IRC JOIN response') - time.sleep(0.5) - - if topic is not None: - irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic))) - time.sleep(1) - - if nick_to: - for nick in nick_to: - irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message))) - if channel: - irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message))) - time.sleep(1) - if part: - if channel: - irc.send(to_bytes('PART %s\r\n' % channel)) - irc.send(to_bytes('QUIT\r\n')) - time.sleep(1) - irc.close() - -# =========================================== -# Main -# - - -def main(): - module = AnsibleModule( - argument_spec=dict( - server=dict(default='localhost'), - port=dict(type='int', default=6667), - nick=dict(default='ansible'), - nick_to=dict(required=False, type='list', elements='str'), - msg=dict(required=True), - color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", - "green", "red", "brown", - "purple", "orange", "yellow", - "light_green", "teal", "light_cyan", - "light_blue", "pink", "gray", - "light_gray", "none"]), - style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), - channel=dict(required=False), - key=dict(no_log=True), - topic=dict(), - passwd=dict(no_log=True), - timeout=dict(type='int', default=30), - part=dict(type='bool', default=True), - use_ssl=dict(type='bool', default=False) - ), - supports_check_mode=True, - required_one_of=[['channel', 'nick_to']] - ) - - server = module.params["server"] - port = module.params["port"] - nick = module.params["nick"] - nick_to = module.params["nick_to"] - msg = module.params["msg"] - color = module.params["color"] - channel = module.params["channel"] - topic = module.params["topic"] - if topic and not channel: - module.fail_json(msg="When topic is specified, a channel is required.") - key = module.params["key"] - passwd = module.params["passwd"] - timeout = module.params["timeout"] - use_ssl = module.params["use_ssl"] - part = module.params["part"] - style = module.params["style"] - - try: - send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style) - except Exception as e: - module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=False, channel=channel, nick=nick, - msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/jabber.py b/plugins/modules/notification/jabber.py deleted file mode 100644 index 9b6811b3fa..0000000000 --- a/plugins/modules/notification/jabber.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Brian Coca -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: jabber -short_description: Send a message to jabber 
user or chat room -description: - - Send a message to jabber -options: - user: - type: str - description: - - User as which to connect - required: true - password: - type: str - description: - - password for user to connect - required: true - to: - type: str - description: - - user ID or name of the room, when using room use a slash to indicate your nick. - required: true - msg: - type: str - description: - - The message body. - required: true - host: - type: str - description: - - host to connect, overrides user info - port: - type: int - description: - - port to connect to, overrides default - default: 5222 - encoding: - type: str - description: - - message encoding - -# informational: requirements for nodes -requirements: - - python xmpp (xmpppy) -author: "Brian Coca (@bcoca)" -''' - -EXAMPLES = ''' -- name: Send a message to a user - community.general.jabber: - user: mybot@example.net - password: secret - to: friend@example.net - msg: Ansible task finished - -- name: Send a message to a room - community.general.jabber: - user: mybot@example.net - password: secret - to: mychaps@conference.example.net/ansiblebot - msg: Ansible task finished - -- name: Send a message, specifying the host and port - community.general.jabber: - user: mybot@example.net - host: talk.example.net - port: 5223 - password: secret - to: mychaps@example.net - msg: Ansible task finished -''' - -import time -import traceback - -HAS_XMPP = True -XMPP_IMP_ERR = None -try: - import xmpp -except ImportError: - XMPP_IMP_ERR = traceback.format_exc() - HAS_XMPP = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True), - password=dict(required=True, no_log=True), - to=dict(required=True), - msg=dict(required=True), - host=dict(required=False), - port=dict(required=False, default=5222, type='int'), - encoding=dict(required=False), - ), - supports_check_mode=True - ) - - if not HAS_XMPP: - module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR) - - jid = xmpp.JID(module.params['user']) - user = jid.getNode() - server = jid.getDomain() - port = module.params['port'] - password = module.params['password'] - try: - to, nick = module.params['to'].split('/', 1) - except ValueError: - to, nick = module.params['to'], None - - if module.params['host']: - host = module.params['host'] - else: - host = server - if module.params['encoding']: - xmpp.simplexml.ENCODING = module.params['encoding'] - - msg = xmpp.protocol.Message(body=module.params['msg']) - - try: - conn = xmpp.Client(server, debug=[]) - if not conn.connect(server=(host, port)): - module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) - if not conn.auth(user, password, 'Ansible'): - module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server)) - # some old servers require this, also the sleep following send - conn.sendInitPresence(requestRoster=0) - - if nick: # sending to room instead of user, need to join - msg.setType('groupchat') - msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') - join = xmpp.Presence(to=module.params['to']) - join.setTag('x', namespace='http://jabber.org/protocol/muc') - conn.send(join) - time.sleep(1) - else: - msg.setType('chat') - - msg.setTo(to) - if not module.check_mode: - conn.send(msg) - time.sleep(1) - conn.disconnect() - except Exception as e: - module.fail_json(msg="unable to send msg: 
%s" % to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/logentries_msg.py b/plugins/modules/notification/logentries_msg.py deleted file mode 100644 index 59e0f32565..0000000000 --- a/plugins/modules/notification/logentries_msg.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: logentries_msg -short_description: Send a message to logentries. -description: - - Send a message to logentries -requirements: - - "python >= 2.6" -options: - token: - type: str - description: - - Log token. - required: true - msg: - type: str - description: - - The message body. - required: true - api: - type: str - description: - - API endpoint - default: data.logentries.com - port: - type: int - description: - - API endpoint port - default: 80 -author: "Jimmy Tang (@jcftang) " -''' - -RETURN = '''# ''' - -EXAMPLES = ''' -- name: Send a message to logentries - community.general.logentries_msg: - token=00000000-0000-0000-0000-000000000000 - msg="{{ ansible_hostname }}" -''' - -import socket - -from ansible.module_utils.basic import AnsibleModule - - -def send_msg(module, token, msg, api, port): - - message = "{0} {1}\n".format(token, msg) - - api_ip = socket.gethostbyname(api) - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect((api_ip, port)) - try: - if not module.check_mode: - s.send(message) - except Exception as e: - module.fail_json(msg="failed to send message, msg=%s" % e) - s.close() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - token=dict(type='str', required=True, no_log=True), - msg=dict(type='str', required=True), - api=dict(type='str', default="data.logentries.com"), - port=dict(type='int', default=80)), - supports_check_mode=True - ) - - token = module.params["token"] - msg = module.params["msg"] - api = module.params["api"] - port = module.params["port"] - - changed = False - try: - send_msg(module, token, msg, api, port) - changed = True - except Exception as e: - module.fail_json(msg="unable to send msg: %s" % e) - - module.exit_json(changed=changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/mail.py b/plugins/modules/notification/mail.py deleted file mode 100644 index 82ca6d52b2..0000000000 --- a/plugins/modules/notification/mail.py +++ /dev/null @@ -1,408 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -author: -- Dag Wieers (@dagwieers) -module: mail -short_description: Send an email -description: -- This module is useful for sending emails from playbooks. -- One may wonder why automate sending emails? In complex environments - there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone - agrees to a common approach. 
-- If you cannot automate a specific step, but the step is non-blocking, - sending out an email to the responsible party to make them perform their - part of the bargain is an elegant way to put the responsibility in - someone else's lap. -- Of course sending out a mail can be equally useful as a way to notify - one or more people in a team that a specific action has been - (successfully) taken. -options: - sender: - description: - - The email-address the mail is sent from. May contain address and phrase. - type: str - default: root - aliases: [ from ] - to: - description: - - The email-address(es) the mail is being sent to. - - This is a list, which may contain address and phrase portions. - type: list - elements: str - default: root - aliases: [ recipients ] - cc: - description: - - The email-address(es) the mail is being copied to. - - This is a list, which may contain address and phrase portions. - type: list - elements: str - bcc: - description: - - The email-address(es) the mail is being 'blind' copied to. - - This is a list, which may contain address and phrase portions. - type: list - elements: str - subject: - description: - - The subject of the email being sent. - required: yes - type: str - aliases: [ msg ] - body: - description: - - The body of the email being sent. - type: str - username: - description: - - If SMTP requires username. - type: str - password: - description: - - If SMTP requires password. - type: str - host: - description: - - The mail server. - type: str - default: localhost - port: - description: - - The mail server port. - - This must be a valid integer between 1 and 65534 - type: int - default: 25 - attach: - description: - - A list of pathnames of files to attach to the message. - - Attached files will have their content-type set to C(application/octet-stream). - type: list - elements: path - default: [] - headers: - description: - - A list of headers which should be added to the message. - - Each individual header is specified as C(header=value) (see example below). - type: list - elements: str - default: [] - charset: - description: - - The character set of email being sent. - type: str - default: utf-8 - subtype: - description: - - The minor mime type, can be either C(plain) or C(html). - - The major type is always C(text). - type: str - choices: [ html, plain ] - default: plain - secure: - description: - - If C(always), the connection will only send email if the connection is Encrypted. - If the server doesn't accept the encrypted connection it will fail. - - If C(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. - - If C(never), the connection will not attempt to setup a secure SSL/TLS session, before sending - - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. - If it is unable to do so it will fail. - type: str - choices: [ always, never, starttls, try ] - default: try - timeout: - description: - - Sets the timeout in seconds for connection attempts. - type: int - default: 20 - ehlohost: - description: - - Allows for manual specification of host for EHLO. - type: str - version_added: 3.8.0 -''' - -EXAMPLES = r''' -- name: Example playbook sending mail to root - community.general.mail: - subject: System {{ ansible_hostname }} has been successfully provisioned. 
- delegate_to: localhost - -- name: Sending an e-mail using Gmail SMTP servers - community.general.mail: - host: smtp.gmail.com - port: 587 - username: username@gmail.com - password: mysecret - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - delegate_to: localhost - -- name: Send e-mail to a bunch of users, attaching files - community.general.mail: - host: 127.0.0.1 - port: 2025 - subject: Ansible-report - body: Hello, this is an e-mail. I hope you like it ;-) - from: jane@example.net (Jane Jolie) - to: - - John Doe - - Suzie Something - cc: Charlie Root - attach: - - /etc/group - - /tmp/avatar2.png - headers: - - Reply-To=john@example.com - - X-Special="Something or other" - charset: us-ascii - delegate_to: localhost - -- name: Sending an e-mail using the remote machine, not the Ansible controller node - community.general.mail: - host: localhost - port: 25 - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - -- name: Sending an e-mail using Legacy SSL to the remote machine - community.general.mail: - host: localhost - port: 25 - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - secure: always - -- name: Sending an e-mail using StartTLS to the remote machine - community.general.mail: - host: localhost - port: 25 - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - secure: starttls - -- name: Sending an e-mail using StartTLS, remote server, custom EHLO - community.general.mail: - host: some.smtp.host.tld - port: 25 - ehlohost: my-resolvable-hostname.tld - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. 
- secure: starttls -''' - -import os -import smtplib -import ssl -import traceback -from email import encoders -from email.utils import parseaddr, formataddr, formatdate -from email.mime.base import MIMEBase -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -from email.header import Header - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import PY3 -from ansible.module_utils.common.text.converters import to_native - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - username=dict(type='str'), - password=dict(type='str', no_log=True), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=25), - ehlohost=dict(type='str', default=None), - sender=dict(type='str', default='root', aliases=['from']), - to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), - cc=dict(type='list', elements='str', default=[]), - bcc=dict(type='list', elements='str', default=[]), - subject=dict(type='str', required=True, aliases=['msg']), - body=dict(type='str'), - attach=dict(type='list', elements='path', default=[]), - headers=dict(type='list', elements='str', default=[]), - charset=dict(type='str', default='utf-8'), - subtype=dict(type='str', default='plain', choices=['html', 'plain']), - secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), - timeout=dict(type='int', default=20), - ), - required_together=[['password', 'username']], - ) - - username = module.params.get('username') - password = module.params.get('password') - host = module.params.get('host') - port = module.params.get('port') - local_hostname = module.params.get('ehlohost') - sender = module.params.get('sender') - recipients = module.params.get('to') - copies = module.params.get('cc') - blindcopies = module.params.get('bcc') - subject = module.params.get('subject') - body = module.params.get('body') - attach_files = module.params.get('attach') - headers = module.params.get('headers') - charset = module.params.get('charset') - subtype = module.params.get('subtype') - secure = module.params.get('secure') - timeout = module.params.get('timeout') - - code = 0 - secure_state = False - sender_phrase, sender_addr = parseaddr(sender) - - if not body: - body = subject - - try: - if secure != 'never': - try: - if PY3: - smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) - else: - smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout) - code, smtpmessage = smtp.connect(host, port) - secure_state = True - except ssl.SSLError as e: - if secure == 'always': - module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' % - (host, port, to_native(e)), exception=traceback.format_exc()) - except Exception: - pass - - if not secure_state: - if PY3: - smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout) - else: - smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout) - code, smtpmessage = smtp.connect(host, port) - - except smtplib.SMTPException as e: - module.fail_json(rc=1, msg='Unable to Connect %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) - - try: - smtp.ehlo() - except smtplib.SMTPException as e: - module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) - - if int(code) > 0: - if not secure_state and secure in ('starttls', 'try'): - if smtp.has_extn('STARTTLS'): - try: - 
smtp.starttls() - secure_state = True - except smtplib.SMTPException as e: - module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' % - (host, port, to_native(e)), exception=traceback.format_exc()) - try: - smtp.ehlo() - except smtplib.SMTPException as e: - module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) - else: - if secure == 'starttls': - module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port)) - - if username and password: - if smtp.has_extn('AUTH'): - try: - smtp.login(username, password) - except smtplib.SMTPAuthenticationError: - module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port)) - except smtplib.SMTPException: - module.fail_json(rc=1, msg='No Suitable authentication method was found on %s:%s' % (host, port)) - else: - module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port)) - - if not secure_state and (username and password): - module.warn('Username and Password was sent without encryption') - - msg = MIMEMultipart(_charset=charset) - msg['From'] = formataddr((sender_phrase, sender_addr)) - msg['Date'] = formatdate(localtime=True) - msg['Subject'] = Header(subject, charset) - msg.preamble = "Multipart message" - - for header in headers: - # NOTE: Backward compatible with old syntax using '|' as delimiter - for hdr in [x.strip() for x in header.split('|')]: - try: - h_key, h_val = hdr.split('=') - h_val = to_native(Header(h_val, charset)) - msg.add_header(h_key, h_val) - except Exception: - module.warn("Skipping header '%s', unable to parse" % hdr) - - if 'X-Mailer' not in msg: - msg.add_header('X-Mailer', 'Ansible mail module') - - addr_list = [] - for addr in [x.strip() for x in blindcopies]: - addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase - - to_list = [] - for addr in [x.strip() for x in recipients]: - to_list.append(formataddr(parseaddr(addr))) - addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase - msg['To'] = ", ".join(to_list) - - cc_list = [] - for addr in [x.strip() for x in copies]: - cc_list.append(formataddr(parseaddr(addr))) - addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase - msg['Cc'] = ", ".join(cc_list) - - part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset) - msg.attach(part) - - # NOTE: Backware compatibility with old syntax using space as delimiter is not retained - # This breaks files with spaces in it :-( - for filename in attach_files: - try: - part = MIMEBase('application', 'octet-stream') - with open(filename, 'rb') as fp: - part.set_payload(fp.read()) - encoders.encode_base64(part) - part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename)) - msg.attach(part) - except Exception as e: - module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" % - (filename, to_native(e)), exception=traceback.format_exc()) - - composed = msg.as_string() - - try: - result = smtp.sendmail(sender_addr, set(addr_list), composed) - except Exception as e: - module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" % - (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc()) - - smtp.quit() - - if result: - for key in result: - module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1])) - module.exit_json(msg='Failed to send mail to at least one recipient', result=result) - - 
module.exit_json(msg='Mail sent successfully', result=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/matrix.py b/plugins/modules/notification/matrix.py deleted file mode 100644 index d94ed2b8de..0000000000 --- a/plugins/modules/notification/matrix.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -# (c) 2018, Jan Christian Grünhage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -author: "Jan Christian Grünhage (@jcgruenhage)" -module: matrix -short_description: Send notifications to matrix -description: - - This module sends html formatted notifications to matrix rooms. -options: - msg_plain: - type: str - description: - - Plain text form of the message to send to matrix, usually markdown - required: true - msg_html: - type: str - description: - - HTML form of the message to send to matrix - required: true - room_id: - type: str - description: - - ID of the room to send the notification to - required: true - hs_url: - type: str - description: - - URL of the homeserver, where the CS-API is reachable - required: true - token: - type: str - description: - - Authentication token for the API call. If provided, user_id and password are not required - user_id: - type: str - description: - - The user id of the user - password: - type: str - description: - - The password to log in with -requirements: - - matrix-client (Python library) -''' - -EXAMPLES = ''' -- name: Send matrix notification with token - community.general.matrix: - msg_plain: "**hello world**" - msg_html: "hello world" - room_id: "!12345678:server.tld" - hs_url: "https://matrix.org" - token: "{{ matrix_auth_token }}" - -- name: Send matrix notification with user_id and password - community.general.matrix: - msg_plain: "**hello world**" - msg_html: "hello world" - room_id: "!12345678:server.tld" - hs_url: "https://matrix.org" - user_id: "ansible_notification_bot" - password: "{{ matrix_auth_password }}" -''' - -RETURN = ''' -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -MATRIX_IMP_ERR = None -try: - from matrix_client.client import MatrixClient -except ImportError: - MATRIX_IMP_ERR = traceback.format_exc() - matrix_found = False -else: - matrix_found = True - - -def run_module(): - module_args = dict( - msg_plain=dict(type='str', required=True), - msg_html=dict(type='str', required=True), - room_id=dict(type='str', required=True), - hs_url=dict(type='str', required=True), - token=dict(type='str', required=False, no_log=True), - user_id=dict(type='str', required=False), - password=dict(type='str', required=False, no_log=True), - ) - - result = dict( - changed=False, - message='' - ) - - module = AnsibleModule( - argument_spec=module_args, - mutually_exclusive=[['password', 'token']], - required_one_of=[['password', 'token']], - required_together=[['user_id', 'password']], - supports_check_mode=True - ) - - if not matrix_found: - module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR) - - if module.check_mode: - return result - - # create a client object - client = MatrixClient(module.params['hs_url']) - if module.params['token'] is not None: - client.api.token = module.params['token'] - else: - client.login(module.params['user_id'], module.params['password'], sync=False) - - # make sure we are in a given room and return 
a room object for it - room = client.join_room(module.params['room_id']) - # send an html formatted messages - room.send_html(module.params['msg_html'], module.params['msg_plain']) - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/mattermost.py b/plugins/modules/notification/mattermost.py deleted file mode 100644 index 579cfa5b32..0000000000 --- a/plugins/modules/notification/mattermost.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Benjamin Jolivot -# Inspired by slack module : -# # (c) 2017, Steve Pletcher -# # (c) 2016, René Moser -# # (c) 2015, Stefan Berggren -# # (c) 2014, Ramon de la Fuente ) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: mattermost -short_description: Send Mattermost notifications -description: - - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration. -author: "Benjamin Jolivot (@bjolivot)" -options: - url: - type: str - description: - - Mattermost url (i.e. http://mattermost.yourcompany.com). - required: true - api_key: - type: str - description: - - Mattermost webhook api key. Log into your mattermost site, go to - Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook. - This will give you full URL. api_key is the last part. - http://mattermost.example.com/hooks/C(API_KEY) - required: true - text: - type: str - description: - - Text to send. Note that the module does not handle escaping characters. - required: true - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key). - username: - type: str - description: - - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc. - default: Ansible - icon_url: - type: str - description: - - Url for the message sender's icon. - default: https://www.ansible.com/favicon.ico - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- default: yes - type: bool -''' - -EXAMPLES = """ -- name: Send notification message via Mattermost - community.general.mattermost: - url: http://mattermost.example.com - api_key: my_api_key - text: '{{ inventory_hostname }} completed' - -- name: Send notification message via Mattermost all options - community.general.mattermost: - url: http://mattermost.example.com - api_key: my_api_key - text: '{{ inventory_hostname }} completed' - channel: notifications - username: 'Ansible on {{ inventory_hostname }}' - icon_url: http://www.example.com/some-image-file.png -""" - -RETURN = ''' -payload: - description: Mattermost payload - returned: success - type: str -webhook_url: - description: URL the webhook is sent to - returned: success - type: str -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - url=dict(type='str', required=True), - api_key=dict(type='str', required=True, no_log=True), - text=dict(type='str', required=True), - channel=dict(type='str', default=None), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - validate_certs=dict(default=True, type='bool'), - ) - ) - # init return dict - result = dict(changed=False, msg="OK") - - # define webhook - webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key']) - result['webhook_url'] = webhook_url - - # define payload - payload = {} - for param in ['text', 'channel', 'username', 'icon_url']: - if module.params[param] is not None: - payload[param] = module.params[param] - - payload = module.jsonify(payload) - result['payload'] = payload - - # http headers - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', - } - - # notes: - # Nothing is done in check mode - # it'll pass even if your server is down or/and if your token is invalid. - # If someone find good way to check... - - # send request if not in test mode - if module.check_mode is False: - response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload) - - # something's wrong - if info['status'] != 200: - # some problem - result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg']) - module.fail_json(**result) - - # Looks good - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/mqtt.py b/plugins/modules/notification/mqtt.py deleted file mode 100644 index 991114e8ae..0000000000 --- a/plugins/modules/notification/mqtt.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, 2014, Jan-Piet Mens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: mqtt -short_description: Publish a message on an MQTT topic for the IoT -description: - - Publish a message on an MQTT topic. -options: - server: - type: str - description: - - MQTT broker address/name - default: localhost - port: - type: int - description: - - MQTT broker port number - default: 1883 - username: - type: str - description: - - Username to authenticate against the broker. - password: - type: str - description: - - Password for C(username) to authenticate against the broker. 
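One more aside before the remaining mqtt options: the mattermost module above builds its webhook URL from url plus api_key and only serializes the options the user actually set. A minimal sketch (URL and key are placeholders):

```python
# Payload/URL assembly as in the mattermost module's main().
import json

URL, API_KEY = "http://mattermost.example.com", "my_api_key"  # placeholders
webhook_url = "{0}/hooks/{1}".format(URL, API_KEY)

params = {"text": "host completed", "channel": None,
          "username": "Ansible", "icon_url": None}
payload = json.dumps({k: v for k, v in params.items() if v is not None})
print(webhook_url, payload)  # unset options never reach the payload
```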
- client_id: - type: str - description: - - MQTT client identifier - - If not specified, a value C(hostname + pid) will be used. - topic: - type: str - description: - - MQTT topic name - required: true - payload: - type: str - description: - - Payload. The special string C("None") may be used to send a NULL - (i.e. empty) payload which is useful to simply notify with the I(topic) - or to clear previously retained messages. - required: true - qos: - type: str - description: - - QoS (Quality of Service) - default: "0" - choices: [ "0", "1", "2" ] - retain: - description: - - Setting this flag causes the broker to retain (i.e. keep) the message so that - applications that subsequently subscribe to the topic can received the last - retained message immediately. - type: bool - default: 'no' - ca_cert: - type: path - description: - - The path to the Certificate Authority certificate files that are to be - treated as trusted by this client. If this is the only option given - then the client will operate in a similar manner to a web browser. That - is to say it will require the broker to have a certificate signed by the - Certificate Authorities in ca_certs and will communicate using TLS v1, - but will not attempt any form of authentication. This provides basic - network encryption but may not be sufficient depending on how the broker - is configured. - aliases: [ ca_certs ] - client_cert: - type: path - description: - - The path pointing to the PEM encoded client certificate. If this is not - None it will be used as client information for TLS based - authentication. Support for this feature is broker dependent. - aliases: [ certfile ] - client_key: - type: path - description: - - The path pointing to the PEM encoded client private key. If this is not - None it will be used as client information for TLS based - authentication. Support for this feature is broker dependent. - aliases: [ keyfile ] - tls_version: - description: - - Specifies the version of the SSL/TLS protocol to be used. - - By default (if the python version supports it) the highest TLS version is - detected. If unavailable, TLS v1 is used. - type: str - choices: - - tlsv1.1 - - tlsv1.2 -requirements: [ mosquitto ] -notes: - - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)). -author: "Jan-Piet Mens (@jpmens)" -''' - -EXAMPLES = ''' -- name: Publish a message on an MQTT topic - community.general.mqtt: - topic: 'service/ansible/{{ ansible_hostname }}' - payload: 'Hello at {{ ansible_date_time.iso8601 }}' - qos: 0 - retain: False - client_id: ans001 - delegate_to: localhost -''' - -# =========================================== -# MQTT module support methods. 
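Outside of Ansible, the same one-shot publish the module performs looks like this (requires the paho-mqtt package; broker and topic are placeholders, and the client_id line mirrors the module's hostname + pid default):

```python
# One-shot MQTT publish via paho-mqtt, mirroring the mqtt.single() call below.
import os
import socket
import paho.mqtt.publish as mqtt

client_id = "%s_%s" % (socket.getfqdn(), os.getpid())  # module default scheme

mqtt.single(
    "service/ansible/example",   # topic (placeholder)
    "Hello from a sketch",       # payload; the module maps "None" to None
    qos=0,
    retain=False,
    client_id=client_id,
    hostname="localhost",        # placeholder broker
    port=1883,
    auth=None,                   # or {'username': ..., 'password': ...}
    tls=None,
)
```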
-# - -import os -import ssl -import traceback -import platform -from distutils.version import LooseVersion - -HAS_PAHOMQTT = True -PAHOMQTT_IMP_ERR = None -try: - import socket - import paho.mqtt.publish as mqtt -except ImportError: - PAHOMQTT_IMP_ERR = traceback.format_exc() - HAS_PAHOMQTT = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -# =========================================== -# Main -# - -def main(): - tls_map = {} - - try: - tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2 - except AttributeError: - pass - - try: - tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1 - except AttributeError: - pass - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='localhost'), - port=dict(default=1883, type='int'), - topic=dict(required=True), - payload=dict(required=True), - client_id=dict(default=None), - qos=dict(default="0", choices=["0", "1", "2"]), - retain=dict(default=False, type='bool'), - username=dict(default=None), - password=dict(default=None, no_log=True), - ca_cert=dict(default=None, type='path', aliases=['ca_certs']), - client_cert=dict(default=None, type='path', aliases=['certfile']), - client_key=dict(default=None, type='path', aliases=['keyfile']), - tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2']) - ), - supports_check_mode=True - ) - - if not HAS_PAHOMQTT: - module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR) - - server = module.params.get("server", 'localhost') - port = module.params.get("port", 1883) - topic = module.params.get("topic") - payload = module.params.get("payload") - client_id = module.params.get("client_id", '') - qos = int(module.params.get("qos", 0)) - retain = module.params.get("retain") - username = module.params.get("username", None) - password = module.params.get("password", None) - ca_certs = module.params.get("ca_cert", None) - certfile = module.params.get("client_cert", None) - keyfile = module.params.get("client_key", None) - tls_version = module.params.get("tls_version", None) - - if client_id is None: - client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) - - if payload and payload == 'None': - payload = None - - auth = None - if username is not None: - auth = {'username': username, 'password': password} - - tls = None - if ca_certs is not None: - if tls_version: - tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23) - else: - if LooseVersion(platform.python_version()) <= "3.5.2": - # Specifying `None` on later versions of python seems sufficient to - # instruct python to autonegotiate the SSL/TLS connection. On versions - # 3.5.2 and lower though we need to specify the version. - # - # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was - # not available until 3.5.3. 
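-                # (Added note, hedged: from Python 3.5.3 onward, ssl.PROTOCOL_TLS
-                # is the documented alias for PROTOCOL_SSLv23, and leaving
-                # tls_version as None lets the ssl module negotiate the highest
-                # protocol version both ends support.)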
- tls_version = ssl.PROTOCOL_SSLv23 - - tls = { - 'ca_certs': ca_certs, - 'certfile': certfile, - 'keyfile': keyfile, - 'tls_version': tls_version, - } - - try: - mqtt.single( - topic, - payload, - qos=qos, - retain=retain, - client_id=client_id, - hostname=server, - port=port, - auth=auth, - tls=tls - ) - except Exception as e: - module.fail_json( - msg="unable to publish to MQTT broker %s" % to_native(e), - exception=traceback.format_exc() - ) - - module.exit_json(changed=False, topic=topic) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/nexmo.py b/plugins/modules/notification/nexmo.py deleted file mode 100644 index d239bb4456..0000000000 --- a/plugins/modules/notification/nexmo.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: nexmo -short_description: Send a SMS via nexmo -description: - - Send a SMS message via nexmo -author: "Matt Martz (@sivel)" -options: - api_key: - type: str - description: - - Nexmo API Key - required: true - api_secret: - type: str - description: - - Nexmo API Secret - required: true - src: - type: int - description: - - Nexmo Number to send from - required: true - dest: - type: list - elements: int - description: - - Phone number(s) to send SMS message to - required: true - msg: - type: str - description: - - Message to text to send. Messages longer than 160 characters will be - split into multiple messages - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
-    type: bool
-    default: 'yes'
-extends_documentation_fragment:
-- url
-'''
-
-EXAMPLES = """
-- name: Send notification message via Nexmo
-  community.general.nexmo:
-    api_key: 640c8a53
-    api_secret: 0ce239a6
-    src: 12345678901
-    dest:
-      - 10987654321
-      - 16789012345
-    msg: '{{ inventory_hostname }} completed'
-  delegate_to: localhost
-"""
-import json
-
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.urls import fetch_url, url_argument_spec
-
-
-NEXMO_API = 'https://rest.nexmo.com/sms/json'
-
-
-def send_msg(module):
-    failed = list()
-    responses = dict()
-    msg = {
-        'api_key': module.params.get('api_key'),
-        'api_secret': module.params.get('api_secret'),
-        'from': module.params.get('src'),
-        'text': module.params.get('msg')
-    }
-    for number in module.params.get('dest'):
-        msg['to'] = number
-        url = "%s?%s" % (NEXMO_API, urlencode(msg))
-
-        headers = dict(Accept='application/json')
-        response, info = fetch_url(module, url, headers=headers)
-        if info['status'] != 200:
-            failed.append(number)
-            responses[number] = dict(failed=True)
-
-        try:
-            responses[number] = json.load(response)
-        except Exception:
-            failed.append(number)
-            responses[number] = dict(failed=True)
-        else:
-            for message in responses[number]['messages']:
-                if int(message['status']) != 0:
-                    failed.append(number)
-                    responses[number] = dict(failed=True, **responses[number])
-
-    if failed:
-        msg = 'One or more messages failed to send'
-    else:
-        msg = ''
-
-    module.exit_json(failed=bool(failed), msg=msg, changed=False,
-                     responses=responses)
-
-
-def main():
-    argument_spec = url_argument_spec()
-    argument_spec.update(
-        dict(
-            api_key=dict(required=True, no_log=True),
-            api_secret=dict(required=True, no_log=True),
-            src=dict(required=True, type='int'),
-            dest=dict(required=True, type='list', elements='int'),
-            msg=dict(required=True),
-        ),
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec
-    )
-
-    send_msg(module)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/notification/office_365_connector_card.py b/plugins/modules/notification/office_365_connector_card.py
deleted file mode 100644
index 04d5e385d4..0000000000
--- a/plugins/modules/notification/office_365_connector_card.py
+++ /dev/null
@@ -1,301 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017 Marc Sensenich
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: office_365_connector_card
-short_description: Use webhooks to create Connector Card messages within an Office 365 group
-description:
-  - Creates Connector Card messages through
-    Office 365 Connectors U(https://dev.outlook.com/Connectors).
-author: "Marc Sensenich (@marc-sensenich)"
-notes:
-  - This module is not idempotent, therefore if the same task is run twice,
-    two Connector Cards will be created.
-options:
-  webhook:
-    type: str
-    description:
-      - The webhook URL is given to you when you create a new Connector.
-    required: true
-  summary:
-    type: str
-    description:
-      - A string used for summarizing card content.
-      - This will be shown as the message subject.
-      - This is required if the text parameter isn't populated.
-  color:
-    type: str
-    description:
-      - Accent color used for branding or indicating status in the card.
- title: - type: str - description: - - A title for the Connector message. Shown at the top of the message. - text: - type: str - description: - - The main text of the card. - - This will be rendered below the sender information and optional title, - - and above any sections or actions present. - actions: - type: list - elements: dict - description: - - This array of objects will power the action links - - found at the bottom of the card. - sections: - type: list - elements: dict - description: - - Contains a list of sections to display in the card. - - For more information see https://dev.outlook.com/Connectors/reference. -''' - -EXAMPLES = """ -- name: Create a simple Connector Card - community.general.office_365_connector_card: - webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID - text: 'Hello, World!' - -- name: Create a Connector Card with the full format - community.general.office_365_connector_card: - webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID - summary: This is the summary property - title: This is the **card's title** property - text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur - adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - color: E81123 - sections: - - title: This is the **section's title** property - activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg - activity_title: This is the section's **activityTitle** property - activity_subtitle: This is the section's **activitySubtitle** property - activity_text: This is the section's **activityText** property. - hero_image: - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg - title: This is the image's alternate text - text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur - adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - facts: - - name: This is a fact name - value: This is a fact value - - name: This is a fact name - value: This is a fact value - - name: This is a fact name - value: This is a fact value - images: - - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg - title: This is the image's alternate text - - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg - title: This is the image's alternate text - - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg - title: This is the image's alternate text - actions: - - "@type": ActionCard - name: Comment - inputs: - - "@type": TextInput - id: comment - is_multiline: true - title: Input's title property - actions: - - "@type": HttpPOST - name: Save - target: http://... - - "@type": ActionCard - name: Due Date - inputs: - - "@type": DateInput - id: dueDate - title: Input's title property - actions: - - "@type": HttpPOST - name: Save - target: http://... - - "@type": HttpPOST - name: Action's name prop. - target: http://... - - "@type": OpenUri - name: Action's name prop - targets: - - os: default - uri: http://... - - start_group: true - title: This is the title of a **second section** - text: This second section is visually separated from the first one by setting its - **startGroup** property to true. 
-""" - -RETURN = """ -""" - -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions" -OFFICE_365_CARD_TYPE = "MessageCard" -OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required." -OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable." - - -def build_actions(actions): - action_items = [] - - for action in actions: - action_item = snake_dict_to_camel_dict(action) - action_items.append(action_item) - - return action_items - - -def build_sections(sections): - sections_created = [] - - for section in sections: - sections_created.append(build_section(section)) - - return sections_created - - -def build_section(section): - section_payload = dict() - - if 'title' in section: - section_payload['title'] = section['title'] - - if 'start_group' in section: - section_payload['startGroup'] = section['start_group'] - - if 'activity_image' in section: - section_payload['activityImage'] = section['activity_image'] - - if 'activity_title' in section: - section_payload['activityTitle'] = section['activity_title'] - - if 'activity_subtitle' in section: - section_payload['activitySubtitle'] = section['activity_subtitle'] - - if 'activity_text' in section: - section_payload['activityText'] = section['activity_text'] - - if 'hero_image' in section: - section_payload['heroImage'] = section['hero_image'] - - if 'text' in section: - section_payload['text'] = section['text'] - - if 'facts' in section: - section_payload['facts'] = section['facts'] - - if 'images' in section: - section_payload['images'] = section['images'] - - if 'actions' in section: - section_payload['potentialAction'] = build_actions(section['actions']) - - return section_payload - - -def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None): - payload = dict() - payload['@context'] = OFFICE_365_CARD_CONTEXT - payload['@type'] = OFFICE_365_CARD_TYPE - - if summary is not None: - payload['summary'] = summary - - if color is not None: - payload['themeColor'] = color - - if title is not None: - payload['title'] = title - - if text is not None: - payload['text'] = text - - if actions: - payload['potentialAction'] = build_actions(actions) - - if sections: - payload['sections'] = build_sections(sections) - - payload = module.jsonify(payload) - return payload - - -def do_notify_connector_card_webhook(module, webhook, payload): - headers = { - 'Content-Type': 'application/json' - } - - response, info = fetch_url( - module=module, - url=webhook, - headers=headers, - method='POST', - data=payload - ) - - if info['status'] == 200: - module.exit_json(changed=True) - elif info['status'] == 400 and module.check_mode: - if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: - module.exit_json(changed=True) - else: - module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG) - else: - module.fail_json( - msg="failed to send %s as a connector card to Incoming Webhook: %s" - % (payload, info['msg']) - ) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - webhook=dict(required=True, no_log=True), - summary=dict(type='str'), - color=dict(type='str'), - title=dict(type='str'), - text=dict(type='str'), - actions=dict(type='list', elements='dict'), - sections=dict(type='list', elements='dict') - ), - supports_check_mode=True - ) - - webhook = 
module.params['webhook'] - summary = module.params['summary'] - color = module.params['color'] - title = module.params['title'] - text = module.params['text'] - actions = module.params['actions'] - sections = module.params['sections'] - - payload = build_payload_for_connector_card( - module, - summary, - color, - title, - text, - actions, - sections) - - if module.check_mode: - # In check mode, send an empty payload to validate connection - check_mode_payload = build_payload_for_connector_card(module) - do_notify_connector_card_webhook(module, webhook, check_mode_payload) - - do_notify_connector_card_webhook(module, webhook, payload) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/pushbullet.py b/plugins/modules/notification/pushbullet.py deleted file mode 100644 index 435fcf2fcb..0000000000 --- a/plugins/modules/notification/pushbullet.py +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: "Willy Barro (@willybarro)" -requirements: [ pushbullet.py ] -module: pushbullet -short_description: Sends notifications to Pushbullet -description: - - This module sends push notifications via Pushbullet to channels or devices. -options: - api_key: - type: str - description: - - Push bullet API token - required: true - channel: - type: str - description: - - The channel TAG you wish to broadcast a push notification, - as seen on the "My Channels" > "Edit your channel" at - Pushbullet page. - device: - type: str - description: - - The device NAME you wish to send a push notification, - as seen on the Pushbullet main page. - push_type: - type: str - description: - - Thing you wish to push. - default: note - choices: [ "note", "link" ] - title: - type: str - description: - - Title of the notification. - required: true - body: - type: str - description: - - Body of the notification, e.g. Details of the fault you're alerting. - url: - type: str - description: - - URL field, used when I(push_type) is C(link). - -notes: - - Requires pushbullet.py Python package on the remote host. - You can install it via pip with ($ pip install pushbullet.py). - See U(https://github.com/randomchars/pushbullet.py) -''' - -EXAMPLES = ''' -- name: Sends a push notification to a device - community.general.pushbullet: - api_key: "ABC123abc123ABC123abc123ABC123ab" - device: "Chrome" - title: "You may see this on Google Chrome" - -- name: Sends a link to a device - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - device: Chrome - push_type: link - title: Ansible Documentation - body: https://docs.ansible.com/ - -- name: Sends a push notification to a channel - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - channel: my-awesome-channel - title: Broadcasting a message to the #my-awesome-channel folks - -- name: Sends a push notification with title and body to a channel - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - channel: my-awesome-channel - title: ALERT! 
Signup service is down - body: Error rate on signup service is over 90% for more than 2 minutes -''' - -import traceback - -PUSHBULLET_IMP_ERR = None -try: - from pushbullet import PushBullet - from pushbullet.errors import InvalidKeyError, PushError -except ImportError: - PUSHBULLET_IMP_ERR = traceback.format_exc() - pushbullet_found = False -else: - pushbullet_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -# =========================================== -# Main -# - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), - channel=dict(type='str', default=None), - device=dict(type='str', default=None), - push_type=dict(type='str', default="note", choices=['note', 'link']), - title=dict(type='str', required=True), - body=dict(type='str', default=None), - url=dict(type='str', default=None), - ), - mutually_exclusive=( - ['channel', 'device'], - ), - supports_check_mode=True - ) - - api_key = module.params['api_key'] - channel = module.params['channel'] - device = module.params['device'] - push_type = module.params['push_type'] - title = module.params['title'] - body = module.params['body'] - url = module.params['url'] - - if not pushbullet_found: - module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR) - - # Init pushbullet - try: - pb = PushBullet(api_key) - target = None - except InvalidKeyError: - module.fail_json(msg="Invalid api_key") - - # Checks for channel/device - if device is None and channel is None: - module.fail_json(msg="You need to provide a channel or a device.") - - # Search for given device - if device is not None: - devices_by_nickname = {} - for d in pb.devices: - devices_by_nickname[d.nickname] = d - - if device in devices_by_nickname: - target = devices_by_nickname[device] - else: - module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) - - # Search for given channel - if channel is not None: - channels_by_tag = {} - for c in pb.channels: - channels_by_tag[c.channel_tag] = c - - if channel in channels_by_tag: - target = channels_by_tag[channel] - else: - module.fail_json(msg="Channel '%s' not found. 
Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) - - # If in check mode, exit saying that we succeeded - if module.check_mode: - module.exit_json(changed=False, msg="OK") - - # Send push notification - try: - if push_type == "link": - target.push_link(title, url, body) - else: - target.push_note(title, body) - module.exit_json(changed=False, msg="OK") - except PushError as e: - module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) - - module.fail_json(msg="An unknown error has occurred") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/pushover.py b/plugins/modules/notification/pushover.py deleted file mode 100644 index 7f73592a36..0000000000 --- a/plugins/modules/notification/pushover.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2012, Jim Richardson -# Copyright (c) 2019, Bernd Arnold -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pushover -short_description: Send notifications via U(https://pushover.net) -description: - - Send notifications via pushover, to subscriber list of devices, and email - addresses. Requires pushover app on devices. -notes: - - You will require a pushover.net account to use this module. But no account - is required to receive messages. -options: - msg: - type: str - description: - - What message you wish to send. - required: true - app_token: - type: str - description: - - Pushover issued token identifying your pushover app. - required: true - user_key: - type: str - description: - - Pushover issued authentication key for your user. - required: true - title: - type: str - description: - - Message title. - required: false - pri: - type: str - description: - - Message priority (see U(https://pushover.net) for details). - required: false - default: '0' - choices: [ '-2', '-1', '0', '1', '2' ] - device: - type: str - description: - - A device the message should be sent to. Multiple devices can be specified, separated by a comma. - required: false - version_added: 1.2.0 - -author: - - "Jim Richardson (@weaselkeeper)" - - "Bernd Arnold (@wopfel)" -''' - -EXAMPLES = ''' -- name: Send notifications via pushover.net - community.general.pushover: - msg: '{{ inventory_hostname }} is acting strange ...' - app_token: wxfdksl - user_key: baa5fe97f2c5ab3ca8f0bb59 - delegate_to: localhost - -- name: Send notifications via pushover.net - community.general.pushover: - title: 'Alert!' 
- msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic' - pri: 1 - app_token: wxfdksl - user_key: baa5fe97f2c5ab3ca8f0bb59 - delegate_to: localhost - -- name: Send notifications via pushover.net to a specific device - community.general.pushover: - msg: '{{ inventory_hostname }} has been lost somewhere' - app_token: wxfdksl - user_key: baa5fe97f2c5ab3ca8f0bb59 - device: admins-iPhone - delegate_to: localhost -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -class Pushover(object): - ''' Instantiates a pushover object, use it to send notifications ''' - base_uri = 'https://api.pushover.net' - - def __init__(self, module, user, token): - self.module = module - self.user = user - self.token = token - - def run(self, priority, msg, title, device): - ''' Do, whatever it is, we do. ''' - - url = '%s/1/messages.json' % (self.base_uri) - - # parse config - options = dict(user=self.user, - token=self.token, - priority=priority, - message=msg) - - if title is not None: - options = dict(options, - title=title) - - if device is not None: - options = dict(options, - device=device) - - data = urlencode(options) - - headers = {"Content-type": "application/x-www-form-urlencoded"} - r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers) - if info['status'] != 200: - raise Exception(info) - - return r.read() - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - title=dict(type='str'), - msg=dict(required=True), - app_token=dict(required=True, no_log=True), - user_key=dict(required=True, no_log=True), - pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']), - device=dict(type='str'), - ), - ) - - msg_object = Pushover(module, module.params['user_key'], module.params['app_token']) - try: - response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device']) - except Exception: - module.fail_json(msg='Unable to send msg via pushover') - - module.exit_json(msg='message sent successfully: %s' % response, changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/rocketchat.py b/plugins/modules/notification/rocketchat.py deleted file mode 100644 index 500560e417..0000000000 --- a/plugins/modules/notification/rocketchat.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Deepak Kothandan -# (c) 2015, Stefan Berggren -# (c) 2014, Ramon de la Fuente -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rocketchat -short_description: Send notifications to Rocket Chat -description: - - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration -author: "Ramon de la Fuente (@ramondelafuente)" -options: - domain: - type: str - description: - - The domain for your environment without protocol. (i.e. - C(example.com) or C(chat.example.com)) - required: true - token: - type: str - description: - - Rocket Chat Incoming Webhook integration token. This provides - authentication to Rocket Chat's Incoming webhook for posting - messages. - required: true - protocol: - type: str - description: - - Specify the protocol used to send notification messages before the webhook url. (i.e. 
        http or https)
-    default: https
-    choices:
-      - 'http'
-      - 'https'
-  msg:
-    type: str
-    description:
-      - Message to be sent.
-  channel:
-    type: str
-    description:
-      - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
-        specified during the creation of the webhook.
-  username:
-    type: str
-    description:
-      - This is the sender of the message.
-    default: "Ansible"
-  icon_url:
-    type: str
-    description:
-      - URL for the message sender's icon.
-    default: "https://www.ansible.com/favicon.ico"
-  icon_emoji:
-    type: str
-    description:
-      - Emoji for the message sender. The representation for the available emojis can be
-        obtained from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used)
-  link_names:
-    type: int
-    description:
-      - Automatically create links for channels and usernames in I(msg).
-    default: 1
-    choices:
-      - 1
-      - 0
-  validate_certs:
-    description:
-      - If C(no), SSL certificates will not be validated. This should only be used
-        on personally controlled sites using self-signed certificates.
-    type: bool
-    default: 'yes'
-  color:
-    type: str
-    description:
-      - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
-    default: 'normal'
-    choices:
-      - 'normal'
-      - 'good'
-      - 'warning'
-      - 'danger'
-  attachments:
-    type: list
-    elements: dict
-    description:
-      - Define a list of attachments.
-'''

-EXAMPLES = """
-- name: Send notification message via Rocket Chat
-  community.general.rocketchat:
-    token: thetoken/generatedby/rocketchat
-    domain: chat.example.com
-    msg: '{{ inventory_hostname }} completed'
-  delegate_to: localhost
-
-- name: Send notification message via Rocket Chat with all options
-  community.general.rocketchat:
-    domain: chat.example.com
-    token: thetoken/generatedby/rocketchat
-    msg: '{{ inventory_hostname }} completed'
-    channel: '#ansible'
-    username: 'Ansible on {{ inventory_hostname }}'
-    icon_url: http://www.example.com/some-image-file.png
-    link_names: 0
-  delegate_to: localhost
-
-- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
-  community.general.rocketchat:
-    token: thetoken/generatedby/rocketchat
-    domain: chat.example.com
-    msg: '{{ inventory_hostname }} is alive!'
-    color: good
-    username: ''
-    icon_url: ''
-  delegate_to: localhost
-
-- name: Use the attachments API
-  community.general.rocketchat:
-    token: thetoken/generatedby/rocketchat
-    domain: chat.example.com
-    attachments:
-      - text: Display my system load on host A and B
-        color: '#ff00dd'
-        title: System load
-        fields:
-          - title: System A
-            value: 'load average: 0,74, 0,66, 0,63'
-            short: True
-          - title: System B
-            value: 'load average: 5,16, 4,64, 2,43'
-            short: True
-  delegate_to: localhost
-"""
-
-RETURN = """
-changed:
-    description: A flag indicating if any change was made or not.
- returned: success - type: bool - sample: false -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' - - -def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): - payload = {} - if color == "normal" and text is not None: - payload = dict(text=text) - elif text is not None: - payload = dict(attachments=[dict(text=text, color=color)]) - if channel is not None: - if (channel[0] == '#') or (channel[0] == '@'): - payload['channel'] = channel - else: - payload['channel'] = '#' + channel - if username is not None: - payload['username'] = username - if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji - else: - payload['icon_url'] = icon_url - if link_names is not None: - payload['link_names'] = link_names - - if attachments is not None: - if 'attachments' not in payload: - payload['attachments'] = [] - - if attachments is not None: - for attachment in attachments: - if 'fallback' not in attachment: - attachment['fallback'] = attachment['text'] - payload['attachments'].append(attachment) - - payload = "payload=" + module.jsonify(payload) - return payload - - -def do_notify_rocketchat(module, domain, token, protocol, payload): - - if token.count('/') < 1: - module.fail_json(msg="Invalid Token specified, provide a valid token") - - rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) - - response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload) - if info['status'] != 200: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - domain=dict(type='str', required=True), - token=dict(type='str', required=True, no_log=True), - protocol=dict(type='str', default='https', choices=['http', 'https']), - msg=dict(type='str', required=False), - channel=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - icon_emoji=dict(type='str'), - link_names=dict(type='int', default=1, choices=[0, 1]), - validate_certs=dict(default=True, type='bool'), - color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), - attachments=dict(type='list', elements='dict', required=False) - ) - ) - - domain = module.params['domain'] - token = module.params['token'] - protocol = module.params['protocol'] - text = module.params['msg'] - channel = module.params['channel'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - color = module.params['color'] - attachments = module.params['attachments'] - - payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) - do_notify_rocketchat(module, domain, token, protocol, payload) - - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/say.py b/plugins/modules/notification/say.py deleted file mode 100644 index 1c66adf66e..0000000000 --- a/plugins/modules/notification/say.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import 
absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: say
-short_description: Makes a computer speak
-description:
-   - Makes a computer speak! Amuse your friends, annoy your coworkers!
-notes:
-   - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
-   - If you like this module, you may also be interested in the osx_say callback plugin.
-   - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
-options:
-  msg:
-    type: str
-    description:
-      What to say
-    required: true
-  voice:
-    type: str
-    description:
-      What voice to use
-    required: false
-requirements: [ say or espeak or espeak-ng ]
-author:
-    - "Ansible Core Team"
-    - "Michael DeHaan (@mpdehaan)"
-'''
-
-EXAMPLES = '''
-- name: Make a computer speak
-  community.general.say:
-    msg: '{{ inventory_hostname }} is all done'
-    voice: Zarvox
-  delegate_to: localhost
-'''
-import platform
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def say(module, executable, msg, voice):
-    cmd = [executable, msg]
-    if voice:
-        cmd.extend(('-v', voice))
-    module.run_command(cmd, check_rc=True)
-
-
-def main():
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            msg=dict(required=True),
-            voice=dict(required=False),
-        ),
-        supports_check_mode=True
-    )
-
-    msg = module.params['msg']
-    voice = module.params['voice']
-    possibles = ('say', 'espeak', 'espeak-ng')
-
-    if platform.system() != 'Darwin':
-        # A 'say' binary may still be available, but it might be the GNUstep
-        # tool, which does not support the 'voice' parameter
-        voice = None
-
-    for possible in possibles:
-        executable = module.get_bin_path(possible)
-        if executable:
-            break
-    else:
-        module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
-
-    if module.check_mode:
-        module.exit_json(msg=msg, changed=False)
-
-    say(module, executable, msg, voice)
-
-    module.exit_json(msg=msg, changed=True)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/notification/sendgrid.py b/plugins/modules/notification/sendgrid.py
deleted file mode 100644
index 4a63a03db7..0000000000
--- a/plugins/modules/notification/sendgrid.py
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Matt Makai
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: sendgrid
-short_description: Sends an email with the SendGrid API
-description:
-  - "Sends an email with a SendGrid account through their API, not through
-    the SMTP service."
-notes:
-  - "This module is non-idempotent because it sends an email through the
-    external API. It is idempotent only in the case that the module fails."
-  - "Like the other notification modules, this one requires an external
    dependency to work. In this case, you'll need an active SendGrid
-    account."
-  - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
-    you must pip install sendgrid"
-  - "Since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
-requirements:
-  - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
-options:
-  username:
-    type: str
-    description:
-      - Username for logging into the SendGrid account.
-      - Since 2.2 it is only required if I(api_key) is not supplied.
- password: - type: str - description: - - Password that corresponds to the username. - - Since 2.2 it is only required if I(api_key) is not supplied. - from_address: - type: str - description: - - The address in the "from" field for the email. - required: true - to_addresses: - type: list - elements: str - description: - - A list with one or more recipient email addresses. - required: true - subject: - type: str - description: - - The desired subject for the email. - required: true - api_key: - type: str - description: - - Sendgrid API key to use instead of username/password. - cc: - type: list - elements: str - description: - - A list of email addresses to cc. - bcc: - type: list - elements: str - description: - - A list of email addresses to bcc. - attachments: - type: list - elements: path - description: - - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs). - from_name: - type: str - description: - - The name you want to appear in the from field, i.e 'John Doe'. - html_body: - description: - - Whether the body is html content that should be rendered. - type: bool - default: 'no' - headers: - type: dict - description: - - A dict to pass on as headers. - body: - type: str - description: - - The e-mail body content. - required: yes -author: "Matt Makai (@makaimc)" -''' - -EXAMPLES = r''' -- name: Send an email to a single recipient that the deployment was successful - community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "ansible@mycompany.com" - to_addresses: - - "ops@mycompany.com" - subject: "Deployment success." - body: "The most recent Ansible deployment was successful." - delegate_to: localhost - -- name: Send an email to more than one recipient that the build failed - community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "build@mycompany.com" - to_addresses: - - "ops@mycompany.com" - - "devteam@mycompany.com" - subject: "Build failure!." - body: "Unable to pull source repository from Git server." 
- delegate_to: localhost -''' - -# ======================================= -# sendgrid module support methods -# -import os -import traceback - -from distutils.version import LooseVersion - -SENDGRID_IMP_ERR = None -try: - import sendgrid - HAS_SENDGRID = True -except ImportError: - SENDGRID_IMP_ERR = traceback.format_exc() - HAS_SENDGRID = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.urls import fetch_url - - -def post_sendgrid_api(module, username, password, from_address, to_addresses, - subject, body, api_key=None, cc=None, bcc=None, attachments=None, - html_body=False, from_name=None, headers=None): - - if not HAS_SENDGRID: - SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" - AGENT = "Ansible" - data = {'api_user': username, 'api_key': password, - 'from': from_address, 'subject': subject, 'text': body} - encoded_data = urlencode(data) - to_addresses_api = '' - for recipient in to_addresses: - recipient = to_bytes(recipient, errors='surrogate_or_strict') - to_addresses_api += '&to[]=%s' % recipient - encoded_data += to_addresses_api - - headers = {'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json'} - return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') - else: - # Remove this check when adding Sendgrid API v3 support - if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"): - module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.") - - if api_key: - sg = sendgrid.SendGridClient(api_key) - else: - sg = sendgrid.SendGridClient(username, password) - - message = sendgrid.Mail() - message.set_subject(subject) - - for recip in to_addresses: - message.add_to(recip) - - if cc: - for recip in cc: - message.add_cc(recip) - if bcc: - for recip in bcc: - message.add_bcc(recip) - - if headers: - message.set_headers(headers) - - if attachments: - for f in attachments: - name = os.path.basename(f) - message.add_attachment(name, f) - - if from_name: - message.set_from('%s <%s.' 
% (from_name, from_address)) - else: - message.set_from(from_address) - - if html_body: - message.set_html(body) - else: - message.set_text(body) - - return sg.send(message) -# ======================================= -# Main -# - - -def main(): - module = AnsibleModule( - argument_spec=dict( - username=dict(required=False), - password=dict(required=False, no_log=True), - api_key=dict(required=False, no_log=True), - bcc=dict(required=False, type='list', elements='str'), - cc=dict(required=False, type='list', elements='str'), - headers=dict(required=False, type='dict'), - from_address=dict(required=True), - from_name=dict(required=False), - to_addresses=dict(required=True, type='list', elements='str'), - subject=dict(required=True), - body=dict(required=True), - html_body=dict(required=False, default=False, type='bool'), - attachments=dict(required=False, type='list', elements='path') - ), - supports_check_mode=True, - mutually_exclusive=[ - ['api_key', 'password'], - ['api_key', 'username'] - ], - required_together=[['username', 'password']], - ) - - username = module.params['username'] - password = module.params['password'] - api_key = module.params['api_key'] - bcc = module.params['bcc'] - cc = module.params['cc'] - headers = module.params['headers'] - from_name = module.params['from_name'] - from_address = module.params['from_address'] - to_addresses = module.params['to_addresses'] - subject = module.params['subject'] - body = module.params['body'] - html_body = module.params['html_body'] - attachments = module.params['attachments'] - - sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] - - if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID: - reason = 'when using any of the following arguments: ' \ - 'api_key, bcc, cc, headers, from_name, html_body, attachments' - module.fail_json(msg=missing_required_lib('sendgrid', reason=reason), - exception=SENDGRID_IMP_ERR) - - response, info = post_sendgrid_api(module, username, password, - from_address, to_addresses, subject, body, attachments=attachments, - bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) - - if not HAS_SENDGRID: - if info['status'] != 200: - module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) - else: - if response != 200: - module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message']) - - module.exit_json(msg=subject, changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/slack.py b/plugins/modules/notification/slack.py deleted file mode 100644 index 3023bd9d8a..0000000000 --- a/plugins/modules/notification/slack.py +++ /dev/null @@ -1,488 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2020, Lee Goolsbee -# (c) 2020, Michal Middleton -# (c) 2017, Steve Pletcher -# (c) 2016, René Moser -# (c) 2015, Stefan Berggren -# (c) 2014, Ramon de la Fuente -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = """ -module: slack -short_description: Send Slack notifications -description: - - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration -author: "Ramon de la Fuente (@ramondelafuente)" -options: - domain: - type: str - description: - - Slack (sub)domain for your environment without protocol. (i.e. 
- C(example.slack.com)) In 1.8 and beyond, this is deprecated and may - be ignored. See token documentation for information. - token: - type: str - description: - - Slack integration token. This authenticates you to the slack service. - Make sure to use the correct type of token, depending on what method you use. - - "Webhook token: - Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In - 1.8 and above, ansible adapts to the new slack API where tokens look - like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens - are in the new format then slack will ignore any value of domain. If - the token is in the old format the domain is required. Ansible has no - control of when slack will get rid of the old API. When slack does - that the old format will stop working. ** Please keep in mind the tokens - are not the API tokens but are the webhook tokens. In slack these are - found in the webhook URL which are obtained under the apps and integrations. - The incoming webhooks can be added in that area. In some cases this may - be locked by your Slack admin and you must request access. It is there - that the incoming webhooks can be added. The key is on the end of the - URL given to you in that section." - - "WebAPI token: - Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-) - or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. - See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." - required: true - msg: - type: str - description: - - Message to send. Note that the module does not handle escaping characters. - Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &) before sending. - See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). - thread_id: - description: - - Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading - type: str - message_id: - description: - - Optional. Message ID to edit, instead of posting a new message. - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). - type: str - version_added: 1.2.0 - username: - type: str - description: - - This is the sender of the message. - default: "Ansible" - icon_url: - type: str - description: - - Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico)) - default: https://www.ansible.com/favicon.ico - icon_emoji: - type: str - description: - - Emoji for the message sender. See Slack documentation for options. - (if I(icon_emoji) is set, I(icon_url) will not be used) - link_names: - type: int - description: - - Automatically create links for channels and usernames in I(msg). - default: 1 - choices: - - 1 - - 0 - parse: - type: str - description: - - Setting for the message parser at Slack - choices: - - 'full' - - 'none' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - color: - type: str - description: - - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message. 
- - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value. - - Specifying value in hex is supported since Ansible 2.8. - default: 'normal' - attachments: - type: list - elements: dict - description: - - Define a list of attachments. This list mirrors the Slack JSON API. - - For more information, see U(https://api.slack.com/docs/attachments). - blocks: - description: - - Define a list of blocks. This list mirrors the Slack JSON API. - - For more information, see U(https://api.slack.com/block-kit). - type: list - elements: dict - version_added: 1.0.0 -""" - -EXAMPLES = """ -- name: Send notification message via Slack - community.general.slack: - token: thetoken/generatedby/slack - msg: '{{ inventory_hostname }} completed' - delegate_to: localhost - -- name: Send notification message via Slack all options - community.general.slack: - token: thetoken/generatedby/slack - msg: '{{ inventory_hostname }} completed' - channel: '#ansible' - thread_id: '1539917263.000100' - username: 'Ansible on {{ inventory_hostname }}' - icon_url: http://www.example.com/some-image-file.png - link_names: 0 - parse: 'none' - delegate_to: localhost - -- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack - community.general.slack: - token: thetoken/generatedby/slack - msg: '{{ inventory_hostname }} is alive!' - color: good - username: '' - icon_url: '' - -- name: Insert a color bar in front of the message with valid hex color value - community.general.slack: - token: thetoken/generatedby/slack - msg: 'This message uses color in hex value' - color: '#00aacc' - username: '' - icon_url: '' - -- name: Use the attachments API - community.general.slack: - token: thetoken/generatedby/slack - attachments: - - text: Display my system load on host A and B - color: '#ff00dd' - title: System load - fields: - - title: System A - value: "load average: 0,74, 0,66, 0,63" - short: True - - title: System B - value: 'load average: 5,16, 4,64, 2,43' - short: True - -- name: Use the blocks API - community.general.slack: - token: thetoken/generatedby/slack - blocks: - - type: section - text: - type: mrkdwn - text: |- - *System load* - Display my system load on host A and B - - type: context - elements: - - type: mrkdwn - text: |- - *System A* - load average: 0,74, 0,66, 0,63 - - type: mrkdwn - text: |- - *System B* - load average: 5,16, 4,64, 2,43 - -- name: Send a message with a link using Slack markup - community.general.slack: - token: thetoken/generatedby/slack - msg: We sent this message using ! - -- name: Send a message with angle brackets and ampersands - community.general.slack: - token: thetoken/generatedby/slack - msg: This message has <brackets> & ampersands in plain text. - -- name: Initial Threaded Slack message - community.general.slack: - channel: '#ansible' - token: xoxb-1234-56789abcdefghijklmnop - msg: 'Starting a thread with my initial post.' - register: slack_response -- name: Add more info to thread - community.general.slack: - channel: '#ansible' - token: xoxb-1234-56789abcdefghijklmnop - thread_id: "{{ slack_response['ts'] }}" - color: good - msg: 'And this is my threaded response!' - -- name: Send a message to be edited later on - community.general.slack: - token: thetoken/generatedby/slack - channel: '#ansible' - msg: Deploying something... 
- register: slack_response -- name: Edit message - community.general.slack: - token: thetoken/generatedby/slack - channel: "{{ slack_response.channel }}" - msg: Deployment complete! - message_id: "{{ slack_response.ts }}" -""" - -import re -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - -OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' -SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage' -SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update' -SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history' - -# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. -# We do not escape other characters used as Slack metacharacters (e.g. &, <, >). -escape_table = { - '"': "\"", - "'": "\'", -} - - -def is_valid_hex_color(color_choice): - if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice): - return True - return False - - -def escape_quotes(text): - """Backslash any quotes within text.""" - return "".join(escape_table.get(c, c) for c in text) - - -def recursive_escape_quotes(obj, keys): - """Recursively escape quotes inside supplied keys inside block kit objects""" - if isinstance(obj, dict): - escaped = {} - for k, v in obj.items(): - if isinstance(v, str) and k in keys: - escaped[k] = escape_quotes(v) - else: - escaped[k] = recursive_escape_quotes(v, keys) - elif isinstance(obj, list): - escaped = [recursive_escape_quotes(v, keys) for v in obj] - else: - escaped = obj - return escaped - - -def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, - parse, color, attachments, blocks, message_id): - payload = {} - if color == "normal" and text is not None: - payload = dict(text=escape_quotes(text)) - elif text is not None: - # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it. 
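-        # (Added note) The payload then takes the shape
-        #   {"attachments": [{"text": ..., "color": ..., "mrkdwn_in": ["text"]}]}
-        # rather than a plain top-level {"text": ...} body.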
- payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) - if channel is not None: - if channel.startswith(('#', '@', 'C0')): - payload['channel'] = channel - else: - payload['channel'] = '#' + channel - if thread_id is not None: - payload['thread_ts'] = thread_id - if username is not None: - payload['username'] = username - if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji - else: - payload['icon_url'] = icon_url - if link_names is not None: - payload['link_names'] = link_names - if parse is not None: - payload['parse'] = parse - if message_id is not None: - payload['ts'] = message_id - - if attachments is not None: - if 'attachments' not in payload: - payload['attachments'] = [] - - if attachments is not None: - attachment_keys_to_escape = [ - 'title', - 'text', - 'author_name', - 'pretext', - 'fallback', - ] - for attachment in attachments: - for key in attachment_keys_to_escape: - if key in attachment: - attachment[key] = escape_quotes(attachment[key]) - - if 'fallback' not in attachment: - attachment['fallback'] = attachment['text'] - - payload['attachments'].append(attachment) - - if blocks is not None: - block_keys_to_escape = [ - 'text', - 'alt_text' - ] - payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape) - - return payload - - -def get_slack_message(module, token, channel, ts): - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', - 'Authorization': 'Bearer ' + token - } - qs = urlencode({ - 'channel': channel, - 'ts': ts, - 'limit': 1, - 'inclusive': 'true', - }) - url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs - response, info = fetch_url(module=module, url=url, headers=headers, method='GET') - if info['status'] != 200: - module.fail_json(msg="failed to get slack message") - data = module.from_json(response.read()) - if len(data['messages']) < 1: - module.fail_json(msg="no messages matching ts: %s" % ts) - if len(data['messages']) > 1: - module.fail_json(msg="more than 1 message matching ts: %s" % ts) - return data['messages'][0] - - -def do_notify_slack(module, domain, token, payload): - use_webapi = False - if token.count('/') >= 2: - # New style webhook token - slack_uri = SLACK_INCOMING_WEBHOOK % token - elif re.match(r'^xox[abp]-\S+$', token): - slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI - use_webapi = True - else: - if not domain: - module.fail_json(msg="Slack has updated its webhook API. 
You need to specify a token of the form " - "XXXX/YYYY/ZZZZ in your playbook") - slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) - - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', - } - if use_webapi: - headers['Authorization'] = 'Bearer ' + token - - data = module.jsonify(payload) - response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data) - - if info['status'] != 200: - if use_webapi: - obscured_incoming_webhook = slack_uri - else: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]' - module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) - - # each API requires different handling - if use_webapi: - return module.from_json(response.read()) - else: - return {'webhook': 'ok'} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - domain=dict(type='str'), - token=dict(type='str', required=True, no_log=True), - msg=dict(type='str'), - channel=dict(type='str'), - thread_id=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - icon_emoji=dict(type='str'), - link_names=dict(type='int', default=1, choices=[0, 1]), - parse=dict(type='str', choices=['none', 'full']), - validate_certs=dict(default=True, type='bool'), - color=dict(type='str', default='normal'), - attachments=dict(type='list', elements='dict'), - blocks=dict(type='list', elements='dict'), - message_id=dict(type='str'), - ), - supports_check_mode=True, - ) - - domain = module.params['domain'] - token = module.params['token'] - text = module.params['msg'] - channel = module.params['channel'] - thread_id = module.params['thread_id'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - parse = module.params['parse'] - color = module.params['color'] - attachments = module.params['attachments'] - blocks = module.params['blocks'] - message_id = module.params['message_id'] - - color_choices = ['normal', 'good', 'warning', 'danger'] - if color not in color_choices and not is_valid_hex_color(color): - module.fail_json(msg="Color value specified should be either one of %r " - "or any valid hex value with length 3 or 6." % color_choices) - - changed = True - - # if updating an existing message, we can check if there's anything to update - if message_id is not None: - changed = False - msg = get_slack_message(module, token, channel, message_id) - for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): - if msg.get(key) != module.params.get(key): - changed = True - break - # if check mode is active, we shouldn't do anything regardless. - # if changed=False, we don't need to do anything, so don't do it. 
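-        # (Added note) The early exit below still reports the message's ts and
-        # channel, so callers can keep chaining edits to the same message.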
- if module.check_mode or not changed: - module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel']) - elif module.check_mode: - module.exit_json(changed=changed) - - payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, - parse, color, attachments, blocks, message_id) - slack_response = do_notify_slack(module, domain, token, payload) - - if 'ok' in slack_response: - # Evaluate WebAPI response - if slack_response['ok']: - # return payload as a string for backwards compatibility - payload_json = module.jsonify(payload) - module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'], - api=slack_response, payload=payload_json) - else: - module.fail_json(msg="Slack API error", error=slack_response['error']) - else: - # Exit with plain OK from WebHook, since we don't have more information - # If we get 200 from webhook, the only answer is OK - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/notification/syslogger.py deleted file mode 100644 index 7627f35985..0000000000 --- a/plugins/modules/notification/syslogger.py +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Tim Rightnour -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: syslogger -short_description: Log messages in the syslog -description: - - Uses syslog to add log entries to the host. -options: - msg: - type: str - description: - - This is the message to place in syslog. - required: True - priority: - type: str - description: - - Set the log priority. - choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] - default: "info" - facility: - type: str - description: - - Set the log facility. - choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news", - "uucp", "cron", "syslog", "local0", "local1", "local2", - "local3", "local4", "local5", "local6", "local7" ] - default: "daemon" - log_pid: - description: - - Log the PID in brackets. - type: bool - default: False - ident: - description: - - Specify the name of application name which is sending the log to syslog. 
- type: str - default: 'ansible_syslogger' - version_added: '0.2.0' -author: - - Tim Rightnour (@garbled1) -''' - -EXAMPLES = r''' -- name: Simple Usage - community.general.syslogger: - msg: "I will end up as daemon.info" - -- name: Send a log message with err priority and user facility with log_pid - community.general.syslogger: - msg: "Hello from Ansible" - priority: "err" - facility: "user" - log_pid: true - -- name: Specify the name of application which is sending log message - community.general.syslogger: - ident: "MyApp" - msg: "I want to believe" - priority: "alert" -''' - -RETURN = r''' -ident: - description: Name of application sending the message to log - returned: always - type: str - sample: "ansible_syslogger" - version_added: '0.2.0' -priority: - description: Priority level - returned: always - type: str - sample: "daemon" -facility: - description: Syslog facility - returned: always - type: str - sample: "info" -log_pid: - description: Log PID status - returned: always - type: bool - sample: True -msg: - description: Message sent to syslog - returned: always - type: str - sample: "Hello from Ansible" -''' - -import syslog -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def get_facility(facility): - return { - 'kern': syslog.LOG_KERN, - 'user': syslog.LOG_USER, - 'mail': syslog.LOG_MAIL, - 'daemon': syslog.LOG_DAEMON, - 'auth': syslog.LOG_AUTH, - 'lpr': syslog.LOG_LPR, - 'news': syslog.LOG_NEWS, - 'uucp': syslog.LOG_UUCP, - 'cron': syslog.LOG_CRON, - 'syslog': syslog.LOG_SYSLOG, - 'local0': syslog.LOG_LOCAL0, - 'local1': syslog.LOG_LOCAL1, - 'local2': syslog.LOG_LOCAL2, - 'local3': syslog.LOG_LOCAL3, - 'local4': syslog.LOG_LOCAL4, - 'local5': syslog.LOG_LOCAL5, - 'local6': syslog.LOG_LOCAL6, - 'local7': syslog.LOG_LOCAL7 - }.get(facility, syslog.LOG_DAEMON) - - -def get_priority(priority): - return { - 'emerg': syslog.LOG_EMERG, - 'alert': syslog.LOG_ALERT, - 'crit': syslog.LOG_CRIT, - 'err': syslog.LOG_ERR, - 'warning': syslog.LOG_WARNING, - 'notice': syslog.LOG_NOTICE, - 'info': syslog.LOG_INFO, - 'debug': syslog.LOG_DEBUG - }.get(priority, syslog.LOG_INFO) - - -def main(): - # define the available arguments/parameters that a user can pass to - # the module - module_args = dict( - ident=dict(type='str', default='ansible_syslogger'), - msg=dict(type='str', required=True), - priority=dict(type='str', required=False, - choices=["emerg", "alert", "crit", "err", "warning", - "notice", "info", "debug"], - default='info'), - facility=dict(type='str', required=False, - choices=["kern", "user", "mail", "daemon", "auth", - "lpr", "news", "uucp", "cron", "syslog", - "local0", "local1", "local2", "local3", - "local4", "local5", "local6", "local7"], - default='daemon'), - log_pid=dict(type='bool', required=False, default=False) - ) - - module = AnsibleModule( - argument_spec=module_args, - ) - - result = dict( - changed=False, - ident=module.params['ident'], - priority=module.params['priority'], - facility=module.params['facility'], - log_pid=module.params['log_pid'], - msg=module.params['msg'] - ) - - # do the logging - try: - syslog.openlog(module.params['ident'], - syslog.LOG_PID if module.params['log_pid'] else 0, - get_facility(module.params['facility'])) - syslog.syslog(get_priority(module.params['priority']), - module.params['msg']) - syslog.closelog() - result['changed'] = True - - except Exception as exc: - module.fail_json(error='Failed to write to syslog %s' % to_native(exc), 
exception=traceback.format_exc(), **result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/telegram.py b/plugins/modules/notification/telegram.py deleted file mode 100644 index 4960874ddb..0000000000 --- a/plugins/modules/notification/telegram.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Artem Feofanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: telegram -author: - - "Artem Feofanov (@tyouxa)" - - "Nikolai Lomov (@lomserman)" - -short_description: module for sending notifications via telegram - -description: - - Send notifications via telegram bot, to a verified group or user. - - Also, the user may try to use any other telegram bot API method, if you specify I(api_method) argument. -notes: - - You will require a telegram account and create telegram bot to use this module. -options: - token: - type: str - description: - - Token identifying your telegram bot. - required: true - api_method: - type: str - description: - - Bot API method. - - For reference, see U(https://core.telegram.org/bots/api). - default: SendMessage - version_added: 2.0.0 - api_args: - type: dict - description: - - Any parameters for the method. - - For reference to default method, C(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage). - version_added: 2.0.0 - -''' - -EXAMPLES = """ - -- name: Send notify to Telegram - community.general.telegram: - token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' - api_args: - chat_id: 000000 - parse_mode: "markdown" - text: "Your precious application has been deployed: https://example.com" - disable_web_page_preview: True - disable_notification: True - -- name: Forward message to someone - community.general.telegram: - token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' - api_method: forwardMessage - api_args: - chat_id: 000000 - from_chat_id: 111111 - disable_notification: True - message_id: '{{ saved_msg_id }}' -""" - -RETURN = """ - -msg: - description: The message you attempted to send - returned: success - type: str - sample: "Ansible task finished" -telegram_error: - description: Error message gotten from Telegram API - returned: failure - type: str - sample: "Bad Request: message text is empty" -""" - -import json - -from ansible.module_utils.basic import AnsibleModule -# noinspection PyUnresolvedReferences -from ansible.module_utils.six.moves.urllib.parse import quote -from ansible.module_utils.urls import fetch_url - - -def main(): - module = AnsibleModule( - argument_spec=dict( - token=dict(type='str', required=True, no_log=True), - api_args=dict(type='dict'), - api_method=dict(type="str", default="SendMessage"), - ), - supports_check_mode=True - ) - - token = quote(module.params.get('token')) - api_args = module.params.get('api_args') or {} - api_method = module.params.get('api_method') - # filling backward compatibility args - api_args['chat_id'] = api_args.get('chat_id') - api_args['parse_mode'] = api_args.get('parse_mode') - api_args['text'] = api_args.get('text') - - if api_args['parse_mode'] == 'plain': - del api_args['parse_mode'] - - url = 'https://api.telegram.org/bot{token}/{api_method}'.format(token=token, api_method=api_method) - - if module.check_mode: - module.exit_json(changed=False) - - response, info = fetch_url(module, url, method="POST", data=json.dumps(api_args), - 
headers={'Content-Type': 'application/json'}) - if info['status'] == 200: - module.exit_json(changed=True) - elif info['status'] == -1: - # SSL errors, connection problems, etc. - module.fail_json(msg="Failed to send message", info=info, response=response) - else: - body = json.loads(info['body']) - module.fail_json( - msg="Failed to send message, return status = {status}\n" - "url = {api_url}\n" - "api_args = {api_args}".format( - status=info['status'], api_url=url, api_args=api_args - ), - telegram_error=body['description'], - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/twilio.py b/plugins/modules/notification/twilio.py deleted file mode 100644 index 88851a6ad3..0000000000 --- a/plugins/modules/notification/twilio.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Matt Makai -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: twilio -short_description: Sends a text message to a mobile phone through Twilio. -description: - - Sends a text message to a phone number through the Twilio messaging API. -notes: - - This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails. - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need a Twilio account with - a purchased or verified phone number to send the text message. -options: - account_sid: - type: str - description: - user's Twilio account token found on the account page - required: true - auth_token: - type: str - description: user's Twilio authentication token - required: true - msg: - type: str - description: - the body of the text message - required: true - to_numbers: - type: list - elements: str - description: - one or more phone numbers to send the text message to, - format +15551112222 - required: true - aliases: [ to_number ] - from_number: - type: str - description: - the Twilio number to send the text message from, format +15551112222 - required: true - media_url: - type: str - description: - a URL with a picture, video or sound clip to send with an MMS - (multimedia message) instead of a plain SMS - required: false - -author: "Matt Makai (@makaimc)" -''' - -EXAMPLES = ''' -# send an SMS about the build status to (555) 303 5681 -# note: replace account_sid and auth_token values with your credentials -# and you have to have the 'from_number' on your Twilio account -- name: Send a text message to a mobile phone through Twilio - community.general.twilio: - msg: All servers with webserver role are now configured. - account_sid: ACXXXXXXXXXXXXXXXXX - auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 - delegate_to: localhost - -# send an SMS to multiple phone numbers about the deployment -# note: replace account_sid and auth_token values with your credentials -# and you have to have the 'from_number' on your Twilio account -- name: Send a text message to a mobile phone through Twilio - community.general.twilio: - msg: This server configuration is now complete. 
- account_sid: ACXXXXXXXXXXXXXXXXX - auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15553258899 - to_numbers: - - +15551113232 - - +12025551235 - - +19735559010 - delegate_to: localhost - -# send an MMS to a single recipient with an update on the deployment -# and an image of the results -# note: replace account_sid and auth_token values with your credentials -# and you have to have the 'from_number' on your Twilio account -- name: Send a text message to a mobile phone through Twilio - community.general.twilio: - msg: Deployment complete! - account_sid: ACXXXXXXXXXXXXXXXXX - auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 - media_url: https://demo.twilio.com/logo.png - delegate_to: localhost -''' - -# ======================================= -# twilio module support methods -# -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -def post_twilio_api(module, account_sid, auth_token, msg, from_number, - to_number, media_url=None): - URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ - % (account_sid,) - AGENT = "Ansible" - - data = {'From': from_number, 'To': to_number, 'Body': msg} - if media_url: - data['MediaUrl'] = media_url - encoded_data = urlencode(data) - - headers = {'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json', - } - - # Hack module params to have the Basic auth params that fetch_url expects - module.params['url_username'] = account_sid.replace('\n', '') - module.params['url_password'] = auth_token.replace('\n', '') - - return fetch_url(module, URI, data=encoded_data, headers=headers) - - -# ======================================= -# Main -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_sid=dict(required=True), - auth_token=dict(required=True, no_log=True), - msg=dict(required=True), - from_number=dict(required=True), - to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'), - media_url=dict(default=None, required=False), - ), - supports_check_mode=True - ) - - account_sid = module.params['account_sid'] - auth_token = module.params['auth_token'] - msg = module.params['msg'] - from_number = module.params['from_number'] - to_numbers = module.params['to_numbers'] - media_url = module.params['media_url'] - - for number in to_numbers: - r, info = post_twilio_api(module, account_sid, auth_token, msg, - from_number, number, media_url) - if info['status'] not in [200, 201]: - body_message = "unknown error" - if 'body' in info: - body = module.from_json(info['body']) - body_message = body['message'] - module.fail_json(msg="unable to send message to %s: %s" % (number, body_message)) - - module.exit_json(msg=msg, changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/typetalk.py b/plugins/modules/notification/typetalk.py deleted file mode 100644 index 6f8e4e8b9a..0000000000 --- a/plugins/modules/notification/typetalk.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: typetalk -short_description: Send a message to typetalk -description: - - Send a message to typetalk using typetalk 
API -options: - client_id: - type: str - description: - - OAuth2 client ID - required: true - client_secret: - type: str - description: - - OAuth2 client secret - required: true - topic: - type: int - description: - - topic id to post message - required: true - msg: - type: str - description: - - message body - required: true -requirements: [ json ] -author: "Takashi Someda (@tksmd)" -''' - -EXAMPLES = ''' -- name: Send a message to typetalk - community.general.typetalk: - client_id: 12345 - client_secret: 12345 - topic: 1 - msg: install completed -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url, ConnectionError - - -def do_request(module, url, params, headers=None): - data = urlencode(params) - if headers is None: - headers = dict() - headers = dict(headers, **{ - 'User-Agent': 'Ansible/typetalk module', - }) - r, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] != 200: - exc = ConnectionError(info['msg']) - exc.code = info['status'] - raise exc - return r - - -def get_access_token(module, client_id, client_secret): - params = { - 'client_id': client_id, - 'client_secret': client_secret, - 'grant_type': 'client_credentials', - 'scope': 'topic.post' - } - res = do_request(module, 'https://typetalk.com/oauth2/access_token', params) - return json.load(res)['access_token'] - - -def send_message(module, client_id, client_secret, topic, msg): - """ - send message to typetalk - """ - try: - access_token = get_access_token(module, client_id, client_secret) - url = 'https://typetalk.com/api/v1/topics/%d' % topic - headers = { - 'Authorization': 'Bearer %s' % access_token, - } - do_request(module, url, {'message': msg}, headers) - return True, {'access_token': access_token} - except ConnectionError as e: - return False, e - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - client_id=dict(required=True), - client_secret=dict(required=True, no_log=True), - topic=dict(required=True, type='int'), - msg=dict(required=True), - ), - supports_check_mode=False - ) - - if not json: - module.fail_json(msg="json module is required") - - client_id = module.params["client_id"] - client_secret = module.params["client_secret"] - topic = module.params["topic"] - msg = module.params["msg"] - - res, error = send_message(module, client_id, client_secret, topic, msg) - if not res: - module.fail_json(msg='fail to send message with response code %s' % error.code) - - module.exit_json(changed=True, topic=topic, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/npm.py b/plugins/modules/npm.py deleted file mode 120000 index f661431a2c..0000000000 --- a/plugins/modules/npm.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/npm.py \ No newline at end of file diff --git a/plugins/modules/npm.py b/plugins/modules/npm.py new file mode 100644 index 0000000000..7779f326aa --- /dev/null +++ b/plugins/modules/npm.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: npm +short_description: Manage node.js packages with npm +description: + - Manage node.js packages with Node Package Manager (npm). 
+author: "Chris Hoffman (@chrishoffman)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of a node.js library to install. + type: str + required: false + path: + description: + - The base path where to install the node.js libraries. + type: path + required: false + version: + description: + - The version to be installed. + type: str + required: false + global: + description: + - Install the node.js library globally. + required: false + default: false + type: bool + executable: + description: + - The executable location for npm. + - This is useful if you are using a version manager, such as nvm. + type: path + required: false + ignore_scripts: + description: + - Use the C(--ignore-scripts) flag when installing. + required: false + type: bool + default: false + unsafe_perm: + description: + - Use the C(--unsafe-perm) flag when installing. + type: bool + default: false + ci: + description: + - Install packages based on package-lock file, same as running C(npm ci). + type: bool + default: false + production: + description: + - Install dependencies in production mode, excluding devDependencies. + required: false + type: bool + default: false + registry: + description: + - The registry to install modules from. + required: false + type: str + state: + description: + - The state of the node.js library. + required: false + type: str + default: present + choices: ["present", "absent", "latest"] + no_optional: + description: + - Use the C(--no-optional) flag when installing. + type: bool + default: false + version_added: 2.0.0 + no_bin_links: + description: + - Use the C(--no-bin-links) flag when installing. + type: bool + default: false + version_added: 2.5.0 + force: + description: + - Use the C(--force) flag when installing. + type: bool + default: false + version_added: 9.5.0 +requirements: + - npm installed in bin path (recommended /usr/local/bin) +""" + +EXAMPLES = r""" +- name: Install "coffee-script" node.js package. + community.general.npm: + name: coffee-script + path: /app/location + +- name: Install "coffee-script" node.js package on version 1.6.1. + community.general.npm: + name: coffee-script + version: '1.6.1' + path: /app/location + +- name: Install "coffee-script" node.js package globally. + community.general.npm: + name: coffee-script + global: true + +- name: Force Install "coffee-script" node.js package. + community.general.npm: + name: coffee-script + force: true + +- name: Remove the globally package "coffee-script". + community.general.npm: + name: coffee-script + global: true + state: absent + +- name: Install "coffee-script" node.js package from custom registry. + community.general.npm: + name: coffee-script + registry: 'http://registry.mysite.com' + +- name: Install packages based on package.json. + community.general.npm: + path: /app/location + +- name: Update packages based on package.json to their latest version. + community.general.npm: + path: /app/location + state: latest + +- name: Install packages based on package.json using the npm installed with nvm v0.10.1. 
+ community.general.npm: + path: /app/location + executable: /opt/nvm/v0.10.1/bin/npm + state: present +""" + +import json +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class Npm(object): + def __init__(self, module, **kwargs): + self.module = module + self.glbl = kwargs['glbl'] + self.name = kwargs['name'] + self.version = kwargs['version'] + self.path = kwargs['path'] + self.registry = kwargs['registry'] + self.production = kwargs['production'] + self.ignore_scripts = kwargs['ignore_scripts'] + self.unsafe_perm = kwargs['unsafe_perm'] + self.state = kwargs['state'] + self.no_optional = kwargs['no_optional'] + self.no_bin_links = kwargs['no_bin_links'] + self.force = kwargs['force'] + + if kwargs['executable']: + self.executable = kwargs['executable'].split(' ') + else: + self.executable = [module.get_bin_path('npm', True)] + + if kwargs['version'] and kwargs['state'] != 'absent': + self.name_version = self.name + '@' + str(kwargs['version']) + else: + self.name_version = self.name + + self.runner = CmdRunner( + module, + command=self.executable, + arg_formats=dict( + exec_args=cmd_runner_fmt.as_list(), + global_=cmd_runner_fmt.as_bool('--global'), + production=cmd_runner_fmt.as_bool('--production'), + ignore_scripts=cmd_runner_fmt.as_bool('--ignore-scripts'), + unsafe_perm=cmd_runner_fmt.as_bool('--unsafe-perm'), + name_version=cmd_runner_fmt.as_list(), + registry=cmd_runner_fmt.as_opt_val('--registry'), + no_optional=cmd_runner_fmt.as_bool('--no-optional'), + no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'), + force=cmd_runner_fmt.as_bool('--force'), + ) + ) + + def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + # If path is specified, cd into that path and run the command. 
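The arg_formats table declared above maps each module parameter to its rendering on the npm command line. As a rough pure-Python picture of the two formatter kinds used most here (the real implementation lives in community.general's cmd_runner module utils; these helpers are illustrative):

    def as_bool(flag):
        # True -> [flag], falsy -> []; used for '--global', '--force', '--no-optional', ...
        return lambda value: [flag] if value else []

    def as_opt_val(flag):
        # value -> [flag, value], None -> []; used for '--registry'.
        return lambda value: [] if value is None else [flag, str(value)]

    as_bool('--global')(True) + as_opt_val('--registry')('http://registry.mysite.com')
    # ['--global', '--registry', 'http://registry.mysite.com']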
+ cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + params = dict(self.module.params) + params['exec_args'] = args + params['global_'] = self.glbl + params['production'] = self.production and ('install' in args or 'update' in args or 'ci' in args) + params['name_version'] = self.name_version if add_package_name else None + + with self.runner( + "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links force", + check_rc=check_rc, cwd=cwd + ) as ctx: + rc, out, err = ctx.run(**params) + return out + + return '' + + def list(self): + cmd = ['list', '--json', '--long'] + + installed = list() + missing = list() + data = {} + try: + data = json.loads(self._exec(cmd, True, False, False) or '{}') + except (getattr(json, 'JSONDecodeError', ValueError)) as e: + self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) + if 'dependencies' in data: + for dep, props in data['dependencies'].items(): + + if 'missing' in props and props['missing']: + missing.append(dep) + elif 'invalid' in props and props['invalid']: + missing.append(dep) + else: + installed.append(dep) + if 'version' in props and props['version']: + dep_version = dep + '@' + str(props['version']) + installed.append(dep_version) + if self.name_version and self.name_version not in installed: + missing.append(self.name) + # Named dependency not installed + else: + missing.append(self.name) + + return installed, missing + + def install(self): + return self._exec(['install']) + + def ci_install(self): + return self._exec(['ci']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + def list_outdated(self): + outdated = list() + data = self._exec(['outdated'], True, False) + for dep in data.splitlines(): + if dep: + # node.js v0.10.22 changed the `npm outdated` module separator + # from "@" to " ". Split on both for backwards compatibility. 
+ pkg, other = re.split(r'\s|@', dep, 1) + outdated.append(pkg) + + return outdated + + +def main(): + arg_spec = dict( + name=dict(type='str'), + path=dict(type='path'), + version=dict(type='str'), + production=dict(default=False, type='bool'), + executable=dict(type='path'), + registry=dict(type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest']), + ignore_scripts=dict(default=False, type='bool'), + unsafe_perm=dict(default=False, type='bool'), + ci=dict(default=False, type='bool'), + no_optional=dict(default=False, type='bool'), + no_bin_links=dict(default=False, type='bool'), + force=dict(default=False, type='bool'), + ) + arg_spec['global'] = dict(default=False, type='bool') + module = AnsibleModule( + argument_spec=arg_spec, + required_if=[('state', 'absent', ['name'])], + supports_check_mode=True, + ) + + name = module.params['name'] + path = module.params['path'] + version = module.params['version'] + glbl = module.params['global'] + state = module.params['state'] + + if not path and not glbl: + module.fail_json(msg='path must be specified when not using global') + + npm = Npm(module, + name=name, + path=path, + version=version, + glbl=glbl, + production=module.params['production'], + executable=module.params['executable'], + registry=module.params['registry'], + ignore_scripts=module.params['ignore_scripts'], + unsafe_perm=module.params['unsafe_perm'], + state=state, + no_optional=module.params['no_optional'], + no_bin_links=module.params['no_bin_links'], + force=module.params['force']) + + changed = False + if module.params['ci']: + npm.ci_install() + changed = True + elif state == 'present': + installed, missing = npm.list() + if missing: + changed = True + npm.install() + elif state == 'latest': + installed, missing = npm.list() + outdated = npm.list_outdated() + if missing: + changed = True + npm.install() + if outdated: + changed = True + npm.update() + else: # absent + installed, missing = npm.list() + if name in installed: + changed = True + npm.uninstall() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py deleted file mode 120000 index 54f07bfa49..0000000000 --- a/plugins/modules/nsupdate.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/nsupdate.py \ No newline at end of file diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py new file mode 100644 index 0000000000..7d56924112 --- /dev/null +++ b/plugins/modules/nsupdate.py @@ -0,0 +1,517 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Marcin Skarbek +# Copyright (c) 2016, Andreas Olsson +# Copyright (c) 2017, Loic Blot +# +# This module was ported from https://github.com/mskarbek/ansible-nsupdate +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: nsupdate + +short_description: Manage DNS records +description: + - Create, update and remove DNS records using DDNS updates. +requirements: + - dnspython +author: "Loic Blot (@nerzhul)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Manage DNS record. + choices: ['present', 'absent'] + default: 'present' + type: str + server: + description: + - Apply DNS modification on this server, specified by IPv4 or IPv6 address. 
+    required: true
+    type: str
+  port:
+    description:
+      - Use this TCP port when connecting to O(server).
+    default: 53
+    type: int
+  key_name:
+    description:
+      - Use TSIG key name to authenticate against DNS O(server).
+    type: str
+  key_secret:
+    description:
+      - Use TSIG key secret, associated with O(key_name), to authenticate against O(server).
+    type: str
+  key_algorithm:
+    description:
+      - Specify key algorithm used by O(key_secret).
+    choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+    default: 'hmac-md5'
+    type: str
+  zone:
+    description:
+      - DNS record is modified on this O(zone).
+      - When omitted, DNS is queried to attempt finding the correct zone.
+    type: str
+  record:
+    description:
+      - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+    required: true
+    type: str
+  type:
+    description:
+      - Sets the record type.
+    default: 'A'
+    type: str
+  ttl:
+    description:
+      - Sets the record TTL.
+    default: 3600
+    type: int
+  value:
+    description:
+      - Sets the record value.
+    type: list
+    elements: str
+  protocol:
+    description:
+      - Sets the transport protocol (TCP or UDP). TCP is the recommended, more robust option.
+    default: 'tcp'
+    choices: ['tcp', 'udp']
+    type: str
+"""
+
+EXAMPLES = r"""
+- name: Add or modify ansible.example.org A to 192.168.1.1
+  community.general.nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "ansible"
+    value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
+  community.general.nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "ansible"
+    value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+  community.general.nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "puppet"
+    type: "CNAME"
+    state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+  community.general.nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    record: "1.1.168.192.in-addr.arpa."
+    type: "PTR"
+    value: "ansible.example.org."
+    state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+  community.general.nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    record: "1.1.168.192.in-addr.arpa."
+    type: "PTR"
+    state: absent
+"""
+
+RETURN = r"""
+record:
+  description: DNS record.
+  returned: success
+  type: str
+  sample: 'ansible'
+ttl:
+  description: DNS record TTL.
+  returned: success
+  type: int
+  sample: 86400
+type:
+  description: DNS record type.
+  returned: success
+  type: str
+  sample: 'CNAME'
+value:
+  description: DNS record value(s).
+  returned: success
+  type: list
+  sample: '192.168.1.1'
+zone:
+  description: DNS record zone.
+  returned: success
+  type: str
+  sample: 'example.org.'
+dns_rc:
+  description: C(dnspython) return code.
+  returned: always
+  type: int
+  sample: 4
+dns_rc_str:
+  description: C(dnspython) return code (string representation).
+ returned: always + type: str + sample: 'REFUSED' +""" + +import traceback + +from binascii import Error as binascii_error +from socket import error as socket_error + +DNSPYTHON_IMP_ERR = None +try: + import dns.update + import dns.query + import dns.tsigkeyring + import dns.message + import dns.resolver + + HAVE_DNSPYTHON = True +except ImportError: + DNSPYTHON_IMP_ERR = traceback.format_exc() + HAVE_DNSPYTHON = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class RecordManager(object): + def __init__(self, module): + self.module = module + + if module.params['key_name']: + try: + self.keyring = dns.tsigkeyring.from_text({ + module.params['key_name']: module.params['key_secret'] + }) + except TypeError: + module.fail_json(msg='Missing key_secret') + except binascii_error as e: + module.fail_json(msg='TSIG key error: %s' % to_native(e)) + else: + self.keyring = None + + if module.params['key_algorithm'] == 'hmac-md5': + self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT' + else: + self.algorithm = module.params['key_algorithm'] + + if module.params['zone'] is None: + if module.params['record'][-1] != '.': + self.module.fail_json(msg='record must be absolute when omitting zone parameter') + self.zone = self.lookup_zone() + else: + self.zone = module.params['zone'] + + if self.zone[-1] != '.': + self.zone += '.' + + if module.params['record'][-1] != '.': + self.fqdn = module.params['record'] + '.' + self.zone + else: + self.fqdn = module.params['record'] + + if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None: + self.value = list(map(self.txt_helper, self.module.params['value'])) + else: + self.value = self.module.params['value'] + + self.dns_rc = 0 + + def txt_helper(self, entry): + if entry[0] == '"' and entry[-1] == '"': + return entry + return '"{text}"'.format(text=entry) + + def lookup_zone(self): + name = dns.name.from_text(self.module.params['record']) + while True: + query = dns.message.make_query(name, dns.rdatatype.SOA) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: + self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % ( + self.module.params['server'], self.module.params['record'])) + # If the response contains an Answer SOA RR whose name matches the queried name, + # this is the name of the zone in which the record needs to be inserted. + for rr in lookup.answer: + if rr.rdtype == dns.rdatatype.SOA and rr.name == name: + return rr.name.to_text() + # If the response contains an Authority SOA RR whose name is a subdomain of the queried name, + # this SOA name is the zone in which the record needs to be inserted. 
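For orientation, when O(zone) is omitted this loop, together with the name.parent() step below, walks up the name hierarchy until some SOA answers. Roughly, for a record of 'www.ansible.example.org.', the first candidates queried are:

    def candidate_zones(fqdn):
        # Same parent walk that dns.name.parent() performs in lookup_zone().
        labels = fqdn.rstrip('.').split('.')
        for i in range(len(labels)):
            yield '.'.join(labels[i:]) + '.'

    list(candidate_zones('www.ansible.example.org.'))
    # ['www.ansible.example.org.', 'ansible.example.org.', 'example.org.', 'org.']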
+ for rr in lookup.authority: + if rr.rdtype == dns.rdatatype.SOA and name.fullcompare(rr.name)[0] == dns.name.NAMERELN_SUBDOMAIN: + return rr.name.to_text() + try: + name = name.parent() + except dns.name.NoParent: + self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record'])) + + def __do_update(self, update): + response = None + try: + if self.module.params['protocol'] == 'tcp': + response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + return response + + def create_or_update_record(self): + result = {'changed': False, 'failed': False} + + exists = self.record_exists() + if exists in [0, 2]: + if self.module.check_mode: + self.module.exit_json(changed=True) + + if exists == 0: + self.dns_rc = self.create_record() + if self.dns_rc != 0: + result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc + + elif exists == 2: + self.dns_rc = self.modify_record() + if self.dns_rc != 0: + result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc + + if self.dns_rc != 0: + result['failed'] = True + else: + result['changed'] = True + + else: + result['changed'] = False + + return result + + def create_record(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + for entry in self.value: + try: + update.add(self.module.params['record'], + self.module.params['ttl'], + self.module.params['type'], + entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + + response = self.__do_update(update) + return dns.message.Message.rcode(response) + + def modify_record(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + + if self.module.params['type'].upper() == 'NS': + # When modifying a NS record, Bind9 silently refuses to delete all the NS entries for a zone: + # > 09-May-2022 18:00:50.352 client @0x7fe7dd1f9568 192.168.1.3#45458/key rndc_ddns_ansible: + # > updating zone 'lab/IN': attempt to delete all SOA or NS records ignored + # https://gitlab.isc.org/isc-projects/bind9/-/blob/v9_18/lib/ns/update.c#L3304 + # Let's perform dns inserts and updates first, deletes after. 
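The effect of that ordering, sketched with plain sets (hypothetical zone contents):

    current = {'ns1.example.org.', 'ns2.example.org.'}
    desired = {'ns1.example.org.', 'ns3.example.org.'}
    entries_to_remove = current - desired  # {'ns2.example.org.'}
    # ns3 is added first and ns2 deleted last, so the zone never drops to zero
    # NS records and BIND does not reject the update.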
+ query = dns.message.make_query(self.module.params['record'], self.module.params['type']) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + + lookup_result = lookup.answer[0] if lookup.answer else lookup.authority[0] + entries_to_remove = [n.to_text() for n in lookup_result.items if n.to_text() not in self.value] + else: + update.delete(self.module.params['record'], self.module.params['type']) + + for entry in self.value: + try: + update.add(self.module.params['record'], + self.module.params['ttl'], + self.module.params['type'], + entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + + if self.module.params['type'].upper() == 'NS': + for entry in entries_to_remove: + update.delete(self.module.params['record'], self.module.params['type'], entry) + + response = self.__do_update(update) + + return dns.message.Message.rcode(response) + + def remove_record(self): + result = {'changed': False, 'failed': False} + + if self.record_exists() == 0: + return result + + # Check mode and record exists, declared fake change. 
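remove_record() here and create_or_update_record() above both branch on the three-way result of record_exists(), defined further down. As a summary sketch of those codes:

    # Meaning of record_exists() return values, per the logic in this file:
    EXISTS = {
        0: 'no matching record',            # present -> create, absent -> no-op
        1: 'record matches value and TTL',  # present -> no-op, absent -> delete
        2: 'record exists but differs',     # present -> modify
    }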
+ if self.module.check_mode: + self.module.exit_json(changed=True) + + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + update.delete(self.module.params['record'], self.module.params['type']) + + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + + if self.dns_rc != 0: + result['failed'] = True + result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc + else: + result['changed'] = True + + return result + + def record_exists(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + try: + update.present(self.module.params['record'], self.module.params['type']) + except dns.rdatatype.UnknownRdatatype as e: + self.module.fail_json(msg='Record error: {0}'.format(to_native(e))) + + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + if self.dns_rc == 0: + if self.module.params['state'] == 'absent': + return 1 + for entry in self.value: + try: + update.present(self.module.params['record'], self.module.params['type'], entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + if self.dns_rc == 0: + if self.ttl_changed(): + return 2 + else: + return 1 + else: + return 2 + else: + return 0 + + def ttl_changed(self): + query = dns.message.make_query(self.fqdn, self.module.params['type']) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + + if lookup.rcode() != dns.rcode.NOERROR: + self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') + + current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl + + return current_ttl != self.module.params['ttl'] + + +def main(): + tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', + 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] + + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + server=dict(required=True, type='str'), + port=dict(default=53, type='int'), + key_name=dict(type='str'), + key_secret=dict(type='str', no_log=True), + key_algorithm=dict(default='hmac-md5', choices=tsig_algs, type='str'), + zone=dict(type='str'), + record=dict(required=True, type='str'), + type=dict(default='A', type='str'), + ttl=dict(default=3600, type='int'), + value=dict(type='list', elements='str'), + protocol=dict(default='tcp', choices=['tcp', 'udp'], type='str') + ), + supports_check_mode=True + ) + + if not HAVE_DNSPYTHON: + module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR) + + if len(module.params["record"]) == 0: + module.fail_json(msg='record cannot be empty.') + + record = RecordManager(module) + 
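main() below reports both the numeric rcode (RV(dns_rc)) and its standard name (RV(dns_rc_str)); dnspython supplies the mapping, for example:

    import dns.rcode
    dns.rcode.to_text(0)  # 'NOERROR'
    dns.rcode.to_text(3)  # 'NXDOMAIN'
    dns.rcode.to_text(5)  # 'REFUSED'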
result = {} + if module.params["state"] == 'absent': + result = record.remove_record() + elif module.params["state"] == 'present': + result = record.create_or_update_record() + + result['dns_rc'] = record.dns_rc + result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc) + if result['failed']: + module.fail_json(**result) + else: + result['record'] = dict(zone=record.zone, + record=module.params['record'], + type=module.params['type'], + ttl=module.params['ttl'], + value=record.value) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ocapi_command.py b/plugins/modules/ocapi_command.py new file mode 100644 index 0000000000..91fb7ab5e6 --- /dev/null +++ b/plugins/modules/ocapi_command.py @@ -0,0 +1,270 @@ +#!/usr/bin/python + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ocapi_command +version_added: 6.3.0 +short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) +description: + - Builds OCAPI URIs locally and sends them to remote OOB controllers to perform an action. + - Manages OOB controller such as Indicator LED, Reboot, Power Mode, Firmware Update. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - Command to execute on OOB controller. + type: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + proxy_slot_number: + description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server. + type: int + update_image_path: + required: false + description: + - For O(command=FWUpload), the path on the local filesystem of the firmware update image. + type: str + job_name: + required: false + description: + - For O(command=DeleteJob) command, the name of the job to delete. + type: str + username: + required: true + description: + - Username for authenticating to OOB controller. + type: str + password: + required: true + description: + - Password for authenticating to OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. 
+ default: 10 + type: int + +author: "Mike Moerk (@mikemoerk)" +""" + +EXAMPLES = r""" +- name: Set the power state to low + community.general.ocapi_command: + category: Chassis + command: PowerModeLow + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set the power state to normal + community.general.ocapi_command: + category: Chassis + command: PowerModeNormal + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +- name: Set chassis indicator LED to on + community.general.ocapi_command: + category: Chassis + command: IndicatorLedOn + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Set chassis indicator LED to off + community.general.ocapi_command: + category: Chassis + command: IndicatorLedOff + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Reset Enclosure + community.general.ocapi_command: + category: Systems + command: PowerGracefulRestart + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Firmware Upload + community.general.ocapi_command: + category: Update + command: FWUpload + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + update_image_path: "/path/to/firmware.tar.gz" +- name: Firmware Update + community.general.ocapi_command: + category: Update + command: FWUpdate + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Firmware Activate + community.general.ocapi_command: + category: Update + command: FWActivate + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Delete Job + community.general.ocapi_command: + category: Jobs + command: DeleteJob + job_name: FirmwareUpdate + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" + +jobUri: + description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware + Activate. + returned: when supported + type: str + sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/" + +operationStatusId: + description: OCAPI State ID (see OCAPI documentation for possible values). 
+ returned: when supported + type: int + sample: 2 +""" + +from urllib.parse import urljoin +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils +from ansible.module_utils.common.text.converters import to_native + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal"], + "Systems": ["PowerGracefulRestart"], + "Update": ["FWUpload", "FWUpdate", "FWActivate"], + "Jobs": ["DeleteJob"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='str'), + job_name=dict(type='str'), + baseuri=dict(required=True, type='str'), + proxy_slot_number=dict(type='int'), + update_image_path=dict(type='str'), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + category = module.params['category'] + command = module.params['command'] + + # admin credentials used for authentication + creds = { + 'user': module.params['username'], + 'pswd': module.params['password'] + } + + # timeout + timeout = module.params['timeout'] + + base_uri = "https://" + module.params["baseuri"] + proxy_slot_number = module.params.get("proxy_slot_number") + ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that the command is valid + if command not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Chassis": + if command.startswith("IndicatorLed"): + result = ocapi_utils.manage_chassis_indicator_led(command) + elif command.startswith("PowerMode"): + result = ocapi_utils.manage_system_power(command) + elif category == "Systems": + if command.startswith("Power"): + result = ocapi_utils.manage_system_power(command) + elif category == "Update": + if command == "FWUpload": + update_image_path = module.params.get("update_image_path") + if update_image_path is None: + module.fail_json(msg=to_native("Missing update_image_path.")) + result = ocapi_utils.upload_firmware_image(update_image_path) + elif command == "FWUpdate": + result = ocapi_utils.update_firmware_image() + elif command == "FWActivate": + result = ocapi_utils.activate_firmware_image() + elif category == "Jobs": + if command == "DeleteJob": + job_name = module.params.get("job_name") + if job_name is None: + module.fail_json("Missing job_name") + job_uri = urljoin(base_uri, "Jobs/" + job_name) + result = ocapi_utils.delete_job(job_uri) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + kwargs = { + "changed": changed, + "session": session, + "msg": "Action was successful." if not module.check_mode else result.get( + "msg", "No action performed in check mode." 
+        )
+    }
+    result_keys = [result_key for result_key in result if result_key not in kwargs]
+    for result_key in result_keys:
+        kwargs[result_key] = result[result_key]
+    module.exit_json(**kwargs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/ocapi_info.py b/plugins/modules/ocapi_info.py
new file mode 100644
index 0000000000..3eb0422054
--- /dev/null
+++ b/plugins/modules/ocapi_info.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: ocapi_info
+version_added: 6.3.0
+short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
+description:
+  - Builds OCAPI URIs locally and sends them to remote OOB controllers to get information back.
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  category:
+    required: true
+    description:
+      - Category to execute on OOB controller.
+    type: str
+  command:
+    required: true
+    description:
+      - Command to execute on OOB controller.
+    type: str
+  baseuri:
+    required: true
+    description:
+      - Base URI of OOB controller.
+    type: str
+  proxy_slot_number:
+    description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
+    type: int
+  username:
+    required: true
+    description:
+      - Username for authenticating to OOB controller.
+    type: str
+  password:
+    required: true
+    description:
+      - Password for authenticating to OOB controller.
+    type: str
+  timeout:
+    description:
+      - Timeout in seconds for URL requests to OOB controller.
+    default: 10
+    type: int
+  job_name:
+    description:
+      - Name of job for fetching status.
+    type: str
+
+
+author: "Mike Moerk (@mikemoerk)"
+"""
+
+EXAMPLES = r"""
+- name: Get job status
+  community.general.ocapi_info:
+    category: Jobs
+    command: JobStatus
+    baseuri: "iom1.wdc.com"
+    job_name: FirmwareUpdate
+    username: "{{ username }}"
+    password: "{{ password }}"
+"""
+
+RETURN = r"""
+msg:
+  description: Message with action result or error description.
+  returned: always
+  type: str
+  sample: "Action was successful"
+
+percentComplete:
+  description: Percent complete of the relevant operation. Applies to O(command=JobStatus).
+  returned: when supported
+  type: int
+  sample: 99
+
+operationStatus:
+  description: Status of the relevant operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
+  returned: when supported
+  type: str
+  sample: "Activate needed"
+
+operationStatusId:
+  description: Integer value of status (corresponds to operationStatus). Applies to O(command=JobStatus). See OCAPI documentation
+    for details.
+  returned: when supported
+  type: int
+  sample: 65540
+
+operationHealth:
+  description: Health of the operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
+  returned: when supported
+  type: str
+  sample: "OK"
+
+operationHealthId:
+  description: >-
+    Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus). See OCAPI
+    documentation for details.
+  returned: when supported
+  type: int
+  sample: 5
+
+details:
+  description: Details of the relevant operation. Applies to O(command=JobStatus).
+ returned: when supported + type: list + elements: str + +status: + description: Dictionary containing status information. See OCAPI documentation for details. + returned: when supported + type: dict + sample: + { + "Details": [ + "None" + ], + "Health": [ + { + "ID": 5, + "Name": "OK" + } + ], + "State": { + "ID": 16, + "Name": "In service" + } + } +""" + +from urllib.parse import urljoin + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils +from ansible.module_utils.common.text.converters import to_native + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Jobs": ["JobStatus"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='str'), + job_name=dict(type='str'), + baseuri=dict(required=True, type='str'), + proxy_slot_number=dict(type='int'), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + category = module.params['category'] + command = module.params['command'] + + # admin credentials used for authentication + creds = { + 'user': module.params['username'], + 'pswd': module.params['password'] + } + + # timeout + timeout = module.params['timeout'] + + base_uri = "https://" + module.params["baseuri"] + proxy_slot_number = module.params.get("proxy_slot_number") + ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that the command is valid + if command not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Jobs": + if command == "JobStatus": + if module.params.get("job_name") is None: + module.fail_json(msg=to_native( + "job_name required for JobStatus command.")) + job_uri = urljoin(base_uri, 'Jobs/' + module.params["job_name"]) + result = ocapi_utils.get_job_status(job_uri) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = False + session = result.get('session', dict()) + kwargs = { + "changed": changed, + "session": session, + "msg": "Action was successful." if not module.check_mode else result.get( + "msg", "No action performed in check mode." + ) + } + result_keys = [result_key for result_key in result if result_key not in kwargs] + for result_key in result_keys: + kwargs[result_key] = result[result_key] + module.exit_json(**kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py deleted file mode 120000 index 561d79304a..0000000000 --- a/plugins/modules/oci_vcn.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/oracle/oci_vcn.py \ No newline at end of file diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py new file mode 100644 index 0000000000..ef7d7c4994 --- /dev/null +++ b/plugins/modules/oci_vcn.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# Copyright (c) 2017, 2018, Oracle and/or its affiliates. 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: oci_vcn
+short_description: Manage Virtual Cloud Networks (VCN) in OCI
+deprecated:
+  removed_in: 13.0.0
+  why: Superseded by official Oracle collection.
+  alternative: Use module C(oci_network_vcn) from the C(oracle.oci) collection.
+description:
+  - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI. The complete Oracle Cloud
+    Infrastructure Ansible Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases).
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  cidr_block:
+    description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present).
+    type: str
+    required: false
+  compartment_id:
+    description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This
+      option is mutually exclusive with O(vcn_id).
+    type: str
+  display_name:
+    description: A user-friendly name. Does not have to be unique, and it is changeable.
+    type: str
+    aliases: ['name']
+  dns_label:
+    description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully
+      qualified domain name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)).
+      Not required to be unique, but it is a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric
+      string that begins with a letter. The value cannot be changed.
+    type: str
+  state:
+    description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN.
+    type: str
+    default: present
+    choices: ['present', 'absent']
+  vcn_id:
+    description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present).
+      This option is mutually exclusive with O(compartment_id).
+    type: str
+    aliases: ['id']
+author: "Rohit Chaware (@rohitChaware)"
+extends_documentation_fragment:
+  - community.general.oracle
+  - community.general.oracle_creatable_resource
+  - community.general.oracle_wait_options
+  - community.general.oracle_tags
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Create a VCN
+  community.general.oci_vcn:
+    cidr_block: '10.0.0.0/16'
+    compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
+    display_name: my_vcn
+    dns_label: ansiblevcn
+
+- name: Update the specified VCN's display name
+  community.general.oci_vcn:
+    vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+    display_name: ansible_vcn
+
+- name: Delete the specified VCN
+  community.general.oci_vcn:
+    vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+    state: absent
+"""
+
+RETURN = r"""
+vcn:
+  description: Information about the VCN.
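+# create_vcn() below fills the SDK model by reflecting over its attribute_map,
+# so any module parameter that shares a name with a model attribute is copied
+# across. A sketch of the loop used in the code (CreateVcnDetails comes from
+# the oci SDK):
+#
+#   details = CreateVcnDetails()
+#   for attr in details.attribute_map:        # cidr_block, display_name, ...
+#       if attr in module.params:
+#           setattr(details, attr, module.params[attr])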
+  returned: On successful create and update operation
+  type: dict
+  sample:
+    {
+      "cidr_block": "10.0.0.0/16",
+      "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+      "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+      "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+      "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+      "display_name": "ansible_vcn",
+      "dns_label": "ansiblevcn",
+      "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+      "lifecycle_state": "AVAILABLE",
+      "time_created": "2017-11-13T20:22:40.626000+00:00",
+      "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+    }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
+
+try:
+    from oci.core.virtual_network_client import VirtualNetworkClient
+    from oci.core.models import CreateVcnDetails
+    from oci.core.models import UpdateVcnDetails
+
+    HAS_OCI_PY_SDK = True
+except ImportError:
+    HAS_OCI_PY_SDK = False
+
+
+def delete_vcn(virtual_network_client, module):
+    result = oci_utils.delete_and_wait(
+        resource_type="vcn",
+        client=virtual_network_client,
+        get_fn=virtual_network_client.get_vcn,
+        kwargs_get={"vcn_id": module.params["vcn_id"]},
+        delete_fn=virtual_network_client.delete_vcn,
+        kwargs_delete={"vcn_id": module.params["vcn_id"]},
+        module=module,
+    )
+    return result
+
+
+def update_vcn(virtual_network_client, module):
+    result = oci_utils.check_and_update_resource(
+        resource_type="vcn",
+        client=virtual_network_client,
+        get_fn=virtual_network_client.get_vcn,
+        kwargs_get={"vcn_id": module.params["vcn_id"]},
+        update_fn=virtual_network_client.update_vcn,
+        primitive_params_update=["vcn_id"],
+        kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
+        module=module,
+        update_attributes=list(UpdateVcnDetails().attribute_map.keys()),
+    )
+    return result
+
+
+def create_vcn(virtual_network_client, module):
+    create_vcn_details = CreateVcnDetails()
+    for attribute in create_vcn_details.attribute_map.keys():
+        if attribute in module.params:
+            setattr(create_vcn_details, attribute, module.params[attribute])
+
+    result = oci_utils.create_and_wait(
+        resource_type="vcn",
+        create_fn=virtual_network_client.create_vcn,
+        kwargs_create={"create_vcn_details": create_vcn_details},
+        client=virtual_network_client,
+        get_fn=virtual_network_client.get_vcn,
+        get_param="vcn_id",
+        module=module,
+    )
+    return result
+
+
+def main():
+    module_args = oci_utils.get_taggable_arg_spec(
+        supports_create=True, supports_wait=True
+    )
+    module_args.update(
+        dict(
+            cidr_block=dict(type="str"),
+            compartment_id=dict(type="str"),
+            display_name=dict(type="str", aliases=["name"]),
+            dns_label=dict(type="str"),
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            vcn_id=dict(type="str", aliases=["id"]),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=False,
+        mutually_exclusive=[["compartment_id", "vcn_id"]],
+    )
+
+    if not HAS_OCI_PY_SDK:
+        module.fail_json(msg=missing_required_lib("oci"))
+
+    virtual_network_client = oci_utils.create_service_client(
+        module, VirtualNetworkClient
+    )
+
+    exclude_attributes = {"display_name": True, "dns_label": True}
+    state = module.params["state"]
+    vcn_id = module.params["vcn_id"]
+
+    if state == "absent":
+        if vcn_id is not None:
+            result = delete_vcn(virtual_network_client, module)
+        else:
+            module.fail_json(
+                msg="Specify vcn_id with state as
'absent' to delete a VCN." + ) + + else: + if vcn_id is not None: + result = update_vcn(virtual_network_client, module) + else: + result = oci_utils.check_and_create_resource( + resource_type="vcn", + create_fn=create_vcn, + kwargs_create={ + "virtual_network_client": virtual_network_client, + "module": module, + }, + list_fn=virtual_network_client.list_vcns, + kwargs_list={"compartment_id": module.params["compartment_id"]}, + module=module, + model=CreateVcnDetails(), + exclude_attributes=exclude_attributes, + ) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/odbc.py b/plugins/modules/odbc.py deleted file mode 120000 index ee3d8312c6..0000000000 --- a/plugins/modules/odbc.py +++ /dev/null @@ -1 +0,0 @@ -./database/misc/odbc.py \ No newline at end of file diff --git a/plugins/modules/odbc.py b/plugins/modules/odbc.py new file mode 100644 index 0000000000..5fc2e8b18d --- /dev/null +++ b/plugins/modules/odbc.py @@ -0,0 +1,173 @@ +#!/usr/bin/python + +# Copyright (c) 2019, John Westcott +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: odbc +author: "John Westcott IV (@john-westcott-iv)" +version_added: "1.0.0" +short_description: Execute SQL using ODBC +description: + - Read/Write info using ODBC drivers. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + dsn: + description: + - The connection string passed into ODBC. + required: true + type: str + query: + description: + - The SQL query to perform. + required: true + type: str + params: + description: + - Parameters to pass to the SQL query. + type: list + elements: str + commit: + description: + - Perform a commit after the execution of the SQL query. + - Some databases allow a commit after a select whereas others raise an exception. + - Default is V(true) to support legacy module behavior. + type: bool + default: true + version_added: 1.3.0 +requirements: + - "pyodbc" + +notes: + - Like the command module, this module always returns V(changed=true) whether or not the query would change the database. + - 'To alter this behavior you can use C(changed_when): [true or false].' + - For details about return values (RV(description) and RV(row_count)) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor). +""" + +EXAMPLES = r""" +- name: Set some values in the test db + community.general.odbc: + dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;" + query: "Select * from table_a where column1 = ?" + params: + - "value1" + commit: false + changed_when: false +""" + +# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module. +RETURN = r""" +results: + description: List of lists of strings containing selected rows, likely empty for DDL statements. + returned: success + type: list + elements: list +description: + description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes." + returned: success + type: list + elements: dict +row_count: + description: "The number of rows selected or modified according to the cursor defaults to V(-1). See notes." 
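+# Each entry of pyodbc's cursor.description is the standard DB-API 7-tuple,
+# which the module code below unpacks positionally into the dicts returned
+# here (illustrative values):
+#
+#   name, type_code, display_size, internal_size, precision, scale, nullable = desc
+#   # e.g. ('id', <class 'int'>, 10, 10, 10, 0, True)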
+ returned: success + type: str +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +HAS_PYODBC = None +try: + import pyodbc + HAS_PYODBC = True +except ImportError as e: + HAS_PYODBC = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dsn=dict(type='str', required=True, no_log=True), + query=dict(type='str', required=True), + params=dict(type='list', elements='str'), + commit=dict(type='bool', default=True), + ), + ) + + dsn = module.params.get('dsn') + query = module.params.get('query') + params = module.params.get('params') + commit = module.params.get('commit') + + if not HAS_PYODBC: + module.fail_json(msg=missing_required_lib('pyodbc')) + + # Try to make a connection with the DSN + connection = None + try: + connection = pyodbc.connect(dsn) + except Exception as e: + module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e))) + + result = dict( + changed=True, + description=[], + row_count=-1, + results=[], + ) + + try: + cursor = connection.cursor() + + if params: + cursor.execute(query, params) + else: + cursor.execute(query) + if commit: + cursor.commit() + try: + # Get the rows out into an 2d array + for row in cursor.fetchall(): + new_row = [] + for column in row: + new_row.append("{0}".format(column)) + result['results'].append(new_row) + + # Return additional information from the cursor + for row_description in cursor.description: + description = {} + description['name'] = row_description[0] + description['type'] = row_description[1].__name__ + description['display_size'] = row_description[2] + description['internal_size'] = row_description[3] + description['precision'] = row_description[4] + description['scale'] = row_description[5] + description['nullable'] = row_description[6] + result['description'].append(description) + + result['row_count'] = cursor.rowcount + except pyodbc.ProgrammingError as pe: + pass + except Exception as e: + module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e))) + + cursor.close() + except Exception as e: + module.fail_json(msg="Failed to execute query: {0}".format(to_native(e))) + finally: + connection.close() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py deleted file mode 120000 index a14104ce20..0000000000 --- a/plugins/modules/office_365_connector_card.py +++ /dev/null @@ -1 +0,0 @@ -./notification/office_365_connector_card.py \ No newline at end of file diff --git a/plugins/modules/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py new file mode 100644 index 0000000000..abfdf93cce --- /dev/null +++ b/plugins/modules/office_365_connector_card.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# Copyright (c) 2017 Marc Sensenich +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: office_365_connector_card +short_description: Use webhooks to create Connector Card messages within an Office 365 group +description: + - Creates Connector Card messages through Office 365 Connectors. + - See + U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups). 
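+# Action and input dicts are written in snake_case in playbooks and converted
+# with snake_dict_to_camel_dict before being posted (a real helper from
+# ansible.module_utils; example values are illustrative):
+#
+#   from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+#   snake_dict_to_camel_dict({"@type": "TextInput", "is_multiline": True})
+#   # -> {"@type": "TextInput", "isMultiline": True}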
+author: "Marc Sensenich (@marc-sensenich)" +notes: + - This module is not idempotent, therefore if you run the same task twice then you create two Connector Cards. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + webhook: + type: str + description: + - The webhook URL is given to you when you create a new Connector. + required: true + summary: + type: str + description: + - A string used for summarizing card content. + - This is the message subject. + - This is required if the text parameter is not populated. + color: + type: str + description: + - Accent color used for branding or indicating status in the card. + title: + type: str + description: + - A title for the Connector message. Shown at the top of the message. + text: + type: str + description: + - The main text of the card. + - This is rendered below the sender information and optional title, + - And above any sections or actions present. + actions: + type: list + elements: dict + description: + - This array of objects is used to power the action links found at the bottom of the card. + sections: + type: list + elements: dict + description: + - Contains a list of sections to display in the card. + - For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields). +""" + +EXAMPLES = r""" +- name: Create a simple Connector Card + community.general.office_365_connector_card: + webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID + text: 'Hello, World!' + +- name: Create a Connector Card with the full format + community.general.office_365_connector_card: + webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID + summary: This is the summary property + title: This is the **card's title** property + text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod + tempor incididunt ut labore et dolore magna aliqua. + color: E81123 + sections: + - title: This is the **section's title** property + activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg + activity_title: This is the section's **activityTitle** property + activity_subtitle: This is the section's **activitySubtitle** property + activity_text: This is the section's **activityText** property. + hero_image: + image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod + tempor incididunt ut labore et dolore magna aliqua. + facts: + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + images: + - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg + title: This is the image's alternate text + actions: + - "@type": ActionCard + name: Comment + inputs: + - "@type": TextInput + id: comment + is_multiline: true + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... 
+ - "@type": ActionCard + name: Due Date + inputs: + - "@type": DateInput + id: dueDate + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": HttpPOST + name: Action's name prop. + target: http://... + - "@type": OpenUri + name: Action's name prop + targets: + - os: default + uri: http://... + - start_group: true + title: This is the title of a **second section** + text: This second section is visually separated from the first one by setting its **startGroup** property to true. +""" + +RETURN = r""" +""" + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions" +OFFICE_365_CARD_TYPE = "MessageCard" +OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required." +OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable." + + +def build_actions(actions): + action_items = [] + + for action in actions: + action_item = snake_dict_to_camel_dict(action) + action_items.append(action_item) + + return action_items + + +def build_sections(sections): + sections_created = [] + + for section in sections: + sections_created.append(build_section(section)) + + return sections_created + + +def build_section(section): + section_payload = dict() + + if 'title' in section: + section_payload['title'] = section['title'] + + if 'start_group' in section: + section_payload['startGroup'] = section['start_group'] + + if 'activity_image' in section: + section_payload['activityImage'] = section['activity_image'] + + if 'activity_title' in section: + section_payload['activityTitle'] = section['activity_title'] + + if 'activity_subtitle' in section: + section_payload['activitySubtitle'] = section['activity_subtitle'] + + if 'activity_text' in section: + section_payload['activityText'] = section['activity_text'] + + if 'hero_image' in section: + section_payload['heroImage'] = section['hero_image'] + + if 'text' in section: + section_payload['text'] = section['text'] + + if 'facts' in section: + section_payload['facts'] = section['facts'] + + if 'images' in section: + section_payload['images'] = section['images'] + + if 'actions' in section: + section_payload['potentialAction'] = build_actions(section['actions']) + + return section_payload + + +def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None): + payload = dict() + payload['@context'] = OFFICE_365_CARD_CONTEXT + payload['@type'] = OFFICE_365_CARD_TYPE + + if summary is not None: + payload['summary'] = summary + + if color is not None: + payload['themeColor'] = color + + if title is not None: + payload['title'] = title + + if text is not None: + payload['text'] = text + + if actions: + payload['potentialAction'] = build_actions(actions) + + if sections: + payload['sections'] = build_sections(sections) + + payload = module.jsonify(payload) + return payload + + +def do_notify_connector_card_webhook(module, webhook, payload): + headers = { + 'Content-Type': 'application/json' + } + + response, info = fetch_url( + module=module, + url=webhook, + headers=headers, + method='POST', + data=payload + ) + + if info['status'] == 200: + module.exit_json(changed=True) + elif info['status'] == 400 and module.check_mode: + if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: + module.exit_json(changed=True) + else: + 
module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG) + else: + module.fail_json( + msg="failed to send %s as a connector card to Incoming Webhook: %s" + % (payload, info['msg']) + ) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + webhook=dict(required=True, no_log=True), + summary=dict(type='str'), + color=dict(type='str'), + title=dict(type='str'), + text=dict(type='str'), + actions=dict(type='list', elements='dict'), + sections=dict(type='list', elements='dict') + ), + supports_check_mode=True + ) + + webhook = module.params['webhook'] + summary = module.params['summary'] + color = module.params['color'] + title = module.params['title'] + text = module.params['text'] + actions = module.params['actions'] + sections = module.params['sections'] + + payload = build_payload_for_connector_card( + module, + summary, + color, + title, + text, + actions, + sections) + + if module.check_mode: + # In check mode, send an empty payload to validate connection + check_mode_payload = build_payload_for_connector_card(module) + do_notify_connector_card_webhook(module, webhook, check_mode_payload) + + do_notify_connector_card_webhook(module, webhook, payload) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ohai.py b/plugins/modules/ohai.py deleted file mode 120000 index eff4005ff7..0000000000 --- a/plugins/modules/ohai.py +++ /dev/null @@ -1 +0,0 @@ -./system/ohai.py \ No newline at end of file diff --git a/plugins/modules/ohai.py b/plugins/modules/ohai.py new file mode 100644 index 0000000000..6d30a06230 --- /dev/null +++ b/plugins/modules/ohai.py @@ -0,0 +1,50 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ohai +short_description: Returns inventory data from I(Ohai) +description: + - Similar to the M(community.general.facter_facts) module, this runs the I(Ohai) discovery program (U(https://docs.chef.io/ohai.html)) + on the remote host and returns JSON inventory data. I(Ohai) data is a bit more verbose and nested than I(facter). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: {} +notes: [] +requirements: ["ohai"] +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +""" + +EXAMPLES = r""" +ansible webservers -m ohai --tree=/tmp/ohaidata +... +""" +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + cmd = ["/usr/bin/env", "ohai"] + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/omapi_host.py b/plugins/modules/omapi_host.py deleted file mode 120000 index 4a65cbe5ec..0000000000 --- a/plugins/modules/omapi_host.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/omapi_host.py \ No newline at end of file diff --git a/plugins/modules/omapi_host.py b/plugins/modules/omapi_host.py new file mode 100644 index 0000000000..5dfa01b19e --- /dev/null +++ b/plugins/modules/omapi_host.py @@ -0,0 +1,315 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. 
http://www.etai.fr/
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: omapi_host
+short_description: Set up OMAPI hosts
+description: Manage OMAPI hosts in compatible DHCPd servers.
+requirements:
+  - pypureomapi
+author:
+  - Loic Blot (@nerzhul)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Create or remove OMAPI host.
+    type: str
+    required: true
+    choices: [absent, present]
+  hostname:
+    description:
+      - Sets the host lease hostname (mandatory if O(state=present)).
+    type: str
+    aliases: [name]
+  host:
+    description:
+      - Sets the OMAPI server host to interact with.
+    type: str
+    default: localhost
+  port:
+    description:
+      - Sets the OMAPI server port to interact with.
+    type: int
+    default: 7911
+  key_name:
+    description:
+      - Sets the TSIG key name for authenticating against the OMAPI server.
+    type: str
+    required: true
+  key:
+    description:
+      - Sets the TSIG key content for authenticating against the OMAPI server.
+    type: str
+    required: true
+  macaddr:
+    description:
+      - Sets the lease host MAC address.
+    type: str
+    required: true
+  ip:
+    description:
+      - Sets the lease host IP address.
+    type: str
+  statements:
+    description:
+      - Attach a list of OMAPI DHCP statements to the host lease (without the ending semicolon).
+    type: list
+    elements: str
+    default: []
+  ddns:
+    description:
+      - Enable dynamic DNS updates for this host.
+    type: bool
+    default: false
+"""
+EXAMPLES = r"""
+- name: Add a host using OMAPI
+  community.general.omapi_host:
+    key_name: defomapi
+    key: +bFQtBCta6j2vWkjPkNFtgA==
+    host: 10.98.4.55
+    macaddr: 44:dd:ab:dd:11:44
+    name: server01
+    ip: 192.168.88.99
+    ddns: true
+    statements:
+      - filename "pxelinux.0"
+      - next-server 1.1.1.1
+    state: present
+
+- name: Remove a host using OMAPI
+  community.general.omapi_host:
+    key_name: defomapi
+    key: +bFQtBCta6j2vWkjPkNFtgA==
+    host: 10.1.1.1
+    macaddr: 00:66:ab:dd:11:44
+    state: absent
+"""
+
+RETURN = r"""
+lease:
+  description: Dictionary containing host information.
+  returned: success
+  type: complex
+  contains:
+    ip-address:
+      description: IP address, if one is assigned.
+      returned: success
+      type: str
+      sample: '192.168.1.5'
+    hardware-address:
+      description: MAC address.
+      returned: success
+      type: str
+      sample: '00:11:22:33:44:55'
+    hardware-type:
+      description: Hardware type, generally V(1).
+      returned: success
+      type: int
+      sample: 1
+    name:
+      description: Hostname.
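+# OMAPI is a binary protocol: the pypureomapi helpers imported below pack
+# these values before they go on the wire (byte strings shown for
+# illustration):
+#
+#   pack_mac("00:11:22:33:44:55")   # -> b'\x00\x11"3DU' (6 raw bytes)
+#   pack_ip("192.168.1.5")          # -> b'\xc0\xa8\x01\x05'
+#   struct.pack("!I", 1)            # hardware-type 1 = Ethernet, big-endian uint32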
+ returned: success + type: str + sample: 'mydesktop' +""" + +import binascii +import socket +import struct +import traceback + +PUREOMAPI_IMP_ERR = None +try: + from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound + from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac + from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE + pureomapi_found = True +except ImportError: + PUREOMAPI_IMP_ERR = traceback.format_exc() + pureomapi_found = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +class OmapiHostManager: + def __init__(self, module): + self.module = module + self.omapi = None + self.connect() + + def connect(self): + try: + self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']), + self.module.params['key']) + except binascii.Error: + self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.") + except OmapiError as e: + self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' " + "are valid. Exception was: %s" % to_native(e)) + except socket.error as e: + self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e)) + + def get_host(self, macaddr): + msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict')) + msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr))) + msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1))) + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_UPDATE: + return None + return response + + @staticmethod + def unpack_facts(obj): + result = dict(obj) + if 'hardware-address' in result: + result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')])) + + if 'ip-address' in result: + result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')])) + + if 'hardware-type' in result: + result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')]) + + return result + + def setup_host(self): + if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0: + self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.") + + msg = None + host_response = self.get_host(self.module.params['macaddr']) + # If host was not found using macaddr, add create message + if host_response is None: + msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict')) + msg.message.append((to_bytes('create'), struct.pack('!I', 1))) + msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1))) + msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr']))) + msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1))) + msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname']))) + if self.module.params['ip'] is not None: + msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip']))) + + stmt_join = "" + if self.module.params['ddns']: + stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname']) + + try: + if len(self.module.params['statements']) > 0: + stmt_join += "; ".join(self.module.params['statements']) + stmt_join += "; " + except TypeError as e: + self.module.fail_json(msg="Invalid statements found: %s" % to_native(e)) + + if 
len(stmt_join) > 0: + msg.obj.append((to_bytes('statements'), to_bytes(stmt_join))) + + try: + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_UPDATE: + self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters " + "are valid.") + self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj)) + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + # Forge update message + else: + response_obj = self.unpack_facts(host_response.obj) + fields_to_update = {} + + if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \ + unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']: + fields_to_update['ip-address'] = pack_ip(self.module.params['ip']) + + # Name cannot be changed + if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']: + self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. " + "Please delete host and add new." % + (response_obj['name'], self.module.params['hostname'])) + + """ + # It seems statements are not returned by OMAPI, then we cannot modify them at this moment. + if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \ + response_obj['statements'] != self.module.params['statements']: + with open('/tmp/omapi', 'w') as fb: + for (k,v) in iteritems(response_obj): + fb.writelines('statements: %s %s\n' % (k, v)) + """ + if len(fields_to_update) == 0: + self.module.exit_json(changed=False, lease=response_obj) + else: + msg = OmapiMessage.update(host_response.handle) + msg.update_object(fields_to_update) + + try: + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_STATUS: + self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters " + "are valid.") + self.module.exit_json(changed=True) + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + + def remove_host(self): + try: + self.omapi.del_host(self.module.params['macaddr']) + self.module.exit_json(changed=True) + except OmapiErrorNotFound: + self.module.exit_json() + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=True, choices=['absent', 'present']), + host=dict(type='str', default="localhost"), + port=dict(type='int', default=7911), + key_name=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=True), + macaddr=dict(type='str', required=True), + hostname=dict(type='str', aliases=['name']), + ip=dict(type='str'), + ddns=dict(type='bool', default=False), + statements=dict(type='list', elements='str', default=[]), + ), + supports_check_mode=False, + ) + + if not pureomapi_found: + module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR) + + if module.params['key'] is None or len(module.params["key"]) == 0: + module.fail_json(msg="'key' parameter cannot be empty.") + + if module.params['key_name'] is None or len(module.params["key_name"]) == 0: + module.fail_json(msg="'key_name' parameter cannot be empty.") + + host_manager = OmapiHostManager(module) + try: + if module.params['state'] == 'present': + host_manager.setup_host() + elif module.params['state'] == 'absent': + host_manager.remove_host() + except ValueError as e: + module.fail_json(msg="OMAPI input value error: %s" % to_native(e)) 
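+# connect() above reports binascii.Error when 'key' is not valid base64; one
+# way to produce a well-formed TSIG secret for testing (a sketch, not the only
+# way):
+#
+#   import base64, os
+#   base64.b64encode(os.urandom(16)).decode()   # e.g. 'w9kfP0T0m8Jr5X0bXn0HyQ=='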
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py
deleted file mode 120000
index 5e80eb2de6..0000000000
--- a/plugins/modules/one_host.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/opennebula/one_host.py
\ No newline at end of file
diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py
new file mode 100644
index 0000000000..e5781fb07f
--- /dev/null
+++ b/plugins/modules/one_host.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: one_host
+
+short_description: Manages OpenNebula Hosts
+
+
+requirements:
+  - pyone
+
+description:
+  - Manages OpenNebula Hosts.
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  name:
+    description:
+      - Hostname of the machine to manage.
+    required: true
+    type: str
+  state:
+    description:
+      - Takes the host to the desired lifecycle state.
+      - If V(absent) the host is deleted from the cluster.
+      - If V(present) the host is created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
+      - If V(enabled) the host is fully operational.
+      - V(disabled), for example to perform maintenance operations.
+      - V(offline), the host is totally offline.
+    choices:
+      - absent
+      - present
+      - enabled
+      - disabled
+      - offline
+    default: present
+    type: str
+  im_mad_name:
+    description:
+      - The name of the information manager; these values are taken from oned.conf, from the IM_MAD (name) tag.
+    default: kvm
+    type: str
+  vmm_mad_name:
+    description:
+      - The name of the virtual machine manager; these values are taken from oned.conf, from the VM_MAD (name) tag.
+    default: kvm
+    type: str
+  cluster_id:
+    description:
+      - The cluster ID.
+    default: 0
+    type: int
+  cluster_name:
+    description:
+      - The cluster specified by name.
+    type: str
+  labels:
+    description:
+      - The labels for this host.
+    type: list
+    elements: str
+  template:
+    description:
+      - The template or attribute changes to merge into the host template.
+    aliases:
+      - attributes
+    type: dict
+
+extends_documentation_fragment:
+  - community.general.opennebula
+  - community.general.attributes
+
+author:
+  - Rafael del Valle (@rvalle)
+"""
+
+EXAMPLES = r"""
+- name: Create a new host in OpenNebula
+  community.general.one_host:
+    name: host1
+    cluster_id: 1
+    api_url: http://127.0.0.1:2633/RPC2
+
+- name: Create a host and adjust its template
+  community.general.one_host:
+    name: host2
+    cluster_name: default
+    template:
+      LABELS:
+        - gold
+        - ssd
+      RESERVED_CPU: -100
+"""
+
+# TODO: pending setting guidelines on returned values
+RETURN = r"""
+"""
+
+# TODO: Documentation on valid state transitions is required to properly implement all valid cases
+# TODO: To be coherent with CLI this module should also provide "flush" functionality
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+try:
+    from pyone import HOST_STATES, HOST_STATUS
+except ImportError:
+    pass  # handled at module utils
+
+
+# Pseudo definitions...
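+# pyone's HOST_STATES enum provides the states checked below (MONITORED,
+# DISABLED, OFFLINE, ERROR, MONITORING_ERROR, ...); HOST_ABSENT is a
+# module-local sentinel for "host not registered at all", deliberately
+# outside the enum's value range.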
+ +HOST_ABSENT = -99 # the host is absent (special case defined by this module) + + +class HostModule(OpenNebulaModule): + + def __init__(self): + + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'), + im_mad_name=dict(type='str', default="kvm"), + vmm_mad_name=dict(type='str', default="kvm"), + cluster_id=dict(type='int', default=0), + cluster_name=dict(type='str'), + labels=dict(type='list', elements='str'), + template=dict(type='dict', aliases=['attributes']), + ) + + mutually_exclusive = [ + ['cluster_id', 'cluster_name'] + ] + + OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive) + + def allocate_host(self): + """ + Creates a host entry in OpenNebula + self.one.host.allocate returns ID of a host + Returns: True on success, fails otherwise. + + """ + try: + self.one.host.allocate(self.get_parameter('name'), + self.get_parameter('vmm_mad_name'), + self.get_parameter('im_mad_name'), + self.get_parameter('cluster_id')) + self.result['changed'] = True + except Exception as e: + self.fail(msg="Could not allocate host, ERROR: " + str(e)) + + return True + + def wait_for_host_state(self, host, target_states): + """ + Utility method that waits for a host state. + Args: + host: + target_states: + + """ + return self.wait_for_state('host', + lambda: self.one.host.info(host.ID).STATE, + lambda s: HOST_STATES(s).name, target_states, + invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]) + + def run(self, one, module, result): + + # Get the list of hosts + host_name = self.get_parameter("name") + host = self.get_host_by_name(host_name) + + # manage host state + desired_state = self.get_parameter('state') + if bool(host): + current_state = host.STATE + current_state_name = HOST_STATES(host.STATE).name + else: + current_state = HOST_ABSENT + current_state_name = "ABSENT" + + # apply properties + if desired_state == 'present': + if current_state == HOST_ABSENT: + self.allocate_host() + host = self.get_host_by_name(host_name) + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]: + self.fail(msg="invalid host state %s" % current_state_name) + + elif desired_state == 'enabled': + if current_state == HOST_ABSENT: + self.allocate_host() + host = self.get_host_by_name(host_name) + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]: + if one.host.status(host.ID, HOST_STATUS.ENABLED): + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + result['changed'] = True + else: + self.fail(msg="could not enable host") + elif current_state in [HOST_STATES.MONITORED]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name) + + elif desired_state == 'disabled': + if current_state == HOST_ABSENT: + self.fail(msg='absent host cannot be put in disabled state') + elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.DISABLED) + result['changed'] = True + except Exception as e: + self.fail(msg="Could not disable host, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.DISABLED]) + elif current_state in [HOST_STATES.DISABLED]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name) + + 
elif desired_state == 'offline': + if current_state == HOST_ABSENT: + self.fail(msg='absent host cannot be placed in offline state') + elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.OFFLINE) + result['changed'] = True + except Exception as e: + self.fail(msg="Could not set host offline, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) + elif current_state in [HOST_STATES.OFFLINE]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name) + + elif desired_state == 'absent': + if current_state != HOST_ABSENT: + # returns host ID integer + try: + one.host.delete(host.ID) + result['changed'] = True + except Exception as e: + self.fail(msg="Could not delete host from cluster, ERROR: " + str(e)) + + # if we reach this point we can assume that the host was taken to the desired state + + if desired_state != "absent": + # manipulate or modify the template + desired_template_changes = self.get_parameter('template') + + if desired_template_changes is None: + desired_template_changes = dict() + + # complete the template with specific ansible parameters + if self.is_parameter('labels'): + desired_template_changes['LABELS'] = self.get_parameter('labels') + + if self.requires_template_update(host.TEMPLATE, desired_template_changes): + # setup the root element so that pyone will generate XML instead of attribute vector + desired_template_changes = {"TEMPLATE": desired_template_changes} + # merge the template, returns host ID integer + try: + one.host.update(host.ID, desired_template_changes, 1) + result['changed'] = True + except Exception as e: + self.fail(msg="Failed to update the host template, ERROR: " + str(e)) + + # the cluster + if host.CLUSTER_ID != self.get_parameter('cluster_id'): + # returns cluster id in int + try: + one.cluster.addhost(self.get_parameter('cluster_id'), host.ID) + result['changed'] = True + except Exception as e: + self.fail(msg="Failed to update the host cluster, ERROR: " + str(e)) + + # return + self.exit() + + +def main(): + HostModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py deleted file mode 120000 index c2d310acf7..0000000000 --- a/plugins/modules/one_image.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/opennebula/one_image.py \ No newline at end of file diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py new file mode 100644 index 0000000000..92786fd91d --- /dev/null +++ b/plugins/modules/one_image.py @@ -0,0 +1,626 @@ +#!/usr/bin/python +# Copyright (c) 2018, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import annotations + +DOCUMENTATION = r""" +module: one_image +short_description: Manages OpenNebula images +description: + - Manages OpenNebula images. +requirements: + - pyone +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - A O(id) of the image you would like to manage. + type: int + name: + description: + - A O(name) of the image you would like to manage. + - Required if O(create=true). 
+ type: str + state: + description: + - V(present) - state that is used to manage the image. + - V(absent) - delete the image. + - V(cloned) - clone the image. + - V(renamed) - rename the image to the O(new_name). + choices: ["present", "absent", "cloned", "renamed"] + default: present + type: str + enabled: + description: + - Whether the image should be enabled or disabled. + type: bool + new_name: + description: + - A name that is assigned to the existing or new image. + - In the case of cloning, by default O(new_name) is set to the name of the origin image with the prefix 'Copy of'. + type: str + persistent: + description: + - Whether the image should be persistent or non-persistent. + type: bool + version_added: 9.5.0 + create: + description: + - Whether the image should be created if not present. + - This is ignored if O(state=absent). + type: bool + version_added: 10.0.0 + template: + description: + - Use with O(create=true) to specify image template. + type: str + version_added: 10.0.0 + datastore_id: + description: + - Use with O(create=true) to specify datastore for image. + type: int + version_added: 10.0.0 + wait_timeout: + description: + - Seconds to wait until image is ready, deleted or cloned. + type: int + default: 60 + version_added: 10.0.0 +author: + - "Milan Ilic (@ilicmilan)" +""" + +EXAMPLES = r""" +- name: Fetch the IMAGE by id + community.general.one_image: + id: 45 + register: result + +- name: Print the IMAGE properties + ansible.builtin.debug: + var: result + +- name: Rename existing IMAGE + community.general.one_image: + id: 34 + state: renamed + new_name: bar-image + +- name: Disable the IMAGE by id + community.general.one_image: + id: 37 + enabled: false + +- name: Make the IMAGE persistent + community.general.one_image: + id: 37 + persistent: true + +- name: Enable the IMAGE by name + community.general.one_image: + name: bar-image + enabled: true + +- name: Clone the IMAGE by name + community.general.one_image: + name: bar-image + state: cloned + new_name: bar-image-clone + register: result + +- name: Delete the IMAGE by id + community.general.one_image: + id: '{{ result.id }}' + state: absent + +- name: Make sure IMAGE is present + community.general.one_image: + name: myyy-image + state: present + create: true + datastore_id: 100 + template: | + PATH = "/var/tmp/image" + TYPE = "OS" + SIZE = 20512 + FORMAT = "qcow2" + PERSISTENT = "Yes" + DEV_PREFIX = "vd" + +- name: Make sure IMAGE is present with a longer timeout + community.general.one_image: + name: big-image + state: present + create: true + datastore_id: 100 + wait_timeout: 900 + template: |- + PATH = "https://192.0.2.200/repo/tipa_image.raw" + TYPE = "OS" + SIZE = 82048 + FORMAT = "raw" + PERSISTENT = "Yes" + DEV_PREFIX = "vd" +""" + +RETURN = r""" +id: + description: Image ID. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 153 +name: + description: Image name. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: app1 +group_id: + description: Image's group ID. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 1 +group_name: + description: Image's group name. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: one-users +owner_id: + description: Image's owner ID. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 143 +owner_name: + description: Image's owner name. 
+ type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: ansible-test +state: + description: State of image instance. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: READY +used: + description: Is image in use. + type: bool + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: true +running_vms: + description: Count of running vms that use this image. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 7 +permissions: + description: The image's permissions. + type: dict + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions. + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +type: + description: The image's type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +disk_type: + description: The image's format type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + returned: when O(state=present), O(state=cloned), or O(state=renamed) +path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +fstype: + description: The image's filesystem type. + type: str + sample: ext4 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +target_snapshot: + description: The image's target snapshot. + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore_id: + description: The image's datastore ID. 
+ type: int + sample: 100 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore: + description: The image's datastore name. + type: int + sample: image_datastore + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +vms: + description: The image's list of VM ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +clones: + description: The image's list of clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +app_clones: + description: The image's list of app_clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +snapshots: + description: The image's list of snapshots. + type: list + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName +""" + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] + + +class ImageModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int'), + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'), + enabled=dict(type='bool'), + new_name=dict(type='str'), + persistent=dict(type='bool'), + create=dict(type='bool'), + template=dict(type='str'), + datastore_id=dict(type='int'), + wait_timeout=dict(type='int', default=60), + ) + required_if = [ + ['state', 'renamed', ['id']], + ['create', True, ['template', 'datastore_id', 'name']], + ] + mutually_exclusive = [ + ['id', 'name'], + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + enabled = params.get('enabled') + new_name = params.get('new_name') + persistent = params.get('persistent') + create = params.get('create') + template = params.get('template') + datastore_id = params.get('datastore_id') + wait_timeout = params.get('wait_timeout') + + self.result = {} + + image = self.get_image_instance(id, name) + if not image and desired_state != 'absent': + if create: + self.result = self.create_image(name, template, datastore_id, wait_timeout) + # Using 'if id:' doesn't work properly when id=0 + elif id is not None: + module.fail_json(msg="There is no image with id=" + str(id)) + elif name is not None: + module.fail_json(msg="There is no image with name=" + name) + + if desired_state == 'absent': + self.result = self.delete_image(image, wait_timeout) + else: + if persistent is not None: + self.result = self.change_persistence(image, persistent) + if enabled is not None: + self.result = self.enable_image(image, enabled) + if desired_state == "cloned": + self.result = self.clone_image(image, new_name, wait_timeout) + elif desired_state == "renamed": + self.result = self.rename_image(image, new_name) + + self.exit() + + 
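+    # The lookup helpers below share one image-pool scan parameterized by a
+    # predicate, e.g.:
+    #
+    #   self.get_image(lambda image: image.NAME == "bar-image")
+    #   self.get_image(lambda image: image.ID == 45)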
+    def get_image(self, predicate):
+        # Filter -2 means fetch all images user can Use
+        pool = self.one.imagepool.info(-2, -1, -1, -1)
+
+        for image in pool.IMAGE:
+            if predicate(image):
+                return image
+
+        return None
+
+    def get_image_by_name(self, image_name):
+        return self.get_image(lambda image: (image.NAME == image_name))
+
+    def get_image_by_id(self, image_id):
+        return self.get_image(lambda image: (image.ID == image_id))
+
+    def get_image_instance(self, requested_id, requested_name):
+        # Using 'if requested_id:' doesn't work properly when requested_id=0
+        if requested_id is not None:
+            return self.get_image_by_id(requested_id)
+        else:
+            return self.get_image_by_name(requested_name)
+
+    def create_image(self, image_name, template, datastore_id, wait_timeout):
+        # Initialize the result so check mode does not hit an undefined name below
+        result = {}
+        if not self.module.check_mode:
+            image_id = self.one.image.allocate("NAME = \"" + image_name + "\"\n" + template, datastore_id)
+            self.wait_for_ready(image_id, wait_timeout)
+            image = self.get_image_by_id(image_id)
+            result = self.get_image_info(image)
+
+        result['changed'] = True
+        return result
+
+    def wait_for_ready(self, image_id, wait_timeout=60):
+        import time
+        start_time = time.time()
+
+        while (time.time() - start_time) < wait_timeout:
+            image = self.one.image.info(image_id)
+            state = image.STATE
+
+            if state == IMAGE_STATES.index('ERROR'):
+                self.module.fail_json(msg="Got an ERROR state: " + image.TEMPLATE['ERROR'])
+
+            if state == IMAGE_STATES.index('READY'):
+                return True
+
+            time.sleep(1)
+        self.module.fail_json(msg="Wait timeout has expired!")
+
+    def wait_for_delete(self, image_id, wait_timeout=60):
+        import time
+        start_time = time.time()
+
+        while (time.time() - start_time) < wait_timeout:
+            # It might be already deleted by the time this function is called
+            try:
+                image = self.one.image.info(image_id)
+            except Exception:
+                # get_image_instance() takes an ID and a name; pass None for the
+                # unused name. If the image is gone from the pool, we are done,
+                # otherwise retry instead of reading a stale/undefined 'image'.
+                if not self.get_image_instance(image_id, None):
+                    return True
+                time.sleep(1)
+                continue
+
+            state = image.STATE
+
+            if state == IMAGE_STATES.index('DELETE'):
+                return True
+
+            time.sleep(1)
+
+        self.module.fail_json(msg="Wait timeout has expired!")
+
+    def enable_image(self, image, enable):
+        image = self.one.image.info(image.ID)
+        changed = False
+
+        state = image.STATE
+
+        if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+            if enable:
+                self.module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
+            else:
+                self.module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
+
+        if ((enable and state != IMAGE_STATES.index('READY')) or
+                (not enable and state != IMAGE_STATES.index('DISABLED'))):
+            changed = True
+
+        if changed and not self.module.check_mode:
+            self.one.image.enable(image.ID, enable)
+
+        result = self.get_image_info(image)
+        result['changed'] = changed
+
+        return result
+
+    def change_persistence(self, image, enable):
+        image = self.one.image.info(image.ID)
+        changed = False
+
+        state = image.STATE
+
+        if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+            if enable:
+                self.module.fail_json(msg="Cannot enable persistence for " + IMAGE_STATES[state] + " image!")
+            else:
+                self.module.fail_json(msg="Cannot disable persistence for " + IMAGE_STATES[state] + " image!")
+
+        if ((enable and state != IMAGE_STATES.index('READY')) or
+                (not enable and state != IMAGE_STATES.index('DISABLED'))):
+            changed = True
+
+        if changed and not self.module.check_mode:
+            self.one.image.persistent(image.ID, enable)
+
+        result = self.get_image_info(image)
result['changed'] = changed + + return result + + def clone_image(self, image, new_name, wait_timeout): + if new_name is None: + new_name = "Copy of " + image.NAME + + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + result = self.get_image_info(image) + result['changed'] = False + return result + + if image.STATE == IMAGE_STATES.index('DISABLED'): + self.module.fail_json(msg="Cannot clone DISABLED image") + + if not self.module.check_mode: + new_id = self.one.image.clone(image.ID, new_name) + self.wait_for_ready(new_id, wait_timeout) + image = self.one.image.info(new_id) + + result = self.get_image_info(image) + result['changed'] = True + + return result + + def rename_image(self, image, new_name): + if new_name is None: + self.module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") + + if new_name == image.NAME: + result = self.get_image_info(image) + result['changed'] = False + return result + + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + self.module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + + if not self.module.check_mode: + self.one.image.rename(image.ID, new_name) + + result = self.get_image_info(image) + result['changed'] = True + return result + + def delete_image(self, image, wait_timeout): + if not image: + return {'changed': False} + + if image.RUNNING_VMS > 0: + self.module.fail_json(msg="Cannot delete image. There are " + str(image.RUNNING_VMS) + " VMs using it.") + + if not self.module.check_mode: + self.one.image.delete(image.ID) + self.wait_for_delete(image.ID, wait_timeout) + + return {'changed': True} + + +def main(): + ImageModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py deleted file mode 120000 index 7484742685..0000000000 --- a/plugins/modules/one_image_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/opennebula/one_image_info.py \ No newline at end of file diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py new file mode 100644 index 0000000000..37c70c69f1 --- /dev/null +++ b/plugins/modules/one_image_info.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# Copyright (c) 2018, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import annotations + +DOCUMENTATION = r""" +module: one_image_info +short_description: Gather information on OpenNebula images +description: + - Gather information on OpenNebula images. +requirements: + - pyone +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes + - community.general.attributes.info_module +options: + ids: + description: + - A list of images IDs whose facts you want to gather. + - Module can use integers too. + aliases: ['id'] + type: list + elements: str + name: + description: + - A O(name) of the image whose facts is gathered. + - If the O(name) begins with V(~) the O(name) is used as regex pattern, which restricts the list of images (whose facts + is returned) whose names match specified regex. + - Also, if the O(name) begins with V(~*) case-insensitive matching is performed. + - See examples for more details. 
+  type: str
+author:
+  - "Milan Ilic (@ilicmilan)"
+  - "Jan Meerkamp (@meerkampdvv)"
+"""
+
+EXAMPLES = r"""
+- name: Gather facts about all images
+  community.general.one_image_info:
+  register: result
+
+- name: Print all images facts
+  ansible.builtin.debug:
+    var: result
+
+- name: Gather facts about an image using ID
+  community.general.one_image_info:
+    ids: 123
+
+- name: Gather facts about an image using a list of IDs
+  community.general.one_image_info:
+    ids:
+      - 123
+      - 456
+      - 789
+      - 0
+
+- name: Gather facts about an image using the name
+  community.general.one_image_info:
+    name: 'foo-image'
+  register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+  community.general.one_image_info:
+    name: '~app-image-.*'
+  register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+  community.general.one_image_info:
+    name: '~*foo-image-.*'
+  register: foo_images
+"""
+
+RETURN = r"""
+images:
+  description: A list of images info.
+  type: complex
+  returned: success
+  contains:
+    id:
+      description: The image's ID.
+      type: int
+      sample: 153
+    name:
+      description: The image's name.
+      type: str
+      sample: app1
+    group_id:
+      description: The image's group ID.
+      type: int
+      sample: 1
+    group_name:
+      description: The image's group name.
+      type: str
+      sample: one-users
+    owner_id:
+      description: The image's owner ID.
+      type: int
+      sample: 143
+    owner_name:
+      description: The image's owner name.
+      type: str
+      sample: ansible-test
+    state:
+      description: The image's state.
+      type: str
+      sample: READY
+    used:
+      description: The image's usage status.
+      type: bool
+      sample: true
+    running_vms:
+      description: The image's count of running VMs that use this image.
+      type: int
+      sample: 7
+    permissions:
+      description: The image's permissions.
+      type: dict
+      version_added: 9.5.0
+      contains:
+        owner_u:
+          description: The image's owner USAGE permissions.
+          type: str
+          sample: 1
+        owner_m:
+          description: The image's owner MANAGE permissions.
+          type: str
+          sample: 0
+        owner_a:
+          description: The image's owner ADMIN permissions.
+          type: str
+          sample: 0
+        group_u:
+          description: The image's group USAGE permissions.
+          type: str
+          sample: 0
+        group_m:
+          description: The image's group MANAGE permissions.
+          type: str
+          sample: 0
+        group_a:
+          description: The image's group ADMIN permissions.
+          type: str
+          sample: 0
+        other_u:
+          description: The image's other users USAGE permissions.
+          type: str
+          sample: 0
+        other_m:
+          description: The image's other users MANAGE permissions.
+          type: str
+          sample: 0
+        other_a:
+          description: The image's other users ADMIN permissions.
+          type: str
+          sample: 0
+      sample:
+        owner_u: 1
+        owner_m: 0
+        owner_a: 0
+        group_u: 0
+        group_m: 0
+        group_a: 0
+        other_u: 0
+        other_m: 0
+        other_a: 0
+    type:
+      description: The image's type.
+      type: int
+      sample: 0
+      version_added: 9.5.0
+    disk_type:
+      description: The image's format type.
+      type: int
+      sample: 0
+      version_added: 9.5.0
+    persistent:
+      description: The image's persistence status (1 means true, 0 means false).
+      type: int
+      sample: 1
+      version_added: 9.5.0
+    source:
+      description: The image's source.
+      type: str
+      sample: /var/lib/one//datastores/100/somerandomstringxd
+      version_added: 9.5.0
+    path:
+      description: The image's filesystem path.
+      type: str
+      sample: /var/tmp/hello.qcow2
+      version_added: 9.5.0
+    fstype:
+      description: The image's filesystem type.
+      type: str
+      sample: ext4
+      version_added: 9.5.0
+    size:
+      description: The image's size in MegaBytes.
+      type: int
+      sample: 10000
+      version_added: 9.5.0
+    cloning_ops:
+      description: The image's cloning operations per second.
+      type: int
+      sample: 0
+      version_added: 9.5.0
+    cloning_id:
+      description: The image's cloning ID.
+      type: int
+      sample: -1
+      version_added: 9.5.0
+    target_snapshot:
+      description: The image's target snapshot.
+      type: int
+      sample: 1
+      version_added: 9.5.0
+    datastore_id:
+      description: The image's datastore ID.
+      type: int
+      sample: 100
+      version_added: 9.5.0
+    datastore:
+      description: The image's datastore name.
+      type: str
+      sample: image_datastore
+      version_added: 9.5.0
+    vms:
+      description: The image's list of VM IDs.
+      type: list
+      elements: int
+      version_added: 9.5.0
+      sample:
+        - 1
+        - 2
+        - 3
+    clones:
+      description: The image's list of clone IDs.
+      type: list
+      elements: int
+      version_added: 9.5.0
+      sample:
+        - 1
+        - 2
+        - 3
+    app_clones:
+      description: The image's list of app_clone IDs.
+      type: list
+      elements: int
+      version_added: 9.5.0
+      sample:
+        - 1
+        - 2
+        - 3
+    snapshots:
+      description: The image's list of snapshots.
+      type: list
+      version_added: 9.5.0
+      sample:
+        - date: 123123
+          parent: 1
+          size: 10228
+          allow_orphans: 1
+          children: 0
+          active: 1
+          name: SampleName
+"""
+
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+class ImageInfoModule(OpenNebulaModule):
+    def __init__(self):
+        argument_spec = dict(
+            ids=dict(type='list', aliases=['id'], elements='str'),
+            name=dict(type='str'),
+        )
+        mutually_exclusive = [
+            ['ids', 'name'],
+        ]
+
+        OpenNebulaModule.__init__(self,
+                                  argument_spec,
+                                  supports_check_mode=True,
+                                  mutually_exclusive=mutually_exclusive)
+
+    def run(self, one, module, result):
+        params = module.params
+        ids = params.get('ids')
+        name = params.get('name')
+
+        if ids:
+            images = self.get_images_by_ids(ids)
+        elif name:
+            images = self.get_images_by_name(name)
+        else:
+            images = self.get_all_images().IMAGE
+
+        self.result = {
+            'images': [self.get_image_info(image) for image in images]
+        }
+
+        self.exit()
+
+    def get_all_images(self):
+        pool = self.one.imagepool.info(-2, -1, -1, -1)
+        # Filter -2 means fetch all images user can Use
+
+        return pool
+
+    def get_images_by_ids(self, ids):
+        images = []
+        pool = self.get_all_images()
+
+        for image in pool.IMAGE:
+            if str(image.ID) in ids:
+                images.append(image)
+                ids.remove(str(image.ID))
+                if len(ids) == 0:
+                    break
+
+        if len(ids) > 0:
+            self.module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join(str(image_id) for image_id in ids))
+
+        return images
+
+    def get_images_by_name(self, name_pattern):
+        images = []
+        pattern = None
+
+        pool = self.get_all_images()
+
+        if name_pattern.startswith('~'):
+            import re
+            if name_pattern[1] == '*':
+                pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+            else:
+                pattern = re.compile(name_pattern[1:])
+
+        for image in pool.IMAGE:
+            if pattern is not None:
+                if pattern.match(image.NAME):
+                    images.append(image)
+            elif name_pattern == image.NAME:
+                images.append(image)
+                break
+
+        # an exact name (no regex pattern) was given but no image matched
+        if pattern is None and len(images) == 0:
+            self.module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+        return images
+
+
+def main():
+    ImageInfoModule().run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py
deleted file mode 120000
index 8fbacfa35c..0000000000
--- a/plugins/modules/one_service.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/opennebula/one_service.py
\ No newline at end of file
diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py
new file mode 100644
index 0000000000..78238fd618
--- /dev/null
+++ b/plugins/modules/one_service.py
@@ -0,0 +1,758 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Milan Ilic
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: one_service
+short_description: Deploy and manage OpenNebula services
+description:
+  - Manage OpenNebula services.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  api_url:
+    description:
+      - URL of the OpenNebula OneFlow API server.
+      - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+      - If not set then the value of the E(ONEFLOW_URL) environment variable is used.
+    type: str
+  api_username:
+    description:
+      - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME)
+        environment variable is used.
+    type: str
+  api_password:
+    description:
+      - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD)
+        environment variable is used.
+    type: str
+  template_name:
+    description:
+      - Name of service template to use to create a new instance of a service.
+    type: str
+  template_id:
+    description:
+      - ID of a service template to use to create a new instance of a service.
+    type: int
+  service_id:
+    description:
+      - ID of a service instance that you would like to manage.
+    type: int
+  service_name:
+    description:
+      - Name of a service instance that you would like to manage.
+    type: str
+  unique:
+    description:
+      - Setting O(unique=true) ensures that there is only one service instance running with a name set with O(service_name)
+        when instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below.
+    type: bool
+    default: false
+  state:
+    description:
+      - V(present) - instantiate a service from a template specified with O(template_id) or O(template_name).
+      - V(absent) - terminate an instance of a service specified with O(service_id) or O(service_name).
+    choices: ["present", "absent"]
+    default: present
+    type: str
+  mode:
+    description:
+      - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage)
+        and nothing to group and others.
+    type: str
+  owner_id:
+    description:
+      - ID of the user which is set as the owner of the service.
+    type: int
+  group_id:
+    description:
+      - ID of the group which is set as the group of the service.
+    type: int
+  wait:
+    description:
+      - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    default: 300
+    type: int
+  custom_attrs:
+    description:
+      - Dictionary of key/value custom attributes which are used when instantiating a new service.
+    default: {}
+    type: dict
+  role:
+    description:
+      - Name of the role whose cardinality should be changed.
+    type: str
+  cardinality:
+    description:
+      - Number of VMs for the specified role.
+    type: int
+  force:
+    description:
+      - Force the new cardinality even if it is outside the limits.
+    type: bool
+    default: false
+author:
+  - "Milan Ilic (@ilicmilan)"
+"""
+
+EXAMPLES = r"""
+- name: Instantiate a new service
+  community.general.one_service:
+    template_id: 90
+  register: result
+
+- name: Print service properties
+  ansible.builtin.debug:
+    var: result
+
+- name: Instantiate a new service with specified service_name, service group and mode
+  community.general.one_service:
+    template_name: 'app1_template'
+    service_name: 'app1'
+    group_id: 1
+    mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+  community.general.one_service:
+    template_id: 90
+    custom_attrs:
+      public_network_id: 21
+      private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+  community.general.one_service:
+    template_id: 53
+    service_name: 'foo'
+    unique: true
+
+- name: Delete a service by ID
+  community.general.one_service:
+    service_id: 153
+    state: absent
+
+- name: Get service info
+  community.general.one_service:
+    service_id: 153
+  register: service_info
+
+- name: Change service owner, group and mode
+  community.general.one_service:
+    service_name: 'app2'
+    owner_id: 34
+    group_id: 113
+    mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+  community.general.one_service:
+    template_id: 43
+    service_name: 'foo1'
+    wait: true
+
+- name: Wait for service to become RUNNING
+  community.general.one_service:
+    service_id: 112
+    wait: true
+
+- name: Change role cardinality
+  community.general.one_service:
+    service_id: 153
+    role: bar
+    cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+  community.general.one_service:
+    service_id: 112
+    role: foo
+    cardinality: 7
+    wait: true
+"""
+
+RETURN = r"""
+service_id:
+  description: Service ID.
+  type: int
+  returned: success
+  sample: 153
+service_name:
+  description: Service name.
+  type: str
+  returned: success
+  sample: app1
+group_id:
+  description: Service's group ID.
+  type: int
+  returned: success
+  sample: 1
+group_name:
+  description: Service's group name.
+  type: str
+  returned: success
+  sample: one-users
+owner_id:
+  description: Service's owner ID.
+  type: int
+  returned: success
+  sample: 143
+owner_name:
+  description: Service's owner name.
+  type: str
+  returned: success
+  sample: ansible-test
+state:
+  description: State of service instance.
+  type: str
+  returned: success
+  sample: RUNNING
+mode:
+  description: Service's mode.
+  type: int
+  returned: success
+  sample: 660
+roles:
+  description: List of dictionaries of roles, each role is described by name, cardinality, state and node IDs.
+ type: list + returned: success + sample: + - {"cardinality": 1, "name": "foo", "state": "RUNNING", "ids": [123, 456]} + - {"cardinality": 2, "name": "bar", "state": "RUNNING", "ids": [452, 567, 746]} +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url + +STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE", + "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN") + + +def get_all_templates(module, auth): + try: + all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg=str(e)) + + return module.from_json(all_templates.read()) + + +def get_template(module, auth, pred): + all_templates_dict = get_all_templates(module, auth) + + found = 0 + found_template = None + template_name = '' + + if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]: + for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]: + if pred(template): + found = found + 1 + found_template = template + template_name = template["NAME"] + + if found <= 0: + return None + elif found > 1: + module.fail_json(msg="There is no template with unique name: " + template_name) + else: + return found_template + + +def get_all_services(module, auth): + try: + response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg=str(e)) + + return module.from_json(response.read()) + + +def get_service(module, auth, pred): + all_services_dict = get_all_services(module, auth) + + found = 0 + found_service = None + service_name = '' + + if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]: + for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]: + if pred(service): + found = found + 1 + found_service = service + service_name = service["NAME"] + + # fail if there are more services with same name + if found > 1: + module.fail_json(msg="There are multiple services with a name: '" + + service_name + "'. 
You have to use a unique service name or use 'service_id' instead.") + elif found <= 0: + return None + else: + return found_service + + +def get_service_by_id(module, auth, service_id): + return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None + + +def get_service_by_name(module, auth, service_name): + return get_service(module, auth, lambda service: (service["NAME"] == service_name)) + + +def get_service_info(module, auth, service): + + result = { + "service_id": int(service["ID"]), + "service_name": service["NAME"], + "group_id": int(service["GID"]), + "group_name": service["GNAME"], + "owner_id": int(service["UID"]), + "owner_name": service["UNAME"], + "state": STATES[service["TEMPLATE"]["BODY"]["state"]] + } + + roles_status = service["TEMPLATE"]["BODY"]["roles"] + roles = [] + for role in roles_status: + nodes_ids = [] + if "nodes" in role: + for node in role["nodes"]: + nodes_ids.append(node["deploy_id"]) + roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids}) + + result["roles"] = roles + result["mode"] = int(parse_service_permissions(service)) + + return result + + +def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): + # make sure that the values in custom_attrs dict are strings + custom_attrs_with_str = {k: str(v) for k, v in custom_attrs.items()} + + data = { + "action": { + "perform": "instantiate", + "params": { + "merge_template": { + "custom_attrs_values": custom_attrs_with_str, + "name": service_name + } + } + } + } + + try: + response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST", + data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg=str(e)) + + service_result = module.from_json(response.read())["DOCUMENT"] + + return service_result + + +def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + try: + status_result = open_url(auth.url + "/service/" + str(service_id), method="GET", + force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg="Request for service status has failed. Error message: " + str(e)) + + status_result = module.from_json(status_result.read()) + service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"] + + if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]: + return status_result["DOCUMENT"] + elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]: + log_message = '' + for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]: + if log_info["severity"] == "E": + log_message = log_message + log_info["message"] + break + + module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". 
Error message: " + log_message) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired") + + +def change_service_permissions(module, auth, service_id, permissions): + + data = { + "action": { + "perform": "chmod", + "params": {"octet": permissions} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_service_owner(module, auth, service_id, owner_id): + data = { + "action": { + "perform": "chown", + "params": {"owner_id": owner_id} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_service_group(module, auth, service_id, group_id): + + data = { + "action": { + "perform": "chgrp", + "params": {"group_id": group_id} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_role_cardinality(module, auth, service_id, role, cardinality, force): + + data = { + "cardinality": cardinality, + "force": force + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT", + force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + if status_result.getcode() != 204: + module.fail_json(msg="Failed to change cardinality for role: " + role + ". 
Return code: " + str(status_result.getcode())) + + +def check_change_service_owner(module, service, owner_id): + old_owner_id = int(service["UID"]) + + return old_owner_id != owner_id + + +def check_change_service_group(module, service, group_id): + old_group_id = int(service["GID"]) + + return old_group_id != group_id + + +def parse_service_permissions(service): + perm_dict = service["PERMISSIONS"] + ''' + This is the structure of the 'PERMISSIONS' dictionary: + + "PERMISSIONS": { + "OWNER_U": "1", + "OWNER_M": "1", + "OWNER_A": "0", + "GROUP_U": "0", + "GROUP_M": "0", + "GROUP_A": "0", + "OTHER_U": "0", + "OTHER_M": "0", + "OTHER_A": "0" + } + ''' + + owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) + group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) + other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"]) + + permissions = str(owner_octal) + str(group_octal) + str(other_octal) + + return permissions + + +def check_change_service_permissions(module, service, permissions): + old_permissions = parse_service_permissions(service) + + return old_permissions != permissions + + +def check_change_role_cardinality(module, service, role_name, cardinality): + roles_list = service["TEMPLATE"]["BODY"]["roles"] + + for role in roles_list: + if role["name"] == role_name: + return int(role["cardinality"]) != cardinality + + module.fail_json(msg="There is no role with name: " + role_name) + + +def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout): + if not service_name: + service_name = '' + changed = False + service = None + + if unique: + service = get_service_by_name(module, auth, service_name) + + if not service or service["TEMPLATE"]["BODY"]["state"] == "DONE": + if not module.check_mode: + service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) + changed = True + + # if check_mode=true and there would be changes, service doesn't exist and we can not get it + if module.check_mode and changed: + return {"changed": True} + + result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait, + wait_timeout=wait_timeout, permissions=permissions, service=service) + + if result["changed"]: + changed = True + + result["changed"] = changed + + return result + + +def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None, + role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None): + + changed = False + + if not service: + service = get_service_by_id(module, auth, service_id) + else: + service_id = service["ID"] + + if not service: + module.fail_json(msg="There is no service with id: " + str(service_id)) + + if owner_id: + if check_change_service_owner(module, service, owner_id): + if not module.check_mode: + change_service_owner(module, auth, service_id, owner_id) + changed = True + if group_id: + if check_change_service_group(module, service, group_id): + if not module.check_mode: + change_service_group(module, auth, service_id, group_id) + changed = True + if permissions: + if check_change_service_permissions(module, service, permissions): + if not module.check_mode: + change_service_permissions(module, auth, service_id, permissions) + changed = True + + if role: + if check_change_role_cardinality(module, service, 
role, cardinality): + if not module.check_mode: + change_role_cardinality(module, auth, service_id, role, cardinality, force) + changed = True + + if wait and not module.check_mode: + service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout) + + # if something has changed, fetch service info again + if changed: + service = get_service_by_id(module, auth, service_id) + + service_info = get_service_info(module, auth, service) + service_info["changed"] = changed + + return service_info + + +def delete_service(module, auth, service_id): + service = get_service_by_id(module, auth, service_id) + if not service: + return {"changed": False} + + service_info = get_service_info(module, auth, service) + + service_info["changed"] = True + + if module.check_mode: + return service_info + + try: + result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg="Service deletion has failed. Error message: " + str(e)) + + return service_info + + +def get_template_by_name(module, auth, template_name): + return get_template(module, auth, lambda template: (template["NAME"] == template_name)) + + +def get_template_by_id(module, auth, template_id): + return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None + + +def get_template_id(module, auth, requested_id, requested_name): + template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name) + + if template: + return template["ID"] + + return None + + +def get_service_id_by_name(module, auth, service_name): + service = get_service_by_name(module, auth, service_name) + + if service: + return service["ID"] + + return None + + +def get_connection_info(module): + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONEFLOW_URL') + + if not username: + username = os.environ.get('ONEFLOW_USERNAME') + + if not password: + password = os.environ.get('ONEFLOW_PASSWORD') + + if not (url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'user', 'password')) + + return auth_params(url=url, user=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "service_name": {"required": False, "type": "str"}, + "service_id": {"required": False, "type": "int"}, + "template_name": {"required": False, "type": "str"}, + "template_id": {"required": False, "type": "int"}, + "state": { + "default": "present", + "choices": ['present', 'absent'], + "type": "str" + }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, + "unique": {"default": False, "type": "bool"}, + "wait": {"default": False, "type": "bool"}, + "wait_timeout": {"default": 300, "type": "int"}, + "custom_attrs": {"default": {}, "type": "dict"}, + "role": {"required": False, "type": "str"}, + "cardinality": {"required": False, "type": "int"}, + "force": {"default": False, "type": "bool"} + } 
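+    # Note on the exclusivity rules below: they separate the module's two usage
+    # modes. Instantiation uses template_name/template_id (optionally with
+    # custom_attrs), while managing an existing instance uses
+    # service_id/service_name (optionally with role and cardinality, which must
+    # be passed together). Mixing parameters from both modes fails early.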
+
+    module = AnsibleModule(argument_spec=fields,
+                           mutually_exclusive=[
+                               ['template_id', 'template_name', 'service_id'],
+                               ['service_id', 'service_name'],
+                               ['template_id', 'template_name', 'role'],
+                               ['template_id', 'template_name', 'cardinality'],
+                               ['service_id', 'custom_attrs']
+                           ],
+                           required_together=[['role', 'cardinality']],
+                           supports_check_mode=True)
+
+    auth = get_connection_info(module)
+    params = module.params
+    service_name = params.get('service_name')
+    service_id = params.get('service_id')
+
+    requested_template_id = params.get('template_id')
+    requested_template_name = params.get('template_name')
+    state = params.get('state')
+    permissions = params.get('mode')
+    owner_id = params.get('owner_id')
+    group_id = params.get('group_id')
+    unique = params.get('unique')
+    wait = params.get('wait')
+    wait_timeout = params.get('wait_timeout')
+    custom_attrs = params.get('custom_attrs')
+    role = params.get('role')
+    cardinality = params.get('cardinality')
+    force = params.get('force')
+
+    template_id = None
+
+    if requested_template_id or requested_template_name:
+        template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
+        if not template_id:
+            if requested_template_id:
+                module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
+            elif requested_template_name:
+                module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+    if unique and not service_name:
+        module.fail_json(msg="You cannot use unique without passing service_name!")
+
+    if template_id and state == 'absent':
+        module.fail_json(msg="State absent is not valid for template")
+
+    if template_id and state == 'present':  # Instantiate a service
+        result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
+                                              group_id, permissions, custom_attrs, unique, wait, wait_timeout)
+    else:
+        if not (service_id or service_name):
+            module.fail_json(msg="To manage the service at least the service ID or service name should be specified!")
+        if custom_attrs:
+            module.fail_json(msg="You can only set custom_attrs when instantiating a service!")
+
+        if not service_id:
+            service_id = get_service_id_by_name(module, auth, service_name)
+        # The task should be failed when we want to manage a non-existent service identified by its name
+        if not service_id and state == 'present':
+            module.fail_json(msg="There is no service with name: " + service_name)
+
+        if state == 'absent':
+            result = delete_service(module, auth, service_id)
+        else:
+            result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py
deleted file mode 120000
index 78637e1843..0000000000
--- a/plugins/modules/one_template.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/opennebula/one_template.py
\ No newline at end of file
diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py
new file mode 100644
index 0000000000..a279e3a88c
--- /dev/null
+++ b/plugins/modules/one_template.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2021, Jyrki Gadinger
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: one_template
+
+short_description: Manages OpenNebula templates + +version_added: 2.4.0 + +requirements: + - pyone + +description: + - Manages OpenNebula templates. +attributes: + check_mode: + support: partial + details: + - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually + change. + diff_mode: + support: none + +options: + id: + description: + - A O(id) of the template you would like to manage. If not set then a new template is created with the given O(name). + type: int + name: + description: + - A O(name) of the template you would like to manage. If a template with the given name does not exist it is created, + otherwise it is managed by this module. + type: str + template: + description: + - A string containing the template contents. + type: str + state: + description: + - V(present) - state that is used to manage the template. + - V(absent) - delete the template. + choices: ["present", "absent"] + default: present + type: str + filter: + description: + - V(user_primary_group) - Resources belonging to the user's primary group. + - V(user) - Resources belonging to the user. + - V(all) - All resources. + - V(user_groups) - Resources belonging to the user and any of his groups. + choices: [user_primary_group, user, all, user_groups] + default: user + type: str + version_added: 10.3.0 + +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes + +author: + - "Jyrki Gadinger (@nilsding)" +""" + +EXAMPLES = r""" +- name: Fetch the TEMPLATE by id + community.general.one_template: + id: 6459 + register: result + +- name: Print the TEMPLATE properties + ansible.builtin.debug: + var: result + +- name: Fetch the TEMPLATE by name + community.general.one_template: + name: tf-prd-users-workerredis-p6379a + register: result + +- name: Create a new or update an existing TEMPLATE + community.general.one_template: + name: generic-opensuse + template: | + CONTEXT = [ + HOSTNAME = "generic-opensuse" + ] + CPU = "1" + CUSTOM_ATTRIBUTE = "" + DISK = [ + CACHE = "writeback", + DEV_PREFIX = "sd", + DISCARD = "unmap", + IMAGE = "opensuse-leap-15.2", + IMAGE_UNAME = "oneadmin", + IO = "threads", + SIZE = "" ] + MEMORY = "2048" + NIC = [ + MODEL = "virtio", + NETWORK = "testnet", + NETWORK_UNAME = "oneadmin" ] + OS = [ + ARCH = "x86_64", + BOOT = "disk0" ] + SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\"" + VCPU = "2" + +- name: Delete the TEMPLATE by id + community.general.one_template: + id: 6459 + state: absent +""" + +RETURN = r""" +id: + description: Template ID. + type: int + returned: when O(state=present) + sample: 153 +name: + description: Template name. + type: str + returned: when O(state=present) + sample: app1 +template: + description: The parsed template. + type: dict + returned: when O(state=present) +group_id: + description: Template's group ID. + type: int + returned: when O(state=present) + sample: 1 +group_name: + description: Template's group name. + type: str + returned: when O(state=present) + sample: one-users +owner_id: + description: Template's owner ID. + type: int + returned: when O(state=present) + sample: 143 +owner_name: + description: Template's owner name. 
+ type: str + returned: when O(state=present) + sample: ansible-test +""" + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class TemplateModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int'), + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str'), + filter=dict(type='str', choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + filter = params.get('filter') + + self.result = {} + + template = self.get_template_instance(id, name, filter) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data, filter) + else: + self.result = self.update_template(template, template_data, filter) + + self.exit() + + def get_template(self, predicate, filter): + # filter was included, for discussions see: + # Issue: https://github.com/ansible-collections/community.general/issues/9278 + # PR: https://github.com/ansible-collections/community.general/pull/9547 + # the other two parameters are used for pagination, -1 for both essentially means "return all" + filter_values = {'user_primary_group': -4, 'user': -3, 'all': -2, 'user_groups': -1} + pool = self.one.templatepool.info(filter_values[filter], -1, -1) + + for template in pool.VMTEMPLATE: + if predicate(template): + return template + + return None + + def get_template_by_id(self, template_id, filter): + return self.get_template(lambda template: (template.ID == template_id), filter) + + def get_template_by_name(self, name, filter): + return self.get_template(lambda template: (template.NAME == name), filter) + + def get_template_instance(self, requested_id, requested_name, filter): + if requested_id: + return self.get_template_by_id(requested_id, filter) + else: + return self.get_template_by_name(requested_name, filter) + + def get_template_info(self, template): + info = { + 'id': template.ID, + 'name': template.NAME, + 'template': template.TEMPLATE, + 'user_name': template.UNAME, + 'user_id': template.UID, + 'group_name': template.GNAME, + 'group_id': template.GID, + } + + return info + + def create_template(self, name, template_data, filter): + if not self.module.check_mode: + self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) + + result = self.get_template_info(self.get_template_by_name(name, filter)) + result['changed'] = True + + return result + + def update_template(self, template, template_data, filter): + if not self.module.check_mode: + # 0 = replace the whole template + self.one.template.update(template.ID, template_data, 0) + + result = self.get_template_info(self.get_template_by_id(template.ID, filter)) + if 
self.module.check_mode: + # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. + result['changed'] = True + else: + # if the previous parsed template data is not equal to the updated one, this has changed + result['changed'] = template.TEMPLATE != result['template'] + + return result + + def delete_template(self, template): + if not template: + return {'changed': False} + + if not self.module.check_mode: + self.one.template.delete(template.ID) + + return {'changed': True} + + +def main(): + TemplateModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py deleted file mode 120000 index 31255bd356..0000000000 --- a/plugins/modules/one_vm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/opennebula/one_vm.py \ No newline at end of file diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py new file mode 100644 index 0000000000..53806cad9b --- /dev/null +++ b/plugins/modules/one_vm.py @@ -0,0 +1,1727 @@ +#!/usr/bin/python +# Copyright (c) 2017, Milan Ilic +# Copyright (c) 2019, Jan Meerkamp +# Copyright (c) 2025, Tom Paine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import annotations + +DOCUMENTATION = r""" +module: one_vm +short_description: Creates or terminates OpenNebula instances +description: + - Manages OpenNebula instances. +requirements: + - pyone +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_url: + description: + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. + - If not set then the value of the E(ONE_URL) environment variable is used. + type: str + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment + variable is used. + type: str + api_password: + description: + - Password of the user to login into OpenNebula RPC server. If not set then the value of the E(ONE_PASSWORD) environment + variable is used. if both O(api_username) or O(api_password) are not set, then it tries to authenticate with ONE auth + file. Default path is C(~/.one/one_auth). + - Set environment variable E(ONE_AUTH) to override this path. + type: str + template_name: + description: + - Name of VM template to use to create a new instance. + type: str + template_id: + description: + - ID of a VM template to use to create a new instance. + type: int + vm_start_on_hold: + description: + - Set to true to put VM on hold while creating. + default: false + type: bool + instance_ids: + description: + - 'A list of instance IDs used for states: V(absent), V(running), V(rebooted), V(poweredoff).' + aliases: ['ids'] + type: list + elements: int + state: + description: + - V(present) - create instances from a template specified with C(template_id)/C(template_name). + - V(running) - run instances. + - V(poweredoff) - power-off instances. + - V(rebooted) - reboot instances. + - V(absent) - terminate instances. + choices: ["present", "absent", "running", "rebooted", "poweredoff"] + default: present + type: str + hard: + description: + - Reboot, power-off or terminate instances C(hard). 
+    default: false
+    type: bool
+  wait:
+    description:
+      - Wait for the instance to reach its desired state before returning. Keep in mind if you are waiting for the instance
+        to be in running state it does not mean that you are able to SSH on that machine, only that the boot process has
+        started on that instance. See the example using the M(ansible.builtin.wait_for) module for details.
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    default: 300
+    type: int
+  attributes:
+    description:
+      - A dictionary of key/value attributes to add to new instances, or for setting C(state) of instances with these attributes.
+      - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+      - Be aware V(NAME) is a special attribute which sets the name of the VM when it is deployed.
+      - C(#) character(s) can be appended to the C(NAME) and the module automatically adds indexes to the names of VMs.
+      - 'For example: V(NAME: foo-###) would create VMs with names V(foo-000), V(foo-001),...'
+      - When used with O(count_attributes) and O(exact_count) the module matches the base name without the index part.
+    default: {}
+    type: dict
+  labels:
+    description:
+      - A list of labels to associate with new instances, or for setting C(state) of instances with these labels.
+    default: []
+    type: list
+    elements: str
+  count_attributes:
+    description:
+      - A dictionary of key/value attributes that can only be used with O(exact_count) to determine how many nodes based on
+        a specific attributes criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES
+        section.
+    type: dict
+  count_labels:
+    description:
+      - A list of labels that can only be used with O(exact_count) to determine how many nodes based on a specific labels
+        criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES section.
+    type: list
+    elements: str
+  count:
+    description:
+      - Number of instances to launch.
+    default: 1
+    type: int
+  exact_count:
+    description:
+      - Indicates how many instances that match O(count_attributes) and O(count_labels) parameters should be deployed. Instances
+        are either created or terminated based on this value.
+      - B(NOTE:) Instances with the least IDs are terminated first.
+    type: int
+  mode:
+    description:
+      - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing
+        to group and others.
+    type: str
+  owner_id:
+    description:
+      - ID of the user which is set as the owner of the instance.
+    type: int
+  group_id:
+    description:
+      - ID of the group which is set as the group of the instance.
+    type: int
+  memory:
+    description:
+      - The size of the memory for new instances (in MB, GB, ...).
+    type: str
+  disk_size:
+    description:
+      - The size of the disk created for new instances (in MB, GB, TB, ...).
+      - B(NOTE:) If the template has multiple disks, the order of the sizes is matched against the order specified in O(template_id)/O(template_name).
+    type: list
+    elements: str
+  cpu:
+    description:
+      - Percentage of CPU divided by 100 required for the new instance. Half a processor is written 0.5.
+    type: float
+  vcpu:
+    description:
+      - Number of CPUs (cores) the new VM uses.
+    type: int
+  networks:
+    description:
+      - A list of dictionaries with network parameters. See examples for more details.
+    default: []
+    type: list
+    elements: dict
+  disk_saveas:
+    description:
+      - Creates an image from a VM disk.
+ - It is a dictionary where you have to specify C(name) of the new image. + - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0. + - B(NOTE:) This operation is only performed on the first VM (if more than one VM ID is passed) and the VM has to be + in the C(poweredoff) state. + - Also this operation fails if an image with specified C(name) already exists. + type: dict + persistent: + description: + - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy. + default: false + type: bool + version_added: '0.2.0' + datastore_id: + description: + - Name of Datastore to use to create a new instance. + version_added: '0.2.0' + type: int + datastore_name: + description: + - Name of Datastore to use to create a new instance. + version_added: '0.2.0' + type: str + updateconf: + description: + - When O(instance_ids) is provided, updates running VMs with the C(updateconf) API call. + - When new VMs are being created, emulates the C(updateconf) API call using direct template merge. + - Allows for complete modifications of the C(CONTEXT) attribute. + - 'Supported attributes include:' + - B(BACKUP_CONFIG:) V(BACKUP_VOLATILE), V(FS_FREEZE), V(INCREMENT_MODE), V(KEEP_LAST), V(MODE); + - B(CONTEXT:) (Any value, except V(ETH*). Variable substitutions are made); + - B(CPU_MODEL:) V(FEATURES), V(MODEL); + - B(FEATURES:) V(ACPI), V(APIC), V(GUEST_AGENT), V(HYPERV), V(IOTHREADS), V(LOCALTIME), V(PAE), V(VIRTIO_BLK_QUEUES), + V(VIRTIO_SCSI_QUEUES); + - B(GRAPHICS:) V(COMMAND), V(KEYMAP), V(LISTEN), V(PASSWD), V(PORT), V(TYPE); + - B(INPUT:) V(BUS), V(TYPE); + - B(OS:) V(ARCH), V(BOOT), V(BOOTLOADER), V(FIRMWARE), V(INITRD), V(KERNEL), V(KERNEL_CMD), V(MACHINE), V(ROOT), V(SD_DISK_BUS), + V(UUID); + - B(RAW:) V(DATA), V(DATA_VMX), V(TYPE), V(VALIDATE); + - B(VIDEO:) V(ATS), V(IOMMU), V(RESOLUTION), V(TYPE), V(VRAM). 
+    type: dict
+    version_added: 6.3.0
+author:
+  - "Milan Ilic (@ilicmilan)"
+  - "Jan Meerkamp (@meerkampdvv)"
+"""
+
+
+EXAMPLES = r"""
+- name: Create a new instance
+  community.general.one_vm:
+    template_id: 90
+  register: result
+
+- name: Print VM properties
+  ansible.builtin.debug:
+    var: result
+
+- name: Deploy a new VM on hold
+  community.general.one_vm:
+    template_name: 'app1_template'
+    vm_start_on_hold: true
+
+- name: Deploy a new VM and set its name to 'foo'
+  community.general.one_vm:
+    template_name: 'app1_template'
+    attributes:
+      name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+  community.general.one_vm:
+    template_id: 90
+    group_id: 16
+    mode: 660
+
+- name: Deploy a new VM as persistent
+  community.general.one_vm:
+    template_id: 90
+    persistent: true
+
+- name: Change VM's permissions to 640
+  community.general.one_vm:
+    instance_ids: 5
+    mode: 640
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+  community.general.one_vm:
+    template_id: 15
+    disk_size: 35.2 GB
+    memory: 4 GB
+    vcpu: 4
+    count: 2
+    networks:
+      - NETWORK_ID: 27
+      - NETWORK: "default-network"
+        NETWORK_UNAME: "app-user"
+        SECURITY_GROUPS: "120,124"
+      - NETWORK_ID: 27
+        SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a Template with two Disks
+  community.general.one_vm:
+    template_id: 42
+    disk_size:
+      - 35.2 GB
+      - 50 GB
+    memory: 4 GB
+    vcpu: 4
+    count: 1
+    networks:
+      - NETWORK_ID: 27
+
+- name: "Deploy a new instance with attribute 'bar: bar1' and set its name to 'foo'"
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: foo
+      bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      foo1: app1
+      foo2: app2
+    exact_count: 2
+    count_attributes:
+      foo1: app1
+      foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: app
+      bar: bar2
+    exact_count: 4
+    count_attributes:
+      bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: fooapp-##
+      foo: bar
+    labels:
+      - app1
+      - app2
+    count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: fooapp-###
+      app: app1
+    count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+  community.general.one_vm:
+    attributes:
+      name: fooapp-#
+    state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete the oldest instances, so only 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+  community.general.one_vm:
+    template_id: 53
+    exact_count: 1
+    count_attributes:
+      name: fooapp-#
+
+- name: Deploy a new instance with a network
+  community.general.one_vm:
+    template_id: 53
+    networks:
+      - NETWORK_ID: 27
+  register: vm
+
+- name: Wait for SSH to come up
+  ansible.builtin.wait_for:
+    port: 22
+    host: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+  community.general.one_vm:
+    instance_ids:
+      - 153
+      - 160
+    state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+  community.general.one_vm:
+    labels:
+      - foo
+      - app1
+    state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+  community.general.one_vm:
+    attributes:
+      name: foo
+      app: bar
+  register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+  community.general.one_vm:
+    template_name: app_template
+    labels:
+      - foo1
+      - foo2
+    count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+  community.general.one_vm:
+    template_name: app_template
+    labels:
+      - foo1
+    exact_count: 1
+    count_labels:
+      - foo1
+
+- name: Terminate all instances that have attribute foo
+  community.general.one_vm:
+    template_id: 53
+    exact_count: 0
+    count_attributes:
+      foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+  community.general.one_vm:
+    instance_ids: 351
+    state: poweredoff
+    disk_saveas:
+      name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+  community.general.one_vm:
+    instance_ids: 351
+    disk_saveas:
+      name: bar-image
+      disk_id: 1
+
+- name: "Deploy 2 new instances with a custom 'start script'"
+  community.general.one_vm:
+    template_name: app_template
+    count: 2
+    updateconf:
+      CONTEXT:
+        START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
+
+- name: "Add a custom 'start script' to a running VM"
+  community.general.one_vm:
+    instance_ids: 351
+    updateconf:
+      CONTEXT:
+        START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
+
+- name: "Update SSH public keys inside the VM's context"
+  community.general.one_vm:
+    instance_ids: 351
+    updateconf:
+      CONTEXT:
+        SSH_PUBLIC_KEY: |-
+          ssh-rsa ...
+          ssh-ed25519 ...
+"""
+
+RETURN = r"""
+instances_ids:
+  description: A list of IDs of the instances whose state was changed, or which were fetched with the O(instance_ids) option.
+  type: list
+  returned: success
+  sample: [1234, 1235]
+instances:
+  description: A list of info dictionaries for the instances whose state was changed, or which were fetched with the O(instance_ids) option.
+  type: complex
+  returned: success
+  contains:
+    vm_id:
+      description: VM ID.
+      type: int
+      sample: 153
+    vm_name:
+      description: VM name.
+      type: str
+      sample: foo
+    template_id:
+      description: VM's template ID.
+      type: int
+      sample: 153
+    group_id:
+      description: VM's group ID.
+      type: int
+      sample: 1
+    group_name:
+      description: VM's group name.
+      type: str
+      sample: one-users
+    owner_id:
+      description: VM's owner ID.
+      type: int
+      sample: 143
+    owner_name:
+      description: VM's owner name.
+      type: str
+      sample: app-user
+    mode:
+      description: VM's mode.
+      type: str
+      returned: success
+      sample: 660
+    state:
+      description: State of an instance.
+      type: str
+      sample: ACTIVE
+    lcm_state:
+      description: LCM state of an instance; it is only relevant when the state is ACTIVE.
+      type: str
+      sample: RUNNING
+    cpu:
+      description: Percentage of CPU divided by 100.
+      type: float
+      sample: 0.2
+    vcpu:
+      description: Number of CPUs (cores).
+      type: int
+      sample: 2
+    memory:
+      description: The size of the memory in MB.
+      type: str
+      sample: 4096 MB
+    disk_size:
+      description: The size of the disk in MB.
+      type: str
+      sample: 20480 MB
+    networks:
+      description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC.
+ type: list + sample: + [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours. + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance. + type: list + sample: ["foo", "spec-label"] + attributes: + description: A dictionary of key/values attributes that are associated with the instance. + type: dict + sample: + { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } + updateconf: + description: A dictionary of key/values attributes that are set with the updateconf API call. + type: dict + version_added: 6.3.0 + sample: + { + "OS": { + "ARCH": "x86_64" + }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." + } + } +tagged_instances: + description: + - A list of instances info based on a specific attributes and/or labels that are specified with O(count_attributes) and + O(count_labels) options. + type: complex + returned: success + contains: + vm_id: + description: VM ID. + type: int + sample: 153 + vm_name: + description: VM name. + type: str + sample: foo + template_id: + description: VM's template ID. + type: int + sample: 153 + group_id: + description: VM's group ID. + type: int + sample: 1 + group_name: + description: VM's group name. + type: str + sample: one-users + owner_id: + description: VM's user ID. + type: int + sample: 143 + owner_name: + description: VM's user name. + type: str + sample: app-user + mode: + description: VM's mode. + type: str + returned: success + sample: 660 + state: + description: State of an instance. + type: str + sample: ACTIVE + lcm_state: + description: Lcm state of an instance that is only relevant when the state is ACTIVE. + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100. + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores). + type: int + sample: 2 + memory: + description: The size of the memory in MB. + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB. + type: list + sample: ["20480 MB", "10240 MB"] + networks: + description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC. + type: list + sample: + [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours. + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance. + type: list + sample: ["foo", "spec-label"] + attributes: + description: A dictionary of key/values attributes that are associated with the instance. + type: dict + sample: + { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } + updateconf: + description: A dictionary of key/values attributes that are set with the updateconf API call. 
+ type: dict + version_added: 6.3.0 + sample: + { + "OS": { + "ARCH": "x86_64" + }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." + } + } +""" + +try: + import pyone + HAS_PYONE = True +except ImportError: + HAS_PYONE = False + + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.dict_transformations import dict_merge + +from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render + + +# Updateconf attributes documentation: https://docs.opennebula.io/6.10/integration_and_development/system_interfaces/api.html#one-vm-updateconf +UPDATECONF_ATTRIBUTES = { + "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID", "FIRMWARE"], + "CPU_MODEL": ["MODEL", "FEATURES"], + "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT", "VIRTIO_BLK_QUEUES", "VIRTIO_SCSI_QUEUES", "IOTHREADS"], + "INPUT": ["TYPE", "BUS"], + "GRAPHICS": ["TYPE", "LISTEN", "PORT", "PASSWD", "KEYMAP", "COMMAND"], + "VIDEO": ["ATS", "IOMMU", "RESOLUTION", "TYPE", "VRAM"], + "RAW": ["DATA", "DATA_VMX", "TYPE", "VALIDATE"], + "CONTEXT": [], + "BACKUP_CONFIG": ["FS_FREEZE", "KEEP_LAST", "BACKUP_VOLATILE", "MODE", "INCREMENT_MODE"], +} + + +def check_updateconf(module, to_check): + '''Checks if attributes are compatible with one.vm.updateconf API call.''' + for attr, subattributes in to_check.items(): + if attr not in UPDATECONF_ATTRIBUTES: + module.fail_json(msg="'{0:}' is not a valid VM attribute.".format(attr)) + if not UPDATECONF_ATTRIBUTES[attr]: + continue + for subattr in subattributes: + if subattr not in UPDATECONF_ATTRIBUTES[attr]: + module.fail_json(msg="'{0:}' is not a valid VM subattribute of '{1:}'".format(subattr, attr)) + + +def parse_updateconf(vm_template): + '''Extracts 'updateconf' attributes from a VM template.''' + updateconf = {} + for attr, subattributes in vm_template.items(): + if attr not in UPDATECONF_ATTRIBUTES: + continue + tmp = {} + for subattr, value in subattributes.items(): + if UPDATECONF_ATTRIBUTES[attr] and subattr not in UPDATECONF_ATTRIBUTES[attr]: + continue + tmp[subattr] = value + if tmp: + updateconf[attr] = tmp + return updateconf + + +def get_template(module, client, predicate): + + pool = client.templatepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all templates user can Use + found = 0 + found_template = None + template_name = '' + + for template in pool.VMTEMPLATE: + if predicate(template): + found = found + 1 + found_template = template + template_name = template.NAME + + if found == 0: + return None + elif found > 1: + module.fail_json(msg='There are more templates with name: ' + template_name) + return found_template + + +def get_template_by_name(module, client, template_name): + return get_template(module, client, lambda template: (template.NAME == template_name)) + + +def get_template_by_id(module, client, template_id): + return get_template(module, client, lambda template: (template.ID == template_id)) + + +def get_template_id(module, client, requested_id, requested_name): + template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name) + if template: + return template.ID + else: + return None + + +def get_datastore(module, client, predicate): + pool = client.datastorepool.info() + found = 0 + found_datastore = None + datastore_name = '' + + for datastore in pool.DATASTORE: + if 
predicate(datastore):
+            found = found + 1
+            found_datastore = datastore
+            datastore_name = datastore.NAME
+
+    if found == 0:
+        return None
+    elif found > 1:
+        module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
+    return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+    return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+    return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+    # Use an explicit None check so that datastore ID 0 (a valid ID) is not treated as missing.
+    datastore = get_datastore_by_id(module, client, requested_id) if requested_id is not None else get_datastore_by_name(module, client, requested_name)
+    if datastore:
+        return datastore.ID
+    else:
+        return None
+
+
+def get_vm_by_id(client, vm_id):
+    try:
+        vm = client.vm.info(int(vm_id))
+    except Exception:
+        return None
+    return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
+    vms = []
+
+    for vm_id in ids:
+        vm = get_vm_by_id(client, vm_id)
+        if vm is None and state != 'absent':
+            module.fail_json(msg='There is no VM with id=' + str(vm_id))
+        vms.append(vm)
+
+    return vms
+
+
+def get_vm_info(client, vm):
+
+    vm = client.vm.info(vm.ID)
+
+    networks_info = []
+
+    disk_size = []
+    if 'DISK' in vm.TEMPLATE:
+        if isinstance(vm.TEMPLATE['DISK'], list):
+            for disk in vm.TEMPLATE['DISK']:
+                disk_size.append(disk['SIZE'] + ' MB')
+        else:
+            disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+    if 'NIC' in vm.TEMPLATE:
+        if isinstance(vm.TEMPLATE['NIC'], list):
+            for nic in vm.TEMPLATE['NIC']:
+                networks_info.append({
+                    'ip': nic.get('IP', ''),
+                    'mac': nic.get('MAC', ''),
+                    'name': nic.get('NETWORK', ''),
+                    'security_groups': nic.get('SECURITY_GROUPS', '')
+                })
+        else:
+            networks_info.append({
+                'ip': vm.TEMPLATE['NIC'].get('IP', ''),
+                'mac': vm.TEMPLATE['NIC'].get('MAC', ''),
+                'name': vm.TEMPLATE['NIC'].get('NETWORK', ''),
+                'security_groups': vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '')
+            })
+    import time
+
+    current_time = time.localtime()
+    vm_start_time = time.localtime(vm.STIME)
+
+    vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+    vm_uptime /= (60 * 60)
+
+    permissions_str = parse_vm_permissions(client, vm)
+
+    # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
+    vm_lcm_state = None
+    if vm.STATE == VM_STATES.index('ACTIVE'):
+        vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+    vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+    updateconf = parse_updateconf(vm.TEMPLATE)
+
+    info = {
+        'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+        'vm_id': vm.ID,
+        'vm_name': vm.NAME,
+        'state': VM_STATES[vm.STATE],
+        'lcm_state': vm_lcm_state,
+        'owner_name': vm.UNAME,
+        'owner_id': vm.UID,
+        'networks': networks_info,
+        'disk_size': disk_size,
+        'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+        'vcpu': vm.TEMPLATE['VCPU'],
+        'cpu': vm.TEMPLATE['CPU'],
+        'group_name': vm.GNAME,
+        'group_id': vm.GID,
+        'uptime_h': int(vm_uptime),
+        'attributes': vm_attributes,
+        'mode': permissions_str,
+        'labels': vm_labels,
+        'updateconf': updateconf,
+    }
+
+    return info
+
+
+def parse_vm_permissions(client, vm):
+    vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+    owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+    group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+    other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+    permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+    return permissions
+
+
+def set_vm_permissions(module, client, vms, permissions):
+    changed = False
+
+    for vm in vms:
+        vm = client.vm.info(vm.ID)
+        old_permissions = parse_vm_permissions(client, vm)
+        changed = changed or old_permissions != permissions
+
+        if not module.check_mode and old_permissions != permissions:
+            permissions_str = bin(int(permissions, base=8))[2:]  # 600 -> 110000000
+            mode_bits = [int(d) for d in permissions_str]
+            try:
+                client.vm.chmod(
+                    vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+            except pyone.OneAuthorizationException:
+                module.fail_json(msg="Failed to change permissions; note that any instances deployed by this task are still present.")
+
+    return changed
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+    changed = False
+
+    for vm in vms:
+        vm = client.vm.info(vm.ID)
+        if owner_id is None:
+            owner_id = vm.UID
+        if group_id is None:
+            group_id = vm.GID
+
+        changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+        if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+            try:
+                client.vm.chown(vm.ID, owner_id, group_id)
+            except pyone.OneAuthorizationException:
+                module.fail_json(msg="Failed to change ownership; note that any instances deployed by this task are still present.")
+
+    return changed
+
+
+def update_vm(module, client, vm, updateconf_dict):
+    changed = False
+    if not updateconf_dict:
+        return changed
+
+    before = client.vm.info(vm.ID).TEMPLATE
+
+    client.vm.updateconf(vm.ID, render(updateconf_dict), 1)  # 1: Merge new template with the existing one.
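+    # Update type 1 asks OpenNebula to merge the rendered attributes into the
+    # current configuration (0 would replace the matching sections outright), so
+    # existing CONTEXT keys that are not listed in updateconf_dict should survive.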
+
+    after = client.vm.info(vm.ID).TEMPLATE
+
+    changed = before != after
+    return changed
+
+
+def update_vms(module, client, vms, *args):
+    changed = False
+    for vm in vms:
+        changed = update_vm(module, client, vm, *args) or changed
+    return changed
+
+
+def get_size_in_MB(module, size_str):
+
+    SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+    s = size_str
+    init = size_str
+    num = ""
+    while s and (s[0:1].isdigit() or s[0:1] == '.'):
+        num += s[0]
+        s = s[1:]
+    num = float(num)
+    symbol = s.strip()
+
+    if symbol not in SYMBOLS:
+        module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
+
+    prefix = {'B': 1}
+
+    for i, s in enumerate(SYMBOLS[1:]):
+        prefix[s] = 1 << (i + 1) * 10
+
+    size_in_bytes = int(num * prefix[symbol])
+    size_in_MB = size_in_bytes / (1024 * 1024)
+
+    return size_in_MB
+
+
+def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent, updateconf_dict):
+    # initialize the name up front so an empty attributes_dict cannot leave it unbound
+    vm_name = ''
+    if attributes_dict:
+        vm_name = attributes_dict.get('NAME', '')
+
+    template = client.template.info(template_id).TEMPLATE
+
+    disk_count = len(flatten(template.get('DISK', [])))
+    if disk_size:
+        size_count = len(flatten(disk_size))
+        # check if the number of disks is correct
+        if disk_count != size_count:
+            module.fail_json(msg='This template has ' + str(disk_count) + ' disks but you defined ' + str(size_count))
+
+    vm_extra_template = dict_merge(template or {}, attributes_dict or {})
+    vm_extra_template = dict_merge(vm_extra_template, {
+        'LABELS': ','.join(labels_list),
+        'NIC': flatten(network_attrs_list, extract=True),
+        'DISK': flatten([
+            disk if not size else dict_merge(disk, {
+                'SIZE': str(int(get_size_in_MB(module, size))),
+            })
+            for disk, size in zip(
+                flatten(template.get('DISK', [])),
+                flatten(disk_size or [None] * disk_count),
+            )
+            if disk is not None
+        ], extract=True)
+    })
+    vm_extra_template = dict_merge(vm_extra_template, updateconf_dict or {})
+
+    try:
+        vm_id = client.template.instantiate(template_id,
+                                            vm_name,
+                                            vm_start_on_hold,
+                                            render(vm_extra_template),
+                                            vm_persistent)
+    except pyone.OneException as e:
+        module.fail_json(msg=str(e))
+
+    vm = get_vm_by_id(client, vm_id)
+    return get_vm_info(client, vm)
+
+
+def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
+    counter = 0
+    cnt_str = str(counter).zfill(num_sign_cnt)
+
+    while cnt_str in vm_filled_indexes_list:
+        counter = counter + 1
+        cnt_str = str(counter).zfill(num_sign_cnt)
+
+    return cnt_str
+
+
+def get_vm_labels_and_attributes_dict(client, vm_id):
+    vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
+
+    attrs_dict = {}
+    labels_list = []
+
+    for key, value in vm_USER_TEMPLATE.items():
+        if key != 'LABELS':
+            attrs_dict[key] = value
+        else:
+            if key is not None and value is not None:
+                labels_list = value.split(',')
+
+    return labels_list, attrs_dict
+
+
+def get_all_vms_by_attributes(client, attributes_dict, labels_list):
+    pool = client.vmpool.info(-2, -1, -1, -1).VM
+    vm_list = []
+    name = ''
+    if attributes_dict:
+        name = attributes_dict.pop('NAME', '')
+
+    if name != '':
+        base_name = name[:len(name) - name.count('#')]
+        # Check whether the name has an indexed format (ends with '#')
+        with_hash = name.endswith('#')
+
+        for vm in pool:
+            if vm.NAME.startswith(base_name):
+                if with_hash and vm.NAME[len(base_name):].isdigit():
+                    # If the name has an indexed format and only digits follow base_name, it is a match
+                    vm_list.append(vm)
+                elif not with_hash and vm.NAME == name:
+                    # If the name is not indexed it has to match exactly
+                    vm_list.append(vm)
+        pool 
= vm_list + + import copy + + vm_list = copy.copy(pool) + + for vm in pool: + remove_list = [] + vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID) + + if attributes_dict and len(attributes_dict) > 0: + for key, val in attributes_dict.items(): + if key in vm_attributes_dict: + if val and vm_attributes_dict[key] != val: + remove_list.append(vm) + break + else: + remove_list.append(vm) + break + vm_list = list(set(vm_list).difference(set(remove_list))) + + remove_list = [] + if labels_list and len(labels_list) > 0: + for label in labels_list: + if label not in vm_labels_list: + remove_list.append(vm) + break + vm_list = list(set(vm_list).difference(set(remove_list))) + + return vm_list + + +def create_count_of_vms(module, client, + template_id, count, + attributes_dict, labels_list, disk_size, network_attrs_list, + wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): + new_vms_list = [] + + vm_name = '' + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + if module.check_mode: + return True, [], [] + + # Create list of used indexes + vm_filled_indexes_list = None + num_sign_cnt = vm_name.count('#') + if vm_name != '' and num_sign_cnt > 0: + vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) + base_name = vm_name[:len(vm_name) - num_sign_cnt] + vm_name = base_name + # Make list which contains used indexes in format ['000', '001',...] + vm_filled_indexes_list = [vm.NAME[len(base_name):].zfill(num_sign_cnt) for vm in vm_list] + + while count > 0: + new_vm_name = vm_name + # Create indexed name + if vm_filled_indexes_list is not None: + next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt) + vm_filled_indexes_list.append(next_index) + new_vm_name += next_index + # Update NAME value in the attributes in case there is index + attributes_dict['NAME'] = new_vm_name + new_vm_dict = create_vm(module, client, + template_id, attributes_dict, labels_list, disk_size, network_attrs_list, + vm_start_on_hold, vm_persistent, updateconf_dict) + new_vm_id = new_vm_dict.get('vm_id') + new_vm = get_vm_by_id(client, new_vm_id) + new_vms_list.append(new_vm) + count -= 1 + + if vm_start_on_hold: + if wait: + for vm in new_vms_list: + wait_for_hold(module, client, vm, wait_timeout) + else: + if wait: + for vm in new_vms_list: + wait_for_running(module, client, vm, wait_timeout) + + return True, new_vms_list, [] + + +def create_exact_count_of_vms(module, client, + template_id, exact_count, attributes_dict, count_attributes_dict, + labels_list, count_labels_list, disk_size, network_attrs_list, + hard, wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): + vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) + + vm_count_diff = exact_count - len(vm_list) + changed = vm_count_diff != 0 + + new_vms_list = [] + instances_list = [] + tagged_instances_list = vm_list + + if module.check_mode: + return changed, instances_list, tagged_instances_list + + if vm_count_diff > 0: + # Add more VMs + changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, + labels_list, disk_size, network_attrs_list, wait, wait_timeout, + vm_start_on_hold, vm_persistent, updateconf_dict) + + tagged_instances_list += instances_list + elif vm_count_diff < 0: + # Delete surplus VMs + old_vms_list = [] + + while vm_count_diff < 0: + old_vm = vm_list.pop(0) + old_vms_list.append(old_vm) + terminate_vm(module, client, old_vm, hard) + 
vm_count_diff += 1 + + if wait: + for vm in old_vms_list: + wait_for_done(module, client, vm, wait_timeout) + + instances_list = old_vms_list + # store only the remaining instances + old_vms_set = set(old_vms_list) + tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set] + + return changed, instances_list, tagged_instances_list + + +VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] +LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', + 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', + 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', + 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', + 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] + + +def wait_for_state(module, client, vm, wait_timeout, state_predicate): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + vm = client.vm.info(vm.ID) + state = vm.STATE + lcm_state = vm.LCM_STATE + + if state_predicate(state, lcm_state): + return vm + elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), + VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]: + module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state]) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired!") + + +def wait_for_running(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, + lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) + + +def wait_for_done(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) + + +def wait_for_hold(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) + + +def wait_for_poweroff(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) + + +def terminate_vm(module, client, vm, hard=False): + changed = False + + if not vm: + return changed + + changed = True + + if not module.check_mode: + if hard: + client.vm.action('terminate-hard', vm.ID) + else: + client.vm.action('terminate', vm.ID) + + return changed + + +def terminate_vms(module, client, vms, hard): + changed = False + + for vm in vms: + changed = terminate_vm(module, client, vm, hard) or changed + + return changed + + +def poweroff_vm(module, client, vm, hard): + vm = client.vm.info(vm.ID) + changed = False + + lcm_state = vm.LCM_STATE + state = vm.STATE + + if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + changed = True + + if changed and not module.check_mode: + if not hard: + client.vm.action('poweroff', vm.ID) + else: + client.vm.action('poweroff-hard', vm.ID) + + return changed + + +def poweroff_vms(module, client, vms, hard): + changed = False + + for vm in vms: + changed = poweroff_vm(module, client, vm, hard) or changed + + return changed + + +def reboot_vms(module, client, vms, wait_timeout, hard): + + if not 
module.check_mode:
+        # First, power off all instances
+        for vm in vms:
+            vm = client.vm.info(vm.ID)
+            lcm_state = vm.LCM_STATE
+            state = vm.STATE
+            if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+                poweroff_vm(module, client, vm, hard)
+
+        # Wait for all of them to power off
+        for vm in vms:
+            wait_for_poweroff(module, client, vm, wait_timeout)
+
+        for vm in vms:
+            resume_vm(module, client, vm)
+
+    return True
+
+
+def resume_vm(module, client, vm):
+    vm = client.vm.info(vm.ID)
+    changed = False
+
+    state = vm.STATE
+    if state in [VM_STATES.index('HOLD')]:
+        changed = release_vm(module, client, vm)
+        return changed
+
+    lcm_state = vm.LCM_STATE
+    if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+        module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
+                         "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shut down properly.")
+    if lcm_state not in [LCM_STATES.index('RUNNING')]:
+        changed = True
+
+    if changed and not module.check_mode:
+        client.vm.action('resume', vm.ID)
+
+    return changed
+
+
+def resume_vms(module, client, vms):
+    changed = False
+
+    for vm in vms:
+        changed = resume_vm(module, client, vm) or changed
+
+    return changed
+
+
+def release_vm(module, client, vm):
+    vm = client.vm.info(vm.ID)
+    changed = False
+
+    state = vm.STATE
+    if state != VM_STATES.index('HOLD'):
+        module.fail_json(msg="Cannot perform action 'release' because the VM is not in state 'HOLD'.")
+    else:
+        changed = True
+
+    if changed and not module.check_mode:
+        client.vm.action('release', vm.ID)
+
+    return changed
+
+
+def check_name_attribute(module, attributes):
+    if attributes.get("NAME"):
+        import re
+        if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+            module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
+                             "'. The '#' sign is allowed only at the end of the name, and the name cannot consist of '#' only.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+                                  "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+                                  "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+    for key in attributes.keys():
+        if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+            module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+    # Check the format of the name attribute
+    check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
+    if not disk_saveas.get('name'):
+        module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+    image_name = disk_saveas.get('name')
+    disk_id = disk_saveas.get('disk_id', 0)
+
+    if not module.check_mode:
+        if vm.STATE != VM_STATES.index('POWEROFF'):
+            module.fail_json(msg="The 'disk_saveas' option can be used only when the VM is in the 'POWEROFF' state")
+        try:
+            client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+        except pyone.OneException as e:
+            module.fail_json(msg=str(e))
+        wait_for_poweroff(module, client, vm, wait_timeout)  # wait for VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
+
+    url = module.params.get('api_url')
+    username = module.params.get('api_username')
+    password = module.params.get('api_password')
+
+    if not url:
+        url = os.environ.get('ONE_URL')
+
+    if not username:
+        username = os.environ.get('ONE_USERNAME')
+
+    if not password:
+ 
password = os.environ.get('ONE_PASSWORD') + + if not username: + if not password: + authfile = os.environ.get('ONE_AUTH') + if authfile is None: + authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth") + try: + with open(authfile, "r") as fp: + authstring = fp.read().rstrip() + username = authstring.split(":")[0] + password = authstring.split(":")[1] + except (OSError, IOError): + module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile)) + except Exception: + module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile)) + if not url: + module.fail_json(msg="Opennebula API url (api_url) is not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'username', 'password')) + + return auth_params(url=url, username=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"}, + "template_name": {"required": False, "type": "str"}, + "template_id": {"required": False, "type": "int"}, + "vm_start_on_hold": {"default": False, "type": "bool"}, + "state": { + "default": "present", + "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'], + "type": "str" + }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, + "wait": {"default": True, "type": "bool"}, + "wait_timeout": {"default": 300, "type": "int"}, + "hard": {"default": False, "type": "bool"}, + "memory": {"required": False, "type": "str"}, + "cpu": {"required": False, "type": "float"}, + "vcpu": {"required": False, "type": "int"}, + "disk_size": {"required": False, "type": "list", "elements": "str"}, + "datastore_name": {"required": False, "type": "str"}, + "datastore_id": {"required": False, "type": "int"}, + "networks": {"default": [], "type": "list", "elements": "dict"}, + "count": {"default": 1, "type": "int"}, + "exact_count": {"required": False, "type": "int"}, + "attributes": {"default": {}, "type": "dict"}, + "count_attributes": {"required": False, "type": "dict"}, + "labels": {"default": [], "type": "list", "elements": "str"}, + "count_labels": {"required": False, "type": "list", "elements": "str"}, + "disk_saveas": {"type": "dict"}, + "persistent": {"default": False, "type": "bool"}, + "updateconf": {"type": "dict"}, + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[ + ['template_id', 'template_name', 'instance_ids'], + ['template_id', 'template_name', 'disk_saveas'], + ['instance_ids', 'count_attributes', 'count'], + ['instance_ids', 'count_labels', 'count'], + ['instance_ids', 'exact_count'], + ['instance_ids', 'attributes'], + ['instance_ids', 'labels'], + ['disk_saveas', 'attributes'], + ['disk_saveas', 'labels'], + ['exact_count', 'count'], + ['count', 'hard'], + ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'], + ['instance_ids', 'memory'], ['instance_ids', 'disk_size'], + ['instance_ids', 'networks'], + ['persistent', 'disk_size'] + ], + supports_check_mode=True) + + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') + + auth = get_connection_info(module) + params = module.params + instance_ids = params.get('instance_ids') + requested_template_name = params.get('template_name') + requested_template_id = 
params.get('template_id') + put_vm_on_hold = params.get('vm_start_on_hold') + state = params.get('state') + permissions = params.get('mode') + owner_id = params.get('owner_id') + group_id = params.get('group_id') + wait = params.get('wait') + wait_timeout = params.get('wait_timeout') + hard = params.get('hard') + memory = params.get('memory') + cpu = params.get('cpu') + vcpu = params.get('vcpu') + disk_size = params.get('disk_size') + requested_datastore_id = params.get('datastore_id') + requested_datastore_name = params.get('datastore_name') + networks = params.get('networks') + count = params.get('count') + exact_count = params.get('exact_count') + attributes = params.get('attributes') + count_attributes = params.get('count_attributes') + labels = params.get('labels') + count_labels = params.get('count_labels') + disk_saveas = params.get('disk_saveas') + persistent = params.get('persistent') + updateconf = params.get('updateconf') + + if not (auth.username and auth.password): + module.warn("Credentials missing") + else: + one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + + if attributes: + attributes = {key.upper(): value for key, value in attributes.items()} + check_attributes(module, attributes) + + if count_attributes: + count_attributes = {key.upper(): value for key, value in count_attributes.items()} + if not attributes: + import copy + module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.') + attributes = copy.copy(count_attributes) + check_attributes(module, count_attributes) + + if updateconf: + check_updateconf(module, updateconf) + + if count_labels and not labels: + module.warn('When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly.') + labels = count_labels + + # Fetch template + template_id = None + if requested_template_id is not None or requested_template_name: + template_id = get_template_id(module, one_client, requested_template_id, requested_template_name) + if template_id is None: + if requested_template_id is not None: + module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id)) + elif requested_template_name: + module.fail_json(msg="There is no template with name: " + requested_template_name) + + # Fetch datastore + datastore_id = None + if requested_datastore_id or requested_datastore_name: + datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name) + if datastore_id is None: + if requested_datastore_id: + module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id)) + elif requested_datastore_name: + module.fail_json(msg="There is no datastore with name: " + requested_datastore_name) + else: + attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id) + + if exact_count and template_id is None: + module.fail_json(msg='Option `exact_count` needs template_id or template_name') + + if exact_count is not None and not (count_attributes or count_labels): + module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.') + if (count_attributes or count_labels) and exact_count is None: + module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.') + if template_id is not None and state != 'present': + module.fail_json(msg="Only state 'present' is valid for the 
template")
+
+    if memory:
+        attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+    if cpu:
+        attributes['CPU'] = str(cpu)
+    if vcpu:
+        attributes['VCPU'] = str(vcpu)
+
+    if exact_count is not None and state != 'present':
+        module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+    if exact_count is not None and exact_count < 0:
+        module.fail_json(msg='`exact_count` cannot be less than 0')
+    if count <= 0:
+        module.fail_json(msg='`count` has to be greater than 0')
+
+    if permissions is not None:
+        import re
+        if re.match("^[0-7]{3}$", permissions) is None:
+            module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octal format, e.g. 600")
+
+    if exact_count is not None:
+        # Deploy an exact count of VMs
+        changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+                                                                                   count_attributes, labels, count_labels, disk_size,
+                                                                                   networks, hard, wait, wait_timeout, put_vm_on_hold, persistent, updateconf)
+        vms = tagged_instances_list
+    elif template_id is not None and state == 'present':
+        # Deploy `count` VMs
+        changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+                                                                             attributes, labels, disk_size, networks, wait, wait_timeout,
+                                                                             put_vm_on_hold, persistent, updateconf)
+        # instances_list - new instances
+        # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+        vms = instances_list
+    else:
+        # Fetch data of instances, or change their state
+        if not (instance_ids or attributes or labels):
+            module.fail_json(msg="At least one of `instance_ids`, `attributes`, `labels` must be passed!")
+
+        if memory or cpu or vcpu or disk_size or networks:
+            module.fail_json(msg="Parameters `memory`, `cpu`, `vcpu`, `disk_size` and `networks` can only be set when deploying a VM!")
+
+        if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+            module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+        vms = []
+        tagged = False
+        changed = False
+
+        if instance_ids:
+            vms = get_vms_by_ids(module, one_client, state, instance_ids)
+        else:
+            tagged = True
+            vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+        if len(vms) == 0 and state != 'absent' and state != 'present':
+            module.fail_json(msg='There are no instances with the specified `instance_ids`, `attributes` and/or `labels`')
+
+        if len(vms) == 0 and state == 'present' and not tagged:
+            module.fail_json(msg='There are no instances with the specified `instance_ids`.')
+
+        if tagged and state == 'absent':
+            module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+        if state == 'absent':
+            changed = terminate_vms(module, one_client, vms, hard)
+        elif state == 'rebooted':
+            changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+        elif state == 'poweredoff':
+            changed = poweroff_vms(module, one_client, vms, hard)
+        elif state == 'running':
+            changed = resume_vms(module, one_client, vms)
+
+        instances_list = vms
+        tagged_instances_list = []
+
+    if permissions is not None:
+        changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+    if owner_id is not None or group_id is not None:
+        changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+    if template_id is None and updateconf is not None:
+        changed = update_vms(module, one_client, vms, updateconf) or changed
+
+    if wait and not module.check_mode and state != 'present':
+        wait_for = {
+            'absent': wait_for_done,
+            'rebooted': wait_for_running,
+            'poweredoff': wait_for_poweroff,
+            'running': wait_for_running
+        }
+        for vm in vms:
+            if vm is not None:
+                wait_for[state](module, one_client, vm, wait_timeout)
+
+    if disk_saveas is not None:
+        if len(vms) == 0:
+            module.fail_json(msg="There is no VM whose disk will be saved.")
+        disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+        changed = True
+
+    # instances - a list of info for the instances whose state changed or which were fetched with the C(instance_ids) option
+    instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
+    instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+    # tagged_instances - a list of info for the instances matched by the attributes and/or labels specified with C(count_attributes) and C(count_labels)
+    tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+    result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py
new file mode 100644
index 0000000000..3038f8cdc2
--- /dev/null
+++ b/plugins/modules/one_vnet.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024, Alexander Bakanovskii
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+# Make coding more python3-ish
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: one_vnet
+short_description: Manages OpenNebula virtual networks
+version_added: 9.4.0
+author: "Alexander Bakanovskii (@abakanovskii)"
+requirements:
+  - pyone
+description:
+  - Manages virtual networks in OpenNebula.
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually
+        change.
+  diff_mode:
+    support: none
+options:
+  id:
+    description:
+      - The O(id) of the network you would like to manage.
+      - If not set, a new network is created with the given O(name).
+    type: int
+  name:
+    description:
+      - The O(name) of the network you would like to manage. If a network with the given name does not exist, it is created;
+        otherwise it is managed by this module.
+    type: str
+  template:
+    description:
+      - A string containing the network template contents.
+    type: str
+  state:
+    description:
+      - V(present) - state that is used to manage the network.
+      - V(absent) - delete the network.
+    choices: ["present", "absent"]
+    default: present
+    type: str
+
+extends_documentation_fragment:
+  - community.general.opennebula
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Make sure the network is present by ID
+  community.general.one_vnet:
+    id: 0
+    state: present
+  register: result
+
+- name: Make sure the network is present by name
+  community.general.one_vnet:
+    name: opennebula-bridge
+    state: present
+  register: result
+
+- name: Create a new or update an existing network
+  community.general.one_vnet:
+    name: bridge-network
+    template: |
+      VN_MAD = "bridge"
+      BRIDGE = "br0"
+      BRIDGE_TYPE = "linux"
+      AR=[
+        TYPE = "IP4",
+        IP = 192.0.2.50,
+        SIZE = "20"
+      ]
+      DNS = 192.0.2.1
+      GATEWAY = 192.0.2.1
+
+- name: Delete the network by ID
+  community.general.one_vnet:
+    id: 0
+    state: absent
+"""
+
+RETURN = r"""
+id:
+  description: The network ID.
+  type: int
+  returned: when O(state=present)
+  sample: 153
+name:
+  description: The network name.
+  type: str
+  returned: when O(state=present)
+  sample: app1
+template:
+  description: The parsed network template.
+  type: dict
+  returned: when O(state=present)
+  sample:
+    BRIDGE: onebr.1000
+    BRIDGE_TYPE: linux
+    DESCRIPTION: sampletext
+    PHYDEV: eth0
+    SECURITY_GROUPS: 0
+    VLAN_ID: 1000
+    VN_MAD: 802.1Q
+user_id:
+  description: The network's user ID.
+  type: int
+  returned: when O(state=present)
+  sample: 1
+user_name:
+  description: The network's user name.
+  type: str
+  returned: when O(state=present)
+  sample: oneadmin
+group_id:
+  description: The network's group ID.
+  type: int
+  returned: when O(state=present)
+  sample: 1
+group_name:
+  description: The network's group name.
+  type: str
+  returned: when O(state=present)
+  sample: one-users
+owner_id:
+  description: The network's owner ID.
+  type: int
+  returned: when O(state=present)
+  sample: 143
+owner_name:
+  description: The network's owner name.
+  type: str
+  returned: when O(state=present)
+  sample: ansible-test
+permissions:
+  description: The network's permissions.
+  type: dict
+  returned: when O(state=present)
+  contains:
+    owner_u:
+      description: The network's owner USAGE permissions.
+      type: str
+      sample: 1
+    owner_m:
+      description: The network's owner MANAGE permissions.
+      type: str
+      sample: 0
+    owner_a:
+      description: The network's owner ADMIN permissions.
+      type: str
+      sample: 0
+    group_u:
+      description: The network's group USAGE permissions.
+      type: str
+      sample: 0
+    group_m:
+      description: The network's group MANAGE permissions.
+      type: str
+      sample: 0
+    group_a:
+      description: The network's group ADMIN permissions.
+      type: str
+      sample: 0
+    other_u:
+      description: The network's other users USAGE permissions.
+      type: str
+      sample: 0
+    other_m:
+      description: The network's other users MANAGE permissions.
+      type: str
+      sample: 0
+    other_a:
+      description: The network's other users ADMIN permissions.
+      type: str
+      sample: 0
+  sample:
+    owner_u: 1
+    owner_m: 0
+    owner_a: 0
+    group_u: 0
+    group_m: 0
+    group_a: 0
+    other_u: 0
+    other_m: 0
+    other_a: 0
+clusters:
+  description: The network's clusters.
+  type: list
+  returned: when O(state=present)
+  sample: [0, 100]
+bridge:
+  description: The network's bridge interface.
+  type: str
+  returned: when O(state=present)
+  sample: br0
+bridge_type:
+  description: The network's bridge type.
+  type: str
+  returned: when O(state=present)
+  sample: linux
+parent_network_id:
+  description: The network's parent network ID.
+ type: int + returned: when O(state=present) + sample: 1 +vn_mad: + description: The network's VN_MAD. + type: str + returned: when O(state=present) + sample: bridge +phydev: + description: The network's physical device (NIC). + type: str + returned: when O(state=present) + sample: eth0 +vlan_id: + description: The network's VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +outer_vlan_id: + description: The network's outer VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +vrouters: + description: The network's list of virtual routers IDs. + type: list + returned: when O(state=present) + sample: [0, 1] +ar_pool: + description: The network's list of ar_pool. + type: list + returned: when O(state=present) + sample: + - ar_id: 0 + ip: 192.0.2.1 + mac: 6c:1e:46:01:cd:d1 + size: 20 + type: IP4 + - ar_id: 1 + allocated: 0 + ip: 198.51.100.1 + mac: 5d:9b:c0:9e:f6:e5 + size: 20 + type: IP4 +""" + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class NetworksModule(OpenNebulaModule): + + def __init__(self): + argument_spec = dict( + id=dict(type='int'), + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str'), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + + self.result = {} + + template = self.get_template_instance(id, name) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data) + else: + self.result = self.update_template(template, template_data) + + self.exit() + + def get_template(self, predicate): + # -2 means "Resources belonging to all users" + # the other two parameters are used for pagination, -1 for both essentially means "return all" + pool = self.one.vnpool.info(-2, -1, -1) + + for template in pool.VNET: + if predicate(template): + return template + + return None + + def get_template_by_id(self, template_id): + return self.get_template(lambda template: (template.ID == template_id)) + + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) + + def get_template_instance(self, requested_id, requested_name): + if requested_id: + return self.get_template_by_id(requested_id) + else: + return self.get_template_by_name(requested_name) + + def get_networks_ar_pool(self, template): + ar_pool = [] + template_pool = template.AR_POOL.AR + for ar in range(len(template_pool)): + template_param = template_pool[ar] + ar_pool.append({ + # These params will always be present + 'ar_id': template_param.AR_ID, + 'mac': template_param.MAC, + 'size': template_param.SIZE, + 'type': template_param.TYPE, + # These are optional so firstly check for presence + # and if not present set value to Null + 'allocated': 
getattr(template_param, 'ALLOCATED', 'Null'),
+                'ip': getattr(template_param, 'IP', 'Null'),
+                'global_prefix': getattr(template_param, 'GLOBAL_PREFIX', 'Null'),
+                'parent_network_ar_id': getattr(template_param, 'PARENT_NETWORK_AR_ID', 'Null'),
+                'ula_prefix': getattr(template_param, 'ULA_PREFIX', 'Null'),
+                'vn_mad': getattr(template_param, 'VN_MAD', 'Null'),
+            })
+        return ar_pool
+
+    def get_template_info(self, template):
+        info = {
+            'id': template.ID,
+            'name': template.NAME,
+            'template': template.TEMPLATE,
+            'user_name': template.UNAME,
+            'user_id': template.UID,
+            'group_name': template.GNAME,
+            'group_id': template.GID,
+            'permissions': {
+                'owner_u': template.PERMISSIONS.OWNER_U,
+                'owner_m': template.PERMISSIONS.OWNER_M,
+                'owner_a': template.PERMISSIONS.OWNER_A,
+                'group_u': template.PERMISSIONS.GROUP_U,
+                'group_m': template.PERMISSIONS.GROUP_M,
+                'group_a': template.PERMISSIONS.GROUP_A,
+                'other_u': template.PERMISSIONS.OTHER_U,
+                'other_m': template.PERMISSIONS.OTHER_M,
+                'other_a': template.PERMISSIONS.OTHER_A
+            },
+            'clusters': template.CLUSTERS.ID,
+            'bridge': template.BRIDGE,
+            'bridge_type': template.BRIDGE_TYPE,
+            'parent_network_id': template.PARENT_NETWORK_ID,
+            'vn_mad': template.VN_MAD,
+            'phydev': template.PHYDEV,
+            'vlan_id': template.VLAN_ID,
+            'outer_vlan_id': template.OUTER_VLAN_ID,
+            'used_leases': template.USED_LEASES,
+            'vrouters': template.VROUTERS.ID,
+            'ar_pool': self.get_networks_ar_pool(template)
+        }
+
+        return info
+
+    def create_template(self, name, template_data):
+        if not self.module.check_mode:
+            # -1 means the network is not added to any cluster (the default behavior)
+            self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data, -1)
+
+        result = self.get_template_info(self.get_template_by_name(name))
+        result['changed'] = True
+
+        return result
+
+    def update_template(self, template, template_data):
+        if not self.module.check_mode:
+            # 0 = replace the whole template
+            self.one.vn.update(template.ID, template_data, 0)
+
+        result = self.get_template_info(self.get_template_by_id(template.ID))
+        if self.module.check_mode:
+            # Unfortunately it is not easy to detect whether the template would have changed, therefore always report a change here.
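+            # Detecting it would mean parsing and diffing the raw template text on
+            # the client side, so we err on the side of reporting a change.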
+            result['changed'] = True
+        else:
+            # if the previously parsed template data is not equal to the updated one, this has changed
+            result['changed'] = template.TEMPLATE != result['template']
+
+        return result
+
+    def delete_template(self, template):
+        if not template:
+            return {'changed': False}
+
+        if not self.module.check_mode:
+            self.one.vn.delete(template.ID)
+
+        return {'changed': True}
+
+
+def main():
+    NetworksModule().run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py
deleted file mode 120000
index d34ad1a7e1..0000000000
--- a/plugins/modules/oneandone_firewall_policy.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/oneandone/oneandone_firewall_policy.py
\ No newline at end of file
diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py
new file mode 100644
index 0000000000..9078075361
--- /dev/null
+++ b/plugins/modules/oneandone_firewall_policy.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy
+description:
+  - Create, remove, reconfigure, and update firewall policies. This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Define a firewall policy state to create, remove, or update.
+    required: false
+    type: str
+    default: 'present'
+    choices: ["present", "absent", "update"]
+  auth_token:
+    description:
+      - Authenticating API token provided by 1&1.
+    type: str
+  api_url:
+    description:
+      - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
+    type: str
+    required: false
+  name:
+    description:
+      - Firewall policy name used with present state. Used as an identifier (id or name) when used with absent state. maxLength=128.
+    type: str
+  firewall_policy:
+    description:
+      - The identifier (id or name) of the firewall policy used with update state.
+    type: str
+  rules:
+    description:
+      - List of rules that are set for the firewall policy. Each rule must contain the protocol parameter, in addition to three
+        optional parameters (port_from, port_to, and source).
+    type: list
+    elements: dict
+    default: []
+  add_server_ips:
+    description:
+      - A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state.
+    type: list
+    elements: str
+    required: false
+    default: []
+  remove_server_ips:
+    description:
+      - A list of server IP IDs to be unassigned from a firewall policy. Used in combination with update state.
+    type: list
+    elements: str
+    required: false
+    default: []
+  add_rules:
+    description:
+      - List of rules that are added to an existing firewall policy. Its syntax is the same as that of the rules parameter.
+        Used in combination with update state.
+    type: list
+    elements: dict
+    required: false
+    default: []
+  remove_rules:
+    description:
+      - List of rule IDs that are removed from an existing firewall policy. Used in combination with update state.
+    type: list
+    elements: str
+    required: false
+    default: []
+  description:
+    description:
+      - Firewall policy description. maxLength=256.
+ type: str + required: false + wait: + description: + - Wait for the instance to be in state 'running' before returning. + required: false + default: true + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods. + type: int + default: 5 + +requirements: + - "1and1" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +""" + +EXAMPLES = r""" +- name: Create a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + name: ansible-firewall-policy + description: Testing creation of firewall policies with ansible + rules: + - protocol: TCP + port_from: 80 + port_to: 80 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + +- name: Destroy a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + state: absent + name: ansible-firewall-policy + +- name: Update a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + state: update + firewall_policy: ansible-firewall-policy + name: ansible-firewall-policy-updated + description: Testing creation of firewall policies with ansible - updated + +- name: Add server to a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + add_server_ips: + - server_identifier (id or name) + - "server_identifier #2 (id or name)" + wait: true + wait_timeout: 500 + state: update + +- name: Remove server from a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + remove_server_ips: + - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) + wait: true + wait_timeout: 500 + state: update + +- name: Add rules to a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + description: Adding rules to an existing firewall policy + add_rules: + - protocol: TCP + port_from: 70 + port_to: 70 + source: 0.0.0.0 + - protocol: TCP + port_from: 60 + port_to: 60 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + state: update + +- name: Remove rules from a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + remove_rules: + - "rule_id #1" + - "rule_id #2" + - '...' + wait: true + wait_timeout: 500 + state: update +""" + +RETURN = r""" +firewall_policy: + description: Information about the firewall policy that was processed. + type: dict + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} + returned: always +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_firewall_policy, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): + """ + Assigns servers to a firewall policy. 
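+    Each identifier is first resolved to a server object; the ID of the
+    server's first IP is what is actually attached to the policy.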
+ """ + try: + attach_servers = [] + + for _server_id in server_ids: + server = get_server(oneandone_conn, _server_id, True) + attach_server = oneandone.client.AttachServer( + server_id=server['id'], + server_ip_id=next(iter(server['ips'] or []), None)['id'] + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + firewall_policy = oneandone_conn.attach_server_firewall_policy( + firewall_id=firewall_id, + server_ips=attach_servers) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): + """ + Unassigns a server/IP from a firewall policy. + """ + try: + if module.check_mode: + firewall_server = oneandone_conn.get_firewall_server( + firewall_id=firewall_id, + server_ip_id=server_ip_id) + if firewall_server: + return True + return False + + firewall_policy = oneandone_conn.remove_firewall_server( + firewall_id=firewall_id, + server_ip_id=server_ip_id) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): + """ + Adds new rules to a firewall policy. + """ + try: + firewall_rules = [] + + for rule in rules: + firewall_rule = oneandone.client.FirewallPolicyRule( + protocol=rule['protocol'], + port_from=rule['port_from'], + port_to=rule['port_to'], + source=rule['source']) + firewall_rules.append(firewall_rule) + + if module.check_mode: + firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) + if firewall_rules and firewall_policy_id: + return True + return False + + firewall_policy = oneandone_conn.add_firewall_policy_rule( + firewall_id=firewall_id, + firewall_policy_rules=firewall_rules + ) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): + """ + Removes a rule from a firewall policy. + """ + try: + if module.check_mode: + rule = oneandone_conn.get_firewall_policy_rule( + firewall_id=firewall_id, + rule_id=rule_id) + if rule: + return True + return False + + firewall_policy = oneandone_conn.remove_firewall_rule( + firewall_id=firewall_id, + rule_id=rule_id + ) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_firewall_policy(module, oneandone_conn): + """ + Updates a firewall policy based on input arguments. + Firewall rules and server ips can be added/removed to/from + firewall policy. Firewall policy name and description can be + updated as well. 
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + firewall_policy_id = module.params.get('firewall_policy') + name = module.params.get('name') + description = module.params.get('description') + add_server_ips = module.params.get('add_server_ips') + remove_server_ips = module.params.get('remove_server_ips') + add_rules = module.params.get('add_rules') + remove_rules = module.params.get('remove_rules') + + changed = False + + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True) + if firewall_policy is None: + _check_mode(module, False) + + if name or description: + _check_mode(module, True) + firewall_policy = oneandone_conn.modify_firewall( + firewall_id=firewall_policy['id'], + name=name, + description=description) + changed = True + + if add_server_ips: + if module.check_mode: + _check_mode(module, _add_server_ips(module, + oneandone_conn, + firewall_policy['id'], + add_server_ips)) + + firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips) + changed = True + + if remove_server_ips: + chk_changed = False + for server_ip_id in remove_server_ips: + if module.check_mode: + chk_changed |= _remove_firewall_server(module, + oneandone_conn, + firewall_policy['id'], + server_ip_id) + + _remove_firewall_server(module, + oneandone_conn, + firewall_policy['id'], + server_ip_id) + _check_mode(module, chk_changed) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + changed = True + + if add_rules: + firewall_policy = _add_firewall_rules(module, + oneandone_conn, + firewall_policy['id'], + add_rules) + _check_mode(module, firewall_policy) + changed = True + + if remove_rules: + chk_changed = False + for rule_id in remove_rules: + if module.check_mode: + chk_changed |= _remove_firewall_rule(module, + oneandone_conn, + firewall_policy['id'], + rule_id) + + _remove_firewall_rule(module, + oneandone_conn, + firewall_policy['id'], + rule_id) + _check_mode(module, chk_changed) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + changed = True + + return (changed, firewall_policy) + except Exception as e: + module.fail_json(msg=str(e)) + + +def create_firewall_policy(module, oneandone_conn): + """ + Create a new firewall policy. 
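# --- Editorial sketch (not part of the patch) ---------------------------------
# create_firewall_policy below optionally blocks on
# wait_for_resource_creation_completion(), driven by the wait_timeout and
# wait_interval module parameters.  A generic standalone sketch of such a poll
# loop (an illustration of the idea, not the module_utils implementation):
import time


def wait_until(fetch_state, done_state, timeout, interval):
    """Poll fetch_state() every `interval` seconds until it returns done_state."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if fetch_state() == done_state:
            return
        time.sleep(interval)
    raise TimeoutError('resource did not reach %r within %s seconds' % (done_state, timeout))


states = iter(['DEPLOYING', 'DEPLOYING', 'ACTIVE'])
wait_until(lambda: next(states), 'ACTIVE', timeout=600, interval=0)
# -------------------------------------------------------------------------------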
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + rules = module.params.get('rules') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + firewall_rules = [] + + for rule in rules: + firewall_rule = oneandone.client.FirewallPolicyRule( + protocol=rule['protocol'], + port_from=rule['port_from'], + port_to=rule['port_to'], + source=rule['source']) + firewall_rules.append(firewall_rule) + + firewall_policy_obj = oneandone.client.FirewallPolicy( + name=name, + description=description + ) + + _check_mode(module, True) + firewall_policy = oneandone_conn.create_firewall_policy( + firewall_policy=firewall_policy_obj, + firewall_policy_rules=firewall_rules + ) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.firewall_policy, + firewall_policy['id'], + wait_timeout, + wait_interval) + + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh + changed = True if firewall_policy else False + + _check_mode(module, False) + + return (changed, firewall_policy) + except Exception as e: + module.fail_json(msg=str(e)) + + +def remove_firewall_policy(module, oneandone_conn): + """ + Removes a firewall policy. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + fp_id = module.params.get('name') + firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) + if module.check_mode: + if firewall_policy_id is None: + _check_mode(module, False) + _check_mode(module, True) + firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id) + + changed = True if firewall_policy else False + + return (changed, { + 'id': firewall_policy['id'], + 'name': firewall_policy['name'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + name=dict(type='str'), + firewall_policy=dict(type='str'), + description=dict(type='str'), + rules=dict(type='list', elements="dict", default=[]), + add_server_ips=dict(type='list', elements="str", default=[]), + remove_server_ips=dict(type='list', elements="str", default=[]), + add_rules=dict(type='list', elements="dict", default=[]), + remove_rules=dict(type='list', elements="str", default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='The "auth_token" parameter or ' + + 'ONEANDONE_AUTH_TOKEN environment variable is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + 
msg="'name' parameter is required to delete a firewall policy.") + try: + (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'update': + if not module.params.get('firewall_policy'): + module.fail_json( + msg="'firewall_policy' parameter is required to update a firewall policy.") + try: + (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'present': + for param in ('name', 'rules'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new firewall policies." % param) + try: + (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, firewall_policy=firewall_policy) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py deleted file mode 120000 index 3b84c7b351..0000000000 --- a/plugins/modules/oneandone_load_balancer.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/oneandone/oneandone_load_balancer.py \ No newline at end of file diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py new file mode 100644 index 0000000000..d75127e416 --- /dev/null +++ b/plugins/modules/oneandone_load_balancer.py @@ -0,0 +1,673 @@ +#!/usr/bin/python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneandone_load_balancer +short_description: Configure 1&1 load balancer +description: + - Create, remove, update load balancers. This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a load balancer state to create, remove, or update. + type: str + required: false + default: 'present' + choices: ["present", "absent", "update"] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + load_balancer: + description: + - The identifier (id or name) of the load balancer used with update state. + type: str + api_url: + description: + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. + type: str + required: false + name: + description: + - Load balancer name used with present state. Used as identifier (ID or name) when used with absent state. maxLength=128. + type: str + health_check_test: + description: + - Type of the health check. At the moment, HTTP is not allowed. + type: str + choices: ["NONE", "TCP", "HTTP", "ICMP"] + health_check_interval: + description: + - Health check period in seconds. minimum=5, maximum=300, multipleOf=1. + type: str + health_check_path: + description: + - URL to call for checking. Required for HTTP health check. maxLength=1000. + type: str + required: false + health_check_parse: + description: + - Regular expression to check. Required for HTTP health check. maxLength=64. + type: str + required: false + persistence: + description: + - Persistence. + type: bool + persistence_time: + description: + - Persistence time in seconds. Required if persistence is enabled. 
minimum=30, maximum=1200, multipleOf=1.
+    type: str
+  method:
+    description:
+      - Balancing procedure.
+    type: str
+    choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"]
+  datacenter:
+    description:
+      - ID or country code of the datacenter where the load balancer is created.
+      - If not specified, it defaults to V(US).
+    type: str
+    choices: ["US", "ES", "DE", "GB"]
+    required: false
+  rules:
+    description:
+      - A list of rule objects that are set for the load balancer. Each rule must contain protocol, port_balancer, and port_server
+        parameters, in addition to the source parameter, which is optional.
+    type: list
+    elements: dict
+    default: []
+  description:
+    description:
+      - Description of the load balancer. maxLength=256.
+    type: str
+    required: false
+  add_server_ips:
+    description:
+      - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with O(state=update).
+    type: list
+    elements: str
+    required: false
+    default: []
+  remove_server_ips:
+    description:
+      - A list of server IP IDs to be unassigned from a load balancer. Used in combination with O(state=update).
+    type: list
+    elements: str
+    required: false
+    default: []
+  add_rules:
+    description:
+      - A list of rules that are added to an existing load balancer. Its syntax is the same as the one used for the rules parameter.
+        Used in combination with O(state=update).
+    type: list
+    elements: dict
+    required: false
+    default: []
+  remove_rules:
+    description:
+      - A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update).
+    type: list
+    elements: str
+    required: false
+    default: []
+  wait:
+    description:
+      - Wait for the instance to be in state 'running' before returning.
+    required: false
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    type: int
+    default: 600
+  wait_interval:
+    description:
+      - Defines the number of seconds to wait when using the _wait_for methods.
+ type: int + default: 5 + +requirements: + - "1and1" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +""" + +EXAMPLES = r""" +- name: Create a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + name: ansible load balancer + description: Testing creation of load balancer with ansible + health_check_test: TCP + health_check_interval: 40 + persistence: true + persistence_time: 1200 + method: ROUND_ROBIN + datacenter: US + rules: + - protocol: TCP + port_balancer: 80 + port_server: 80 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + +- name: Destroy a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + name: ansible load balancer + wait: true + wait_timeout: 500 + state: absent + +- name: Update a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer + name: ansible load balancer updated + description: Testing the update of a load balancer with ansible + wait: true + wait_timeout: 500 + state: update + +- name: Add server to a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding server to a load balancer with ansible + add_server_ips: + - server identifier (id or name) + wait: true + wait_timeout: 500 + state: update + +- name: Remove server from a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Removing server from a load balancer with ansible + remove_server_ips: + - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) + wait: true + wait_timeout: 500 + state: update + +- name: Add rules to a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding rules to a load balancer with ansible + add_rules: + - protocol: TCP + port_balancer: 70 + port_server: 70 + source: 0.0.0.0 + - protocol: TCP + port_balancer: 60 + port_server: 60 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + state: update + +- name: Remove rules from a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding rules to a load balancer with ansible + remove_rules: + - "rule_id #1" + - "rule_id #2" + - '...' + wait: true + wait_timeout: 500 + state: update +""" + +RETURN = r""" +load_balancer: + description: Information about the load balancer that was processed. 
+ type: dict + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"} + returned: always +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_load_balancer, + get_server, + get_datacenter, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] +HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] +METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): + """ + Assigns servers to a load balancer. + """ + try: + attach_servers = [] + + for server_id in server_ids: + server = get_server(oneandone_conn, server_id, True) + attach_server = oneandone.client.AttachServer( + server_id=server['id'], + server_ip_id=next(iter(server['ips'] or []), None)['id'] + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + load_balancer = oneandone_conn.attach_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ips=attach_servers) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): + """ + Unassigns a server/IP from a load balancer. + """ + try: + if module.check_mode: + lb_server = oneandone_conn.get_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ip_id=server_ip_id) + if lb_server: + return True + return False + + load_balancer = oneandone_conn.remove_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ip_id=server_ip_id) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): + """ + Adds new rules to a load_balancer. + """ + try: + load_balancer_rules = [] + + for rule in rules: + load_balancer_rule = oneandone.client.LoadBalancerRule( + protocol=rule['protocol'], + port_balancer=rule['port_balancer'], + port_server=rule['port_server'], + source=rule['source']) + load_balancer_rules.append(load_balancer_rule) + + if module.check_mode: + lb_id = get_load_balancer(oneandone_conn, load_balancer_id) + if load_balancer_rules and lb_id: + return True + return False + + load_balancer = oneandone_conn.add_load_balancer_rule( + load_balancer_id=load_balancer_id, + load_balancer_rules=load_balancer_rules + ) + + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): + """ + Removes a rule from a load_balancer. + """ + try: + if module.check_mode: + rule = oneandone_conn.get_load_balancer_rule( + load_balancer_id=load_balancer_id, + rule_id=rule_id) + if rule: + return True + return False + + load_balancer = oneandone_conn.remove_load_balancer_rule( + load_balancer_id=load_balancer_id, + rule_id=rule_id + ) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def update_load_balancer(module, oneandone_conn): + """ + Updates a load_balancer based on input arguments. + Load balancer rules and server ips can be added/removed to/from + load balancer. 
Load balancer name, description, health_check_test, + health_check_interval, persistence, persistence_time, and method + can be updated as well. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + load_balancer_id = module.params.get('load_balancer') + name = module.params.get('name') + description = module.params.get('description') + health_check_test = module.params.get('health_check_test') + health_check_interval = module.params.get('health_check_interval') + health_check_path = module.params.get('health_check_path') + health_check_parse = module.params.get('health_check_parse') + persistence = module.params.get('persistence') + persistence_time = module.params.get('persistence_time') + method = module.params.get('method') + add_server_ips = module.params.get('add_server_ips') + remove_server_ips = module.params.get('remove_server_ips') + add_rules = module.params.get('add_rules') + remove_rules = module.params.get('remove_rules') + + changed = False + + load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) + if load_balancer is None: + _check_mode(module, False) + + if (name or description or health_check_test or health_check_interval or health_check_path or + health_check_parse or persistence or persistence_time or method): + _check_mode(module, True) + load_balancer = oneandone_conn.modify_load_balancer( + load_balancer_id=load_balancer['id'], + name=name, + description=description, + health_check_test=health_check_test, + health_check_interval=health_check_interval, + health_check_path=health_check_path, + health_check_parse=health_check_parse, + persistence=persistence, + persistence_time=persistence_time, + method=method) + changed = True + + if add_server_ips: + if module.check_mode: + _check_mode(module, _add_server_ips(module, + oneandone_conn, + load_balancer['id'], + add_server_ips)) + + load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) + changed = True + + if remove_server_ips: + chk_changed = False + for server_ip_id in remove_server_ips: + if module.check_mode: + chk_changed |= _remove_load_balancer_server(module, + oneandone_conn, + load_balancer['id'], + server_ip_id) + + _remove_load_balancer_server(module, + oneandone_conn, + load_balancer['id'], + server_ip_id) + _check_mode(module, chk_changed) + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + changed = True + + if add_rules: + load_balancer = _add_load_balancer_rules(module, + oneandone_conn, + load_balancer['id'], + add_rules) + _check_mode(module, load_balancer) + changed = True + + if remove_rules: + chk_changed = False + for rule_id in remove_rules: + if module.check_mode: + chk_changed |= _remove_load_balancer_rule(module, + oneandone_conn, + load_balancer['id'], + rule_id) + + _remove_load_balancer_rule(module, + oneandone_conn, + load_balancer['id'], + rule_id) + _check_mode(module, chk_changed) + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + changed = True + + try: + return (changed, load_balancer) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_load_balancer(module, oneandone_conn): + """ + Create a new load_balancer. 
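# --- Editorial note (not part of the patch) ------------------------------------
# Unlike update_firewall_policy(), update_load_balancer() above wraps only its
# final `return (changed, load_balancer)` in try/except; the API calls before
# it sit outside that block.  The callers in main() wrap the whole call in
# their own try/except and fail_json(), so errors are still reported, but the
# inner try adds nothing and could be widened to cover the body for
# consistency with the firewall module.
# --------------------------------------------------------------------------------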
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + health_check_test = module.params.get('health_check_test') + health_check_interval = module.params.get('health_check_interval') + health_check_path = module.params.get('health_check_path') + health_check_parse = module.params.get('health_check_parse') + persistence = module.params.get('persistence') + persistence_time = module.params.get('persistence_time') + method = module.params.get('method') + datacenter = module.params.get('datacenter') + rules = module.params.get('rules') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + load_balancer_rules = [] + + datacenter_id = None + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + for rule in rules: + load_balancer_rule = oneandone.client.LoadBalancerRule( + protocol=rule['protocol'], + port_balancer=rule['port_balancer'], + port_server=rule['port_server'], + source=rule['source']) + load_balancer_rules.append(load_balancer_rule) + + _check_mode(module, True) + load_balancer_obj = oneandone.client.LoadBalancer( + health_check_path=health_check_path, + health_check_parse=health_check_parse, + name=name, + description=description, + health_check_test=health_check_test, + health_check_interval=health_check_interval, + persistence=persistence, + persistence_time=persistence_time, + method=method, + datacenter_id=datacenter_id + ) + + load_balancer = oneandone_conn.create_load_balancer( + load_balancer=load_balancer_obj, + load_balancer_rules=load_balancer_rules + ) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.load_balancer, + load_balancer['id'], + wait_timeout, + wait_interval) + + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh + changed = True if load_balancer else False + + _check_mode(module, False) + + return (changed, load_balancer) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_load_balancer(module, oneandone_conn): + """ + Removes a load_balancer. 
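# --- Editorial sketch (not part of the patch) ---------------------------------
# The delete paths in these modules compute `changed = True if obj else False`;
# `changed = bool(obj)` is the idiomatic spelling.  A condensed standalone
# sketch of the delete flow below (delete_fn and the ids are illustrative):
def delete_resource(delete_fn, resource_id, check_mode):
    """Delete resource_id via delete_fn, honouring check mode."""
    if check_mode:
        # Predict only: a change would happen iff the resource exists.
        return resource_id is not None
    deleted = delete_fn(resource_id)
    return bool(deleted)


assert delete_resource(lambda rid: {'id': rid}, 'lb-1', check_mode=True) is True
assert delete_resource(lambda rid: {'id': rid}, None, check_mode=True) is False
# -------------------------------------------------------------------------------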
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + lb_id = module.params.get('name') + load_balancer_id = get_load_balancer(oneandone_conn, lb_id) + if module.check_mode: + if load_balancer_id is None: + _check_mode(module, False) + _check_mode(module, True) + load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) + + changed = True if load_balancer else False + + return (changed, { + 'id': load_balancer['id'], + 'name': load_balancer['name'] + }) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + load_balancer=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + health_check_test=dict( + choices=HEALTH_CHECK_TESTS), + health_check_interval=dict(type='str'), + health_check_path=dict(type='str'), + health_check_parse=dict(type='str'), + persistence=dict(type='bool'), + persistence_time=dict(type='str'), + method=dict( + choices=METHODS), + datacenter=dict( + choices=DATACENTERS), + rules=dict(type='list', elements="dict", default=[]), + add_server_ips=dict(type='list', elements="str", default=[]), + remove_server_ips=dict(type='list', elements="str", default=[]), + add_rules=dict(type='list', elements="dict", default=[]), + remove_rules=dict(type='list', elements="str", default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for deleting a load balancer.") + try: + (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + elif state == 'update': + if not module.params.get('load_balancer'): + module.fail_json( + msg="'load_balancer' parameter is required for updating a load balancer.") + try: + (changed, load_balancer) = update_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', + 'persistence_time', 'method', 'rules'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new load balancers." 
% param) + try: + (changed, load_balancer) = create_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, load_balancer=load_balancer) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py deleted file mode 120000 index ee9b7e36b4..0000000000 --- a/plugins/modules/oneandone_monitoring_policy.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/oneandone/oneandone_monitoring_policy.py \ No newline at end of file diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py new file mode 100644 index 0000000000..a33abc8cb2 --- /dev/null +++ b/plugins/modules/oneandone_monitoring_policy.py @@ -0,0 +1,1020 @@ +#!/usr/bin/python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneandone_monitoring_policy +short_description: Configure 1&1 monitoring policy +description: + - Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency + on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a monitoring policy's state to create, remove, update. + type: str + required: false + default: present + choices: ["present", "absent", "update"] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + api_url: + description: + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. + type: str + required: false + name: + description: + - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128. + type: str + monitoring_policy: + description: + - The identifier (id or name) of the monitoring policy used with update state. + type: str + agent: + description: + - Set true for using agent. + type: str + email: + description: + - User's email. maxLength=128. + type: str + description: + description: + - Monitoring policy description. maxLength=256. + type: str + required: false + thresholds: + description: + - Monitoring policy thresholds. Each of the suboptions have warning and critical, which both have alert and value suboptions. + Warning is used to set limits for warning alerts, critical is used to set critical alerts. alert enables alert, and + value is used to advise when the value is exceeded. + type: list + elements: dict + default: [] + suboptions: + cpu: + description: + - Consumption limits of CPU. + required: true + ram: + description: + - Consumption limits of RAM. + required: true + disk: + description: + - Consumption limits of hard disk. + required: true + internal_ping: + description: + - Response limits of internal ping. + required: true + transfer: + description: + - Consumption limits for transfer. + required: true + ports: + description: + - Array of ports that are to be monitored. + type: list + elements: dict + default: [] + suboptions: + protocol: + description: + - Internet protocol. + choices: ["TCP", "UDP"] + required: true + port: + description: + - Port number. minimum=1, maximum=65535. + required: true + alert_if: + description: + - Case of alert. 
+ choices: ["RESPONDING", "NOT_RESPONDING"] + required: true + email_notification: + description: + - Set true for sending e-mail notifications. + required: true + processes: + description: + - Array of processes that are to be monitored. + type: list + elements: dict + default: [] + suboptions: + process: + description: + - Name of the process. maxLength=50. + required: true + alert_if: + description: + - Case of alert. + choices: ["RUNNING", "NOT_RUNNING"] + required: true + add_ports: + description: + - Ports to add to the monitoring policy. + type: list + elements: dict + required: false + default: [] + add_processes: + description: + - Processes to add to the monitoring policy. + type: list + elements: dict + required: false + default: [] + add_servers: + description: + - Servers to add to the monitoring policy. + type: list + elements: str + required: false + default: [] + remove_ports: + description: + - Ports to remove from the monitoring policy. + type: list + elements: str + required: false + default: [] + remove_processes: + description: + - Processes to remove from the monitoring policy. + type: list + elements: str + required: false + default: [] + remove_servers: + description: + - Servers to remove from the monitoring policy. + type: list + elements: str + required: false + default: [] + update_ports: + description: + - Ports to be updated on the monitoring policy. + type: list + elements: dict + required: false + default: [] + update_processes: + description: + - Processes to be updated on the monitoring policy. + type: list + elements: dict + required: false + default: [] + wait: + description: + - Wait for the instance to be in state 'running' before returning. + required: false + default: true + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods. 
+ type: int + default: 5 + +requirements: + - "1and1" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +""" + +EXAMPLES = r""" +- name: Create a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + name: ansible monitoring policy + description: Testing creation of a monitoring policy with ansible + email: your@emailaddress.com + agent: true + thresholds: + - cpu: + warning: + value: 80 + alert: false + critical: + value: 92 + alert: false + - ram: + warning: + value: 80 + alert: false + critical: + value: 90 + alert: false + - disk: + warning: + value: 80 + alert: false + critical: + value: 90 + alert: false + - internal_ping: + warning: + value: 50 + alert: false + critical: + value: 100 + alert: false + - transfer: + warning: + value: 1000 + alert: false + critical: + value: 2000 + alert: false + ports: + - protocol: TCP + port: 22 + alert_if: RESPONDING + email_notification: false + processes: + - process: test + alert_if: NOT_RUNNING + email_notification: false + wait: true + +- name: Destroy a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + state: absent + name: ansible monitoring policy + +- name: Update a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy + name: ansible monitoring policy updated + description: Testing creation of a monitoring policy with ansible updated + email: another@emailaddress.com + thresholds: + - cpu: + warning: + value: 70 + alert: false + critical: + value: 90 + alert: false + - ram: + warning: + value: 70 + alert: false + critical: + value: 80 + alert: false + - disk: + warning: + value: 70 + alert: false + critical: + value: 80 + alert: false + - internal_ping: + warning: + value: 60 + alert: false + critical: + value: 90 + alert: false + - transfer: + warning: + value: 900 + alert: false + critical: + value: 1900 + alert: false + wait: true + state: update + +- name: Add a port to a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + add_ports: + - protocol: TCP + port: 33 + alert_if: RESPONDING + email_notification: false + wait: true + state: update + +- name: Update existing ports of a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + update_ports: + - id: existing_port_id + protocol: TCP + port: 34 + alert_if: RESPONDING + email_notification: false + - id: existing_port_id + protocol: TCP + port: 23 + alert_if: RESPONDING + email_notification: false + wait: true + state: update + +- name: Remove a port from a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + remove_ports: + - port_id + state: update + +- name: Add a process to a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + add_processes: + - process: test_2 + alert_if: NOT_RUNNING + email_notification: false + wait: true + state: update + +- name: Update existing processes of a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + 
monitoring_policy: ansible monitoring policy updated + update_processes: + - id: process_id + process: test_1 + alert_if: NOT_RUNNING + email_notification: false + - id: process_id + process: test_3 + alert_if: NOT_RUNNING + email_notification: false + wait: true + state: update + +- name: Remove a process from a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + remove_processes: + - process_id + wait: true + state: update + +- name: Add server to a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + add_servers: + - server id or name + wait: true + state: update + +- name: Remove server from a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + remove_servers: + - server01 + wait: true + state: update +""" + +RETURN = r""" +monitoring_policy: + description: Information about the monitoring policy that was processed. + type: dict + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} + returned: always +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_monitoring_policy, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): + """ + Adds new ports to a monitoring policy. + """ + try: + monitoring_policy_ports = [] + + for _port in ports: + monitoring_policy_port = oneandone.client.Port( + protocol=_port['protocol'], + port=_port['port'], + alert_if=_port['alert_if'], + email_notification=_port['email_notification'] + ) + monitoring_policy_ports.append(monitoring_policy_port) + + if module.check_mode: + if monitoring_policy_ports: + return True + return False + + monitoring_policy = oneandone_conn.add_port( + monitoring_policy_id=monitoring_policy_id, + ports=monitoring_policy_ports) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id): + """ + Removes a port from a monitoring policy. + """ + try: + if module.check_mode: + monitoring_policy = oneandone_conn.delete_monitoring_policy_port( + monitoring_policy_id=monitoring_policy_id, + port_id=port_id) + if monitoring_policy: + return True + return False + + monitoring_policy = oneandone_conn.delete_monitoring_policy_port( + monitoring_policy_id=monitoring_policy_id, + port_id=port_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port): + """ + Modifies a monitoring policy port. 
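# --- Editorial sketch (not part of the patch) ---------------------------------
# The update_ports/update_processes entries documented above carry the id of
# the item to change alongside its new fields; _modify_port() below receives
# them as update_port['id'] plus the full dict.  A standalone sketch of
# separating the target id from the payload:
def split_update(entry):
    """Return (target_id, fields) for an update_ports/update_processes entry."""
    fields = dict(entry)          # copy so the caller's dict is untouched
    target_id = fields.pop('id')
    return target_id, fields


tid, fields = split_update({'id': 'p-1', 'protocol': 'TCP', 'port': 34,
                            'alert_if': 'RESPONDING', 'email_notification': False})
assert tid == 'p-1' and 'id' not in fields
# -------------------------------------------------------------------------------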
+ """ + try: + if module.check_mode: + cm_port = oneandone_conn.get_monitoring_policy_port( + monitoring_policy_id=monitoring_policy_id, + port_id=port_id) + if cm_port: + return True + return False + + monitoring_policy_port = oneandone.client.Port( + protocol=port['protocol'], + port=port['port'], + alert_if=port['alert_if'], + email_notification=port['email_notification'] + ) + + monitoring_policy = oneandone_conn.modify_port( + monitoring_policy_id=monitoring_policy_id, + port_id=port_id, + port=monitoring_policy_port) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): + """ + Adds new processes to a monitoring policy. + """ + try: + monitoring_policy_processes = [] + + for _process in processes: + monitoring_policy_process = oneandone.client.Process( + process=_process['process'], + alert_if=_process['alert_if'], + email_notification=_process['email_notification'] + ) + monitoring_policy_processes.append(monitoring_policy_process) + + if module.check_mode: + mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) + if monitoring_policy_processes and mp_id: + return True + return False + + monitoring_policy = oneandone_conn.add_process( + monitoring_policy_id=monitoring_policy_id, + processes=monitoring_policy_processes) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id): + """ + Removes a process from a monitoring policy. + """ + try: + if module.check_mode: + process = oneandone_conn.get_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id + ) + if process: + return True + return False + + monitoring_policy = oneandone_conn.delete_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process): + """ + Modifies a monitoring policy process. + """ + try: + if module.check_mode: + cm_process = oneandone_conn.get_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id) + if cm_process: + return True + return False + + monitoring_policy_process = oneandone.client.Process( + process=process['process'], + alert_if=process['alert_if'], + email_notification=process['email_notification'] + ) + + monitoring_policy = oneandone_conn.modify_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id, + process=monitoring_policy_process) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): + """ + Attaches servers to a monitoring policy. 
+ """ + try: + attach_servers = [] + + for _server_id in servers: + server_id = get_server(oneandone_conn, _server_id) + attach_server = oneandone.client.AttachServer( + server_id=server_id + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + monitoring_policy = oneandone_conn.attach_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + servers=attach_servers) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id): + """ + Detaches a server from a monitoring policy. + """ + try: + if module.check_mode: + mp_server = oneandone_conn.get_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + server_id=server_id) + if mp_server: + return True + return False + + monitoring_policy = oneandone_conn.detach_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + server_id=server_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def update_monitoring_policy(module, oneandone_conn): + """ + Updates a monitoring_policy based on input arguments. + Monitoring policy ports, processes and servers can be added/removed to/from + a monitoring policy. Monitoring policy name, description, email, + thresholds for cpu, ram, disk, transfer and internal_ping + can be updated as well. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + monitoring_policy_id = module.params.get('monitoring_policy') + name = module.params.get('name') + description = module.params.get('description') + email = module.params.get('email') + thresholds = module.params.get('thresholds') + add_ports = module.params.get('add_ports') + update_ports = module.params.get('update_ports') + remove_ports = module.params.get('remove_ports') + add_processes = module.params.get('add_processes') + update_processes = module.params.get('update_processes') + remove_processes = module.params.get('remove_processes') + add_servers = module.params.get('add_servers') + remove_servers = module.params.get('remove_servers') + + changed = False + + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True) + if monitoring_policy is None: + _check_mode(module, False) + + _monitoring_policy = oneandone.client.MonitoringPolicy( + name=name, + description=description, + email=email + ) + + _thresholds = None + + if thresholds: + threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] + + _thresholds = [] + for threshold in thresholds: + key = list(threshold.keys())[0] + if key in threshold_entities: + _threshold = oneandone.client.Threshold( + entity=key, + warning_value=threshold[key]['warning']['value'], + warning_alert=str(threshold[key]['warning']['alert']).lower(), + critical_value=threshold[key]['critical']['value'], + critical_alert=str(threshold[key]['critical']['alert']).lower()) + _thresholds.append(_threshold) + + if name or description or email or thresholds: + _check_mode(module, True) + monitoring_policy = oneandone_conn.modify_monitoring_policy( + monitoring_policy_id=monitoring_policy['id'], + monitoring_policy=_monitoring_policy, + thresholds=_thresholds) + changed = True + + if add_ports: + if module.check_mode: + _check_mode(module, _add_ports(module, + oneandone_conn, + monitoring_policy['id'], + add_ports)) + + monitoring_policy = _add_ports(module, oneandone_conn, 
monitoring_policy['id'], add_ports) + changed = True + + if update_ports: + chk_changed = False + for update_port in update_ports: + if module.check_mode: + chk_changed |= _modify_port(module, + oneandone_conn, + monitoring_policy['id'], + update_port['id'], + update_port) + + _modify_port(module, + oneandone_conn, + monitoring_policy['id'], + update_port['id'], + update_port) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if remove_ports: + chk_changed = False + for port_id in remove_ports: + if module.check_mode: + chk_changed |= _delete_monitoring_policy_port(module, + oneandone_conn, + monitoring_policy['id'], + port_id) + + _delete_monitoring_policy_port(module, + oneandone_conn, + monitoring_policy['id'], + port_id) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if add_processes: + monitoring_policy = _add_processes(module, + oneandone_conn, + monitoring_policy['id'], + add_processes) + _check_mode(module, monitoring_policy) + changed = True + + if update_processes: + chk_changed = False + for update_process in update_processes: + if module.check_mode: + chk_changed |= _modify_process(module, + oneandone_conn, + monitoring_policy['id'], + update_process['id'], + update_process) + + _modify_process(module, + oneandone_conn, + monitoring_policy['id'], + update_process['id'], + update_process) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if remove_processes: + chk_changed = False + for process_id in remove_processes: + if module.check_mode: + chk_changed |= _delete_monitoring_policy_process(module, + oneandone_conn, + monitoring_policy['id'], + process_id) + + _delete_monitoring_policy_process(module, + oneandone_conn, + monitoring_policy['id'], + process_id) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if add_servers: + monitoring_policy = _attach_monitoring_policy_server(module, + oneandone_conn, + monitoring_policy['id'], + add_servers) + _check_mode(module, monitoring_policy) + changed = True + + if remove_servers: + chk_changed = False + for _server_id in remove_servers: + server_id = get_server(oneandone_conn, _server_id) + + if module.check_mode: + chk_changed |= _detach_monitoring_policy_server(module, + oneandone_conn, + monitoring_policy['id'], + server_id) + + _detach_monitoring_policy_server(module, + oneandone_conn, + monitoring_policy['id'], + server_id) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + return (changed, monitoring_policy) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_monitoring_policy(module, oneandone_conn): + """ + Creates a new monitoring policy. 
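# --- Editorial sketch (not part of the patch) ---------------------------------
# create_monitoring_policy below flattens each thresholds entry (a one-key
# dict such as {'cpu': {'warning': ..., 'critical': ...}}) into a Threshold
# object, lowercasing the boolean alert flags for the API.  The same
# standalone logic with plain dicts:
THRESHOLD_ENTITIES = ('cpu', 'ram', 'disk', 'internal_ping', 'transfer')


def flatten_threshold(threshold):
    """Flatten one thresholds list entry; unknown entities are skipped (None)."""
    entity = list(threshold.keys())[0]
    if entity not in THRESHOLD_ENTITIES:
        return None
    return {
        'entity': entity,
        'warning_value': threshold[entity]['warning']['value'],
        'warning_alert': str(threshold[entity]['warning']['alert']).lower(),
        'critical_value': threshold[entity]['critical']['value'],
        'critical_alert': str(threshold[entity]['critical']['alert']).lower(),
    }


rec = flatten_threshold({'cpu': {'warning': {'value': 80, 'alert': False},
                                 'critical': {'value': 92, 'alert': False}}})
assert rec['warning_alert'] == 'false' and rec['entity'] == 'cpu'
# -------------------------------------------------------------------------------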
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + email = module.params.get('email') + agent = module.params.get('agent') + thresholds = module.params.get('thresholds') + ports = module.params.get('ports') + processes = module.params.get('processes') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + _monitoring_policy = oneandone.client.MonitoringPolicy(name, + description, + email, + agent, ) + + _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower() + + threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] + + _thresholds = [] + for threshold in thresholds: + key = list(threshold.keys())[0] + if key in threshold_entities: + _threshold = oneandone.client.Threshold( + entity=key, + warning_value=threshold[key]['warning']['value'], + warning_alert=str(threshold[key]['warning']['alert']).lower(), + critical_value=threshold[key]['critical']['value'], + critical_alert=str(threshold[key]['critical']['alert']).lower()) + _thresholds.append(_threshold) + + _ports = [] + for port in ports: + _port = oneandone.client.Port( + protocol=port['protocol'], + port=port['port'], + alert_if=port['alert_if'], + email_notification=str(port['email_notification']).lower()) + _ports.append(_port) + + _processes = [] + for process in processes: + _process = oneandone.client.Process( + process=process['process'], + alert_if=process['alert_if'], + email_notification=str(process['email_notification']).lower()) + _processes.append(_process) + + _check_mode(module, True) + monitoring_policy = oneandone_conn.create_monitoring_policy( + monitoring_policy=_monitoring_policy, + thresholds=_thresholds, + ports=_ports, + processes=_processes + ) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.monitoring_policy, + monitoring_policy['id'], + wait_timeout, + wait_interval) + + changed = True if monitoring_policy else False + + _check_mode(module, False) + + return (changed, monitoring_policy) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_monitoring_policy(module, oneandone_conn): + """ + Removes a monitoring policy. 
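# --- Editorial sketch (not part of the patch) ---------------------------------
# main() below seeds auth_token and api_url with
# default=os.environ.get('ONEANDONE_AUTH_TOKEN') / ('ONEANDONE_API_URL'), so
# the environment is consulted once, when the argument spec is built.  A
# standalone sketch of that explicit-value-first fallback:
import os


def env_default(explicit, env_name):
    """Prefer the explicit value; otherwise fall back to the environment."""
    return explicit if explicit is not None else os.environ.get(env_name)


os.environ['ONEANDONE_AUTH_TOKEN_DEMO'] = 'from-env'   # demo variable, not the real one
assert env_default('from-param', 'ONEANDONE_AUTH_TOKEN_DEMO') == 'from-param'
assert env_default(None, 'ONEANDONE_AUTH_TOKEN_DEMO') == 'from-env'
# -------------------------------------------------------------------------------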
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + mp_id = module.params.get('name') + monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) + if module.check_mode: + if monitoring_policy_id is None: + _check_mode(module, False) + _check_mode(module, True) + monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id) + + changed = True if monitoring_policy else False + + return (changed, { + 'id': monitoring_policy['id'], + 'name': monitoring_policy['name'] + }) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + name=dict(type='str'), + monitoring_policy=dict(type='str'), + agent=dict(type='str'), + email=dict(type='str'), + description=dict(type='str'), + thresholds=dict(type='list', elements="dict", default=[]), + ports=dict(type='list', elements="dict", default=[]), + processes=dict(type='list', elements="dict", default=[]), + add_ports=dict(type='list', elements="dict", default=[]), + update_ports=dict(type='list', elements="dict", default=[]), + remove_ports=dict(type='list', elements="str", default=[]), + add_processes=dict(type='list', elements="dict", default=[]), + update_processes=dict(type='list', elements="dict", default=[]), + remove_processes=dict(type='list', elements="str", default=[]), + add_servers=dict(type='list', elements="str", default=[]), + remove_servers=dict(type='list', elements="str", default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required to delete a monitoring policy.") + try: + (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + elif state == 'update': + if not module.params.get('monitoring_policy'): + module.fail_json( + msg="'monitoring_policy' parameter is required to update a monitoring policy.") + try: + (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for a new monitoring policy." 
% param) + try: + (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, monitoring_policy=monitoring_policy) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py deleted file mode 120000 index 61ac35f592..0000000000 --- a/plugins/modules/oneandone_private_network.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/oneandone/oneandone_private_network.py \ No newline at end of file diff --git a/plugins/modules/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py new file mode 100644 index 0000000000..2b74dff4f0 --- /dev/null +++ b/plugins/modules/oneandone_private_network.py @@ -0,0 +1,449 @@ +#!/usr/bin/python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneandone_private_network +short_description: Configure 1&1 private networking +description: + - Create, remove, reconfigure, update a private network. This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a network's state to create, remove, or update. + type: str + required: false + default: 'present' + choices: ["present", "absent", "update"] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + private_network: + description: + - The identifier (id or name) of the network used with update state. + type: str + api_url: + description: + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. + type: str + required: false + name: + description: + - Private network name used with present state. Used as identifier (id or name) when used with absent state. + type: str + description: + description: + - Set a description for the network. + type: str + datacenter: + description: + - The identifier of the datacenter where the private network is created. + type: str + choices: [US, ES, DE, GB] + network_address: + description: + - Set a private network space, for example V(192.168.1.0). + type: str + subnet_mask: + description: + - Set the netmask for the private network, for example V(255.255.255.0). + type: str + add_members: + description: + - List of server identifiers (name or id) to be added to the private network. + type: list + elements: str + default: [] + remove_members: + description: + - List of server identifiers (name or id) to be removed from the private network. + type: list + elements: str + default: [] + wait: + description: + - Wait for the instance to be in state 'running' before returning. + required: false + default: true + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods. 
+ type: int + default: 5 + +requirements: + - "1and1" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +""" + +EXAMPLES = r""" +- name: Create a private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + name: backup_network + description: Testing creation of a private network with ansible + network_address: 70.35.193.100 + subnet_mask: 255.0.0.0 + datacenter: US + +- name: Destroy a private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: absent + name: backup_network + +- name: Modify the private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + network_address: 192.168.2.0 + subnet_mask: 255.255.255.0 + +- name: Add members to the private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + add_members: + - server identifier (id or name) + +- name: Remove members from the private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + remove_members: + - server identifier (id or name) +""" + +RETURN = r""" +private_network: + description: Information about the private network. + type: dict + sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"} + returned: always +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_private_network, + get_server, + get_datacenter, + OneAndOneResources, + wait_for_resource_creation_completion, + wait_for_resource_deletion_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_servers(module, oneandone_conn, name, members): + try: + private_network_id = get_private_network(oneandone_conn, name) + + if module.check_mode: + if private_network_id and members: + return True + return False + + network = oneandone_conn.attach_private_network_servers( + private_network_id=private_network_id, + server_ids=members) + + return network + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_member(module, oneandone_conn, name, member_id): + try: + private_network_id = get_private_network(oneandone_conn, name) + + if module.check_mode: + if private_network_id: + network_member = oneandone_conn.get_private_network_server( + private_network_id=private_network_id, + server_id=member_id) + if network_member: + return True + return False + + network = oneandone_conn.remove_private_network_server( + private_network_id=name, + server_id=member_id) + + return network + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_network(module, oneandone_conn): + """ + Create new private network + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any network was added. 
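+ The second element of the returned (changed, network) tuple holds the created network's data.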
+ """ + name = module.params.get('name') + description = module.params.get('description') + network_address = module.params.get('network_address') + subnet_mask = module.params.get('subnet_mask') + datacenter = module.params.get('datacenter') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + try: + _check_mode(module, True) + network = oneandone_conn.create_private_network( + private_network=oneandone.client.PrivateNetwork( + name=name, + description=description, + network_address=network_address, + subnet_mask=subnet_mask, + datacenter_id=datacenter_id + )) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.private_network, + network['id'], + wait_timeout, + wait_interval) + network = get_private_network(oneandone_conn, + network['id'], + True) + + changed = True if network else False + + _check_mode(module, False) + + return (changed, network) + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_network(module, oneandone_conn): + """ + Modifies a private network. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + _private_network_id = module.params.get('private_network') + _name = module.params.get('name') + _description = module.params.get('description') + _network_address = module.params.get('network_address') + _subnet_mask = module.params.get('subnet_mask') + _add_members = module.params.get('add_members') + _remove_members = module.params.get('remove_members') + + changed = False + + private_network = get_private_network(oneandone_conn, + _private_network_id, + True) + if private_network is None: + _check_mode(module, False) + + if _name or _description or _network_address or _subnet_mask: + _check_mode(module, True) + private_network = oneandone_conn.modify_private_network( + private_network_id=private_network['id'], + name=_name, + description=_description, + network_address=_network_address, + subnet_mask=_subnet_mask) + changed = True + + if _add_members: + instances = [] + + for member in _add_members: + instance_id = get_server(oneandone_conn, member) + instance_obj = oneandone.client.AttachServer(server_id=instance_id) + + instances.extend([instance_obj]) + private_network = _add_servers(module, oneandone_conn, private_network['id'], instances) + _check_mode(module, private_network) + changed = True + + if _remove_members: + chk_changed = False + for member in _remove_members: + instance = get_server(oneandone_conn, member, True) + + if module.check_mode: + chk_changed |= _remove_member(module, + oneandone_conn, + private_network['id'], + instance['id']) + _check_mode(module, instance and chk_changed) + + _remove_member(module, + oneandone_conn, + private_network['id'], + instance['id']) + private_network = get_private_network(oneandone_conn, + private_network['id'], + True) + changed = True + + return (changed, private_network) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_network(module, oneandone_conn): + """ + Removes a private network. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. 
+ """ + try: + pn_id = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + private_network_id = get_private_network(oneandone_conn, pn_id) + if module.check_mode: + if private_network_id is None: + _check_mode(module, False) + _check_mode(module, True) + private_network = oneandone_conn.delete_private_network(private_network_id) + wait_for_resource_deletion_completion(oneandone_conn, + OneAndOneResources.private_network, + private_network['id'], + wait_timeout, + wait_interval) + + changed = True if private_network else False + + return (changed, { + 'id': private_network['id'], + 'name': private_network['name'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + private_network=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + network_address=dict(type='str'), + subnet_mask=dict(type='str'), + add_members=dict(type='list', elements="str", default=[]), + remove_members=dict(type='list', elements="str", default=[]), + datacenter=dict( + choices=DATACENTERS), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for deleting a network.") + try: + (changed, private_network) = remove_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'update': + if not module.params.get('private_network'): + module.fail_json( + msg="'private_network' parameter is required for updating a network.") + try: + (changed, private_network) = update_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'present': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for new networks.") + try: + (changed, private_network) = create_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, private_network=private_network) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py deleted file mode 120000 index ad2aba0e01..0000000000 --- a/plugins/modules/oneandone_public_ip.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/oneandone/oneandone_public_ip.py \ No newline at end of file diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py new file mode 100644 index 0000000000..4cc622eaa4 --- /dev/null +++ b/plugins/modules/oneandone_public_ip.py 
@@ -0,0 +1,332 @@ +#!/usr/bin/python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneandone_public_ip +short_description: Configure 1&1 public IPs +description: + - Create, update, and remove public IPs. This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a public IP state to create, remove, or update. + type: str + required: false + default: 'present' + choices: ["present", "absent", "update"] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + api_url: + description: + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. + type: str + required: false + reverse_dns: + description: + - Reverse DNS name. maxLength=256. + type: str + required: false + datacenter: + description: + - ID of the datacenter where the IP is created (only for unassigned IPs). + type: str + choices: [US, ES, DE, GB] + default: US + required: false + type: + description: + - Type of IP. Currently, only IPV4 is available. + type: str + choices: ["IPV4", "IPV6"] + default: 'IPV4' + required: false + public_ip_id: + description: + - The ID of the public IP used with update and delete states. + type: str + wait: + description: + - Wait for the instance to be in state 'running' before returning. + required: false + default: true + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods. + type: int + default: 5 + +requirements: + - "1and1" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +""" + +EXAMPLES = r""" +- name: Create a public IP + community.general.oneandone_public_ip: + auth_token: oneandone_private_api_key + reverse_dns: example.com + datacenter: US + type: IPV4 + +- name: Update a public IP + community.general.oneandone_public_ip: + auth_token: oneandone_private_api_key + public_ip_id: public ip id + reverse_dns: secondexample.com + state: update + +- name: Delete a public IP + community.general.oneandone_public_ip: + auth_token: oneandone_private_api_key + public_ip_id: public ip id + state: absent +""" + +RETURN = r""" +public_ip: + description: Information about the public IP that was processed. 
+ type: dict + sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"} + returned: always +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_datacenter, + get_public_ip, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + +TYPES = ['IPV4', 'IPV6'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def create_public_ip(module, oneandone_conn): + """ + Create new public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was added. + """ + reverse_dns = module.params.get('reverse_dns') + datacenter = module.params.get('datacenter') + ip_type = module.params.get('type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + _check_mode(module, False) + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + try: + _check_mode(module, True) + public_ip = oneandone_conn.create_public_ip( + reverse_dns=reverse_dns, + ip_type=ip_type, + datacenter_id=datacenter_id) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.public_ip, + public_ip['id'], + wait_timeout, + wait_interval) + public_ip = oneandone_conn.get_public_ip(public_ip['id']) + + changed = True if public_ip else False + + return (changed, public_ip) + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_public_ip(module, oneandone_conn): + """ + Update a public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was changed. + """ + reverse_dns = module.params.get('reverse_dns') + public_ip_id = module.params.get('public_ip_id') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + public_ip = get_public_ip(oneandone_conn, public_ip_id, True) + if public_ip is None: + _check_mode(module, False) + module.fail_json( + msg='public IP %s not found.' % public_ip_id) + + try: + _check_mode(module, True) + public_ip = oneandone_conn.modify_public_ip( + ip_id=public_ip['id'], + reverse_dns=reverse_dns) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.public_ip, + public_ip['id'], + wait_timeout, + wait_interval) + public_ip = oneandone_conn.get_public_ip(public_ip['id']) + + changed = True if public_ip else False + + return (changed, public_ip) + except Exception as e: + module.fail_json(msg=str(e)) + + +def delete_public_ip(module, oneandone_conn): + """ + Delete a public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was deleted. 
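+ The second element of the returned tuple holds the deleted IP's id.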
+ """ + public_ip_id = module.params.get('public_ip_id') + + public_ip = get_public_ip(oneandone_conn, public_ip_id, True) + if public_ip is None: + _check_mode(module, False) + module.fail_json( + msg='public IP %s not found.' % public_ip_id) + + try: + _check_mode(module, True) + deleted_public_ip = oneandone_conn.delete_public_ip( + ip_id=public_ip['id']) + + changed = True if deleted_public_ip else False + + return (changed, { + 'id': public_ip['id'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + public_ip_id=dict(type='str'), + reverse_dns=dict(type='str'), + datacenter=dict( + choices=DATACENTERS, + default='US'), + type=dict( + choices=TYPES, + default='IPV4'), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('public_ip_id'): + module.fail_json( + msg="'public_ip_id' parameter is required to delete a public ip.") + try: + (changed, public_ip) = delete_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'update': + if not module.params.get('public_ip_id'): + module.fail_json( + msg="'public_ip_id' parameter is required to update a public ip.") + try: + (changed, public_ip) = update_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'present': + try: + (changed, public_ip) = create_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, public_ip=public_ip) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py deleted file mode 120000 index 64e5beaa5f..0000000000 --- a/plugins/modules/oneandone_server.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/oneandone/oneandone_server.py \ No newline at end of file diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py new file mode 100644 index 0000000000..23713890fd --- /dev/null +++ b/plugins/modules/oneandone_server.py @@ -0,0 +1,688 @@ +#!/usr/bin/python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneandone_server +short_description: Create, destroy, start, stop, and reboot a 1&1 Host server +description: + - Create, destroy, update, start, stop, and reboot a 1&1 Host server. 
When the server is created it can optionally wait
+ for it to be 'running' before returning.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Define a server's state to create, remove, start, or stop it.
+ type: str
+ default: present
+ choices: ["present", "absent", "running", "stopped"]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1. Overrides the E(ONEANDONE_AUTH_TOKEN) environment variable.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
+ type: str
+ datacenter:
+ description:
+ - The datacenter location.
+ type: str
+ default: US
+ choices: ["US", "ES", "DE", "GB"]
+ hostname:
+ description:
+ - The hostname or ID of the server. Only used when state is 'present'.
+ type: str
+ description:
+ description:
+ - The description of the server.
+ type: str
+ appliance:
+ description:
+ - The operating system name or ID for the server. It is required only for 'present' state.
+ type: str
+ fixed_instance_size:
+ description:
+ - The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive
+ with vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL).'
+ type: str
+ vcore:
+ description:
+ - The total number of processors. It must be provided with O(cores_per_processor), O(ram), and O(hdds) parameters.
+ type: int
+ cores_per_processor:
+ description:
+ - The number of cores per processor. It must be provided with O(vcore), O(ram), and O(hdds) parameters.
+ type: int
+ ram:
+ description:
+ - The amount of RAM. It must be provided with O(vcore), O(cores_per_processor), and O(hdds) parameters.
+ type: float
+ hdds:
+ description:
+ - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided
+ with O(vcore), O(cores_per_processor), and O(ram) parameters.
+ type: list
+ elements: dict
+ private_network:
+ description:
+ - The private network name or ID.
+ type: str
+ firewall_policy:
+ description:
+ - The firewall policy name or ID.
+ type: str
+ load_balancer:
+ description:
+ - The load balancer name or ID.
+ type: str
+ monitoring_policy:
+ description:
+ - The monitoring policy name or ID.
+ type: str
+ server:
+ description:
+ - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'.
+ type: str
+ count:
+ description:
+ - The number of servers to create.
+ type: int
+ default: 1
+ ssh_key:
+ description:
+ - User's public SSH key (contents, not path).
+ type: raw
+ server_type:
+ description:
+ - The type of server to be built.
+ type: str
+ default: "cloud"
+ choices: ["cloud", "baremetal", "k8s_node"]
+ wait:
+ description:
+ - Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if
+ you do not want to wait for each individual server to be deleted before moving on with other tasks).
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the wait_for methods.
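+ - Used as the polling interval for create and delete waits and for power state changes.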
+ type: int + default: 5 + auto_increment: + description: + - When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting + the count where there is a %02d or %03d in the hostname string. + type: bool + default: true + +requirements: + - "1and1" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +""" + +EXAMPLES = r""" +- name: Create three servers and enumerate their names + community.general.oneandone_server: + auth_token: oneandone_private_api_key + hostname: node%02d + fixed_instance_size: XL + datacenter: US + appliance: C5A349786169F140BCBC335675014C08 + auto_increment: true + count: 3 + +- name: Create three servers, passing in an ssh_key + community.general.oneandone_server: + auth_token: oneandone_private_api_key + hostname: node%02d + vcore: 2 + cores_per_processor: 4 + ram: 8.0 + hdds: + - size: 50 + is_main: false + datacenter: ES + appliance: C5A349786169F140BCBC335675014C08 + count: 3 + wait: true + wait_timeout: 600 + wait_interval: 10 + ssh_key: SSH_PUBLIC_KEY + +- name: Removing server + community.general.oneandone_server: + auth_token: oneandone_private_api_key + state: absent + server: 'node01' + +- name: Starting server + community.general.oneandone_server: + auth_token: oneandone_private_api_key + state: running + server: 'node01' + +- name: Stopping server + community.general.oneandone_server: + auth_token: oneandone_private_api_key + state: stopped + server: 'node01' +""" + +RETURN = r""" +servers: + description: Information about each server that was processed. + type: list + sample: + - {"hostname": "my-server", "id": "server-id"} + returned: always +""" + +import os +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_datacenter, + get_fixed_instance_size, + get_appliance, + get_private_network, + get_monitoring_policy, + get_firewall_policy, + get_load_balancer, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion, + wait_for_resource_deletion_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + +ONEANDONE_SERVER_STATES = ( + 'DEPLOYING', + 'POWERED_OFF', + 'POWERED_ON', + 'POWERING_ON', + 'POWERING_OFF', +) + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _create_server(module, oneandone_conn, hostname, description, + fixed_instance_size_id, vcore, cores_per_processor, ram, + hdds, datacenter_id, appliance_id, ssh_key, + private_network_id, firewall_policy_id, load_balancer_id, + monitoring_policy_id, server_type, wait, wait_timeout, + wait_interval): + + try: + existing_server = get_server(oneandone_conn, hostname) + + if existing_server: + if module.check_mode: + return False + return None + + if module.check_mode: + return True + + server = oneandone_conn.create_server( + oneandone.client.Server( + name=hostname, + description=description, + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + appliance_id=appliance_id, + datacenter_id=datacenter_id, + rsa_key=ssh_key, + private_network_id=private_network_id, + firewall_policy_id=firewall_policy_id, + load_balancer_id=load_balancer_id, + monitoring_policy_id=monitoring_policy_id, + server_type=server_type,), hdds) + + if wait: + 
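# Block until the new server is provisioned; polls every wait_interval seconds up to wait_timeout.
+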
wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.server, + server['id'], + wait_timeout, + wait_interval) + server = oneandone_conn.get_server(server['id']) # refresh + + return server + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _insert_network_data(server): + for addr_data in server['ips']: + if addr_data['type'] == 'IPV6': + server['public_ipv6'] = addr_data['ip'] + elif addr_data['type'] == 'IPV4': + server['public_ipv4'] = addr_data['ip'] + return server + + +def create_server(module, oneandone_conn): + """ + Create new server + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any server was added, and a 'servers' attribute with the list of the + created servers' hostname, id and ip addresses. + """ + hostname = module.params.get('hostname') + description = module.params.get('description') + auto_increment = module.params.get('auto_increment') + count = module.params.get('count') + fixed_instance_size = module.params.get('fixed_instance_size') + vcore = module.params.get('vcore') + cores_per_processor = module.params.get('cores_per_processor') + ram = module.params.get('ram') + hdds = module.params.get('hdds') + datacenter = module.params.get('datacenter') + appliance = module.params.get('appliance') + ssh_key = module.params.get('ssh_key') + private_network = module.params.get('private_network') + monitoring_policy = module.params.get('monitoring_policy') + firewall_policy = module.params.get('firewall_policy') + load_balancer = module.params.get('load_balancer') + server_type = module.params.get('server_type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + _check_mode(module, False) + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + fixed_instance_size_id = None + if fixed_instance_size: + fixed_instance_size_id = get_fixed_instance_size( + oneandone_conn, + fixed_instance_size) + if fixed_instance_size_id is None: + _check_mode(module, False) + module.fail_json( + msg='fixed_instance_size %s not found.' % fixed_instance_size) + + appliance_id = get_appliance(oneandone_conn, appliance) + if appliance_id is None: + _check_mode(module, False) + module.fail_json( + msg='appliance %s not found.' % appliance) + + private_network_id = None + if private_network: + private_network_id = get_private_network( + oneandone_conn, + private_network) + if private_network_id is None: + _check_mode(module, False) + module.fail_json( + msg='private network %s not found.' % private_network) + + monitoring_policy_id = None + if monitoring_policy: + monitoring_policy_id = get_monitoring_policy( + oneandone_conn, + monitoring_policy) + if monitoring_policy_id is None: + _check_mode(module, False) + module.fail_json( + msg='monitoring policy %s not found.' % monitoring_policy) + + firewall_policy_id = None + if firewall_policy: + firewall_policy_id = get_firewall_policy( + oneandone_conn, + firewall_policy) + if firewall_policy_id is None: + _check_mode(module, False) + module.fail_json( + msg='firewall policy %s not found.' 
% firewall_policy) + + load_balancer_id = None + if load_balancer: + load_balancer_id = get_load_balancer( + oneandone_conn, + load_balancer) + if load_balancer_id is None: + _check_mode(module, False) + module.fail_json( + msg='load balancer %s not found.' % load_balancer) + + if auto_increment: + hostnames = _auto_increment_hostname(count, hostname) + descriptions = _auto_increment_description(count, description) + else: + hostnames = [hostname] * count + descriptions = [description] * count + + hdd_objs = [] + if hdds: + for hdd in hdds: + hdd_objs.append(oneandone.client.Hdd( + size=hdd['size'], + is_main=hdd['is_main'] + )) + + servers = [] + for index, name in enumerate(hostnames): + server = _create_server( + module=module, + oneandone_conn=oneandone_conn, + hostname=name, + description=descriptions[index], + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + hdds=hdd_objs, + datacenter_id=datacenter_id, + appliance_id=appliance_id, + ssh_key=ssh_key, + private_network_id=private_network_id, + monitoring_policy_id=monitoring_policy_id, + firewall_policy_id=firewall_policy_id, + load_balancer_id=load_balancer_id, + server_type=server_type, + wait=wait, + wait_timeout=wait_timeout, + wait_interval=wait_interval) + if server: + servers.append(server) + + changed = False + + if servers: + for server in servers: + if server: + _check_mode(module, True) + _check_mode(module, False) + servers = [_insert_network_data(_server) for _server in servers] + changed = True + + _check_mode(module, False) + + return (changed, servers) + + +def remove_server(module, oneandone_conn): + """ + Removes a server. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. + + Returns a dictionary containing a 'changed' attribute indicating whether + the server was removed, and a 'removed_server' attribute with + the removed server's hostname and id. + """ + server_id = module.params.get('server') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + changed = False + removed_server = None + + server = get_server(oneandone_conn, server_id, True) + if server: + _check_mode(module, True) + try: + oneandone_conn.delete_server(server_id=server['id']) + if wait: + wait_for_resource_deletion_completion(oneandone_conn, + OneAndOneResources.server, + server['id'], + wait_timeout, + wait_interval) + changed = True + except Exception as ex: + module.fail_json( + msg="failed to terminate the server: %s" % str(ex)) + + removed_server = { + 'id': server['id'], + 'hostname': server['name'] + } + _check_mode(module, False) + + return (changed, removed_server) + + +def startstop_server(module, oneandone_conn): + """ + Starts or Stops a server. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. + + Returns a dictionary with a 'changed' attribute indicating whether + anything has changed for the server as a result of this function + being run, and a 'server' attribute with basic information for + the server. 
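+ When wait is true, the function polls every wait_interval seconds until the requested
+ power state is reached or wait_timeout expires.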
+ """ + state = module.params.get('state') + server_id = module.params.get('server') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + changed = False + + # Resolve server + server = get_server(oneandone_conn, server_id, True) + if server: + # Attempt to change the server state, only if it is not already there + # or on its way. + try: + if state == 'stopped' and server['status']['state'] == 'POWERED_ON': + _check_mode(module, True) + oneandone_conn.modify_server_status( + server_id=server['id'], + action='POWER_OFF', + method='SOFTWARE') + elif state == 'running' and server['status']['state'] == 'POWERED_OFF': + _check_mode(module, True) + oneandone_conn.modify_server_status( + server_id=server['id'], + action='POWER_ON', + method='SOFTWARE') + except Exception as ex: + module.fail_json( + msg="failed to set server %s to state %s: %s" % ( + server_id, state, str(ex))) + + _check_mode(module, False) + + # Make sure the server has reached the desired state + if wait: + operation_completed = False + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(wait_interval) + server = oneandone_conn.get_server(server['id']) # refresh + server_state = server['status']['state'] + if state == 'stopped' and server_state == 'POWERED_OFF': + operation_completed = True + break + if state == 'running' and server_state == 'POWERED_ON': + operation_completed = True + break + if not operation_completed: + module.fail_json( + msg="Timeout waiting for server %s to get to state %s" % ( + server_id, state)) + + changed = True + server = _insert_network_data(server) + + _check_mode(module, False) + + return (changed, server) + + +def _auto_increment_hostname(count, hostname): + """ + Allow a custom incremental count in the hostname when defined with the + string formatting (%) operator. Otherwise, increment using name-01, + name-02, name-03, and so forth. + """ + if '%' not in hostname: + hostname = "%s-%%01d" % hostname + + return [ + hostname % i + for i in range(1, count + 1) + ] + + +def _auto_increment_description(count, description): + """ + Allow the incremental count in the description when defined with the + string formatting (%) operator. Otherwise, repeat the same description. 
+ """ + if '%' in description: + return [ + description % i + for i in range(1, count + 1) + ] + else: + return [description] * count + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN'), + no_log=True), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + hostname=dict(type='str'), + description=dict(type='str'), + appliance=dict(type='str'), + fixed_instance_size=dict(type='str'), + vcore=dict(type='int'), + cores_per_processor=dict(type='int'), + ram=dict(type='float'), + hdds=dict(type='list', elements='dict'), + count=dict(type='int', default=1), + ssh_key=dict(type='raw', no_log=False), + auto_increment=dict(type='bool', default=True), + server=dict(type='str'), + datacenter=dict( + choices=DATACENTERS, + default='US'), + private_network=dict(type='str'), + firewall_policy=dict(type='str'), + load_balancer=dict(type='str'), + monitoring_policy=dict(type='str'), + server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']), + ), + supports_check_mode=True, + mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'], + ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],), + required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],) + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='The "auth_token" parameter or ' + + 'ONEANDONE_AUTH_TOKEN environment variable is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('server'): + module.fail_json( + msg="'server' parameter is required for deleting a server.") + try: + (changed, servers) = remove_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state in ('running', 'stopped'): + if not module.params.get('server'): + module.fail_json( + msg="'server' parameter is required for starting/stopping a server.") + try: + (changed, servers) = startstop_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('hostname', + 'appliance', + 'datacenter'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new server." 
% param)
+ try:
+ (changed, servers) = create_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, servers=servers)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/onepassword_info.py b/plugins/modules/onepassword_info.py
deleted file mode 120000
index d8d5642ca9..0000000000
--- a/plugins/modules/onepassword_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./identity/onepassword_info.py
\ No newline at end of file
diff --git a/plugins/modules/onepassword_info.py b/plugins/modules/onepassword_info.py
new file mode 100644
index 0000000000..e60f060b0e
--- /dev/null
+++ b/plugins/modules/onepassword_info.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Ryan Conway (@rylon)
+# Copyright (c) 2018, Scott Buchanan (onepassword.py used as starting point)
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5.
+ - Based on the P(community.general.onepassword#lookup) lookup plugin by Scott Buchanan.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for cannot be found.
+ - Using the C(no_log) option is recommended to avoid logging the values of the secrets being retrieved.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ search_terms:
+ type: list
+ elements: dict
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, O(search_terms[].field) is assumed to be V(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to V(password), or V(document) if the
+ item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, it searches all sections if not
+ specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults
+ (optional).
+ required: true
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, the module attempts to sign in to 1Password automatically.
+ - Without this option, you must have already logged in using the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (V(subdomain).1password.com).
+ - If this is not specified, the most recent subdomain is used. + username: + type: str + description: + - 1Password username. + - Only required for initial sign in. + master_password: + type: str + description: + - The master password for your subdomain. + - This is always required when specifying O(auto_login). + required: true + secret_key: + type: str + description: + - The secret key for your subdomain. + - Only required for initial sign in. + required: false + cli_path: + type: path + description: Used to specify the exact path to the C(op) command line interface. + required: false + default: 'op' +""" + +EXAMPLES = r""" +# Gather secrets from 1Password, assuming there is a 'password' field: +- name: Get a password + community.general.onepassword_info: + search_terms: My 1Password item + delegate_to: localhost + register: my_1password_item + no_log: true # Don't want to log the secrets to the console! + +# Gather secrets from 1Password, with more advanced search terms: +- name: Get a password + community.general.onepassword_info: + search_terms: + - name: My 1Password item + field: Custom field name # optional, defaults to 'password' + section: Custom section name # optional, defaults to 'None' + vault: Name of the vault # optional, only necessary if there is more than 1 Vault available + delegate_to: localhost + register: my_1password_item + no_log: true # Don't want to log the secrets to the console! + +# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two +# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the +# second, 'Custom field name' is fetched, as that is specified explicitly. +- name: Get a password + community.general.onepassword_info: + search_terms: + - My 1Password item # 'name' is optional when passing a simple string... + - name: My Other 1Password item # ...but it can also be set for consistency + - name: My 1Password item + field: Custom field name # optional, defaults to 'password' + section: Custom section name # optional, defaults to 'None' + vault: Name of the vault # optional, only necessary if there is more than 1 Vault available + - name: A 1Password item with document attachment + delegate_to: localhost + register: my_1password_item + no_log: true # Don't want to log the secrets to the console! + +- name: Debug a password (for example) + ansible.builtin.debug: + msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}" +""" + +RETURN = r""" +# One or more dictionaries for each matching item from 1Password, along with the appropriate fields. +# This shows the response you would expect to receive from the third example documented above. +onepassword: + description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third + example above. 
+ returned: success + type: dict + sample: + "My 1Password item": + password: the value of this field + Custom field name: the value of this field + "My Other 1Password item": + password: the value of this field + "A 1Password item with document attachment": + document: the contents of the document attached to this item +""" + + +import errno +import json +import os +import re + +from subprocess import Popen, PIPE + +from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig + + +class AnsibleModuleError(Exception): + def __init__(self, results): + self.results = results + + def __repr__(self): + return self.results + + +class OnePasswordInfo(object): + + def __init__(self): + self.cli_path = module.params.get('cli_path') + self.auto_login = module.params.get('auto_login') + self.logged_in = False + self.token = None + + terms = module.params.get('search_terms') + self.terms = self.parse_search_terms(terms) + + self._config = OnePasswordConfig() + + def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): + if self.token: + # Adds the session token to all commands if we're logged in. + args += [to_bytes('--session=') + self.token] + + command = [self.cli_path] + args + p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(input=command_input) + rc = p.wait() + if not ignore_errors and rc != expected_rc: + raise AnsibleModuleError(to_native(err)) + return rc, out, err + + def _parse_field(self, data_json, item_id, field_name, section_title=None): + data = json.loads(data_json) + + if 'documentAttributes' in data['details']: + # This is actually a document, let's fetch the document data instead! + document = self._run(["get", "document", data['overview']['title']]) + return {'document': document[1].strip()} + + else: + # This is not a document, let's try to find the requested field + + # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute, + # not inside it, so we need to check there first. + if field_name in data['details']: + return {field_name: data['details'][field_name]} + + # Otherwise we continue looking inside the 'fields' attribute for the specified field. + else: + if section_title is None: + for field_data in data['details'].get('fields', []): + if field_data.get('name', '').lower() == field_name.lower(): + return {field_name: field_data.get('value', '')} + + # Not found it yet, so now lets see if there are any sections defined + # and search through those for the field. If a section was given, we skip + # any non-matching sections, otherwise we search them all until we find the field. + for section_data in data['details'].get('sections', []): + if section_title is not None and section_title.lower() != section_data['title'].lower(): + continue + for field_data in section_data.get('fields', []): + if field_data.get('t', '').lower() == field_name.lower(): + return {field_name: field_data.get('v', '')} + + # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded. + optional_section_title = '' if section_title is None else " in the section '%s'" % section_title + module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." 
% (item_id, field_name, optional_section_title)) + + def parse_search_terms(self, terms): + processed_terms = [] + + for term in terms: + if not isinstance(term, dict): + term = {'name': term} + + if 'name' not in term: + module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term)) + + term['field'] = term.get('field', 'password') + term['section'] = term.get('section', None) + term['vault'] = term.get('vault', None) + + processed_terms.append(term) + + return processed_terms + + def get_raw(self, item_id, vault=None): + try: + args = ["get", "item", item_id] + if vault is not None: + args += ['--vault={0}'.format(vault)] + rc, output, dummy = self._run(args) + return output + + except Exception as e: + if re.search(".*not found.*", to_native(e)): + module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id) + else: + module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e))) + + def get_field(self, item_id, field, section=None, vault=None): + output = self.get_raw(item_id, vault) + return self._parse_field(output, item_id, field, section) if output != '' else '' + + def full_login(self): + if self.auto_login is not None: + if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'), + self.auto_login.get('secret_key'), self.auto_login.get('master_password')]: + module.fail_json(msg='Unable to perform initial sign in to 1Password. ' + 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') + + args = [ + 'signin', + '{0}.1password.com'.format(self.auto_login['subdomain']), + to_bytes(self.auto_login['username']), + to_bytes(self.auto_login['secret_key']), + '--output=raw', + ] + + try: + rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) + self.token = out.strip() + except AnsibleModuleError as e: + module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e)) + else: + module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' " + "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path) + + def get_token(self): + # If the config file exists, assume an initial signin has taken place and try basic sign in + if os.path.isfile(self._config.config_file_path): + + if self.auto_login is not None: + + # Since we are not currently signed in, master_password is required at a minimum + if not self.auto_login.get('master_password'): + module.fail_json(msg="Unable to sign in to 1Password. 
'auto_login.master_password' is required.") + + # Try signing in using the master_password and a subdomain if one is provided + try: + args = ['signin', '--output=raw'] + + if self.auto_login.get('subdomain'): + args = ['signin', self.auto_login['subdomain'], '--output=raw'] + + rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) + self.token = out.strip() + + except AnsibleModuleError: + self.full_login() + + else: + self.full_login() + + else: + # Attempt a full sign in since there appears to be no existing sign in + self.full_login() + + def assert_logged_in(self): + try: + rc, out, err = self._run(['get', 'account'], ignore_errors=True) + if rc == 0: + self.logged_in = True + if not self.logged_in: + self.get_token() + except OSError as e: + if e.errno == errno.ENOENT: + module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path) + raise e + + def run(self): + result = {} + + self.assert_logged_in() + + for term in self.terms: + value = self.get_field(term['name'], term['field'], term['section'], term['vault']) + + if term['name'] in result: + # If we already have a result for this key, we have to append this result dictionary + # to the existing one. This is only applicable when there is a single item + # in 1Password which has two different fields, and we want to retrieve both of them. + result[term['name']].update(value) + else: + # If this is the first result for this key, simply set it. + result[term['name']] = value + + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + cli_path=dict(type='path', default='op'), + auto_login=dict(type='dict', options=dict( + subdomain=dict(type='str'), + username=dict(type='str'), + master_password=dict(required=True, type='str', no_log=True), + secret_key=dict(type='str', no_log=True), + )), + search_terms=dict(required=True, type='list', elements='dict'), + ), + supports_check_mode=True + ) + + results = {'onepassword': OnePasswordInfo().run()} + + module.exit_json(changed=False, **results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py deleted file mode 120000 index 654acf48e1..0000000000 --- a/plugins/modules/oneview_datacenter_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_datacenter_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py new file mode 100644 index 0000000000..cf9f10af79 --- /dev/null +++ b/plugins/modules/oneview_datacenter_info.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_datacenter_info +short_description: Retrieve information about the OneView Data Centers +description: + - Retrieve information about the OneView Data Centers. +requirements: + - "hpOneView >= 2.0.1" +author: + - Alex Monteiro (@aalexmonteiro) + - Madhav Bharadwaj (@madhav-bharadwaj) + - Priyanka Sood (@soodpr) + - Ricardo Galeno (@ricardogpsf) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Data Center name. 
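+ - If not specified, all Data Centers are retrieved.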
+ type: str + options: + description: + - 'Retrieve additional information. Options available: V(visualContent).' + type: list + elements: str + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all Data Centers + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + delegate_to: localhost + register: result + +- name: Print fetched information about Data Centers + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Gather paginated, filtered and sorted information about Data Centers + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: 'state=Unmanaged' + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Data Centers + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Gather information about a Data Center by name + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: "My Data Center" + delegate_to: localhost + register: result + +- name: Print fetched information about Data Center found by name + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Gather information about the Data Center Visual Content + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: "My Data Center" + options: + - visualContent + delegate_to: localhost + register: result + +- name: Print fetched information about Data Center found by name + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Print fetched information about Data Center Visual Content + ansible.builtin.debug: + msg: "{{ result.datacenter_visual_content }}" +""" + +RETURN = r""" +datacenters: + description: Has all the OneView information about the Data Centers. + returned: Always, but can be null. + type: dict + +datacenter_visual_content: + description: Has information about the Data Center Visual Content. + returned: When requested, but can be null. 
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class DatacenterInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(DatacenterInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + client = self.oneview_client.datacenters + info = {} + + if self.module.params.get('name'): + datacenters = client.get_by('name', self.module.params['name']) + + if self.options and 'visualContent' in self.options: + if datacenters: + info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri']) + else: + info['datacenter_visual_content'] = None + + info['datacenters'] = datacenters + else: + info['datacenters'] = client.get_all(**self.facts_params) + + return dict(changed=False, **info) + + +def main(): + DatacenterInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py deleted file mode 120000 index 4c011d58be..0000000000 --- a/plugins/modules/oneview_enclosure_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_enclosure_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py new file mode 100644 index 0000000000..b57c8210f4 --- /dev/null +++ b/plugins/modules/oneview_enclosure_info.py @@ -0,0 +1,245 @@ +#!/usr/bin/python + +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_enclosure_info +short_description: Retrieve information about one or more Enclosures +description: + - Retrieve information about one or more of the Enclosures from OneView. +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Enclosure name. + type: str + options: + description: + - 'List with options to gather additional information about an Enclosure and related resources. Options allowed: V(script), + V(environmentalConfiguration), and V(utilization). For the option V(utilization), you can provide specific parameters.' 
+ type: list + elements: raw + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all Enclosures + community.general.oneview_enclosure_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Enclosures + ansible.builtin.debug: + msg: "{{ result.enclosures }}" + +- name: Gather paginated, filtered and sorted information about Enclosures + community.general.oneview_enclosure_info: + params: + start: 0 + count: 3 + sort: name:descending + filter: status=OK + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Enclosures + ansible.builtin.debug: + msg: "{{ result.enclosures }}" + +- name: Gather information about an Enclosure by name + community.general.oneview_enclosure_info: + name: Enclosure-Name + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Enclosure found by name + ansible.builtin.debug: + msg: "{{ result.enclosures }}" + +- name: Gather information about an Enclosure by name with options + community.general.oneview_enclosure_info: + name: Test-Enclosure + options: + - script # optional + - environmentalConfiguration # optional + - utilization # optional + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Enclosure found by name + ansible.builtin.debug: + msg: "{{ result.enclosures }}" + +- name: Print fetched information about Enclosure Script + ansible.builtin.debug: + msg: "{{ result.enclosure_script }}" + +- name: Print fetched information about Enclosure Environmental Configuration + ansible.builtin.debug: + msg: "{{ result.enclosure_environmental_configuration }}" + +- name: Print fetched information about Enclosure Utilization + ansible.builtin.debug: + msg: "{{ result.enclosure_utilization }}" + +- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two + specified dates" + community.general.oneview_enclosure_info: + name: Test-Enclosure + options: + - utilization: # optional + fields: AmbientTemperature + filter: + - startDate=2016-07-01T14:29:42.000Z + - endDate=2017-07-01T03:29:42.000Z + view: day + refresh: false + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Enclosure found by name + ansible.builtin.debug: + msg: "{{ result.enclosures }}" + +- name: Print fetched information about Enclosure Utilization + ansible.builtin.debug: + msg: "{{ result.enclosure_utilization }}" +""" + +RETURN = r""" +enclosures: + description: Has all the OneView information about the Enclosures. + returned: Always, but can be null. + type: dict + +enclosure_script: + description: Has all the OneView information about the script of an Enclosure. + returned: When requested, but can be null. 
+ type: str + +enclosure_environmental_configuration: + description: Has all the OneView information about the environmental configuration of an Enclosure. + returned: When requested, but can be null. + type: dict + +enclosure_utilization: + description: Has all the OneView information about the utilization of an Enclosure. + returned: When requested, but can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class EnclosureInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='raw'), + params=dict(type='dict') + ) + + def __init__(self): + super(EnclosureInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + info = {} + + if self.module.params['name']: + enclosures = self._get_by_name(self.module.params['name']) + + if self.options and enclosures: + info = self._gather_optional_info(self.options, enclosures[0]) + else: + enclosures = self.oneview_client.enclosures.get_all(**self.facts_params) + + info['enclosures'] = enclosures + + return dict(changed=False, **info) + + def _gather_optional_info(self, options, enclosure): + + enclosure_client = self.oneview_client.enclosures + info = {} + + if options.get('script'): + info['enclosure_script'] = enclosure_client.get_script(enclosure['uri']) + if options.get('environmentalConfiguration'): + env_config = enclosure_client.get_environmental_configuration(enclosure['uri']) + info['enclosure_environmental_configuration'] = env_config + if options.get('utilization'): + info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization']) + + return info + + def _get_utilization(self, enclosure, params): + fields = view = refresh = filter = '' + + if isinstance(params, dict): + fields = params.get('fields') + view = params.get('view') + refresh = params.get('refresh') + filter = params.get('filter') + + return self.oneview_client.enclosures.get_utilization(enclosure['uri'], + fields=fields, + filter=filter, + refresh=refresh, + view=view) + + def _get_by_name(self, name): + return self.oneview_client.enclosures.get_by('name', name) + + +def main(): + EnclosureInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py deleted file mode 120000 index a830b7132b..0000000000 --- a/plugins/modules/oneview_ethernet_network.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_ethernet_network.py \ No newline at end of file diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py new file mode 100644 index 0000000000..1a50d9ea65 --- /dev/null +++ b/plugins/modules/oneview_ethernet_network.py @@ -0,0 +1,253 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_ethernet_network +short_description: Manage OneView Ethernet Network resources +description: + - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. 
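An aside on the enclosure module just above: its V(utilization) option is the only one that takes structured parameters, which is why its options list uses elements: raw. A standalone sketch (function name hypothetical) of the unpacking that EnclosureInfoModule._get_utilization() performs, where a dict option carries fields/view/refresh/filter and a bare string option leaves them at their empty defaults:

    def unpack_utilization_params(params):
        fields = view = refresh = filter_ = ''
        if isinstance(params, dict):
            fields = params.get('fields')
            view = params.get('view')
            refresh = params.get('refresh')
            filter_ = params.get('filter')
        return dict(fields=fields, view=view, refresh=refresh, filter=filter_)

    print(unpack_utilization_params({'fields': 'AmbientTemperature', 'view': 'day'}))
    print(unpack_utilization_params('utilization'))  # bare string option: all defaults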
+requirements: + - hpOneView >= 3.1.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Ethernet Network resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + - V(default_bandwidth_reset) resets the network connection template to the default. + type: str + default: present + choices: [present, absent, default_bandwidth_reset] + data: + description: + - List with Ethernet Network properties. + type: dict + required: true +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure that the Ethernet Network is present using the default configuration + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + vlanId: '201' + delegate_to: localhost + +- name: Update the Ethernet Network changing bandwidth and purpose + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + purpose: Management + bandwidth: + maximumBandwidth: 3000 + typicalBandwidth: 2000 + delegate_to: localhost + +- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network' + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + newName: 'Renamed Ethernet Network' + delegate_to: localhost + +- name: Ensure that the Ethernet Network is absent + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: absent + data: + name: 'New Ethernet Network' + delegate_to: localhost + +- name: Create Ethernet networks in bulk + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + vlanIdRange: '1-10,15,17' + purpose: General + namePrefix: TestNetwork + smartLink: false + privateNetwork: false + bandwidth: + maximumBandwidth: 10000 + typicalBandwidth: 2000 + delegate_to: localhost + +- name: Reset to the default network connection template + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: default_bandwidth_reset + data: + name: 'Test Ethernet Network' + delegate_to: localhost +""" + +RETURN = r""" +ethernet_network: + description: Has the facts about the Ethernet Networks. + returned: On O(state=present). Can be null. + type: dict + +ethernet_network_bulk: + description: Has the facts about the Ethernet Networks affected by the bulk insert. + returned: When V(vlanIdRange) attribute is in O(data) argument. Can be null. + type: dict + +ethernet_network_connection_template: + description: Has the facts about the Ethernet Network Connection Template. + returned: On O(state=default_bandwidth_reset). Can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class EthernetNetworkModule(OneViewModuleBase): + MSG_CREATED = 'Ethernet Network created successfully.' + MSG_UPDATED = 'Ethernet Network updated successfully.' + MSG_DELETED = 'Ethernet Network deleted successfully.' 
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.' + MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.' + + MSG_BULK_CREATED = 'Ethernet Networks created successfully.' + MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.' + MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.' + MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.' + MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.' + + RESOURCE_FACT_NAME = 'ethernet_network' + + def __init__(self): + + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']), + data=dict(type='dict', required=True), + ) + + super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True) + + self.resource_client = self.oneview_client.ethernet_networks + + def execute_module(self): + + changed, msg, ansible_facts, resource = False, '', {}, None + + if self.data.get('name'): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + if self.data.get('vlanIdRange'): + return self._bulk_present() + else: + return self._present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + elif self.state == 'default_bandwidth_reset': + changed, msg, ansible_facts = self._default_bandwidth_reset(resource) + return dict(changed=changed, msg=msg, ansible_facts=ansible_facts) + + def _present(self, resource): + + bandwidth = self.data.pop('bandwidth', None) + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + + if bandwidth: + if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]: + result['changed'] = True + result['msg'] = self.MSG_UPDATED + + if scope_uris is not None: + result = self.resource_scopes_set(result, 'ethernet_network', scope_uris) + + return result + + def _bulk_present(self): + vlan_id_range = self.data['vlanIdRange'] + result = dict(ansible_facts={}) + ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + + if not ethernet_networks: + self.resource_client.create_bulk(self.data) + result['changed'] = True + result['msg'] = self.MSG_BULK_CREATED + + else: + vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range) + for net in ethernet_networks[:]: + vlan_ids.remove(net['vlanId']) + + if len(vlan_ids) == 0: + result['msg'] = self.MSG_BULK_ALREADY_EXIST + result['changed'] = False + else: + if len(vlan_ids) == 1: + self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0]) + else: + self.data['vlanIdRange'] = ','.join(map(str, vlan_ids)) + + self.resource_client.create_bulk(self.data) + result['changed'] = True + result['msg'] = self.MSG_MISSING_BULK_CREATED + result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + + return result + + def _update_connection_template(self, ethernet_network, bandwidth): + + if 'connectionTemplateUri' not in ethernet_network: + return False, None + + connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri']) + + merged_data = connection_template.copy() + merged_data.update({'bandwidth': bandwidth}) + + if not self.compare(connection_template, merged_data): + connection_template = self.oneview_client.connection_templates.update(merged_data) + return True, connection_template + else: + return False, None + + def _default_bandwidth_reset(self, resource): + + if not resource: + raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND) + + default_connection_template = self.oneview_client.connection_templates.get_default() + + changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth']) + + return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict( + ethernet_network_connection_template=connection_template) + + +def main(): + EthernetNetworkModule().run() + + +if __name__ == '__main__': + main()
diff --git a/plugins/modules/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py deleted file mode 120000 index b3fdf12da0..0000000000 --- a/plugins/modules/oneview_ethernet_network_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_ethernet_network_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py new file mode 100644 index 0000000000..9528323fcf --- /dev/null +++ b/plugins/modules/oneview_ethernet_network_info.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_ethernet_network_info +short_description: Retrieve the information about one or more of the OneView Ethernet Networks +description: + - Retrieve the information about one or more of the Ethernet Networks from OneView. +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Ethernet Network name. + type: str + options: + description: + - 'List with options to gather additional information about an Ethernet Network and related resources. Options allowed: + V(associatedProfiles) and V(associatedUplinkGroups).'
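One subtlety in EthernetNetworkModule._bulk_present() above: when some networks in the requested vlanIdRange already exist, the range is re-expanded into individual VLAN IDs, the existing IDs are removed, and a new (possibly discontiguous) range string is built for only the missing networks. A rough standalone illustration of that set difference, with a hand-rolled expansion standing in for the SDK's dissociate_values_or_ranges() (all data hypothetical):

    # Expand a vlanIdRange spec like '1-10,15,17' into individual VLAN IDs.
    def expand_vlan_range(spec):
        vlan_ids = []
        for part in spec.split(','):
            if '-' in part:
                start, end = (int(x) for x in part.split('-'))
                vlan_ids.extend(range(start, end + 1))
            else:
                vlan_ids.append(int(part))
        return vlan_ids

    existing = {2, 3, 15}  # hypothetical VLAN IDs already present in OneView
    missing = [v for v in expand_vlan_range('1-10,15,17') if v not in existing]
    print(missing)  # -> [1, 4, 5, 6, 7, 8, 9, 10, 17]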
+ type: list + elements: str +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all Ethernet Networks + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about Ethernet Networks + ansible.builtin.debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather paginated and filtered information about Ethernet Networks + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 1 + count: 3 + sort: 'name:descending' + filter: 'purpose=General' + delegate_to: localhost + register: result + +- name: Print fetched information about paginated and filtered list of Ethernet Networks + ansible.builtin.debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather information about an Ethernet Network by name + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + name: Ethernet network name + delegate_to: localhost + register: result + +- name: Print fetched information about Ethernet Network found by name + ansible.builtin.debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather information about an Ethernet Network by name with options + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + name: eth1 + options: + - associatedProfiles + - associatedUplinkGroups + delegate_to: localhost + register: result + +- name: Print fetched information about Ethernet Network Associated Profiles + ansible.builtin.debug: + msg: "{{ result.enet_associated_profiles }}" + +- name: Print fetched information about Ethernet Network Associated Uplink Groups + ansible.builtin.debug: + msg: "{{ result.enet_associated_uplink_groups }}" +""" + +RETURN = r""" +ethernet_networks: + description: Has all the OneView information about the Ethernet Networks. + returned: Always, but can be null. + type: dict + +enet_associated_profiles: + description: Has all the OneView information about the profiles which are using the Ethernet network. + returned: When requested, but can be null. + type: dict + +enet_associated_uplink_groups: + description: Has all the OneView information about the uplink sets which are using the Ethernet network. + returned: When requested, but can be null. 
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class EthernetNetworkInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(EthernetNetworkInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + self.resource_client = self.oneview_client.ethernet_networks + + def execute_module(self): + info = {} + if self.module.params['name']: + ethernet_networks = self.resource_client.get_by('name', self.module.params['name']) + + if self.module.params.get('options') and ethernet_networks: + info = self.__gather_optional_info(ethernet_networks[0]) + else: + ethernet_networks = self.resource_client.get_all(**self.facts_params) + + info['ethernet_networks'] = ethernet_networks + + return dict(changed=False, **info) + + def __gather_optional_info(self, ethernet_network): + + info = {} + + if self.options.get('associatedProfiles'): + info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network) + if self.options.get('associatedUplinkGroups'): + info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network) + + return info + + def __get_associated_profiles(self, ethernet_network): + associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri']) + return [self.oneview_client.server_profiles.get(x) for x in associated_profiles] + + def __get_associated_uplink_groups(self, ethernet_network): + uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri']) + return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups] + + +def main(): + EthernetNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py deleted file mode 120000 index 5c372c5ffc..0000000000 --- a/plugins/modules/oneview_fc_network.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_fc_network.py \ No newline at end of file diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py new file mode 100644 index 0000000000..0b20a96625 --- /dev/null +++ b/plugins/modules/oneview_fc_network.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_fc_network +short_description: Manage OneView Fibre Channel Network resources +description: + - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. +requirements: + - "hpOneView >= 4.0.0" +author: "Felipe Bulsoni (@fgbulsoni)" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Fibre Channel Network resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + type: str + choices: ['present', 'absent'] + required: true + data: + description: + - List with the Fibre Channel Network properties. 
+ type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure that the Fibre Channel Network is present using the default configuration + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + +- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach' + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + fabricType: 'DirectAttach' + +- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + scopeUris: + - '/rest/scopes/00SC123456' + - '/rest/scopes/01SC123456' + +- name: Ensure that the Fibre Channel Network is absent + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: absent + data: + name: 'New FC Network' +""" + +RETURN = r""" +fc_network: + description: Has the facts about the managed OneView FC Network. + returned: On O(state=present). Can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcNetworkModule(OneViewModuleBase): + MSG_CREATED = 'FC Network created successfully.' + MSG_UPDATED = 'FC Network updated successfully.' + MSG_DELETED = 'FC Network deleted successfully.' + MSG_ALREADY_PRESENT = 'FC Network is already present.' + MSG_ALREADY_ABSENT = 'FC Network is already absent.' + RESOURCE_FACT_NAME = 'fc_network' + + def __init__(self): + + additional_arg_spec = dict(data=dict(required=True, type='dict'), + state=dict( + required=True, + choices=['present', 'absent'])) + + super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, + validate_etag_support=True) + + self.resource_client = self.oneview_client.fc_networks + + def execute_module(self): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + return self._present(resource) + else: + return self.resource_absent(resource) + + def _present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, 'fc_network', scope_uris) + return result + + +def main(): + FcNetworkModule().run() + + +if __name__ == '__main__': + main()
diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py deleted file mode 120000 index 5b773fe043..0000000000 --- a/plugins/modules/oneview_fc_network_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_fc_network_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py new file mode 100644 index 0000000000..525659e207 --- /dev/null +++ b/plugins/modules/oneview_fc_network_info.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_fc_network_info +short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks +description: + - Retrieve the information about one or more of the Fibre Channel Networks from OneView. +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Fibre Channel Network name. + type: str + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all Fibre Channel Networks + community.general.oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about Fibre Channel Networks + ansible.builtin.debug: + msg: "{{ result.fc_networks }}" + +- name: Gather paginated, filtered and sorted information about Fibre Channel Networks + community.general.oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 1 + count: 3 + sort: 'name:descending' + filter: 'fabricType=FabricAttach' + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Fibre Channel Networks + ansible.builtin.debug: + msg: "{{ result.fc_networks }}" + +- name: Gather information about a Fibre Channel Network by name + community.general.oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + name: network name + delegate_to: localhost + register: result + +- name: Print fetched information about Fibre Channel Network found by name + ansible.builtin.debug: + msg: "{{ result.fc_networks }}" +""" + +RETURN = r""" +fc_networks: + description: Has all the OneView information about the Fibre Channel Networks. + returned: Always, but can be null.
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcNetworkInfoModule(OneViewModuleBase): + def __init__(self): + + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict') + ) + + super(FcNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + if self.module.params['name']: + fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name']) + else: + fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params) + + return dict(changed=False, fc_networks=fc_networks) + + +def main(): + FcNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py deleted file mode 120000 index 7c65ae9fd5..0000000000 --- a/plugins/modules/oneview_fcoe_network.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_fcoe_network.py \ No newline at end of file diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py new file mode 100644 index 0000000000..0212ea0b64 --- /dev/null +++ b/plugins/modules/oneview_fcoe_network.py @@ -0,0 +1,123 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_fcoe_network +short_description: Manage OneView FCoE Network resources +description: + - Provides an interface to manage FCoE Network resources. Can create, update, or delete. +requirements: + - "Python >= 2.7.9" + - "hpOneView >= 4.0.0" +author: "Felipe Bulsoni (@fgbulsoni)" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the FCoE Network resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with FCoE Network properties. + type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure that FCoE Network is present using the default configuration + community.general.oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: Test FCoE Network + vlanId: 201 + delegate_to: localhost + +- name: Update the FCOE network scopes + community.general.oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: New FCoE Network + scopeUris: + - '/rest/scopes/00SC123456' + - '/rest/scopes/01SC123456' + delegate_to: localhost + +- name: Ensure that FCoE Network is absent + community.general.oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: absent + data: + name: New FCoE Network + delegate_to: localhost +""" + +RETURN = r""" +fcoe_network: + description: Has the facts about the OneView FCoE Networks. + returned: On O(state=present). Can be null. 
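FcNetworkInfoModule.execute_module() above shows the lookup shape that all of these *_info modules share: a name parameter narrows the query with get_by('name', ...), otherwise get_all() runs with the pagination/sort/filter facts params. A stubbed sketch of that pattern (client class and data entirely hypothetical):

    # Stub standing in for an hpOneView resource client.
    class StubClient:
        def get_by(self, field, value):
            return [{'name': value, 'uri': '/rest/stub/1'}]

        def get_all(self, **params):
            return [{'name': 'a'}, {'name': 'b'}]

    # The shared *_info lookup shape: narrow by name, else fetch all.
    def fetch(client, name=None, **facts_params):
        return client.get_by('name', name) if name else client.get_all(**facts_params)

    print(fetch(StubClient(), name='network name'))
    print(fetch(StubClient(), start=0, count=3))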
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcoeNetworkModule(OneViewModuleBase): + MSG_CREATED = 'FCoE Network created successfully.' + MSG_UPDATED = 'FCoE Network updated successfully.' + MSG_DELETED = 'FCoE Network deleted successfully.' + MSG_ALREADY_PRESENT = 'FCoE Network is already present.' + MSG_ALREADY_ABSENT = 'FCoE Network is already absent.' + RESOURCE_FACT_NAME = 'fcoe_network' + + def __init__(self): + + additional_arg_spec = dict(data=dict(required=True, type='dict'), + state=dict(default='present', + choices=['present', 'absent'])) + + super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, + validate_etag_support=True) + + self.resource_client = self.oneview_client.fcoe_networks + + def execute_module(self): + resource = self.get_by_name(self.data.get('name')) + + if self.state == 'present': + return self.__present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def __present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, 'fcoe_network', scope_uris) + return result + + +def main(): + FcoeNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_fcoe_network_info.py b/plugins/modules/oneview_fcoe_network_info.py deleted file mode 120000 index 269b2bfde8..0000000000 --- a/plugins/modules/oneview_fcoe_network_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_fcoe_network_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_fcoe_network_info.py b/plugins/modules/oneview_fcoe_network_info.py new file mode 100644 index 0000000000..b1b4f49fda --- /dev/null +++ b/plugins/modules/oneview_fcoe_network_info.py @@ -0,0 +1,110 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_fcoe_network_info +short_description: Retrieve the information about one or more of the OneView FCoE Networks +description: + - Retrieve the information about one or more of the FCoE Networks from OneView. +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - FCoE Network name. 
+ type: str +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all FCoE Networks + community.general.oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about FCoE Networks + ansible.builtin.debug: + msg: "{{ result.fcoe_networks }}" + +- name: Gather paginated, filtered and sorted information about FCoE Networks + community.general.oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: 'vlanId=2' + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of FCoE Networks + ansible.builtin.debug: + msg: "{{ result.fcoe_networks }}" + +- name: Gather information about a FCoE Network by name + community.general.oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + name: Test FCoE Network Information + delegate_to: localhost + register: result + +- name: Print fetched information about FCoE Network found by name + ansible.builtin.debug: + msg: "{{ result.fcoe_networks }}" +""" + +RETURN = r""" +fcoe_networks: + description: Has all the OneView information about the FCoE Networks. + returned: Always, but can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcoeNetworkInfoModule(OneViewModuleBase): + def __init__(self): + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict'), + ) + + super(FcoeNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + if self.module.params['name']: + fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name']) + else: + fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params) + + return dict(changed=False, fcoe_networks=fcoe_networks) + + +def main(): + FcoeNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py deleted file mode 120000 index 7e70aaaac7..0000000000 --- a/plugins/modules/oneview_logical_interconnect_group.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_logical_interconnect_group.py \ No newline at end of file diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py new file mode 100644 index 0000000000..9f33726e8c --- /dev/null +++ b/plugins/modules/oneview_logical_interconnect_group.py @@ -0,0 +1,170 @@ +#!/usr/bin/python + +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_logical_interconnect_group +short_description: Manage OneView Logical Interconnect Group resources +description: + - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. 
+requirements: + - hpOneView >= 4.0.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Logical Interconnect Group resource. + - V(absent) removes the resource from OneView, if it exists. + - V(present) ensures data properties are compliant with OneView. + type: str + choices: [absent, present] + default: present + data: + description: + - List with the Logical Interconnect Group properties. + type: dict + required: true +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Ensure that the Logical Interconnect Group is present + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: Test Logical Interconnect Group + uplinkSets: [] + enclosureType: C7000 + interconnectMapTemplate: + interconnectMapEntryTemplates: + - logicalDownlinkUri: + logicalLocation: + locationEntries: + - relativeValue: 1 + type: Bay + - relativeValue: 1 + type: Enclosure + permittedInterconnectTypeName: HP VC Flex-10/10D Module + # Alternatively you can inform permittedInterconnectTypeUri + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group has the specified scopes + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: Test Logical Interconnect Group + scopeUris: + - /rest/scopes/00SC123456 + - /rest/scopes/01SC123456 + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group is present with name 'Test' + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: New Logical Interconnect Group + newName: Test + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group is absent + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: New Logical Interconnect Group + delegate_to: localhost +""" + +RETURN = r""" +logical_interconnect_group: + description: Has the facts about the OneView Logical Interconnect Group. + returned: On O(state=present). Can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class LogicalInterconnectGroupModule(OneViewModuleBase): + MSG_CREATED = 'Logical Interconnect Group created successfully.' + MSG_UPDATED = 'Logical Interconnect Group updated successfully.' + MSG_DELETED = 'Logical Interconnect Group deleted successfully.' + MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.' + MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.' + MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.' 
+ + RESOURCE_FACT_NAME = 'logical_interconnect_group' + + def __init__(self): + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + data=dict(required=True, type='dict') + ) + + super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec, + validate_etag_support=True) + self.resource_client = self.oneview_client.logical_interconnect_groups + + def execute_module(self): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + return self.__present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def __present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + + self.__replace_name_by_uris(self.data) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + + if scope_uris is not None: + result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris) + + return result + + def __replace_name_by_uris(self, data): + map_template = data.get('interconnectMapTemplate') + + if map_template: + map_entry_templates = map_template.get('interconnectMapEntryTemplates') + if map_entry_templates: + for value in map_entry_templates: + permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None) + if permitted_interconnect_type_name: + value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name( + permitted_interconnect_type_name).get('uri') + + def __get_interconnect_type_by_name(self, name): + i_type = self.oneview_client.interconnect_types.get_by('name', name) + if i_type: + return i_type[0] + else: + raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND) + + +def main(): + LogicalInterconnectGroupModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_logical_interconnect_group_info.py b/plugins/modules/oneview_logical_interconnect_group_info.py deleted file mode 120000 index f123ead7e7..0000000000 --- a/plugins/modules/oneview_logical_interconnect_group_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_logical_interconnect_group_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_logical_interconnect_group_info.py b/plugins/modules/oneview_logical_interconnect_group_info.py new file mode 100644 index 0000000000..25a278b15a --- /dev/null +++ b/plugins/modules/oneview_logical_interconnect_group_info.py @@ -0,0 +1,123 @@ +#!/usr/bin/python + +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_logical_interconnect_group_info +short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups +description: + - Retrieve information about one or more of the Logical Interconnect Groups from OneView. +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Logical Interconnect Group name. 
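The __replace_name_by_uris() method above is what lets the playbook examples say permittedInterconnectTypeName instead of a REST URI. A standalone sketch of that rewrite, with a hypothetical in-memory catalogue standing in for oneview_client.interconnect_types:

    # Hypothetical name-to-URI catalogue; in the module this is a OneView query.
    TYPE_CATALOGUE = {'HP VC Flex-10/10D Module': '/rest/interconnect-types/abc123'}

    def replace_name_by_uri(entry):
        name = entry.pop('permittedInterconnectTypeName', None)
        if name:
            if name not in TYPE_CATALOGUE:
                raise LookupError('Interconnect Type was not found.')
            entry['permittedInterconnectTypeUri'] = TYPE_CATALOGUE[name]
        return entry

    print(replace_name_by_uri({'permittedInterconnectTypeName': 'HP VC Flex-10/10D Module'}))
    # -> {'permittedInterconnectTypeUri': '/rest/interconnect-types/abc123'}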
+ type: str +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all Logical Interconnect Groups + community.general.oneview_logical_interconnect_group_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Logical Interconnect Groups + ansible.builtin.debug: + msg: "{{ result.logical_interconnect_groups }}" + +- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups + community.general.oneview_logical_interconnect_group_info: + params: + start: 0 + count: 3 + sort: name:descending + filter: name=LIGName + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Logical Interconnect Groups + ansible.builtin.debug: + msg: "{{ result.logical_interconnect_groups }}" + +- name: Gather information about a Logical Interconnect Group by name + community.general.oneview_logical_interconnect_group_info: + name: logical interconnect group name + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Logical Interconnect Group found by name + ansible.builtin.debug: + msg: "{{ result.logical_interconnect_groups }}" +""" + +RETURN = r""" +logical_interconnect_groups: + description: Has all the OneView information about the Logical Interconnect Groups. + returned: Always, but can be null. 
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class LogicalInterconnectGroupInfoModule(OneViewModuleBase): + def __init__(self): + + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict'), + ) + + super(LogicalInterconnectGroupInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + if self.module.params.get('name'): + ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name']) + else: + ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params) + + return dict(changed=False, logical_interconnect_groups=ligs) + + +def main(): + LogicalInterconnectGroupInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py deleted file mode 120000 index b57e024d95..0000000000 --- a/plugins/modules/oneview_network_set.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_network_set.py \ No newline at end of file diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py new file mode 100644 index 0000000000..a7a9592a5b --- /dev/null +++ b/plugins/modules/oneview_network_set.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_network_set +short_description: Manage HPE OneView Network Set resources +description: + - Provides an interface to manage Network Set resources. Can create, update, or delete. +requirements: + - hpOneView >= 4.0.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Network Set resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with the Network Set properties. 
+ type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Create a Network Set + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + networkUris: + - Test Ethernet Network_1 # can be a name + - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI + delegate_to: localhost + +- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + newName: OneViewSDK Test Network Set - Renamed + networkUris: + - Test Ethernet Network_1 + delegate_to: localhost + +- name: Delete the Network Set + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: OneViewSDK Test Network Set - Renamed + delegate_to: localhost + +- name: Update the Network set with two scopes + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + scopeUris: + - /rest/scopes/01SC123456 + - /rest/scopes/02SC123456 + delegate_to: localhost +""" + +RETURN = r""" +network_set: + description: Has the facts about the Network Set. + returned: On O(state=present), but can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class NetworkSetModule(OneViewModuleBase): + MSG_CREATED = 'Network Set created successfully.' + MSG_UPDATED = 'Network Set updated successfully.' + MSG_DELETED = 'Network Set deleted successfully.' + MSG_ALREADY_PRESENT = 'Network Set is already present.' + MSG_ALREADY_ABSENT = 'Network Set is already absent.' 
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: ' + RESOURCE_FACT_NAME = 'network_set' + + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + data=dict(required=True, type='dict')) + + def __init__(self): + super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec, + validate_etag_support=True) + self.resource_client = self.oneview_client.network_sets + + def execute_module(self): + resource = self.get_by_name(self.data.get('name')) + + if self.state == 'present': + return self._present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def _present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + self._replace_network_name_by_uri(self.data) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris) + return result + + def _get_ethernet_network_by_name(self, name): + result = self.oneview_client.ethernet_networks.get_by('name', name) + return result[0] if result else None + + def _get_network_uri(self, network_name_or_uri): + if network_name_or_uri.startswith('/rest/ethernet-networks'): + return network_name_or_uri + else: + enet_network = self._get_ethernet_network_by_name(network_name_or_uri) + if enet_network: + return enet_network['uri'] + else: + raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri) + + def _replace_network_name_by_uri(self, data): + if 'networkUris' in data: + data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']] + + +def main(): + NetworkSetModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py deleted file mode 120000 index 2fea73c4aa..0000000000 --- a/plugins/modules/oneview_network_set_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/oneview/oneview_network_set_info.py \ No newline at end of file diff --git a/plugins/modules/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py new file mode 100644 index 0000000000..4b413f278e --- /dev/null +++ b/plugins/modules/oneview_network_set_info.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: oneview_network_set_info +short_description: Retrieve information about the OneView Network Sets +description: + - Retrieve information about the Network Sets from OneView. +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Network Set name. + type: str + + options: + description: + - 'List with options to gather information about Network Set. Option allowed: V(withoutEthernet). The option V(withoutEthernet) + retrieves the list of network_sets excluding Ethernet networks.' 
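NetworkSetModule above accepts each networkUris entry as either a plain Ethernet network name or a ready-made URI, as its examples show. A minimal sketch of the resolution rule in _get_network_uri(), with a hypothetical lookup table in place of the OneView client:

    # Hypothetical name-to-URI table; the module queries ethernet_networks instead.
    NETWORKS = {'Test Ethernet Network_1': '/rest/ethernet-networks/e4360c9d'}

    def get_network_uri(name_or_uri):
        if name_or_uri.startswith('/rest/ethernet-networks'):
            return name_or_uri  # already a URI: pass through untouched
        if name_or_uri in NETWORKS:
            return NETWORKS[name_or_uri]  # a name: resolve it
        raise LookupError('Ethernet Network not found: ' + name_or_uri)

    print([get_network_uri(x) for x in
           ['Test Ethernet Network_1', '/rest/ethernet-networks/e4360c9d']])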
+ type: list + elements: str + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all Network Sets + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Sets + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather paginated, filtered and sorted information about Network Sets + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: name='netset001' + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Network Sets + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about all Network Sets, excluding Ethernet networks + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + options: + - withoutEthernet + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Sets, excluding Ethernet networks + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about a Network Set by name + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: Name of the Network Set + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Set found by name + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about a Network Set by name, excluding Ethernet networks + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: Name of the Network Set + options: + - withoutEthernet + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Set found by name, excluding Ethernet networks + ansible.builtin.debug: + msg: "{{ result.network_sets }}" +""" + +RETURN = r""" +network_sets: + description: Has all the OneView information about the Network Sets. + returned: Always, but can be empty. 
+  type: dict
+"""
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+    argument_spec = dict(
+        name=dict(type='str'),
+        options=dict(type='list', elements='str'),
+        params=dict(type='dict'),
+    )
+
+    def __init__(self):
+        super(NetworkSetInfoModule, self).__init__(
+            additional_arg_spec=self.argument_spec,
+            supports_check_mode=True,
+        )
+
+    def execute_module(self):
+        name = self.module.params.get('name')
+
+        if 'withoutEthernet' in self.options:
+            # The REST filter expects a quoted expression like "'name'='myset'",
+            # hence the nested quoting below.
+            filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+            network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+        elif name:
+            network_sets = self.oneview_client.network_sets.get_by('name', name)
+        else:
+            network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+        return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+    NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py
deleted file mode 120000
index c694175759..0000000000
--- a/plugins/modules/oneview_san_manager.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/oneview/oneview_san_manager.py
\ No newline at end of file
diff --git a/plugins/modules/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py
new file mode 100644
index 0000000000..105aca72ac
--- /dev/null
+++ b/plugins/modules/oneview_san_manager.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: oneview_san_manager
+short_description: Manage OneView SAN Manager resources
+description:
+  - Provides an interface to manage SAN Manager resources. It can create, update, or delete them.
+requirements:
+  - hpOneView >= 3.1.1
+author:
+  - Felipe Bulsoni (@fgbulsoni)
+  - Thiago Miotto (@tmiotto)
+  - Adriane Cardozo (@adriane-cardozo)
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Indicates the desired state for the SAN Manager resource.
+      - V(present) ensures data properties are compliant with OneView.
+      - V(absent) removes the resource from OneView, if it exists.
+      - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+    type: str
+    default: present
+    choices: [present, absent, connection_information_set]
+  data:
+    description:
+      - Dictionary with SAN Manager properties.
+ type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + providerDisplayName: Brocade Network Advisor + connectionInfo: + - name: Host + value: 172.18.15.1 + - name: Port + value: 5989 + - name: Username + value: username + - name: Password + value: password + - name: UseSsl + value: true + delegate_to: localhost + +- name: Ensure a Device Manager for the Cisco SAN Provider is present + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + name: 172.18.20.1 + providerDisplayName: Cisco + connectionInfo: + - name: Host + value: 172.18.20.1 + - name: SnmpPort + value: 161 + - name: SnmpUserName + value: admin + - name: SnmpAuthLevel + value: authnopriv + - name: SnmpAuthProtocol + value: sha + - name: SnmpAuthString + value: password + delegate_to: localhost + +- name: Sets the SAN Manager connection information + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: connection_information_set + data: + connectionInfo: + - name: Host + value: '172.18.15.1' + - name: Port + value: '5989' + - name: Username + value: 'username' + - name: Password + value: 'password' + - name: UseSsl + value: true + delegate_to: localhost + +- name: Refreshes the SAN Manager + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + name: 172.18.15.1 + refreshState: RefreshPending + delegate_to: localhost + +- name: Delete the SAN Manager recently created + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: '172.18.15.1' + delegate_to: localhost +""" + +RETURN = r""" +san_manager: + description: Has the OneView facts about the SAN Manager. + returned: On O(state=present). Can be null. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError + + +class SanManagerModule(OneViewModuleBase): + MSG_CREATED = 'SAN Manager created successfully.' + MSG_UPDATED = 'SAN Manager updated successfully.' + MSG_DELETED = 'SAN Manager deleted successfully.' + MSG_ALREADY_PRESENT = 'SAN Manager is already present.' + MSG_ALREADY_ABSENT = 'SAN Manager is already absent.' + MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found." + + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']), + data=dict(type='dict', required=True) + ) + + def __init__(self): + super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True) + self.resource_client = self.oneview_client.san_managers + + def execute_module(self): + if self.data.get('connectionInfo'): + for connection_hash in self.data.get('connectionInfo'): + if connection_hash.get('name') == 'Host': + resource_name = connection_hash.get('value') + elif self.data.get('name'): + resource_name = self.data.get('name') + else: + msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. 
'
+                msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+                raise OneViewModuleValueError(msg)
+
+        resource = self.resource_client.get_by_name(resource_name)
+
+        if self.state == 'present':
+            changed, msg, san_manager = self._present(resource)
+            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+        elif self.state == 'absent':
+            return self.resource_absent(resource, method='remove')
+
+        elif self.state == 'connection_information_set':
+            changed, msg, san_manager = self._connection_information_set(resource)
+            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+    def _present(self, resource):
+        if not resource:
+            provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+            return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+        else:
+            merged_data = resource.copy()
+            merged_data.update(self.data)
+
+            # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+            resource.pop('connectionInfo', None)
+            merged_data.pop('connectionInfo', None)
+
+            if self.compare(resource, merged_data):
+                return False, self.MSG_ALREADY_PRESENT, resource
+            else:
+                updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+                return True, self.MSG_UPDATED, updated_san_manager
+
+    def _connection_information_set(self, resource):
+        if not resource:
+            return self._present(resource)
+        else:
+            merged_data = resource.copy()
+            merged_data.update(self.data)
+            # A refresh is not part of setting connection information, so drop it.
+            merged_data.pop('refreshState', None)
+            if not self.data.get('connectionInfo', None):
+                raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+            updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+            return True, self.MSG_UPDATED, updated_san_manager
+
+    def _get_provider_uri_by_display_name(self, data):
+        display_name = data.get('providerDisplayName')
+        provider_uri = self.resource_client.get_provider_uri(display_name)
+
+        if not provider_uri:
+            raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+        return provider_uri
+
+
+def main():
+    SanManagerModule().run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/oneview_san_manager_info.py b/plugins/modules/oneview_san_manager_info.py
deleted file mode 120000
index e0de4939d4..0000000000
--- a/plugins/modules/oneview_san_manager_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/oneview/oneview_san_manager_info.py
\ No newline at end of file
diff --git a/plugins/modules/oneview_san_manager_info.py b/plugins/modules/oneview_san_manager_info.py
new file mode 100644
index 0000000000..e158a40533
--- /dev/null
+++ b/plugins/modules/oneview_san_manager_info.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+  - Retrieve information about one or more of the SAN Managers from OneView.
+requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + provider_display_name: + description: + - Provider Display Name. + type: str + params: + description: + - List of params to delimit, filter and sort the list of resources. + - 'Params allowed:' + - 'V(start): The first item to return, using 0-based indexing.' + - 'V(count): The number of resources to return.' + - 'V(query): A general query string to narrow the list of resources returned.' + - 'V(sort): The sort order of the returned data set.' + type: dict +extends_documentation_fragment: + - community.general.oneview + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather information about all SAN Managers + community.general.oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about SAN Managers + ansible.builtin.debug: + msg: "{{ result.san_managers }}" + +- name: Gather paginated, filtered and sorted information about SAN Managers + community.general.oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + params: + start: 0 + count: 3 + sort: name:ascending + query: isInternal eq false + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of SAN Managers + ansible.builtin.debug: + msg: "{{ result.san_managers }}" + +- name: Gather information about a SAN Manager by provider display name + community.general.oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + provider_display_name: Brocade Network Advisor + delegate_to: localhost + register: result + +- name: Print fetched information about SAN Manager found by provider display name + ansible.builtin.debug: + msg: "{{ result.san_managers }}" +""" + +RETURN = r""" +san_managers: + description: Has all the OneView information about the SAN Managers. + returned: Always, but can be null. 
+ type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class SanManagerInfoModule(OneViewModuleBase): + argument_spec = dict( + provider_display_name=dict(type='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(SanManagerInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + self.resource_client = self.oneview_client.san_managers + + def execute_module(self): + if self.module.params.get('provider_display_name'): + provider_display_name = self.module.params['provider_display_name'] + san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name) + if san_manager: + resources = [san_manager] + else: + resources = [] + else: + resources = self.oneview_client.san_managers.get_all(**self.facts_params) + + return dict(changed=False, san_managers=resources) + + +def main(): + SanManagerInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py deleted file mode 120000 index e26bbcc3e9..0000000000 --- a/plugins/modules/online_server_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/online/online_server_info.py \ No newline at end of file diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py new file mode 100644 index 0000000000..3c241d062b --- /dev/null +++ b/plugins/modules/online_server_info.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: online_server_info +short_description: Gather information about Online servers +description: + - Gather information about the servers. + - U(https://www.online.net/en/dedicated-server). +author: + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.online + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather Online server information + community.general.online_server_info: + api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' + register: result + +- ansible.builtin.debug: + msg: "{{ result.online_server_info }}" +""" + +RETURN = r""" +online_server_info: + description: + - Response from Online API. + - 'For more details please refer to: U(https://console.online.net/en/api/).' 
+ returned: success + type: list + elements: dict + sample: + [ + { + "abuse": "abuse@example.com", + "anti_ddos": false, + "bmc": { + "session_key": null + }, + "boot_mode": "normal", + "contacts": { + "owner": "foobar", + "tech": "foobar" + }, + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "drive_arrays": [ + { + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "raid_controller": { + "$ref": "/api/v1/server/hardware/raidController/9910" + }, + "raid_level": "RAID1" + } + ], + "hardware_watch": true, + "hostname": "sd-42", + "id": 42, + "ip": [ + { + "address": "195.154.172.149", + "mac": "28:92:4a:33:5e:c6", + "reverse": "195-154-172-149.rev.poneytelecom.eu.", + "switch_port_state": "up", + "type": "public" + }, + { + "address": "10.90.53.212", + "mac": "28:92:4a:33:5e:c7", + "reverse": null, + "switch_port_state": "up", + "type": "private" + } + ], + "last_reboot": "2018-08-23T08:32:03.000Z", + "location": { + "block": "A", + "datacenter": "DC3", + "position": 19, + "rack": "A23", + "room": "4 4-4" + }, + "network": { + "ip": [ + "195.154.172.149" + ], + "ipfo": [], + "private": [ + "10.90.53.212" + ] + }, + "offer": "Pro-1-S-SATA", + "os": { + "name": "FreeBSD", + "version": "11.1-RELEASE" + }, + "power": "ON", + "proactive_monitoring": false, + "raid_controllers": [ + { + "$ref": "/api/v1/server/hardware/raidController/9910" + } + ], + "support": "Basic service level" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineServerInfo(Online): + + def __init__(self, module): + super(OnlineServerInfo, self).__init__(module) + self.name = 'api/v1/server' + + def _get_server_detail(self, server_path): + try: + return self.get(path=server_path).json + except OnlineException as exc: + self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) + + def all_detailed_servers(self): + servers_api_path = self.get_resources() + + server_data = ( + self._get_server_detail(server_api_path) + for server_api_path in servers_api_path + ) + + return [s for s in server_data if s is not None] + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + servers_info = OnlineServerInfo(module).all_detailed_servers() + module.exit_json( + online_server_info=servers_info + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py deleted file mode 120000 index 546ac3ebc0..0000000000 --- a/plugins/modules/online_user_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/online/online_user_info.py \ No newline at end of file diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py new file mode 100644 index 0000000000..61b2c23ae8 --- /dev/null +++ b/plugins/modules/online_user_info.py @@ -0,0 +1,76 @@ +#!/usr/bin/python +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: online_user_info +short_description: Gather information 
about Online user
+description:
+  - Gather information about the user.
+author:
+  - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+  - community.general.online
+  - community.general.attributes
+  - community.general.attributes.info_module
+"""
+
+EXAMPLES = r"""
+- name: Gather Online user info
+  community.general.online_user_info:
+  register: result
+
+- ansible.builtin.debug:
+    msg: "{{ result.online_user_info }}"
+"""
+
+RETURN = r"""
+online_user_info:
+  description:
+    - Response from Online API.
+    - 'For more details please refer to: U(https://console.online.net/en/api/).'
+  returned: success
+  type: dict
+  sample:
+    {
+      "company": "foobar LLC",
+      "email": "foobar@example.com",
+      "first_name": "foo",
+      "id": 42,
+      "last_name": "bar",
+      "login": "foobar"
+    }
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+    Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserInfo(Online):
+
+    def __init__(self, module):
+        super(OnlineUserInfo, self).__init__(module)
+        self.name = 'api/v1/user'
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=online_argument_spec(),
+        supports_check_mode=True,
+    )
+
+    try:
+        module.exit_json(
+            online_user_info=OnlineUserInfo(module).get_resources()
+        )
+    except OnlineException as exc:
+        module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py
deleted file mode 120000
index de84668ebb..0000000000
--- a/plugins/modules/open_iscsi.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/open_iscsi.py
\ No newline at end of file
diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py
new file mode 100644
index 0000000000..8ccd5351e2
--- /dev/null
+++ b/plugins/modules/open_iscsi.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, Serge van Ginderachter
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: open_iscsi
+author:
+  - Serge van Ginderachter (@srvg)
+short_description: Manage iSCSI targets with Open-iSCSI
+description:
+  - Discover targets on a given portal, (dis)connect targets, mark targets to manually or auto start, return device nodes
+    of connected targets.
+requirements:
+  - open_iscsi library and tools (iscsiadm)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  portal:
+    description:
+      - The domain name or IP address of the iSCSI target.
+    type: str
+    aliases: [ip]
+  port:
+    description:
+      - The port on which the iSCSI target process listens.
+    type: str
+    default: '3260'
+  target:
+    description:
+      - The iSCSI target name.
+    type: str
+    aliases: [name, targetname]
+  login:
+    description:
+      - Whether the target node should be connected.
+      - When O(target) is omitted, it logs in to all available targets.
+    type: bool
+    aliases: [state]
+  node_auth:
+    description:
+      - The value for C(node.session.auth.authmethod).
+    type: str
+    default: CHAP
+  node_user:
+    description:
+      - The value for C(node.session.auth.username).
+    type: str
+  node_pass:
+    description:
+      - The value for C(node.session.auth.password).
+    type: str
+  node_user_in:
+    description:
+      - The value for C(node.session.auth.username_in).
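+      # node_user/node_pass set the outgoing CHAP credentials; the *_in pair
+      # below configures incoming (mutual) CHAP, applied in target_login().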
+ type: str + version_added: 3.8.0 + node_pass_in: + description: + - The value for C(node.session.auth.password_in). + type: str + version_added: 3.8.0 + auto_node_startup: + description: + - Whether the target node should be automatically connected at startup. + type: bool + aliases: [automatic] + auto_portal_startup: + description: + - Whether the target node portal should be automatically connected at startup. + type: bool + version_added: 3.2.0 + discover: + description: + - Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database. + - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with + O(auto_node_startup=true) always returns a changed state. + type: bool + default: false + show_nodes: + description: + - Whether the list of nodes in the persistent iSCSI database should be returned by the module. + type: bool + default: false + rescan: + description: + - Rescan an established session for discovering new targets. + - When O(target) is omitted, it rescans all sessions. + type: bool + default: false + version_added: 4.1.0 +""" + +EXAMPLES = r""" +- name: Perform a discovery on sun.com and show available target nodes + community.general.open_iscsi: + show_nodes: true + discover: true + portal: sun.com + +- name: Perform a discovery on 10.1.2.3 and show available target nodes + community.general.open_iscsi: + show_nodes: true + discover: true + ip: 10.1.2.3 + +- name: Discover targets on portal and login to the ones available + community.general.open_iscsi: + portal: '{{ iscsi_target }}' + login: true + discover: true + +- name: Connect to the named target, after updating the local persistent database (cache) + community.general.open_iscsi: + login: true + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Disconnect from the cached named target + community.general.open_iscsi: + login: false + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Override and disable automatic portal login on specific portal + community.general.open_iscsi: + login: false + portal: 10.1.1.250 + auto_portal_startup: false + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Rescan one or all established sessions to discover new targets (omit target for all sessions) + community.general.open_iscsi: + rescan: true + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d +""" + +import glob +import os +import re +import socket +import time + +from ansible.module_utils.basic import AnsibleModule + +ISCSIADM = 'iscsiadm' +iscsiadm_cmd = None + + +def compare_nodelists(l1, l2): + l1.sort() + l2.sort() + return l1 == l2 + + +def iscsi_get_cached_nodes(module, portal=None): + cmd = [iscsiadm_cmd, '--mode', 'node'] + rc, out, err = module.run_command(cmd) + + nodes = [] + if rc == 0: + lines = out.splitlines() + for line in lines: + # line format is "ip:port,target_portal_group_tag targetname" + parts = line.split() + if len(parts) > 2: + module.fail_json(msg='error parsing output', cmd=cmd) + target = parts[1] + parts = parts[0].split(':') + target_portal = parts[0] + + if portal is None or portal == target_portal: + nodes.append(target) + + # older versions of scsiadm don't have nice return codes + # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details + # err can contain [N|n]o records... 
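+    # Summarizing the fallback below (illustrative, derived from the condition):
+    #   rc == 21                                  -> newer iscsiadm "no records found" exit code
+    #   rc == 255 and "[N|n]o records found" err  -> older iscsiadm, message-only signal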
+ elif rc == 21 or (rc == 255 and "o records found" in err): + pass + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + return nodes + + +def iscsi_discover(module, portal, port): + cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)] + module.run_command(cmd, check_rc=True) + + +def iscsi_rescan(module, target=None): + if target is None: + cmd = [iscsiadm_cmd, '--mode', 'session', '--rescan'] + else: + cmd = [iscsiadm_cmd, '--mode', 'node', '--rescan', '-T', target] + rc, out, err = module.run_command(cmd) + return out + + +def target_loggedon(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'session'] + rc, out, err = module.run_command(cmd) + + if portal is None: + portal = "" + if port is None: + port = "" + + if rc == 0: + search_re = "%s:%s.*%s" % (re.escape(portal), port, re.escape(target)) + return re.search(search_re, out) is not None + elif rc == 21: + return False + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_login(module, target, check_rc, portal=None, port=None): + node_auth = module.params['node_auth'] + node_user = module.params['node_user'] + node_pass = module.params['node_pass'] + node_user_in = module.params['node_user_in'] + node_pass_in = module.params['node_pass_in'] + + if node_user: + params = [('node.session.auth.authmethod', node_auth), + ('node.session.auth.username', node_user), + ('node.session.auth.password', node_pass)] + for (name, value) in params: + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + module.run_command(cmd, check_rc=check_rc) + + if node_user_in: + params = [('node.session.auth.username_in', node_user_in), + ('node.session.auth.password_in', node_pass_in)] + for (name, value) in params: + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + module.run_command(cmd, check_rc=check_rc) + + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + rc, out, err = module.run_command(cmd, check_rc=check_rc) + return rc + + +def target_logout(module, target): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout'] + module.run_command(cmd, check_rc=True) + + +def target_device_node(target): + # if anyone know a better way to find out which devicenodes get created for + # a given target... + + devices = glob.glob('/dev/disk/by-path/*%s*' % target) + devdisks = [] + for dev in devices: + # exclude partitions + if "-part" not in dev: + devdisk = os.path.realpath(dev) + # only add once (multi-path?) 
+ if devdisk not in devdisks: + devdisks.append(devdisk) + return devdisks + + +def target_isauto(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target] + + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + dummy, out, dummy = module.run_command(cmd, check_rc=True) + + lines = out.splitlines() + for line in lines: + if 'node.startup' in line: + return 'automatic' in line + return False + + +def target_setauto(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic'] + + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + module.run_command(cmd, check_rc=True) + + +def target_setmanual(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual'] + + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + module.run_command(cmd, check_rc=True) + + +def main(): + # load ansible module object + module = AnsibleModule( + argument_spec=dict( + + # target + portal=dict(type='str', aliases=['ip']), + port=dict(type='str', default='3260'), + target=dict(type='str', aliases=['name', 'targetname']), + node_auth=dict(type='str', default='CHAP'), + node_user=dict(type='str'), + node_pass=dict(type='str', no_log=True), + node_user_in=dict(type='str'), + node_pass_in=dict(type='str', no_log=True), + + # actions + login=dict(type='bool', aliases=['state']), + auto_node_startup=dict(type='bool', aliases=['automatic']), + auto_portal_startup=dict(type='bool'), + discover=dict(type='bool', default=False), + show_nodes=dict(type='bool', default=False), + rescan=dict(type='bool', default=False), + ), + + required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']], + required_if=[ + ('discover', True, ['portal']), + ('auto_node_startup', True, ['target']), + ('auto_portal_startup', True, ['target'])], + supports_check_mode=True, + ) + + global iscsiadm_cmd + iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True) + + # parameters + portal = module.params['portal'] + if portal: + try: + portal = socket.getaddrinfo(portal, None)[0][4][0] + except socket.gaierror: + module.fail_json(msg="Portal address is incorrect") + + target = module.params['target'] + port = module.params['port'] + login = module.params['login'] + automatic = module.params['auto_node_startup'] + automatic_portal = module.params['auto_portal_startup'] + discover = module.params['discover'] + show_nodes = module.params['show_nodes'] + rescan = module.params['rescan'] + + check = module.check_mode + + cached = iscsi_get_cached_nodes(module, portal) + + # return json dict + result = {'changed': False} + login_to_all_nodes = False + check_rc = True + + if discover: + if check: + nodes = cached + else: + iscsi_discover(module, portal, port) + nodes = iscsi_get_cached_nodes(module, portal) + if not compare_nodelists(cached, nodes): + result['changed'] |= True + result['cache_updated'] = True + else: + nodes = cached + + if login is not None or automatic is not None: + if target is None: + if len(nodes) > 1: + # Disable strict return code checking if there are multiple targets + # That will allow to skip target where we have no rights to login + login_to_all_nodes = True + check_rc = 
False
+        else:
+            # check given target is in cache
+            check_target = False
+            for node in nodes:
+                if node == target:
+                    check_target = True
+                    break
+            if not check_target:
+                module.fail_json(msg="Specified target not found")
+
+    if show_nodes:
+        result['nodes'] = nodes
+
+    if login is not None:
+        if login_to_all_nodes:
+            result['devicenodes'] = []
+            for index_target in nodes:
+                loggedon = target_loggedon(module, index_target, portal, port)
+                if (login and loggedon) or (not login and not loggedon):
+                    result['changed'] |= False
+                    if login:
+                        result['devicenodes'] += target_device_node(index_target)
+                elif not check:
+                    if login:
+                        login_result = target_login(module, index_target, check_rc, portal, port)
+                        # give udev some time
+                        time.sleep(1)
+                        result['devicenodes'] += target_device_node(index_target)
+                    else:
+                        target_logout(module, index_target)
+                    # Check if there are multiple targets on a single portal and
+                    # do not mark the task changed if host could not login to one of them
+                    if login and len(nodes) > 1 and login_result == 24:
+                        result['changed'] |= False
+                        result['connection_changed'] = False
+                    else:
+                        result['changed'] |= True
+                        result['connection_changed'] = True
+                else:
+                    result['changed'] |= True
+                    result['connection_changed'] = True
+        else:
+            loggedon = target_loggedon(module, target, portal, port)
+            if (login and loggedon) or (not login and not loggedon):
+                result['changed'] |= False
+                if login:
+                    result['devicenodes'] = target_device_node(target)
+            elif not check:
+                if login:
+                    target_login(module, target, check_rc, portal, port)
+                    # give udev some time
+                    time.sleep(1)
+                    result['devicenodes'] = target_device_node(target)
+                else:
+                    target_logout(module, target)
+                result['changed'] |= True
+                result['connection_changed'] = True
+            else:
+                result['changed'] |= True
+                result['connection_changed'] = True
+
+    if automatic is not None and not login_to_all_nodes:
+        isauto = target_isauto(module, target)
+        if (automatic and isauto) or (not automatic and not isauto):
+            result['changed'] |= False
+            result['automatic_changed'] = False
+        elif not check:
+            if automatic:
+                target_setauto(module, target)
+            else:
+                target_setmanual(module, target)
+            result['changed'] |= True
+            result['automatic_changed'] = True
+        else:
+            result['changed'] |= True
+            result['automatic_changed'] = True
+
+    if automatic_portal is not None and not login_to_all_nodes:
+        isauto = target_isauto(module, target, portal, port)
+        if (automatic_portal and isauto) or (not automatic_portal and not isauto):
+            result['changed'] |= False
+            result['automatic_portal_changed'] = False
+        elif not check:
+            if automatic_portal:
+                target_setauto(module, target, portal, port)
+            else:
+                target_setmanual(module, target, portal, port)
+            result['changed'] |= True
+            result['automatic_portal_changed'] = True
+        else:
+            result['changed'] |= True
+            result['automatic_portal_changed'] = True
+
+    if rescan is not False:
+        result['changed'] = True
+        result['sessions'] = iscsi_rescan(module, target)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py
deleted file mode 120000
index 7814c60535..0000000000
--- a/plugins/modules/openbsd_pkg.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/openbsd_pkg.py
\ No newline at end of file
diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py
new file mode 100644
index 0000000000..8d199a9da4
--- /dev/null
+++ b/plugins/modules/openbsd_pkg.py
@@ -0,0 +1,732 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, Patrik Lundin
+#
+# GNU
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: openbsd_pkg +author: + - Patrik Lundin (@eest) +short_description: Manage packages on OpenBSD +description: + - Manage packages on OpenBSD using the pkg tools. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + version_added: 9.1.0 + details: + - Only works when check mode is not enabled. +options: + name: + description: + - A name or a list of names of the packages. + required: true + type: list + elements: str + state: + description: + - V(present) ensures the package is installed. + - V(latest) ensures the latest version of the package is installed. + - V(absent) ensures the specified package is not installed. + choices: [absent, latest, present, installed, removed] + default: present + type: str + build: + description: + - Build the package from source instead of downloading and installing a binary. Requires that the port source tree is + already installed. Automatically builds and installs the C(sqlports) package, if it is not already installed. + - Mutually exclusive with O(snapshot). + type: bool + default: false + snapshot: + description: + - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel. + - Mutually exclusive with O(build). + type: bool + default: false + version_added: 1.3.0 + ports_dir: + description: + - When used in combination with the O(build) option, allows overriding the default ports source directory. + default: /usr/ports + type: path + clean: + description: + - When updating or removing packages, delete the extra configuration file(s) in the old packages which are annotated + with C(@extra) in the packaging-list. + type: bool + default: false + quick: + description: + - Replace or delete packages quickly; do not bother with checksums before removing normal files. + type: bool + default: false + autoremove: + description: + - Calls C(pkg_delete -a) to remove automatically installed packages which are no longer needed. + type: bool + default: false + version_added: 11.3.0 +notes: + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. 
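+# For example, name: ['tree', 'mtr'] is resolved in a single module invocation,
+# while looping over the same two names runs the whole module once per package.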
+""" + +EXAMPLES = r""" +- name: Make sure nmap is installed + community.general.openbsd_pkg: + name: nmap + state: present + +- name: Make sure nmap is the latest version + community.general.openbsd_pkg: + name: nmap + state: latest + +- name: Make sure nmap is not installed + community.general.openbsd_pkg: + name: nmap + state: absent + +- name: Make sure nmap is installed, build it from source if it is not + community.general.openbsd_pkg: + name: nmap + state: present + build: true + +- name: Specify a pkg flavour with '--' + community.general.openbsd_pkg: + name: vim--no_x11 + state: present + +- name: Specify the default flavour to avoid ambiguity errors + community.general.openbsd_pkg: + name: vim-- + state: present + +- name: Specify a package branch (requires at least OpenBSD 6.0) + community.general.openbsd_pkg: + name: python%3.5 + state: present + +- name: Update all packages on the system + community.general.openbsd_pkg: + name: '*' + state: latest + +- name: Purge a package and its configuration files + community.general.openbsd_pkg: + name: mpd + clean: true + state: absent + +- name: Quickly remove a package without checking checksums + community.general.openbsd_pkg: + name: qt5 + quick: true + state: absent + +- name: Install packages, remove unused dependencies + community.general.openbsd_pkg: + name: ["tree", "mtr"] + autoremove: true + +- name: Remove all unused dependencies + community.general.openbsd_pkg: + name: '*' + autoremove: true +""" + +import os +import platform +import re +import shlex +import sqlite3 + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +# Function used for executing commands. +def execute_command(cmd, module): + # Break command line into arguments. + # This makes run_command() use shell=False which we need to not cause shell + # expansion of special characters like '*'. + cmd_args = shlex.split(cmd) + + # We set TERM to 'dumb' to keep pkg_add happy if the machine running + # ansible is using a TERM that the managed machine does not know about, + # e.g.: "No progress meter: failed termcap lookup on xterm-kitty". + return module.run_command(cmd_args, environ_update={'TERM': 'dumb'}) + + +def get_all_installed(module): + """ + Get all installed packaged. Used to support diff mode + """ + command = 'pkg_info -Iq' + + rc, stdout, stderr = execute_command(command, module) + + if stderr: + module.fail_json(msg="failed in get_all_installed(): %s" % stderr) + + return stdout + + +# Function used to find out if a package is currently installed. +def get_package_state(names, pkg_spec, module): + info_cmd = 'pkg_info -Iq' + + for name in names: + command = "%s inst:%s" % (info_cmd, name) + + rc, stdout, stderr = execute_command(command, module) + + if stderr: + match = re.search(r"^Can't find inst:%s$" % re.escape(name), stderr) + if match: + pkg_spec[name]['installed_state'] = False + else: + module.fail_json(msg="failed in get_package_state(): " + stderr) + + if stdout: + # If the requested package name is just a stem, like "python", we may + # find multiple packages with that name. + pkg_spec[name]['installed_names'] = stdout.splitlines() + module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names']) + pkg_spec[name]['installed_state'] = True + else: + pkg_spec[name]['installed_state'] = False + + +# Function used to make sure a package is present. 
+def package_present(names, pkg_spec, module):
+    build = module.params['build']
+
+    for name in names:
+        # It is possible package_present() has been called from package_latest().
+        # In that case we do not want to operate on the whole list of names,
+        # only the leftovers.
+        if pkg_spec['package_latest_leftovers']:
+            if name not in pkg_spec['package_latest_leftovers']:
+                module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+                continue
+            else:
+                module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+        if module.check_mode:
+            install_cmd = 'pkg_add -Imn'
+        else:
+            if build is True:
+                port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+                if os.path.isdir(port_dir):
+                    if pkg_spec[name]['flavor']:
+                        flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+                        install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+                    elif pkg_spec[name]['subpackage']:
+                        install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+                                                                                                                               pkg_spec[name]['subpackage'])
+                    else:
+                        install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+                else:
+                    module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+            else:
+                install_cmd = 'pkg_add -Im'
+
+        if module.params['snapshot'] is True:
+            install_cmd += ' -Dsnap'
+
+        if pkg_spec[name]['installed_state'] is False:
+
+            # Attempt to install the package
+            if build is True and not module.check_mode:
+                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+            else:
+                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+            # The behaviour of pkg_add is a bit different depending on whether a
+            # specific version is supplied or not.
+            #
+            # When a specific version is supplied the return code will be 0 when
+            # a package is found and 1 when it is not. If a version is not
+            # supplied the tool will exit 0 in both cases.
+            #
+            # It is important to note that "version" relates to the
+            # packages-specs(7) notion of a version. If using the branch syntax
+            # (like "python%3.5") even though a branch name may look like a
+            # version string it is not used as one by pkg_add.
+            if pkg_spec[name]['version'] or build is True:
+                # Depend on the return code.
+                module.debug("package_present(): depending on return code for name '%s'" % name)
+                if pkg_spec[name]['rc']:
+                    pkg_spec[name]['changed'] = False
+            else:
+                # Depend on stderr instead.
+                module.debug("package_present(): depending on stderr for name '%s'" % name)
+                if pkg_spec[name]['stderr']:
+                    # There is a corner case where having an empty directory in
+                    # installpath prior to the right location will result in a
+                    # "file:/local/package/directory/ is empty" message on stderr
+                    # while still installing the package, so we need to look
+                    # for a message like "packagename-1.0: ok" just in case.
+                    match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout'])
+
+                    if match:
+                        # It turns out we were able to install the package.
+                        module.debug("package_present(): we were able to install package for name '%s'" % name)
+                        pkg_spec[name]['changed'] = True
+                    else:
+                        # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail for name '%s'" % name) + pkg_spec[name]['rc'] = 1 + pkg_spec[name]['changed'] = False + else: + module.debug("package_present(): stderr was not set for name '%s'" % name) + + if pkg_spec[name]['rc'] == 0: + pkg_spec[name]['changed'] = True + + else: + pkg_spec[name]['rc'] = 0 + pkg_spec[name]['stdout'] = '' + pkg_spec[name]['stderr'] = '' + pkg_spec[name]['changed'] = False + + +# Function used to make sure a package is the latest available version. +def package_latest(names, pkg_spec, module): + if module.params['build'] is True: + module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build']) + + upgrade_cmd = 'pkg_add -um' + + if module.check_mode: + upgrade_cmd += 'n' + + if module.params['clean']: + upgrade_cmd += 'c' + + if module.params['quick']: + upgrade_cmd += 'q' + + if module.params['snapshot']: + upgrade_cmd += ' -Dsnap' + + for name in names: + if pkg_spec[name]['installed_state'] is True: + + # Attempt to upgrade the package. + (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module) + + # Look for output looking something like "nmap-6.01->6.25: ok" to see if + # something changed (or would have changed). Use \W to delimit the match + # from progress meter output. + pkg_spec[name]['changed'] = False + for installed_name in pkg_spec[name]['installed_names']: + module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) + match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout']) + if match: + module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) + + pkg_spec[name]['changed'] = True + break + + # FIXME: This part is problematic. Based on the issues mentioned (and + # handled) in package_present() it is not safe to blindly trust stderr + # as an indicator that the command failed, and in the case with + # empty installpath directories this will break. + # + # For now keep this safeguard here, but ignore it if we managed to + # parse out a successful update above. This way we will report a + # successful run when we actually modify something but fail + # otherwise. + if pkg_spec[name]['changed'] is not True: + if pkg_spec[name]['stderr']: + pkg_spec[name]['rc'] = 1 + + else: + # Note packages that need to be handled by package_present + module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name) + pkg_spec['package_latest_leftovers'].append(name) + + # If there were any packages that were not installed we call + # package_present() which will handle those. + if pkg_spec['package_latest_leftovers']: + module.debug("package_latest(): calling package_present() to handle leftovers") + package_present(names, pkg_spec, module) + + +# Function used to make sure a package is not installed. +def package_absent(names, pkg_spec, module): + remove_cmd = 'pkg_delete -I' + + if module.check_mode: + remove_cmd += 'n' + + if module.params['clean']: + remove_cmd += 'c' + + if module.params['quick']: + remove_cmd += 'q' + + for name in names: + if pkg_spec[name]['installed_state'] is True: + # Attempt to remove the package. 
+            (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+            if pkg_spec[name]['rc'] == 0:
+                pkg_spec[name]['changed'] = True
+            else:
+                pkg_spec[name]['changed'] = False
+
+        else:
+            pkg_spec[name]['rc'] = 0
+            pkg_spec[name]['stdout'] = ''
+            pkg_spec[name]['stderr'] = ''
+            pkg_spec[name]['changed'] = False
+
+
+# Function used to remove unused dependencies.
+def package_rm_unused_deps(pkg_spec, module):
+    rm_unused_deps_cmd = 'pkg_delete -Ia'
+
+    if module.check_mode:
+        rm_unused_deps_cmd += 'n'
+
+    if module.params['clean']:
+        rm_unused_deps_cmd += 'c'
+
+    if module.params['quick']:
+        rm_unused_deps_cmd += 'q'
+
+    # If we run the commands, we set changed to true to let
+    # the package list change detection code do the actual work.
+
+    # Create a minimal pkg_spec entry for '*' to store return values.
+    pkg_spec['*'] = {}
+
+    # Attempt to remove unused dependencies.
+    pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(rm_unused_deps_cmd, module)
+    pkg_spec['*']['changed'] = True
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
+def parse_package_name(names, pkg_spec, module):
+
+    # Initialize empty list of package_latest() leftovers.
+    pkg_spec['package_latest_leftovers'] = []
+
+    for name in names:
+        module.debug("parse_package_name(): parsing name: %s" % name)
+        # Do some initial matches so we can base the more advanced regex on that.
+        version_match = re.search("-[0-9]", name)
+        versionless_match = re.search("--", name)
+
+        # Stop if someone is giving us a name that both has a version and is
+        # version-less at the same time.
+        if version_match and versionless_match:
+            module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+        # All information for a given name is kept in the pkg_spec keyed by that name.
+        pkg_spec[name] = {}
+
+        # If name includes a version.
+        if version_match:
+            match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = '-'
+                pkg_spec[name]['version'] = match.group('version')
+                pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+                pkg_spec[name]['flavor'] = match.group('flavor')
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'version'
+                module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+                             "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+        # If name includes no version but is version-less ("--").
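+        # e.g. "vim--no_x11" parses to stem "vim" and flavor "no_x11", while a
+        # bare default flavor like "vim--" leaves flavor as None (illustrative).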
+        elif versionless_match:
+            match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = '-'
+                pkg_spec[name]['version'] = None
+                pkg_spec[name]['flavor_separator'] = '-'
+                pkg_spec[name]['flavor'] = match.group('flavor')
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'versionless'
+                module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+        # If name includes no version, and is not version-less, it is all a
+        # stem, possibly with a branch (%branchname) tacked on at the
+        # end.
+        else:
+            match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = None
+                pkg_spec[name]['version'] = None
+                pkg_spec[name]['flavor_separator'] = None
+                pkg_spec[name]['flavor'] = None
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'stem'
+                module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at else: " + name)
+
+        # Verify that the managed host is new enough to support branch syntax.
+        if pkg_spec[name]['branch']:
+            branch_release = "6.0"
+
+            if LooseVersion(platform.release()) < LooseVersion(branch_release):
+                module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+        # Sanity check that there are no trailing dashes in flavor.
+        # Try to stop strange stuff early so we can be strict later.
+        if pkg_spec[name]['flavor']:
+            match = re.search("-$", pkg_spec[name]['flavor'])
+            if match:
+                module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+    pkg_spec[name]['subpackage'] = None
+    if pkg_spec[name]['stem'] == 'sqlports':
+        return 'databases/sqlports'
+    else:
+        # try for an exact match first
+        sqlports_db_file = '/usr/local/share/sqlports'
+        if not os.path.isfile(sqlports_db_file):
+            module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+        conn = sqlite3.connect(sqlports_db_file)
+        first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+        query = first_part_of_query + ' = ?'
+        module.debug("package_package_source_path(): exact query: %s" % query)
+        cursor = conn.execute(query, (name,))
+        results = cursor.fetchall()
+
+        # next, try for a fuzzier match
+        if len(results) < 1:
+            looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+            query = first_part_of_query + ' LIKE ?'
+            if pkg_spec[name]['flavor']:
+                looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+                module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
+                cursor = conn.execute(query, (looking_for,))
+            elif pkg_spec[name]['style'] == 'versionless':
+                query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("package_package_source_path(): fuzzy versionless query: %s" % query) + cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,)) + else: + module.debug("package_package_source_path(): fuzzy query: %s" % query) + cursor = conn.execute(query, (looking_for,)) + results = cursor.fetchall() + + # error if we don't find exactly 1 match + conn.close() + if len(results) < 1: + module.fail_json(msg="could not find a port by the name '%s'" % name) + if len(results) > 1: + matches = map(lambda x: x[1], results) + module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches)) + + # there's exactly 1 match, so figure out the subpackage, if any, then return + fullpkgpath = results[0][0] + parts = fullpkgpath.split(',') + if len(parts) > 1 and parts[1][0] == '-': + pkg_spec[name]['subpackage'] = parts[1] + return parts[0] + + +# Function used for upgrading all installed packages. +def upgrade_packages(pkg_spec, module): + if module.check_mode: + upgrade_cmd = 'pkg_add -Imnu' + else: + upgrade_cmd = 'pkg_add -Imu' + + if module.params['snapshot']: + upgrade_cmd += ' -Dsnap' + + # Create a minimal pkg_spec entry for '*' to store return values. + pkg_spec['*'] = {} + + # Attempt to upgrade all packages. + pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(upgrade_cmd, module) + + # Try to find any occurrence of a package changing version like: + # "bzip2-1.0.6->1.0.6p0: ok". + match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout']) + if match: + pkg_spec['*']['changed'] = True + + else: + pkg_spec['*']['changed'] = False + + # It seems we can not trust the return value, so depend on the presence of + # stderr to know if something failed. + if pkg_spec['*']['stderr']: + pkg_spec['*']['rc'] = 1 + else: + pkg_spec['*']['rc'] = 0 + + +# =========================================== +# Main control flow. +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), + build=dict(type='bool', default=False), + snapshot=dict(type='bool', default=False), + ports_dir=dict(type='path', default='/usr/ports'), + quick=dict(type='bool', default=False), + clean=dict(type='bool', default=False), + autoremove=dict(type='bool', default=False), + ), + mutually_exclusive=[['snapshot', 'build']], + supports_check_mode=True + ) + + name = module.params['name'] + state = module.params['state'] + build = module.params['build'] + ports_dir = module.params['ports_dir'] + + result = {} + result['name'] = name + result['state'] = state + result['build'] = build + result['diff'] = {} + + # The data structure used to keep track of package information. 
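+    # After parse_package_name() each entry roughly looks like (illustrative):
+    #   pkg_spec['vim--no_x11'] = {'stem': 'vim', 'version': None,
+    #                              'flavor': 'no_x11', 'branch': None,
+    #                              'style': 'versionless', ...}
+    # plus a top-level 'package_latest_leftovers' list shared across names.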
+ pkg_spec = {} + + new_package_list = original_package_list = get_all_installed(module) + + if build is True: + if not os.path.isdir(ports_dir): + module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir)) + + # build sqlports if its not installed yet + parse_package_name(['sqlports'], pkg_spec, module) + get_package_state(['sqlports'], pkg_spec, module) + if not pkg_spec['sqlports']['installed_state']: + module.debug("main(): installing 'sqlports' because build=%s" % module.params['build']) + package_present(['sqlports'], pkg_spec, module) + + asterisk_name = False + for n in name: + if n == '*': + if len(name) != 1: + module.fail_json(msg="the package name '*' can not be mixed with other names") + + asterisk_name = True + + if asterisk_name: + if state != 'latest' and not module.params['autoremove']: + module.fail_json(msg="the package name '*' is only valid when using state=latest or autoremove=true") + + if state == 'latest': + # Perform an upgrade of all installed packages. + upgrade_packages(pkg_spec, module) + + if module.params['autoremove']: + # Remove unused dependencies. + package_rm_unused_deps(pkg_spec, module) + else: + # Parse package names and put results in the pkg_spec dictionary. + parse_package_name(name, pkg_spec, module) + + # Not sure how the branch syntax is supposed to play together + # with build mode. Disable it for now. + for n in name: + if pkg_spec[n]['branch'] and module.params['build'] is True: + module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n)) + + # Get state for all package names. + get_package_state(name, pkg_spec, module) + + # Perform requested action. + if state in ['installed', 'present']: + package_present(name, pkg_spec, module) + elif state in ['absent', 'removed']: + package_absent(name, pkg_spec, module) + elif state == 'latest': + package_latest(name, pkg_spec, module) + + # Handle autoremove if requested for non-asterisk packages + if module.params['autoremove']: + package_rm_unused_deps(pkg_spec, module) + + # The combined changed status for all requested packages. If anything + # is changed this is set to True. + combined_changed = False + + # The combined failed status for all requested packages. If anything + # failed this is set to True. + combined_failed = False + + # We combine all error messages in this comma separated string, for example: + # "msg": "Can't find nmapp\n, Can't find nmappp\n" + combined_error_message = '' + + # Loop over all requested package names and check if anything failed or + # changed. + for n in name: + if pkg_spec[n]['rc'] != 0: + combined_failed = True + if pkg_spec[n]['stderr']: + if combined_error_message: + combined_error_message += ", %s" % pkg_spec[n]['stderr'] + else: + combined_error_message = pkg_spec[n]['stderr'] + else: + if combined_error_message: + combined_error_message += ", %s" % pkg_spec[n]['stdout'] + else: + combined_error_message = pkg_spec[n]['stdout'] + + if pkg_spec[n]['changed'] is True: + combined_changed = True + + # If combined_error_message contains anything at least some part of the + # list of requested package names failed. 
+ if combined_failed: + module.fail_json(msg=combined_error_message, **result) + + result['changed'] = combined_changed + + if not module.check_mode: + new_package_list = get_all_installed(module) + result['diff'] = dict(before=original_package_list, after=new_package_list) + result['changed'] = (result['diff']['before'] != result['diff']['after']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py deleted file mode 120000 index 399af4d2c6..0000000000 --- a/plugins/modules/opendj_backendprop.py +++ /dev/null @@ -1 +0,0 @@ -./identity/opendj/opendj_backendprop.py \ No newline at end of file diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py new file mode 100644 index 0000000000..4f0940d391 --- /dev/null +++ b/plugins/modules/opendj_backendprop.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# Copyright (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: opendj_backendprop +short_description: Update the backend configuration of OpenDJ using the dsconfig set-backend-prop command +description: + - This module updates settings for OpenDJ with the command C(set-backend-prop). + - It checks first using C(get-backend-prop) if configuration needs to be applied. +author: + - Werner Dijkerman (@dj-wasabi) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + opendj_bindir: + description: + - The path to the bin directory of OpenDJ. + required: false + default: /opt/opendj/bin + type: path + hostname: + description: + - The hostname of the OpenDJ server. + required: true + type: str + port: + description: + - The Admin port on which the OpenDJ instance is available. + required: true + type: str + username: + description: + - The username to connect to. + required: false + default: cn=Directory Manager + type: str + password: + description: + - The password for the C(cn=Directory Manager) user. + - Either password or passwordfile is needed. + required: false + type: str + passwordfile: + description: + - Location to the password file which holds the password for the C(cn=Directory Manager) user. + - Either password or passwordfile is needed. + required: false + type: path + backend: + description: + - The name of the backend on which the property needs to be updated. + required: true + type: str + name: + description: + - The configuration setting to update. + required: true + type: str + value: + description: + - The value for the configuration item. + required: true + type: str + state: + description: + - If configuration needs to be added/updated. 
+ required: false + default: "present" + type: str +""" + +EXAMPLES = r""" +- name: Add or update OpenDJ backend properties + opendj_backendprop: + hostname: localhost + port: 4444 + username: "cn=Directory Manager" + password: password + backend: userRoot + name: index-entry-limit + value: 5000 +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule + + +class BackendProp(object): + + def __init__(self, module): + self._module = module + + def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): + my_command = [ + opendj_bindir + '/dsconfig', + 'get-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '-n', '-X', '-s' + ] + password_method + rc, stdout, stderr = self._module.run_command(my_command) + if rc == 0: + return stdout + else: + self._module.fail_json(msg="Error message: " + str(stderr)) + + def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): + my_command = [ + opendj_bindir + '/dsconfig', + 'set-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '--set', name + ":" + value, + '-n', '-X' + ] + password_method + rc, stdout, stderr = self._module.run_command(my_command) + if rc == 0: + return True + else: + self._module.fail_json(msg="Error message: " + stderr) + + def validate_data(self, data=None, name=None, value=None): + for config_line in data.split('\n'): + if config_line: + split_line = config_line.split() + if split_line[0] == name: + if split_line[1] == value: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + opendj_bindir=dict(default="/opt/opendj/bin", type="path"), + hostname=dict(required=True), + port=dict(required=True), + username=dict(default="cn=Directory Manager"), + password=dict(no_log=True), + passwordfile=dict(type="path"), + backend=dict(required=True), + name=dict(required=True), + value=dict(required=True), + state=dict(default="present"), + ), + supports_check_mode=True, + mutually_exclusive=[['password', 'passwordfile']], + required_one_of=[['password', 'passwordfile']] + ) + + opendj_bindir = module.params['opendj_bindir'] + hostname = module.params['hostname'] + port = module.params['port'] + username = module.params['username'] + password = module.params['password'] + passwordfile = module.params['passwordfile'] + backend_name = module.params['backend'] + name = module.params['name'] + value = module.params['value'] + state = module.params['state'] + + if module.params["password"] is not None: + password_method = ['-w', password] + elif module.params["passwordfile"] is not None: + password_method = ['-j', passwordfile] + + opendj = BackendProp(module) + validate = opendj.get_property(opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name) + + if validate: + if not opendj.validate_data(data=validate, name=name, value=value): + if module.check_mode: + module.exit_json(changed=True) + if opendj.set_property(opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name, + name=name, + value=value): + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.exit_json(changed=False) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + 
main() diff --git a/plugins/modules/openwrt_init.py b/plugins/modules/openwrt_init.py deleted file mode 120000 index 9f1facd729..0000000000 --- a/plugins/modules/openwrt_init.py +++ /dev/null @@ -1 +0,0 @@ -./system/openwrt_init.py \ No newline at end of file diff --git a/plugins/modules/openwrt_init.py b/plugins/modules/openwrt_init.py new file mode 100644 index 0000000000..abee16bbf3 --- /dev/null +++ b/plugins/modules/openwrt_init.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# Copyright (c) 2016, Andrew Gaffney +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: openwrt_init +author: + - "Andrew Gaffney (@agaffney)" +short_description: Manage services on OpenWrt +description: + - Controls OpenWrt services on remote hosts. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the service. + required: true + aliases: ['service'] + state: + type: str + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the service. + - V(reloaded) always reloads. + choices: ['started', 'stopped', 'restarted', 'reloaded'] + enabled: + description: + - Whether the service should start on boot. B(At least one) of O(state) and O(enabled) are required. + type: bool + pattern: + type: str + description: + - If the service does not respond to the C(running) command, name a substring to look for as would be found in the output + of the C(ps) command as a stand-in for a C(running) result. If the string is found, the service is assumed to be running. +notes: + - One option other than O(name) is required. 
+requirements: + - An OpenWrt system (with python) +""" + +EXAMPLES = r""" +- name: Start service httpd, if not running + community.general.openwrt_init: + state: started + name: httpd + +- name: Stop service cron, if running + community.general.openwrt_init: + name: cron + state: stopped + +- name: Reload service httpd, in all cases + community.general.openwrt_init: + name: httpd + state: reloaded + +- name: Enable service httpd + community.general.openwrt_init: + name: httpd + enabled: true +""" + +RETURN = r""" +""" + +import os +from ansible.module_utils.basic import AnsibleModule + +module = None +init_script = None + + +# =============================== +# Check if service is enabled +def is_enabled(): + rc, dummy, dummy = module.run_command([init_script, 'enabled']) + return rc == 0 + + +# =========================================== +# Main control flow +def main(): + global module, init_script + # init + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str', aliases=['service']), + state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']), + enabled=dict(type='bool'), + pattern=dict(type='str'), + ), + supports_check_mode=True, + required_one_of=[('state', 'enabled')], + ) + + # initialize + service = module.params['name'] + init_script = '/etc/init.d/' + service + result = { + 'name': service, + 'changed': False, + } + # check if service exists + if not os.path.exists(init_script): + module.fail_json(msg='service %s does not exist' % service) + + # Enable/disable service startup at boot if requested + if module.params['enabled'] is not None: + # do we need to enable the service? + enabled = is_enabled() + + # default to current state + result['enabled'] = enabled + + # Change enable/disable if needed + if enabled != module.params['enabled']: + result['changed'] = True + action = 'enable' if module.params['enabled'] else 'disable' + + if not module.check_mode: + rc, dummy, err = module.run_command([init_script, action]) + # openwrt init scripts can return a non-zero exit code on a successful 'enable' + # command if the init script doesn't contain a STOP value, so we ignore the exit + # code and explicitly check if the service is now in the desired state + if is_enabled() != module.params['enabled']: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err)) + + result['enabled'] = not enabled + + if module.params['state'] is not None: + running = False + + # check if service is currently running + if module.params['pattern']: + # Find ps binary + psbin = module.get_bin_path('ps', True) + + # this should be busybox ps, so we only want/need to the 'w' option + rc, psout, dummy = module.run_command([psbin, 'w']) + # If rc is 0, set running as appropriate + if rc == 0: + lines = psout.split("\n") + running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines) + else: + rc, dummy, dummy = module.run_command([init_script, 'running']) + if rc == 0: + running = True + + # default to desired state + result['state'] = module.params['state'] + + # determine action, if any + action = None + if module.params['state'] == 'started': + if not running: + action = 'start' + result['changed'] = True + elif module.params['state'] == 'stopped': + if running: + action = 'stop' + result['changed'] = True + else: + action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded + result['state'] = 'started' + result['changed'] = True + + if action: + if not module.check_mode: + rc, dummy, 
err = module.run_command([init_script, action]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py deleted file mode 120000 index ae098045e0..0000000000 --- a/plugins/modules/opkg.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/opkg.py \ No newline at end of file diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py new file mode 100644 index 0000000000..a65c00193d --- /dev/null +++ b/plugins/modules/opkg.py @@ -0,0 +1,223 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Patrick Pelletier +# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: opkg +author: "Patrick Pelletier (@skinp)" +short_description: Package manager for OpenWrt and Openembedded/Yocto based Linux distributions +description: + - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of package(s) to install/remove. + - V(NAME=VERSION) syntax is also supported to install a package in a certain version. See the examples. This only works + on Yocto based Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is supported since community.general 6.2.0. + aliases: [pkg] + required: true + type: list + elements: str + state: + description: + - State of the package. + choices: ['present', 'absent', 'installed', 'removed'] + default: present + type: str + force: + description: + - The C(opkg --force) parameter used. + choices: + - "depends" + - "maintainer" + - "reinstall" + - "overwrite" + - "downgrade" + - "space" + - "postinstall" + - "remove" + - "checksum" + - "removal-of-dependent-packages" + type: str + update_cache: + description: + - Update the package DB first. + default: false + type: bool + executable: + description: + - The executable location for C(opkg). + type: path + version_added: 7.2.0 +requirements: + - opkg + - python +""" + +EXAMPLES = r""" +- name: Install foo + community.general.opkg: + name: foo + state: present + +- name: Install foo in version 1.2 (opkg>=0.3.2 on Yocto based Linux distributions) + community.general.opkg: + name: foo=1.2 + state: present + +- name: Update cache and install foo + community.general.opkg: + name: foo + state: present + update_cache: true + +- name: Remove foo + community.general.opkg: + name: foo + state: absent + +- name: Remove foo and bar + community.general.opkg: + name: + - foo + - bar + state: absent + +- name: Install foo using overwrite option forcibly + community.general.opkg: + name: foo + state: present + force: overwrite +""" + +RETURN = r""" +version: + description: Version of opkg. 
+ type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 +""" + +import os +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + +class Opkg(StateModuleHelper): + module = dict( + argument_spec=dict( + name=dict(aliases=["pkg"], required=True, type="list", elements="str"), + state=dict(default="present", choices=["present", "installed", "absent", "removed"]), + force=dict(choices=["depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", + "postinstall", "remove", "checksum", "removal-of-dependent-packages"]), + update_cache=dict(default=False, type='bool'), + executable=dict(type="path"), + ), + ) + + def __init_module__(self): + self.vars.set("install_c", 0, output=False, change=True) + self.vars.set("remove_c", 0, output=False, change=True) + + state_map = dict( + query="list-installed", + present="install", + installed="install", + absent="remove", + removed="remove", + ) + + dir, cmd = os.path.split(self.vars.executable) if self.vars.executable else (None, "opkg") + + self.runner = CmdRunner( + self.module, + command=cmd, + arg_formats=dict( + package=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(state_map), + force=cmd_runner_fmt.as_optval("--force-"), + update_cache=cmd_runner_fmt.as_bool("update"), + version=cmd_runner_fmt.as_fixed("--version"), + ), + path_prefix=dir, + ) + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip().replace("opkg version ", "") + + if self.vars.update_cache: + rc, dummy, dummy = self.runner("update_cache").run() + if rc != 0: + self.do_raise("could not update package db") + + @staticmethod + def split_name_and_version(package): + """ Split the name and the version when using the NAME=VERSION syntax """ + splitted = package.split('=', 1) + if len(splitted) == 1: + return splitted[0], None + else: + return splitted[0], splitted[1] + + def _package_in_desired_state(self, name, want_installed, version=None): + dummy, out, dummy = self.runner("state package").run(state="query", package=name) + + has_package = out.startswith(name + " - %s" % ("" if not version else (version + " "))) + return want_installed == has_package + + def state_present(self): + with self.runner("state force package") as ctx: + for package in self.vars.name: + pkg_name, pkg_version = self.split_name_and_version(package) + if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall": + ctx.run(package=package) + self.vars.set("run_info", ctx.run_info, verbosity=4) + if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version): + self.do_raise("failed to install %s" % package) + self.vars.install_c += 1 + if self.vars.install_c > 0: + self.vars.msg = "installed %s package(s)" % self.vars.install_c + else: + self.vars.msg = "package(s) already present" + + def state_absent(self): + with self.runner("state force package") as ctx: + for package in self.vars.name: + package, dummy = self.split_name_and_version(package) + if not self._package_in_desired_state(package, want_installed=False): + ctx.run(package=package) + self.vars.set("run_info", ctx.run_info, verbosity=4) + if not self._package_in_desired_state(package, want_installed=False): + self.do_raise("failed to remove %s" % package) + self.vars.remove_c += 1 + if self.vars.remove_c > 0: + 
self.vars.msg = "removed %s package(s)" % self.vars.remove_c + else: + self.vars.msg = "package(s) already absent" + + state_installed = state_present + state_removed = state_absent + + +def main(): + Opkg.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py deleted file mode 120000 index f28ea958f0..0000000000 --- a/plugins/modules/osx_defaults.py +++ /dev/null @@ -1 +0,0 @@ -./system/osx_defaults.py \ No newline at end of file diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py new file mode 100644 index 0000000000..f694dbaad2 --- /dev/null +++ b/plugins/modules/osx_defaults.py @@ -0,0 +1,414 @@ +#!/usr/bin/python + +# Copyright (c) 2014, GeekChimp - Franck Nijhof (DO NOT CONTACT!) +# Copyright (c) 2019, Ansible project +# Copyright (c) 2019, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: osx_defaults +author: +# DO NOT RE-ADD GITHUB HANDLE! + - Franck Nijhof (!UNKNOWN) +short_description: Manage macOS user defaults +description: + - This module allows users to read, write, and delete macOS user defaults from Ansible scripts. + - MacOS applications and other programs use the defaults system to record user preferences and other information that must + be maintained when the applications are not running (such as default font for new documents, or the position of an Info + panel). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + domain: + description: + - The domain is a domain name of the form C(com.companyname.appname). + type: str + default: NSGlobalDomain + host: + description: + - The host on which the preference should apply. + - The special value V(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool. + type: str + key: + description: + - The key of the user preference. + type: str + type: + description: + - The type of value to write. + type: str + choices: [array, bool, boolean, date, float, int, integer, string] + default: string + check_type: + description: + - Checks if the type of the provided O(value) matches the type of an existing default. + - If the types do not match, raises an error. + type: bool + default: true + version_added: 8.6.0 + array_add: + description: + - Add new elements to the array for a key which has an array as its value. + type: bool + default: false + value: + description: + - The value to write. + - Only required when O(state=present). + type: raw + state: + description: + - The state of the user defaults. + - If set to V(list) it queries the given parameter specified by O(key). Returns V(null) is nothing found or misspelled. + type: str + choices: [absent, list, present] + default: present + path: + description: + - The path in which to search for C(defaults). + type: str + default: /usr/bin:/usr/local/bin +notes: + - Apple Mac caches defaults. You may need to logout and login to apply the changes. 
+""" + +EXAMPLES = r""" +- name: Set boolean valued key for application domain + community.general.osx_defaults: + domain: com.apple.Safari + key: IncludeInternalDebugMenu + type: bool + value: true + state: present + +- name: Set string valued key for global domain + community.general.osx_defaults: + domain: NSGlobalDomain + key: AppleMeasurementUnits + type: string + value: Centimeters + state: present + +- name: Set int valued key for arbitrary plist + community.general.osx_defaults: + domain: /Library/Preferences/com.apple.SoftwareUpdate + key: AutomaticCheckEnabled + type: int + value: 1 + become: true + +- name: Set int valued key only for the current host + community.general.osx_defaults: + domain: com.apple.screensaver + host: currentHost + key: showClock + type: int + value: 1 + +- name: Defaults to global domain and setting value + community.general.osx_defaults: + key: AppleMeasurementUnits + type: string + value: Centimeters + +- name: Setting an array valued key + community.general.osx_defaults: + key: AppleLanguages + type: array + value: + - en + - nl + +- name: Removing a key + community.general.osx_defaults: + domain: com.geekchimp.macable + key: ExampleKeyToRemove + state: absent +""" + +from datetime import datetime +import re + +from ansible.module_utils.basic import AnsibleModule + + +# exceptions --------------------------------------------------------------- {{{ +class OSXDefaultsException(Exception): + def __init__(self, msg): + self.message = msg + + +# /exceptions -------------------------------------------------------------- }}} + +# class MacDefaults -------------------------------------------------------- {{{ +class OSXDefaults(object): + """ Class to manage Mac OS user defaults """ + + # init ---------------------------------------------------------------- {{{ + def __init__(self, module): + """ Initialize this module. 
Finds 'defaults' executable and preps the parameters """ + # Initial var for storing current defaults value + self.current_value = None + self.module = module + self.domain = module.params['domain'] + self.host = module.params['host'] + self.key = module.params['key'] + self.check_type = module.params['check_type'] + self.type = module.params['type'] + self.array_add = module.params['array_add'] + self.value = module.params['value'] + self.state = module.params['state'] + self.path = module.params['path'] + + # Try to find the defaults executable + self.executable = self.module.get_bin_path( + 'defaults', + required=False, + opt_dirs=self.path.split(':'), + ) + + if not self.executable: + raise OSXDefaultsException("Unable to locate defaults executable.") + + # Ensure the value is the correct type + if self.state != 'absent': + self.value = self._convert_type(self.type, self.value) + + # /init --------------------------------------------------------------- }}} + + # tools --------------------------------------------------------------- {{{ + @staticmethod + def is_int(value): + as_str = str(value) + if as_str.startswith("-"): + return as_str[1:].isdigit() + else: + return as_str.isdigit() + + @staticmethod + def _convert_type(data_type, value): + """ Converts value to given type """ + if data_type == "string": + return str(value) + elif data_type in ["bool", "boolean"]: + if isinstance(value, (bytes, str)): + value = value.lower() + if value in [True, 1, "true", "1", "yes"]: + return True + elif value in [False, 0, "false", "0", "no"]: + return False + raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value))) + elif data_type == "date": + try: + return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") + except ValueError: + raise OSXDefaultsException( + "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value)) + ) + elif data_type in ["int", "integer"]: + if not OSXDefaults.is_int(value): + raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value))) + return int(value) + elif data_type == "float": + try: + value = float(value) + except ValueError: + raise OSXDefaultsException("Invalid float value: {0}".format(repr(value))) + return value + elif data_type == "array": + if not isinstance(value, list): + raise OSXDefaultsException("Invalid value. Expected value to be an array") + return value + + raise OSXDefaultsException('Type is not supported: {0}'.format(data_type)) + + def _host_args(self): + """ Returns a normalized list of commandline arguments based on the "host" attribute """ + if self.host is None: + return [] + elif self.host == 'currentHost': + return ['-currentHost'] + else: + return ['-host', self.host] + + def _base_command(self): + """ Returns a list containing the "defaults" executable and any common base arguments """ + return [self.executable] + self._host_args() + + @staticmethod + def _convert_defaults_str_to_list(value): + """ Converts array output from defaults to an list """ + # Split output of defaults. Every line contains a value + value = value.splitlines() + + # Remove first and last item, those are not actual values + value.pop(0) + value.pop(-1) + + # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes + value = [re.sub('^ *"?|"?,? 
*$', '', x.replace('\\"', '"')) for x in value] + + return value + + # /tools -------------------------------------------------------------- }}} + + # commands ------------------------------------------------------------ {{{ + def read(self): + """ Reads value of this domain & key from defaults """ + # First try to find out the type + rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key]) + + # If RC is 1, the key does not exist + if rc == 1: + return None + + # If the RC is not 0, then terrible happened! Ooooh nooo! + if rc != 0: + raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % err) + + # Ok, lets parse the type from output + data_type = out.strip().replace('Type is ', '') + + # Now get the current value + rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key]) + + # Strip output + out = out.strip() + + # A non zero RC at this point is kinda strange... + if rc != 0: + raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % err) + + # Convert string to list when type is array + if data_type == "array": + out = self._convert_defaults_str_to_list(out) + + # Store the current_value + self.current_value = self._convert_type(data_type, out) + + def write(self): + """ Writes value to this domain & key to defaults """ + # We need to convert some values so the defaults commandline understands it + if isinstance(self.value, bool): + if self.value: + value = "TRUE" + else: + value = "FALSE" + elif isinstance(self.value, (int, float)): + value = str(self.value) + elif self.array_add and self.current_value is not None: + value = list(set(self.value) - set(self.current_value)) + elif isinstance(self.value, datetime): + value = self.value.strftime('%Y-%m-%d %H:%M:%S') + else: + value = self.value + + # When the type is array and array_add is enabled, morph the type :) + if self.type == "array" and self.array_add: + self.type = "array-add" + + # All values should be a list, for easy passing it to the command + if not isinstance(value, list): + value = [value] + + rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value, + expand_user_and_vars=False) + + if rc != 0: + raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % err) + + def delete(self): + """ Deletes defaults key from domain """ + rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key]) + if rc != 0: + raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % err) + + # /commands ----------------------------------------------------------- }}} + + # run ----------------------------------------------------------------- {{{ + """ Does the magic! :) """ + + def run(self): + + # Get the current value from defaults + self.read() + + if self.state == 'list': + self.module.exit_json(key=self.key, value=self.current_value) + + # Handle absent state + if self.state == "absent": + if self.current_value is None: + return False + if self.module.check_mode: + return True + self.delete() + return True + + # Check if there is a type mismatch, e.g. given type does not match the type in defaults + if self.check_type: + value_type = type(self.value) + if self.current_value is not None and not isinstance(self.current_value, value_type): + raise OSXDefaultsException("Type mismatch. 
Type in defaults: %s" % type(self.current_value).__name__) + + # Current value matches the given value. Nothing need to be done. Arrays need extra care + if self.type == "array" and self.current_value is not None and not self.array_add and \ + set(self.current_value) == set(self.value): + return False + elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0: + return False + elif self.current_value == self.value: + return False + + if self.module.check_mode: + return True + + # Change/Create/Set given key/value for domain in defaults + self.write() + return True + + # /run ---------------------------------------------------------------- }}} + + +# /class MacDefaults ------------------------------------------------------ }}} + + +# main -------------------------------------------------------------------- {{{ +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', default='NSGlobalDomain'), + host=dict(type='str'), + key=dict(type='str', no_log=False), + check_type=dict(type='bool', default=True), + type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']), + array_add=dict(type='bool', default=False), + value=dict(type='raw'), + state=dict(type='str', default='present', choices=['absent', 'list', 'present']), + path=dict(type='str', default='/usr/bin:/usr/local/bin'), + ), + supports_check_mode=True, + required_if=( + ('state', 'present', ['value']), + ), + ) + + try: + defaults = OSXDefaults(module=module) + module.exit_json(changed=defaults.run()) + except OSXDefaultsException as e: + module.fail_json(msg=e.message) + + +# /main ------------------------------------------------------------------- }}} + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py deleted file mode 120000 index a3faff96c9..0000000000 --- a/plugins/modules/ovh_ip_failover.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovh/ovh_ip_failover.py \ No newline at end of file diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py new file mode 100644 index 0000000000..a32db78451 --- /dev/null +++ b/plugins/modules/ovh_ip_failover.py @@ -0,0 +1,259 @@ +#!/usr/bin/python + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ovh_ip_failover +short_description: Manage OVH IP failover address +description: + - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move an IP + failover (or failover block) between services. +author: "Pascal HERAUD (@pascalheraud)" +notes: + - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with + a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). +requirements: + - ovh >= 0.4.8 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + required: true + description: + - The IP address to manage (can be a single IP like V(1.1.1.1) or a block like V(1.1.1.1/28)). 
+ type: str + service: + required: true + description: + - The name of the OVH service this IP address should be routed. + type: str + endpoint: + required: true + description: + - The endpoint to use (for instance V(ovh-eu)). + type: str + wait_completion: + required: false + default: true + type: bool + description: + - If V(true), the module waits for the IP address to be moved. If false, exit without waiting. The C(taskId) is returned + in module output. + wait_task_completion: + required: false + default: 0 + description: + - If not V(0), the module waits for this task ID to be completed. Use O(wait_task_completion) if you want to wait for + completion of a previously executed task with O(wait_completion=false). You can execute this module repeatedly on + a list of failover IPs using O(wait_completion=false) (see examples). + type: int + application_key: + required: true + description: + - The applicationKey to use. + type: str + application_secret: + required: true + description: + - The application secret to use. + type: str + consumer_key: + required: true + description: + - The consumer key to use. + type: str + timeout: + required: false + default: 120 + description: + - The timeout in seconds used to wait for a task to be completed. Default is 120 seconds. + type: int +""" + +EXAMPLES = r""" +# Route an IP address 1.1.1.1 to the service ns666.ovh.net +- community.general.ovh_ip_failover: + name: 1.1.1.1 + service: ns666.ovh.net + endpoint: ovh-eu + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey +- community.general.ovh_ip_failover: + name: 1.1.1.1 + service: ns666.ovh.net + endpoint: ovh-eu + wait_completion: false + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey + register: moved +- community.general.ovh_ip_failover: + name: 1.1.1.1 + service: ns666.ovh.net + endpoint: ovh-eu + wait_task_completion: "{{moved.taskId}}" + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey +""" + +RETURN = r""" +""" + +import time +from urllib.parse import quote_plus + +try: + import ovh + import ovh.exceptions + from ovh.exceptions import APIError + HAS_OVH = True +except ImportError: + HAS_OVH = False + +from ansible.module_utils.basic import AnsibleModule + + +def getOvhClient(ansibleModule): + endpoint = ansibleModule.params.get('endpoint') + application_key = ansibleModule.params.get('application_key') + application_secret = ansibleModule.params.get('application_secret') + consumer_key = ansibleModule.params.get('consumer_key') + + return ovh.Client( + endpoint=endpoint, + application_key=application_key, + application_secret=application_secret, + consumer_key=consumer_key + ) + + +def waitForNoTask(client, name, timeout): + currentTimeout = timeout + while client.get('/ip/{0}/task'.format(quote_plus(name)), + function='genericMoveFloatingIp', + status='todo'): + time.sleep(1) # Delay for 1 sec + currentTimeout -= 1 + if currentTimeout < 0: + return False + return True + + +def waitForTaskDone(client, name, taskId, timeout): + currentTimeout = timeout + while True: + task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId)) + if task['status'] == 'done': + return True + time.sleep(5) # Delay for 5 sec to not harass the API + currentTimeout -= 5 + if currentTimeout < 0: + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + service=dict(required=True), + endpoint=dict(required=True), + 
wait_completion=dict(default=True, type='bool'), + wait_task_completion=dict(default=0, type='int'), + application_key=dict(required=True, no_log=True), + application_secret=dict(required=True, no_log=True), + consumer_key=dict(required=True, no_log=True), + timeout=dict(default=120, type='int') + ), + supports_check_mode=True + ) + + result = dict( + changed=False + ) + + if not HAS_OVH: + module.fail_json(msg='ovh-api python module is required to run this module ') + + # Get parameters + name = module.params.get('name') + service = module.params.get('service') + timeout = module.params.get('timeout') + wait_completion = module.params.get('wait_completion') + wait_task_completion = module.params.get('wait_task_completion') + + # Connect to OVH API + client = getOvhClient(module) + + # Check that the load balancing exists + try: + ips = client.get('/ip', ip=name, type='failover') + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of ips, ' + 'check application key, secret, consumerkey and parameters. ' + 'Error returned by OVH api was : {0}'.format(apiError)) + + if name not in ips and '{0}/32'.format(name) not in ips: + module.fail_json(msg='IP {0} does not exist'.format(name)) + + # Check that no task is pending before going on + try: + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for no pending ' + 'tasks before executing the module '.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of pending tasks ' + 'of the ip, check application key, secret, consumerkey ' + 'and parameters. Error returned by OVH api was : {0}' + .format(apiError)) + + try: + ipproperties = client.get('/ip/{0}'.format(quote_plus(name))) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the properties ' + 'of the ip, check application key, secret, consumerkey ' + 'and parameters. 
Error returned by OVH api was : {0}' + .format(apiError)) + + if ipproperties['routedTo']['serviceName'] != service: + if not module.check_mode: + if wait_task_completion == 0: + # Move the IP and get the created taskId + task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service) + taskId = task['taskId'] + result['moved'] = True + else: + # Just wait for the given taskId to be completed + taskId = wait_task_completion + result['moved'] = False + result['taskId'] = taskId + if wait_completion or wait_task_completion != 0: + if not waitForTaskDone(client, name, taskId, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of move ip to service'.format(timeout)) + result['waited'] = True + else: + result['waited'] = False + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py deleted file mode 120000 index 382c665c68..0000000000 --- a/plugins/modules/ovh_ip_loadbalancing_backend.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovh/ovh_ip_loadbalancing_backend.py \ No newline at end of file diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py new file mode 100644 index 0000000000..2c786022ba --- /dev/null +++ b/plugins/modules/ovh_ip_loadbalancing_backend.py @@ -0,0 +1,313 @@ +#!/usr/bin/python + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ovh_ip_loadbalancing_backend +short_description: Manage OVH IP LoadBalancing backends +description: + - Manage OVH (French European hosting provider) LoadBalancing IP backends. +author: Pascal Heraud (@pascalheraud) +notes: + - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with + a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). +requirements: + - ovh > 0.3.5 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + required: true + description: + - Name of the LoadBalancing internal name (V(ip-X.X.X.X)). + type: str + backend: + required: true + description: + - The IP address of the backend to update / modify / delete. + type: str + state: + default: present + choices: ['present', 'absent'] + description: + - Determines whether the backend is to be created/modified or deleted. + type: str + probe: + default: 'none' + choices: ['none', 'http', 'icmp', 'oco'] + description: + - Determines the type of probe to use for this backend. + type: str + weight: + default: 8 + description: + - Determines the weight for this backend. + type: int + endpoint: + required: true + description: + - The endpoint to use (for instance V(ovh-eu)). + type: str + application_key: + required: true + description: + - The applicationKey to use. + type: str + application_secret: + required: true + description: + - The application secret to use. + type: str + consumer_key: + required: true + description: + - The consumer key to use. + type: str + timeout: + default: 120 + description: + - The timeout in seconds used to wait for a task to be completed. 
+ type: int +""" + +EXAMPLES = r""" +- name: Adds or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1' + ovh_ip_loadbalancing: + name: ip-1.1.1.1 + backend: 212.1.1.1 + state: present + probe: none + weight: 8 + endpoint: ovh-eu + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey + +- name: Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1' + ovh_ip_loadbalancing: + name: ip-1.1.1.1 + backend: 212.1.1.1 + state: absent + endpoint: ovh-eu + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey +""" + +RETURN = r""" +""" + +import time + +try: + import ovh + import ovh.exceptions + from ovh.exceptions import APIError + HAS_OVH = True +except ImportError: + HAS_OVH = False + +from ansible.module_utils.basic import AnsibleModule + + +def getOvhClient(ansibleModule): + endpoint = ansibleModule.params.get('endpoint') + application_key = ansibleModule.params.get('application_key') + application_secret = ansibleModule.params.get('application_secret') + consumer_key = ansibleModule.params.get('consumer_key') + + return ovh.Client( + endpoint=endpoint, + application_key=application_key, + application_secret=application_secret, + consumer_key=consumer_key + ) + + +def waitForNoTask(client, name, timeout): + currentTimeout = timeout + while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0: + time.sleep(1) # Delay for 1 sec + currentTimeout -= 1 + if currentTimeout < 0: + return False + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + backend=dict(required=True), + weight=dict(default=8, type='int'), + probe=dict(default='none', + choices=['none', 'http', 'icmp', 'oco']), + state=dict(default='present', choices=['present', 'absent']), + endpoint=dict(required=True), + application_key=dict(required=True, no_log=True), + application_secret=dict(required=True, no_log=True), + consumer_key=dict(required=True, no_log=True), + timeout=dict(default=120, type='int') + ) + ) + + if not HAS_OVH: + module.fail_json(msg='ovh-api python module' + 'is required to run this module ') + + # Get parameters + name = module.params.get('name') + state = module.params.get('state') + backend = module.params.get('backend') + weight = module.params.get('weight') + probe = module.params.get('probe') + timeout = module.params.get('timeout') + + # Connect to OVH API + client = getOvhClient(module) + + # Check that the load balancing exists + try: + loadBalancings = client.get('/ip/loadBalancing') + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of loadBalancing, ' + 'check application key, secret, consumerkey and parameters. ' + 'Error returned by OVH api was : {0}'.format(apiError)) + + if name not in loadBalancings: + module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name)) + + # Check that no task is pending before going on + try: + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for no pending ' + 'tasks before executing the module '.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of pending tasks ' + 'of the loadBalancing, check application key, secret, consumerkey ' + 'and parameters. 
Error returned by OVH api was : {0}' + .format(apiError)) + + try: + backends = client.get('/ip/loadBalancing/{0}/backend'.format(name)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of backends ' + 'of the loadBalancing, check application key, secret, consumerkey ' + 'and parameters. Error returned by OVH api was : {0}' + .format(apiError)) + + backendExists = backend in backends + moduleChanged = False + if state == "absent": + if backendExists: + # Remove backend + try: + client.delete( + '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of removing backend task'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for deleting the backend, ' + 'check application key, secret, consumerkey and ' + 'parameters. Error returned by OVH api was : {0}' + .format(apiError)) + moduleChanged = True + else: + if backendExists: + # Get properties + try: + backendProperties = client.get( + '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the backend properties, ' + 'check application key, secret, consumerkey and ' + 'parameters. Error returned by OVH api was : {0}' + .format(apiError)) + + if backendProperties['weight'] != weight: + # Change weight + try: + client.post( + '/ip/loadBalancing/{0}/backend/{1}/setWeight' + .format(name, backend), weight=weight) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of setWeight to backend task' + .format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for updating the weight of the ' + 'backend, check application key, secret, consumerkey ' + 'and parameters. Error returned by OVH api was : {0}' + .format(apiError)) + moduleChanged = True + + if backendProperties['probe'] != probe: + # Change probe + backendProperties['probe'] = probe + try: + client.put( + '/ip/loadBalancing/{0}/backend/{1}' + .format(name, backend), probe=probe) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion of ' + 'setProbe to backend task' + .format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for updating the probe of ' + 'the backend, check application key, secret, ' + 'consumerkey and parameters. Error returned by OVH api ' + 'was : {0}' + .format(apiError)) + moduleChanged = True + + else: + # Creates backend + try: + try: + client.post('/ip/loadBalancing/{0}/backend'.format(name), + ipBackend=backend, probe=probe, weight=weight) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for creating the backend, check ' + 'application key, secret, consumerkey and parameters. ' + 'Error returned by OVH api was : {0}' + .format(apiError)) + + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion of ' + 'backend creation task'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for creating the backend, check ' + 'application key, secret, consumerkey and parameters. 
' + 'Error returned by OVH api was : {0}'.format(apiError)) + moduleChanged = True + + module.exit_json(changed=moduleChanged) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py deleted file mode 120000 index 48f2ffada5..0000000000 --- a/plugins/modules/ovh_monthly_billing.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovh/ovh_monthly_billing.py \ No newline at end of file diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py new file mode 100644 index 0000000000..e297e8979d --- /dev/null +++ b/plugins/modules/ovh_monthly_billing.py @@ -0,0 +1,160 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Francois Lallart (@fraff) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ovh_monthly_billing +author: Francois Lallart (@fraff) +version_added: '0.2.0' +short_description: Manage OVH monthly billing +description: + - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it). +requirements: ["ovh"] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + project_id: + required: true + type: str + description: + - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET). + instance_id: + required: true + type: str + description: + - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET). + endpoint: + type: str + description: + - The endpoint to use (for instance V(ovh-eu)). + application_key: + type: str + description: + - The applicationKey to use. + application_secret: + type: str + description: + - The application secret to use. + consumer_key: + type: str + description: + - The consumer key to use. 
+""" + +EXAMPLES = r""" +- name: Basic usage, using auth from /etc/ovh.conf + community.general.ovh_monthly_billing: + project_id: 0c727a20aa144485b70c44dee9123b46 + instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948 + +# Get openstack cloud ID and instance ID, OVH use them in its API +- name: Get openstack cloud ID and instance ID + os_server_info: + cloud: myProjectName + region_name: myRegionName + server: myServerName + register: openstack_servers + +- name: Use IDs + community.general.ovh_monthly_billing: + project_id: "{{ openstack_servers.0.tenant_id }}" + instance_id: "{{ openstack_servers.0.id }}" + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey +""" + +RETURN = r""" +""" + +import traceback + +try: + import ovh + import ovh.exceptions + from ovh.exceptions import APIError + HAS_OVH = True +except ImportError: + HAS_OVH = False + OVH_IMPORT_ERROR = traceback.format_exc() + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + project_id=dict(required=True), + instance_id=dict(required=True), + endpoint=dict(), + application_key=dict(no_log=True), + application_secret=dict(no_log=True), + consumer_key=dict(no_log=True), + ), + supports_check_mode=True + ) + + # Get parameters + project_id = module.params.get('project_id') + instance_id = module.params.get('instance_id') + endpoint = module.params.get('endpoint') + application_key = module.params.get('application_key') + application_secret = module.params.get('application_secret') + consumer_key = module.params.get('consumer_key') + project = "" + instance = "" + ovh_billing_status = "" + + if not HAS_OVH: + module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh') + + # Connect to OVH API + client = ovh.Client( + endpoint=endpoint, + application_key=application_key, + application_secret=application_secret, + consumer_key=consumer_key + ) + + # Check that the instance exists + try: + project = client.get('/cloud/project/{0}'.format(project_id)) + except ovh.exceptions.ResourceNotFoundError: + module.fail_json(msg='project {0} does not exist'.format(project_id)) + + # Check that the instance exists + try: + instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id)) + except ovh.exceptions.ResourceNotFoundError: + module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id)) + + # Is monthlyBilling already enabled or pending ? 
+ if instance['monthlyBilling'] is not None: + if instance['monthlyBilling']['status'] in ['ok', 'activationPending']: + module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling']) + + if module.check_mode: + module.exit_json(changed=True, msg="Dry Run!") + + try: + ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id)) + module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling']) + except APIError as apiError: + module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError)) + + # We should never reach here + module.fail_json(msg='Internal ovh_monthly_billing module error') + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py deleted file mode 120000 index b5ddfb42b2..0000000000 --- a/plugins/modules/pacemaker_cluster.py +++ /dev/null @@ -1 +0,0 @@ -./clustering/pacemaker_cluster.py \ No newline at end of file diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py new file mode 100644 index 0000000000..f72f0fa5e5 --- /dev/null +++ b/plugins/modules/pacemaker_cluster.py @@ -0,0 +1,169 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Mathieu Bultel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacemaker_cluster +short_description: Manage pacemaker clusters +author: + - Mathieu Bultel (@matbu) + - Dexter Le (@munchtoast) +description: + - This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Indicate desired state of the cluster. + - The value V(maintenance) has been added in community.general 11.1.0. + choices: [cleanup, offline, online, restart, maintenance] + type: str + required: true + name: + description: + - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status + of all nodes. + type: str + aliases: ['node'] + timeout: + description: + - Timeout period (in seconds) for polling the cluster operation. + type: int + default: 300 + force: + description: + - Force the change of the cluster state. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: Set cluster Online + hosts: localhost + gather_facts: false + tasks: + - name: Get cluster state + community.general.pacemaker_cluster: + state: online +""" + +RETURN = r""" +out: + description: The output of the current state of the cluster. It returns a list of the nodes state. 
+ type: str + sample: 'out: [[" overcloud-controller-0", " Online"]]}' + returned: always +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode + + +class PacemakerCluster(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', choices=[ + 'cleanup', 'offline', 'online', 'restart', 'maintenance'], required=True), + name=dict(type='str', aliases=['node']), + timeout=dict(type='int', default=300), + force=dict(type='bool', default=True) + ), + supports_check_mode=True, + ) + default_state = "" + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('apply_all', True if not self.module.params['name'] else False) + get_args = dict(cli_action='cluster', state='status', name=None, apply_all=self.vars.apply_all) + if self.module.params['state'] == "maintenance": + get_args['cli_action'] = "property" + get_args['state'] = "config" + get_args['name'] = "maintenance-mode" + elif self.module.params['state'] == "cleanup": + get_args['cli_action'] = "resource" + get_args['name'] = self.module.params['name'] + + self.vars.set('get_args', get_args) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + + if self.module.params['state'] == "cleanup": + self.module.deprecate( + 'The value `cleanup` for "state" is being deprecated, use pacemaker_resource module instead.', + version='14.0.0', + collection_name='community.general' + ) + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name']) + return dict(rc=result[0], + out=(result[1] if result[1] != "" else None), + err=result[2]) + + def state_cleanup(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_offline(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + def state_online(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + if get_pacemaker_maintenance_mode(self.runner): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + + def state_maintenance(self): + with self.runner('cli_action state name', + output_process=self._process_command_output(True, "Fail"), + 
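+                         # fail_on_err=True with ignore_err_msg="Fail": a non-zero
+                         # pcs exit whose stderr contains "Fail" is tolerated
+                         # instead of raised (see _process_command_output above).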
check_mode_skip=True) as ctx: + ctx.run(cli_action='property', name='maintenance-mode=true') + + def state_restart(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', state='offline', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + if get_pacemaker_maintenance_mode(self.runner): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + + +def main(): + PacemakerCluster.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacemaker_info.py b/plugins/modules/pacemaker_info.py new file mode 100644 index 0000000000..f57accd429 --- /dev/null +++ b/plugins/modules/pacemaker_info.py @@ -0,0 +1,107 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacemaker_info +short_description: Gather information about Pacemaker cluster +author: + - Dexter Le (@munchtoast) +version_added: 11.2.0 +description: + - Gather information about a Pacemaker cluster. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather Pacemaker cluster info + community.general.pacemaker_info: + register: result + +- name: Debug cluster info + ansible.builtin.debug: + msg: "{{ result }}" +""" + +RETURN = r""" +version: + description: Pacemaker CLI version + returned: always + type: str +cluster_info: + description: Cluster information such as the name, UUID, and nodes. + returned: always + type: dict +resource_info: + description: All resources available on the cluster and their status. + returned: success + type: dict +stonith_info: + description: All STONITH information on the cluster. + returned: success + type: dict +constraint_info: + description: All cluster resource constraints on the cluster. + returned: success + type: dict +property_info: + description: All properties present on the cluster. 
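+  # Each *_info value above is the parsed JSON of the matching 'pcs ... config'
+  # call with JSON output (see _get_info in the code below); the exact CLI
+  # rendering is assembled by pacemaker_runner.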
+ returned: success + type: dict +""" + +import json + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner + + +class PacemakerInfo(ModuleHelper): + module = dict( + argument_spec=dict(), + supports_check_mode=True, + ) + info_vars = { + "cluster_info": "cluster", + "resource_info": "resource", + "stonith_info": "stonith", + "constraint_info": "constraint", + "property_info": "property" + } + output_params = info_vars.keys() + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + def _process_command_output(self, cli_action=""): + def process(rc, out, err): + if rc != 0: + self.do_raise('pcs {0} config failed with error (rc={1}): {2}'.format(cli_action, rc, err)) + out = json.loads(out) + return None if out == "" else out + return process + + def _get_info(self, cli_action): + with self.runner("cli_action config output_format", output_process=self._process_command_output(cli_action)) as ctx: + return ctx.run(cli_action=cli_action, output_format="json") + + def __run__(self): + for key, cli_action in sorted(self.info_vars.items()): + self.vars.set(key, self._get_info(cli_action)) + + +def main(): + PacemakerInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacemaker_resource.py b/plugins/modules/pacemaker_resource.py new file mode 100644 index 0000000000..c3c11f683a --- /dev/null +++ b/plugins/modules/pacemaker_resource.py @@ -0,0 +1,256 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacemaker_resource +short_description: Manage pacemaker resources +author: + - Dexter Le (@munchtoast) +version_added: 10.5.0 +description: + - This module can manage resources in a Pacemaker cluster using the pacemaker CLI. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Indicate desired state for cluster resource. + - The states V(cleanup) and V(cloned) have been added in community.general 11.3.0. + - If O(state=cloned) or O(state=present), you can set O(resource_clone_ids) and O(resource_clone_meta) to determine exactly what and how to clone. + choices: [present, absent, cloned, enabled, disabled, cleanup] + default: present + type: str + name: + description: + - Specify the resource name to create or clone to. + - This is required if O(state=present), O(state=absent), O(state=enabled), or O(state=disabled). + type: str + resource_type: + description: + - Resource type to create. + type: dict + suboptions: + resource_name: + description: + - Specify the resource type name. + type: str + resource_standard: + description: + - Specify the resource type standard. + type: str + resource_provider: + description: + - Specify the resource type providers. + type: str + resource_option: + description: + - Specify the resource option to create. + type: list + elements: str + default: [] + resource_operation: + description: + - List of operations to associate with resource. 
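+      # For instance (taken from EXAMPLES below), a single entry may look like:
+      #   {operation_action: monitor, operation_option: [interval=20]}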
+ type: list + elements: dict + default: [] + suboptions: + operation_action: + description: + - Operation action to associate with resource. + type: str + operation_option: + description: + - Operation option to associate with action. + type: list + elements: str + resource_meta: + description: + - List of meta to associate with resource. + type: list + elements: str + resource_argument: + description: + - Action to associate with resource. + type: dict + suboptions: + argument_action: + description: + - Action to apply to resource. + type: str + choices: [clone, master, group, promotable] + argument_option: + description: + - Options to associate with resource action. + type: list + elements: str + resource_clone_ids: + description: + - List of clone resource IDs to clone from. + type: list + elements: str + version_added: 11.3.0 + resource_clone_meta: + description: + - List of metadata to associate with clone resource. + type: list + elements: str + version_added: 11.3.0 + wait: + description: + - Timeout period for polling the resource creation. + type: int + default: 300 +""" + +EXAMPLES = r""" +--- +- name: Create pacemaker resource + hosts: localhost + gather_facts: false + tasks: + - name: Create virtual-ip resource + community.general.pacemaker_resource: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - "ip=[192.168.2.1]" + resource_argument: + argument_action: group + argument_option: + - master + resource_operation: + - operation_action: monitor + operation_option: + - interval=20 +""" + +RETURN = r""" +cluster_resources: + description: The cluster resource output message. + type: str + sample: "Assumed agent name ocf:heartbeat:IPaddr2 (deduced from IPaddr2)" + returned: always +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode + + +class PacemakerResource(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=[ + 'present', 'absent', 'cloned', 'enabled', 'disabled', 'cleanup']), + name=dict(type='str'), + resource_type=dict(type='dict', options=dict( + resource_name=dict(type='str'), + resource_standard=dict(type='str'), + resource_provider=dict(type='str'), + )), + resource_option=dict(type='list', elements='str', default=list()), + resource_operation=dict(type='list', elements='dict', default=list(), options=dict( + operation_action=dict(type='str'), + operation_option=dict(type='list', elements='str'), + )), + resource_meta=dict(type='list', elements='str'), + resource_argument=dict(type='dict', options=dict( + argument_action=dict(type='str', choices=['clone', 'master', 'group', 'promotable']), + argument_option=dict(type='list', elements='str'), + )), + resource_clone_ids=dict(type='list', elements='str'), + resource_clone_meta=dict(type='list', elements='str'), + wait=dict(type='int', default=300), + ), + required_if=[ + ('state', 'present', ['resource_type', 'resource_option', 'name']), + ('state', 'absent', ['name']), + ('state', 'enabled', ['name']), + ('state', 'disabled', ['name']), + ], + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + self.module.params['name'] = 
self.module.params['name'] or None + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action="resource", state='status') + return dict(rc=result[0], + out=(result[1] if result[1] != "" else None), + err=result[2]) + + def fmt_as_stack_argument(self, value, arg): + if value is not None: + return [x for k in value for x in (arg, k)] + + def state_absent(self): + force = get_pacemaker_maintenance_mode(self.runner) + with self.runner('cli_action state name force', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', force=force) + + def state_present(self): + with self.runner( + 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument ' + 'resource_clone_ids resource_clone_meta wait', + output_process=self._process_command_output(not get_pacemaker_maintenance_mode(self.runner), "already exists"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', resource_clone_ids=self.fmt_as_stack_argument(self.module.params["resource_clone_ids"], "clone")) + + def state_cloned(self): + with self.runner( + 'cli_action state name resource_clone_ids resource_clone_meta wait', + output_process=self._process_command_output( + not get_pacemaker_maintenance_mode(self.runner), + "already a clone resource"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', resource_clone_meta=self.fmt_as_stack_argument(self.module.params["resource_clone_meta"], "meta")) + + def state_enabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_disabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_cleanup(self): + runner_args = ['cli_action', 'state'] + if self.module.params['name']: + runner_args.append('name') + with self.runner(runner_args, output_process=self._process_command_output(True, "Clean"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + +def main(): + PacemakerResource.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacemaker_stonith.py b/plugins/modules/pacemaker_stonith.py new file mode 100644 index 0000000000..f8c6bbddc4 --- /dev/null +++ b/plugins/modules/pacemaker_stonith.py @@ -0,0 +1,218 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = ''' +module: pacemaker_stonith +short_description: Manage Pacemaker STONITH +author: + - Dexter Le (@munchtoast) +version_added: 11.3.0 +description: + - This module manages STONITH in a Pacemaker cluster using the Pacemaker CLI. +seealso: + - name: Pacemaker STONITH documentation + description: Complete documentation for Pacemaker STONITH. 
+ link: https://clusterlabs.org/projects/pacemaker/doc/3.0/Pacemaker_Explained/html/resources.html#stonith +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - Only works when check mode is not enabled. +options: + state: + description: + - Indicate desired state for cluster STONITH. + choices: [present, absent, enabled, disabled] + default: present + type: str + name: + description: + - Specify the STONITH name to create. + required: true + type: str + stonith_type: + description: + - Specify the STONITH device type. + type: str + stonith_options: + description: + - Specify the STONITH option to create. + type: list + elements: str + default: [] + stonith_operations: + description: + - List of operations to associate with STONITH. + type: list + elements: dict + default: [] + suboptions: + operation_action: + description: + - Operation action to associate with STONITH. + type: str + operation_options: + description: + - Operation options to associate with action. + type: list + elements: str + stonith_metas: + description: + - List of metadata to associate with STONITH. + type: list + elements: str + stonith_argument: + description: + - Action to associate with STONITH. + type: dict + suboptions: + argument_action: + description: + - Action to apply to STONITH. + type: str + choices: [group, before, after] + argument_options: + description: + - Options to associate with STONITH action. + type: list + elements: str + agent_validation: + description: + - Enabled agent validation for STONITH creation. + type: bool + default: false + wait: + description: + - Timeout period for polling the STONITH creation. + type: int + default: 300 +''' + +EXAMPLES = ''' +- name: Create virtual-ip STONITH + community.general.pacemaker_stonith: + state: present + name: virtual-stonith + stonith_type: fence_virt + stonith_options: + - "pcmk_host_list=f1" + stonith_operations: + - operation_action: monitor + operation_options: + - "interval=30s" +''' + +RETURN = ''' +previous_value: + description: The value of the STONITH before executing the module. + type: str + sample: " * virtual-stonith\t(stonith:fence_virt):\t Started" + returned: on success +value: + description: The value of the STONITH after executing the module. 
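+  # The samples above and below mirror 'pcs stonith status' output; the \t
+  # sequences stand for literal tab characters in that output.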
+ type: str + sample: " * virtual-stonith\t(stonith:fence_virt):\t Started" + returned: on success +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner + + +class PacemakerStonith(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + name=dict(type='str', required=True), + stonith_type=dict(type='str'), + stonith_options=dict(type='list', elements='str', default=[]), + stonith_operations=dict(type='list', elements='dict', default=[], options=dict( + operation_action=dict(type='str'), + operation_options=dict(type='list', elements='str'), + )), + stonith_metas=dict(type='list', elements='str'), + stonith_argument=dict(type='dict', options=dict( + argument_action=dict(type='str', choices=['before', 'after', 'group']), + argument_options=dict(type='list', elements='str'), + )), + agent_validation=dict(type='bool', default=False), + wait=dict(type='int', default=300), + ), + required_if=[('state', 'present', ['stonith_type', 'stonith_options'])], + supports_check_mode=True + ) + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action='stonith', state='status') + return dict(rc=result[0], + out=result[1] if result[1] != "" else None, + err=result[2]) + + def fmt_stonith_resource(self): + return dict(resource_name=self.vars.stonith_type) + + # TODO: Pluralize operation_options in separate PR and remove this helper fmt function + def fmt_stonith_operations(self): + modified_stonith_operations = [] + for stonith_operation in self.vars.stonith_operations: + modified_stonith_operations.append(dict(operation_action=stonith_operation.get('operation_action'), + operation_option=stonith_operation.get('operation_options'))) + return modified_stonith_operations + + def state_absent(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith') + + def state_present(self): + with self.runner( + 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument agent_validation wait', + output_process=self._process_command_output(True, "already exists"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith', + resource_type=self.fmt_stonith_resource(), + resource_option=self.vars.stonith_options, + resource_operation=self.fmt_stonith_operations(), + resource_meta=self.vars.stonith_metas, + resource_argument=self.vars.stonith_argument) + + def state_enabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith') + + def state_disabled(self): + 
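+        # Mirrors state_enabled above: a failing pcs call whose stderr contains
+        # "Stopped" is treated as "already disabled" rather than an error.
+        # (Summary of _process_command_output; the pcs invocation itself is
+        # assembled by pacemaker_runner.)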
with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith') + + +def main(): + PacemakerStonith.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/ansible_galaxy_install.py b/plugins/modules/packaging/language/ansible_galaxy_install.py deleted file mode 100644 index 8efa9d842b..0000000000 --- a/plugins/modules/packaging/language/ansible_galaxy_install.py +++ /dev/null @@ -1,318 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2021, Alexei Znamensky -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = """ -module: ansible_galaxy_install -author: - - "Alexei Znamensky (@russoz)" -short_description: Install Ansible roles or collections using ansible-galaxy -version_added: 3.5.0 -description: - - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). -notes: - - > - B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and - ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters. -requirements: - - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer -options: - type: - description: - - The type of installation performed by C(ansible-galaxy). - - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections. - - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices." - - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)." - type: str - choices: [collection, role, both] - required: true - name: - description: - - Name of the collection or role being installed. - - Versions can be specified with C(ansible-galaxy) usual formats. For example, C(community.docker:1.6.1) or C(ansistrano.deploy,3.8.0). - - I(name) and I(requirements_file) are mutually exclusive. - type: str - requirements_file: - description: - - Path to a file containing a list of requirements to be installed. - - It works for I(type) equals to C(collection) and C(role). - - I(name) and I(requirements_file) are mutually exclusive. - - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run." - type: path - dest: - description: - - The path to the directory containing your collections or roles, according to the value of I(type). - - > - Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file) - contains both roles and collections and I(dest) is specified. - type: path - force: - description: - - Force overwriting an existing role or collection. - - Using I(force=true) is mandatory when downgrading. - - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections." - type: bool - default: false - ack_ansible29: - description: - - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them. - - This option is completely ignored if using a version Ansible greater than C(2.9.x). 
-    type: bool
-    default: false
-"""
-
-EXAMPLES = """
-- name: Install collection community.network
-  community.general.ansible_galaxy_install:
-    type: collection
-    name: community.network
-
-- name: Install role at specific path
-  community.general.ansible_galaxy_install:
-    type: role
-    name: ansistrano.deploy
-    dest: /ansible/roles
-
-- name: Install collections and roles together
-  community.general.ansible_galaxy_install:
-    type: both
-    requirements_file: requirements.yml
-
-- name: Force-install collection community.network at specific version
-  community.general.ansible_galaxy_install:
-    type: collection
-    name: community.network:3.0.2
-    force: true
-
-"""
-
-RETURN = """
-  type:
-    description: The value of the I(type) parameter.
-    type: str
-    returned: always
-  name:
-    description: The value of the I(name) parameter.
-    type: str
-    returned: always
-  dest:
-    description: The value of the I(dest) parameter.
-    type: str
-    returned: always
-  requirements_file:
-    description: The value of the I(requirements_file) parameter.
-    type: str
-    returned: always
-  force:
-    description: The value of the I(force) parameter.
-    type: bool
-    returned: always
-  installed_roles:
-    description:
-      - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
-      - If I(name) is specified, returns that role name and the version installed per path.
-      - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
-    type: dict
-    returned: always when installing roles
-    contains:
-      "<path>":
-        description: Roles and versions for that path.
-        type: dict
-    sample:
-      /home/user42/.ansible/roles:
-        ansistrano.deploy: 3.9.0
-        baztian.xfce: v0.0.3
-      /custom/ansible/roles:
-        ansistrano.deploy: 3.8.0
-  installed_collections:
-    description:
-      - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
-      - If I(name) is specified, returns that collection name and the version installed per path.
-      - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
-    type: dict
-    returned: always when installing collections
-    contains:
-      "<path>":
-        description: Collections and versions for that path.
-        type: dict
-    sample:
-      /home/az/.ansible/collections/ansible_collections:
-        community.docker: 1.6.0
-        community.general: 3.0.2
-      /custom/ansible/ansible_collections:
-        community.general: 3.1.0
-  new_collections:
-    description: New collections installed by this module.
-    returned: success
-    type: dict
-    sample:
-      community.general: 3.1.0
-      community.docker: 1.6.1
-  new_roles:
-    description: New roles installed by this module.
-    returned: success
-    type: dict
-    sample:
-      ansistrano.deploy: 3.8.0
-      baztian.xfce: v0.0.3
-"""
-
-import re
-
-from ansible_collections.community.general.plugins.module_utils.module_helper import CmdModuleHelper, ArgFormat
-
-
-class AnsibleGalaxyInstall(CmdModuleHelper):
-    _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
-    _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
-    _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
-    _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
-    _RE_INSTALL_OUTPUT = None  # Set after determining ansible version, see __init_module__()
-    ansible_version = None
-    is_ansible29 = None
-
-    output_params = ('type', 'name', 'dest', 'requirements_file', 'force')
-    module = dict(
-        argument_spec=dict(
-            type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
-            name=dict(type='str'),
-            requirements_file=dict(type='path'),
-            dest=dict(type='path'),
-            force=dict(type='bool', default=False),
-            ack_ansible29=dict(type='bool', default=False),
-        ),
-        mutually_exclusive=[('name', 'requirements_file')],
-        required_one_of=[('name', 'requirements_file')],
-        required_if=[('type', 'both', ['requirements_file'])],
-        supports_check_mode=False,
-    )
-
-    command = 'ansible-galaxy'
-    command_args_formats = dict(
-        type=dict(fmt=lambda v: [] if v == 'both' else [v]),
-        galaxy_cmd=dict(),
-        requirements_file=dict(fmt=('-r', '{0}'),),
-        dest=dict(fmt=('-p', '{0}'),),
-        force=dict(fmt="--force", style=ArgFormat.BOOLEAN),
-    )
-    force_lang = "en_US.UTF-8"
-    check_rc = True
-
-    def _get_ansible_galaxy_version(self):
-        ansible_galaxy = self.module.get_bin_path("ansible-galaxy", required=True)
-        dummy, out, dummy = self.module.run_command([ansible_galaxy, "--version"], check_rc=True)
-        line = out.splitlines()[0]
-        match = self._RE_GALAXY_VERSION.match(line)
-        if not match:
-            raise RuntimeError("Unable to determine ansible-galaxy version from: {0}".format(line))
-        version = match.group("version")
-        version = tuple(int(x) for x in version.split('.')[:3])
-        return version
-
-    def __init_module__(self):
-        self.ansible_version = self._get_ansible_galaxy_version()
-        self.is_ansible29 = self.ansible_version < (2, 10)
-        if self.is_ansible29:
-            self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'.*"
-                                                 r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)'
-                                                 r' was installed successfully)$')
-        else:
-            # Collection install output changed:
-            # ansible-base 2.10:  "coll.name (x.y.z)"
-            # ansible-core 2.11+: "coll.name:x.y.z"
-            self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
-                                                 r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
-                                                 r' was installed successfully$')
-
-    @staticmethod
-    def _process_output_list(*args):
-        if "None of the provided paths were usable" in args[1]:
-            return []
-        return args[1].splitlines()
-
-    def _list_element(self, _type, path_re, elem_re):
-        params = ({'type': _type}, {'galaxy_cmd': 'list'}, 'dest')
-        elems = self.run_command(params=params,
-                                 publish_rc=False, publish_out=False, publish_err=False, publish_cmd=False,
-                                 process_output=self._process_output_list,
-                                 check_rc=False)
-        elems_dict = {}
-        current_path = None
-        for line in elems:
-            if line.startswith("#"):
-                match = path_re.match(line)
-                if not match:
-                    continue
-                if self.vars.dest is not None and match.group('path') != self.vars.dest:
-                    current_path = None
-                    continue
-                current_path = match.group('path') if match else None
-                elems_dict[current_path] = {}
-
-            elif current_path is not None:
-                match = elem_re.match(line)
-                if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
-                    continue
-                elems_dict[current_path][match.group('elem')] = match.group('version')
-        return elems_dict
-
-    def _list_collections(self):
-        return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)
-
-    def _list_roles(self):
-        return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)
-
-    def _setup29(self):
-        self.vars.set("new_collections", {})
-        self.vars.set("new_roles", {})
-        self.vars.set("ansible29_change", False, change=True, output=False)
-        if not self.vars.ack_ansible29:
-            self.module.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed")
-        if self.vars.requirements_file is not None and self.vars.type == 'both':
-            self.module.warn("Ansible 2.9 or older: will install only roles from requirement files")
-
-    def _setup210plus(self):
-        self.vars.set("new_collections", {}, change=True)
-        self.vars.set("new_roles", {}, change=True)
-        if self.vars.type != "collection":
-            self.vars.installed_roles = self._list_roles()
-        if self.vars.type != "roles":
-            self.vars.installed_collections = self._list_collections()
-
-    def __run__(self):
-        if self.is_ansible29:
-            if self.vars.type == 'both':
-                raise ValueError("Type 'both' not supported in Ansible 2.9")
-            self._setup29()
-        else:
-            self._setup210plus()
-        params = ('type', {'galaxy_cmd': 'install'}, 'force', 'dest', 'requirements_file', 'name')
-        self.run_command(params=params)
-
-    def process_command_output(self, rc, out, err):
-        for line in out.splitlines():
-            match = self._RE_INSTALL_OUTPUT.match(line)
-            if not match:
-                continue
-            if match.group("collection"):
-                self.vars.new_collections[match.group("collection")] = match.group("cversion")
-                if self.is_ansible29:
-                    self.vars.ansible29_change = True
-            elif match.group("role"):
-                self.vars.new_roles[match.group("role")] = match.group("rversion")
-                if self.is_ansible29:
-                    self.vars.ansible29_change = True
-
-
-def main():
-    galaxy = AnsibleGalaxyInstall()
-    galaxy.run()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/language/bower.py b/plugins/modules/packaging/language/bower.py
deleted file mode 100644
index 911d99b7d9..0000000000
--- a/plugins/modules/packaging/language/bower.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2014, Michael Warkentin
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION 
= ''' ---- -module: bower -short_description: Manage bower packages with bower -description: - - Manage bower packages with bower -author: "Michael Warkentin (@mwarkentin)" -options: - name: - type: str - description: - - The name of a bower package to install - offline: - description: - - Install packages from local cache, if the packages were installed before - type: bool - default: 'no' - production: - description: - - Install with --production flag - type: bool - default: 'no' - path: - type: path - description: - - The base path where to install the bower packages - required: true - relative_execpath: - type: path - description: - - Relative path to bower executable from install path - state: - type: str - description: - - The state of the bower package - default: present - choices: [ "present", "absent", "latest" ] - version: - type: str - description: - - The version to be installed -''' - -EXAMPLES = ''' -- name: Install "bootstrap" bower package. - community.general.bower: - name: bootstrap - -- name: Install "bootstrap" bower package on version 3.1.1. - community.general.bower: - name: bootstrap - version: '3.1.1' - -- name: Remove the "bootstrap" bower package. - community.general.bower: - name: bootstrap - state: absent - -- name: Install packages based on bower.json. - community.general.bower: - path: /app/location - -- name: Update packages based on bower.json to their latest version. - community.general.bower: - path: /app/location - state: latest - -# install bower locally and run from there -- npm: - path: /app/location - name: bower - global: no -- community.general.bower: - path: /app/location - relative_execpath: node_modules/.bin -''' -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -class Bower(object): - def __init__(self, module, **kwargs): - self.module = module - self.name = kwargs['name'] - self.offline = kwargs['offline'] - self.production = kwargs['production'] - self.path = kwargs['path'] - self.relative_execpath = kwargs['relative_execpath'] - self.version = kwargs['version'] - - if kwargs['version']: - self.name_version = self.name + '#' + self.version - else: - self.name_version = self.name - - def _exec(self, args, run_in_check_mode=False, check_rc=True): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [] - - if self.relative_execpath: - cmd.append(os.path.join(self.path, self.relative_execpath, "bower")) - if not os.path.isfile(cmd[-1]): - self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath) - else: - cmd.append("bower") - - cmd.extend(args) - cmd.extend(['--config.interactive=false', '--allow-root']) - - if self.name: - cmd.append(self.name_version) - - if self.offline: - cmd.append('--offline') - - if self.production: - cmd.append('--production') - - # If path is specified, cd into that path and run the command. 
- cwd = None - if self.path: - if not os.path.exists(self.path): - os.makedirs(self.path) - if not os.path.isdir(self.path): - self.module.fail_json(msg="path %s is not a directory" % self.path) - cwd = self.path - - rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) - return out - return '' - - def list(self): - cmd = ['list', '--json'] - - installed = list() - missing = list() - outdated = list() - data = json.loads(self._exec(cmd, True, False)) - if 'dependencies' in data: - for dep in data['dependencies']: - dep_data = data['dependencies'][dep] - if dep_data.get('missing', False): - missing.append(dep) - elif ('version' in dep_data['pkgMeta'] and - 'update' in dep_data and - dep_data['pkgMeta']['version'] != dep_data['update']['latest']): - outdated.append(dep) - elif dep_data.get('incompatible', False): - outdated.append(dep) - else: - installed.append(dep) - # Named dependency not installed - else: - missing.append(self.name) - - return installed, missing, outdated - - def install(self): - return self._exec(['install']) - - def update(self): - return self._exec(['update']) - - def uninstall(self): - return self._exec(['uninstall']) - - -def main(): - arg_spec = dict( - name=dict(default=None), - offline=dict(default=False, type='bool'), - production=dict(default=False, type='bool'), - path=dict(required=True, type='path'), - relative_execpath=dict(default=None, required=False, type='path'), - state=dict(default='present', choices=['present', 'absent', 'latest', ]), - version=dict(default=None), - ) - module = AnsibleModule( - argument_spec=arg_spec - ) - - name = module.params['name'] - offline = module.params['offline'] - production = module.params['production'] - path = module.params['path'] - relative_execpath = module.params['relative_execpath'] - state = module.params['state'] - version = module.params['version'] - - if state == 'absent' and not name: - module.fail_json(msg='uninstalling a package is only available for named packages') - - bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version) - - changed = False - if state == 'present': - installed, missing, outdated = bower.list() - if missing: - changed = True - bower.install() - elif state == 'latest': - installed, missing, outdated = bower.list() - if missing or outdated: - changed = True - bower.update() - else: # Absent - installed, missing, outdated = bower.list() - if name in installed: - changed = True - bower.uninstall() - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/bundler.py b/plugins/modules/packaging/language/bundler.py deleted file mode 100644 index 43f8cfa2ee..0000000000 --- a/plugins/modules/packaging/language/bundler.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Tim Hoiberg -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: bundler -short_description: Manage Ruby Gem dependencies with Bundler -description: - - Manage installation and Gem version dependencies for Ruby using the Bundler gem -options: - executable: - type: str - description: - - The path to the bundler executable - state: - type: str - description: - - The desired state of the Gem bundle. 
C(latest) updates gems to the most recent, acceptable version - choices: [present, latest] - default: present - chdir: - type: path - description: - - The directory to execute the bundler commands from. This directory - needs to contain a valid Gemfile or .bundle/ directory - - If not specified, it will default to the temporary working directory - exclude_groups: - type: list - elements: str - description: - - A list of Gemfile groups to exclude during operations. This only - applies when state is C(present). Bundler considers this - a 'remembered' property for the Gemfile and will automatically exclude - groups in future operations even if C(exclude_groups) is not set - clean: - description: - - Only applies if state is C(present). If set removes any gems on the - target host that are not in the gemfile - type: bool - default: 'no' - gemfile: - type: path - description: - - Only applies if state is C(present). The path to the gemfile to use to install gems. - - If not specified it will default to the Gemfile in current directory - local: - description: - - If set only installs gems from the cache on the target host - type: bool - default: 'no' - deployment_mode: - description: - - Only applies if state is C(present). If set it will install gems in - ./vendor/bundle instead of the default location. Requires a Gemfile.lock - file to have been created prior - type: bool - default: 'no' - user_install: - description: - - Only applies if state is C(present). Installs gems in the local user's cache or for all users - type: bool - default: 'yes' - gem_path: - type: path - description: - - Only applies if state is C(present). Specifies the directory to - install the gems into. If C(chdir) is set then this path is relative to - C(chdir) - - If not specified the default RubyGems gem paths will be used. - binstub_directory: - type: path - description: - - Only applies if state is C(present). Specifies the directory to - install any gem bins files to. When executed the bin files will run - within the context of the Gemfile and fail if any required gem - dependencies are not installed. If C(chdir) is set then this path is - relative to C(chdir) - extra_args: - type: str - description: - - A space separated string of additional commands that can be applied to - the Bundler command. 
Refer to the Bundler documentation for more - information -author: "Tim Hoiberg (@thoiberg)" -''' - -EXAMPLES = ''' -- name: Install gems from a Gemfile in the current directory - community.general.bundler: - state: present - executable: ~/.rvm/gems/2.1.5/bin/bundle - -- name: Exclude the production group from installing - community.general.bundler: - state: present - exclude_groups: production - -- name: Install gems into ./vendor/bundle - community.general.bundler: - state: present - deployment_mode: yes - -- name: Install gems using a Gemfile in another directory - community.general.bundler: - state: present - gemfile: ../rails_project/Gemfile - -- name: Update Gemfile in another directory - community.general.bundler: - state: latest - chdir: ~/rails_project -''' - -from ansible.module_utils.basic import AnsibleModule - - -def get_bundler_executable(module): - if module.params.get('executable'): - result = module.params.get('executable').split(' ') - else: - result = [module.get_bin_path('bundle', True)] - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - executable=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'latest']), - chdir=dict(default=None, required=False, type='path'), - exclude_groups=dict(default=None, required=False, type='list', elements='str'), - clean=dict(default=False, required=False, type='bool'), - gemfile=dict(default=None, required=False, type='path'), - local=dict(default=False, required=False, type='bool'), - deployment_mode=dict(default=False, required=False, type='bool'), - user_install=dict(default=True, required=False, type='bool'), - gem_path=dict(default=None, required=False, type='path'), - binstub_directory=dict(default=None, required=False, type='path'), - extra_args=dict(default=None, required=False), - ), - supports_check_mode=True - ) - - state = module.params.get('state') - chdir = module.params.get('chdir') - exclude_groups = module.params.get('exclude_groups') - clean = module.params.get('clean') - gemfile = module.params.get('gemfile') - local = module.params.get('local') - deployment_mode = module.params.get('deployment_mode') - user_install = module.params.get('user_install') - gem_path = module.params.get('gem_path') - binstub_directory = module.params.get('binstub_directory') - extra_args = module.params.get('extra_args') - - cmd = get_bundler_executable(module) - - if module.check_mode: - cmd.append('check') - rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) - - module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) - - if state == 'present': - cmd.append('install') - if exclude_groups: - cmd.extend(['--without', ':'.join(exclude_groups)]) - if clean: - cmd.append('--clean') - if gemfile: - cmd.extend(['--gemfile', gemfile]) - if local: - cmd.append('--local') - if deployment_mode: - cmd.append('--deployment') - if not user_install: - cmd.append('--system') - if gem_path: - cmd.extend(['--path', gem_path]) - if binstub_directory: - cmd.extend(['--binstubs', binstub_directory]) - else: - cmd.append('update') - if local: - cmd.append('--local') - - if extra_args: - cmd.extend(extra_args.split(' ')) - - rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) - - module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py deleted file mode 100644 
index 86fe7bdea3..0000000000 --- a/plugins/modules/packaging/language/composer.py +++ /dev/null @@ -1,310 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Dimitrios Tydeas Mengidis -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: composer -author: - - "Dimitrios Tydeas Mengidis (@dmtrs)" - - "René Moser (@resmo)" -short_description: Dependency Manager for PHP -description: - - > - Composer is a tool for dependency management in PHP. It allows you to - declare the dependent libraries your project needs and it will install - them in your project for you. -options: - command: - type: str - description: - - Composer command like "install", "update" and so on. - default: install - arguments: - type: str - description: - - Composer arguments like required package, version and so on. - executable: - type: path - description: - - Path to PHP Executable on the remote host, if PHP is not in PATH. - aliases: [ php_path ] - working_dir: - type: path - description: - - Directory of your project (see --working-dir). This is required when - the command is not run globally. - - Will be ignored if C(global_command=true). - - Alias C(working-dir) has been deprecated and will be removed in community.general 5.0.0. - aliases: [ working-dir ] - global_command: - description: - - Runs the specified command globally. - - Alias C(global-command) has been deprecated and will be removed in community.general 5.0.0. - type: bool - default: false - aliases: [ global-command ] - prefer_source: - description: - - Forces installation from package sources when possible (see --prefer-source). - - Alias C(prefer-source) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ prefer-source ] - prefer_dist: - description: - - Forces installation from package dist even for dev versions (see --prefer-dist). - - Alias C(prefer-dist) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ prefer-dist ] - no_dev: - description: - - Disables installation of require-dev packages (see --no-dev). - - Alias C(no-dev) has been deprecated and will be removed in community.general 5.0.0. - default: true - type: bool - aliases: [ no-dev ] - no_scripts: - description: - - Skips the execution of all scripts defined in composer.json (see --no-scripts). - - Alias C(no-scripts) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ no-scripts ] - no_plugins: - description: - - Disables all plugins ( see --no-plugins ). - - Alias C(no-plugins) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ no-plugins ] - optimize_autoloader: - description: - - Optimize autoloader during autoloader dump (see --optimize-autoloader). - - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. - - Recommended especially for production, but can take a bit of time to run. - - Alias C(optimize-autoloader) has been deprecated and will be removed in community.general 5.0.0. - default: true - type: bool - aliases: [ optimize-autoloader ] - classmap_authoritative: - description: - - Autoload classes from classmap only. - - Implicitely enable optimize_autoloader. - - Recommended especially for production, but can take a bit of time to run. 
- - Alias C(classmap-authoritative) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ classmap-authoritative ] - apcu_autoloader: - description: - - Uses APCu to cache found/not-found classes - - Alias C(apcu-autoloader) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ apcu-autoloader ] - ignore_platform_reqs: - description: - - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. - - Alias C(ignore-platform-reqs) has been deprecated and will be removed in community.general 5.0.0. - default: false - type: bool - aliases: [ ignore-platform-reqs ] - composer_executable: - type: path - description: - - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed. - version_added: 3.2.0 -requirements: - - php - - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable) -notes: - - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. - - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues. -''' - -EXAMPLES = ''' -- name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock - community.general.composer: - command: install - working_dir: /path/to/project - -- name: Install a new package - community.general.composer: - command: require - arguments: my/package - working_dir: /path/to/project - -- name: Clone and install a project with all dependencies - community.general.composer: - command: create-project - arguments: package/package /path/to/project ~1.0 - working_dir: /path/to/project - prefer_dist: yes - -- name: Install a package globally - community.general.composer: - command: require - global_command: yes - arguments: my/package -''' - -import re -from ansible.module_utils.basic import AnsibleModule - - -def parse_out(string): - return re.sub(r"\s+", " ", string).strip() - - -def has_changed(string): - for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]: - if no_change in string: - return False - - return True - - -def get_available_options(module, command='install'): - # get all available options from a composer command using composer help to json - rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json") - if rc != 0: - output = parse_out(err) - module.fail_json(msg=output) - - command_help_json = module.from_json(out) - return command_help_json['definition']['options'] - - -def composer_command(module, command, arguments="", options=None, global_command=False): - if options is None: - options = [] - - if module.params['executable'] is None: - php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) - else: - php_path = module.params['executable'] - - if module.params['composer_executable'] is None: - composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) - else: - composer_path = module.params['composer_executable'] - - cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments) - return module.run_command(cmd) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - command=dict(default="install", type="str"), - 
arguments=dict(default="", type="str"), - executable=dict(type="path", aliases=["php_path"]), - working_dir=dict( - type="path", aliases=["working-dir"], - deprecated_aliases=[dict(name='working-dir', version='5.0.0', collection_name='community.general')]), - global_command=dict( - default=False, type="bool", aliases=["global-command"], - deprecated_aliases=[dict(name='global-command', version='5.0.0', collection_name='community.general')]), - prefer_source=dict( - default=False, type="bool", aliases=["prefer-source"], - deprecated_aliases=[dict(name='prefer-source', version='5.0.0', collection_name='community.general')]), - prefer_dist=dict( - default=False, type="bool", aliases=["prefer-dist"], - deprecated_aliases=[dict(name='prefer-dist', version='5.0.0', collection_name='community.general')]), - no_dev=dict( - default=True, type="bool", aliases=["no-dev"], - deprecated_aliases=[dict(name='no-dev', version='5.0.0', collection_name='community.general')]), - no_scripts=dict( - default=False, type="bool", aliases=["no-scripts"], - deprecated_aliases=[dict(name='no-scripts', version='5.0.0', collection_name='community.general')]), - no_plugins=dict( - default=False, type="bool", aliases=["no-plugins"], - deprecated_aliases=[dict(name='no-plugins', version='5.0.0', collection_name='community.general')]), - apcu_autoloader=dict( - default=False, type="bool", aliases=["apcu-autoloader"], - deprecated_aliases=[dict(name='apcu-autoloader', version='5.0.0', collection_name='community.general')]), - optimize_autoloader=dict( - default=True, type="bool", aliases=["optimize-autoloader"], - deprecated_aliases=[dict(name='optimize-autoloader', version='5.0.0', collection_name='community.general')]), - classmap_authoritative=dict( - default=False, type="bool", aliases=["classmap-authoritative"], - deprecated_aliases=[dict(name='classmap-authoritative', version='5.0.0', collection_name='community.general')]), - ignore_platform_reqs=dict( - default=False, type="bool", aliases=["ignore-platform-reqs"], - deprecated_aliases=[dict(name='ignore-platform-reqs', version='5.0.0', collection_name='community.general')]), - composer_executable=dict(type="path"), - ), - required_if=[('global_command', False, ['working_dir'])], - supports_check_mode=True - ) - - # Get composer command with fallback to default - command = module.params['command'] - if re.search(r"\s", command): - module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'") - - arguments = module.params['arguments'] - global_command = module.params['global_command'] - available_options = get_available_options(module=module, command=command) - - options = [] - - # Default options - default_options = [ - 'no-ansi', - 'no-interaction', - 'no-progress', - ] - - for option in default_options: - if option in available_options: - option = "--%s" % option - options.append(option) - - if not global_command: - options.extend(['--working-dir', "'%s'" % module.params['working_dir']]) - - option_params = { - 'prefer_source': 'prefer-source', - 'prefer_dist': 'prefer-dist', - 'no_dev': 'no-dev', - 'no_scripts': 'no-scripts', - 'no_plugins': 'no-plugins', - 'apcu_autoloader': 'acpu-autoloader', - 'optimize_autoloader': 'optimize-autoloader', - 'classmap_authoritative': 'classmap-authoritative', - 'ignore_platform_reqs': 'ignore-platform-reqs', - } - - for param, option in option_params.items(): - if module.params.get(param) and option in available_options: - option = "--%s" % option - options.append(option) - - if module.check_mode: - 
-        if 'dry-run' in available_options:
-            options.append('--dry-run')
-        else:
-            module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
-
-    rc, out, err = composer_command(module, command, arguments, options, global_command)
-
-    if rc != 0:
-        output = parse_out(err)
-        module.fail_json(msg=output, stdout=err)
-    else:
-        # Composer versions > 1.0.0-alpha9 now use stderr for standard notification messages
-        output = parse_out(out + err)
-        module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/language/cpanm.py b/plugins/modules/packaging/language/cpanm.py
deleted file mode 100644
index 7b209d1fae..0000000000
--- a/plugins/modules/packaging/language/cpanm.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012, Franck Cuny
-# (c) 2021, Alexei Znamensky
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: cpanm
-short_description: Manages Perl library dependencies
-description:
-  - Manage Perl library dependencies using cpanminus.
-options:
-  name:
-    type: str
-    description:
-      - The Perl library to install. Valid values change according to the I(mode), see notes for more details.
-      - Note that for installing from a local path the parameter I(from_path) should be used.
-    aliases: [pkg]
-  from_path:
-    type: path
-    description:
-      - The local directory or C(tar.gz) file to install from.
-  notest:
-    description:
-      - Do not run unit tests.
-    type: bool
-    default: no
-  locallib:
-    description:
-      - Specify the install base to install modules.
-    type: path
-  mirror:
-    description:
-      - Specifies the base URL for the CPAN mirror to use.
-    type: str
-  mirror_only:
-    description:
-      - Use the mirror's index file instead of the CPAN Meta DB.
-    type: bool
-    default: no
-  installdeps:
-    description:
-      - Only install dependencies.
-    type: bool
-    default: no
-  version:
-    description:
-      - Version specification for the Perl module. When I(mode) is C(new), C(cpanm) version operators are accepted.
-    type: str
-  executable:
-    description:
-      - Override the path to the cpanm executable.
-    type: path
-  mode:
-    description:
-      - Controls the module behavior. See notes below for more details.
-    type: str
-    choices: [compatibility, new]
-    default: compatibility
-    version_added: 3.0.0
-  name_check:
-    description:
-      - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified).
-    type: str
-    version_added: 3.0.0
-notes:
-  - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
-  - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)."
-  - "C(compatibility) mode:"
-  - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode.
-  - I(name) must be either a module name or a distribution file.
-  - >
-    If the Perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens.
-    Otherwise, it will be installed using the C(cpanm) executable.
-  - I(name) cannot be a URL, nor a git URL.
-  - C(cpanm) version specifiers do not work in this mode.
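The compatibility-mode check described in these notes reduces to asking perl itself whether the module loads at a given version. A minimal standalone sketch of that probe (a hypothetical helper, not part of the collection; it assumes perl is on PATH and mirrors the module's PERL5LIB handling):

```python
import os
import subprocess

def perl_module_installed(name, version=None, locallib=None):
    """Return True if `perl -le "use <name> <version>;"` succeeds."""
    env = dict(os.environ)
    if locallib:
        # cpanm --local-lib installs below <locallib>/lib/perl5
        env["PERL5LIB"] = "%s/lib/perl5" % locallib
    spec = name if version is None else "%s %s" % (name, version)
    proc = subprocess.run(["perl", "-le", "use %s;" % spec],
                          env=env, capture_output=True)
    return proc.returncode == 0

print(perl_module_installed("Dancer", "1.0"))
```

`use Module VERSION;` fails when the module is absent or older than VERSION, so a successful probe lets the module skip the install.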
- - "C(new) mode:" - - "When using C(new) mode, the module will behave differently" - - > - The I(name) parameter may refer to a module name, a distribution file, - a HTTP URL or a git repository URL as described in C(cpanminus) documentation. - - C(cpanm) version specifiers are recognized. -author: - - "Franck Cuny (@fcuny)" - - "Alexei Znamensky (@russoz)" -''' - -EXAMPLES = ''' -- name: Install Dancer perl package - community.general.cpanm: - name: Dancer - -- name: Install version 0.99_05 of the Plack perl package - community.general.cpanm: - name: MIYAGAWA/Plack-0.99_05.tar.gz - -- name: Install Dancer into the specified locallib - community.general.cpanm: - name: Dancer - locallib: /srv/webapps/my_app/extlib - -- name: Install perl dependencies from local directory - community.general.cpanm: - from_path: /srv/webapps/my_app/src/ - -- name: Install Dancer perl package without running the unit tests in indicated locallib - community.general.cpanm: - name: Dancer - notest: True - locallib: /srv/webapps/my_app/extlib - -- name: Install Dancer perl package from a specific mirror - community.general.cpanm: - name: Dancer - mirror: 'http://cpan.cpantesters.org/' - -- name: Install Dancer perl package into the system root path - become: yes - community.general.cpanm: - name: Dancer - -- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0 - community.general.cpanm: - name: Dancer - version: '1.0' -''' - -import os - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - ModuleHelper, CmdMixin, ArgFormat, ModuleHelperException -) - - -class CPANMinus(CmdMixin, ModuleHelper): - output_params = ['name', 'version'] - module = dict( - argument_spec=dict( - name=dict(type='str', aliases=['pkg']), - version=dict(type='str'), - from_path=dict(type='path'), - notest=dict(type='bool', default=False), - locallib=dict(type='path'), - mirror=dict(type='str'), - mirror_only=dict(type='bool', default=False), - installdeps=dict(type='bool', default=False), - executable=dict(type='path'), - mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'), - name_check=dict(type='str') - ), - required_one_of=[('name', 'from_path')], - - ) - command = 'cpanm' - command_args_formats = dict( - notest=dict(fmt="--notest", style=ArgFormat.BOOLEAN), - locallib=dict(fmt=('--local-lib', '{0}'),), - mirror=dict(fmt=('--mirror', '{0}'),), - mirror_only=dict(fmt="--mirror-only", style=ArgFormat.BOOLEAN), - installdeps=dict(fmt="--installdeps", style=ArgFormat.BOOLEAN), - ) - check_rc = True - - def __init_module__(self): - v = self.vars - if v.mode == "compatibility": - if v.name_check: - raise ModuleHelperException("Parameter name_check can only be used with mode=new") - else: - if v.name and v.from_path: - raise ModuleHelperException("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") - - self.command = self.module.get_bin_path(v.executable if v.executable else self.command) - self.vars.set("binary", self.command) - - def _is_package_installed(self, name, locallib, version): - if name is None or name.endswith('.tar.gz'): - return False - version = "" if version is None else " " + version - - env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {} - cmd = ['perl', '-le', 'use %s%s;' % (name, version)] - rc, out, err = self.module.run_command(cmd, check_rc=False, environ_update=env) - - return rc == 0 - - @staticmethod - def sanitize_pkg_spec_version(pkg_spec, version): - if version is 
None: - return pkg_spec - if pkg_spec.endswith('.tar.gz'): - raise ModuleHelperException(msg="parameter 'version' must not be used when installing from a file") - if os.path.isdir(pkg_spec): - raise ModuleHelperException(msg="parameter 'version' must not be used when installing from a directory") - if pkg_spec.endswith('.git'): - if version.startswith('~'): - raise ModuleHelperException(msg="operator '~' not allowed in version parameter when installing from git repository") - version = version if version.startswith('@') else '@' + version - elif version[0] not in ('@', '~'): - version = '~' + version - return pkg_spec + version - - def __run__(self): - v = self.vars - pkg_param = 'from_path' if v.from_path else 'name' - - if v.mode == 'compatibility': - if self._is_package_installed(v.name, v.locallib, v.version): - return - pkg_spec = v[pkg_param] - self.changed = self.run_command( - params=['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', {'name': pkg_spec}], - ) - else: - installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False - if installed: - return - pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) - self.changed = self.run_command( - params=['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', {'name': pkg_spec}], - ) - - def process_command_output(self, rc, out, err): - if self.vars.mode == "compatibility" and rc != 0: - raise ModuleHelperException(msg=err, cmd=self.vars.cmd_args) - return 'is up to date' not in err and 'is up to date' not in out - - -def main(): - CPANMinus.execute() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/easy_install.py b/plugins/modules/packaging/language/easy_install.py deleted file mode 100644 index 5e1d7930b5..0000000000 --- a/plugins/modules/packaging/language/easy_install.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: easy_install -short_description: Installs Python libraries -description: - - Installs Python libraries, optionally in a I(virtualenv) -options: - name: - type: str - description: - - A Python library name - required: true - virtualenv: - type: str - description: - - an optional I(virtualenv) directory path to install into. If the - I(virtualenv) does not exist, it is created automatically - virtualenv_site_packages: - description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. Note that if this setting is - changed on an already existing virtual environment it will not - have any effect, the environment must be deleted and newly - created. - type: bool - default: 'no' - virtualenv_command: - type: str - description: - - The command to create the virtual environment with. For example - C(pyvenv), C(virtualenv), C(virtualenv2). - default: virtualenv - executable: - type: str - description: - - The explicit executable or a pathname to the executable to be used to - run easy_install for a specific version of Python installed in the - system. For example C(easy_install-3.3), if there are both Python 2.7 - and 3.3 installations in the system and you want to run easy_install - for the Python 3.3 installation. 
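The I(executable) option above exists so a versioned binary such as C(easy_install-3.3) can be chosen over the bare name. Roughly, the resolution order is: an absolute path as-is, then the requested basename, then plain C(easy_install), with the virtualenv's bin directory searched first. A sketch of that lookup under those assumptions (hypothetical helper; the module itself delegates to Ansible's get_bin_path):

```python
import os
import shutil

def find_easy_install(executable=None, virtualenv=None):
    """Resolve the easy_install binary to run (illustrative only)."""
    if executable and os.path.isabs(executable):
        return executable
    candidates = ([executable] if executable else []) + ["easy_install"]
    search_path = os.environ.get("PATH", "")
    if virtualenv:
        # prefer the virtualenv's bin directory over the system PATH
        search_path = os.path.join(virtualenv, "bin") + os.pathsep + search_path
    for name in candidates:
        found = shutil.which(name, path=search_path)
        if found:
            return found
    return None
```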
- default: easy_install - state: - type: str - description: - - The desired state of the library. C(latest) ensures that the latest version is installed. - choices: [present, latest] - default: present -notes: - - Please note that the C(easy_install) module can only install Python - libraries. Thus this module is not able to remove libraries. It is - generally recommended to use the M(ansible.builtin.pip) module which you can first install - using M(community.general.easy_install). - - Also note that I(virtualenv) must be installed on the remote host if the - C(virtualenv) parameter is specified. -requirements: [ "virtualenv" ] -author: "Matt Wright (@mattupstate)" -''' - -EXAMPLES = ''' -- name: Install or update pip - community.general.easy_install: - name: pip - state: latest - -- name: Install Bottle into the specified virtualenv - community.general.easy_install: - name: bottle - virtualenv: /webapps/myapp/venv -''' - -import os -import os.path -import tempfile -from ansible.module_utils.basic import AnsibleModule - - -def install_package(module, name, easy_install, executable_arguments): - cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) - rc, out, err = module.run_command(cmd) - return rc, out, err - - -def _is_package_installed(module, name, easy_install, executable_arguments): - # Copy and add to the arguments - executable_arguments = executable_arguments[:] - executable_arguments.append('--dry-run') - rc, out, err = install_package(module, name, easy_install, executable_arguments) - if rc: - module.fail_json(msg=err) - return 'Downloading' not in out - - -def _get_easy_install(module, env=None, executable=None): - candidate_easy_inst_basenames = ['easy_install'] - easy_install = None - if executable is not None: - if os.path.isabs(executable): - easy_install = executable - else: - candidate_easy_inst_basenames.insert(0, executable) - if easy_install is None: - if env is None: - opt_dirs = [] - else: - # Try easy_install with the virtualenv directory first. - opt_dirs = ['%s/bin' % env] - for basename in candidate_easy_inst_basenames: - easy_install = module.get_bin_path(basename, False, opt_dirs) - if easy_install is not None: - break - # easy_install should have been found by now. The final call to - # get_bin_path will trigger fail_json. 
- if easy_install is None: - basename = candidate_easy_inst_basenames[0] - easy_install = module.get_bin_path(basename, True, opt_dirs) - return easy_install - - -def main(): - arg_spec = dict( - name=dict(required=True), - state=dict(required=False, - default='present', - choices=['present', 'latest'], - type='str'), - virtualenv=dict(default=None, required=False), - virtualenv_site_packages=dict(default=False, type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - executable=dict(default='easy_install', required=False), - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - env = module.params['virtualenv'] - executable = module.params['executable'] - site_packages = module.params['virtualenv_site_packages'] - virtualenv_command = module.params['virtualenv_command'] - executable_arguments = [] - if module.params['state'] == 'latest': - executable_arguments.append('--upgrade') - - rc = 0 - err = '' - out = '' - - if env: - virtualenv = module.get_bin_path(virtualenv_command, True) - - if not os.path.exists(os.path.join(env, 'bin', 'activate')): - if module.check_mode: - module.exit_json(changed=True) - command = '%s %s' % (virtualenv, env) - if site_packages: - command += ' --system-site-packages' - cwd = tempfile.gettempdir() - rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) - - rc += rc_venv - out += out_venv - err += err_venv - - easy_install = _get_easy_install(module, env, executable) - - cmd = None - changed = False - installed = _is_package_installed(module, name, easy_install, executable_arguments) - - if not installed: - if module.check_mode: - module.exit_json(changed=True) - rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments) - - rc += rc_easy_inst - out += out_easy_inst - err += err_easy_inst - - changed = True - - if rc != 0: - module.fail_json(msg=err, cmd=cmd) - - module.exit_json(changed=changed, binary=easy_install, - name=name, virtualenv=env) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/gem.py b/plugins/modules/packaging/language/gem.py deleted file mode 100644 index c7ccdec498..0000000000 --- a/plugins/modules/packaging/language/gem.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Johan Wiren -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: gem -short_description: Manage Ruby gems -description: - - Manage installation and uninstallation of Ruby gems. -options: - name: - type: str - description: - - The name of the gem to be managed. - required: true - state: - type: str - description: - - The desired state of the gem. C(latest) ensures that the latest version is installed. - required: false - choices: [present, absent, latest] - default: present - gem_source: - type: path - description: - - The path to a local gem used as installation source. - required: false - include_dependencies: - description: - - Whether to include dependencies or not. 
- required: false - type: bool - default: "yes" - repository: - type: str - description: - - The repository from which the gem will be installed - required: false - aliases: [source] - user_install: - description: - - Install gem in user's local gems cache or for all users - required: false - type: bool - default: "yes" - executable: - type: path - description: - - Override the path to the gem executable - required: false - install_dir: - type: path - description: - - Install the gems into a specific directory. - These gems will be independent from the global installed ones. - Specifying this requires user_install to be false. - required: false - bindir: - type: path - description: - - Install executables into a specific directory. - version_added: 3.3.0 - norc: - type: bool - default: false - description: - - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. - - "The current default value will be deprecated in community.general 4.0.0: if the value is not explicitly specified, a deprecation message will be shown." - - From community.general 5.0.0 on, the default will be changed to C(true). - version_added: 3.3.0 - env_shebang: - description: - - Rewrite the shebang line on installed scripts to use /usr/bin/env. - required: false - default: "no" - type: bool - version: - type: str - description: - - Version of the gem to be installed/removed. - required: false - pre_release: - description: - - Allow installation of pre-release versions of the gem. - required: false - default: "no" - type: bool - include_doc: - description: - - Install with or without docs. - required: false - default: "no" - type: bool - build_flags: - type: str - description: - - Allow adding build flags for gem compilation - required: false - force: - description: - - Force gem to install, bypassing dependency checks. 
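To decide between the states documented above, the module first has to learn which versions are already present, which it does by parsing `gem query` output such as `rake (default: 13.0.6, 12.3.3)`. A self-contained sketch of that parsing (the sample output is invented for illustration; the regular expression is the one the module uses):

```python
import re

# Invented sample of `gem query -n ^rake$` output.
sample = "rake (default: 13.0.6, 12.3.3)\n"

def parse_installed_versions(output):
    """Extract bare version numbers from gem query lines."""
    versions = []
    for line in output.splitlines():
        match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
        if match:
            for version in match.group(1).split(', '):
                versions.append(version.split()[0])
    return versions

print(parse_installed_versions(sample))  # ['13.0.6', '12.3.3']
```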
- required: false - default: "no" - type: bool -author: - - "Ansible Core Team" - - "Johan Wiren (@johanwiren)" -''' - -EXAMPLES = ''' -- name: Install version 1.0 of vagrant - community.general.gem: - name: vagrant - version: 1.0 - state: present - -- name: Install latest available version of rake - community.general.gem: - name: rake - state: latest - -- name: Install rake version 1.0 from a local gem on disk - community.general.gem: - name: rake - gem_source: /path/to/gems/rake-1.0.gem - state: present -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - - -def get_rubygems_path(module): - if module.params['executable']: - result = module.params['executable'].split(' ') - else: - result = [module.get_bin_path('gem', True)] - return result - - -def get_rubygems_version(module): - if hasattr(get_rubygems_version, "ver"): - return get_rubygems_version.ver - - cmd = get_rubygems_path(module) + ['--version'] - (rc, out, err) = module.run_command(cmd, check_rc=True) - - match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) - if not match: - return None - - ver = tuple(int(x) for x in match.groups()) - get_rubygems_version.ver = ver - - return ver - - -def get_rubygems_environ(module): - if module.params['install_dir']: - return {'GEM_HOME': module.params['install_dir']} - return None - - -def get_installed_versions(module, remote=False): - - cmd = get_rubygems_path(module) - cmd.append('query') - cmd.extend(common_opts(module)) - if remote: - cmd.append('--remote') - if module.params['repository']: - cmd.extend(['--source', module.params['repository']]) - cmd.append('-n') - cmd.append('^%s$' % module.params['name']) - - environ = get_rubygems_environ(module) - (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True) - installed_versions = [] - for line in out.splitlines(): - match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line) - if match: - versions = match.group(1) - for version in versions.split(', '): - installed_versions.append(version.split()[0]) - return installed_versions - - -def exists(module): - if module.params['state'] == 'latest': - remoteversions = get_installed_versions(module, remote=True) - if remoteversions: - module.params['version'] = remoteversions[0] - installed_versions = get_installed_versions(module) - if module.params['version']: - if module.params['version'] in installed_versions: - return True - else: - if installed_versions: - return True - return False - - -def common_opts(module): - opts = [] - ver = get_rubygems_version(module) - if module.params['norc'] and ver and ver >= (2, 5, 2): - opts.append('--norc') - return opts - - -def uninstall(module): - - if module.check_mode: - return - cmd = get_rubygems_path(module) - environ = get_rubygems_environ(module) - cmd.append('uninstall') - cmd.extend(common_opts(module)) - if module.params['install_dir']: - cmd.extend(['--install-dir', module.params['install_dir']]) - - if module.params['bindir']: - cmd.extend(['--bindir', module.params['bindir']]) - - if module.params['version']: - cmd.extend(['--version', module.params['version']]) - else: - cmd.append('--all') - cmd.append('--executable') - cmd.append(module.params['name']) - module.run_command(cmd, environ_update=environ, check_rc=True) - - -def install(module): - - if module.check_mode: - return - - ver = get_rubygems_version(module) - - cmd = get_rubygems_path(module) - cmd.append('install') - cmd.extend(common_opts(module)) - if module.params['version']: - cmd.extend(['--version', module.params['version']]) - if 
module.params['repository']: - cmd.extend(['--source', module.params['repository']]) - if not module.params['include_dependencies']: - cmd.append('--ignore-dependencies') - else: - if ver and ver < (2, 0, 0): - cmd.append('--include-dependencies') - if module.params['user_install']: - cmd.append('--user-install') - else: - cmd.append('--no-user-install') - if module.params['install_dir']: - cmd.extend(['--install-dir', module.params['install_dir']]) - if module.params['bindir']: - cmd.extend(['--bindir', module.params['bindir']]) - if module.params['pre_release']: - cmd.append('--pre') - if not module.params['include_doc']: - if ver and ver < (2, 0, 0): - cmd.append('--no-rdoc') - cmd.append('--no-ri') - else: - cmd.append('--no-document') - if module.params['env_shebang']: - cmd.append('--env-shebang') - cmd.append(module.params['gem_source']) - if module.params['build_flags']: - cmd.extend(['--', module.params['build_flags']]) - if module.params['force']: - cmd.append('--force') - module.run_command(cmd, check_rc=True) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - executable=dict(required=False, type='path'), - gem_source=dict(required=False, type='path'), - include_dependencies=dict(required=False, default=True, type='bool'), - name=dict(required=True, type='str'), - repository=dict(required=False, aliases=['source'], type='str'), - state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), - user_install=dict(required=False, default=True, type='bool'), - install_dir=dict(required=False, type='path'), - bindir=dict(type='path'), - norc=dict(default=False, type='bool'), - pre_release=dict(required=False, default=False, type='bool'), - include_doc=dict(required=False, default=False, type='bool'), - env_shebang=dict(required=False, default=False, type='bool'), - version=dict(required=False, type='str'), - build_flags=dict(required=False, type='str'), - force=dict(required=False, default=False, type='bool'), - ), - supports_check_mode=True, - mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']], - ) - - if module.params['version'] and module.params['state'] == 'latest': - module.fail_json(msg="Cannot specify version when state=latest") - if module.params['gem_source'] and module.params['state'] == 'latest': - module.fail_json(msg="Cannot maintain state=latest when installing from local source") - if module.params['user_install'] and module.params['install_dir']: - module.fail_json(msg="install_dir requires user_install=false") - - if not module.params['gem_source']: - module.params['gem_source'] = module.params['name'] - - changed = False - - if module.params['state'] in ['present', 'latest']: - if not exists(module): - install(module) - changed = True - elif module.params['state'] == 'absent': - if exists(module): - uninstall(module) - changed = True - - result = {} - result['name'] = module.params['name'] - result['state'] = module.params['state'] - if module.params['version']: - result['version'] = module.params['version'] - result['changed'] = changed - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py deleted file mode 100644 index c184830580..0000000000 --- a/plugins/modules/packaging/language/maven_artifact.py +++ /dev/null @@ -1,731 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2014, Chris Schmidt -# -# Built using 
https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
-# as a reference and starting point.
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: maven_artifact
-short_description: Downloads an Artifact from a Maven Repository
-description:
-    - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
-    - Can retrieve snapshots or release versions of the artifact, and will resolve the latest available
      version if one is not specified.
-author: "Chris Schmidt (@chrisisbeef)"
-requirements:
-    - lxml
-    - boto3 if using an S3 repository (s3://...)
-options:
-    group_id:
-        type: str
-        description:
-            - The Maven groupId coordinate.
-        required: true
-    artifact_id:
-        type: str
-        description:
-            - The maven artifactId coordinate.
-        required: true
-    version:
-        type: str
-        description:
-            - The maven version coordinate.
-            - Mutually exclusive with I(version_by_spec).
-    version_by_spec:
-        type: str
-        description:
-            - The maven dependency version ranges.
-            - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
-            - The range types "(,1.0],[1.2,)" and "(,1.1),(1.1,)" are not supported.
-            - Mutually exclusive with I(version).
-        version_added: '0.2.0'
-    classifier:
-        type: str
-        description:
-            - The maven classifier coordinate.
-    extension:
-        type: str
-        description:
-            - The maven type/extension coordinate.
-        default: jar
-    repository_url:
-        type: str
-        description:
-            - The URL of the Maven Repository to download from.
-            - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
-            - Use file://... if the repository is local, added in version 2.6.
-        default: https://repo1.maven.org/maven2
-    username:
-        type: str
-        description:
-            - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
-        aliases: [ "aws_secret_key" ]
-    password:
-        type: str
-        description:
-            - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
-        aliases: [ "aws_secret_access_key" ]
-    headers:
-        description:
-            - Add custom HTTP headers to a request in hash/dict format.
-        type: dict
-    force_basic_auth:
-        description:
-            - httplib2, the library used by the uri module, only sends authentication information when a webservice
              responds to an initial request with a 401 status. Since some basic auth services do not properly
              send a 401, logins will fail. This option forces the sending of the Basic authentication header
              upon initial request.
-        default: 'no'
-        type: bool
-        version_added: '0.2.0'
-    dest:
-        type: path
-        description:
-            - The path where the artifact should be written to.
-            - If file mode or ownerships are specified and the destination path already exists, they affect the downloaded file.
-        required: true
-    state:
-        type: str
-        description:
-            - The desired state of the artifact.
-        default: present
-        choices: [present,absent]
-    timeout:
-        type: int
-        description:
-            - Specifies a timeout in seconds for the connection attempt.
-        default: 10
-    validate_certs:
-        description:
-            - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
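The coordinates documented above map onto the standard Maven repository layout: dots in the groupId become path separators, followed by the artifactId, the version, and a file named artifactId-version[-classifier].extension. A minimal sketch of that mapping for release versions (simplified from the module's path logic; snapshot timestamping is omitted):

```python
import posixpath

def artifact_url(base, group_id, artifact_id, version, classifier='', extension='jar'):
    """Maven layout: <base>/<groupId as path>/<artifactId>/<version>/<file>."""
    directory = posixpath.join(group_id.replace('.', '/'), artifact_id, version)
    suffix = "-" + classifier if classifier else ""
    filename = "%s-%s%s.%s" % (artifact_id, version, suffix, extension)
    return posixpath.join(base, directory, filename)

print(artifact_url("https://repo1.maven.org/maven2", "junit", "junit", "4.11"))
# -> https://repo1.maven.org/maven2/junit/junit/4.11/junit-4.11.jar
```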
- type: bool - default: 'yes' - client_cert: - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - This file can also include the key as well, and if the key is included, I(client_key) is not required. - type: path - version_added: '1.3.0' - client_key: - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - If I(client_cert) contains both the certificate and key, this option is not required. - type: path - version_added: '1.3.0' - keep_name: - description: - - If C(yes), the downloaded artifact's name is preserved, i.e the version number remains part of it. - - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec) - is defined. - type: bool - default: 'no' - verify_checksum: - type: str - description: - - If C(never), the MD5/SHA1 checksum will never be downloaded and verified. - - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. - - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, - to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) - downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error - if the artifact has not been cached yet, it may fail unexpectedly. - If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to - use it to verify integrity after download. - - C(always) combines C(download) and C(change). - required: false - default: 'download' - choices: ['never', 'download', 'change', 'always'] - checksum_alg: - type: str - description: - - If C(md5), checksums will use the MD5 algorithm. This is the default. - - If C(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use - FIPS-compliant algorithms, since MD5 will be blocked on such systems. - default: 'md5' - choices: ['md5', 'sha1'] - version_added: 3.2.0 - directory_mode: - type: str - description: - - Filesystem permission mode applied recursively to I(dest) when it is a directory. 
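The I(verify_checksum) behaviour described above relies on the usual Maven convention of sidecar files: the repository serves the artifact URL with C(.md5) or C(.sha1) appended, and the first whitespace-separated token of that file is the digest. A small sketch of the local half of that comparison (hypothetical helpers; the chunked hashing matches what the module does):

```python
import hashlib

def local_checksum(path, alg='md5'):
    """Hash a file in 8 KiB chunks so large artifacts are not read at once."""
    digest = hashlib.new(alg)  # 'md5' or 'sha1'
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()

def checksum_matches(path, sidecar_text, alg='md5'):
    # Sidecar may contain "<digest>" or "<digest>  <filename>"; keep token 0.
    remote = sidecar_text.split(None, 1)[0]
    return local_checksum(path, alg).lower() == remote.lower()
```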
-extends_documentation_fragment: - - files -''' - -EXAMPLES = ''' -- name: Download the latest version of the JUnit framework artifact from Maven Central - community.general.maven_artifact: - group_id: junit - artifact_id: junit - dest: /tmp/junit-latest.jar - -- name: Download JUnit 4.11 from Maven Central - community.general.maven_artifact: - group_id: junit - artifact_id: junit - version: 4.11 - dest: /tmp/junit-4.11.jar - -- name: Download an artifact from a private repository requiring authentication - community.general.maven_artifact: - group_id: com.company - artifact_id: library-name - repository_url: 'https://repo.company.com/maven' - username: user - password: pass - dest: /tmp/library-name-latest.jar - -- name: Download an artifact from a private repository requiring certificate authentication - community.general.maven_artifact: - group_id: com.company - artifact_id: library-name - repository_url: 'https://repo.company.com/maven' - client_cert: /path/to/cert.pem - client_key: /path/to/key.pem - dest: /tmp/library-name-latest.jar - -- name: Download a WAR File to the Tomcat webapps directory to be deployed - community.general.maven_artifact: - group_id: com.company - artifact_id: web-app - extension: war - repository_url: 'https://repo.company.com/maven' - dest: /var/lib/tomcat7/webapps/web-app.war - -- name: Keep a downloaded artifact's name, i.e. retain the version - community.general.maven_artifact: - version: latest - artifact_id: spring-core - group_id: org.springframework - dest: /tmp/ - keep_name: yes - -- name: Download the latest version of the JUnit framework artifact from Maven local - community.general.maven_artifact: - group_id: junit - artifact_id: junit - dest: /tmp/junit-latest.jar - repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository" - -- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central - community.general.maven_artifact: - group_id: junit - artifact_id: junit - version_by_spec: "[3.8,4.0)" - dest: /tmp/ -''' - -import hashlib -import os -import posixpath -import shutil -import io -import tempfile -import traceback -import re - -from ansible.module_utils.ansible_release import __version__ as ansible_version -from re import match - -LXML_ETREE_IMP_ERR = None -try: - from lxml import etree - HAS_LXML_ETREE = True -except ImportError: - LXML_ETREE_IMP_ERR = traceback.format_exc() - HAS_LXML_ETREE = False - -BOTO_IMP_ERR = None -try: - import boto3 - HAS_BOTO = True -except ImportError: - BOTO_IMP_ERR = traceback.format_exc() - HAS_BOTO = False - -SEMANTIC_VERSION_IMP_ERR = None -try: - from semantic_version import Version, Spec - HAS_SEMANTIC_VERSION = True -except ImportError: - SEMANTIC_VERSION_IMP_ERR = traceback.format_exc() - HAS_SEMANTIC_VERSION = False - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text - - -def split_pre_existing_dir(dirname): - ''' - Return the first pre-existing directory and a list of the new directories that will be created. 
- ''' - head, tail = os.path.split(dirname) - b_head = to_bytes(head, errors='surrogate_or_strict') - if not os.path.exists(b_head): - if head == dirname: - return None, [head] - else: - (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) - else: - return head, [tail] - new_directory_list.append(tail) - return pre_existing_dir, new_directory_list - - -def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): - ''' - Walk the new directories list and make sure that permissions are as we would expect - ''' - if new_directory_list: - first_sub_dir = new_directory_list.pop(0) - if not pre_existing_dir: - working_dir = first_sub_dir - else: - working_dir = os.path.join(pre_existing_dir, first_sub_dir) - directory_args['path'] = working_dir - changed = module.set_fs_attributes_if_different(directory_args, changed) - changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) - return changed - - -class Artifact(object): - def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'): - if not group_id: - raise ValueError("group_id must be set") - if not artifact_id: - raise ValueError("artifact_id must be set") - - self.group_id = group_id - self.artifact_id = artifact_id - self.version = version - self.version_by_spec = version_by_spec - self.classifier = classifier - - if not extension: - self.extension = "jar" - else: - self.extension = extension - - def is_snapshot(self): - return self.version and self.version.endswith("SNAPSHOT") - - def path(self, with_version=True): - base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id) - if with_version and self.version: - timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version) - if timestamp_version_match: - base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT") - else: - base = posixpath.join(base, self.version) - return base - - def _generate_filename(self): - filename = self.artifact_id + "-" + self.classifier + "." + self.extension - if not self.classifier: - filename = self.artifact_id + "." 
+ self.extension - return filename - - def get_filename(self, filename=None): - if not filename: - filename = self._generate_filename() - elif os.path.isdir(filename): - filename = os.path.join(filename, self._generate_filename()) - return filename - - def __str__(self): - result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version) - if self.classifier: - result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version) - elif self.extension != "jar": - result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version) - return result - - @staticmethod - def parse(input): - parts = input.split(":") - if len(parts) >= 3: - g = parts[0] - a = parts[1] - v = parts[-1] - t = None - c = None - if len(parts) == 4: - t = parts[2] - if len(parts) == 5: - t = parts[2] - c = parts[3] - return Artifact(g, a, v, c, t) - else: - return None - - -class MavenDownloader: - def __init__(self, module, base, local=False, headers=None): - self.module = module - if base.endswith("/"): - base = base.rstrip("/") - self.base = base - self.local = local - self.headers = headers - self.user_agent = "Ansible {0} maven_artifact".format(ansible_version) - self.latest_version_found = None - self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml" - - def find_version_by_spec(self, artifact): - path = "/%s/%s" % (artifact.path(False), self.metadata_file_name) - content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path) - xml = etree.fromstring(content) - original_versions = xml.xpath("/metadata/versioning/versions/version/text()") - versions = [] - for version in original_versions: - try: - versions.append(Version.coerce(version)) - except ValueError: - # This means that version string is not a valid semantic versioning - pass - - parse_versions_syntax = { - # example -> (,1.0] - r"^\(,(?P[0-9.]*)]$": "<={upper_bound}", - # example -> 1.0 - r"^(?P[0-9.]*)$": "~={version}", - # example -> [1.0] - r"^\[(?P[0-9.]*)\]$": "=={version}", - # example -> [1.2, 1.3] - r"^\[(?P[0-9.]*),\s*(?P[0-9.]*)\]$": ">={lower_bound},<={upper_bound}", - # example -> [1.2, 1.3) - r"^\[(?P[0-9.]*),\s*(?P[0-9.]+)\)$": ">={lower_bound},<{upper_bound}", - # example -> [1.5,) - r"^\[(?P[0-9.]*),\)$": ">={lower_bound}", - } - - for regex, spec_format in parse_versions_syntax.items(): - regex_result = match(regex, artifact.version_by_spec) - if regex_result: - spec = Spec(spec_format.format(**regex_result.groupdict())) - selected_version = spec.select(versions) - - if not selected_version: - raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec)) - - # To deal when repos on maven don't have patch number on first build (e.g. 3.8 instead of 3.8.0) - if str(selected_version) not in original_versions: - selected_version.patch = None - - return str(selected_version) - - raise ValueError("The spec version {0} is not supported! 
".format(artifact.version_by_spec)) - - def find_latest_version_available(self, artifact): - if self.latest_version_found: - return self.latest_version_found - path = "/%s/%s" % (artifact.path(False), self.metadata_file_name) - content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path) - xml = etree.fromstring(content) - v = xml.xpath("/metadata/versioning/versions/version[last()]/text()") - if v: - self.latest_version_found = v[0] - return v[0] - - def find_uri_for_artifact(self, artifact): - if artifact.version_by_spec: - artifact.version = self.find_version_by_spec(artifact) - - if artifact.version == "latest": - artifact.version = self.find_latest_version_available(artifact) - - if artifact.is_snapshot(): - if self.local: - return self._uri_for_artifact(artifact, artifact.version) - path = "/%s/%s" % (artifact.path(), self.metadata_file_name) - content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path) - xml = etree.fromstring(content) - - for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"): - classifier = snapshotArtifact.xpath("classifier/text()") - artifact_classifier = classifier[0] if classifier else '' - extension = snapshotArtifact.xpath("extension/text()") - artifact_extension = extension[0] if extension else '' - if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension: - return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0]) - timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()") - if timestamp_xmlpath: - timestamp = timestamp_xmlpath[0] - build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] - return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number)) - - return self._uri_for_artifact(artifact, artifact.version) - - def _uri_for_artifact(self, artifact, version=None): - if artifact.is_snapshot() and not version: - raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) - elif not artifact.is_snapshot(): - version = artifact.version - if artifact.classifier: - return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension) - - return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." 
-
-    # for small files, directly get the full content
-    def _getContent(self, url, failmsg, force=True):
-        if self.local:
-            parsed_url = urlparse(url)
-            if os.path.isfile(parsed_url.path):
-                with io.open(parsed_url.path, 'rb') as f:
-                    return f.read()
-            if force:
-                raise ValueError(failmsg + " because it cannot find the file: " + url)
-            return None
-        response = self._request(url, failmsg, force)
-        if response:
-            return response.read()
-        return None
-
-    # only for HTTP request
-    def _request(self, url, failmsg, force=True):
-        url_to_use = url
-        parsed_url = urlparse(url)
-
-        if parsed_url.scheme == 's3':
-            parsed_url = urlparse(url)
-            bucket_name = parsed_url.netloc
-            key_name = parsed_url.path[1:]
-            client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
-            url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
-
-        req_timeout = self.module.params.get('timeout')
-
-        # Hack to add parameters in the way that fetch_url expects
-        self.module.params['url_username'] = self.module.params.get('username', '')
-        self.module.params['url_password'] = self.module.params.get('password', '')
-        self.module.params['http_agent'] = self.user_agent
-
-        response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers)
-        if info['status'] == 200:
-            return response
-        if force:
-            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
-        return None
-
-    def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'):
-        if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
-            artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
-                                artifact.classifier, artifact.extension)
-        url = self.find_uri_for_artifact(artifact)
-        tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
-
-        try:
-            # copy to temp file
-            if self.local:
-                parsed_url = urlparse(url)
-                if os.path.isfile(parsed_url.path):
-                    shutil.copy2(parsed_url.path, tempname)
-                else:
-                    return "Cannot find local file: " + parsed_url.path
-            else:
-                response = self._request(url, "Failed to download artifact " + str(artifact))
-                with os.fdopen(tempfd, 'wb') as f:
-                    shutil.copyfileobj(response, f)
-
-            if verify_download:
-                invalid_checksum = self.is_invalid_checksum(tempname, url, checksum_alg)
-                if invalid_checksum:
-                    # if verify_change was set, the previous file would be deleted
-                    os.remove(tempname)
-                    return invalid_checksum
-        except Exception as e:
-            os.remove(tempname)
-            raise e
-
-        # all good, now copy temp file to target
-        shutil.move(tempname, artifact.get_filename(filename))
-        return None
-
-    def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'):
-        if os.path.exists(file):
-            local_checksum = self._local_checksum(checksum_alg, file)
-            if self.local:
-                parsed_url = urlparse(remote_url)
-                remote_checksum = self._local_checksum(checksum_alg, parsed_url.path)
-            else:
-                try:
-                    remote_checksum = to_text(self._getContent(remote_url + '.'
+ checksum_alg, "Failed to retrieve checksum", False), errors='strict') - except UnicodeError as e: - return "Cannot retrieve a valid %s checksum from %s: %s" % (checksum_alg, remote_url, to_native(e)) - if not remote_checksum: - return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) - try: - # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename - _remote_checksum = remote_checksum.split(None, 1)[0] - remote_checksum = _remote_checksum - # remote_checksum is empty so we continue and keep original checksum string - # This should not happen since we check for remote_checksum before - except IndexError: - pass - if local_checksum.lower() == remote_checksum.lower(): - return None - else: - return "Checksum does not match: we computed " + local_checksum + " but the repository states " + remote_checksum - - return "Path does not exist: " + file - - def _local_checksum(self, checksum_alg, file): - if checksum_alg.lower() == 'md5': - hash = hashlib.md5() - elif checksum_alg.lower() == 'sha1': - hash = hashlib.sha1() - else: - raise ValueError("Unknown checksum_alg %s" % checksum_alg) - with io.open(file, 'rb') as f: - for chunk in iter(lambda: f.read(8192), b''): - hash.update(chunk) - return hash.hexdigest() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - group_id=dict(required=True), - artifact_id=dict(required=True), - version=dict(default=None), - version_by_spec=dict(default=None), - classifier=dict(default=''), - extension=dict(default='jar'), - repository_url=dict(default='https://repo1.maven.org/maven2'), - username=dict(default=None, aliases=['aws_secret_key']), - password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']), - headers=dict(type='dict'), - force_basic_auth=dict(default=False, type='bool'), - state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state - timeout=dict(default=10, type='int'), - dest=dict(type="path", required=True), - validate_certs=dict(required=False, default=True, type='bool'), - client_cert=dict(type="path", required=False), - client_key=dict(type="path", required=False), - keep_name=dict(required=False, default=False, type='bool'), - verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), - checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']), - directory_mode=dict(type='str'), - ), - add_file_common_args=True, - mutually_exclusive=([('version', 'version_by_spec')]) - ) - - if not HAS_LXML_ETREE: - module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) - - if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION: - module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR) - - repository_url = module.params["repository_url"] - if not repository_url: - repository_url = "https://repo1.maven.org/maven2" - try: - parsed_url = urlparse(repository_url) - except AttributeError as e: - module.fail_json(msg='url parsing went wrong %s' % e) - - local = parsed_url.scheme == "file" - - if parsed_url.scheme == 's3' and not HAS_BOTO: - module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'), - exception=BOTO_IMP_ERR) - - group_id = module.params["group_id"] - artifact_id = module.params["artifact_id"] - version = module.params["version"] - version_by_spec = module.params["version_by_spec"] - classifier = module.params["classifier"] - extension = module.params["extension"] - headers 
= module.params['headers'] - state = module.params["state"] - dest = module.params["dest"] - b_dest = to_bytes(dest, errors='surrogate_or_strict') - keep_name = module.params["keep_name"] - verify_checksum = module.params["verify_checksum"] - verify_download = verify_checksum in ['download', 'always'] - verify_change = verify_checksum in ['change', 'always'] - checksum_alg = module.params["checksum_alg"] - - downloader = MavenDownloader(module, repository_url, local, headers) - - if not version_by_spec and not version: - version = "latest" - - try: - artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension) - except ValueError as e: - module.fail_json(msg=e.args[0]) - - changed = False - prev_state = "absent" - - if dest.endswith(os.sep): - b_dest = to_bytes(dest, errors='surrogate_or_strict') - if not os.path.exists(b_dest): - (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest) - os.makedirs(b_dest) - directory_args = module.load_file_common_arguments(module.params) - directory_mode = module.params["directory_mode"] - if directory_mode is not None: - directory_args['mode'] = directory_mode - else: - directory_args['mode'] = None - changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) - - if os.path.isdir(b_dest): - version_part = version - if version == 'latest': - version_part = downloader.find_latest_version_available(artifact) - elif version_by_spec: - version_part = downloader.find_version_by_spec(artifact) - - filename = "{artifact_id}{version_part}{classifier}.{extension}".format( - artifact_id=artifact_id, - version_part="-{0}".format(version_part) if keep_name else "", - classifier="-{0}".format(classifier) if classifier else "", - extension=extension - ) - dest = posixpath.join(dest, filename) - - b_dest = to_bytes(dest, errors='surrogate_or_strict') - - if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)): - prev_state = "present" - - if prev_state == "absent": - try: - download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest, checksum_alg) - if download_error is None: - changed = True - else: - module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error) - except ValueError as e: - module.fail_json(msg=e.args[0]) - - try: - file_args = module.load_file_common_arguments(module.params, path=dest) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. 
- module.params['path'] = dest - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - if changed: - module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, - extension=extension, repository_url=repository_url, changed=changed) - else: - module.exit_json(state=state, dest=dest, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py deleted file mode 100644 index de316d397f..0000000000 --- a/plugins/modules/packaging/language/npm.py +++ /dev/null @@ -1,334 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Chris Hoffman -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: npm -short_description: Manage node.js packages with npm -description: - - Manage node.js packages with Node Package Manager (npm). -author: "Chris Hoffman (@chrishoffman)" -options: - name: - description: - - The name of a node.js library to install. - type: str - required: false - path: - description: - - The base path where to install the node.js libraries. - type: path - required: false - version: - description: - - The version to be installed. - type: str - required: false - global: - description: - - Install the node.js library globally. - required: false - default: no - type: bool - executable: - description: - - The executable location for npm. - - This is useful if you are using a version manager, such as nvm. - type: path - required: false - ignore_scripts: - description: - - Use the C(--ignore-scripts) flag when installing. - required: false - type: bool - default: no - unsafe_perm: - description: - - Use the C(--unsafe-perm) flag when installing. - type: bool - default: no - ci: - description: - - Install packages based on package-lock file, same as running C(npm ci). - type: bool - default: no - production: - description: - - Install dependencies in production mode, excluding devDependencies. - required: false - type: bool - default: no - registry: - description: - - The registry to install modules from. - required: false - type: str - state: - description: - - The state of the node.js library. - required: false - type: str - default: present - choices: [ "present", "absent", "latest" ] - no_optional: - description: - - Use the C(--no-optional) flag when installing. - type: bool - default: no - version_added: 2.0.0 - no_bin_links: - description: - - Use the C(--no-bin-links) flag when installing. - type: bool - default: no - version_added: 2.5.0 -requirements: - - npm installed in bin path (recommended /usr/local/bin) -''' - -EXAMPLES = r''' -- name: Install "coffee-script" node.js package. - community.general.npm: - name: coffee-script - path: /app/location - -- name: Install "coffee-script" node.js package on version 1.6.1. - community.general.npm: - name: coffee-script - version: '1.6.1' - path: /app/location - -- name: Install "coffee-script" node.js package globally. - community.general.npm: - name: coffee-script - global: yes - -- name: Remove the globally package "coffee-script". - community.general.npm: - name: coffee-script - global: yes - state: absent - -- name: Install "coffee-script" node.js package from custom registry. 
- community.general.npm: - name: coffee-script - registry: 'http://registry.mysite.com' - -- name: Install packages based on package.json. - community.general.npm: - path: /app/location - -- name: Update packages based on package.json to their latest version. - community.general.npm: - path: /app/location - state: latest - -- name: Install packages based on package.json using the npm installed with nvm v0.10.1. - community.general.npm: - path: /app/location - executable: /opt/nvm/v0.10.1/bin/npm - state: present -''' - -import json -import os -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -class Npm(object): - def __init__(self, module, **kwargs): - self.module = module - self.glbl = kwargs['glbl'] - self.name = kwargs['name'] - self.version = kwargs['version'] - self.path = kwargs['path'] - self.registry = kwargs['registry'] - self.production = kwargs['production'] - self.ignore_scripts = kwargs['ignore_scripts'] - self.unsafe_perm = kwargs['unsafe_perm'] - self.state = kwargs['state'] - self.no_optional = kwargs['no_optional'] - self.no_bin_links = kwargs['no_bin_links'] - - if kwargs['executable']: - self.executable = kwargs['executable'].split(' ') - else: - self.executable = [module.get_bin_path('npm', True)] - - if kwargs['version'] and self.state != 'absent': - self.name_version = self.name + '@' + str(self.version) - else: - self.name_version = self.name - - def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = self.executable + args - - if self.glbl: - cmd.append('--global') - if self.production and ('install' in cmd or 'update' in cmd): - cmd.append('--production') - if self.ignore_scripts: - cmd.append('--ignore-scripts') - if self.unsafe_perm: - cmd.append('--unsafe-perm') - if self.name_version and add_package_name: - cmd.append(self.name_version) - if self.registry: - cmd.append('--registry') - cmd.append(self.registry) - if self.no_optional: - cmd.append('--no-optional') - if self.no_bin_links: - cmd.append('--no-bin-links') - - # If path is specified, cd into that path and run the command. 
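# Illustrative values (not from the module): with name='coffee-script',
# version='1.6.1' and path='/app/location', the install call amounts to
# running `npm install coffee-script@1.6.1` with cwd='/app/location';
# the directory is created first below if it does not exist yet.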
- cwd = None - if self.path: - if not os.path.exists(self.path): - os.makedirs(self.path) - if not os.path.isdir(self.path): - self.module.fail_json(msg="path %s is not a directory" % self.path) - cwd = self.path - - rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) - return out - return '' - - def list(self): - cmd = ['list', '--json', '--long'] - - installed = list() - missing = list() - data = {} - try: - data = json.loads(self._exec(cmd, True, False, False) or '{}') - except (getattr(json, 'JSONDecodeError', ValueError)) as e: - self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) - if 'dependencies' in data: - for dep, props in data['dependencies'].items(): - - if 'missing' in props and props['missing']: - missing.append(dep) - elif 'invalid' in props and props['invalid']: - missing.append(dep) - else: - installed.append(dep) - if 'version' in props and props['version']: - dep_version = dep + '@' + str(props['version']) - installed.append(dep_version) - if self.name_version and self.name_version not in installed: - missing.append(self.name) - # Named dependency not installed - else: - missing.append(self.name) - - return installed, missing - - def install(self): - return self._exec(['install']) - - def ci_install(self): - return self._exec(['ci']) - - def update(self): - return self._exec(['update']) - - def uninstall(self): - return self._exec(['uninstall']) - - def list_outdated(self): - outdated = list() - data = self._exec(['outdated'], True, False) - for dep in data.splitlines(): - if dep: - # node.js v0.10.22 changed the `npm outdated` module separator - # from "@" to " ". Split on both for backwards compatibility. - pkg, other = re.split(r'\s|@', dep, 1) - outdated.append(pkg) - - return outdated - - -def main(): - arg_spec = dict( - name=dict(default=None, type='str'), - path=dict(default=None, type='path'), - version=dict(default=None, type='str'), - production=dict(default=False, type='bool'), - executable=dict(default=None, type='path'), - registry=dict(default=None, type='str'), - state=dict(default='present', choices=['present', 'absent', 'latest']), - ignore_scripts=dict(default=False, type='bool'), - unsafe_perm=dict(default=False, type='bool'), - ci=dict(default=False, type='bool'), - no_optional=dict(default=False, type='bool'), - no_bin_links=dict(default=False, type='bool'), - ) - arg_spec['global'] = dict(default=False, type='bool') - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - path = module.params['path'] - version = module.params['version'] - glbl = module.params['global'] - production = module.params['production'] - executable = module.params['executable'] - registry = module.params['registry'] - state = module.params['state'] - ignore_scripts = module.params['ignore_scripts'] - unsafe_perm = module.params['unsafe_perm'] - ci = module.params['ci'] - no_optional = module.params['no_optional'] - no_bin_links = module.params['no_bin_links'] - - if not path and not glbl: - module.fail_json(msg='path must be specified when not using global') - if state == 'absent' and not name: - module.fail_json(msg='uninstalling a package is only available for named packages') - - npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, - executable=executable, registry=registry, ignore_scripts=ignore_scripts, - unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links) - - changed = False - if 
ci:
-        npm.ci_install()
-        changed = True
-    elif state == 'present':
-        installed, missing = npm.list()
-        if missing:
-            changed = True
-            npm.install()
-    elif state == 'latest':
-        installed, missing = npm.list()
-        outdated = npm.list_outdated()
-        if missing:
-            changed = True
-            npm.install()
-        if outdated:
-            changed = True
-            npm.update()
-    else:  # absent
-        installed, missing = npm.list()
-        if name in installed:
-            changed = True
-            npm.uninstall()
-
-    module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/language/pear.py b/plugins/modules/packaging/language/pear.py
deleted file mode 100644
index e8e36b3c56..0000000000
--- a/plugins/modules/packaging/language/pear.py
+++ /dev/null
@@ -1,319 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012, Afterburn
-# (c) 2013, Aaron Bull Schaefer
-# (c) 2015, Jonathan Lestrelin
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: pear
-short_description: Manage pear/pecl packages
-description:
-    - Manage PHP packages with the pear package manager.
-author:
-    - Jonathan Lestrelin (@jle64)
-options:
-    name:
-        type: str
-        description:
-            - Name of the package to install, upgrade, or remove.
-        required: true
-        aliases: [pkg]
-    state:
-        type: str
-        description:
-            - Desired state of the package.
-        default: "present"
-        choices: ["present", "installed", "latest", "absent", "removed"]
-    executable:
-        type: path
-        description:
-            - Path to the pear executable.
-    prompts:
-        description:
-            - List of regular expressions used to detect prompts during pear package installation, so that the expected question can be answered.
-            - Prompts will be processed in the same order as the packages list.
-            - You can optionally specify an answer to any question in the list.
-            - If no answer is provided, the list item will only contain the regular expression.
-            - "To specify an answer, the item must be a dict with the regular expression as key and the answer as value, for example C(my_regular_expression: 'an_answer')."
-            - You can provide a list containing items with or without answers.
-            - A prompt list that is shorter or longer than the packages list is accepted, but issues a warning.
-            - To mark a package in the middle of the list as not needing a prompt, use C(null).
-        type: list
-        elements: raw
-        version_added: 0.2.0
-'''

-EXAMPLES = r'''
-- name: Install pear package
-  community.general.pear:
-    name: Net_URL2
-    state: present
-
-- name: Install pecl package
-  community.general.pear:
-    name: pecl/json_post
-    state: present
-
-- name: Install pecl package with expected prompt
-  community.general.pear:
-    name: pecl/apcu
-    state: present
-    prompts:
-      - (.*)Enable internal debugging in APCu \[no\]
-
-- name: Install pecl package with expected prompt and an answer
-  community.general.pear:
-    name: pecl/apcu
-    state: present
-    prompts:
-      - (.*)Enable internal debugging in APCu \[no\]: "yes"
-
-- name: Install multiple pear/pecl packages at once with prompts.
-    Prompts are processed in the same order as the packages.
-    If there are more prompts than packages, the extra prompts are ignored.
-    If there are more packages than prompts, the extra packages are installed without expecting a prompt.
-  community.general.pear:
-    name: pecl/gnupg, pecl/apcu
-    state: present
-    prompts:
-      - I am a test prompt because gnupg does not ask anything
-      - (.*)Enable internal debugging in APCu \[no\]: "yes"
-
-- name: Install multiple pear/pecl packages at once, skipping the first prompt.
-    Prompts are processed in the same order as the packages.
-    If there are more prompts than packages, the extra prompts are ignored.
-    If there are more packages than prompts, the extra packages are installed without expecting a prompt.
-  community.general.pear:
-    name: pecl/gnupg, pecl/apcu
-    state: present
-    prompts:
-      - null
-      - (.*)Enable internal debugging in APCu \[no\]: "yes"
-
-- name: Upgrade package
-  community.general.pear:
-    name: Net_URL2
-    state: latest
-
-- name: Remove packages
-  community.general.pear:
-    name: Net_URL2,pecl/json_post
-    state: absent
-'''
-
-import os
-
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.basic import AnsibleModule
-
-
-def get_local_version(pear_output):
-    """Take pear remote-info output and get the installed version"""
-    lines = pear_output.split('\n')
-    for line in lines:
-        if 'Installed ' in line:
-            installed = line.rsplit(None, 1)[-1].strip()
-            if installed == '-':
-                continue
-            return installed
-    return None
-
-
-def _get_pear_path(module):
-    if module.params['executable'] and os.path.isfile(module.params['executable']):
-        result = module.params['executable']
-    else:
-        result = module.get_bin_path('pear', True, [module.params['executable']])
-    return result
-
-
-def get_repository_version(pear_output):
-    """Take pear remote-info output and get the latest version"""
-    lines = pear_output.split('\n')
-    for line in lines:
-        if 'Latest ' in line:
-            return line.rsplit(None, 1)[-1].strip()
-    return None
-
-
-def query_package(module, name, state="present"):
-    """Query the package status in both the local system and the repository.
-    Returns a boolean to indicate if the package is installed,
-    and a second boolean to indicate if the package is up-to-date."""
-    if state == "present":
-        lcmd = "%s info %s" % (_get_pear_path(module), name)
-        lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
-        if lrc != 0:
-            # package is not installed locally
-            return False, False
-
-        rcmd = "%s remote-info %s" % (_get_pear_path(module), name)
-        rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
-
-        # get the version installed locally (if any)
-        lversion = get_local_version(rstdout)
-
-        # get the version in the repository
-        rversion = get_repository_version(rstdout)
-
-        if rrc == 0:
-            # Return True to indicate that the package is installed locally,
-            # and the result of the version number comparison
-            # to determine if the package is up-to-date.
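            # A quick sketch of the parsing this comparison relies on; the sample
            # output below is illustrative, not captured from a real
            # `pear remote-info` run:
            #
            #     out = "Latest      1.0.5\nInstalled   1.0.3\n"
            #     get_repository_version(out)  # -> '1.0.5'
            #     get_local_version(out)       # -> '1.0.3'
            #     lversion == rversion         # -> False, i.e. not up-to-date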
-            return True, (lversion == rversion)
-
-    return False, False
-
-
-def remove_packages(module, packages):
-    remove_c = 0
-    # Use a for loop so that, on error, we can report which package failed
-    for package in packages:
-        # Query the package first, to see if we even need to remove
-        installed, updated = query_package(module, package)
-        if not installed:
-            continue
-
-        cmd = "%s uninstall %s" % (_get_pear_path(module), package)
-        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-
-        if rc != 0:
-            module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
-
-        remove_c += 1
-
-    if remove_c > 0:
-        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
-
-    module.exit_json(changed=False, msg="package(s) already absent")
-
-
-def install_packages(module, state, packages, prompts):
-    install_c = 0
-    has_prompt = bool(prompts)
-    default_stdin = "\n"
-
-    if has_prompt:
-        nb_prompts = len(prompts)
-        nb_packages = len(packages)
-
-        if nb_prompts > 0 and (nb_prompts != nb_packages):
-            if nb_prompts > nb_packages:
-                diff = nb_prompts - nb_packages
-                msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
-            else:
-                diff = nb_packages - nb_prompts
-                msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" \
-                    % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
-            module.warn(msg)
-
-        # Prepare the prompt answers according to each item's type
-        tmp_prompts = []
-        for _item in prompts:
-            # If the current item is a dict, we expect its key to be the prompt regex and its value to be the answer
-            # We also expect here that the dict only has ONE key and the first key will be taken
-            if isinstance(_item, dict):
-                key = list(_item.keys())[0]
-                answer = _item[key] + "\n"
-
-                tmp_prompts.append((key, answer))
-            elif not _item:
-                tmp_prompts.append((None, default_stdin))
-            else:
-                tmp_prompts.append((_item, default_stdin))
-        prompts = tmp_prompts
-    for i, package in enumerate(packages):
-        # if the package is installed and state == present
-        # or state == latest and is up-to-date then skip
-        installed, updated = query_package(module, package)
-        if installed and (state == 'present' or (state == 'latest' and updated)):
-            continue
-
-        if state == 'present':
-            command = 'install'
-
-        if state == 'latest':
-            command = 'upgrade'
-
-        if has_prompt and i < len(prompts):
-            prompt_regex = prompts[i][0]
-            data = prompts[i][1]
-        else:
-            prompt_regex = None
-            data = default_stdin
-
-        cmd = "%s %s %s" % (_get_pear_path(module), command, package)
-        rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
-        if rc != 0:
-            module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
-
-        install_c += 1
-
-    if install_c > 0:
-        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
-
-    module.exit_json(changed=False, msg="package(s) already installed")
-
-
-def check_packages(module, packages, state):
-    would_be_changed = []
-    for package in packages:
-        installed, updated = query_package(module, package)
-        if ((state in ["present", "latest"] and not installed) or
-                (state == "absent" and installed) or
-                (state == "latest" and not updated)):
-            would_be_changed.append(package)
-    if would_be_changed:
-        if state == "absent":
-            state = "removed"
-        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
-            
len(would_be_changed), state))
-    else:
-        module.exit_json(changed=False, msg="package(s) already %s" % state)
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(aliases=['pkg'], required=True),
-            state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
-            executable=dict(default=None, required=False, type='path'),
-            prompts=dict(default=None, required=False, type='list', elements='raw'),
-        ),
-        supports_check_mode=True)
-
-    p = module.params
-
-    # normalize the state parameter
-    if p['state'] in ['present', 'installed']:
-        p['state'] = 'present'
-    elif p['state'] in ['absent', 'removed']:
-        p['state'] = 'absent'
-
-    if p['name']:
-        pkgs = p['name'].split(',')
-
-        pkg_files = []
-        for i, pkg in enumerate(pkgs):
-            pkg_files.append(None)
-
-        if module.check_mode:
-            check_packages(module, pkgs, p['state'])
-
-        if p['state'] in ['present', 'latest']:
-            install_packages(module, p['state'], pkgs, p["prompts"])
-        elif p['state'] == 'absent':
-            remove_packages(module, pkgs)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/language/pip_package_info.py b/plugins/modules/packaging/language/pip_package_info.py
deleted file mode 100644
index 25825cefb1..0000000000
--- a/plugins/modules/packaging/language/pip_package_info.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# started out with AWX's scan_packages module
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: pip_package_info
-short_description: pip package information
-description:
-  - Return information about installed pip packages.
-options:
-  clients:
-    description:
-      - A list of the pip executables that will be used to get the packages.
-        They can be supplied with the full path or just the executable name, for example C(pip3.7).
-    default: ['pip']
-    required: false
-    type: list
-    elements: path
-requirements:
-  - The requested pip executables must be installed on the target.
-author:
-  - Matthew Jones (@matburt)
-  - Brian Coca (@bcoca)
-  - Adam Miller (@maxamillion)
-'''
-
-EXAMPLES = '''
-- name: Just get the list from default pip
-  community.general.pip_package_info:
-
-- name: Get the facts for default pip, pip2 and pip3.6
-  community.general.pip_package_info:
-    clients: ['pip', 'pip2', 'pip3.6']
-
-- name: Get from specific paths (virtualenvs?)
- community.general.pip_package_info: - clients: '/home/me/projec42/python/pip3.5' -''' - -RETURN = ''' -packages: - description: a dictionary of installed package data - returned: always - type: dict - contains: - python: - description: A dictionary with each pip client which then contains a list of dicts with python package information - returned: always - type: dict - sample: - "packages": { - "pip": { - "Babel": [ - { - "name": "Babel", - "source": "pip", - "version": "2.6.0" - } - ], - "Flask": [ - { - "name": "Flask", - "source": "pip", - "version": "1.0.2" - } - ], - "Flask-SQLAlchemy": [ - { - "name": "Flask-SQLAlchemy", - "source": "pip", - "version": "2.3.2" - } - ], - "Jinja2": [ - { - "name": "Jinja2", - "source": "pip", - "version": "2.10" - } - ], - }, - } -''' -import json -import os - -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.facts.packages import CLIMgr - - -class PIP(CLIMgr): - - def __init__(self, pip): - - self.CLI = pip - - def list_installed(self): - global module - rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json']) - if rc != 0: - raise Exception("Unable to list packages rc=%s : %s" % (rc, err)) - return json.loads(out) - - def get_package_details(self, package): - package['source'] = self.CLI - return package - - -def main(): - - # start work - global module - module = AnsibleModule( - argument_spec=dict( - clients=dict(type='list', elements='path', default=['pip']), - ), - supports_check_mode=True) - packages = {} - results = {'packages': {}} - clients = module.params['clients'] - - found = 0 - for pip in clients: - - if not os.path.basename(pip).startswith('pip'): - module.warn('Skipping invalid pip client: %s' % (pip)) - continue - try: - pip_mgr = PIP(pip) - if pip_mgr.is_available(): - found += 1 - packages[pip] = pip_mgr.get_packages() - except Exception as e: - module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e))) - continue - - if found == 0: - module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients) - - # return info - results['packages'] = packages - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/pipx.py b/plugins/modules/packaging/language/pipx.py deleted file mode 100644 index f771cf6025..0000000000 --- a/plugins/modules/packaging/language/pipx.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2021, Alexei Znamensky -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pipx -short_description: Manages applications installed with pipx -version_added: 3.8.0 -description: - - Manage Python applications installed in isolated virtualenvs using pipx. -options: - state: - type: str - choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all] - default: install - description: - - Desired state for the application. - - The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively. - name: - type: str - description: - - > - The name of the application to be installed. It must to be a simple package name. - For passing package specifications or installing from URLs or directories, - please use the I(source) option. 
-  source:
-    type: str
-    description:
-      - >
-        The application source, such as a package with a version specifier, a URL,
-        a directory, or any other specification accepted by C(pipx). See the C(pipx)
-        documentation for more details.
-      - When specified, the C(pipx) command will use I(source) instead of I(name).
-  install_deps:
-    description:
-      - Include applications of dependent packages.
-      - Only used when I(state=install) or I(state=upgrade).
-    type: bool
-    default: false
-  inject_packages:
-    description:
-      - Packages to be injected into an existing virtual environment.
-      - Only used when I(state=inject).
-    type: list
-    elements: str
-  force:
-    description:
-      - Force modification of the application's virtual environment. See C(pipx) for details.
-      - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject).
-    type: bool
-    default: false
-  include_injected:
-    description:
-      - Upgrade the injected packages along with the application.
-      - Only used when I(state=upgrade) or I(state=upgrade_all).
-    type: bool
-    default: false
-  index_url:
-    description:
-      - Base URL of Python Package Index.
-      - Only used when I(state=install), I(state=upgrade), or I(state=inject).
-    type: str
-  python:
-    description:
-      - Python version to be used when creating the application virtual environment. Must be 3.6+.
-      - Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all).
-    type: str
-  executable:
-    description:
-      - Path to the C(pipx) executable installed on the system.
-      - >
        If not specified, the module will use C(python -m pipx) to run the tool,
        using the same Python interpreter as Ansible itself.
-    type: path
-notes:
-  - This module does not install the C(pipx) Python package, however that can be easily done with the module M(ansible.builtin.pip).
-  - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
-  - Please note that C(pipx) requires Python 3.6 or above.
-  - >
    This first implementation does not verify whether a specified version constraint has been installed or not.
    Hence, when using version operators, the C(pipx) module will always try to execute the operation,
    even when the application was previously installed.
    This feature will be added in the future.
-  - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
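# Aside (added for clarity; the version specifier below is a made-up example):
# the I(name)/I(source) split maps onto the pipx CLI roughly as follows.
#
#   name: tox                ->  pipx install tox
#
#   name: tox                ->  pipx install tox==3.24.0
#   source: tox==3.24.0
#
# I(name) remains the key under which the virtual environment is tracked, while
# I(source) is what is actually passed to C(pipx).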
-author: - - "Alexei Znamensky (@russoz)" -''' - -EXAMPLES = ''' -- name: Install tox - community.general.pipx: - name: tox - -- name: Install tox from git repository - community.general.pipx: - name: tox - source: git+https://github.com/tox-dev/tox.git - -- name: Upgrade tox - community.general.pipx: - name: tox - state: upgrade - -- name: Reinstall black with specific Python version - community.general.pipx: - name: black - state: reinstall - python: 3.7 - -- name: Uninstall pycowsay - community.general.pipx: - name: pycowsay - state: absent -''' - - -import json - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - CmdStateModuleHelper, ArgFormat, ModuleHelperException -) -from ansible.module_utils.facts.compat import ansible_facts - - -_state_map = dict( - present='install', - absent='uninstall', - uninstall_all='uninstall-all', - upgrade_all='upgrade-all', - reinstall_all='reinstall-all', -) - - -class PipX(CmdStateModuleHelper): - output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] - module = dict( - argument_spec=dict( - state=dict(type='str', default='install', - choices=[ - 'present', 'absent', 'install', 'uninstall', 'uninstall_all', - 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all']), - name=dict(type='str'), - source=dict(type='str'), - install_deps=dict(type='bool', default=False), - inject_packages=dict(type='list', elements='str'), - force=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - index_url=dict(type='str'), - python=dict(type='str'), - executable=dict(type='path') - ), - required_if=[ - ('state', 'present', ['name']), - ('state', 'install', ['name']), - ('state', 'absent', ['name']), - ('state', 'uninstall', ['name']), - ('state', 'inject', ['name', 'inject_packages']), - ], - supports_check_mode=True, - ) - command_args_formats = dict( - state=dict(fmt=lambda v: [_state_map.get(v, v)]), - name_source=dict(fmt=lambda n, s: [s] if s else [n], stars=1), - install_deps=dict(fmt="--install-deps", style=ArgFormat.BOOLEAN), - inject_packages=dict(fmt=lambda v: v), - force=dict(fmt="--force", style=ArgFormat.BOOLEAN), - include_injected=dict(fmt="--include-injected", style=ArgFormat.BOOLEAN), - index_url=dict(fmt=('--index-url', '{0}'),), - python=dict(fmt=('--python', '{0}'),), - _list=dict(fmt=('list', '--include-injected', '--json'), style=ArgFormat.BOOLEAN), - ) - check_rc = True - run_command_fixed_options = dict( - environ_update={'USE_EMOJI': '0'} - ) - - def _retrieve_installed(self): - def process_list(rc, out, err): - if not out: - return {} - - results = {} - raw_data = json.loads(out) - for venv_name, venv in raw_data['venvs'].items(): - results[venv_name] = { - 'version': venv['metadata']['main_package']['package_version'], - 'injected': dict( - (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() - ), - } - return results - - installed = self.run_command(params=[{'_list': True}], process_output=process_list, - publish_rc=False, publish_out=False, publish_err=False, publish_cmd=False) - - if self.vars.name is not None: - app_list = installed.get(self.vars.name) - if app_list: - return {self.vars.name: app_list} - else: - return {} - - return installed - - def __init_module__(self): - if self.vars.executable: - self.command = [self.vars.executable] - else: - facts = ansible_facts(self.module, gather_subset=['python']) - self.command = [facts['python']['executable'], '-m', 'pipx'] - - self.vars.set('application', 
self._retrieve_installed(), change=True, diff=True)
-
-    def __quit_module__(self):
-        self.vars.application = self._retrieve_installed()
-
-    def state_install(self):
-        if not self.vars.application or self.vars.force:
-            self.changed = True
-            if not self.module.check_mode:
-                self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'python',
-                                         {'name_source': [self.vars.name, self.vars.source]}])
-
-    state_present = state_install
-
-    def state_upgrade(self):
-        if not self.vars.application:
-            raise ModuleHelperException(
-                "Trying to upgrade a non-existent application: {0}".format(self.vars.name))
-        if self.vars.force:
-            self.changed = True
-        if not self.module.check_mode:
-            self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'name'])
-
-    def state_uninstall(self):
-        if self.vars.application and not self.module.check_mode:
-            self.run_command(params=['state', 'name'])
-
-    state_absent = state_uninstall
-
-    def state_reinstall(self):
-        if not self.vars.application:
-            raise ModuleHelperException(
-                "Trying to reinstall a non-existent application: {0}".format(self.vars.name))
-        self.changed = True
-        if not self.module.check_mode:
-            self.run_command(params=['state', 'name', 'python'])
-
-    def state_inject(self):
-        if not self.vars.application:
-            raise ModuleHelperException(
-                "Trying to inject packages into a non-existent application: {0}".format(self.vars.name))
-        if self.vars.force:
-            self.changed = True
-        if not self.module.check_mode:
-            self.run_command(params=['state', 'index_url', 'force', 'name', 'inject_packages'])
-
-    def state_uninstall_all(self):
-        if not self.module.check_mode:
-            self.run_command(params=['state'])
-
-    def state_reinstall_all(self):
-        if not self.module.check_mode:
-            self.run_command(params=['state', 'python'])
-
-    def state_upgrade_all(self):
-        if self.vars.force:
-            self.changed = True
-        if not self.module.check_mode:
-            self.run_command(params=['state', 'include_injected', 'force'])
-
-
-def main():
-    PipX.execute()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/language/yarn.py b/plugins/modules/packaging/language/yarn.py
deleted file mode 100644
index 77489e240f..0000000000
--- a/plugins/modules/packaging/language/yarn.py
+++ /dev/null
@@ -1,394 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017 David Gunter
-# Copyright (c) 2017 Chris Hoffman
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: yarn
-short_description: Manage node.js packages with Yarn
-description:
-  - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/).
-author:
-  - "David Gunter (@verkaufer)"
-  - "Chris Hoffman (@chrishoffman), creator of the NPM Ansible module"
-options:
-  name:
-    type: str
-    description:
-      - The name of a node.js library to install.
-      - If omitted, all packages in package.json are installed.
-      - To install globally from a local node.js library, prepend "file:" to the path of the library.
-    required: false
-  path:
-    type: path
-    description:
-      - The base path where Node.js libraries will be installed.
-      - This is where the node_modules folder lives.
-    required: false
-  version:
-    type: str
-    description:
-      - The version of the library to be installed.
-      - Must be in semver format.
If "latest" is desired, use "state" arg instead - required: false - global: - description: - - Install the node.js library globally - required: false - default: no - type: bool - executable: - type: path - description: - - The executable location for yarn. - required: false - ignore_scripts: - description: - - Use the --ignore-scripts flag when installing. - required: false - type: bool - default: no - production: - description: - - Install dependencies in production mode. - - Yarn will ignore any dependencies under devDependencies in package.json - required: false - type: bool - default: no - registry: - type: str - description: - - The registry to install modules from. - required: false - state: - type: str - description: - - Installation state of the named node.js library - - If absent is selected, a name option must be provided - required: false - default: present - choices: [ "present", "absent", "latest" ] -requirements: - - Yarn installed in bin path (typically /usr/local/bin) -''' - -EXAMPLES = ''' -- name: Install "imagemin" node.js package. - community.general.yarn: - name: imagemin - path: /app/location - -- name: Install "imagemin" node.js package on version 5.3.1 - community.general.yarn: - name: imagemin - version: '5.3.1' - path: /app/location - -- name: Install "imagemin" node.js package globally. - community.general.yarn: - name: imagemin - global: yes - -- name: Remove the globally-installed package "imagemin". - community.general.yarn: - name: imagemin - global: yes - state: absent - -- name: Install "imagemin" node.js package from custom registry. - community.general.yarn: - name: imagemin - registry: 'http://registry.mysite.com' - -- name: Install packages based on package.json. - community.general.yarn: - path: /app/location - -- name: Update all packages in package.json to their latest version. - community.general.yarn: - path: /app/location - state: latest -''' - -RETURN = ''' -changed: - description: Whether Yarn changed any package data - returned: always - type: bool - sample: true -msg: - description: Provides an error message if Yarn syntax was incorrect - returned: failure - type: str - sample: "Package must be explicitly named when uninstalling." -invocation: - description: Parameters and values used during execution - returned: success - type: dict - sample: { - "module_args": { - "executable": null, - "globally": false, - "ignore_scripts": false, - "name": null, - "path": "/some/path/folder", - "production": false, - "registry": null, - "state": "present", - "version": null - } - } -out: - description: Output generated from Yarn with emojis removed. - returned: always - type: str - sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] - Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s." 
-''' - -import os -import re -import json - -from ansible.module_utils.basic import AnsibleModule - - -class Yarn(object): - - DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global' - - def __init__(self, module, **kwargs): - self.module = module - self.globally = kwargs['globally'] - self.name = kwargs['name'] - self.version = kwargs['version'] - self.path = kwargs['path'] - self.registry = kwargs['registry'] - self.production = kwargs['production'] - self.ignore_scripts = kwargs['ignore_scripts'] - - # Specify a version of package if version arg passed in - self.name_version = None - - if kwargs['executable']: - self.executable = kwargs['executable'].split(' ') - else: - self.executable = [module.get_bin_path('yarn', True)] - - if kwargs['version'] and self.name is not None: - self.name_version = self.name + '@' + str(self.version) - elif self.name is not None: - self.name_version = self.name - - def _exec(self, args, run_in_check_mode=False, check_rc=True): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - - if self.globally: - # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`) - args.insert(0, 'global') - - cmd = self.executable + args - - if self.production: - cmd.append('--production') - if self.ignore_scripts: - cmd.append('--ignore-scripts') - if self.registry: - cmd.append('--registry') - cmd.append(self.registry) - - # always run Yarn without emojis when called via Ansible - cmd.append('--no-emoji') - - # If path is specified, cd into that path and run the command. - cwd = None - if self.path and not self.globally: - if not os.path.exists(self.path): - # Module will make directory if not exists. - os.makedirs(self.path) - if not os.path.isdir(self.path): - self.module.fail_json(msg="Path provided %s is not a directory" % self.path) - cwd = self.path - - if not os.path.isfile(os.path.join(self.path, 'package.json')): - self.module.fail_json(msg="Package.json does not exist in provided path.") - - rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) - return out, err - - return(None, None) - - def list(self): - cmd = ['list', '--depth=0', '--json'] - - installed = list() - missing = list() - - if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): - missing.append(self.name) - return installed, missing - - result, error = self._exec(cmd, True, False) - - if error: - self.module.fail_json(msg=error) - - data = json.loads(result) - try: - dependencies = data['data']['trees'] - except KeyError: - missing.append(self.name) - return installed, missing - - for dep in dependencies: - name, version = dep['name'].rsplit('@', 1) - installed.append(name) - - if self.name not in installed: - missing.append(self.name) - - return installed, missing - - def install(self): - if self.name_version: - # Yarn has a separate command for installing packages by name... 
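        # With the --no-emoji flag appended by _exec(), the two branches below
        # amount to CLI calls along these lines (package and version purely
        # illustrative):
        #
        #     yarn add imagemin@5.3.1 --no-emoji
        #     yarn install --non-interactive --no-emoji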
- return self._exec(['add', self.name_version]) - # And one for installing all packages in package.json - return self._exec(['install', '--non-interactive']) - - def update(self): - return self._exec(['upgrade', '--latest']) - - def uninstall(self): - return self._exec(['remove', self.name]) - - def list_outdated(self): - outdated = list() - - if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): - return outdated - - cmd_result, err = self._exec(['outdated', '--json'], True, False) - if err: - self.module.fail_json(msg=err) - - if not cmd_result: - return outdated - - outdated_packages_data = cmd_result.splitlines()[1] - - data = json.loads(outdated_packages_data) - - try: - outdated_dependencies = data['data']['body'] - except KeyError: - return outdated - - for dep in outdated_dependencies: - # Outdated dependencies returned as a list of lists, where - # item at index 0 is the name of the dependency - outdated.append(dep[0]) - return outdated - - -def main(): - arg_spec = dict( - name=dict(default=None), - path=dict(default=None, type='path'), - version=dict(default=None), - production=dict(default=False, type='bool'), - executable=dict(default=None, type='path'), - registry=dict(default=None), - state=dict(default='present', choices=['present', 'absent', 'latest']), - ignore_scripts=dict(default=False, type='bool'), - ) - arg_spec['global'] = dict(default=False, type='bool') - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - path = module.params['path'] - version = module.params['version'] - globally = module.params['global'] - production = module.params['production'] - executable = module.params['executable'] - registry = module.params['registry'] - state = module.params['state'] - ignore_scripts = module.params['ignore_scripts'] - - # When installing globally, users should not be able to define a path for installation. - # Require a path if global is False, though! 
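    # Summary of the validation below (comments added for clarity):
    #
    #   path set,   global false -> install into <path>/node_modules
    #   path unset, global false -> fail: path must be specified
    #   path set,   global true  -> fail: cannot combine path with global
    #   path unset, global true  -> Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH is used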
- if path is None and globally is False: - module.fail_json(msg='Path must be specified when not using global arg') - elif path and globally is True: - module.fail_json(msg='Cannot specify path if doing global installation') - - if state == 'absent' and not name: - module.fail_json(msg='Package must be explicitly named when uninstalling.') - if state == 'latest': - version = 'latest' - - # When installing globally, use the defined path for global node_modules - if globally: - path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH - - yarn = Yarn(module, - name=name, - path=path, - version=version, - globally=globally, - production=production, - executable=executable, - registry=registry, - ignore_scripts=ignore_scripts) - - changed = False - out = '' - err = '' - if state == 'present': - - if not name: - changed = True - out, err = yarn.install() - else: - installed, missing = yarn.list() - if len(missing): - changed = True - out, err = yarn.install() - - elif state == 'latest': - - if not name: - changed = True - out, err = yarn.install() - else: - installed, missing = yarn.list() - outdated = yarn.list_outdated() - if len(missing): - changed = True - out, err = yarn.install() - if len(outdated): - changed = True - out, err = yarn.update() - else: - # state == absent - installed, missing = yarn.list() - if name in installed: - changed = True - out, err = yarn.uninstall() - - module.exit_json(changed=changed, out=out, err=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/apk.py b/plugins/modules/packaging/os/apk.py deleted file mode 100644 index 74b738de27..0000000000 --- a/plugins/modules/packaging/os/apk.py +++ /dev/null @@ -1,357 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Kevin Brebanov -# Based on pacman (Afterburn , Aaron Bull Schaefer ) -# and apt (Matthew Williams ) modules. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: apk -short_description: Manages apk packages -description: - - Manages I(apk) packages for Alpine Linux. -author: "Kevin Brebanov (@kbrebanov)" -options: - available: - description: - - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them) - if the currently installed package is no longer available from any repository. - type: bool - default: no - name: - description: - - A package name, like C(foo), or multiple packages, like C(foo, bar). - type: list - elements: str - no_cache: - description: - - Do not use any local cache path. - type: bool - default: no - version_added: 1.0.0 - repository: - description: - - A package repository or multiple repositories. - Unlike with the underlying apk command, this list will override the system repositories rather than supplement them. - type: list - elements: str - state: - description: - - Indicates the desired package(s) state. - - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias. - - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias. - - C(latest) ensures the package(s) is/are present and the latest version(s). - default: present - choices: [ "present", "absent", "latest", "installed", "removed" ] - type: str - update_cache: - description: - - Update repository indexes. Can be run with other steps or on it's own. 
- type: bool - default: no - upgrade: - description: - - Upgrade all installed packages to their latest version. - type: bool - default: no -notes: - - '"name" and "upgrade" are mutually exclusive.' - - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. -''' - -EXAMPLES = ''' -- name: Update repositories and install foo package - community.general.apk: - name: foo - update_cache: yes - -- name: Update repositories and install foo and bar packages - community.general.apk: - name: foo,bar - update_cache: yes - -- name: Remove foo package - community.general.apk: - name: foo - state: absent - -- name: Remove foo and bar packages - community.general.apk: - name: foo,bar - state: absent - -- name: Install the package foo - community.general.apk: - name: foo - state: present - -- name: Install the packages foo and bar - community.general.apk: - name: foo,bar - state: present - -- name: Update repositories and update package foo to latest version - community.general.apk: - name: foo - state: latest - update_cache: yes - -- name: Update repositories and update packages foo and bar to latest versions - community.general.apk: - name: foo,bar - state: latest - update_cache: yes - -- name: Update all installed packages to the latest versions - community.general.apk: - upgrade: yes - -- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available - community.general.apk: - available: yes - upgrade: yes - -- name: Update repositories as a separate step - community.general.apk: - update_cache: yes - -- name: Install package from a specific repository - community.general.apk: - name: foo - state: latest - update_cache: yes - repository: http://dl-3.alpinelinux.org/alpine/edge/main - -- name: Install package without using cache - community.general.apk: - name: foo - state: latest - no_cache: yes -''' - -RETURN = ''' -packages: - description: a list of packages that have been changed - returned: when packages have changed - type: list - sample: ['package', 'other-package'] -''' - -import re -# Import module snippets. -from ansible.module_utils.basic import AnsibleModule - - -def parse_for_packages(stdout): - packages = [] - data = stdout.split('\n') - regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)') - for l in data: - p = regex.search(l) - if p: - packages.append(p.group(1)) - return packages - - -def update_package_db(module, exit): - cmd = "%s update" % (APK_PATH) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc != 0: - module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) - elif exit: - module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr) - else: - return True - - -def query_toplevel(module, name): - # /etc/apk/world contains a list of top-level packages separated by ' ' or \n - # packages may contain repository (@) or version (=<>~) separator characters or start with negation ! 
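    # Illustration (added comment): for name "foo" the pattern compiled below
    # matches world entries such as
    #
    #     foo    foo=1.2.3    foo>1.0    foo@edge
    #
    # but not "foobar", because after the escaped name the line must either end
    # or continue with one of the separator characters "@=<>~".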
- regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$') - with open('/etc/apk/world') as f: - content = f.read().split() - for p in content: - if regex.search(p): - return True - return False - - -def query_package(module, name): - cmd = "%s -v info --installed %s" % (APK_PATH, name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc == 0: - return True - else: - return False - - -def query_latest(module, name): - cmd = "%s version %s" % (APK_PATH, name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name)) - match = re.search(search_pattern, stdout) - if match and match.group(2) == "<": - return False - return True - - -def query_virtual(module, name): - cmd = "%s -v info --description %s" % (APK_PATH, name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - search_pattern = r"^%s: virtual meta package" % (re.escape(name)) - if re.search(search_pattern, stdout): - return True - return False - - -def get_dependencies(module, name): - cmd = "%s -v info --depends %s" % (APK_PATH, name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - dependencies = stdout.split() - if len(dependencies) > 1: - return dependencies[1:] - else: - return [] - - -def upgrade_packages(module, available): - if module.check_mode: - cmd = "%s upgrade --simulate" % (APK_PATH) - else: - cmd = "%s upgrade" % (APK_PATH) - if available: - cmd = "%s --available" % cmd - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - packagelist = parse_for_packages(stdout) - if rc != 0: - module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist) - if re.search(r'^OK', stdout): - module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist) - - -def install_packages(module, names, state): - upgrade = False - to_install = [] - to_upgrade = [] - for name in names: - # Check if virtual package - if query_virtual(module, name): - # Get virtual package dependencies - dependencies = get_dependencies(module, name) - for dependency in dependencies: - if state == 'latest' and not query_latest(module, dependency): - to_upgrade.append(dependency) - else: - if not query_toplevel(module, name): - to_install.append(name) - elif state == 'latest' and not query_latest(module, name): - to_upgrade.append(name) - if to_upgrade: - upgrade = True - if not to_install and not upgrade: - module.exit_json(changed=False, msg="package(s) already installed") - packages = " ".join(to_install + to_upgrade) - if upgrade: - if module.check_mode: - cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages) - else: - cmd = "%s add --upgrade %s" % (APK_PATH, packages) - else: - if module.check_mode: - cmd = "%s add --simulate %s" % (APK_PATH, packages) - else: - cmd = "%s add %s" % (APK_PATH, packages) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - packagelist = parse_for_packages(stdout) - if rc != 0: - module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) - - -def remove_packages(module, names): - installed = [] - for name in names: - if query_package(module, name): - installed.append(name) - 
if not installed: - module.exit_json(changed=False, msg="package(s) already removed") - names = " ".join(installed) - if module.check_mode: - cmd = "%s del --purge --simulate %s" % (APK_PATH, names) - else: - cmd = "%s del --purge %s" % (APK_PATH, names) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - packagelist = parse_for_packages(stdout) - # Check to see if packages are still present because of dependencies - for name in installed: - if query_package(module, name): - rc = 1 - break - if rc != 0: - module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) - -# ========================================== -# Main control flow. - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']), - name=dict(type='list', elements='str'), - no_cache=dict(default=False, type='bool'), - repository=dict(type='list', elements='str'), - update_cache=dict(default=False, type='bool'), - upgrade=dict(default=False, type='bool'), - available=dict(default=False, type='bool'), - ), - required_one_of=[['name', 'update_cache', 'upgrade']], - mutually_exclusive=[['name', 'upgrade']], - supports_check_mode=True - ) - - # Set LANG env since we parse stdout - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - global APK_PATH - APK_PATH = module.get_bin_path('apk', required=True) - - p = module.params - - if p['no_cache']: - APK_PATH = "%s --no-cache" % (APK_PATH, ) - - # add repositories to the APK_PATH - if p['repository']: - for r in p['repository']: - APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r) - - # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - if p['state'] in ['absent', 'removed']: - p['state'] = 'absent' - - if p['update_cache']: - update_package_db(module, not p['name'] and not p['upgrade']) - - if p['upgrade']: - upgrade_packages(module, p['available']) - - if p['state'] in ['present', 'latest']: - install_packages(module, p['name'], p['state']) - elif p['state'] == 'absent': - remove_packages(module, p['name']) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/apt_repo.py b/plugins/modules/packaging/os/apt_repo.py deleted file mode 100644 index d196e03be1..0000000000 --- a/plugins/modules/packaging/os/apt_repo.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Mikhail Gordeev - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: apt_repo -short_description: Manage APT repositories via apt-repo -description: - - Manages APT repositories using apt-repo tool. - - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo -notes: - - This module works on ALT based distros. - - Does NOT support checkmode, due to a limitation in apt-repo tool. -options: - repo: - description: - - Name of the repository to add or remove. - required: true - type: str - state: - description: - - Indicates the desired repository state. 
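      # Aside (added for clarity): the module is a thin wrapper around the
      # apt-repo CLI, so the options map roughly to:
      #   state=present -> apt-repo add <repo>
      #   state=absent  -> apt-repo rm <repo>
      #   update=yes    -> apt-repo update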
-    choices: [ absent, present ]
-    default: present
-    type: str
-  remove_others:
-    description:
-      - Remove all repositories other than the one being added.
-      - Used if I(state=present).
-    type: bool
-    default: no
-  update:
-    description:
-      - Update the package database after changing repositories.
-    type: bool
-    default: no
-author:
-- Mikhail Gordeev (@obirvalger)
-'''
-
-EXAMPLES = '''
-- name: Remove all repositories
-  community.general.apt_repo:
-    repo: all
-    state: absent
-
-- name: Add repository `Sisyphus` and remove other repositories
-  community.general.apt_repo:
-    repo: Sisyphus
-    state: present
-    remove_others: yes
-
-- name: Add local repository `/space/ALT/Sisyphus` and update package cache
-  community.general.apt_repo:
-    repo: copy:///space/ALT/Sisyphus
-    state: present
-    update: yes
-'''
-
-RETURN = ''' # '''
-
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-
-APT_REPO_PATH = "/usr/bin/apt-repo"
-
-
-def apt_repo(module, *args):
-    """run apt-repo with args and return its output"""
-    # make args list to use in concatenation
-    args = list(args)
-    rc, out, err = module.run_command([APT_REPO_PATH] + args)
-
-    if rc != 0:
-        module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
-
-    return out
-
-
-def add_repo(module, repo):
-    """add a repository"""
-    apt_repo(module, 'add', repo)
-
-
-def rm_repo(module, repo):
-    """remove a repository"""
-    apt_repo(module, 'rm', repo)
-
-
-def set_repo(module, repo):
-    """add a repository and remove other repositories"""
-    # first add to validate repository
-    apt_repo(module, 'add', repo)
-    apt_repo(module, 'rm', 'all')
-    apt_repo(module, 'add', repo)
-
-
-def update(module):
-    """update package cache"""
-    apt_repo(module, 'update')
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            repo=dict(type='str', required=True),
-            state=dict(type='str', default='present', choices=['absent', 'present']),
-            remove_others=dict(type='bool', default=False),
-            update=dict(type='bool', default=False),
-        ),
-    )
-
-    if not os.path.exists(APT_REPO_PATH):
-        module.fail_json(msg='cannot find /usr/bin/apt-repo')
-
-    params = module.params
-    repo = params['repo']
-    state = params['state']
-    old_repositories = apt_repo(module)
-
-    if state == 'present':
-        if params['remove_others']:
-            set_repo(module, repo)
-        else:
-            add_repo(module, repo)
-    elif state == 'absent':
-        rm_repo(module, repo)
-
-    if params['update']:
-        update(module)
-
-    new_repositories = apt_repo(module)
-    changed = old_repositories != new_repositories
-    module.exit_json(changed=changed, repo=repo, state=state)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/os/apt_rpm.py b/plugins/modules/packaging/os/apt_rpm.py
deleted file mode 100644
index 3c200927ce..0000000000
--- a/plugins/modules/packaging/os/apt_rpm.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2013, Evgenii Terechkov
-# Written by Evgenii Terechkov
-# Based on urpmi module written by Philippe Makowski
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: apt_rpm
-short_description: apt_rpm package manager
-description:
-  - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
-options:
-  package:
-    description:
-      - List of packages to install, upgrade, or remove.
-    required: true
-    aliases: [ name, pkg ]
-    type: list
-    elements: str
-  state:
-    description:
-      - Indicates the desired package state.
-    choices: [ absent, present, installed, removed ]
-    default: present
-    type: str
-  update_cache:
-    description:
-      - Update the package database first with C(apt-get update).
-      - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0.
-    aliases: [ 'update-cache' ]
-    type: bool
-    default: no
-author:
-- Evgenii Terechkov (@evgkrsk)
-'''
-
-EXAMPLES = '''
-- name: Install package foo
-  community.general.apt_rpm:
-    pkg: foo
-    state: present
-
-- name: Install packages foo and bar
-  community.general.apt_rpm:
-    pkg:
-      - foo
-      - bar
-    state: present
-
-- name: Remove package foo
-  community.general.apt_rpm:
-    pkg: foo
-    state: absent
-
-- name: Remove packages foo and bar
-  community.general.apt_rpm:
-    pkg: foo,bar
-    state: absent
-
-# bar will be updated if a newer version exists
-- name: Update the package database and install bar
-  community.general.apt_rpm:
-    name: bar
-    state: present
-    update_cache: yes
-'''
-
-import json
-import os
-import shlex
-import sys
-
-from ansible.module_utils.basic import AnsibleModule
-
-APT_PATH = "/usr/bin/apt-get"
-RPM_PATH = "/usr/bin/rpm"
-
-
-def query_package(module, name):
-    # rpm -q returns 0 if the package is installed,
-    # 1 if it is not installed
-    rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
-    if rc == 0:
-        return True
-    else:
-        return False
-
-
-def query_package_provides(module, name):
-    # rpm -q returns 0 if the package is installed,
-    # 1 if it is not installed
-    rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
-    return rc == 0
-
-
-def update_package_db(module):
-    rc, out, err = module.run_command("%s update" % APT_PATH)
-
-    if rc != 0:
-        module.fail_json(msg="could not update package db: %s" % err)
-
-
-def remove_packages(module, packages):
-
-    remove_c = 0
-    # Use a for loop so that, on error, we can report which package failed
-    for package in packages:
-        # Query the package first, to see if we even need to remove
-        if not query_package(module, package):
-            continue
-
-        rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
-
-        if rc != 0:
-            module.fail_json(msg="failed to remove %s: %s" % (package, err))
-
-        remove_c += 1
-
-    if remove_c > 0:
-        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
-
-    module.exit_json(changed=False, msg="package(s) already absent")
-
-
-def install_packages(module, pkgspec):
-
-    packages = ""
-    for package in pkgspec:
-        if not query_package_provides(module, package):
-            packages += "'%s' " % package
-
-    if len(packages) != 0:
-
-        rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
-
-        installed = True
-        for package in pkgspec:
-            if not query_package_provides(module, package):
-                installed = False
-
-        # apt-rpm always has exit code 0 if --force is used
-        if rc or not installed:
-            module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
-        else:
-            module.exit_json(changed=True, msg="installed: %s" % packages)
-    else:
-        module.exit_json(changed=False)
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
-            update_cache=dict(
-                type='bool', default=False, aliases=['update-cache'],
-                deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]),
-            
package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']), - ), - ) - - if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): - module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") - - p = module.params - - if p['update_cache']: - update_package_db(module) - - packages = p['package'] - - if p['state'] in ['installed', 'present']: - install_packages(module, packages) - - elif p['state'] in ['absent', 'removed']: - remove_packages(module, packages) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/copr.py b/plugins/modules/packaging/os/copr.py deleted file mode 100644 index cb31e8c9fb..0000000000 --- a/plugins/modules/packaging/os/copr.py +++ /dev/null @@ -1,491 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Silvie Chlupova -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" ---- -module: copr -short_description: Manage one of the Copr repositories -version_added: 2.0.0 -description: This module can enable, disable or remove the specified repository. -author: Silvie Chlupova (@schlupov) -requirements: - - dnf - - dnf-plugins-core -notes: - - Supports C(check_mode). -options: - host: - description: The Copr host to work with. - default: copr.fedorainfracloud.org - type: str - protocol: - description: This indicate which protocol to use with the host. - default: https - type: str - name: - description: Copr directory name, for example C(@copr/copr-dev). - required: true - type: str - state: - description: - - Whether to set this project as C(enabled), C(disabled) or C(absent). - default: enabled - type: str - choices: [absent, enabled, disabled] - chroot: - description: - - The name of the chroot that you want to enable/disable/remove in the project, - for example C(epel-7-x86_64). Default chroot is determined by the operating system, - version of the operating system, and architecture on which the module is run. - type: str -""" - -EXAMPLES = r""" -- name: Enable project Test of the user schlupov - community.general.copr: - host: copr.fedorainfracloud.org - state: enabled - name: schlupov/Test - chroot: fedora-31-x86_64 - -- name: Remove project integration_tests of the group copr - community.general.copr: - state: absent - name: '@copr/integration_tests' -""" - -RETURN = r""" -repo_filename: - description: The name of the repo file in which the copr project information is stored. - returned: success - type: str - sample: _copr:copr.fedorainfracloud.org:group_copr:integration_tests.repo - -repo: - description: Path to the project on the host. - returned: success - type: str - sample: copr.fedorainfracloud.org/group_copr/integration_tests -""" - -import stat -import os -import traceback - -try: - import dnf - import dnf.cli - import dnf.repodict - from dnf.conf import Conf - HAS_DNF_PACKAGES = True -except ImportError: - DNF_IMP_ERR = traceback.format_exc() - HAS_DNF_PACKAGES = False - -from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils import distro # pylint: disable=import-error -from ansible.module_utils.basic import AnsibleModule # pylint: disable=import-error -from ansible.module_utils.urls import open_url # pylint: disable=import-error - - -class CoprModule(object): - """The class represents a copr module. 
- - The class contains methods that take care of the repository state of a project, - whether the project is enabled, disabled or missing. - """ - - ansible_module = None - - def __init__(self, host, name, state, protocol, chroot=None, check_mode=False): - self.host = host - self.name = name - self.state = state - self.chroot = chroot - self.protocol = protocol - self.check_mode = check_mode - if not chroot: - self.chroot = self.chroot_conf() - else: - self.chroot = chroot - self.get_base() - - @property - def short_chroot(self): - """str: Chroot (distribution-version-architecture) shorten to distribution-version.""" - return self.chroot.rsplit('-', 1)[0] - - @property - def arch(self): - """str: Target architecture.""" - chroot_parts = self.chroot.split("-") - return chroot_parts[-1] - - @property - def user(self): - """str: Copr user (this can also be the name of the group).""" - return self._sanitize_username(self.name.split("/")[0]) - - @property - def project(self): - """str: The name of the copr project.""" - return self.name.split("/")[1] - - @classmethod - def need_root(cls): - """Check if the module was run as root.""" - if os.geteuid() != 0: - cls.raise_exception("This command has to be run under the root user.") - - @classmethod - def get_base(cls): - """Initialize the configuration from dnf. - - Returns: - An instance of the BaseCli class. - """ - cls.base = dnf.cli.cli.BaseCli(Conf()) - return cls.base - - @classmethod - def raise_exception(cls, msg): - """Raise either an ansible exception or a python exception. - - Args: - msg: The message to be displayed when an exception is thrown. - """ - if cls.ansible_module: - raise cls.ansible_module.fail_json(msg=msg, changed=False) - raise Exception(msg) - - def _get(self, chroot): - """Send a get request to the server to obtain the necessary data. - - Args: - chroot: Chroot in the form of distribution-version. - - Returns: - Info about a repository and status code of the get request. - """ - repo_info = None - url = "{0}://{1}/coprs/{2}/repo/{3}/dnf.repo?arch={4}".format( - self.protocol, self.host, self.name, chroot, self.arch - ) - try: - r = open_url(url) - status_code = r.getcode() - repo_info = r.read().decode("utf-8") - except HTTPError as e: - status_code = e.getcode() - return repo_info, status_code - - def _download_repo_info(self): - """Download information about the repository. - - Returns: - Information about the repository. - """ - distribution, version = self.short_chroot.split('-', 1) - chroot = self.short_chroot - while True: - repo_info, status_code = self._get(chroot) - if repo_info: - return repo_info - if distribution == "rhel": - chroot = "centos-stream-8" - distribution = "centos" - elif distribution == "centos": - if version == "stream-8": - version = "8" - elif version == "stream-9": - version = "9" - chroot = "epel-{0}".format(version) - distribution = "epel" - else: - if str(status_code) != "404": - self.raise_exception( - "This repository does not have any builds yet so you cannot enable it now." - ) - else: - self.raise_exception( - "Chroot {0} does not exist in {1}".format(self.chroot, self.name) - ) - - def _enable_repo(self, repo_filename_path, repo_content=None): - """Write information to a repo file. - - Args: - repo_filename_path: Path to repository. - repo_content: Repository information from the host. - - Returns: - True, if the information in the repo file matches that stored on the host, - False otherwise. 
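-
-        Illustrative example (repo file path assumed, following the layout
-        composed in run())::
-
-            self._enable_repo("/etc/yum.repos.d/_copr:copr.fedorainfracloud.org:"
-                              "group_copr:integration_tests.repo")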
- """ - if not repo_content: - repo_content = self._download_repo_info() - if self._compare_repo_content(repo_filename_path, repo_content): - return False - if not self.check_mode: - with open(repo_filename_path, "w+") as file: - file.write(repo_content) - os.chmod( - repo_filename_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, - ) - return True - - def _get_repo_with_old_id(self): - """Try to get a repository with the old name.""" - repo_id = "{0}-{1}".format(self.user, self.project) - if repo_id in self.base.repos and "_copr" in self.base.repos[repo_id].repofile: - file_name = self.base.repos[repo_id].repofile.split("/")[-1] - try: - copr_hostname = file_name.rsplit(":", 2)[0].split(":", 1)[1] - if copr_hostname != self.host: - return None - return file_name - except IndexError: - return file_name - return None - - def _read_all_repos(self, repo_id=None): - """The method is used to initialize the base variable by - repositories using the RepoReader class from dnf. - - Args: - repo_id: Repo id of the repository we want to work with. - """ - reader = dnf.conf.read.RepoReader(self.base.conf, None) - for repo in reader: - try: - if repo_id: - if repo.id == repo_id: - self.base.repos.add(repo) - break - else: - self.base.repos.add(repo) - except dnf.exceptions.ConfigError as e: - self.raise_exception(str(e)) - - def _get_copr_repo(self): - """Return one specific repository from all repositories on the system. - - Returns: - The repository that a user wants to enable, disable, or remove. - """ - repo_id = "copr:{0}:{1}:{2}".format(self.host, self.user, self.project) - if repo_id not in self.base.repos: - if self._get_repo_with_old_id() is None: - return None - return self.base.repos[repo_id] - - def _disable_repo(self, repo_filename_path): - """Disable the repository. - - Args: - repo_filename_path: Path to repository. - - Returns: - False, if the repository is already disabled on the system, - True otherwise. - """ - self._read_all_repos() - repo = self._get_copr_repo() - if repo is None: - if self.check_mode: - return True - self._enable_repo(repo_filename_path) - self._read_all_repos("copr:{0}:{1}:{2}".format(self.host, self.user, self.project)) - repo = self._get_copr_repo() - for repo_id in repo.cfg.sections(): - repo_content_api = self._download_repo_info() - with open(repo_filename_path, "r") as file: - repo_content_file = file.read() - if repo_content_file != repo_content_api: - if not self.resolve_differences( - repo_content_file, repo_content_api, repo_filename_path - ): - return False - if not self.check_mode: - self.base.conf.write_raw_configfile( - repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"}, - ) - return True - - def resolve_differences(self, repo_content_file, repo_content_api, repo_filename_path): - """Detect differences between the contents of the repository stored on the - system and the information about the repository on the server. - - Args: - repo_content_file: The contents of the repository stored on the system. - repo_content_api: The information about the repository from the server. - repo_filename_path: Path to repository. - - Returns: - False, if the contents of the repo file and the information on the server match, - True otherwise. 
- """ - repo_file_lines = repo_content_file.split("\n") - repo_api_lines = repo_content_api.split("\n") - repo_api_lines.remove("enabled=1") - if "enabled=0" in repo_file_lines: - repo_file_lines.remove("enabled=0") - if " ".join(repo_api_lines) == " ".join(repo_file_lines): - return False - if not self.check_mode: - os.remove(repo_filename_path) - self._enable_repo(repo_filename_path, repo_content_api) - else: - repo_file_lines.remove("enabled=1") - if " ".join(repo_api_lines) != " ".join(repo_file_lines): - if not self.check_mode: - os.remove(repo_filename_path) - self._enable_repo(repo_filename_path, repo_content_api) - return True - - def _remove_repo(self): - """Remove the required repository. - - Returns: - True, if the repository has been removed, False otherwise. - """ - self._read_all_repos() - repo = self._get_copr_repo() - if not repo: - return False - if not self.check_mode: - try: - os.remove(repo.repofile) - except OSError as e: - self.raise_exception(str(e)) - return True - - def run(self): - """The method uses methods of the CoprModule class to change the state of the repository. - - Returns: - Dictionary with information that the ansible module displays to the user at the end of the run. - """ - self.need_root() - state = dict() - repo_filename = "_copr:{0}:{1}:{2}.repo".format(self.host, self.user, self.project) - state["repo"] = "{0}/{1}/{2}".format(self.host, self.user, self.project) - state["repo_filename"] = repo_filename - repo_filename_path = "{0}/_copr:{1}:{2}:{3}.repo".format( - self.base.conf.get_reposdir, self.host, self.user, self.project - ) - if self.state == "enabled": - enabled = self._enable_repo(repo_filename_path) - state["msg"] = "enabled" - state["state"] = bool(enabled) - elif self.state == "disabled": - disabled = self._disable_repo(repo_filename_path) - state["msg"] = "disabled" - state["state"] = bool(disabled) - elif self.state == "absent": - removed = self._remove_repo() - state["msg"] = "absent" - state["state"] = bool(removed) - return state - - @staticmethod - def _compare_repo_content(repo_filename_path, repo_content_api): - """Compare the contents of the stored repository with the information from the server. - - Args: - repo_filename_path: Path to repository. - repo_content_api: The information about the repository from the server. - - Returns: - True, if the information matches, False otherwise. - """ - if not os.path.isfile(repo_filename_path): - return False - with open(repo_filename_path, "r") as file: - repo_content_file = file.read() - return repo_content_file == repo_content_api - - @staticmethod - def chroot_conf(): - """Obtain information about the distribution, version, and architecture of the target. - - Returns: - Chroot info in the form of distribution-version-architecture. - """ - (distribution, version, codename) = distro.linux_distribution(full_distribution_name=False) - base = CoprModule.get_base() - return "{0}-{1}-{2}".format(distribution, version, base.conf.arch) - - @staticmethod - def _sanitize_username(user): - """Modify the group name. - - Args: - user: User name. - - Returns: - Modified user name if it is a group name with @. 
- """ - if user[0] == "@": - return "group_{0}".format(user[1:]) - return user - - -def run_module(): - """The function takes care of the functioning of the whole ansible copr module.""" - module_args = dict( - host=dict(type="str", default="copr.fedorainfracloud.org"), - protocol=dict(type="str", default="https"), - name=dict(type="str", required=True), - state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), - chroot=dict(type="str"), - ) - module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) - params = module.params - - if not HAS_DNF_PACKAGES: - module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR) - - CoprModule.ansible_module = module - copr_module = CoprModule( - host=params["host"], - name=params["name"], - state=params["state"], - protocol=params["protocol"], - chroot=params["chroot"], - check_mode=module.check_mode, - ) - state = copr_module.run() - - info = "Please note that this repository is not part of the main distribution" - - if params["state"] == "enabled" and state["state"]: - module.exit_json( - changed=state["state"], - msg=state["msg"], - repo=state["repo"], - repo_filename=state["repo_filename"], - info=info, - ) - module.exit_json( - changed=state["state"], - msg=state["msg"], - repo=state["repo"], - repo_filename=state["repo_filename"], - ) - - -def main(): - """Launches ansible Copr module.""" - run_module() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/packaging/os/dnf_versionlock.py b/plugins/modules/packaging/os/dnf_versionlock.py deleted file mode 100644 index fca33fd83c..0000000000 --- a/plugins/modules/packaging/os/dnf_versionlock.py +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Roberto Moreda -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: dnf_versionlock -version_added: '4.0.0' -short_description: Locks package versions in C(dnf) based systems -description: -- Locks package versions using the C(versionlock) plugin in C(dnf) based - systems. This plugin takes a set of name and versions for packages and - excludes all other versions of those packages. This allows you to for example - protect packages from being updated by newer versions. The state of the - plugin that reflects locking of packages is the C(locklist). -options: - name: - description: - - Package name spec to add or exclude to or delete from the C(locklist) - using the format expected by the C(dnf repoquery) command. - - This parameter is mutually exclusive with I(state=clean). - type: list - required: false - elements: str - default: [] - raw: - description: - - Do not resolve package name specs to NEVRAs to find specific version - to lock to. Instead the package name specs are used as they are. This - enables locking to not yet available versions of the package. - type: bool - default: false - state: - description: - - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or - C(clean)) from the C(locklist). - - C(present) will add a package name spec to the C(locklist). If there is a - installed package that matches, then only that version will be added. - Otherwise, all available package versions will be added. - - C(excluded) will add a package name spec as excluded to the - C(locklist). 
-        It means that packages represented by the package name spec will be
-        excluded from transaction operations. All available package versions
-        will be added.
-      - C(absent) will delete entries in the C(locklist) that match the
-        package name spec.
-      - C(clean) will delete all entries in the C(locklist). This option is
-        mutually exclusive with C(name).
-    choices: [ 'absent', 'clean', 'excluded', 'present' ]
-    type: str
-    default: present
-notes:
-  - The logic of the C(versionlock) plugin for corner cases can be
-    confusing, so please take into account that this module will do its best
-    to give a C(check_mode) prediction of what is going to happen. In case of
-    doubt, check the documentation of the plugin.
-  - Sometimes the module could predict changes in C(check_mode) that will not
-    happen, because C(versionlock) concludes that there is already an entry in
-    the C(locklist) that matches.
-  - In an ideal world, the C(versionlock) plugin would have a dry-run option
-    to know for sure what is going to happen. So far we have to work with a
-    best guess as close as possible to the behaviour inferred from its code.
-  - For most cases where you want to lock and unlock specific versions of a
-    package, this works fairly well.
-  - Supports C(check_mode).
-requirements:
-  - dnf
-  - dnf-plugin-versionlock
-author:
-  - Roberto Moreda (@moreda)
-'''
-
-EXAMPLES = r'''
-- name: Prevent installed nginx from being updated
-  community.general.dnf_versionlock:
-    name: nginx
-    state: present
-
-- name: Prevent multiple packages from being updated
-  community.general.dnf_versionlock:
-    name:
-      - nginx
-      - haproxy
-    state: present
-
-- name: Remove lock from nginx to be updated again
-  community.general.dnf_versionlock:
-    name: nginx
-    state: absent
-
-- name: Exclude bind 32:9.11 from installs or updates
-  community.general.dnf_versionlock:
-    name: bind-32:9.11*
-    state: excluded
-
-- name: Keep bash package in major version 4
-  community.general.dnf_versionlock:
-    name: bash-0:4.*
-    raw: true
-    state: present
-
-- name: Delete all entries in the locklist of versionlock
-  community.general.dnf_versionlock:
-    state: clean
-'''
-
-RETURN = r'''
-locklist_pre:
-  description: Locklist before module execution.
-  returned: success
-  type: list
-  elements: str
-  sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ]
-locklist_post:
-  description: Locklist after module execution.
-  returned: success and (not check mode or state is clean)
-  type: list
-  elements: str
-  sample: [ 'bash-0:4.4.20-1.el8_4.*' ]
-specs_toadd:
-  description: Package name specs meant to be added by versionlock.
-  returned: success
-  type: list
-  elements: str
-  sample: [ 'bash' ]
-specs_todelete:
-  description: Package name specs meant to be deleted by versionlock.
-  returned: success
-  type: list
-  elements: str
-  sample: [ 'bind' ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-import fnmatch
-import os
-import re
-
-DNF_BIN = "/usr/bin/dnf"
-VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf"
-# NEVRA regex.
-NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-"
-                      r"(?P<release>.+)\.(?P<arch>.+)$")
-
-
-def do_versionlock(module, command, patterns=None, raw=False):
-    patterns = [] if not patterns else patterns
-    raw_parameter = ["--raw"] if raw else []
-    # Call dnf versionlock using just one full NEVR package-name-spec at a
-    # time, because multiple package-name-specs and globs are not well supported.
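-    # For example (illustrative), patterns=["bash-0:4.4.*", "nginx"] with
-    # command="add" results in two separate invocations:
-    #   dnf -q versionlock add bash-0:4.4.*
-    #   dnf -q versionlock add nginx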
- # - # This is a workaround for two alleged bugs in the dnf versionlock plugin: - # * Multiple package-name-spec arguments don't lock correctly - # (https://bugzilla.redhat.com/show_bug.cgi?id=2013324). - # * Locking a version of a not installed package disallows locking other - # versions later (https://bugzilla.redhat.com/show_bug.cgi?id=2013332) - # - # NOTE: This is suboptimal in terms of performance if there are more than a - # few package-name-spec patterns to lock, because there is a command - # execution per each. This will improve by changing the strategy once the - # mentioned alleged bugs in the dnf versionlock plugin are fixed. - if patterns: - outs = [] - for p in patterns: - rc, out, err = module.run_command( - [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p], - check_rc=True) - outs.append(out) - out = "\n".join(outs) - else: - rc, out, err = module.run_command( - [DNF_BIN, "-q", "versionlock", command], check_rc=True) - return out - - -# This is equivalent to the _match function of the versionlock plugin. -def match(entry, pattern): - entry = entry.lstrip('!') - if entry == pattern: - return True - m = NEVRA_RE.match(entry) - if not m: - return False - for name in ( - '%s' % m["name"], - '%s.%s' % (m["name"], m["arch"]), - '%s-%s' % (m["name"], m["version"]), - '%s-%s-%s' % (m["name"], m["version"], m["release"]), - '%s-%s:%s' % (m["name"], m["epoch"], m["version"]), - '%s-%s-%s.%s' % (m["name"], m["version"], m["release"], m["arch"]), - '%s-%s:%s-%s' % (m["name"], m["epoch"], m["version"], m["release"]), - '%s:%s-%s-%s.%s' % (m["epoch"], m["name"], m["version"], m["release"], - m["arch"]), - '%s-%s:%s-%s.%s' % (m["name"], m["epoch"], m["version"], m["release"], - m["arch"]) - ): - if fnmatch.fnmatch(name, pattern): - return True - return False - - -def get_packages(module, patterns, only_installed=False): - packages_available_map_name_evrs = {} - rc, out, err = module.run_command( - [DNF_BIN, "-q", "repoquery"] + - (["--installed"] if only_installed else []) + - patterns, - check_rc=True) - - for p in out.split(): - # Extract the NEVRA pattern. - m = NEVRA_RE.match(p) - if not m: - module.fail_json( - msg="failed to parse nevra for %s" % p, - rc=rc, out=out, err=err) - - evr = "%s:%s-%s" % (m["epoch"], - m["version"], - m["release"]) - - packages_available_map_name_evrs.setdefault(m["name"], set()) - packages_available_map_name_evrs[m["name"]].add(evr) - return packages_available_map_name_evrs - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type="list", elements="str", default=[]), - raw=dict(type="bool", default=False), - state=dict(type="str", default="present", - choices=["present", "absent", "excluded", "clean"]), - ), - supports_check_mode=True, - ) - - patterns = module.params["name"] - raw = module.params["raw"] - state = module.params["state"] - changed = False - msg = "" - - # Check module pre-requisites. - if not os.path.exists(DNF_BIN): - module.fail_json(msg="%s was not found" % DNF_BIN) - if not os.path.exists(VERSIONLOCK_CONF): - module.fail_json(msg="plugin versionlock is required") - - # Check incompatible options. - if state == "clean" and patterns: - module.fail_json(msg="clean state is incompatible with a name list") - if state != "clean" and not patterns: - module.fail_json(msg="name list is required for %s state" % state) - - locklist_pre = do_versionlock(module, "list").split() - - specs_toadd = [] - specs_todelete = [] - - if state in ["present", "excluded"]: - - if raw: - # Add raw patterns as specs to add. 
-            for p in patterns:
-                if ((p if state == "present" else "!" + p)
-                        not in locklist_pre):
-                    specs_toadd.append(p)
-        else:
-            # Get available packages that match the patterns.
-            packages_map_name_evrs = get_packages(
-                module,
-                patterns)
-
-            # Get installed packages that match the patterns.
-            packages_installed_map_name_evrs = get_packages(
-                module,
-                patterns,
-                only_installed=True)
-
-            # Obtain the list of package specs that require an entry in the
-            # locklist. This list is composed of:
-            #   a) the non-installed packages list with all available
-            #      versions
-            #   b) the installed packages list
-            packages_map_name_evrs.update(packages_installed_map_name_evrs)
-            for name in packages_map_name_evrs:
-                for evr in packages_map_name_evrs[name]:
-                    locklist_entry = "%s-%s.*" % (name, evr)
-
-                    if (locklist_entry if state == "present"
-                            else "!%s" % locklist_entry) not in locklist_pre:
-                        specs_toadd.append(locklist_entry)
-
-        if specs_toadd and not module.check_mode:
-            cmd = "add" if state == "present" else "exclude"
-            msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw)
-
-    elif state == "absent":
-
-        if raw:
-            # Add raw patterns as specs to delete.
-            for p in patterns:
-                if p in locklist_pre:
-                    specs_todelete.append(p)
-
-        else:
-            # Get patterns that match some line in the locklist.
-            for p in patterns:
-                for e in locklist_pre:
-                    if match(e, p):
-                        specs_todelete.append(p)
-
-        if specs_todelete and not module.check_mode:
-            msg = do_versionlock(
-                module, "delete", patterns=specs_todelete, raw=raw)
-
-    elif state == "clean":
-        specs_todelete = locklist_pre
-
-        if specs_todelete and not module.check_mode:
-            msg = do_versionlock(module, "clear")
-
-    if specs_toadd or specs_todelete:
-        changed = True
-
-    response = {
-        "changed": changed,
-        "msg": msg,
-        "locklist_pre": locklist_pre,
-        "specs_toadd": specs_toadd,
-        "specs_todelete": specs_todelete
-    }
-    if not module.check_mode:
-        response["locklist_post"] = do_versionlock(module, "list").split()
-    else:
-        if state == "clean":
-            response["locklist_post"] = []
-
-    module.exit_json(**response)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/packaging/os/flatpak.py
deleted file mode 100644
index 7f3963ad3e..0000000000
--- a/plugins/modules/packaging/os/flatpak.py
+++ /dev/null
@@ -1,342 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017 John Kwiatkoski (@JayKayy)
-# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers)
-# Copyright: (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: flatpak
-short_description: Manage flatpaks
-description:
-- Allows users to add or remove flatpaks.
-- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
-author:
-- John Kwiatkoski (@JayKayy)
-- Alexander Bethke (@oolongbrothers)
-requirements:
-- flatpak
-options:
-  executable:
-    description:
-    - The path to the C(flatpak) executable to use.
-    - By default, this module looks for the C(flatpak) executable on the path.
-    type: path
-    default: flatpak
-  method:
-    description:
-    - The installation method to use.
-    - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
-      or only for the current C(user).
-    type: str
-    choices: [ system, user ]
-    default: system
-  name:
-    description:
-    - The name of the flatpak to manage. To operate on several packages this
-      can accept a list of packages.
-    - When used with I(state=present), I(name) can be specified as a URL to a
-      C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
-    - Both C(https://) and C(http://) URLs are supported.
-    - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
-      to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
-    - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
-      format.
-    - When supplying a URL with I(state=absent), the module will try to match the
-      installed flatpak based on the name of the flatpakref to remove it. However, there is no
-      guarantee that the names of the flatpakref file and the reverse DNS name of the installed
-      flatpak match.
-    type: list
-    elements: str
-    required: true
-  no_dependencies:
-    description:
-    - Whether installing runtime dependencies should be omitted.
-    - This parameter is primarily implemented for integration testing this module.
-      There might, however, be some use cases where you would want to have this, like when you
-      are packaging your own flatpaks.
-    type: bool
-    default: false
-    version_added: 3.2.0
-  remote:
-    description:
-    - The flatpak remote (repository) to install the flatpak from.
-    - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
-      you can use this.
-    - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
-    type: str
-    default: flathub
-  state:
-    description:
-    - Indicates the desired package state.
-    choices: [ absent, present ]
-    type: str
-    default: present
-'''
-
-EXAMPLES = r'''
-- name: Install the spotify flatpak
-  community.general.flatpak:
-    name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
-    state: present
-
-- name: Install the gedit flatpak package without dependencies (not recommended)
-  community.general.flatpak:
-    name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
-    state: present
-    no_dependencies: true
-
-- name: Install the gedit package from flathub for current user
-  community.general.flatpak:
-    name: org.gnome.gedit
-    state: present
-    method: user
-
-- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
-  community.general.flatpak:
-    name: org.gnome.Calendar
-    state: present
-    remote: gnome
-
-- name: Install multiple packages
-  community.general.flatpak:
-    name:
-      - org.gimp.GIMP
-      - org.inkscape.Inkscape
-      - org.mozilla.firefox
-
-- name: Remove the gedit flatpak
-  community.general.flatpak:
-    name: org.gnome.gedit
-    state: absent
-
-- name: Remove multiple packages
-  community.general.flatpak:
-    name:
-      - org.gimp.GIMP
-      - org.inkscape.Inkscape
-      - org.mozilla.firefox
-    state: absent
-'''
-
-RETURN = r'''
-command:
-  description: The exact flatpak command that was executed.
-  returned: When a flatpak command has been executed
-  type: str
-  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
-msg:
-  description: Module error message.
-  returned: failure
-  type: str
-  sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
-rc:
-  description: Return code from flatpak binary.
-  returned: When a flatpak command has been executed
-  type: int
-  sample: 0
-stderr:
-  description: Error output from flatpak binary.
-  returned: When a flatpak command has been executed
-  type: str
-  sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
-stdout:
-  description: Output from flatpak binary.
-  returned: When a flatpak command has been executed
-  type: str
-  sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
-'''
-
-from distutils.version import StrictVersion
-
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.basic import AnsibleModule
-
-OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
-
-
-def install_flat(module, binary, remote, names, method, no_dependencies):
-    """Add new flatpaks."""
-    global result
-    uri_names = []
-    id_names = []
-    for name in names:
-        if name.startswith('http://') or name.startswith('https://'):
-            uri_names.append(name)
-        else:
-            id_names.append(name)
-    base_command = [binary, "install", "--{0}".format(method)]
-    flatpak_version = _flatpak_version(module, binary)
-    if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
-        base_command += ["-y"]
-    else:
-        base_command += ["--noninteractive"]
-    if no_dependencies:
-        base_command += ["--no-deps"]
-    if uri_names:
-        command = base_command + uri_names
-        _flatpak_command(module, module.check_mode, command)
-    if id_names:
-        command = base_command + [remote] + id_names
-        _flatpak_command(module, module.check_mode, command)
-    result['changed'] = True
-
-
-def uninstall_flat(module, binary, names, method):
-    """Remove existing flatpaks."""
-    global result
-    installed_flat_names = [
-        _match_installed_flat_name(module, binary, name, method)
-        for name in names
-    ]
-    command = [binary, "uninstall"]
-    flatpak_version = _flatpak_version(module, binary)
-    if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
-        command += ["-y"]
-    else:
-        command += ["--noninteractive"]
-    command += ["--{0}".format(method)] + installed_flat_names
-    _flatpak_command(module, module.check_mode, command)
-    result['changed'] = True
-
-
-def flatpak_exists(module, binary, names, method):
-    """Check if the flatpaks are installed."""
-    command = [binary, "list", "--{0}".format(method), "--app"]
-    output = _flatpak_command(module, False, command)
-    installed = []
-    not_installed = []
-    for name in names:
-        parsed_name = _parse_flatpak_name(name).lower()
-        if parsed_name in output.lower():
-            installed.append(name)
-        else:
-            not_installed.append(name)
-    return installed, not_installed
-
-
-def _match_installed_flat_name(module, binary, name, method):
-    # This is a difficult function, since if the user supplies a flatpakref URL,
-    # we have to rely on a naming convention:
-    # the flatpakref file name needs to match the flatpak name.
-    global result
-    parsed_name = _parse_flatpak_name(name)
-    # Try running flatpak list with the columns feature
-    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
-    _flatpak_command(module, False, command, ignore_failure=True)
-    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
-        # Probably flatpak before 1.2
-        matched_flatpak_name = \
-            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
-    else:
-        # Probably flatpak >= 1.2
-        matched_flatpak_name = \
-            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
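-
-    # Illustrative note (output formats assumed): with --columns=application
-    # each row is a bare application ID such as "org.gnome.Calendar", while
-    # the pre-1.2 list format packs several fields into one row, such as
-    # "org.gnome.Calendar/x86_64/stable", which is why the two matchers
-    # below parse their rows differently.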
-
-    if matched_flatpak_name:
-        return matched_flatpak_name
-    else:
-        result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
-            "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
-            "If you used a URL, try using the reverse DNS name of the flatpak"
-        module.fail_json(**result)
-
-
-def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
-    global result
-    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
-    output = _flatpak_command(module, False, command)
-    for row in output.split('\n'):
-        if parsed_name.lower() == row.lower():
-            return row
-
-
-def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
-    global result
-    command = [binary, "list", "--{0}".format(method), "--app"]
-    output = _flatpak_command(module, False, command)
-    for row in output.split('\n'):
-        if parsed_name.lower() in row.lower():
-            return row.split()[0]
-
-
-def _parse_flatpak_name(name):
-    if name.startswith('http://') or name.startswith('https://'):
-        file_name = urlparse(name).path.split('/')[-1]
-        file_name_without_extension = file_name.split('.')[0:-1]
-        common_name = ".".join(file_name_without_extension)
-    else:
-        common_name = name
-    return common_name
-
-
-def _flatpak_version(module, binary):
-    global result
-    command = [binary, "--version"]
-    output = _flatpak_command(module, False, command)
-    version_number = output.split()[1]
-    return version_number
-
-
-def _flatpak_command(module, noop, command, ignore_failure=False):
-    global result
-    result['command'] = ' '.join(command)
-    if noop:
-        result['rc'] = 0
-        return ""
-
-    result['rc'], result['stdout'], result['stderr'] = module.run_command(
-        command, check_rc=not ignore_failure
-    )
-    return result['stdout']
-
-
-def main():
-    # This module supports check mode
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(type='list', elements='str', required=True),
-            remote=dict(type='str', default='flathub'),
-            method=dict(type='str', default='system',
-                        choices=['user', 'system']),
-            state=dict(type='str', default='present',
-                       choices=['absent', 'present']),
-            no_dependencies=dict(type='bool', default=False),
-            executable=dict(type='path', default='flatpak')
-        ),
-        supports_check_mode=True,
-    )
-
-    name = module.params['name']
-    state = module.params['state']
-    remote = module.params['remote']
-    no_dependencies = module.params['no_dependencies']
-    method = module.params['method']
-    executable = module.params['executable']
-    binary = module.get_bin_path(executable, None)
-
-    global result
-    result = dict(
-        changed=False
-    )
-
-    # If the binary was not found, fail the operation
-    if not binary:
-        module.fail_json(msg="Executable '%s' was not found on the system."
% executable, **result) - - installed, not_installed = flatpak_exists(module, binary, name, method) - if state == 'present' and not_installed: - install_flat(module, binary, remote, not_installed, method, no_dependencies) - elif state == 'absent' and installed: - uninstall_flat(module, binary, installed, method) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py deleted file mode 100644 index e0e4170f47..0000000000 --- a/plugins/modules/packaging/os/flatpak_remote.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) -# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) -# Copyright: (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: flatpak_remote -short_description: Manage flatpak repository remotes -description: -- Allows users to add or remove flatpak remotes. -- The flatpak remotes concept is comparable to what is called repositories in other packaging - formats. -- Currently, remote addition is only supported via I(flatpakrepo) file URLs. -- Existing remotes will not be updated. -- See the M(community.general.flatpak) module for managing flatpaks. -author: -- John Kwiatkoski (@JayKayy) -- Alexander Bethke (@oolongbrothers) -requirements: -- flatpak -options: - executable: - description: - - The path to the C(flatpak) executable to use. - - By default, this module looks for the C(flatpak) executable on the path. - type: str - default: flatpak - flatpakrepo_url: - description: - - The URL to the I(flatpakrepo) file representing the repository remote to add. - - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) - is added using the specified installation C(method). - - When used with I(state=absent), this is not required. - - Required when I(state=present). - type: str - method: - description: - - The installation method to use. - - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) - or only for the current C(user). - type: str - choices: [ system, user ] - default: system - name: - description: - - The desired name for the flatpak remote to be registered under on the managed host. - - When used with I(state=present), the remote will be added to the managed host under - the specified I(name). - - When used with I(state=absent) the remote with that name will be removed. - type: str - required: true - state: - description: - - Indicates the desired package state. 
- type: str - choices: [ absent, present ] - default: present -''' - -EXAMPLES = r''' -- name: Add the Gnome flatpak remote to the system installation - community.general.flatpak_remote: - name: gnome - state: present - flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo - -- name: Add the flathub flatpak repository remote to the user installation - community.general.flatpak_remote: - name: flathub - state: present - flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo - method: user - -- name: Remove the Gnome flatpak remote from the user installation - community.general.flatpak_remote: - name: gnome - state: absent - method: user - -- name: Remove the flathub remote from the system installation - community.general.flatpak_remote: - name: flathub - state: absent -''' - -RETURN = r''' -command: - description: The exact flatpak command that was executed - returned: When a flatpak command has been executed - type: str - sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" -msg: - description: Module error message - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." -rc: - description: Return code from flatpak binary - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" -stdout: - description: Output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_native - - -def add_remote(module, binary, name, flatpakrepo_url, method): - """Add a new remote.""" - global result - command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url] - _flatpak_command(module, module.check_mode, command) - result['changed'] = True - - -def remove_remote(module, binary, name, method): - """Remove an existing remote.""" - global result - command = [binary, "remote-delete", "--{0}".format(method), "--force", name] - _flatpak_command(module, module.check_mode, command) - result['changed'] = True - - -def remote_exists(module, binary, name, method): - """Check if the remote exists.""" - command = [binary, "remote-list", "-d", "--{0}".format(method)] - # The query operation for the remote needs to be run even in check mode - output = _flatpak_command(module, False, command) - for line in output.splitlines(): - listed_remote = line.split() - if len(listed_remote) == 0: - continue - if listed_remote[0] == to_native(name): - return True - return False - - -def _flatpak_command(module, noop, command): - global result - result['command'] = ' '.join(command) - if noop: - result['rc'] = 0 - return "" - - result['rc'], result['stdout'], result['stderr'] = module.run_command( - command, check_rc=True - ) - return result['stdout'] - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - flatpakrepo_url=dict(type='str'), - method=dict(type='str', default='system', - choices=['user', 'system']), - state=dict(type='str', default="present", - choices=['absent', 'present']), - executable=dict(type='str', default="flatpak") - ), - # This module 
supports check mode - supports_check_mode=True, - ) - - name = module.params['name'] - flatpakrepo_url = module.params['flatpakrepo_url'] - method = module.params['method'] - state = module.params['state'] - executable = module.params['executable'] - binary = module.get_bin_path(executable, None) - - if flatpakrepo_url is None: - flatpakrepo_url = '' - - global result - result = dict( - changed=False - ) - - # If the binary was not found, fail the operation - if not binary: - module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) - - remote_already_exists = remote_exists(module, binary, to_bytes(name), method) - - if state == 'present' and not remote_already_exists: - add_remote(module, binary, name, flatpakrepo_url, method) - elif state == 'absent' and remote_already_exists: - remove_remote(module, binary, name, method) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/homebrew.py b/plugins/modules/packaging/os/homebrew.py deleted file mode 100644 index 80e436191a..0000000000 --- a/plugins/modules/packaging/os/homebrew.py +++ /dev/null @@ -1,978 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Andrew Dunham -# (c) 2013, Daniel Jaouen -# (c) 2015, Indrajit Raychaudhuri -# -# Based on macports (Jimmy Tang ) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: homebrew -author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" - - "Andrew Dunham (@andrew-d)" -requirements: - - "python >= 2.6" - - homebrew must already be installed on the target system -short_description: Package manager for Homebrew -description: - - Manages Homebrew packages -options: - name: - description: - - A list of names of packages to install/remove. - aliases: [ 'formula', 'package', 'pkg' ] - type: list - elements: str - path: - description: - - "A ':' separated list of paths to search for 'brew' executable. - Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, - providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." - default: '/usr/local/bin:/opt/homebrew/bin' - type: path - state: - description: - - state of the package. - choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ] - default: present - type: str - update_homebrew: - description: - - update homebrew itself first. - - Alias C(update-brew) has been deprecated and will be removed in community.general 5.0.0. - type: bool - default: no - aliases: ['update-brew'] - upgrade_all: - description: - - upgrade all homebrew packages. - type: bool - default: no - aliases: ['upgrade'] - install_options: - description: - - options flags to install a package. - aliases: ['options'] - type: list - elements: str - upgrade_options: - description: - - Option flags to upgrade. - type: list - elements: str - version_added: '0.2.0' -notes: - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. 
-''' - -EXAMPLES = ''' -# Install formula foo with 'brew' in default path -- community.general.homebrew: - name: foo - state: present - -# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) -- community.general.homebrew: - name: foo - path: /my/other/location/bin - state: present - -# Update homebrew first and install formula foo with 'brew' in default path -- community.general.homebrew: - name: foo - state: present - update_homebrew: yes - -# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path -- community.general.homebrew: - name: foo - state: latest - update_homebrew: yes - -# Update homebrew and upgrade all packages -- community.general.homebrew: - update_homebrew: yes - upgrade_all: yes - -# Miscellaneous other examples -- community.general.homebrew: - name: foo - state: head - -- community.general.homebrew: - name: foo - state: linked - -- community.general.homebrew: - name: foo - state: absent - -- community.general.homebrew: - name: foo,bar - state: absent - -- community.general.homebrew: - name: foo - state: present - install_options: with-baz,enable-debug - -- name: Install formula foo with 'brew' from cask - community.general.homebrew: - name: homebrew/cask/foo - state: present - -- name: Use ignore-pinned option while upgrading all - community.general.homebrew: - upgrade_all: yes - upgrade_options: ignore-pinned -''' - -RETURN = ''' -msg: - description: if the cache was updated or not - returned: always - type: str - sample: "Changed: 0, Unchanged: 2" -unchanged_pkgs: - description: - - List of package names which are unchanged after module run - returned: success - type: list - sample: ["awscli", "ag"] - version_added: '0.2.0' -changed_pkgs: - description: - - List of package names which are changed after module run - returned: success - type: list - sample: ['git', 'git-cola'] - version_added: '0.2.0' -''' - -import os.path -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems, string_types - - -# exceptions -------------------------------------------------------------- {{{ -class HomebrewException(Exception): - pass -# /exceptions ------------------------------------------------------------- }}} - - -# utils ------------------------------------------------------------------- {{{ -def _create_regex_group_complement(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) - group = r'[^' + r''.join(chars) + r']' - return re.compile(group) -# /utils ------------------------------------------------------------------ }}} - - -class Homebrew(object): - '''A class to manage Homebrew packages.''' - - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_PACKAGE_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . 
# dots - / # slash (for taps) - \+ # plusses - \- # dashes - : # colons (for URLs) - @ # at-sign - ''' - - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) - INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) - # /class regexes ----------------------------------------------- }}} - - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, string_types): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_package(cls, package): - '''A valid package is either None or alphanumeric.''' - - if package is None: - return True - - return ( - isinstance(package, string_types) - and not cls.INVALID_PACKAGE_REGEX.search(package) - ) - - @classmethod - def valid_state(cls, state): - ''' - A valid state is one of: - - None - - installed - - upgraded - - head - - linked - - unlinked - - absent - ''' - - if state is None: - return True - else: - return ( - isinstance(state, string_types) - and state.lower() in ( - 'installed', - 'upgraded', - 'head', - 'linked', - 'unlinked', - 'absent', - ) - ) - - @classmethod - def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' - - return isinstance(module, AnsibleModule) - - # /class validations ------------------------------------------- }}} - - # class properties --------------------------------------------- {{{ - @property - def module(self): - return self._module - - @module.setter - def module(self, module): - if not self.valid_module(module): - self._module = None - self.failed = True - self.message = 'Invalid module: {0}.'.format(module) - raise HomebrewException(self.message) - - else: - self._module = module - return module - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - if not self.valid_path(path): - self._path = [] - self.failed = True - self.message = 'Invalid path: {0}.'.format(path) - raise HomebrewException(self.message) - - else: - if isinstance(path, string_types): - self._path = path.split(':') - else: - self._path = path - - return path - - @property - def brew_path(self): - return self._brew_path - - @brew_path.setter - def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): - self._brew_path = None - self.failed = True - self.message = 'Invalid brew_path: {0}.'.format(brew_path) - raise HomebrewException(self.message) - - else: - self._brew_path = brew_path - return brew_path - - @property - def params(self): - return self._params - - @params.setter - def params(self, params): - self._params = self.module.params - return self._params - - @property - def current_package(self): - return self._current_package - - @current_package.setter - def 
current_package(self, package): - if not self.valid_package(package): - self._current_package = None - self.failed = True - self.message = 'Invalid package: {0}.'.format(package) - raise HomebrewException(self.message) - - else: - self._current_package = package - return package - # /class properties -------------------------------------------- }}} - - def __init__(self, module, path, packages=None, state=None, - update_homebrew=False, upgrade_all=False, - install_options=None, upgrade_options=None): - if not install_options: - install_options = list() - if not upgrade_options: - upgrade_options = list() - self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, - install_options=install_options, - upgrade_options=upgrade_options,) - - self._prep() - - # prep --------------------------------------------------------- {{{ - def _setup_status_vars(self): - self.failed = False - self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 - self.changed_pkgs = [] - self.unchanged_pkgs = [] - self.message = '' - - def _setup_instance_vars(self, **kwargs): - for key, val in iteritems(kwargs): - setattr(self, key, val) - - def _prep(self): - self._prep_brew_path() - - def _prep_brew_path(self): - if not self.module: - self.brew_path = None - self.failed = True - self.message = 'AnsibleModule not set.' - raise HomebrewException(self.message) - - self.brew_path = self.module.get_bin_path( - 'brew', - required=True, - opt_dirs=self.path, - ) - if not self.brew_path: - self.brew_path = None - self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewException('Unable to locate homebrew executable.') - - return self.brew_path - - def _status(self): - return (self.failed, self.changed, self.message) - # /prep -------------------------------------------------------- }}} - - def run(self): - try: - self._run() - except HomebrewException: - pass - - if not self.failed and (self.changed_count + self.unchanged_count > 1): - self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, - ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_package_is_installed(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - cmd = [ - "{brew_path}".format(brew_path=self.brew_path), - "info", - self.current_package, - ] - rc, out, err = self.module.run_command(cmd) - for line in out.split('\n'): - if ( - re.search(r'Built from source', line) - or re.search(r'Poured from bottle', line) - ): - return True - - return False - - def _current_package_is_outdated(self): - if not self.valid_package(self.current_package): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'outdated', - self.current_package, - ]) - - return rc != 0 - - def _current_package_is_installed_from_head(self): - if not Homebrew.valid_package(self.current_package): - return False - elif not self._current_package_is_installed(): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'info', - self.current_package, - ]) - - try: - version_info = [line for line in out.split('\n') if line][0] - except IndexError: - return False - - return version_info.split(' 
')[-1] == 'HEAD' - # /checks ------------------------------------------------------ }}} - - # commands ----------------------------------------------------- {{{ - def _run(self): - if self.update_homebrew: - self._update_homebrew() - - if self.upgrade_all: - self._upgrade_all() - - if self.packages: - if self.state == 'installed': - return self._install_packages() - elif self.state == 'upgraded': - return self._upgrade_packages() - elif self.state == 'head': - return self._install_packages() - elif self.state == 'linked': - return self._link_packages() - elif self.state == 'unlinked': - return self._unlink_packages() - elif self.state == 'absent': - return self._uninstall_packages() - - # updated -------------------------------- {{{ - def _update_homebrew(self): - if self.module.check_mode: - self.changed = True - self.message = 'Homebrew would be updated.' - raise HomebrewException(self.message) - - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ]) - if rc == 0: - if out and isinstance(out, string_types): - already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s - ) - if not already_updated: - self.changed = True - self.message = 'Homebrew updated successfully.' - else: - self.message = 'Homebrew already up-to-date.' - - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - # /updated ------------------------------- }}} - - # _upgrade_all --------------------------- {{{ - def _upgrade_all(self): - if self.module.check_mode: - self.changed = True - self.message = 'Homebrew packages would be upgraded.' - raise HomebrewException(self.message) - cmd = [self.brew_path, 'upgrade'] + self.upgrade_options - - rc, out, err = self.module.run_command(cmd) - if rc == 0: - if not out: - self.message = 'Homebrew packages already upgraded.' - - else: - self.changed = True - self.message = 'Homebrew upgraded.' 
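-                # (Assumed brew behavior: 'brew upgrade' prints the formulae
-                # it upgrades, hence the empty-output check above to detect
-                # that nothing needed upgrading.)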
- - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - # /_upgrade_all -------------------------- }}} - - # installed ------------------------------ {{{ - def _install_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self._current_package_is_installed(): - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - self.message = 'Package already installed: {0}'.format( - self.current_package, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be installed: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - if self.state == 'head': - head = '--HEAD' - else: - head = None - - opts = ( - [self.brew_path, 'install'] - + self.install_options - + [self.current_package, head] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package installed: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _install_packages(self): - for package in self.packages: - self.current_package = package - self._install_current_package() - - return True - # /installed ----------------------------- }}} - - # upgraded ------------------------------- {{{ - def _upgrade_current_package(self): - command = 'upgrade' - - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - command = 'install' - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.message = 'Package is already upgraded: {0}'.format( - self.current_package, - ) - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be upgraded: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, command] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package upgraded: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _upgrade_all_packages(self): - opts = ( - [self.brew_path, 'upgrade'] - + self.install_options - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed = True - self.message = 'All packages upgraded.' 
- return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _upgrade_packages(self): - if not self.packages: - self._upgrade_all_packages() - else: - for package in self.packages: - self.current_package = package - self._upgrade_current_package() - return True - # /upgraded ------------------------------ }}} - - # uninstalled ---------------------------- {{{ - def _uninstall_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - self.message = 'Package already uninstalled: {0}'.format( - self.current_package, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be uninstalled: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'uninstall', '--force'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if not self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package uninstalled: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _uninstall_packages(self): - for package in self.packages: - self.current_package = package - self._uninstall_current_package() - - return True - # /uninstalled ----------------------------- }}} - - # linked --------------------------------- {{{ - def _link_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be linked: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'link'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) - - return True - else: - self.failed = True - self.message = 'Package could not be linked: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - def _link_packages(self): - for package in self.packages: - self.current_package = package - self._link_current_package() - - return True - # /linked -------------------------------- }}} - - # unlinked ------------------------------- {{{ - def _unlink_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) - raise 
HomebrewException(self.message) - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be unlinked: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'unlink'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) - - return True - else: - self.failed = True - self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - def _unlink_packages(self): - for package in self.packages: - self.current_package = package - self._unlink_current_package() - - return True - # /unlinked ------------------------------ }}} - # /commands ---------------------------------------------------- }}} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict( - aliases=["pkg", "package", "formula"], - required=False, - type='list', - elements='str', - ), - path=dict( - default="/usr/local/bin:/opt/homebrew/bin", - required=False, - type='path', - ), - state=dict( - default="present", - choices=[ - "present", "installed", - "latest", "upgraded", "head", - "linked", "unlinked", - "absent", "removed", "uninstalled", - ], - ), - update_homebrew=dict( - default=False, - aliases=["update-brew"], - type='bool', - deprecated_aliases=[dict(name='update-brew', version='5.0.0', collection_name='community.general')], - ), - upgrade_all=dict( - default=False, - aliases=["upgrade"], - type='bool', - ), - install_options=dict( - default=None, - aliases=['options'], - type='list', - elements='str', - ), - upgrade_options=dict( - default=None, - type='list', - elements='str', - ) - ), - supports_check_mode=True, - ) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - p = module.params - - if p['name']: - packages = p['name'] - else: - packages = None - - path = p['path'] - if path: - path = path.split(':') - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('head', ): - state = 'head' - if state in ('latest', 'upgraded'): - state = 'upgraded' - if state == 'linked': - state = 'linked' - if state == 'unlinked': - state = 'unlinked' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - update_homebrew = p['update_homebrew'] - if not update_homebrew: - module.run_command_environ_update.update( - dict(HOMEBREW_NO_AUTO_UPDATE="True") - ) - upgrade_all = p['upgrade_all'] - p['install_options'] = p['install_options'] or [] - install_options = ['--{0}'.format(install_option) - for install_option in p['install_options']] - - p['upgrade_options'] = p['upgrade_options'] or [] - upgrade_options = ['--{0}'.format(upgrade_option) - for upgrade_option in p['upgrade_options']] - brew = Homebrew(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options) - (failed, changed, message) = brew.run() - changed_pkgs = brew.changed_pkgs - unchanged_pkgs = brew.unchanged_pkgs - - if failed: - module.fail_json(msg=message) - module.exit_json( - changed=changed, - msg=message, - unchanged_pkgs=unchanged_pkgs, - changed_pkgs=changed_pkgs - ) - - -if __name__ == '__main__': - 
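- # illustrative sketch ------------------------------------------ {{{ - # The chain of `if state in (...)` checks in main() above folds the - # user-facing aliases down to the module's five internal states. A - # table-driven equivalent (names here are illustrative, not the module's): - # - # STATE_ALIASES = { - # 'present': 'installed', 'installed': 'installed', - # 'latest': 'upgraded', 'upgraded': 'upgraded', - # 'head': 'head', - # 'linked': 'linked', 'unlinked': 'unlinked', - # 'absent': 'absent', 'removed': 'absent', 'uninstalled': 'absent', - # } - # - # def normalize_state(state): - # return STATE_ALIASES[state] - # - # assert normalize_state('removed') == 'absent' - # assert normalize_state('latest') == 'upgraded' - # /illustrative sketch ----------------------------------------- }}}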
main() diff --git a/plugins/modules/packaging/os/homebrew_cask.py b/plugins/modules/packaging/os/homebrew_cask.py deleted file mode 100644 index 6c3de1c9ba..0000000000 --- a/plugins/modules/packaging/os/homebrew_cask.py +++ /dev/null @@ -1,877 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Daniel Jaouen -# Copyright: (c) 2016, Indrajit Raychaudhuri -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: homebrew_cask -author: -- "Indrajit Raychaudhuri (@indrajitr)" -- "Daniel Jaouen (@danieljaouen)" -- "Enric Lluelles (@enriclluelles)" -requirements: -- "python >= 2.6" -short_description: Install and uninstall homebrew casks -description: -- Manages Homebrew casks. -options: - name: - description: - - Name of cask to install or remove. - aliases: [ 'cask', 'package', 'pkg' ] - type: list - elements: str - path: - description: - - "':' separated list of paths to search for 'brew' executable." - default: '/usr/local/bin:/opt/homebrew/bin' - type: path - state: - description: - - State of the cask. - choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ] - default: present - type: str - sudo_password: - description: - - The sudo password to be passed to SUDO_ASKPASS. - required: false - type: str - update_homebrew: - description: - - Update homebrew itself first. - - Note that C(brew cask update) is a synonym for C(brew update). - - Alias C(update-brew) has been deprecated and will be removed in community.general 5.0.0. - type: bool - default: no - aliases: [ 'update-brew' ] - install_options: - description: - - Options flags to install a package. - aliases: [ 'options' ] - type: list - elements: str - accept_external_apps: - description: - - Allow external apps. - type: bool - default: no - upgrade_all: - description: - - Upgrade all casks. - - Mutually exclusive with C(upgraded) state. - type: bool - default: no - aliases: [ 'upgrade' ] - greedy: - description: - - Upgrade casks that auto update. - - Passes --greedy to brew cask outdated when checking - if an installed cask has a newer version available. 
- type: bool - default: no -''' -EXAMPLES = ''' -- name: Install cask - community.general.homebrew_cask: - name: alfred - state: present - -- name: Remove cask - community.general.homebrew_cask: - name: alfred - state: absent - -- name: Install cask with install options - community.general.homebrew_cask: - name: alfred - state: present - install_options: 'appdir=/Applications' - -- name: Install cask with install options - community.general.homebrew_cask: - name: alfred - state: present - install_options: 'debug,appdir=/Applications' - -- name: Allow external app - community.general.homebrew_cask: - name: alfred - state: present - accept_external_apps: True - -- name: Remove cask with force option - community.general.homebrew_cask: - name: alfred - state: absent - install_options: force - -- name: Upgrade all casks - community.general.homebrew_cask: - upgrade_all: true - -- name: Upgrade given cask with force option - community.general.homebrew_cask: - name: alfred - state: upgraded - install_options: force - -- name: Upgrade cask with greedy option - community.general.homebrew_cask: - name: 1password - state: upgraded - greedy: True - -- name: Using sudo password for installing cask - community.general.homebrew_cask: - name: wireshark - state: present - sudo_password: "{{ ansible_become_pass }}" -''' - -import os -import re -import tempfile -from distutils import version - -from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems, string_types - - -# exceptions -------------------------------------------------------------- {{{ -class HomebrewCaskException(Exception): - pass -# /exceptions ------------------------------------------------------------- }}} - - -# utils ------------------------------------------------------------------- {{{ -def _create_regex_group_complement(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) - group = r'[^' + r''.join(chars) + r']' - return re.compile(group) -# /utils ------------------------------------------------------------------ }}} - - -class HomebrewCask(object): - '''A class to manage Homebrew casks.''' - - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_CASK_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . 
# dots - / # slash (for taps) - \- # dashes - @ # at symbol - ''' - - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) - INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) - # /class regexes ----------------------------------------------- }}} - - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, (string_types)): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_cask(cls, cask): - '''A valid cask is either None or alphanumeric + backslashes.''' - - if cask is None: - return True - - return ( - isinstance(cask, string_types) - and not cls.INVALID_CASK_REGEX.search(cask) - ) - - @classmethod - def valid_state(cls, state): - ''' - A valid state is one of: - - installed - - absent - ''' - - if state is None: - return True - else: - return ( - isinstance(state, string_types) - and state.lower() in ( - 'installed', - 'absent', - ) - ) - - @classmethod - def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' - - return isinstance(module, AnsibleModule) - # /class validations ------------------------------------------- }}} - - # class properties --------------------------------------------- {{{ - @property - def module(self): - return self._module - - @module.setter - def module(self, module): - if not self.valid_module(module): - self._module = None - self.failed = True - self.message = 'Invalid module: {0}.'.format(module) - raise HomebrewCaskException(self.message) - - else: - self._module = module - return module - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - if not self.valid_path(path): - self._path = [] - self.failed = True - self.message = 'Invalid path: {0}.'.format(path) - raise HomebrewCaskException(self.message) - - else: - if isinstance(path, string_types): - self._path = path.split(':') - else: - self._path = path - - return path - - @property - def brew_path(self): - return self._brew_path - - @brew_path.setter - def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): - self._brew_path = None - self.failed = True - self.message = 'Invalid brew_path: {0}.'.format(brew_path) - raise HomebrewCaskException(self.message) - - else: - self._brew_path = brew_path - return brew_path - - @property - def params(self): - return self._params - - @params.setter - def params(self, params): - self._params = self.module.params - return self._params - - @property - def current_cask(self): - return self._current_cask - - @current_cask.setter - def current_cask(self, cask): - if not self.valid_cask(cask): - self._current_cask = None - self.failed = True - self.message = 'Invalid cask: {0}.'.format(cask) - raise 
HomebrewCaskException(self.message) - - else: - self._current_cask = cask - return cask - - @property - def brew_version(self): - try: - return self._brew_version - except AttributeError: - return None - - @brew_version.setter - def brew_version(self, brew_version): - self._brew_version = brew_version - - # /class properties -------------------------------------------- }}} - - def __init__(self, module, path=path, casks=None, state=None, - sudo_password=None, update_homebrew=False, - install_options=None, accept_external_apps=False, - upgrade_all=False, greedy=False): - if not install_options: - install_options = list() - self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, casks=casks, - state=state, sudo_password=sudo_password, - update_homebrew=update_homebrew, - install_options=install_options, - accept_external_apps=accept_external_apps, - upgrade_all=upgrade_all, - greedy=greedy, ) - - self._prep() - - # prep --------------------------------------------------------- {{{ - def _setup_status_vars(self): - self.failed = False - self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 - self.message = '' - - def _setup_instance_vars(self, **kwargs): - for key, val in iteritems(kwargs): - setattr(self, key, val) - - def _prep(self): - self._prep_brew_path() - - def _prep_brew_path(self): - if not self.module: - self.brew_path = None - self.failed = True - self.message = 'AnsibleModule not set.' - raise HomebrewCaskException(self.message) - - self.brew_path = self.module.get_bin_path( - 'brew', - required=True, - opt_dirs=self.path, - ) - if not self.brew_path: - self.brew_path = None - self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewCaskException('Unable to locate homebrew executable.') - - return self.brew_path - - def _status(self): - return (self.failed, self.changed, self.message) - # /prep -------------------------------------------------------- }}} - - def run(self): - try: - self._run() - except HomebrewCaskException: - pass - - if not self.failed and (self.changed_count + self.unchanged_count > 1): - self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, - ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_cask_is_outdated(self): - if not self.valid_cask(self.current_cask): - return False - - if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, 'outdated', '--cask'] - else: - base_opts = [self.brew_path, 'cask', 'outdated'] - - cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask] - - rc, out, err = self.module.run_command(cask_is_outdated_command) - - return out != "" - - def _current_cask_is_installed(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, "list", "--cask"] - else: - base_opts = [self.brew_path, "cask", "list"] - - cmd = base_opts + [self.current_cask] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - return True - else: - return False - - def _get_brew_version(self): - if self.brew_version: - return self.brew_version - - cmd = [self.brew_path, '--version'] - - rc, out, err = self.module.run_command(cmd, check_rc=True) - - # get version 
string from first line of "brew --version" output - version = out.split('\n')[0].split(' ')[1] - self.brew_version = version - return self.brew_version - - def _brew_cask_command_is_deprecated(self): - # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/) - return version.LooseVersion(self._get_brew_version()) >= version.LooseVersion('2.6.0') - # /checks ------------------------------------------------------ }}} - - # commands ----------------------------------------------------- {{{ - def _run(self): - if self.upgrade_all: - return self._upgrade_all() - - if self.casks: - if self.state == 'installed': - return self._install_casks() - elif self.state == 'upgraded': - return self._upgrade_casks() - elif self.state == 'absent': - return self._uninstall_casks() - - self.failed = True - self.message = "You must select a cask to install." - raise HomebrewCaskException(self.message) - - # sudo_password fix ---------------------- {{{ - def _run_command_with_sudo_password(self, cmd): - rc, out, err = '', '', '' - - with tempfile.NamedTemporaryFile() as sudo_askpass_file: - sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password)) - os.chmod(sudo_askpass_file.name, 0o700) - sudo_askpass_file.file.close() - - rc, out, err = self.module.run_command( - cmd, - environ_update={'SUDO_ASKPASS': sudo_askpass_file.name} - ) - - self.module.add_cleanup_file(sudo_askpass_file.name) - - return (rc, out, err) - # /sudo_password fix --------------------- }}} - - # updated -------------------------------- {{{ - def _update_homebrew(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ]) - if rc == 0: - if out and isinstance(out, string_types): - already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s - ) - if not already_updated: - self.changed = True - self.message = 'Homebrew updated successfully.' - else: - self.message = 'Homebrew already up-to-date.' - - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - # /updated ------------------------------- }}} - - # _upgrade_all --------------------------- {{{ - def _upgrade_all(self): - if self.module.check_mode: - self.changed = True - self.message = 'Casks would be upgraded.' - raise HomebrewCaskException(self.message) - - if self._brew_cask_command_is_deprecated(): - cmd = [self.brew_path, 'upgrade', '--cask'] - else: - cmd = [self.brew_path, 'cask', 'upgrade'] - - rc, out, err = '', '', '' - - if self.sudo_password: - rc, out, err = self._run_command_with_sudo_password(cmd) - else: - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): - self.message = 'Homebrew casks already upgraded.' - - else: - self.changed = True - self.message = 'Homebrew casks upgraded.' 
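- # illustrative sketch ------------------------------------------ {{{ - # Sketch of the version gate used above: Homebrew >= 2.6.0 replaced - # `brew cask <cmd>` with `brew <cmd> --cask`, so the module parses the - # first line of `brew --version` and picks the argv shape accordingly. - # distutils is what this module imports; note it is deprecated in modern - # Python. The helper name and sample version line are illustrative. - # - # from distutils.version import LooseVersion - # - # def cask_base_opts(brew_path, subcommand, brew_version): - # if LooseVersion(brew_version) >= LooseVersion('2.6.0'): - # return [brew_path, subcommand, '--cask'] - # return [brew_path, 'cask', subcommand] - # - # version_line = 'Homebrew 3.6.21' # hypothetical `brew --version` output - # version = version_line.split('\n')[0].split(' ')[1] - # assert cask_base_opts('/usr/local/bin/brew', 'list', version) == \ - # ['/usr/local/bin/brew', 'list', '--cask'] - # /illustrative sketch ----------------------------------------- }}}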
- - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - # /_upgrade_all -------------------------- }}} - - # installed ------------------------------ {{{ - def _install_current_cask(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if self._current_cask_is_installed(): - self.unchanged_count += 1 - self.message = 'Cask already installed: {0}'.format( - self.current_cask, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be installed: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, 'install', '--cask'] - else: - base_opts = [self.brew_path, 'cask', 'install'] - - opts = base_opts + [self.current_cask] + self.install_options - - cmd = [opt for opt in opts if opt] - - rc, out, err = '', '', '' - - if self.sudo_password: - rc, out, err = self._run_command_with_sudo_password(cmd) - else: - rc, out, err = self.module.run_command(cmd) - - if self._current_cask_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask installed: {0}'.format(self.current_cask) - return True - elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err): - self.unchanged_count += 1 - self.message = 'Cask already installed: {0}'.format( - self.current_cask, - ) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _install_casks(self): - for cask in self.casks: - self.current_cask = cask - self._install_current_cask() - - return True - # /installed ----------------------------- }}} - - # upgraded ------------------------------- {{{ - def _upgrade_current_cask(self): - command = 'upgrade' - - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if not self._current_cask_is_installed(): - command = 'install' - - if self._current_cask_is_installed() and not self._current_cask_is_outdated(): - self.message = 'Cask is already upgraded: {0}'.format( - self.current_cask, - ) - self.unchanged_count += 1 - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be upgraded: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, command, '--cask'] - else: - base_opts = [self.brew_path, 'cask', command] - - opts = base_opts + self.install_options + [self.current_cask] - - cmd = [opt for opt in opts if opt] - - rc, out, err = '', '', '' - - if self.sudo_password: - rc, out, err = self._run_command_with_sudo_password(cmd) - else: - rc, out, err = self.module.run_command(cmd) - - if self._current_cask_is_installed() and not self._current_cask_is_outdated(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask upgraded: {0}'.format(self.current_cask) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _upgrade_casks(self): - for cask in self.casks: - self.current_cask = cask - self._upgrade_current_cask() - - return True - # /upgraded ------------------------------ }}} - - # uninstalled 
---------------------------- {{{ - def _uninstall_current_cask(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if not self._current_cask_is_installed(): - self.unchanged_count += 1 - self.message = 'Cask already uninstalled: {0}'.format( - self.current_cask, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be uninstalled: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, 'uninstall', '--cask'] - else: - base_opts = [self.brew_path, 'cask', 'uninstall'] - - opts = base_opts + [self.current_cask] + self.install_options - - cmd = [opt for opt in opts if opt] - - rc, out, err = '', '', '' - - if self.sudo_password: - rc, out, err = self._run_command_with_sudo_password(cmd) - else: - rc, out, err = self.module.run_command(cmd) - - if not self._current_cask_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask uninstalled: {0}'.format(self.current_cask) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _uninstall_casks(self): - for cask in self.casks: - self.current_cask = cask - self._uninstall_current_cask() - - return True - # /uninstalled --------------------------- }}} - # /commands ---------------------------------------------------- }}} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict( - aliases=["pkg", "package", "cask"], - required=False, - type='list', - elements='str', - ), - path=dict( - default="/usr/local/bin:/opt/homebrew/bin", - required=False, - type='path', - ), - state=dict( - default="present", - choices=[ - "present", "installed", - "latest", "upgraded", - "absent", "removed", "uninstalled", - ], - ), - sudo_password=dict( - type="str", - required=False, - no_log=True, - ), - update_homebrew=dict( - default=False, - aliases=["update-brew"], - type='bool', - deprecated_aliases=[dict(name='update-brew', version='5.0.0', collection_name='community.general')], - ), - install_options=dict( - default=None, - aliases=['options'], - type='list', - elements='str', - ), - accept_external_apps=dict( - default=False, - type='bool', - ), - upgrade_all=dict( - default=False, - aliases=["upgrade"], - type='bool', - ), - greedy=dict( - default=False, - type='bool', - ), - ), - supports_check_mode=True, - ) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - p = module.params - - if p['name']: - casks = p['name'] - else: - casks = None - - path = p['path'] - if path: - path = path.split(':') - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('latest', 'upgraded'): - state = 'upgraded' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - sudo_password = p['sudo_password'] - - update_homebrew = p['update_homebrew'] - upgrade_all = p['upgrade_all'] - greedy = p['greedy'] - p['install_options'] = p['install_options'] or [] - install_options = ['--{0}'.format(install_option) - for install_option in p['install_options']] - - accept_external_apps = p['accept_external_apps'] - - brew_cask = HomebrewCask(module=module, path=path, casks=casks, - state=state, sudo_password=sudo_password, - update_homebrew=update_homebrew, - install_options=install_options, - 
accept_external_apps=accept_external_apps, - upgrade_all=upgrade_all, - greedy=greedy, - ) - (failed, changed, message) = brew_cask.run() - if failed: - module.fail_json(msg=message) - else: - module.exit_json(changed=changed, msg=message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/homebrew_tap.py b/plugins/modules/packaging/os/homebrew_tap.py deleted file mode 100644 index 6b30fdb68f..0000000000 --- a/plugins/modules/packaging/os/homebrew_tap.py +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Daniel Jaouen -# Copyright: (c) 2016, Indrajit Raychaudhuri -# -# Based on homebrew (Andrew Dunham ) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: homebrew_tap -author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" -short_description: Tap a Homebrew repository. -description: - - Tap external Homebrew repositories. -options: - name: - description: - - The GitHub user/organization repository to tap. - required: true - aliases: ['tap'] - type: list - elements: str - url: - description: - - The optional git URL of the repository to tap. The URL is not - assumed to be on GitHub, and the protocol doesn't have to be HTTP. - Any location and protocol that git can handle is fine. - - I(name) option may not be a list of multiple taps (but a single - tap instead) when this option is provided. - required: false - type: str - state: - description: - - state of the repository. - choices: [ 'present', 'absent' ] - required: false - default: 'present' - type: str - path: - description: - - "A ':' separated list of paths to search for C(brew) executable." 
- default: '/usr/local/bin:/opt/homebrew/bin' - type: path - version_added: '2.1.0' -requirements: [ homebrew ] -''' - -EXAMPLES = r''' -- name: Tap a Homebrew repository, state present - community.general.homebrew_tap: - name: homebrew/dupes - -- name: Tap a Homebrew repository, state absent - community.general.homebrew_tap: - name: homebrew/dupes - state: absent - -- name: Tap a Homebrew repository, state present - community.general.homebrew_tap: - name: homebrew/dupes,homebrew/science - state: present - -- name: Tap a Homebrew repository using url, state present - community.general.homebrew_tap: - name: telemachus/brew - url: 'https://bitbucket.org/telemachus/brew' -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - - -def a_valid_tap(tap): - '''Returns True if the tap is valid.''' - regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') - return regex.match(tap) - - -def already_tapped(module, brew_path, tap): - '''Returns True if already tapped.''' - - rc, out, err = module.run_command([ - brew_path, - 'tap', - ]) - - taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] - tap_name = re.sub('homebrew-', '', tap.lower()) - - return tap_name in taps - - -def add_tap(module, brew_path, tap, url=None): - '''Adds a single tap.''' - failed, changed, msg = False, False, '' - - if not a_valid_tap(tap): - failed = True - msg = 'not a valid tap: %s' % tap - - elif not already_tapped(module, brew_path, tap): - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command([ - brew_path, - 'tap', - tap, - url, - ]) - if rc == 0: - changed = True - msg = 'successfully tapped: %s' % tap - else: - failed = True - msg = 'failed to tap: %s due to %s' % (tap, err) - - else: - msg = 'already tapped: %s' % tap - - return (failed, changed, msg) - - -def add_taps(module, brew_path, taps): - '''Adds one or more taps.''' - failed, changed, unchanged, added, msg = False, False, 0, 0, '' - - for tap in taps: - (failed, changed, msg) = add_tap(module, brew_path, tap) - if failed: - break - if changed: - added += 1 - else: - unchanged += 1 - - if failed: - msg = 'added: %d, unchanged: %d, error: ' + msg - msg = msg % (added, unchanged) - elif added: - changed = True - msg = 'added: %d, unchanged: %d' % (added, unchanged) - else: - msg = 'added: %d, unchanged: %d' % (added, unchanged) - - return (failed, changed, msg) - - -def remove_tap(module, brew_path, tap): - '''Removes a single tap.''' - failed, changed, msg = False, False, '' - - if not a_valid_tap(tap): - failed = True - msg = 'not a valid tap: %s' % tap - - elif already_tapped(module, brew_path, tap): - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command([ - brew_path, - 'untap', - tap, - ]) - if not already_tapped(module, brew_path, tap): - changed = True - msg = 'successfully untapped: %s' % tap - else: - failed = True - msg = 'failed to untap: %s due to %s' % (tap, err) - - else: - msg = 'already untapped: %s' % tap - - return (failed, changed, msg) - - -def remove_taps(module, brew_path, taps): - '''Removes one or more taps.''' - failed, changed, unchanged, removed, msg = False, False, 0, 0, '' - - for tap in taps: - (failed, changed, msg) = remove_tap(module, brew_path, tap) - if failed: - break - if changed: - removed += 1 - else: - unchanged += 1 - - if failed: - msg = 'removed: %d, unchanged: %d, error: ' + msg - msg = msg % (removed, unchanged) - elif removed: - changed = True - msg = 'removed: %d, unchanged: %d' % (removed, unchanged) - 
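- # illustrative sketch ------------------------------------------ {{{ - # Sketch of the tap bookkeeping used by a_valid_tap()/already_tapped() - # above: `brew tap` output is lowercased and the optional `homebrew-` repo - # prefix is stripped before comparing, so `user/homebrew-foo` and - # `user/foo` are treated as the same tap. Sample values are illustrative. - # - # import re - # - # def a_valid_tap(tap): - # return re.match(r'^([\w-]+)/(homebrew-)?([\w-]+)$', tap) is not None - # - # def normalize_tap(tap): - # return re.sub('homebrew-', '', tap.lower()) - # - # assert a_valid_tap('telemachus/brew') - # assert not a_valid_tap('not a tap') - # assert normalize_tap('User/Homebrew-Foo') == 'user/foo' - # /illustrative sketch ----------------------------------------- }}}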
else: - msg = 'removed: %d, unchanged: %d' % (removed, unchanged) - - return (failed, changed, msg) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['tap'], type='list', required=True, elements='str'), - url=dict(default=None, required=False), - state=dict(default='present', choices=['present', 'absent']), - path=dict( - default="/usr/local/bin:/opt/homebrew/bin", - required=False, - type='path', - ), - ), - supports_check_mode=True, - ) - - path = module.params['path'] - if path: - path = path.split(':') - - brew_path = module.get_bin_path( - 'brew', - required=True, - opt_dirs=path, - ) - - taps = module.params['name'] - url = module.params['url'] - - if module.params['state'] == 'present': - if url is None: - # No tap URL provided explicitly, continue with bulk addition - # of all the taps. - failed, changed, msg = add_taps(module, brew_path, taps) - else: - # When a tap URL is provided explicitly, we allow adding a - # *single* tap only. Validate and proceed to add the single tap. - if len(taps) > 1: - msg = "List of multiple taps may not be provided with 'url' option." - module.fail_json(msg=msg) - else: - failed, changed, msg = add_tap(module, brew_path, taps[0], url) - - if failed: - module.fail_json(msg=msg) - else: - module.exit_json(changed=changed, msg=msg) - - elif module.params['state'] == 'absent': - failed, changed, msg = remove_taps(module, brew_path, taps) - - if failed: - module.fail_json(msg=msg) - else: - module.exit_json(changed=changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/installp.py b/plugins/modules/packaging/os/installp.py deleted file mode 100644 index af7a950afa..0000000000 --- a/plugins/modules/packaging/os/installp.py +++ /dev/null @@ -1,292 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: installp -author: -- Kairo Araujo (@kairoaraujo) -short_description: Manage packages on AIX -description: - - Manage packages using 'installp' on AIX. -options: - accept_license: - description: - - Whether to accept the license for the package(s). - type: bool - default: no - name: - description: - - One or more packages to install or remove. - - Use C(all) to install all packages available in the given C(repository_path). - type: list - elements: str - required: true - aliases: [ pkg ] - repository_path: - description: - - Path with AIX packages (required to install). - type: path - state: - description: - - Whether the package needs to be present on or absent from the system. - type: str - choices: [ absent, present ] - default: present -notes: -- If the package is already installed, the module will not install it again, even if a newer package/fileset is available.
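- # illustrative sketch ------------------------------------------ {{{ - # Sketch of the aggregate result pattern from add_taps()/remove_taps() - # above: each item is processed independently, counters distinguish changed - # from unchanged items, and the first failure short-circuits with a - # combined message. process() is a stand-in for add_tap()/remove_tap(). - # - # def apply_all(process, items): - # changed_count = unchanged_count = 0 - # for item in items: - # failed, changed, msg = process(item) - # if failed: - # return True, False, 'changed: %d, unchanged: %d, error: %s' % ( - # changed_count, unchanged_count, msg) - # if changed: - # changed_count += 1 - # else: - # unchanged_count += 1 - # msg = 'changed: %d, unchanged: %d' % (changed_count, unchanged_count) - # return False, changed_count > 0, msg - # - # assert apply_all(lambda t: (False, True, ''), ['a/b', 'c/d']) == \ - # (False, True, 'changed: 2, unchanged: 0') - # /illustrative sketch ----------------------------------------- }}}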
-''' - -EXAMPLES = r''' -- name: Install package foo - community.general.installp: - name: foo - repository_path: /repository/AIX71/installp/base - accept_license: yes - state: present - -- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot - community.general.installp: - name: bos.sysmgt - repository_path: /repository/AIX71/installp/base - accept_license: yes - state: present - -- name: Install bos.sysmgt.nim.master only - community.general.installp: - name: bos.sysmgt.nim.master - repository_path: /repository/AIX71/installp/base - accept_license: yes - state: present - -- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot - community.general.installp: - name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot - repository_path: /repository/AIX71/installp/base - accept_license: yes - state: present - -- name: Remove package bos.sysmgt.nim.master - community.general.installp: - name: bos.sysmgt.nim.master - state: absent -''' - -RETURN = r''' # ''' - -import os -import re - -from ansible.module_utils.basic import AnsibleModule - - -def _check_new_pkg(module, package, repository_path): - """ - Check that the package or fileset name is correct and present in the repository path. - - :param module: Ansible module arguments spec. - :param package: Package/fileset name. - :param repository_path: Repository package path. - :return: Bool, package information. - """ - - if os.path.isdir(repository_path): - installp_cmd = module.get_bin_path('installp', True) - rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path)) - if rc != 0: - module.fail_json(msg="Failed to run installp.", rc=rc, err=err) - - if package == 'all': - pkg_info = "All packages on dir" - return True, pkg_info - - else: - pkg_info = {} - for line in package_result.splitlines(): - if re.findall(package, line): - pkg_name = line.split()[0].strip() - pkg_version = line.split()[1].strip() - pkg_info[pkg_name] = pkg_version - - return True, pkg_info - - return False, None - - else: - module.fail_json(msg="Repository path %s is not valid." % repository_path) - - -def _check_installed_pkg(module, package, repository_path): - """ - Check the package on AIX. - It verifies whether the package is installed and collects its information. - - :param module: Ansible module parameters spec. - :param package: Package/fileset name. - :param repository_path: Repository package path. - :return: Bool, package data.
- """ - - lslpp_cmd = module.get_bin_path('lslpp', True) - rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package)) - - if rc == 1: - package_state = ' '.join(err.split()[-2:]) - if package_state == 'not installed.': - return False, None - else: - module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) - - if rc != 0: - module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) - - pkg_data = {} - full_pkg_data = lslpp_result.splitlines() - for line in full_pkg_data: - pkg_name, fileset, level = line.split(':')[0:3] - pkg_data[pkg_name] = fileset, level - - return True, pkg_data - - -def remove(module, installp_cmd, packages): - repository_path = None - remove_count = 0 - removed_pkgs = [] - not_found_pkg = [] - for package in packages: - pkg_check, dummy = _check_installed_pkg(module, package, repository_path) - - if pkg_check: - if not module.check_mode: - rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package)) - if rc != 0: - module.fail_json(msg="Failed to run installp.", rc=rc, err=err) - remove_count += 1 - removed_pkgs.append(package) - - else: - not_found_pkg.append(package) - - if remove_count > 0: - if len(not_found_pkg) > 1: - not_found_pkg.insert(0, "Package(s) not found: ") - - changed = True - msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg)) - - else: - changed = False - msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg)) - - return changed, msg - - -def install(module, installp_cmd, packages, repository_path, accept_license): - installed_pkgs = [] - not_found_pkgs = [] - already_installed_pkgs = {} - - accept_license_param = { - True: '-Y', - False: '', - } - - # Validate if package exists on repository path. - for package in packages: - pkg_check, pkg_data = _check_new_pkg(module, package, repository_path) - - # If package exists on repository path, check if package is installed. - if pkg_check: - pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path) - - # If package is already installed. - if pkg_check_current: - # Check if package is a package and not a fileset, get version - # and add the package into already installed list - if package in pkg_info.keys(): - already_installed_pkgs[package] = pkg_info[package][1] - - else: - # If the package is not a package but a fileset, confirm - # and add the fileset/package into already installed list - for key in pkg_info.keys(): - if package in pkg_info[key]: - already_installed_pkgs[package] = pkg_info[key][1] - - else: - if not module.check_mode: - rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package)) - if rc != 0: - module.fail_json(msg="Failed to run installp", rc=rc, err=err) - installed_pkgs.append(package) - - else: - not_found_pkgs.append(package) - - if len(installed_pkgs) > 0: - installed_msg = (" Installed: %s." % ' '.join(installed_pkgs)) - else: - installed_msg = '' - - if len(not_found_pkgs) > 0: - not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs)) - else: - not_found_msg = '' - - if len(already_installed_pkgs) > 0: - already_installed_msg = (" Already installed: %s." 
% already_installed_pkgs) - else: - already_installed_msg = '' - - if len(installed_pkgs) > 0: - changed = True - msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg)) - else: - changed = False - msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg)) - - return changed, msg - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', required=True, aliases=['pkg']), - repository_path=dict(type='path'), - accept_license=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - ) - - name = module.params['name'] - repository_path = module.params['repository_path'] - accept_license = module.params['accept_license'] - state = module.params['state'] - - installp_cmd = module.get_bin_path('installp', True) - - if state == 'present': - if repository_path is None: - module.fail_json(msg="repository_path is required to install a package") - - changed, msg = install(module, installp_cmd, name, repository_path, accept_license) - - elif state == 'absent': - changed, msg = remove(module, installp_cmd, name) - - else: - module.fail_json(changed=False, msg="Unexpected state.") - - module.exit_json(changed=changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/layman.py b/plugins/modules/packaging/os/layman.py deleted file mode 100644 index 3c990205d9..0000000000 --- a/plugins/modules/packaging/os/layman.py +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Jakub Jirutka -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: layman -author: "Jakub Jirutka (@jirutka)" -short_description: Manage Gentoo overlays -description: - - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux. - Please note that Layman must be installed on a managed node prior to using this module. -requirements: - - "python >= 2.6" - - layman python module -options: - name: - description: - - The overlay id to install, synchronize, or uninstall. - Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)). - required: true - type: str - list_url: - description: - - A URL of an alternative overlays list that defines the overlay to install. - This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where - C(overlay_defs) is read from the Layman configuration. - aliases: [url] - type: str - state: - description: - - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. - default: present - choices: [present, absent, updated] - type: str - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be - set to C(no) when no other option exists. Prior to 1.9.3 the code - defaulted to C(no).
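- # illustrative sketch ------------------------------------------ {{{ - # Sketch of the lslpp parsing done in _check_installed_pkg() above: with - # `-lcq`, lslpp emits colon-delimited records and the module keeps only the - # first three fields. The sample record below is a simplified assumption - # about the layout; real lslpp output carries more fields per line. - # - # def parse_lslpp(lslpp_output): - # pkg_data = {} - # for line in lslpp_output.splitlines(): - # pkg_name, fileset, level = line.split(':')[0:3] - # pkg_data[pkg_name] = fileset, level - # return pkg_data - # - # sample = '/usr/lib/objrepos:bos.sysmgt.nim.master:7.1.4.0::COMMITTED' - # assert parse_lslpp(sample) == \ - # {'/usr/lib/objrepos': ('bos.sysmgt.nim.master', '7.1.4.0')} - # /illustrative sketch ----------------------------------------- }}}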
- type: bool - default: yes -''' - -EXAMPLES = ''' -- name: Install the overlay mozilla which is on the central overlays list - community.general.layman: - name: mozilla - -- name: Install the overlay cvut from the specified alternative list - community.general.layman: - name: cvut - list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' - -- name: Update (sync) the overlay cvut or install if not installed yet - community.general.layman: - name: cvut - list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' - state: updated - -- name: Update (sync) all of the installed overlays - community.general.layman: - name: ALL - state: updated - -- name: Uninstall the overlay cvut - community.general.layman: - name: cvut - state: absent -''' - -import shutil -import traceback - -from os import path - -LAYMAN_IMP_ERR = None -try: - from layman.api import LaymanAPI - from layman.config import BareConfig - HAS_LAYMAN_API = True -except ImportError: - LAYMAN_IMP_ERR = traceback.format_exc() - HAS_LAYMAN_API = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.urls import fetch_url - - -USERAGENT = 'ansible-httpget' - - -class ModuleError(Exception): - pass - - -def init_layman(config=None): - '''Returns the initialized ``LaymanAPI``. - - :param config: the layman's configuration to use (optional) - ''' - if config is None: - config = BareConfig(read_configfile=True, quietness=1) - return LaymanAPI(config) - - -def download_url(module, url, dest): - ''' - :param url: the URL to download - :param dest: the absolute path of where to save the downloaded content to; - it must be writable and not a directory - - :raises ModuleError - ''' - - # Hack to add params in the form that fetch_url expects - module.params['http_agent'] = USERAGENT - response, info = fetch_url(module, url) - if info['status'] != 200: - raise ModuleError("Failed to get %s: %s" % (url, info['msg'])) - - try: - with open(dest, 'w') as f: - shutil.copyfileobj(response, f) - except IOError as e: - raise ModuleError("Failed to write: %s" % str(e)) - - -def install_overlay(module, name, list_url=None): - '''Installs the overlay repository. If not on the central overlays list, - then :list_url of an alternative list must be provided. The list will be - fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the - ``overlay_defs`` is read from the Layman's configuration). - - :param name: the overlay id - :param list_url: the URL of the remote repositories list to look for the overlay - definition (optional, default: None) - - :returns: True if the overlay was installed, or False if already exists - (i.e. nothing has changed) - :raises ModuleError - ''' - # read Layman configuration - layman_conf = BareConfig(read_configfile=True) - layman = init_layman(layman_conf) - - if layman.is_installed(name): - return False - - if module.check_mode: - mymsg = 'Would add layman repo \'' + name + '\'' - module.exit_json(changed=True, msg=mymsg) - - if not layman.is_repo(name): - if not list_url: - raise ModuleError("Overlay '%s' is not on the list of known " - "overlays and URL of the remote list was not provided." 
% name) - - overlay_defs = layman_conf.get_option('overlay_defs') - dest = path.join(overlay_defs, name + '.xml') - - download_url(module, list_url, dest) - - # reload config - layman = init_layman() - - if not layman.add_repos(name): - raise ModuleError(layman.get_errors()) - - return True - - -def uninstall_overlay(module, name): - '''Uninstalls the given overlay repository from the system. - - :param name: the overlay id to uninstall - - :returns: True if the overlay was uninstalled, or False if doesn't exist - (i.e. nothing has changed) - :raises ModuleError - ''' - layman = init_layman() - - if not layman.is_installed(name): - return False - - if module.check_mode: - mymsg = 'Would remove layman repo \'' + name + '\'' - module.exit_json(changed=True, msg=mymsg) - - layman.delete_repos(name) - if layman.get_errors(): - raise ModuleError(layman.get_errors()) - - return True - - -def sync_overlay(name): - '''Synchronizes the specified overlay repository. - - :param name: the overlay repository id to sync - :raises ModuleError - ''' - layman = init_layman() - - if not layman.sync(name): - messages = [str(item[1]) for item in layman.sync_results[2]] - raise ModuleError(messages) - - -def sync_overlays(): - '''Synchronize all of the installed overlays. - - :raises ModuleError - ''' - layman = init_layman() - - for name in layman.get_installed(): - sync_overlay(name) - - -def main(): - # define module - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - list_url=dict(aliases=['url']), - state=dict(default="present", choices=['present', 'absent', 'updated']), - validate_certs=dict(required=False, default=True, type='bool'), - ), - supports_check_mode=True - ) - - if not HAS_LAYMAN_API: - module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR) - - state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) - - changed = False - try: - if state == 'present': - changed = install_overlay(module, name, url) - - elif state == 'updated': - if name == 'ALL': - sync_overlays() - elif install_overlay(module, name, url): - changed = True - else: - sync_overlay(name) - else: - changed = uninstall_overlay(module, name) - - except ModuleError as e: - module.fail_json(msg=e.message) - else: - module.exit_json(changed=changed, name=name) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/macports.py b/plugins/modules/packaging/os/macports.py deleted file mode 100644 index 1d3f47a240..0000000000 --- a/plugins/modules/packaging/os/macports.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jimmy Tang -# Based on okpg (Patrick Pelletier ), pacman -# (Afterburn) and pkgin (Shaun Zinck) modules -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: macports -author: "Jimmy Tang (@jcftang)" -short_description: Package manager for MacPorts -description: - - Manages MacPorts packages (ports) -options: - name: - description: - - A list of port names. - aliases: ['port'] - type: list - elements: str - selfupdate: - description: - - Update Macports and the ports tree, either prior to installing ports or as a separate step. - - Equivalent to running C(port selfupdate). - aliases: ['update_cache', 'update_ports'] - default: "no" - type: bool - state: - description: - - Indicates the desired state of the port. 
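- # illustrative sketch ------------------------------------------ {{{ - # Sketch of the layman state dispatch in main() above: `updated` either - # syncs everything (name == 'ALL'), or installs the overlay on first sight - # and syncs it on subsequent runs. The callables are stand-ins for the - # module's install_overlay()/sync_overlay()/sync_overlays()/ - # uninstall_overlay(); the return value mirrors `changed`. - # - # def dispatch(state, name, install, sync, sync_all, uninstall): - # if state == 'present': - # return install(name) - # if state == 'updated': - # if name == 'ALL': - # sync_all() - # return False - # if install(name): - # return True - # sync(name) - # return False - # return uninstall(name) - # - # # an already-installed overlay under state=updated is synced, not changed: - # assert dispatch('updated', 'cvut', lambda n: False, lambda n: None, - # lambda: None, lambda n: False) is False - # /illustrative sketch ----------------------------------------- }}}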
- choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed'] - default: present - type: str - upgrade: - description: - - Upgrade all outdated ports, either prior to installing ports or as a separate step. - - Equivalent to running C(port upgrade outdated). - default: "no" - type: bool - variant: - description: - - A port variant specification. - - 'C(variant) is only supported with state: I(installed)/I(present).' - aliases: ['variants'] - type: str -''' -EXAMPLES = ''' -- name: Install the foo port - community.general.macports: - name: foo - -- name: Install the universal, x11 variant of the foo port - community.general.macports: - name: foo - variant: +universal+x11 - -- name: Install a list of ports - community.general.macports: - name: "{{ ports }}" - vars: - ports: - - foo - - foo-tools - -- name: Update Macports and the ports tree, then upgrade all outdated ports - community.general.macports: - selfupdate: yes - upgrade: yes - -- name: Update Macports and the ports tree, then install the foo port - community.general.macports: - name: foo - selfupdate: yes - -- name: Remove the foo port - community.general.macports: - name: foo - state: absent - -- name: Activate the foo port - community.general.macports: - name: foo - state: active - -- name: Deactivate the foo port - community.general.macports: - name: foo - state: inactive -''' - -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -def selfupdate(module, port_path): - """ Update Macports and the ports tree. """ - - rc, out, err = module.run_command("%s -v selfupdate" % port_path) - - if rc == 0: - updated = any( - re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or - re.search(r'Installing new Macports release', s.strip()) - for s in out.split('\n') - if s - ) - if updated: - changed = True - msg = "Macports updated successfully" - else: - changed = False - msg = "Macports already up-to-date" - - return (changed, msg, out, err) - else: - module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err) - - -def upgrade(module, port_path): - """ Upgrade outdated ports. """ - - rc, out, err = module.run_command("%s upgrade outdated" % port_path) - - # rc is 1 when nothing to upgrade so check stdout first. - if out.strip() == "Nothing to upgrade.": - changed = False - msg = "Ports already upgraded" - return (changed, msg, out, err) - elif rc == 0: - changed = True - msg = "Outdated ports upgraded successfully" - return (changed, msg, out, err) - else: - module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err) - - -def query_port(module, port_path, name, state="present"): - """ Returns whether a port is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command([port_path, "-q", "installed", name]) - - if rc == 0 and out.strip().startswith(name + " "): - return True - - return False - - elif state == "active": - - rc, out, err = module.run_command([port_path, "-q", "installed", name]) - - if rc == 0 and "(active)" in out: - return True - - return False - - -def remove_ports(module, port_path, ports, stdout, stderr): - """ Uninstalls one or more ports if installed. 
""" - - remove_c = 0 - # Using a for loop in case of error, we can report the port that failed - for port in ports: - # Query the port first, to see if we even need to remove - if not query_port(module, port_path, port): - continue - - rc, out, err = module.run_command("%s uninstall %s" % (port_path, port)) - stdout += out - stderr += err - if query_port(module, port_path, port): - module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr) - - module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr) - - -def install_ports(module, port_path, ports, variant, stdout, stderr): - """ Installs one or more ports if not already installed. """ - - install_c = 0 - - for port in ports: - if query_port(module, port_path, port): - continue - - rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant)) - stdout += out - stderr += err - if not query_port(module, port_path, port): - module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr) - - module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr) - - -def activate_ports(module, port_path, ports, stdout, stderr): - """ Activate a port if it's inactive. """ - - activate_c = 0 - - for port in ports: - if not query_port(module, port_path, port): - module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr) - - if query_port(module, port_path, port, state="active"): - continue - - rc, out, err = module.run_command("%s activate %s" % (port_path, port)) - stdout += out - stderr += err - - if not query_port(module, port_path, port, state="active"): - module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr) - - activate_c += 1 - - if activate_c > 0: - module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr) - - module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr) - - -def deactivate_ports(module, port_path, ports, stdout, stderr): - """ Deactivate a port if it's active. 
""" - - deactivated_c = 0 - - for port in ports: - if not query_port(module, port_path, port): - module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr) - - if not query_port(module, port_path, port, state="active"): - continue - - rc, out, err = module.run_command("%s deactivate %s" % (port_path, port)) - stdout += out - stderr += err - if query_port(module, port_path, port, state="active"): - module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr) - - deactivated_c += 1 - - if deactivated_c > 0: - module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr) - - module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', aliases=["port"]), - selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), - state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), - upgrade=dict(default=False, type='bool'), - variant=dict(aliases=["variants"], default=None, type='str') - ) - ) - - stdout = "" - stderr = "" - - port_path = module.get_bin_path('port', True, ['/opt/local/bin']) - - p = module.params - - if p["selfupdate"]: - (changed, msg, out, err) = selfupdate(module, port_path) - stdout += out - stderr += err - if not (p["name"] or p["upgrade"]): - module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr) - - if p["upgrade"]: - (changed, msg, out, err) = upgrade(module, port_path) - stdout += out - stderr += err - if not p["name"]: - module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr) - - pkgs = p["name"] - - variant = p["variant"] - - if p["state"] in ["present", "installed"]: - install_ports(module, port_path, pkgs, variant, stdout, stderr) - - elif p["state"] in ["absent", "removed"]: - remove_ports(module, port_path, pkgs, stdout, stderr) - - elif p["state"] == "active": - activate_ports(module, port_path, pkgs, stdout, stderr) - - elif p["state"] == "inactive": - deactivate_ports(module, port_path, pkgs, stdout, stderr) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/mas.py b/plugins/modules/packaging/os/mas.py deleted file mode 100644 index dd394b7c43..0000000000 --- a/plugins/modules/packaging/os/mas.py +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Lukas Bestle -# Copyright: (c) 2017, Michael Heap -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: mas -short_description: Manage Mac App Store applications with mas-cli -description: - - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). -version_added: '0.2.0' -author: - - Michael Heap (@mheap) - - Lukas Bestle (@lukasbestle) -options: - id: - description: - - The Mac App Store identifier of the app(s) you want to manage. - - This can be found by running C(mas search APP_NAME) on your machine. - type: list - elements: int - state: - description: - - Desired state of the app installation. - - The C(absent) value requires root permissions, also see the examples. 
- type: str - choices: - - absent - - latest - - present - default: present - upgrade_all: - description: - - Upgrade all installed Mac App Store apps. - type: bool - default: "no" - aliases: ["upgrade"] -requirements: - - macOS 10.11+ - - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" - - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). -notes: - - This module supports C(check_mode). -''' - -EXAMPLES = ''' -- name: Install Keynote - community.general.mas: - id: 409183694 - state: present - -- name: Install Divvy with command mas installed in /usr/local/bin - community.general.mas: - id: 413857545 - state: present - environment: - PATH: /usr/local/bin:{{ ansible_facts.env.PATH }} - -- name: Install a list of apps - community.general.mas: - id: - - 409183694 # Keynote - - 413857545 # Divvy - state: present - -- name: Ensure the latest Keynote version is installed - community.general.mas: - id: 409183694 - state: latest - -- name: Upgrade all installed Mac App Store apps - community.general.mas: - upgrade_all: yes - -- name: Install specific apps and also upgrade all others - community.general.mas: - id: - - 409183694 # Keynote - - 413857545 # Divvy - state: present - upgrade_all: yes - -- name: Uninstall Divvy - community.general.mas: - id: 413857545 - state: absent - become: yes # Uninstallation requires root permissions -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from distutils.version import StrictVersion -import os - - -class Mas(object): - - def __init__(self, module): - self.module = module - - # Initialize data properties - self.mas_path = self.module.get_bin_path('mas') - self._checked_signin = False - self._installed = None # Populated only if needed - self._outdated = None # Populated only if needed - self.count_install = 0 - self.count_upgrade = 0 - self.count_uninstall = 0 - self.result = { - 'changed': False - } - - self.check_mas_tool() - - def app_command(self, command, id): - ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' ''' - - if not self.module.check_mode: - if command != 'uninstall': - self.check_signin() - - rc, out, err = self.run([command, str(id)]) - if rc != 0: - self.module.fail_json( - msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip()) - ) - - # No error or dry run - self.__dict__['count_' + command] += 1 - - def check_mas_tool(self): - ''' Verifies that the `mas` tool is available in a recent version ''' - - # Is the `mas` tool available at all? - if not self.mas_path: - self.module.fail_json(msg='Required `mas` tool is not installed') - - # Is the version recent enough? 
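- # `mas version` prints a bare version string (for example "1.8.6"), which - # StrictVersion can compare against the 1.5.0 minimum below.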
- rc, out, err = self.run(['version']) - if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'): - self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip()) - - def check_signin(self): - ''' Verifies that the user is signed in to the Mac App Store ''' - - # Only check this once per execution - if self._checked_signin: - return - - rc, out, err = self.run(['account']) - if out.split("\n", 1)[0].rstrip() == 'Not signed in': - self.module.fail_json(msg='You must be signed in to the Mac App Store') - - self._checked_signin = True - - def exit(self): - ''' Exit with the data we have collected over time ''' - - msgs = [] - if self.count_install > 0: - msgs.append('Installed {0} app(s)'.format(self.count_install)) - if self.count_upgrade > 0: - msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade)) - if self.count_uninstall > 0: - msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall)) - - if msgs: - self.result['changed'] = True - self.result['msg'] = ', '.join(msgs) - - self.module.exit_json(**self.result) - - def get_current_state(self, command): - ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' ''' - - rc, raw_apps, err = self.run([command]) - rows = raw_apps.split("\n") - if rows[0] == "No installed apps found": - rows = [] - apps = [] - for r in rows: - # Format: "123456789 App Name" - r = r.split(' ', 1) - if len(r) == 2: - apps.append(int(r[0])) - - return apps - - def installed(self): - ''' Returns the list of installed apps ''' - - # Populate cache if not already done - if self._installed is None: - self._installed = self.get_current_state('list') - - return self._installed - - def is_installed(self, id): - ''' Checks whether the given app is installed ''' - - return int(id) in self.installed() - - def is_outdated(self, id): - ''' Checks whether the given app is installed, but outdated ''' - - return int(id) in self.outdated() - - def outdated(self): - ''' Returns the list of installed, but outdated apps ''' - - # Populate cache if not already done - if self._outdated is None: - self._outdated = self.get_current_state('outdated') - - return self._outdated - - def run(self, cmd): - ''' Runs a command of the `mas` tool ''' - - cmd.insert(0, self.mas_path) - return self.module.run_command(cmd, False) - - def upgrade_all(self): - ''' Upgrades all installed apps and sets the correct result data ''' - - outdated = self.outdated() - - if not self.module.check_mode: - self.check_signin() - - rc, out, err = self.run(['upgrade']) - if rc != 0: - self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip()) - - self.count_upgrade += len(outdated) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(type='list', elements='int'), - state=dict(type='str', default='present', choices=['absent', 'latest', 'present']), - upgrade_all=dict(type='bool', default=False, aliases=['upgrade']), - ), - supports_check_mode=True - ) - mas = Mas(module) - - if module.params['id']: - apps = module.params['id'] - else: - apps = [] - - state = module.params['state'] - upgrade = module.params['upgrade_all'] - - # Run operations on the given app IDs - for app in sorted(set(apps)): - if state == 'present': - if not mas.is_installed(app): - mas.app_command('install', app) - - elif state == 'absent': - if mas.is_installed(app): - # Ensure we are root - if os.getuid() != 0: - module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')") - - 
mas.app_command('uninstall', app) - - elif state == 'latest': - if not mas.is_installed(app): - mas.app_command('install', app) - elif mas.is_outdated(app): - mas.app_command('upgrade', app) - - # Upgrade all apps if requested - mas._outdated = None # Clear cache - if upgrade and mas.outdated(): - mas.upgrade_all() - - # Exit with the collected data - mas.exit() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/openbsd_pkg.py b/plugins/modules/packaging/os/openbsd_pkg.py deleted file mode 100644 index 4299d60ad0..0000000000 --- a/plugins/modules/packaging/os/openbsd_pkg.py +++ /dev/null @@ -1,654 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrik Lundin -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: openbsd_pkg -author: -- Patrik Lundin (@eest) -short_description: Manage packages on OpenBSD -description: - - Manage packages on OpenBSD using the pkg tools. -requirements: -- python >= 2.5 -options: - name: - description: - - A name or a list of names of the packages. - required: yes - type: list - elements: str - state: - description: - - C(present) will make sure the package is installed. - C(latest) will make sure the latest version of the package is installed. - C(absent) will make sure the specified package is not installed. - choices: [ absent, latest, present, installed, removed ] - default: present - type: str - build: - description: - - Build the package from source instead of downloading and installing - a binary. Requires that the port source tree is already installed. - Automatically builds and installs the 'sqlports' package, if it is - not already installed. - - Mutually exclusive with I(snapshot). - type: bool - default: no - snapshot: - description: - - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel. - - Mutually exclusive with I(build). - type: bool - default: no - version_added: 1.3.0 - ports_dir: - description: - - When used in combination with the C(build) option, allows overriding - the default ports source directory. - default: /usr/ports - type: path - clean: - description: - - When updating or removing packages, delete the extra configuration - file(s) in the old packages which are annotated with @extra in - the packaging-list. - type: bool - default: no - quick: - description: - - Replace or delete packages quickly; do not bother with checksums - before removing normal files. - type: bool - default: no -notes: - - When used with a `loop:` each package will be processed individually; - it is much more efficient to pass the list directly to the `name` option.
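- - The package name `*` is only valid together with `state: latest` and will upgrade all installed packages.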
-''' - -EXAMPLES = ''' -- name: Make sure nmap is installed - community.general.openbsd_pkg: - name: nmap - state: present - -- name: Make sure nmap is the latest version - community.general.openbsd_pkg: - name: nmap - state: latest - -- name: Make sure nmap is not installed - community.general.openbsd_pkg: - name: nmap - state: absent - -- name: Make sure nmap is installed, build it from source if it is not - community.general.openbsd_pkg: - name: nmap - state: present - build: yes - -- name: Specify a pkg flavour with '--' - community.general.openbsd_pkg: - name: vim--no_x11 - state: present - -- name: Specify the default flavour to avoid ambiguity errors - community.general.openbsd_pkg: - name: vim-- - state: present - -- name: Specify a package branch (requires at least OpenBSD 6.0) - community.general.openbsd_pkg: - name: python%3.5 - state: present - -- name: Update all packages on the system - community.general.openbsd_pkg: - name: '*' - state: latest - -- name: Purge a package and its configuration files - community.general.openbsd_pkg: - name: mpd - clean: yes - state: absent - -- name: Quickly remove a package without checking checksums - community.general.openbsd_pkg: - name: qt5 - quick: yes - state: absent -''' - -import os -import platform -import re -import shlex -import sqlite3 - -from distutils.version import StrictVersion - -from ansible.module_utils.basic import AnsibleModule - - -# Function used for executing commands. -def execute_command(cmd, module): - # Break command line into arguments. - # This makes run_command() use shell=False, which we need to avoid shell - # expansion of special characters like '*'. - cmd_args = shlex.split(cmd) - return module.run_command(cmd_args) - - -# Function used to find out if a package is currently installed. -def get_package_state(names, pkg_spec, module): - info_cmd = 'pkg_info -Iq' - - for name in names: - command = "%s inst:%s" % (info_cmd, name) - - rc, stdout, stderr = execute_command(command, module) - - if stderr: - module.fail_json(msg="failed in get_package_state(): " + stderr) - - if stdout: - # If the requested package name is just a stem, like "python", we may - # find multiple packages with that name. - pkg_spec[name]['installed_names'] = stdout.splitlines() - module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names']) - pkg_spec[name]['installed_state'] = True - else: - pkg_spec[name]['installed_state'] = False - - -# Function used to make sure a package is present. -def package_present(names, pkg_spec, module): - build = module.params['build'] - - for name in names: - # It is possible package_present() has been called from package_latest(). - # In that case we do not want to operate on the whole list of names, - # only the leftovers.
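- # (package_latest() records names that were not yet installed in - # pkg_spec['package_latest_leftovers'] and then calls back into this function.)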
- if pkg_spec['package_latest_leftovers']: - if name not in pkg_spec['package_latest_leftovers']: - module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name) - continue - else: - module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name) - - if module.check_mode: - install_cmd = 'pkg_add -Imn' - else: - if build is True: - port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module)) - if os.path.isdir(port_dir): - if pkg_spec[name]['flavor']: - flavors = pkg_spec[name]['flavor'].replace('-', ' ') - install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors) - elif pkg_spec[name]['subpackage']: - install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, - pkg_spec[name]['subpackage']) - else: - install_cmd = "cd %s && make install && make clean=depends" % (port_dir) - else: - module.fail_json(msg="the port source directory %s does not exist" % (port_dir)) - else: - install_cmd = 'pkg_add -Im' - - if module.params['snapshot'] is True: - install_cmd += ' -Dsnap' - - if pkg_spec[name]['installed_state'] is False: - - # Attempt to install the package - if build is True and not module.check_mode: - (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True) - else: - (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module) - - # The behaviour of pkg_add is a bit different depending on whether a - # specific version is supplied or not. - # - # When a specific version is supplied the return code will be 0 when - # a package is found and 1 when it is not. If a version is not - # supplied the tool will exit 0 in both cases. - # - # It is important to note that "version" relates to the - # packages-specs(7) notion of a version. If using the branch syntax - # (like "python%3.5") even though a branch name may look like a - # version string it is not used as one by pkg_add. - if pkg_spec[name]['version'] or build is True: - # Depend on the return code. - module.debug("package_present(): depending on return code for name '%s'" % name) - if pkg_spec[name]['rc']: - pkg_spec[name]['changed'] = False - else: - # Depend on stderr instead. - module.debug("package_present(): depending on stderr for name '%s'" % name) - if pkg_spec[name]['stderr']: - # There is a corner case where having an empty directory in - # installpath prior to the right location will result in a - # "file:/local/package/directory/ is empty" message on stderr - # while still installing the package, so we need to look - # for a message like "packagename-1.0: ok" just in case. - match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout']) - - if match: - # It turns out we were able to install the package. - module.debug("package_present(): we were able to install package for name '%s'" % name) - pkg_spec[name]['changed'] = True - else: - # We really did fail, fake the return code.
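- # (pkg_add exited 0 but stderr shows a real error, so rc is forced to 1 - # for the combined result handling in main().)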
- module.debug("package_present(): we really did fail for name '%s'" % name) - pkg_spec[name]['rc'] = 1 - pkg_spec[name]['changed'] = False - else: - module.debug("package_present(): stderr was not set for name '%s'" % name) - - if pkg_spec[name]['rc'] == 0: - pkg_spec[name]['changed'] = True - - else: - pkg_spec[name]['rc'] = 0 - pkg_spec[name]['stdout'] = '' - pkg_spec[name]['stderr'] = '' - pkg_spec[name]['changed'] = False - - -# Function used to make sure a package is the latest available version. -def package_latest(names, pkg_spec, module): - if module.params['build'] is True: - module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build']) - - upgrade_cmd = 'pkg_add -um' - - if module.check_mode: - upgrade_cmd += 'n' - - if module.params['clean']: - upgrade_cmd += 'c' - - if module.params['quick']: - upgrade_cmd += 'q' - - if module.params['snapshot']: - upgrade_cmd += ' -Dsnap' - - for name in names: - if pkg_spec[name]['installed_state'] is True: - - # Attempt to upgrade the package. - (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module) - - # Look for output looking something like "nmap-6.01->6.25: ok" to see if - # something changed (or would have changed). Use \W to delimit the match - # from progress meter output. - pkg_spec[name]['changed'] = False - for installed_name in pkg_spec[name]['installed_names']: - module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) - match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout']) - if match: - module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) - - pkg_spec[name]['changed'] = True - break - - # FIXME: This part is problematic. Based on the issues mentioned (and - # handled) in package_present() it is not safe to blindly trust stderr - # as an indicator that the command failed, and in the case with - # empty installpath directories this will break. - # - # For now keep this safeguard here, but ignore it if we managed to - # parse out a successful update above. This way we will report a - # successful run when we actually modify something but fail - # otherwise. - if pkg_spec[name]['changed'] is not True: - if pkg_spec[name]['stderr']: - pkg_spec[name]['rc'] = 1 - - else: - # Note packages that need to be handled by package_present - module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name) - pkg_spec['package_latest_leftovers'].append(name) - - # If there were any packages that were not installed we call - # package_present() which will handle those. - if pkg_spec['package_latest_leftovers']: - module.debug("package_latest(): calling package_present() to handle leftovers") - package_present(names, pkg_spec, module) - - -# Function used to make sure a package is not installed. -def package_absent(names, pkg_spec, module): - remove_cmd = 'pkg_delete -I' - - if module.check_mode: - remove_cmd += 'n' - - if module.params['clean']: - remove_cmd += 'c' - - if module.params['quick']: - remove_cmd += 'q' - - for name in names: - if pkg_spec[name]['installed_state'] is True: - # Attempt to remove the package. 
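- # (remove_cmd was assembled above: pkg_delete -I plus any n/c/q flags.)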
- (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module) - - if pkg_spec[name]['rc'] == 0: - pkg_spec[name]['changed'] = True - else: - pkg_spec[name]['changed'] = False - - else: - pkg_spec[name]['rc'] = 0 - pkg_spec[name]['stdout'] = '' - pkg_spec[name]['stderr'] = '' - pkg_spec[name]['changed'] = False - - -# Function used to parse the package name based on packages-specs(7). -# The general name structure is "stem-version[-flavors]". -# -# Names containing "%" are a special variation not part of the -# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a -# description. -def parse_package_name(names, pkg_spec, module): - - # Initialize empty list of package_latest() leftovers. - pkg_spec['package_latest_leftovers'] = [] - - for name in names: - module.debug("parse_package_name(): parsing name: %s" % name) - # Do some initial matches so we can base the more advanced regex on that. - version_match = re.search("-[0-9]", name) - versionless_match = re.search("--", name) - - # Stop if someone is giving us a name that both has a version and is - # version-less at the same time. - if version_match and versionless_match: - module.fail_json(msg="package name both has a version and is version-less: " + name) - - # All information for a given name is kept in the pkg_spec keyed by that name. - pkg_spec[name] = {} - - # If name includes a version. - if version_match: - match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name) - if match: - pkg_spec[name]['stem'] = match.group('stem') - pkg_spec[name]['version_separator'] = '-' - pkg_spec[name]['version'] = match.group('version') - pkg_spec[name]['flavor_separator'] = match.group('flavor_separator') - pkg_spec[name]['flavor'] = match.group('flavor') - pkg_spec[name]['branch'] = match.group('branch') - pkg_spec[name]['style'] = 'version' - module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, " - "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name]) - else: - module.fail_json(msg="unable to parse package name at version_match: " + name) - - # If name includes no version but is version-less ("--"). - elif versionless_match: - match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name) - if match: - pkg_spec[name]['stem'] = match.group('stem') - pkg_spec[name]['version_separator'] = '-' - pkg_spec[name]['version'] = None - pkg_spec[name]['flavor_separator'] = '-' - pkg_spec[name]['flavor'] = match.group('flavor') - pkg_spec[name]['branch'] = match.group('branch') - pkg_spec[name]['style'] = 'versionless' - module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name]) - else: - module.fail_json(msg="unable to parse package name at versionless_match: " + name) - - # If name includes no version, and is not version-less, it is all a - # stem, possibly with a branch (%branchname) tacked on at the - # end.
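- # For example a plain stem like "nmap" or a branch selection like - # "python%3.5" ends up here.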
- else: - match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name) - if match: - pkg_spec[name]['stem'] = match.group('stem') - pkg_spec[name]['version_separator'] = None - pkg_spec[name]['version'] = None - pkg_spec[name]['flavor_separator'] = None - pkg_spec[name]['flavor'] = None - pkg_spec[name]['branch'] = match.group('branch') - pkg_spec[name]['style'] = 'stem' - module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name]) - else: - module.fail_json(msg="unable to parse package name at else: " + name) - - # Verify that the managed host is new enough to support branch syntax. - if pkg_spec[name]['branch']: - branch_release = "6.0" - - if StrictVersion(platform.release()) < StrictVersion(branch_release): - module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name)) - - # Sanity check that there are no trailing dashes in flavor. - # Try to stop strange stuff early so we can be strict later. - if pkg_spec[name]['flavor']: - match = re.search("-$", pkg_spec[name]['flavor']) - if match: - module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor']) - - -# Function used for figuring out the port path. -def get_package_source_path(name, pkg_spec, module): - pkg_spec[name]['subpackage'] = None - if pkg_spec[name]['stem'] == 'sqlports': - return 'databases/sqlports' - else: - # try for an exact match first - sqlports_db_file = '/usr/local/share/sqlports' - if not os.path.isfile(sqlports_db_file): - module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file) - - conn = sqlite3.connect(sqlports_db_file) - first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname' - query = first_part_of_query + ' = ?' - module.debug("get_package_source_path(): exact query: %s" % query) - cursor = conn.execute(query, (name,)) - results = cursor.fetchall() - - # next, try for a fuzzier match - if len(results) < 1: - looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%') - query = first_part_of_query + ' LIKE ?' - if pkg_spec[name]['flavor']: - looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor'] - module.debug("get_package_source_path(): fuzzy flavor query: %s" % query) - cursor = conn.execute(query, (looking_for,)) - elif pkg_spec[name]['style'] == 'versionless': - query += ' AND fullpkgname NOT LIKE ?' - module.debug("get_package_source_path(): fuzzy versionless query: %s" % query) - cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,)) - else: - module.debug("get_package_source_path(): fuzzy query: %s" % query) - cursor = conn.execute(query, (looking_for,)) - results = cursor.fetchall() - - # error if we don't find exactly 1 match - conn.close() - if len(results) < 1: - module.fail_json(msg="could not find a port by the name '%s'" % name) - if len(results) > 1: - matches = map(lambda x: x[1], results) - module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches)) - - # there's exactly 1 match, so figure out the subpackage, if any, then return - fullpkgpath = results[0][0] - parts = fullpkgpath.split(',') - if len(parts) > 1 and parts[1][0] == '-': - pkg_spec[name]['subpackage'] = parts[1] - return parts[0] - - -# Function used for upgrading all installed packages.
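-# Called from main() when the single package name '*' is requested together with state=latest.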
-def upgrade_packages(pkg_spec, module): - if module.check_mode: - upgrade_cmd = 'pkg_add -Imnu' - else: - upgrade_cmd = 'pkg_add -Imu' - - if module.params['snapshot']: - upgrade_cmd += ' -Dsnap' - - # Create a minimal pkg_spec entry for '*' to store return values. - pkg_spec['*'] = {} - - # Attempt to upgrade all packages. - pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module) - - # Try to find any occurrence of a package changing version like: - # "bzip2-1.0.6->1.0.6p0: ok". - match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout']) - if match: - pkg_spec['*']['changed'] = True - - else: - pkg_spec['*']['changed'] = False - - # It seems we cannot trust the return value, so depend on the presence of - # stderr to know if something failed. - if pkg_spec['*']['stderr']: - pkg_spec['*']['rc'] = 1 - else: - pkg_spec['*']['rc'] = 0 - - -# =========================================== -# Main control flow. -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), - build=dict(type='bool', default=False), - snapshot=dict(type='bool', default=False), - ports_dir=dict(type='path', default='/usr/ports'), - quick=dict(type='bool', default=False), - clean=dict(type='bool', default=False), - ), - mutually_exclusive=[['snapshot', 'build']], - supports_check_mode=True - ) - - name = module.params['name'] - state = module.params['state'] - build = module.params['build'] - ports_dir = module.params['ports_dir'] - - rc = 0 - stdout = '' - stderr = '' - result = {} - result['name'] = name - result['state'] = state - result['build'] = build - - # The data structure used to keep track of package information. - pkg_spec = {} - - if build is True: - if not os.path.isdir(ports_dir): - module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir)) - - # Build sqlports if it is not installed yet. - parse_package_name(['sqlports'], pkg_spec, module) - get_package_state(['sqlports'], pkg_spec, module) - if not pkg_spec['sqlports']['installed_state']: - module.debug("main(): installing 'sqlports' because build=%s" % module.params['build']) - package_present(['sqlports'], pkg_spec, module) - - asterisk_name = False - for n in name: - if n == '*': - if len(name) != 1: - module.fail_json(msg="the package name '*' can not be mixed with other names") - - asterisk_name = True - - if asterisk_name: - if state != 'latest': - module.fail_json(msg="the package name '*' is only valid when using state=latest") - else: - # Perform an upgrade of all installed packages. - upgrade_packages(pkg_spec, module) - else: - # Parse package names and put results in the pkg_spec dictionary. - parse_package_name(name, pkg_spec, module) - - # Not sure how the branch syntax is supposed to play together - # with build mode. Disable it for now. - for n in name: - if pkg_spec[n]['branch'] and module.params['build'] is True: - module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n)) - - # Get state for all package names. - get_package_state(name, pkg_spec, module) - - # Perform requested action.
- if state in ['installed', 'present']: - package_present(name, pkg_spec, module) - elif state in ['absent', 'removed']: - package_absent(name, pkg_spec, module) - elif state == 'latest': - package_latest(name, pkg_spec, module) - - # The combined changed status for all requested packages. If anything - # is changed this is set to True. - combined_changed = False - - # The combined failed status for all requested packages. If anything - # failed this is set to True. - combined_failed = False - - # We combine all error messages in this comma separated string, for example: - # "msg": "Can't find nmapp\n, Can't find nmappp\n" - combined_error_message = '' - - # Loop over all requested package names and check if anything failed or - # changed. - for n in name: - if pkg_spec[n]['rc'] != 0: - combined_failed = True - if pkg_spec[n]['stderr']: - if combined_error_message: - combined_error_message += ", %s" % pkg_spec[n]['stderr'] - else: - combined_error_message = pkg_spec[n]['stderr'] - else: - if combined_error_message: - combined_error_message += ", %s" % pkg_spec[n]['stdout'] - else: - combined_error_message = pkg_spec[n]['stdout'] - - if pkg_spec[n]['changed'] is True: - combined_changed = True - - # If combined_error_message contains anything at least some part of the - # list of requested package names failed. - if combined_failed: - module.fail_json(msg=combined_error_message, **result) - - result['changed'] = combined_changed - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/opkg.py b/plugins/modules/packaging/os/opkg.py deleted file mode 100644 index bede73fb88..0000000000 --- a/plugins/modules/packaging/os/opkg.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Pelletier -# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: opkg -author: "Patrick Pelletier (@skinp)" -short_description: Package manager for OpenWrt -description: - - Manages OpenWrt packages -options: - name: - description: - - Name of package(s) to install/remove. - aliases: [pkg] - required: true - type: list - elements: str - state: - description: - - State of the package. - choices: [ 'present', 'absent', 'installed', 'removed' ] - default: present - type: str - force: - description: - - The C(opkg --force) parameter used. - choices: - - "" - - "depends" - - "maintainer" - - "reinstall" - - "overwrite" - - "downgrade" - - "space" - - "postinstall" - - "remove" - - "checksum" - - "removal-of-dependent-packages" - type: str - update_cache: - description: - - Update the package DB first. - - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. 
- aliases: ['update-cache'] - default: false - type: bool -requirements: - - opkg - - python -''' -EXAMPLES = ''' -- name: Install foo - community.general.opkg: - name: foo - state: present - -- name: Update cache and install foo - community.general.opkg: - name: foo - state: present - update_cache: yes - -- name: Remove foo - community.general.opkg: - name: foo - state: absent - -- name: Remove foo and bar - community.general.opkg: - name: - - foo - - bar - state: absent - -- name: Install foo using overwrite option forcibly - community.general.opkg: - name: foo - state: present - force: overwrite -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -def update_package_db(module, opkg_path): - """ Updates packages list. """ - - rc, out, err = module.run_command("%s update" % opkg_path) - - if rc != 0: - module.fail_json(msg="could not update package db") - - -def query_package(module, opkg_path, name, state="present"): - """ Returns whether a package is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True) - if rc == 0: - return True - - return False - - -def remove_packages(module, opkg_path, packages): - """ Uninstalls one or more packages if installed. """ - - p = module.params - force = p["force"] - if force: - force = "--force-%s" % force - - remove_c = 0 - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package)) - - if query_package(module, opkg_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, opkg_path, packages): - """ Installs one or more packages if not already installed. 
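Each package is queried again after the install, and a package that is still missing is reported as a failure.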
""" - - p = module.params - force = p["force"] - if force: - force = "--force-%s" % force - - install_c = 0 - - for package in packages: - if query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package)) - - if not query_package(module, opkg_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=["pkg"], required=True, type="list", elements="str"), - state=dict(default="present", choices=["present", "installed", "absent", "removed"]), - force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", - "checksum", "removal-of-dependent-packages"]), - update_cache=dict( - default="no", aliases=["update-cache"], type='bool', - deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), - ) - ) - - opkg_path = module.get_bin_path('opkg', True, ['/bin']) - - p = module.params - - if p["update_cache"]: - update_package_db(module, opkg_path) - - pkgs = p["name"] - - if p["state"] in ["present", "installed"]: - install_packages(module, opkg_path, pkgs) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, opkg_path, pkgs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py deleted file mode 100644 index c85faf208c..0000000000 --- a/plugins/modules/packaging/os/pacman.py +++ /dev/null @@ -1,519 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Afterburn -# Copyright: (c) 2013, Aaron Bull Schaefer -# Copyright: (c) 2015, Indrajit Raychaudhuri -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pacman -short_description: Manage packages with I(pacman) -description: - - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. -author: - - Indrajit Raychaudhuri (@indrajitr) - - Aaron Bull Schaefer (@elasticdog) - - Maxime de Roucy (@tchernomax) -options: - name: - description: - - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. - Can't be used in combination with C(upgrade). - aliases: [ package, pkg ] - type: list - elements: str - - state: - description: - - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. - - C(present) and C(installed) will simply ensure that a desired package is installed. - - C(latest) will update the specified package if it is not of the latest available version. - - C(absent) and C(removed) will remove the specified package. - default: present - choices: [ absent, installed, latest, present, removed ] - type: str - - force: - description: - - When removing package, force remove package, without any checks. - Same as `extra_args="--nodeps --nodeps"`. - When update_cache, force redownload repo databases. - Same as `update_cache_extra_args="--refresh --refresh"`. - default: no - type: bool - - executable: - description: - - Name of binary to use. 
This can either be C(pacman) or a pacman-compatible AUR helper. - - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. - default: pacman - type: str - version_added: 3.1.0 - - extra_args: - description: - - Additional option to pass to pacman when enforcing C(state). - default: - type: str - - update_cache: - description: - - Whether or not to refresh the master package lists. - - This can be run as part of a package installation or as a separate step. - - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. - default: no - type: bool - aliases: [ update-cache ] - - update_cache_extra_args: - description: - - Additional option to pass to pacman when enforcing C(update_cache). - default: - type: str - - upgrade: - description: - - Whether or not to upgrade the whole system. - Can't be used in combination with C(name). - default: no - type: bool - - upgrade_extra_args: - description: - - Additional option to pass to pacman when enforcing C(upgrade). - default: - type: str - -notes: - - When used with a C(loop:) each package will be processed individually; - it is much more efficient to pass the list directly to the I(name) option. - - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand. - For example, a dedicated build user with permissions to install packages could be necessary. -''' - -RETURN = ''' -packages: - description: a list of packages that have been changed - returned: when upgrade is set to yes - type: list - sample: [ package, other-package ] -''' - -EXAMPLES = ''' -- name: Install package foo from repo - community.general.pacman: - name: foo - state: present - -- name: Install package bar from file - community.general.pacman: - name: ~/bar-1.0-1-any.pkg.tar.xz - state: present - -- name: Install package foo from repo and bar from file - community.general.pacman: - name: - - foo - - ~/bar-1.0-1-any.pkg.tar.xz - state: present - -- name: Install package from AUR using a pacman-compatible AUR helper - community.general.pacman: - name: foo - state: present - executable: yay - extra_args: --builddir /var/cache/yay - -- name: Upgrade package foo - community.general.pacman: - name: foo - state: latest - update_cache: yes - -- name: Remove packages foo and bar - community.general.pacman: - name: - - foo - - bar - state: absent - -- name: Recursively remove package baz - community.general.pacman: - name: baz - state: absent - extra_args: --recursive - -- name: Run the equivalent of "pacman -Sy" as a separate step - community.general.pacman: - update_cache: yes - -- name: Run the equivalent of "pacman -Su" as a separate step - community.general.pacman: - upgrade: yes - -- name: Run the equivalent of "pacman -Syu" as a separate step - community.general.pacman: - update_cache: yes - upgrade: yes - -- name: Run the equivalent of "pacman -Rdd", force remove package baz - community.general.pacman: - name: baz - state: absent - force: yes -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - - -def get_version(pacman_output): - """Take pacman -Q or pacman -S output and get the version""" - fields = pacman_output.split() - if len(fields) == 2: - return fields[1] - return None - - -def get_name(module, pacman_output): - """Take pacman -Q or pacman -S output and get the package name""" - fields = pacman_output.split() - if len(fields) == 2: - return fields[0] - module.fail_json(msg="get_name: failed to retrieve package name from pacman output") - - -def
query_package(module, pacman_path, name, state): - """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second - boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information was available - """ - - lcmd = "%s --query %s" % (pacman_path, name) - lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) - if lrc != 0: - # package is not installed locally - return False, False, False - else: - # a zero exit code doesn't always mean the requested package itself is installed - # for example, if the package name queried is "provided" by another package - installed_name = get_name(module, lstdout) - if installed_name != name: - return False, False, False - - # no need to check the repository if state is present or absent - # return False for package version check, because we didn't check it - if state == 'present' or state == 'absent': - return True, False, False - - # get the version installed locally (if any) - lversion = get_version(lstdout) - - rcmd = "%s --sync --print-format \"%%n %%v\" %s" % (pacman_path, name) - rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) - # get the version in the repository - rversion = get_version(rstdout) - - if rrc == 0: - # Return True to indicate that the package is installed locally, and the result of the version number comparison - # to determine if the package is up-to-date. - return True, (lversion == rversion), False - - # package is installed but we cannot fetch the remote version. The last True stands for the error - return True, True, True - - -def update_package_db(module, pacman_path): - if module.params['force']: - module.params["update_cache_extra_args"] += " --refresh --refresh" - - cmd = "%s --sync --refresh %s" % (pacman_path, module.params["update_cache_extra_args"]) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - else: - module.fail_json(msg="could not update package db") - - -def upgrade(module, pacman_path): - cmdupgrade = "%s --sync --sysupgrade --quiet --noconfirm %s" % (pacman_path, module.params["upgrade_extra_args"]) - cmdneedrefresh = "%s --query --upgrades" % (pacman_path) - rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False) - data = stdout.split('\n') - data.remove('') - packages = [] - diff = { - 'before': '', - 'after': '', - } - - if rc == 0: - # Match lines of `pacman -Qu` output of the form: - # (package name) (before version-release) -> (after version-release) - # e.g., "ansible 2.7.1-1 -> 2.7.2-1" - regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)') - for p in data: - if '[ignored]' not in p: - m = regex.search(p) - packages.append(m.group(1)) - if module._diff: - diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) - diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) - if module.check_mode: - if packages: - module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) - else: - module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) - rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) - if rc == 0: - if packages: - module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) - else: - module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) - else: - module.fail_json(msg="Could not upgrade") - else: - module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) - - -def
remove_packages(module, pacman_path, packages): - data = [] - diff = { - 'before': '', - 'after': '', - } - - if module.params["force"]: - module.params["extra_args"] += " --nodeps --nodeps" - - remove_c = 0 - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - installed, updated, unknown = query_package(module, pacman_path, package, 'absent') - if not installed: - continue - - cmd = "%s --remove --noconfirm --noprogressbar %s %s" % (pacman_path, module.params["extra_args"], package) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - if module._diff: - d = stdout.split('\n')[2].split(' ')[2:] - for i, pkg in enumerate(d): - d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1]) - diff['before'] += "%s\n" % pkg - data.append('\n'.join(d)) - - remove_c += 1 - - if remove_c > 0: - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pacman_path, state, packages, package_files): - install_c = 0 - package_err = [] - message = "" - data = [] - diff = { - 'before': '', - 'after': '', - } - - to_install_repos = [] - to_install_files = [] - for i, package in enumerate(packages): - # If the package is installed and state == present, or state == latest and it is up-to-date, then skip - installed, updated, latestError = query_package(module, pacman_path, package, state) - if latestError and state == 'latest': - package_err.append(package) - - if installed and (state == 'present' or (state == 'latest' and updated)): - continue - - if package_files[i]: - to_install_files.append(package_files[i]) - else: - to_install_repos.append(package) - - if to_install_repos: - cmd = "%s --sync --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_repos)) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr)) - - # As we pass `--needed`, pacman returns a single line of ` there is nothing to do` if no change is performed. - # The check for > 3 is here because we pick the 4th line in normal operation. - if len(stdout.split('\n')) > 3: - data = stdout.split('\n')[3].split(' ')[2:] - data = [i for i in data if i != ''] - for i, pkg in enumerate(data): - data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1]) - if module._diff: - diff['after'] += "%s\n" % pkg - - install_c += len(to_install_repos) - - if to_install_files: - cmd = "%s --upgrade --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_files)) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr)) - - # As we pass `--needed`, pacman returns a single line of ` there is nothing to do` if no change is performed. - # The check for > 3 is here because we pick the 4th line in normal operation.
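- # (That line is pacman's transaction summary, for example - # "Packages (2) foo-1.0-1 bar-2.0-1"; the re.sub() below reduces each - # entry to its bare package name.)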
- if len(stdout.split('\n')) > 3: - data = stdout.split('\n')[3].split(' ')[2:] - data = [i for i in data if i != ''] - for i, pkg in enumerate(data): - data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1]) - if module._diff: - diff['after'] += "%s\n" % pkg - - install_c += len(to_install_files) - - if state == 'latest' and len(package_err) > 0: - message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err) - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff) - - module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff) - - -def check_packages(module, pacman_path, packages, state): - would_be_changed = [] - diff = { - 'before': '', - 'after': '', - 'before_header': '', - 'after_header': '' - } - - for package in packages: - installed, updated, unknown = query_package(module, pacman_path, package, state) - if ((state in ["present", "latest"] and not installed) or - (state == "absent" and installed) or - (state == "latest" and not updated)): - would_be_changed.append(package) - if would_be_changed: - if state == "absent": - state = "removed" - - if module._diff and (state == 'removed'): - diff['before_header'] = 'removed' - diff['before'] = '\n'.join(would_be_changed) + '\n' - elif module._diff and ((state == 'present') or (state == 'latest')): - diff['after_header'] = 'installed' - diff['after'] = '\n'.join(would_be_changed) + '\n' - - module.exit_json(changed=True, msg="%s package(s) would be %s" % ( - len(would_be_changed), state), diff=diff) - else: - module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff) - - -def expand_package_groups(module, pacman_path, pkgs): - expanded = [] - - __, stdout, __ = module.run_command([pacman_path, "--sync", "--groups", "--quiet"], check_rc=True) - available_groups = stdout.splitlines() - - for pkg in pkgs: - if pkg: # avoid empty strings - if pkg in available_groups: - # A group was found matching the package name: expand it - cmd = [pacman_path, "--sync", "--groups", "--quiet", pkg] - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - expanded.extend([name.strip() for name in stdout.splitlines()]) - else: - expanded.append(pkg) - - return expanded - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', aliases=['pkg', 'package']), - state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']), - force=dict(type='bool', default=False), - executable=dict(type='str', default='pacman'), - extra_args=dict(type='str', default=''), - upgrade=dict(type='bool', default=False), - upgrade_extra_args=dict(type='str', default=''), - update_cache=dict( - type='bool', default=False, aliases=['update-cache'], - deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), - update_cache_extra_args=dict(type='str', default=''), - ), - required_one_of=[['name', 'update_cache', 'upgrade']], - mutually_exclusive=[['name', 'upgrade']], - supports_check_mode=True, - ) - - module.run_command_environ_update = dict(LC_ALL='C') - - p = module.params - - # find pacman binary - pacman_path = module.get_bin_path(p['executable'], True) - - # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - elif p['state'] in ['absent', 'removed']: - p['state'] = 'absent' - - if p["update_cache"] and not 
module.check_mode: - update_package_db(module, pacman_path) - if not (p['name'] or p['upgrade']): - module.exit_json(changed=True, msg='Updated the package master lists') - - if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']): - module.exit_json(changed=True, msg='Would have updated the package cache') - - if p['upgrade']: - upgrade(module, pacman_path) - - if p['name']: - pkgs = expand_package_groups(module, pacman_path, p['name']) - - pkg_files = [] - for i, pkg in enumerate(pkgs): - if not pkg: # avoid empty strings - continue - elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z|zst))?$", pkg): - # The package given is a filename, extract the raw pkg name from - # it and store the filename - pkg_files.append(pkg) - pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1]) - else: - pkg_files.append(None) - - if module.check_mode: - check_packages(module, pacman_path, pkgs, p['state']) - - if p['state'] in ['present', 'latest']: - install_packages(module, pacman_path, p['state'], pkgs, pkg_files) - elif p['state'] == 'absent': - remove_packages(module, pacman_path, pkgs) - else: - module.exit_json(changed=False, msg="No package specified to work on.") - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/packaging/os/pacman_key.py b/plugins/modules/packaging/os/pacman_key.py deleted file mode 100644 index a40575b697..0000000000 --- a/plugins/modules/packaging/os/pacman_key.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, George Rawlinson -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pacman_key -author: -- George Rawlinson (@grawlinson) -version_added: "3.2.0" -short_description: Manage pacman's list of trusted keys -description: -- Add or remove gpg keys from the pacman keyring. -notes: -- Use full-length key ID (40 characters). -- Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden. -- Keys will be locally signed after being imported into the keyring. -- If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified. -- I(data), I(file), I(url), and I(keyserver) are mutually exclusive. -- Supports C(check_mode). -requirements: -- gpg -- pacman-key -options: - id: - description: - - The 40 character identifier of the key. - - Including this allows check mode to correctly report the changed state. - - Do not specify a subkey ID, instead specify the primary key ID. - required: true - type: str - data: - description: - - The keyfile contents to add to the keyring. - - Must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - file: - description: - - The path to a keyfile on the remote server to add to the keyring. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: path - url: - description: - - The URL to retrieve keyfile from. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - keyserver: - description: - - The keyserver used to retrieve key from. - type: str - verify: - description: - - Whether or not to verify the keyfile's key ID against specified key ID. - type: bool - default: true - force_update: - description: - - This forces the key to be updated if it already exists in the keyring. - type: bool - default: false - keyring: - description: - - The full path to the keyring folder on the remote server. 
- - If not specified, module will use pacman's default (C(/etc/pacman.d/gnupg)). - - Useful if the remote system requires an alternative gnupg directory. - type: path - default: /etc/pacman.d/gnupg - state: - description: - - Ensures that the key is present (added) or absent (revoked). - default: present - choices: [ absent, present ] - type: str -''' - -EXAMPLES = ''' -- name: Import a key via local file - community.general.pacman_key: - data: "{{ lookup('file', 'keyfile.asc') }}" - state: present - -- name: Import a key via remote file - community.general.pacman_key: - file: /tmp/keyfile.asc - state: present - -- name: Import a key via url - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - url: https://domain.tld/keys/keyfile.asc - state: present - -- name: Import a key via keyserver - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - keyserver: keyserver.domain.tld - -- name: Import a key into an alternative keyring - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - file: /tmp/keyfile.asc - keyring: /etc/pacman.d/gnupg-alternative - -- name: Remove a key from the keyring - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - state: absent -''' - -RETURN = r''' # ''' - -import os.path -import tempfile -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_native - - -class PacmanKey(object): - def __init__(self, module): - self.module = module - # obtain binary paths for gpg & pacman-key - self.gpg = module.get_bin_path('gpg', required=True) - self.pacman_key = module.get_bin_path('pacman-key', required=True) - - # obtain module parameters - keyid = module.params['id'] - url = module.params['url'] - data = module.params['data'] - file = module.params['file'] - keyserver = module.params['keyserver'] - verify = module.params['verify'] - force_update = module.params['force_update'] - keyring = module.params['keyring'] - state = module.params['state'] - self.keylength = 40 - - # sanitise key ID & check if key exists in the keyring - keyid = self.sanitise_keyid(keyid) - key_present = self.key_in_keyring(keyring, keyid) - - # check mode - if module.check_mode: - if state == "present": - changed = (key_present and force_update) or not key_present - module.exit_json(changed=changed) - elif state == "absent": - if key_present: - module.exit_json(changed=True) - module.exit_json(changed=False) - - if state == "present": - if key_present and not force_update: - module.exit_json(changed=False) - - if data: - file = self.save_key(data) - self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) - elif file: - self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) - elif url: - data = self.fetch_key(url) - file = self.save_key(data) - self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) - elif keyserver: - self.recv_key(keyring, keyid, keyserver) - module.exit_json(changed=True) - elif state == "absent": - if key_present: - self.remove_key(keyring, keyid) - module.exit_json(changed=True) - module.exit_json(changed=False) - - def is_hexadecimal(self, string): - """Check if a given string is valid hexadecimal""" - try: - int(string, 16) - except ValueError: - return False - return True - - def sanitise_keyid(self, keyid): - """Sanitise given key ID. - - Strips whitespace, uppercases all characters, and strips leading `0X`. 
- """ - sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') - if len(sanitised_keyid) != self.keylength: - self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) - if not self.is_hexadecimal(sanitised_keyid): - self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) - return sanitised_keyid - - def fetch_key(self, url): - """Downloads a key from url""" - response, info = fetch_url(self.module, url) - if info['status'] != 200: - self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) - return to_native(response.read()) - - def recv_key(self, keyring, keyid, keyserver): - """Receives key via keyserver""" - cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid] - self.module.run_command(cmd, check_rc=True) - self.lsign_key(keyring, keyid) - - def lsign_key(self, keyring, keyid): - """Locally sign key""" - cmd = [self.pacman_key, '--gpgdir', keyring] - self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True) - - def save_key(self, data): - "Saves key data to a temporary file" - tmpfd, tmpname = tempfile.mkstemp() - self.module.add_cleanup_file(tmpname) - tmpfile = os.fdopen(tmpfd, "w") - tmpfile.write(data) - tmpfile.close() - return tmpname - - def add_key(self, keyring, keyfile, keyid, verify): - """Add key to pacman's keyring""" - if verify: - self.verify_keyfile(keyfile, keyid) - cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile] - self.module.run_command(cmd, check_rc=True) - self.lsign_key(keyring, keyid) - - def remove_key(self, keyring, keyid): - """Remove key from pacman's keyring""" - cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid] - self.module.run_command(cmd, check_rc=True) - - def verify_keyfile(self, keyfile, keyid): - """Verify that keyfile matches the specified key ID""" - if keyfile is None: - self.module.fail_json(msg="expected a key, got none") - elif keyid is None: - self.module.fail_json(msg="expected a key ID, got none") - - rc, stdout, stderr = self.module.run_command( - [ - self.gpg, - '--with-colons', - '--with-fingerprint', - '--batch', - '--no-tty', - '--show-keys', - keyfile - ], - check_rc=True, - ) - - extracted_keyid = None - for line in stdout.splitlines(): - if line.startswith('fpr:'): - extracted_keyid = line.split(':')[9] - break - - if extracted_keyid != keyid: - self.module.fail_json(msg="key ID does not match. 
expected %s, got %s" % (keyid, extracted_keyid)) - - def key_in_keyring(self, keyring, keyid): - "Check if the key ID is in pacman's keyring" - rc, stdout, stderr = self.module.run_command( - [ - self.gpg, - '--with-colons', - '--batch', - '--no-tty', - '--no-default-keyring', - '--keyring=%s/pubring.gpg' % keyring, - '--list-keys', keyid - ], - check_rc=False, - ) - if rc != 0: - if stderr.find("No public key") >= 0: - return False - else: - self.module.fail_json(msg="gpg returned an error: %s" % stderr) - return True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(type='str', required=True), - data=dict(type='str'), - file=dict(type='path'), - url=dict(type='str'), - keyserver=dict(type='str'), - verify=dict(type='bool', default=True), - force_update=dict(type='bool', default=False), - keyring=dict(type='path', default='/etc/pacman.d/gnupg'), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), - required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], - ) - PacmanKey(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pkg5.py b/plugins/modules/packaging/os/pkg5.py deleted file mode 100644 index 266c073f37..0000000000 --- a/plugins/modules/packaging/os/pkg5.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Peter Oliver -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pkg5 -author: -- Peter Oliver (@mavit) -short_description: Manages packages with the Solaris 11 Image Packaging System -description: - - IPS packages are the native packages in Solaris 11 and higher. -notes: - - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html). -options: - name: - description: - - An FRMI of the package(s) to be installed/removed/updated. - - Multiple packages may be specified, separated by C(,). - required: true - type: list - elements: str - state: - description: - - Whether to install (I(present), I(latest)), or remove (I(absent)) a package. - choices: [ absent, latest, present, installed, removed, uninstalled ] - default: present - type: str - accept_licenses: - description: - - Accept any licences. - type: bool - default: no - aliases: [ accept, accept_licences ] - be_name: - description: - - Creates a new boot environment with the given name. - type: str - refresh: - description: - - Refresh publishers before execution. 
- type: bool - default: yes -''' -EXAMPLES = ''' -- name: Install Vim - community.general.pkg5: - name: editor/vim - -- name: Install Vim without refreshing publishers - community.general.pkg5: - name: editor/vim - refresh: no - -- name: Remove finger daemon - community.general.pkg5: - name: service/network/finger - state: absent - -- name: Install several packages at once - community.general.pkg5: - name: - - /file/gnu-findutils - - /text/gnu-grep -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']), - accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']), - be_name=dict(type='str'), - refresh=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - - params = module.params - packages = [] - - # pkg(5) FRMIs include a comma before the release number, but - # AnsibleModule will have split this into multiple items for us. - # Try to spot where this has happened and fix it. - for fragment in params['name']: - if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]): - packages[-1] += ',' + fragment - else: - packages.append(fragment) - - if params['state'] in ['present', 'installed']: - ensure(module, 'present', packages, params) - elif params['state'] in ['latest']: - ensure(module, 'latest', packages, params) - elif params['state'] in ['absent', 'uninstalled', 'removed']: - ensure(module, 'absent', packages, params) - - -def ensure(module, state, packages, params): - response = { - 'results': [], - 'msg': '', - } - behaviour = { - 'present': { - 'filter': lambda p: not is_installed(module, p), - 'subcommand': 'install', - }, - 'latest': { - 'filter': lambda p: ( - not is_installed(module, p) or not is_latest(module, p) - ), - 'subcommand': 'install', - }, - 'absent': { - 'filter': lambda p: is_installed(module, p), - 'subcommand': 'uninstall', - }, - } - - if module.check_mode: - dry_run = ['-n'] - else: - dry_run = [] - - if params['accept_licenses']: - accept_licenses = ['--accept'] - else: - accept_licenses = [] - - if params['be_name']: - beadm = ['--be-name=' + module.params['be_name']] - else: - beadm = [] - - if params['refresh']: - no_refresh = [] - else: - no_refresh = ['--no-refresh'] - - to_modify = list(filter(behaviour[state]['filter'], packages)) - if to_modify: - rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify) - response['rc'] = rc - response['results'].append(out) - response['msg'] += err - response['changed'] = True - if rc == 4: - response['changed'] = False - response['failed'] = False - elif rc != 0: - module.fail_json(**response) - - module.exit_json(**response) - - -def is_installed(module, package): - rc, out, err = module.run_command(['pkg', 'list', '--', package]) - return not bool(int(rc)) - - -def is_latest(module, package): - rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) - return bool(int(rc)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pkg5_publisher.py b/plugins/modules/packaging/os/pkg5_publisher.py deleted file mode 100644 index 95d577655f..0000000000 --- a/plugins/modules/packaging/os/pkg5_publisher.py +++ /dev/null @@ -1,202 +0,0 @@ 
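A note on the pkg5 module removed just above: pkg(5) FMRIs embed a comma before the build release (for example `shell/zsh@5.8,5.11-0.175`), and Ansible's list parsing splits the value on that comma before the module ever sees it, so its main() has to glue the fragments back together. A minimal standalone sketch of that re-join step, with a hypothetical fragment list (the real module does this inline over `params['name']`):

```python
import re


def rejoin_fmri_fragments(fragments):
    """Re-join pkg(5) FMRIs that comma-splitting broke apart.

    A fragment that starts with a dotted number and follows an item still
    ending in '@<version>' with no comma yet is treated as the build
    release half of the previous FMRI and glued back on.
    """
    packages = []
    for fragment in fragments:
        looks_like_release = re.search(r'^\d+(?:\.\d+)*', fragment)
        previous_is_open = packages and re.search(r'@[^,]*$', packages[-1])
        if looks_like_release and previous_is_open:
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)
    return packages


# Hypothetical input, as it would arrive after Ansible splits on commas:
print(rejoin_fmri_fragments(['shell/zsh@5.8', '5.11-0.175', 'editor/vim']))
# -> ['shell/zsh@5.8,5.11-0.175', 'editor/vim']
```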
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014 Peter Oliver -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pkg5_publisher -author: "Peter Oliver (@mavit)" -short_description: Manages Solaris 11 Image Packaging System publishers -description: - - IPS packages are the native packages in Solaris 11 and higher. - - This modules will configure which publishers a client will download IPS - packages from. -options: - name: - description: - - The publisher's name. - required: true - aliases: [ publisher ] - type: str - state: - description: - - Whether to ensure that a publisher is present or absent. - default: present - choices: [ present, absent ] - type: str - sticky: - description: - - Packages installed from a sticky repository can only receive updates - from that repository. - type: bool - enabled: - description: - - Is the repository enabled or disabled? - type: bool - origin: - description: - - A path or URL to the repository. - - Multiple values may be provided. - type: list - elements: str - mirror: - description: - - A path or URL to the repository mirror. - - Multiple values may be provided. - type: list - elements: str -''' -EXAMPLES = ''' -- name: Fetch packages for the solaris publisher direct from Oracle - community.general.pkg5_publisher: - name: solaris - sticky: true - origin: https://pkg.oracle.com/solaris/support/ - -- name: Configure a publisher for locally-produced packages - community.general.pkg5_publisher: - name: site - origin: 'https://pkg.example.com/site/' -''' - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['publisher']), - state=dict(default='present', choices=['present', 'absent']), - sticky=dict(type='bool'), - enabled=dict(type='bool'), - # search_after=dict(), - # search_before=dict(), - origin=dict(type='list', elements='str'), - mirror=dict(type='list', elements='str'), - ) - ) - - for option in ['origin', 'mirror']: - if module.params[option] == ['']: - module.params[option] = [] - - if module.params['state'] == 'present': - modify_publisher(module, module.params) - else: - unset_publisher(module, module.params['name']) - - -def modify_publisher(module, params): - name = params['name'] - existing = get_publishers(module) - - if name in existing: - for option in ['origin', 'mirror', 'sticky', 'enabled']: - if params[option] is not None: - if params[option] != existing[name][option]: - return set_publisher(module, params) - else: - return set_publisher(module, params) - - module.exit_json() - - -def set_publisher(module, params): - name = params['name'] - args = [] - - if params['origin'] is not None: - args.append('--remove-origin=*') - args.extend(['--add-origin=' + u for u in params['origin']]) - if params['mirror'] is not None: - args.append('--remove-mirror=*') - args.extend(['--add-mirror=' + u for u in params['mirror']]) - - if params['sticky'] is not None and params['sticky']: - args.append('--sticky') - elif params['sticky'] is not None: - args.append('--non-sticky') - - if params['enabled'] is not None and params['enabled']: - args.append('--enable') - elif params['enabled'] is not None: - args.append('--disable') - - rc, out, err = module.run_command( - ["pkg", "set-publisher"] + args + [name], - check_rc=True - ) - response = { - 'rc': rc, - 
'results': [out], - 'msg': err, - 'changed': True, - } - if rc != 0: - module.fail_json(**response) - module.exit_json(**response) - - -def unset_publisher(module, publisher): - if publisher not in get_publishers(module): - module.exit_json() - - rc, out, err = module.run_command( - ["pkg", "unset-publisher", publisher], - check_rc=True - ) - response = { - 'rc': rc, - 'results': [out], - 'msg': err, - 'changed': True, - } - if rc != 0: - module.fail_json(**response) - module.exit_json(**response) - - -def get_publishers(module): - rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True) - - lines = out.splitlines() - keys = lines.pop(0).lower().split("\t") - - publishers = {} - for line in lines: - values = dict(zip(keys, map(unstringify, line.split("\t")))) - name = values['publisher'] - - if name not in publishers: - publishers[name] = dict( - (k, values[k]) for k in ['sticky', 'enabled'] - ) - publishers[name]['origin'] = [] - publishers[name]['mirror'] = [] - - if values['type'] is not None: - publishers[name][values['type']].append(values['uri']) - - return publishers - - -def unstringify(val): - if val == "-" or val == '': - return None - elif val == "true": - return True - elif val == "false": - return False - else: - return val - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pkgin.py b/plugins/modules/packaging/os/pkgin.py deleted file mode 100644 index dc7204e60d..0000000000 --- a/plugins/modules/packaging/os/pkgin.py +++ /dev/null @@ -1,388 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2013 Shaun Zinck -# Copyright (c) 2015 Lawrence Leonard Gilbert -# Copyright (c) 2016 Jasper Lievisse Adriaanse -# -# Written by Shaun Zinck -# Based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pkgin -short_description: Package manager for SmartOS, NetBSD, et al. -description: - - "The standard package manager for SmartOS, but also usable on NetBSD - or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" -author: - - "Larry Gilbert (@L2G)" - - "Shaun Zinck (@szinck)" - - "Jasper Lievisse Adriaanse (@jasperla)" -notes: - - "Known bug with pkgin < 0.8.0: if a package is removed and another - package depends on it, the other package will be silently removed as - well. New to Ansible 1.9: check-mode support." -options: - name: - description: - - Name of package to install/remove; - - multiple names may be given, separated by commas - aliases: [pkg] - type: list - elements: str - state: - description: - - Intended state of the package - choices: [ 'present', 'absent' ] - default: present - type: str - update_cache: - description: - - Update repository database. Can be run with other steps or on it's own. 
- type: bool - default: no - upgrade: - description: - - Upgrade main packages to their newer versions - type: bool - default: no - full_upgrade: - description: - - Upgrade all packages to their newer versions - type: bool - default: no - clean: - description: - - Clean packages cache - type: bool - default: no - force: - description: - - Force package reinstall - type: bool - default: no -''' - -EXAMPLES = ''' -- name: Install package foo - community.general.pkgin: - name: foo - state: present - -- name: Install specific version of foo package - community.general.pkgin: - name: foo-2.0.1 - state: present - -- name: Update cache and install foo package - community.general.pkgin: - name: foo - update_cache: yes - -- name: Remove package foo - community.general.pkgin: - name: foo - state: absent - -- name: Remove packages foo and bar - community.general.pkgin: - name: foo,bar - state: absent - -- name: Update repositories as a separate step - community.general.pkgin: - update_cache: yes - -- name: Upgrade main packages (equivalent to pkgin upgrade) - community.general.pkgin: - upgrade: yes - -- name: Upgrade all packages (equivalent to pkgin full-upgrade) - community.general.pkgin: - full_upgrade: yes - -- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade) - community.general.pkgin: - full_upgrade: yes - force: yes - -- name: Clean packages cache (equivalent to pkgin clean) - community.general.pkgin: - clean: yes -''' - - -import re - -from ansible.module_utils.basic import AnsibleModule - - -class PackageState(object): - PRESENT = 1 - NOT_INSTALLED = 2 - OUTDATED = 4 - NOT_FOUND = 8 - - -def query_package(module, name): - """Search for the package by name and return state of the package. - """ - - # test whether '-p' (parsable) flag is supported. - rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH) - - if rc == 0: - pflag = '-p' - splitchar = ';' - else: - pflag = '' - splitchar = ' ' - - # Use "pkgin search" to find the package. The regular expression will - # only match on the complete name. - rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name)) - - # rc will not be 0 unless the search was a success - if rc == 0: - - # Search results may contain more than one line (e.g., 'emacs'), so iterate - # through each line to see if we have a match. - packages = out.split('\n') - - for package in packages: - - # Break up line at spaces. The first part will be the package with its - # version (e.g. 
'gcc47-libs-4.7.2nb4'), and the second will be the state - # of the package: - # '' - not installed - # '<' - installed but out of date - # '=' - installed and up to date - # '>' - installed but newer than the repository version - pkgname_with_version, raw_state = package.split(splitchar)[0:2] - - # Search for package, stripping version - # (results in sth like 'gcc47-libs' or 'emacs24-nox11') - pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) - - # Do not proceed unless we have a match - if not pkg_search_obj: - continue - - # Grab matched string - pkgname_without_version = pkg_search_obj.group(1) - - if name not in (pkgname_with_version, pkgname_without_version): - continue - - # The package was found; now return its state - if raw_state == '<': - return PackageState.OUTDATED - elif raw_state == '=' or raw_state == '>': - return PackageState.PRESENT - else: - # Package found but not installed - return PackageState.NOT_INSTALLED - # no fall-through - - # No packages were matched - return PackageState.NOT_FOUND - - # Search failed - return PackageState.NOT_FOUND - - -def format_action_message(module, action, count): - vars = {"actioned": action, - "count": count} - - if module.check_mode: - message = "would have %(actioned)s %(count)d package" % vars - else: - message = "%(actioned)s %(count)d package" % vars - - if count == 1: - return message - else: - return message + "s" - - -def format_pkgin_command(module, command, package=None): - # Not all commands take a package argument, so cover this up by passing - # an empty string. Some commands (e.g. 'update') will ignore extra - # arguments, however this behaviour cannot be relied on for others. - if package is None: - package = "" - - if module.params["force"]: - force = "-F" - else: - force = "" - - vars = {"pkgin": PKGIN_PATH, - "command": command, - "package": package, - "force": force} - - if module.check_mode: - return "%(pkgin)s -n %(command)s %(package)s" % vars - else: - return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars - - -def remove_packages(module, packages): - - remove_c = 0 - - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]: - continue - - rc, out, err = module.run_command( - format_pkgin_command(module, "remove", package)) - - if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: - module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=out, stderr=err) - - remove_c += 1 - - if remove_c > 0: - module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c)) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, packages): - - install_c = 0 - - for package in packages: - query_result = query_package(module, package) - if query_result in [PackageState.PRESENT, PackageState.OUTDATED]: - continue - elif query_result is PackageState.NOT_FOUND: - module.fail_json(msg="failed to find package %s for installation" % package) - - rc, out, err = module.run_command( - format_pkgin_command(module, "install", package)) - - if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: - module.fail_json(msg="failed to install %s: %s" % (package, out), stdout=out, stderr=err) - - install_c 
+= 1 - - if install_c > 0: - module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err) - - module.exit_json(changed=False, msg="package(s) already present") - - -def update_package_db(module): - rc, out, err = module.run_command( - format_pkgin_command(module, "update")) - - if rc == 0: - if re.search('database for.*is up-to-date\n$', out): - return False, "database is up-to-date" - else: - return True, "updated repository database" - else: - module.fail_json(msg="could not update package db", stdout=out, stderr=err) - - -def do_upgrade_packages(module, full=False): - if full: - cmd = "full-upgrade" - else: - cmd = "upgrade" - - rc, out, err = module.run_command( - format_pkgin_command(module, cmd)) - - if rc == 0: - if re.search('^nothing to do.\n$', out): - module.exit_json(changed=False, msg="nothing left to upgrade") - else: - module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err) - - -def upgrade_packages(module): - do_upgrade_packages(module) - - -def full_upgrade_packages(module): - do_upgrade_packages(module, True) - - -def clean_cache(module): - rc, out, err = module.run_command( - format_pkgin_command(module, "clean")) - - if rc == 0: - # There's no indication if 'clean' actually removed anything, - # so assume it did. - module.exit_json(changed=True, msg="cleaned caches") - else: - module.fail_json(msg="could not clean package cache", stdout=out, stderr=err) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default="present", choices=["present", "absent"]), - name=dict(aliases=["pkg"], type='list', elements='str'), - update_cache=dict(default=False, type='bool'), - upgrade=dict(default=False, type='bool'), - full_upgrade=dict(default=False, type='bool'), - clean=dict(default=False, type='bool'), - force=dict(default=False, type='bool')), - required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']], - supports_check_mode=True) - - global PKGIN_PATH - PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin']) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - p = module.params - - if p["update_cache"]: - c, msg = update_package_db(module) - if not (p['name'] or p["upgrade"] or p["full_upgrade"]): - module.exit_json(changed=c, msg=msg) - - if p["upgrade"]: - upgrade_packages(module) - if not p['name']: - module.exit_json(changed=True, msg='upgraded packages') - - if p["full_upgrade"]: - full_upgrade_packages(module) - if not p['name']: - module.exit_json(changed=True, msg='upgraded all packages') - - if p["clean"]: - clean_cache(module) - if not p['name']: - module.exit_json(changed=True, msg='cleaned caches') - - pkgs = p["name"] - - if p["state"] == "present": - install_packages(module, pkgs) - - elif p["state"] == "absent": - remove_packages(module, pkgs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pkgng.py b/plugins/modules/packaging/os/pkgng.py deleted file mode 100644 index ff7e45fa96..0000000000 --- a/plugins/modules/packaging/os/pkgng.py +++ /dev/null @@ -1,538 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, bleader -# Written by bleader -# Based on pkgin module written by Shaun Zinck -# that was based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pkgng -short_description: Package manager for FreeBSD >= 9.0 -description: - - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0. -options: - name: - description: - - Name or list of names of packages to install/remove. - - "With I(name=*), I(state: latest) will operate, but I(state: present) and I(state: absent) will be noops." - - > - Warning: In Ansible 2.9 and earlier this module had a misfeature - where I(name=*) with I(state: latest) or I(state: present) would - install every package from every package repository, filling up - the machines disk. Avoid using them unless you are certain that - your role will only be used with newer versions. - required: true - aliases: [pkg] - type: list - elements: str - state: - description: - - State of the package. - - 'Note: "latest" added in 2.7' - choices: [ 'present', 'latest', 'absent' ] - required: false - default: present - type: str - cached: - description: - - Use local package base instead of fetching an updated one. - type: bool - required: false - default: no - annotation: - description: - - A list of keyvalue-pairs of the form - C(<+/-/:>[=]). A C(+) denotes adding an annotation, a - C(-) denotes removing an annotation, and C(:) denotes modifying an - annotation. - If setting or modifying annotations, a value must be provided. - required: false - type: list - elements: str - pkgsite: - description: - - For pkgng versions before 1.1.4, specify packagesite to use - for downloading packages. If not specified, use settings from - C(/usr/local/etc/pkg.conf). - - For newer pkgng versions, specify a the name of a repository - configured in C(/usr/local/etc/pkg/repos). - required: false - type: str - rootdir: - description: - - For pkgng versions 1.5 and later, pkg will install all packages - within the specified root directory. - - Can not be used together with I(chroot) or I(jail) options. - required: false - type: path - chroot: - description: - - Pkg will chroot in the specified environment. - - Can not be used together with I(rootdir) or I(jail) options. - required: false - type: path - jail: - description: - - Pkg will execute in the given jail name or id. - - Can not be used together with I(chroot) or I(rootdir) options. - type: str - autoremove: - description: - - Remove automatically installed packages which are no longer needed. - required: false - type: bool - default: no - ignore_osver: - description: - - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches. - - Defines the C(IGNORE_OSVERSION) environment variable. - required: false - type: bool - default: no - version_added: 1.3.0 -author: "bleader (@bleader)" -notes: - - When using pkgsite, be careful that already in cache packages won't be downloaded again. - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. 
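Related to the loop note above: when a single comma- or space-delimited string does reach the module, main() further down re-splits it before choosing an action, since a token like `foo bar` on the pkg command line would make the install-versus-upgrade detection report the wrong action. A condensed standalone sketch of that normalization (inputs are hypothetical; the real code also special-cases `name: '*'` with `state: latest` first):

```python
import re


def normalize_name_list(pkgs):
    """Expand a lone comma- or space-delimited name into a proper list.

    Mirrors the re-split the removed module performs in main(); '*' is
    filtered out because only 'state: latest' acts on it, separately.
    """
    if len(pkgs) == 1:
        pkgs = re.split(r'[,\s]', pkgs[0])
    return [pkg for pkg in pkgs if pkg != '*']


print(normalize_name_list(['foo,bar']))   # -> ['foo', 'bar']
print(normalize_name_list(['foo', '*']))  # -> ['foo']
```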
-''' - -EXAMPLES = ''' -- name: Install package foo - community.general.pkgng: - name: foo - state: present - -- name: Annotate package foo and bar - community.general.pkgng: - name: - - foo - - bar - annotation: '+test1=baz,-test2,:test3=foobar' - -- name: Remove packages foo and bar - community.general.pkgng: - name: - - foo - - bar - state: absent - -# "latest" support added in 2.7 -- name: Upgrade package baz - community.general.pkgng: - name: baz - state: latest - -- name: Upgrade all installed packages (see warning for the name option first!) - community.general.pkgng: - name: "*" - state: latest -''' - - -from collections import defaultdict -import re -from ansible.module_utils.basic import AnsibleModule - - -def query_package(module, run_pkgng, name): - - rc, out, err = run_pkgng('info', '-g', '-e', name) - - if rc == 0: - return True - - return False - - -def query_update(module, run_pkgng, name): - - # Check to see if a package upgrade is available. - # rc = 0, no updates available or package not installed - # rc = 1, updates available - rc, out, err = run_pkgng('upgrade', '-g', '-n', name) - - if rc == 1: - return True - - return False - - -def pkgng_older_than(module, pkgng_path, compare_version): - - rc, out, err = module.run_command([pkgng_path, '-v']) - version = [int(x) for x in re.split(r'[\._]', out)] - - i = 0 - new_pkgng = True - while compare_version[i] == version[i]: - i += 1 - if i == min(len(compare_version), len(version)): - break - else: - if compare_version[i] > version[i]: - new_pkgng = False - return not new_pkgng - - -def upgrade_packages(module, run_pkgng): - # Run a 'pkg upgrade', updating all packages. - upgraded_c = 0 - - pkgng_args = ['upgrade'] - pkgng_args.append('-n' if module.check_mode else '-y') - rc, out, err = run_pkgng(*pkgng_args) - - matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE) - for match in matches: - upgraded_c += int(match) - - if upgraded_c > 0: - return (True, "updated %s package(s)" % upgraded_c, out, err) - return (False, "no packages need upgrades", out, err) - - -def remove_packages(module, run_pkgng, packages): - remove_c = 0 - stdout = "" - stderr = "" - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, run_pkgng, package): - continue - - if not module.check_mode: - rc, out, err = run_pkgng('delete', '-y', package) - stdout += out - stderr += err - - if not module.check_mode and query_package(module, run_pkgng, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr) - - remove_c += 1 - - if remove_c > 0: - return (True, "removed %s package(s)" % remove_c, stdout, stderr) - - return (False, "package(s) already absent", stdout, stderr) - - -def install_packages(module, run_pkgng, packages, cached, state): - action_queue = defaultdict(list) - action_count = defaultdict(int) - stdout = "" - stderr = "" - - if not module.check_mode and not cached: - rc, out, err = run_pkgng('update') - stdout += out - stderr += err - if rc != 0: - module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr) - - for package in packages: - already_installed = query_package(module, run_pkgng, package) - if already_installed and state == "present": - continue - - if ( - already_installed and state == "latest" - and not query_update(module, run_pkgng, package) 
- ): - continue - - if already_installed: - action_queue["upgrade"].append(package) - else: - action_queue["install"].append(package) - - # install/upgrade all named packages with one pkg command - for (action, package_list) in action_queue.items(): - if module.check_mode: - # Do nothing, but count up how many actions - # would be performed so that the changed/msg - # is correct. - action_count[action] += len(package_list) - continue - - pkgng_args = [action, '-g', '-U', '-y'] + package_list - rc, out, err = run_pkgng(*pkgng_args) - stdout += out - stderr += err - - # individually verify packages are in requested state - for package in package_list: - verified = False - if action == 'install': - verified = query_package(module, run_pkgng, package) - elif action == 'upgrade': - verified = not query_update(module, run_pkgng, package) - - if verified: - action_count[action] += 1 - else: - module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr) - - if sum(action_count.values()) > 0: - past_tense = {'install': 'installed', 'upgrade': 'upgraded'} - messages = [] - for (action, count) in action_count.items(): - messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else "")) - - return (True, '; '.join(messages), stdout, stderr) - - return (False, "package(s) already %s" % (state), stdout, stderr) - - -def annotation_query(module, run_pkgng, package, tag): - rc, out, err = run_pkgng('info', '-g', '-A', package) - match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) - if match: - return match.group('value') - return False - - -def annotation_add(module, run_pkgng, package, tag, value): - _value = annotation_query(module, run_pkgng, package, tag) - if not _value: - # Annotation does not exist, add it. 
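An aside on the annotation machinery being removed here: each entry in I(annotation) is a string of the form `<+|-|:>tag[=value]`, parsed with a named-group regex (dash first in the character class so it is not read as a range) and dispatched through a small operation table. A detached sketch of the parse-and-dispatch idea; the handlers below are stand-ins for illustration, not the module's functions, which call pkg(8) via run_command:

```python
import re

# Dash first in the class, or it would be read as a character range.
ANNOTATION_RE = re.compile(r'(?P<operation>[-+:])(?P<tag>[^=]+)(=(?P<value>.+))?')


def parse_annotation(annotation_string):
    """Split '<op>tag[=value]' into parts; None if the string is malformed."""
    match = ANNOTATION_RE.match(annotation_string)
    if match is None:
        return None
    return match.group('operation'), match.group('tag'), match.group('value')


# Stand-in handlers for illustration only:
handlers = {
    '+': lambda tag, value: 'add %s=%s' % (tag, value),
    '-': lambda tag, value: 'delete %s' % tag,
    ':': lambda tag, value: 'modify %s=%s' % (tag, value),
}

for raw in ('+test1=baz', '-test2', ':test3=foobar'):
    op, tag, value = parse_annotation(raw)
    print(handlers[op](tag, value))
# -> add test1=baz / delete test2 / modify test3=foobar
```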
- if not module.check_mode: - rc, out, err = run_pkgng('annotate', '-y', '-A', package, tag, data=value, binary_data=True) - if rc != 0: - module.fail_json(msg="could not annotate %s: %s" - % (package, out), stderr=err) - return True - elif _value != value: - # Annotation exists, but value differs - module.fail_json( - msg="failed to annotate %s, because %s is already set to %s, but should be set to %s" - % (package, tag, _value, value)) - return False - else: - # Annotation exists, nothing to do - return False - - -def annotation_delete(module, run_pkgng, package, tag, value): - _value = annotation_query(module, run_pkgng, package, tag) - if _value: - if not module.check_mode: - rc, out, err = run_pkgng('annotate', '-y', '-D', package, tag) - if rc != 0: - module.fail_json(msg="could not delete annotation to %s: %s" - % (package, out), stderr=err) - return True - return False - - -def annotation_modify(module, run_pkgng, package, tag, value): - _value = annotation_query(module, run_pkgng, package, tag) - if not _value: - # No such tag - module.fail_json(msg="could not change annotation to %s: tag %s does not exist" - % (package, tag)) - elif _value == value: - # No change in value - return False - else: - if not module.check_mode: - rc, out, err = run_pkgng('annotate', '-y', '-M', package, tag, data=value, binary_data=True) - - # pkg sometimes exits with rc == 1, even though the modification succeeded - # Check the output for a success message - if ( - rc != 0 - and re.search(r'^%s-[^:]+: Modified annotation tagged: %s' % (package, tag), out, flags=re.MULTILINE) is None - ): - module.fail_json(msg="failed to annotate %s, could not change annotation %s to %s: %s" - % (package, tag, value, out), stderr=err) - return True - - -def annotate_packages(module, run_pkgng, packages, annotations): - annotate_c = 0 - if len(annotations) == 1: - # Split on commas with optional trailing whitespace, - # to support the old style of multiple annotations - # on a single line, rather than YAML list syntax - annotations = re.split(r'\s*,\s*', annotations[0]) - - operation = { - '+': annotation_add, - '-': annotation_delete, - ':': annotation_modify - } - - for package in packages: - for annotation_string in annotations: - # Note to future maintainers: A dash (-) in a regex character class ([-+:] below) - # must appear as the first character in the class, or it will be interpreted - # as a range of characters. - annotation = \ - re.match(r'(?P[-+:])(?P[^=]+)(=(?P.+))?', annotation_string) - - if annotation is None: - module.fail_json( - msg="failed to annotate %s, invalid annotate string: %s" - % (package, annotation_string) - ) - - annotation = annotation.groupdict() - if operation[annotation['operation']](module, run_pkgng, package, annotation['tag'], annotation['value']): - annotate_c += 1 - - if annotate_c > 0: - return (True, "added %s annotations." 
% annotate_c) - return (False, "changed no annotations") - - -def autoremove_packages(module, run_pkgng): - stdout = "" - stderr = "" - rc, out, err = run_pkgng('autoremove', '-n') - - autoremove_c = 0 - - match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) - if match: - autoremove_c = int(match.group(1)) - - if autoremove_c == 0: - return (False, "no package(s) to autoremove", stdout, stderr) - - if not module.check_mode: - rc, out, err = run_pkgng('autoremove', '-y') - stdout += out - stderr += err - - return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default="present", choices=["present", "latest", "absent"], required=False), - name=dict(aliases=["pkg"], required=True, type='list', elements='str'), - cached=dict(default=False, type='bool'), - ignore_osver=dict(default=False, required=False, type='bool'), - annotation=dict(required=False, type='list', elements='str'), - pkgsite=dict(required=False), - rootdir=dict(required=False, type='path'), - chroot=dict(required=False, type='path'), - jail=dict(required=False, type='str'), - autoremove=dict(default=False, type='bool')), - supports_check_mode=True, - mutually_exclusive=[["rootdir", "chroot", "jail"]]) - - pkgng_path = module.get_bin_path('pkg', True) - - p = module.params - - pkgs = p["name"] - - changed = False - msgs = [] - stdout = "" - stderr = "" - dir_arg = None - - if p["rootdir"] is not None: - rootdir_not_supported = pkgng_older_than(module, pkgng_path, [1, 5, 0]) - if rootdir_not_supported: - module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") - else: - dir_arg = "--rootdir=%s" % (p["rootdir"]) - - if p["ignore_osver"]: - ignore_osver_not_supported = pkgng_older_than(module, pkgng_path, [1, 11, 0]) - if ignore_osver_not_supported: - module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater") - - if p["chroot"] is not None: - dir_arg = '--chroot=%s' % (p["chroot"]) - - if p["jail"] is not None: - dir_arg = '--jail=%s' % (p["jail"]) - - # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions - # in /usr/local/etc/pkg/repos - repo_flag_not_supported = pkgng_older_than(module, pkgng_path, [1, 1, 4]) - - def run_pkgng(action, *args, **kwargs): - cmd = [pkgng_path, dir_arg, action] - - pkgng_env = {'BATCH': 'yes'} - - if p["ignore_osver"]: - pkgng_env['IGNORE_OSVERSION'] = 'yes' - - if p['pkgsite'] is not None and action in ('update', 'install', 'upgrade',): - if repo_flag_not_supported: - pkgng_env['PACKAGESITE'] = p['pkgsite'] - else: - cmd.append('--repository=%s' % (p['pkgsite'],)) - - # If environ_update is specified to be "passed through" - # to module.run_command, then merge its values into pkgng_env - pkgng_env.update(kwargs.pop('environ_update', dict())) - - return module.run_command(cmd + list(args), environ_update=pkgng_env, **kwargs) - - if pkgs == ['*'] and p["state"] == 'latest': - # Operate on all installed packages. Only state: latest makes sense here. - _changed, _msg, _stdout, _stderr = upgrade_packages(module, run_pkgng) - changed = changed or _changed - stdout += _stdout - stderr += _stderr - msgs.append(_msg) - - # Operate on named packages - if len(pkgs) == 1: - # The documentation used to show multiple packages specified in one line - # with comma or space delimiters. 
That doesn't result in a YAML list, and - # wrong actions (install vs upgrade) can be reported if those - # comma- or space-delimited strings make it to the pkg command line. - pkgs = re.split(r'[,\s]', pkgs[0]) - named_packages = [pkg for pkg in pkgs if pkg != '*'] - if p["state"] in ("present", "latest") and named_packages: - _changed, _msg, _out, _err = install_packages(module, run_pkgng, named_packages, - p["cached"], p["state"]) - stdout += _out - stderr += _err - changed = changed or _changed - msgs.append(_msg) - - elif p["state"] == "absent" and named_packages: - _changed, _msg, _out, _err = remove_packages(module, run_pkgng, named_packages) - stdout += _out - stderr += _err - changed = changed or _changed - msgs.append(_msg) - - if p["autoremove"]: - _changed, _msg, _stdout, _stderr = autoremove_packages(module, run_pkgng) - changed = changed or _changed - stdout += _stdout - stderr += _stderr - msgs.append(_msg) - - if p["annotation"] is not None: - _changed, _msg = annotate_packages(module, run_pkgng, pkgs, p["annotation"]) - changed = changed or _changed - msgs.append(_msg) - - module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pkgutil.py b/plugins/modules/packaging/os/pkgutil.py deleted file mode 100644 index 0f1daca4ef..0000000000 --- a/plugins/modules/packaging/os/pkgutil.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Alexander Winkler -# based on svr4pkg by -# Boyd Adamson (2012) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: pkgutil -short_description: OpenCSW package management on Solaris -description: -- This module installs, updates and removes packages from the OpenCSW project for Solaris. -- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies. -- See U(https://www.opencsw.org/) for more information about the project. -author: -- Alexander Winkler (@dermute) -- David Ponessa (@scathatheworm) -options: - name: - description: - - The name of the package. - - When using C(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil. - type: list - required: true - elements: str - aliases: [ pkg ] - site: - description: - - The repository path to install the package from. - - Its global definition is in C(/etc/opt/csw/pkgutil.conf). - required: false - type: str - state: - description: - - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages. - - The upgrade (C(latest)) operation will update/install the packages to the latest version available. - type: str - required: true - choices: [ absent, installed, latest, present, removed ] - update_catalog: - description: - - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(yes). - type: bool - default: no - force: - description: - - To allow the update process to downgrade packages to match what is present in the repository, set this to C(yes). - - This is useful for rolling back to stable from testing, or similar operations. - type: bool - default: false - version_added: 1.2.0 -notes: -- In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode. 
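Before the examples below, a brief note on how the pkgutil module decides what is outdated: `packages_not_latest` runs `pkgutil -c`, then screens the comparison table for rows that are neither header noise nor marked SAME. A detached sketch of that parsing over a canned output string (the sample text is invented for illustration; real `pkgutil -c` output may differ in detail):

```python
def outdated_from_catalog_output(out):
    """Return package names whose installed version differs from the catalog.

    Mirrors the removed module's parsing: drop the header line, skip rows
    mentioning 'catalog' or marked 'SAME', keep the first column, dedupe.
    """
    packages = []
    for line in out.split('\n')[1:-1]:
        if 'catalog' not in line and 'SAME' not in line:
            packages.append(line.split(' ')[0])
    return list(set(packages))


# Invented sample output, for illustration only:
sample = (
    "package               installed            catalog\n"
    "CSWtop                3.8.1,REV=2020       4.0.0,REV=2021\n"
    "CSWsudo               1.9.5,REV=2021       SAME\n"
)
print(outdated_from_catalog_output(sample))  # -> ['CSWtop']
```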
-''' - -EXAMPLES = r''' -- name: Install a package - community.general.pkgutil: - name: CSWcommon - state: present - -- name: Install a package from a specific repository - community.general.pkgutil: - name: CSWnrpe - site: ftp://myinternal.repo/opencsw/kiel - state: latest - -- name: Remove a package - community.general.pkgutil: - name: CSWtop - state: absent - -- name: Install several packages - community.general.pkgutil: - name: - - CSWsudo - - CSWtop - state: present - -- name: Update all packages - community.general.pkgutil: - name: '*' - state: latest - -- name: Update all packages and force versions to match latest in catalog - community.general.pkgutil: - name: '*' - state: latest - force: yes -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule - - -def packages_not_installed(module, names): - ''' Check if each package is installed and return list of the ones absent ''' - pkgs = [] - for pkg in names: - rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) - if rc != 0: - pkgs.append(pkg) - return pkgs - - -def packages_installed(module, names): - ''' Check if each package is installed and return list of the ones present ''' - pkgs = [] - for pkg in names: - if not pkg.startswith('CSW'): - continue - rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) - if rc == 0: - pkgs.append(pkg) - return pkgs - - -def packages_not_latest(module, names, site, update_catalog): - ''' Check status of each package and return list of the ones with an upgrade available ''' - cmd = ['pkgutil'] - if update_catalog: - cmd.append('-U') - cmd.append('-c') - if site is not None: - cmd.extend(['-t', site]) - if names != ['*']: - cmd.extend(names) - rc, out, err = run_command(module, cmd) - - # Find packages in the catalog which are not up to date - packages = [] - for line in out.split('\n')[1:-1]: - if 'catalog' not in line and 'SAME' not in line: - packages.append(line.split(' ')[0]) - - # Remove duplicates - return list(set(packages)) - - -def run_command(module, cmd, **kwargs): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin']) - return module.run_command(cmd, **kwargs) - - -def package_install(module, state, pkgs, site, update_catalog, force): - cmd = ['pkgutil'] - if module.check_mode: - cmd.append('-n') - cmd.append('-iy') - if update_catalog: - cmd.append('-U') - if site is not None: - cmd.extend(['-t', site]) - if force: - cmd.append('-f') - cmd.extend(pkgs) - return run_command(module, cmd) - - -def package_upgrade(module, pkgs, site, update_catalog, force): - cmd = ['pkgutil'] - if module.check_mode: - cmd.append('-n') - cmd.append('-uy') - if update_catalog: - cmd.append('-U') - if site is not None: - cmd.extend(['-t', site]) - if force: - cmd.append('-f') - cmd += pkgs - return run_command(module, cmd) - - -def package_uninstall(module, pkgs): - cmd = ['pkgutil'] - if module.check_mode: - cmd.append('-n') - cmd.append('-ry') - cmd.extend(pkgs) - return run_command(module, cmd) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', required=True, aliases=['pkg']), - state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']), - site=dict(type='str'), - update_catalog=dict(type='bool', default=False), - force=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - name = module.params['name'] - state = module.params['state'] - site = module.params['site'] - update_catalog = module.params['update_catalog'] - 
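An aside on the run_command wrapper defined above: it resolves the program through `get_bin_path` with an extra search directory, because OpenCSW installs its tools under /opt/csw/bin, which is frequently not on PATH. A generic standalone equivalent using shutil.which as a stand-in for get_bin_path (not the module's actual API):

```python
import os
import shutil


def find_binary(progname, extra_dirs=('/opt/csw/bin',)):
    """Locate a program on PATH, falling back to extra directories.

    Stand-in for AnsibleModule.get_bin_path(progname, True, ['/opt/csw/bin']).
    """
    search_path = os.pathsep.join([os.environ.get('PATH', '')] + list(extra_dirs))
    found = shutil.which(progname, path=search_path)
    if found is None:
        raise RuntimeError('Failed to find required executable %s' % progname)
    return found


# find_binary('pkgutil') -> '/opt/csw/bin/pkgutil' on an OpenCSW host
```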
force = module.params['force'] - - rc = None - out = '' - err = '' - result = dict( - name=name, - state=state, - ) - - if state in ['installed', 'present']: - # Fail with an explicit error when trying to "install" '*' - if name == ['*']: - module.fail_json(msg="Can not use 'state: present' with name: '*'") - - # Build list of packages that are actually not installed from the ones requested - pkgs = packages_not_installed(module, name) - - # If the package list is empty then all packages are already present - if pkgs == []: - module.exit_json(changed=False) - - (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force) - if rc != 0: - module.fail_json(msg=(err or out)) - - elif state in ['latest']: - # When using latest for * - if name == ['*']: - # Check for packages that are actually outdated - pkgs = packages_not_latest(module, name, site, update_catalog) - - # If the package list comes up empty, everything is already up to date - if pkgs == []: - module.exit_json(changed=False) - - # If there are packages to update, just empty the list and run the command without it - # pkgutil logic is to update all when run without packages names - pkgs = [] - (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force) - if rc != 0: - module.fail_json(msg=(err or out)) - else: - # Build list of packages that are either outdated or not installed - pkgs = packages_not_installed(module, name) - pkgs += packages_not_latest(module, name, site, update_catalog) - - # If the package list is empty that means all packages are installed and up to date - if pkgs == []: - module.exit_json(changed=False) - - (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force) - if rc != 0: - module.fail_json(msg=(err or out)) - - elif state in ['absent', 'removed']: - # Build list of packages requested for removal that are actually present - pkgs = packages_installed(module, name) - - # If the list is empty, no packages need to be removed - if pkgs == []: - module.exit_json(changed=False) - - (rc, out, err) = package_uninstall(module, pkgs) - if rc != 0: - module.fail_json(msg=(err or out)) - - if rc is None: - # pkgutil was not executed because the package was already present/absent/up to date - result['changed'] = False - elif rc == 0: - result['changed'] = True - else: - result['changed'] = False - result['failed'] = True - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/portage.py b/plugins/modules/packaging/os/portage.py deleted file mode 100644 index 2a8679dbbd..0000000000 --- a/plugins/modules/packaging/os/portage.py +++ /dev/null @@ -1,539 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, William L Thomson Jr -# (c) 2013, Yap Sok Ann -# Written by Yap Sok Ann -# Modified by William L. Thomson Jr. -# Based on apt module written by Matthew Williams -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: portage -short_description: Package manager for Gentoo -description: - - Manages Gentoo packages - -options: - package: - description: - - Package atom or set, e.g. 
C(sys-apps/foo) or C(>foo-2.13) or C(@world) - aliases: [name] - type: list - elements: str - - state: - description: - - State of the package atom - default: "present" - choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ] - type: str - - update: - description: - - Update packages to the best version available (--update) - type: bool - default: no - - deep: - description: - - Consider the entire dependency tree of packages (--deep) - type: bool - default: no - - newuse: - description: - - Include installed packages where USE flags have changed (--newuse) - type: bool - default: no - - changed_use: - description: - - Include installed packages where USE flags have changed, except when - - flags that the user has not enabled are added or removed - - (--changed-use) - type: bool - default: no - - oneshot: - description: - - Do not add the packages to the world file (--oneshot) - type: bool - default: no - - noreplace: - description: - - Do not re-emerge installed packages (--noreplace) - type: bool - default: yes - - nodeps: - description: - - Only merge packages but not their dependencies (--nodeps) - type: bool - default: no - - onlydeps: - description: - - Only merge packages' dependencies but not the packages (--onlydeps) - type: bool - default: no - - depclean: - description: - - Remove packages not needed by explicitly merged packages (--depclean) - - If no package is specified, clean up the world's dependencies - - Otherwise, --depclean serves as a dependency aware version of --unmerge - type: bool - default: no - - quiet: - description: - - Run emerge in quiet mode (--quiet) - type: bool - default: no - - verbose: - description: - - Run emerge in verbose mode (--verbose) - type: bool - default: no - - sync: - description: - - Sync package repositories first - - If yes, perform "emerge --sync" - - If web, perform "emerge-webrsync" - choices: [ "web", "yes", "no" ] - type: str - - getbinpkgonly: - description: - - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf). - type: bool - default: no - version_added: 1.3.0 - - getbinpkg: - description: - - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf). - type: bool - default: no - - usepkgonly: - description: - - Merge only binaries (no compiling). - type: bool - default: no - - usepkg: - description: - - Tries to use the binary package(s) in the locally available packages directory. - type: bool - default: no - - keepgoing: - description: - - Continue as much as possible after an error. - type: bool - default: no - - jobs: - description: - - Specifies the number of packages to build simultaneously. - - "Since version 2.6: Value of 0 or False resets any previously added" - - --jobs setting values - type: int - - loadavg: - description: - - Specifies that no new builds should be started if there are - - other builds running and the load average is at least LOAD - - "Since version 2.6: Value of 0 or False resets any previously added" - - --load-average setting values - type: float - - quietbuild: - description: - - Redirect all build output to logs alone, and do not display it - - on stdout (--quiet-build) - type: bool - default: no - - quietfail: - description: - - Suppresses display of the build log on stdout (--quiet-fail) - - Only the die message and the path of the build log will be - - displayed on stdout. 
- type: bool - default: no - -requirements: [ gentoolkit ] -author: - - "William L Thomson Jr (@wltjr)" - - "Yap Sok Ann (@sayap)" - - "Andrew Udvare (@Tatsh)" -''' - -EXAMPLES = ''' -- name: Make sure package foo is installed - community.general.portage: - package: foo - state: present - -- name: Make sure package foo is not installed - community.general.portage: - package: foo - state: absent - -- name: Update package foo to the latest version (os specific alternative to latest) - community.general.portage: - package: foo - update: yes - -- name: Install package foo using PORTAGE_BINHOST setup - community.general.portage: - package: foo - getbinpkg: yes - -- name: Re-install world from binary packages only and do not allow any compiling - community.general.portage: - package: '@world' - usepkgonly: yes - -- name: Sync repositories and update world - community.general.portage: - package: '@world' - update: yes - deep: yes - sync: yes - -- name: Remove unneeded packages - community.general.portage: - depclean: yes - -- name: Remove package foo if it is not explicitly needed - community.general.portage: - package: foo - state: absent - depclean: yes -''' - -import os -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def query_package(module, package, action): - if package.startswith('@'): - return query_set(module, package, action) - return query_atom(module, package, action) - - -def query_atom(module, atom, action): - cmd = '%s list %s' % (module.equery_path, atom) - - rc, out, err = module.run_command(cmd) - return rc == 0 - - -def query_set(module, set, action): - system_sets = [ - '@live-rebuild', - '@module-rebuild', - '@preserved-rebuild', - '@security', - '@selected', - '@system', - '@world', - '@x11-module-rebuild', - ] - - if set in system_sets: - if action == 'unmerge': - module.fail_json(msg='set %s cannot be removed' % set) - return False - - world_sets_path = '/var/lib/portage/world_sets' - if not os.path.exists(world_sets_path): - return False - - cmd = 'grep %s %s' % (set, world_sets_path) - - rc, out, err = module.run_command(cmd) - return rc == 0 - - -def sync_repositories(module, webrsync=False): - if module.check_mode: - module.exit_json(msg='check mode not supported by sync') - - if webrsync: - webrsync_path = module.get_bin_path('emerge-webrsync', required=True) - cmd = '%s --quiet' % webrsync_path - else: - cmd = '%s --sync --quiet --ask=n' % module.emerge_path - - rc, out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg='could not sync package repositories') - - -# Note: In the 3 functions below, equery is done one-by-one, but emerge is done -# in one go. If that is not desirable, split the packages into multiple tasks -# instead of joining them together with comma. 
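The note above is worth making concrete: presence is checked one atom at a time with `equery list`, while installation happens in a single `emerge` run covering every missing atom. Below is a minimal standalone sketch of that pattern, assuming a Gentoo host with `gentoolkit` installed; the helper names are hypothetical and not part of the module itself.

```python
# Sketch of the per-atom check / batched install pattern described above.
# Assumes a Gentoo host: 'equery' (from gentoolkit) and 'emerge' must exist,
# and emerging requires root. Helper names are hypothetical.
import subprocess


def atom_installed(atom):
    # 'equery list <atom>' exits 0 when a matching package is installed,
    # which is the same test query_atom() performs.
    result = subprocess.run(
        ['equery', 'list', atom],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result.returncode == 0


def install_missing(atoms):
    # One equery call per atom ...
    missing = [a for a in atoms if not atom_installed(a)]
    if not missing:
        return False  # everything already present
    # ... but a single emerge invocation for all missing atoms.
    subprocess.run(['emerge', '--ask=n'] + missing, check=True)
    return True
```

Splitting the package list into several tasks, as the note suggests, simply turns the single `emerge` call into one call per task, at the cost of repeating dependency resolution for each one.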
- - -def emerge_packages(module, packages): - """Run emerge command against given list of atoms.""" - p = module.params - - if p['noreplace'] and not (p['update'] or p['state'] == 'latest'): - for package in packages: - if p['noreplace'] and not query_package(module, package, 'emerge'): - break - else: - module.exit_json(changed=False, msg='Packages already present.') - if module.check_mode: - module.exit_json(changed=True, msg='Packages would be installed.') - - args = [] - emerge_flags = { - 'update': '--update', - 'deep': '--deep', - 'newuse': '--newuse', - 'changed_use': '--changed-use', - 'oneshot': '--oneshot', - 'noreplace': '--noreplace', - 'nodeps': '--nodeps', - 'onlydeps': '--onlydeps', - 'quiet': '--quiet', - 'verbose': '--verbose', - 'getbinpkgonly': '--getbinpkgonly', - 'getbinpkg': '--getbinpkg', - 'usepkgonly': '--usepkgonly', - 'usepkg': '--usepkg', - 'keepgoing': '--keep-going', - 'quietbuild': '--quiet-build', - 'quietfail': '--quiet-fail', - } - for flag, arg in emerge_flags.items(): - if p[flag]: - args.append(arg) - - if p['state'] and p['state'] == 'latest': - args.append("--update") - - emerge_flags = { - 'jobs': '--jobs', - 'loadavg': '--load-average', - } - - for flag, arg in emerge_flags.items(): - flag_val = p[flag] - - if flag_val is None: - """Fallback to default: don't use this argument at all.""" - continue - - if not flag_val: - """If the value is 0 or 0.0: add the flag, but not the value.""" - args.append(arg) - continue - - """Add the --flag=value pair.""" - args.extend((arg, to_native(flag_val))) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - if rc != 0: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not installed.', - ) - - # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite - # this error - if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \ - and 'Permission denied (publickey).' in err: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Please check your PORTAGE_BINHOST configuration in make.conf ' - 'and your SSH authorized_keys file', - ) - - changed = True - for line in out.splitlines(): - if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): - msg = 'Packages installed.' - break - elif module.check_mode and re.match(r'\[(binary|ebuild)', line): - msg = 'Packages would be installed.' - break - else: - changed = False - msg = 'No packages installed.' 
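The `for`/`else` scan above is the module's change detector: `emerge` exits 0 even when nothing was merged, so success is judged from stdout rather than the return code. Here is a self-contained illustration of that heuristic using the same regular expressions; the sample emerge output lines are invented for demonstration.

```python
# Standalone illustration of the stdout-based change detection above.
# The regexes mirror the module's; the emerge output samples are invented.
import re

EMERGING = re.compile(r'(?:>+) Emerging (?:binary )?\(1 of')
PENDING = re.compile(r'\[(binary|ebuild)')


def detect_change(out, check_mode=False):
    for line in out.splitlines():
        if EMERGING.match(line):
            return True, 'Packages installed.'
        if check_mode and PENDING.match(line):
            return True, 'Packages would be installed.'
    return False, 'No packages installed.'


print(detect_change('>>> Emerging (1 of 2) app-misc/screen-4.9.0::gentoo'))
# -> (True, 'Packages installed.')
print(detect_change('Nothing to merge; quitting.'))
# -> (False, 'No packages installed.')
```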
- - module.exit_json( - changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg=msg, - ) - - -def unmerge_packages(module, packages): - p = module.params - - for package in packages: - if query_package(module, package, 'unmerge'): - break - else: - module.exit_json(changed=False, msg='Packages already absent.') - - args = ['--unmerge'] - - for flag in ['quiet', 'verbose']: - if p[flag]: - args.append('--%s' % flag) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - - if rc != 0: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not removed.', - ) - - module.exit_json( - changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages removed.', - ) - - -def cleanup_packages(module, packages): - p = module.params - - if packages: - for package in packages: - if query_package(module, package, 'unmerge'): - break - else: - module.exit_json(changed=False, msg='Packages already absent.') - - args = ['--depclean'] - - for flag in ['quiet', 'verbose']: - if p[flag]: - args.append('--%s' % flag) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - if rc != 0: - module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) - - removed = 0 - for line in out.splitlines(): - if not line.startswith('Number removed:'): - continue - parts = line.split(':') - removed = int(parts[1].strip()) - changed = removed > 0 - - module.exit_json( - changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Depclean completed.', - ) - - -def run_emerge(module, packages, *args): - args = list(args) - - args.append('--ask=n') - if module.check_mode: - args.append('--pretend') - - cmd = [module.emerge_path] + args + packages - return cmd, module.run_command(cmd) - - -portage_present_states = ['present', 'emerged', 'installed', 'latest'] -portage_absent_states = ['absent', 'unmerged', 'removed'] - - -def main(): - module = AnsibleModule( - argument_spec=dict( - package=dict(type='list', elements='str', default=None, aliases=['name']), - state=dict( - default=portage_present_states[0], - choices=portage_present_states + portage_absent_states, - ), - update=dict(default=False, type='bool'), - deep=dict(default=False, type='bool'), - newuse=dict(default=False, type='bool'), - changed_use=dict(default=False, type='bool'), - oneshot=dict(default=False, type='bool'), - noreplace=dict(default=True, type='bool'), - nodeps=dict(default=False, type='bool'), - onlydeps=dict(default=False, type='bool'), - depclean=dict(default=False, type='bool'), - quiet=dict(default=False, type='bool'), - verbose=dict(default=False, type='bool'), - sync=dict(default=None, choices=['yes', 'web', 'no']), - getbinpkgonly=dict(default=False, type='bool'), - getbinpkg=dict(default=False, type='bool'), - usepkgonly=dict(default=False, type='bool'), - usepkg=dict(default=False, type='bool'), - keepgoing=dict(default=False, type='bool'), - jobs=dict(default=None, type='int'), - loadavg=dict(default=None, type='float'), - quietbuild=dict(default=False, type='bool'), - quietfail=dict(default=False, type='bool'), - ), - required_one_of=[['package', 'sync', 'depclean']], - mutually_exclusive=[ - ['nodeps', 'onlydeps'], - ['quiet', 'verbose'], - ['quietbuild', 'verbose'], - ['quietfail', 'verbose'], - ], - supports_check_mode=True, - ) - - module.emerge_path = module.get_bin_path('emerge', required=True) - module.equery_path = module.get_bin_path('equery', required=True) - - p = module.params - - if p['sync'] and p['sync'].strip() != 'no': - sync_repositories(module, webrsync=(p['sync'] == 'web')) 
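`cleanup_packages` above derives its changed status in a similarly textual way: `emerge --depclean` prints a `Number removed: N` summary line, and only `N > 0` counts as a change. A short standalone illustration of that parse; the sample output is invented.

```python
# Parses the 'Number removed: N' summary emitted by 'emerge --depclean',
# mirroring the loop in cleanup_packages above. Sample input is invented.
def depclean_changed(out):
    removed = 0
    for line in out.splitlines():
        if line.startswith('Number removed:'):
            removed = int(line.split(':', 1)[1].strip())
    return removed > 0


print(depclean_changed('Cleaning...\nNumber removed:   3'))  # True
print(depclean_changed('Number removed:   0'))               # False
```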
- if not p['package']: - module.exit_json(msg='Sync successfully finished.') - - packages = [] - if p['package']: - packages.extend(p['package']) - - if p['depclean']: - if packages and p['state'] not in portage_absent_states: - module.fail_json( - msg='Depclean can only be used with package when the state is ' - 'one of: %s' % portage_absent_states, - ) - - cleanup_packages(module, packages) - - elif p['state'] in portage_present_states: - emerge_packages(module, packages) - - elif p['state'] in portage_absent_states: - unmerge_packages(module, packages) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/portinstall.py b/plugins/modules/packaging/os/portinstall.py deleted file mode 100644 index d1c33cc5c8..0000000000 --- a/plugins/modules/packaging/os/portinstall.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, berenddeboer -# Written by berenddeboer -# Based on pkgng module written by bleader -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: portinstall -short_description: Installing packages from FreeBSD's ports system -description: - - Manage packages for FreeBSD using 'portinstall'. -options: - name: - description: - - name of package to install/remove - aliases: [pkg] - required: true - type: str - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - type: str - use_packages: - description: - - use packages instead of ports whenever available - type: bool - required: false - default: yes -author: "berenddeboer (@berenddeboer)" -''' - -EXAMPLES = ''' -- name: Install package foo - community.general.portinstall: - name: foo - state: present - -- name: Install package security/cyrus-sasl2-saslauthd - community.general.portinstall: - name: security/cyrus-sasl2-saslauthd - state: present - -- name: Remove packages foo and bar - community.general.portinstall: - name: foo,bar - state: absent -''' - -import os -import re -import sys - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -def query_package(module, name): - - pkg_info_path = module.get_bin_path('pkg_info', False) - - # Assume that if we have pkg_info, we haven't upgraded to pkgng - if pkg_info_path: - pkgng = False - pkg_glob_path = module.get_bin_path('pkg_glob', True) - rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True) - else: - pkgng = True - pkg_info_path = module.get_bin_path('pkg', True) - pkg_info_path = pkg_info_path + " info" - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) - - found = rc == 0 - - if not found: - # databases/mysql55-client installs as mysql-client, so try solving - # that the ugly way. 
Pity FreeBSD doesn't have a fool proof way of checking - # some package is installed - name_without_digits = re.sub('[0-9]', '', name) - if name != name_without_digits: - if pkgng: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - else: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - - found = rc == 0 - - return found - - -def matching_packages(module, name): - - ports_glob_path = module.get_bin_path('ports_glob', True) - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) - # counts the number of packages found - occurrences = out.count('\n') - if occurrences == 0: - name_without_digits = re.sub('[0-9]', '', name) - if name != name_without_digits: - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) - occurrences = out.count('\n') - return occurrences - - -def remove_packages(module, packages): - - remove_c = 0 - pkg_glob_path = module.get_bin_path('pkg_glob', True) - - # If pkg_delete not found, we assume pkgng - pkg_delete_path = module.get_bin_path('pkg_delete', False) - if not pkg_delete_path: - pkg_delete_path = module.get_bin_path('pkg', True) - pkg_delete_path = pkg_delete_path + " delete -y" - - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True) - - if query_package(module, package): - name_without_digits = re.sub('[0-9]', '', package) - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, - shlex_quote(name_without_digits)), - use_unsafe_shell=True) - if query_package(module, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, packages, use_packages): - - install_c = 0 - - # If portinstall not found, automagically install - portinstall_path = module.get_bin_path('portinstall', False) - if not portinstall_path: - pkg_path = module.get_bin_path('pkg', False) - if pkg_path: - module.run_command("pkg install -y portupgrade") - portinstall_path = module.get_bin_path('portinstall', True) - - if use_packages: - portinstall_params = "--use-packages" - else: - portinstall_params = "" - - for package in packages: - if query_package(module, package): - continue - - # TODO: check how many match - matches = matching_packages(module, package) - if matches == 1: - rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package)) - if not query_package(module, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - elif matches == 0: - module.fail_json(msg="no matches for package %s" % (package)) - else: - module.fail_json(msg="%s matches found for package name %s" % (matches, package)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default="present", choices=["present", "absent"]), - name=dict(aliases=["pkg"], required=True), - 
use_packages=dict(type='bool', default=True))) - - p = module.params - - pkgs = p["name"].split(",") - - if p["state"] == "present": - install_packages(module, pkgs, p["use_packages"]) - - elif p["state"] == "absent": - remove_packages(module, pkgs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pulp_repo.py b/plugins/modules/packaging/os/pulp_repo.py deleted file mode 100644 index d14d84451b..0000000000 --- a/plugins/modules/packaging/os/pulp_repo.py +++ /dev/null @@ -1,736 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Joe Adams <@sysadmind> -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pulp_repo -author: "Joe Adams (@sysadmind)" -short_description: Add or remove Pulp repos from a remote host. -description: - - Add or remove Pulp repos from a remote host. - - Note, this is for Pulp 2 only. -options: - add_export_distributor: - description: - - Whether or not to add the export distributor to new C(rpm) repositories. - type: bool - default: no - feed: - description: - - Upstream feed URL to receive updates from. - type: str - force_basic_auth: - description: - - httplib2, the library used by the M(ansible.builtin.uri) module only sends - authentication information when a webservice responds to an initial - request with a 401 status. Since some basic auth services do not - properly send a 401, logins will fail. This option forces the sending of - the Basic authentication header upon initial request. - type: bool - default: no - generate_sqlite: - description: - - Boolean flag to indicate whether sqlite files should be generated during - a repository publish. - required: false - type: bool - default: no - feed_ca_cert: - description: - - CA certificate string used to validate the feed source SSL certificate. - This can be the file content or the path to the file. - type: str - aliases: [ importer_ssl_ca_cert ] - feed_client_cert: - description: - - Certificate used as the client certificate when synchronizing the - repository. This is used to communicate authentication information to - the feed source. The value to this option must be the full path to the - certificate. The specified file may be the certificate itself or a - single file containing both the certificate and private key. This can be - the file content or the path to the file. - type: str - aliases: [ importer_ssl_client_cert ] - feed_client_key: - description: - - Private key to the certificate specified in I(importer_ssl_client_cert), - assuming it is not included in the certificate file itself. This can be - the file content or the path to the file. - type: str - aliases: [ importer_ssl_client_key ] - name: - description: - - Name of the repo to add or remove. This correlates to repo-id in Pulp. - required: true - type: str - aliases: [ repo ] - proxy_host: - description: - - Proxy url setting for the pulp repository importer. This is in the - format scheme://host. - required: false - default: null - type: str - proxy_port: - description: - - Proxy port setting for the pulp repository importer. - required: false - default: null - type: str - proxy_username: - description: - - Proxy username for the pulp repository importer. - required: false - default: null - type: str - proxy_password: - description: - - Proxy password for the pulp repository importer. 
- required: false - default: null - type: str - publish_distributor: - description: - - Distributor to use when state is C(publish). The default is to - publish all distributors. - type: str - pulp_host: - description: - - URL of the pulp server to connect to. - default: https://127.0.0.1 - type: str - relative_url: - description: - - Relative URL for the local repository. It's required when state=present. - type: str - repo_type: - description: - - Repo plugin type to use (i.e. C(rpm), C(docker)). - default: rpm - type: str - repoview: - description: - - Whether to generate repoview files for a published repository. Setting - this to "yes" automatically activates `generate_sqlite`. - required: false - type: bool - default: no - serve_http: - description: - - Make the repo available over HTTP. - type: bool - default: no - serve_https: - description: - - Make the repo available over HTTPS. - type: bool - default: yes - state: - description: - - The repo state. A state of C(sync) will queue a sync of the repo. - This is asynchronous but not delayed like a scheduled sync. A state of - C(publish) will use the repository's distributor to publish the content. - default: present - choices: [ "present", "absent", "sync", "publish" ] - type: str - url_password: - description: - - The password for use in HTTP basic authentication to the pulp API. - If the I(url_username) parameter is not specified, the I(url_password) - parameter will not be used. - url_username: - description: - - The username for use in HTTP basic authentication to the pulp API. - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be - used on personally controlled sites using self-signed certificates. - type: bool - default: yes - wait_for_completion: - description: - - Wait for asynchronous tasks to complete before returning. - type: bool - default: no -notes: - - This module can currently only create distributors and importers on rpm - repositories. Contributions to support other repo types are welcome. -extends_documentation_fragment: - - url -''' - -EXAMPLES = ''' -- name: Create a new repo with name 'my_repo' - community.general.pulp_repo: - name: my_repo - relative_url: my/repo - state: present - -- name: Create a repo with a feed and a relative URL - community.general.pulp_repo: - name: my_centos_updates - repo_type: rpm - feed: http://mirror.centos.org/centos/6/updates/x86_64/ - relative_url: centos/6/updates - url_username: admin - url_password: admin - force_basic_auth: yes - state: present - -- name: Remove a repo from the pulp server - community.general.pulp_repo: - name: my_old_repo - repo_type: rpm - state: absent -''' - -RETURN = ''' -repo: - description: Name of the repo that the action was performed on. 
- returned: success - type: str - sample: my_repo -''' - -import json -import os -from time import sleep - -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.urls import url_argument_spec - - -class pulp_server(object): - """ - Class to interact with a Pulp server - """ - - def __init__(self, module, pulp_host, repo_type, wait_for_completion=False): - self.module = module - self.host = pulp_host - self.repo_type = repo_type - self.repo_cache = dict() - self.wait_for_completion = wait_for_completion - - def check_repo_exists(self, repo_id): - try: - self.get_repo_config_by_id(repo_id) - except IndexError: - return False - else: - return True - - def compare_repo_distributor_config(self, repo_id, **kwargs): - repo_config = self.get_repo_config_by_id(repo_id) - - for distributor in repo_config['distributors']: - for key, value in kwargs.items(): - if key not in distributor['config'].keys(): - return False - - if not distributor['config'][key] == value: - return False - - return True - - def compare_repo_importer_config(self, repo_id, **kwargs): - repo_config = self.get_repo_config_by_id(repo_id) - - for importer in repo_config['importers']: - for key, value in kwargs.items(): - if value is not None: - if key not in importer['config'].keys(): - return False - - if not importer['config'][key] == value: - return False - - return True - - def create_repo( - self, - repo_id, - relative_url, - feed=None, - generate_sqlite=False, - serve_http=False, - serve_https=True, - proxy_host=None, - proxy_port=None, - proxy_username=None, - proxy_password=None, - repoview=False, - ssl_ca_cert=None, - ssl_client_cert=None, - ssl_client_key=None, - add_export_distributor=False - ): - url = "%s/pulp/api/v2/repositories/" % self.host - data = dict() - data['id'] = repo_id - data['distributors'] = [] - - if self.repo_type == 'rpm': - yum_distributor = dict() - yum_distributor['distributor_id'] = "yum_distributor" - yum_distributor['distributor_type_id'] = "yum_distributor" - yum_distributor['auto_publish'] = True - yum_distributor['distributor_config'] = dict() - yum_distributor['distributor_config']['http'] = serve_http - yum_distributor['distributor_config']['https'] = serve_https - yum_distributor['distributor_config']['relative_url'] = relative_url - yum_distributor['distributor_config']['repoview'] = repoview - yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview - data['distributors'].append(yum_distributor) - - if add_export_distributor: - export_distributor = dict() - export_distributor['distributor_id'] = "export_distributor" - export_distributor['distributor_type_id'] = "export_distributor" - export_distributor['auto_publish'] = False - export_distributor['distributor_config'] = dict() - export_distributor['distributor_config']['http'] = serve_http - export_distributor['distributor_config']['https'] = serve_https - export_distributor['distributor_config']['relative_url'] = relative_url - export_distributor['distributor_config']['repoview'] = repoview - export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview - data['distributors'].append(export_distributor) - - data['importer_type_id'] = "yum_importer" - data['importer_config'] = dict() - - if feed: - data['importer_config']['feed'] = feed - - if proxy_host: - data['importer_config']['proxy_host'] = proxy_host - - if proxy_port: - data['importer_config']['proxy_port'] = proxy_port - - 
if proxy_username: - data['importer_config']['proxy_username'] = proxy_username - - if proxy_password: - data['importer_config']['proxy_password'] = proxy_password - - if ssl_ca_cert: - data['importer_config']['ssl_ca_cert'] = ssl_ca_cert - - if ssl_client_cert: - data['importer_config']['ssl_client_cert'] = ssl_client_cert - - if ssl_client_key: - data['importer_config']['ssl_client_key'] = ssl_client_key - - data['notes'] = { - "_repo-type": "rpm-repo" - } - - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') - - if info['status'] != 201: - self.module.fail_json( - msg="Failed to create repo.", - status_code=info['status'], - response=info['msg'], - url=url) - else: - return True - - def delete_repo(self, repo_id): - url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id) - response, info = fetch_url(self.module, url, data='', method='DELETE') - - if info['status'] != 202: - self.module.fail_json( - msg="Failed to delete repo.", - status_code=info['status'], - response=info['msg'], - url=url) - - if self.wait_for_completion: - self.verify_tasks_completed(json.load(response)) - - return True - - def get_repo_config_by_id(self, repo_id): - if repo_id not in self.repo_cache.keys(): - repo_array = [x for x in self.repo_list if x['id'] == repo_id] - self.repo_cache[repo_id] = repo_array[0] - - return self.repo_cache[repo_id] - - def publish_repo(self, repo_id, publish_distributor): - url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id) - - # If there's no distributor specified, we will publish them all - if publish_distributor is None: - repo_config = self.get_repo_config_by_id(repo_id) - - for distributor in repo_config['distributors']: - data = dict() - data['id'] = distributor['id'] - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') - - if info['status'] != 202: - self.module.fail_json( - msg="Failed to publish the repo.", - status_code=info['status'], - response=info['msg'], - url=url, - distributor=distributor['id']) - else: - data = dict() - data['id'] = publish_distributor - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') - - if info['status'] != 202: - self.module.fail_json( - msg="Failed to publish the repo", - status_code=info['status'], - response=info['msg'], - url=url, - distributor=publish_distributor) - - if self.wait_for_completion: - self.verify_tasks_completed(json.load(response)) - - return True - - def sync_repo(self, repo_id): - url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id) - response, info = fetch_url(self.module, url, data='', method='POST') - - if info['status'] != 202: - self.module.fail_json( - msg="Failed to schedule a sync of the repo.", - status_code=info['status'], - response=info['msg'], - url=url) - - if self.wait_for_completion: - self.verify_tasks_completed(json.load(response)) - - return True - - def update_repo_distributor_config(self, repo_id, **kwargs): - url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id) - repo_config = self.get_repo_config_by_id(repo_id) - - for distributor in repo_config['distributors']: - distributor_url = "%s%s/" % (url, distributor['id']) - data = dict() - data['distributor_config'] = dict() - - for key, value in kwargs.items(): - data['distributor_config'][key] = value - - response, info = fetch_url( - self.module, - distributor_url, - data=json.dumps(data), - method='PUT') - - if info['status'] != 202: - 
self.module.fail_json( - msg="Failed to set the relative url for the repository.", - status_code=info['status'], - response=info['msg'], - url=url) - - def update_repo_importer_config(self, repo_id, **kwargs): - url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id) - data = dict() - importer_config = dict() - - for key, value in kwargs.items(): - if value is not None: - importer_config[key] = value - - data['importer_config'] = importer_config - - if self.repo_type == 'rpm': - data['importer_type_id'] = "yum_importer" - - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') - - if info['status'] != 202: - self.module.fail_json( - msg="Failed to set the repo importer configuration", - status_code=info['status'], - response=info['msg'], - importer_config=importer_config, - url=url) - - def set_repo_list(self): - url = "%s/pulp/api/v2/repositories/?details=true" % self.host - response, info = fetch_url(self.module, url, method='GET') - - if info['status'] != 200: - self.module.fail_json( - msg="Request failed", - status_code=info['status'], - response=info['msg'], - url=url) - - self.repo_list = json.load(response) - - def verify_tasks_completed(self, response_dict): - for task in response_dict['spawned_tasks']: - task_url = "%s%s" % (self.host, task['_href']) - - while True: - response, info = fetch_url( - self.module, - task_url, - data='', - method='GET') - - if info['status'] != 200: - self.module.fail_json( - msg="Failed to check async task status.", - status_code=info['status'], - response=info['msg'], - url=task_url) - - task_dict = json.load(response) - - if task_dict['state'] == 'finished': - return True - - if task_dict['state'] == 'error': - self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error']) - - sleep(2) - - -def main(): - argument_spec = url_argument_spec() - argument_spec.update( - add_export_distributor=dict(default=False, type='bool'), - feed=dict(), - generate_sqlite=dict(default=False, type='bool'), - feed_ca_cert=dict(aliases=['importer_ssl_ca_cert']), - feed_client_cert=dict(aliases=['importer_ssl_client_cert']), - feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True), - name=dict(required=True, aliases=['repo']), - proxy_host=dict(), - proxy_port=dict(), - proxy_username=dict(), - proxy_password=dict(no_log=True), - publish_distributor=dict(), - pulp_host=dict(default="https://127.0.0.1"), - relative_url=dict(), - repo_type=dict(default="rpm"), - repoview=dict(default=False, type='bool'), - serve_http=dict(default=False, type='bool'), - serve_https=dict(default=True, type='bool'), - state=dict( - default="present", - choices=['absent', 'present', 'sync', 'publish']), - wait_for_completion=dict(default=False, type="bool")) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True) - - add_export_distributor = module.params['add_export_distributor'] - feed = module.params['feed'] - generate_sqlite = module.params['generate_sqlite'] - importer_ssl_ca_cert = module.params['feed_ca_cert'] - importer_ssl_client_cert = module.params['feed_client_cert'] - importer_ssl_client_key = module.params['feed_client_key'] - proxy_host = module.params['proxy_host'] - proxy_port = module.params['proxy_port'] - proxy_username = module.params['proxy_username'] - proxy_password = module.params['proxy_password'] - publish_distributor = module.params['publish_distributor'] - pulp_host = module.params['pulp_host'] - relative_url = 
module.params['relative_url'] - repo = module.params['name'] - repo_type = module.params['repo_type'] - repoview = module.params['repoview'] - serve_http = module.params['serve_http'] - serve_https = module.params['serve_https'] - state = module.params['state'] - wait_for_completion = module.params['wait_for_completion'] - - if (state == 'present') and (not relative_url): - module.fail_json(msg="When state is present, relative_url is required.") - - # Ensure that the importer_ssl_* is the content and not a file path - if importer_ssl_ca_cert is not None: - importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) - if os.path.isfile(importer_ssl_ca_cert_file_path): - importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r') - try: - importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() - finally: - importer_ssl_ca_cert_file_object.close() - - if importer_ssl_client_cert is not None: - importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) - if os.path.isfile(importer_ssl_client_cert_file_path): - importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r') - try: - importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() - finally: - importer_ssl_client_cert_file_object.close() - - if importer_ssl_client_key is not None: - importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) - if os.path.isfile(importer_ssl_client_key_file_path): - importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r') - try: - importer_ssl_client_key = importer_ssl_client_key_file_object.read() - finally: - importer_ssl_client_key_file_object.close() - - server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) - server.set_repo_list() - repo_exists = server.check_repo_exists(repo) - - changed = False - - if state == 'absent' and repo_exists: - if not module.check_mode: - server.delete_repo(repo) - - changed = True - - if state == 'sync': - if not repo_exists: - module.fail_json(msg="Repository was not found. The repository can not be synced.") - - if not module.check_mode: - server.sync_repo(repo) - - changed = True - - if state == 'publish': - if not repo_exists: - module.fail_json(msg="Repository was not found. The repository can not be published.") - - if not module.check_mode: - server.publish_repo(repo, publish_distributor) - - changed = True - - if state == 'present': - if not repo_exists: - if not module.check_mode: - server.create_repo( - repo_id=repo, - relative_url=relative_url, - feed=feed, - generate_sqlite=generate_sqlite, - serve_http=serve_http, - serve_https=serve_https, - proxy_host=proxy_host, - proxy_port=proxy_port, - proxy_username=proxy_username, - proxy_password=proxy_password, - repoview=repoview, - ssl_ca_cert=importer_ssl_ca_cert, - ssl_client_cert=importer_ssl_client_cert, - ssl_client_key=importer_ssl_client_key, - add_export_distributor=add_export_distributor) - - changed = True - - else: - # Check to make sure all the settings are correct - # The importer config gets overwritten on set and not updated, so - # we set the whole config at the same time. 
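The comment above captures the module's idempotency pattern: every desired importer setting is compared against the live config first, and only a mismatch triggers a write, which must then carry the complete desired config because Pulp replaces the importer config rather than merging individual keys. A reduced sketch of that pattern follows; the dictionaries and helper are hypothetical stand-ins for the API calls.

```python
# Reduced sketch of the compare-then-replace idempotency pattern used below.
# 'current' stands in for the live importer config fetched from Pulp and
# 'desired' for the module parameters. Keys set to None are "unmanaged" and
# are ignored during comparison, as in compare_repo_importer_config.
def importer_config_in_sync(current, desired):
    for key, value in desired.items():
        if value is None:
            continue  # option not managed by the caller
        if current.get(key) != value:
            return False
    return True


current = {'feed': 'http://mirror.example.com/repo', 'proxy_host': None}
desired = {'feed': 'http://mirror.example.com/repo', 'proxy_host': None,
           'proxy_port': None}

if not importer_config_in_sync(current, desired):
    # A real run would now POST the *whole* desired config, since Pulp
    # overwrites the importer config instead of merging individual keys.
    print('would update importer config')
else:
    print('in sync')
```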
- if not server.compare_repo_importer_config( - repo, - feed=feed, - proxy_host=proxy_host, - proxy_port=proxy_port, - proxy_username=proxy_username, - proxy_password=proxy_password, - ssl_ca_cert=importer_ssl_ca_cert, - ssl_client_cert=importer_ssl_client_cert, - ssl_client_key=importer_ssl_client_key - ): - if not module.check_mode: - server.update_repo_importer_config( - repo, - feed=feed, - proxy_host=proxy_host, - proxy_port=proxy_port, - proxy_username=proxy_username, - proxy_password=proxy_password, - ssl_ca_cert=importer_ssl_ca_cert, - ssl_client_cert=importer_ssl_client_cert, - ssl_client_key=importer_ssl_client_key) - - changed = True - - if relative_url is not None: - if not server.compare_repo_distributor_config( - repo, - relative_url=relative_url - ): - if not module.check_mode: - server.update_repo_distributor_config( - repo, - relative_url=relative_url) - - changed = True - - if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite): - if not module.check_mode: - server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite) - - changed = True - - if not server.compare_repo_distributor_config(repo, repoview=repoview): - if not module.check_mode: - server.update_repo_distributor_config(repo, repoview=repoview) - - changed = True - - if not server.compare_repo_distributor_config(repo, http=serve_http): - if not module.check_mode: - server.update_repo_distributor_config(repo, http=serve_http) - - changed = True - - if not server.compare_repo_distributor_config(repo, https=serve_https): - if not module.check_mode: - server.update_repo_distributor_config(repo, https=serve_https) - - changed = True - - module.exit_json(changed=changed, repo=repo) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py deleted file mode 100644 index 7bb540b3f1..0000000000 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ /dev/null @@ -1,947 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# James Laska (jlaska@redhat.com) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: redhat_subscription -short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command -description: - - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command -author: "Barnaby Court (@barnabycourt)" -notes: - - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID. - - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl), - I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and - I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) - config file and default to None. 
-requirements: - - subscription-manager -options: - state: - description: - - whether to register and subscribe (C(present)), or unregister (C(absent)) a system - choices: [ "present", "absent" ] - default: "present" - type: str - username: - description: - - access.redhat.com or Sat6 username - type: str - password: - description: - - access.redhat.com or Sat6 password - type: str - server_hostname: - description: - - Specify an alternative Red Hat Subscription Management or Sat6 server - type: str - server_insecure: - description: - - Enable or disable https server certificate verification when connecting to C(server_hostname) - type: str - server_prefix: - description: - - Specify the prefix when registering to the Red Hat Subscription Management or Sat6 server. - type: str - version_added: 3.3.0 - server_port: - description: - - Specify the port when registering to the Red Hat Subscription Management or Sat6 server. - type: str - version_added: 3.3.0 - rhsm_baseurl: - description: - - Specify CDN baseurl - type: str - rhsm_repo_ca_cert: - description: - - Specify an alternative location for a CA certificate for CDN - type: str - server_proxy_hostname: - description: - - Specify an HTTP proxy hostname. - type: str - server_proxy_port: - description: - - Specify an HTTP proxy port. - type: str - server_proxy_user: - description: - - Specify a user for HTTP proxy with basic authentication - type: str - server_proxy_password: - description: - - Specify a password for HTTP proxy with basic authentication - type: str - auto_attach: - description: - - Upon successful registration, auto-consume available subscriptions - - Added in favor of deprecated autosubscribe in 2.5. - type: bool - aliases: [autosubscribe] - activationkey: - description: - - supply an activation key for use with registration - type: str - org_id: - description: - - Organization ID to use in conjunction with activationkey - type: str - environment: - description: - - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello - type: str - pool: - description: - - | - Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if - possible, as it is much faster. Mutually exclusive with I(pool_ids). - default: '^$' - type: str - pool_ids: - description: - - | - Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster. - A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)), - or as a C(dict) with the pool ID as the key, and a quantity as the value (ex. - C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple - entitlements from a pool (the pool must support this). Mutually exclusive with I(pool). - default: [] - type: list - elements: raw - consumer_type: - description: - - The type of unit to register, defaults to system - type: str - consumer_name: - description: - - Name of the system to register, defaults to the hostname - type: str - consumer_id: - description: - - | - References an existing consumer ID to resume using a previous registration - for this system. If the system's identity certificate is lost or corrupted, - this option allows it to resume using its previous identity and subscriptions. - The default is to not specify a consumer ID so a new ID is created. 
- type: str - force_register: - description: - - Register the system even if it is already registered - type: bool - default: no - release: - description: - - Set a release version - type: str - syspurpose: - description: - - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) - and synchronize these attributes with RHSM server. Syspurpose attributes help attach - the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file - already contains some attributes, then new attributes overwrite existing attributes. - When some attribute is not listed in the new list of attributes, the existing - attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. - type: dict - default: {} - suboptions: - usage: - description: Syspurpose attribute usage - type: str - role: - description: Syspurpose attribute role - type: str - service_level_agreement: - description: Syspurpose attribute service_level_agreement - type: str - addons: - description: Syspurpose attribute addons - type: list - elements: str - sync: - description: - - When this option is true, then syspurpose attributes are synchronized with - RHSM server immediately. When this option is false, then syspurpose attributes - will be synchronized with RHSM server by rhsmcertd daemon. - type: bool - default: no -''' - -EXAMPLES = ''' -- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - auto_attach: true - -- name: Same as above but subscribe to a specific pool by ID. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - pool_ids: 0123456789abcdef0123456789abcdef - -- name: Register and subscribe to multiple pools. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - pool_ids: - - 0123456789abcdef0123456789abcdef - - 1123456789abcdef0123456789abcdef - -- name: Same as above but consume multiple entitlements. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - pool_ids: - - 0123456789abcdef0123456789abcdef: 2 - - 1123456789abcdef0123456789abcdef: 4 - -- name: Register and pull existing system data. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - -- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization - community.general.redhat_subscription: - state: present - activationkey: 1-222333444 - org_id: 222333444 - pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' - -- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) - community.general.redhat_subscription: - state: present - activationkey: 1-222333444 - org_id: 222333444 - pool: '^Red Hat Enterprise Server$' - -- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe. 
- community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - environment: Library - auto_attach: true - -- name: Register as user (joe_user) with password (somepass) and a specific release - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - release: 7.4 - -- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - auto_attach: true - syspurpose: - usage: "Production" - role: "Red Hat Enterprise Server" - service_level_agreement: "Premium" - addons: - - addon1 - - addon2 - sync: true -''' - -RETURN = ''' -subscribed_pool_ids: - description: List of pool IDs to which system is now subscribed - returned: success - type: complex - sample: { - "8a85f9815ab905d3015ab928c7005de4": "1" - } -''' - -from os.path import isfile -from os import unlink -import re -import shutil -import tempfile -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six.moves import configparser - - -SUBMAN_CMD = None - - -class RegistrationBase(object): - - REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" - - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - if isfile(self.REDHAT_REPO): - unlink(self.REDHAT_REPO) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - - if isfile(plugin_conf): - tmpfd, tmpfile = tempfile.mkstemp() - shutil.copy2(plugin_conf, tmpfile) - cfg = configparser.ConfigParser() - cfg.read([tmpfile]) - - if enabled: - cfg.set('main', 'enabled', '1') - else: - cfg.set('main', 'enabled', '0') - - fd = open(tmpfile, 'w+') - cfg.write(fd) - fd.close() - self.module.atomic_move(tmpfile, plugin_conf) - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.module = module - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHSM - Raises: - * Exception - if error occurs while running command - ''' - - args = [SUBMAN_CMD, 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--server.hostname'. 
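That mapping is easy to verify in isolation: only keys prefixed `server_` or `rhsm_` are forwarded, with the first underscore turned into a dot, exactly as the loop below does. A standalone sketch with an invented kwargs sample:

```python
# Standalone version of the option mapping performed by the loop below.
# The kwargs sample is invented for demonstration.
import re


def to_config_options(kwargs):
    options = []
    for k, v in sorted(kwargs.items()):
        # Only configuration keys are forwarded; everything else is ignored,
        # as are keys whose value was left unset.
        if re.search(r'^(server|rhsm)_', k) and v is not None:
            options.append('--%s=%s' % (k.replace('_', '.', 1), v))
    return options


print(to_config_options({
    'server_hostname': 'subscription.example.com',
    'rhsm_baseurl': None,          # unset values are skipped
    'username': 'joe_user',        # not a server_/rhsm_ key: ignored
    'server_proxy_hostname': 'proxy.example.com',
}))
# -> ['--server.hostname=subscription.example.com',
#     '--server.proxy_hostname=proxy.example.com']
```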
- options = [] - for k, v in sorted(kwargs.items()): - if re.search(r'^(server|rhsm)_', k) and v is not None: - options.append('--%s=%s' % (k.replace('_', '.', 1), v)) - - # When there is nothing to configure, then it is not necessary - # to run config command, because it only returns current - # content of current configuration file - if len(options) == 0: - return - - args.extend(options) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHSM. - ''' - - args = [SUBMAN_CMD, 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, auto_attach, activationkey, org_id, - consumer_type, consumer_name, consumer_id, force_register, environment, - rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname, - server_proxy_port, server_proxy_user, server_proxy_password, release): - ''' - Register the current system to the provided RHSM or Sat6 server - Raises: - * Exception - if error occurs while running command - ''' - args = [SUBMAN_CMD, 'register'] - - # Generate command arguments - if force_register: - args.extend(['--force']) - - if rhsm_baseurl: - args.extend(['--baseurl', rhsm_baseurl]) - - if server_insecure: - args.extend(['--insecure']) - - if server_hostname: - args.extend(['--serverurl', server_hostname]) - - if org_id: - args.extend(['--org', org_id]) - - if server_proxy_hostname and server_proxy_port: - args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port]) - - if server_proxy_user: - args.extend(['--proxyuser', server_proxy_user]) - - if server_proxy_password: - args.extend(['--proxypassword', server_proxy_password]) - - if activationkey: - args.extend(['--activationkey', activationkey]) - else: - if auto_attach: - args.append('--auto-attach') - if username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - if consumer_type: - args.extend(['--type', consumer_type]) - if consumer_name: - args.extend(['--name', consumer_name]) - if consumer_id: - args.extend(['--consumerid', consumer_id]) - if environment: - args.extend(['--environment', environment]) - - if release: - args.extend(['--release', release]) - - rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False) - - def unsubscribe(self, serials=None): - ''' - Unsubscribe a system from subscribed channels - Args: - serials(list or None): list of serials to unsubscribe. If - serials is none or an empty list, then - all subscribed channels will be removed. 
- Raises: - * Exception - if error occurs while running command - ''' - items = [] - if serials is not None and serials: - items = ["--serial=%s" % s for s in serials] - if serials is None: - items = ["--all"] - - if items: - args = [SUBMAN_CMD, 'unsubscribe'] + items - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - return serials - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = [SUBMAN_CMD, 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression. It matches regexp against available pool ids first. - If any pool ids match, subscribe to those pools and return. - - If no pool ids match, then match regexp against available pool product - names. Note this can still easily match many many pools. Then subscribe - to those pools. - - Since a pool id is a more specific match, we only fallback to matching - against names if we didn't match pool ids. - - Raises: - * Exception - if error occurs while running command - ''' - # See https://github.com/ansible/ansible/issues/19466 - - # subscribe to pools whose pool id matches regexp (and only the pool id) - subscribed_pool_ids = self.subscribe_pool(regexp) - - # If we found any matches, we are done - # Don't attempt to match pools by product name - if subscribed_pool_ids: - return subscribed_pool_ids - - # We didn't match any pool ids. - # Now try subscribing to pools based on product name match - # Note: This can match lots of product names. 
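The two-stage matching described above, an ID match first with a product-name match only as a fallback, can be shown with plain data; the pools below are invented.

```python
# Standalone sketch of the two-stage pool matching described above.
# Pool data is invented; real pools come from 'subscription-manager list'.
import re


def match_pools(pools, regexp):
    r = re.compile(regexp)
    # Stage 1: match against pool IDs only, the more specific criterion.
    by_id = [p['id'] for p in pools if r.search(p['id'])]
    if by_id:
        return by_id  # never fall through to name matching
    # Stage 2: match against product names, which can match far more pools.
    return [p['id'] for p in pools if r.search(p['name'])]


pools = [
    {'id': '0123456789abcdef0123456789abcdef',
     'name': 'Red Hat Enterprise Server'},
    {'id': '1123456789abcdef0123456789abcdef',
     'name': 'Red Hat Virtualization'},
]
print(match_pools(pools, '^0123'))                      # ID match wins
print(match_pools(pools, 'Red Hat Enterprise Server'))  # name fallback
```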
- subscribed_by_product_pool_ids = self.subscribe_product(regexp) - if subscribed_by_product_pool_ids: - return subscribed_by_product_pool_ids - - # no matches - return [] - - def subscribe_by_pool_ids(self, pool_ids): - """ - Try to subscribe to the list of pool IDs - """ - available_pools = RhsmPools(self.module) - - available_pool_ids = [p.get_pool_id() for p in available_pools] - - for pool_id, quantity in sorted(pool_ids.items()): - if pool_id in available_pool_ids: - args = [SUBMAN_CMD, 'attach', '--pool', pool_id] - if quantity is not None: - args.extend(['--quantity', to_native(quantity)]) - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - else: - self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id) - return pool_ids - - def subscribe_pool(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - subscribed_pool_ids = [] - for pool in available_pools.filter_pools(regexp): - pool.subscribe() - subscribed_pool_ids.append(pool.get_pool_id()) - return subscribed_pool_ids - - def subscribe_product(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - subscribed_pool_ids = [] - for pool in available_pools.filter_products(regexp): - pool.subscribe() - subscribed_pool_ids.append(pool.get_pool_id()) - return subscribed_pool_ids - - def update_subscriptions(self, regexp): - changed = False - consumed_pools = RhsmPools(self.module, consumed=True) - pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)] - pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)]) - - serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] - serials = self.unsubscribe(serials=serials_to_remove) - - subscribed_pool_ids = self.subscribe(regexp) - - if subscribed_pool_ids or serials: - changed = True - return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, - 'unsubscribed_serials': serials} - - def update_subscriptions_by_pool_ids(self, pool_ids): - changed = False - consumed_pools = RhsmPools(self.module, consumed=True) - - existing_pools = {} - for p in consumed_pools: - existing_pools[p.get_pool_id()] = p.QuantityUsed - - serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed] - serials = self.unsubscribe(serials=serials_to_remove) - - missing_pools = {} - for pool_id, quantity in sorted(pool_ids.items()): - if existing_pools.get(pool_id, 0) != quantity: - missing_pools[pool_id] = quantity - - self.subscribe_by_pool_ids(missing_pools) - - if missing_pools or serials: - changed = True - return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()), - 'unsubscribed_serials': serials} - - def sync_syspurpose(self): - """ - Try to synchronize syspurpose attributes with server - """ - args = [SUBMAN_CMD, 'status'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - - -class RhsmPool(object): - ''' - Convenience class for housing subscription information - ''' - - def __init__(self, module, **kwargs): - self.module = module - for k, v in 
kwargs.items(): - setattr(self, k, v) - - def __str__(self): - return str(self.__getattribute__('_name')) - - def get_pool_id(self): - return getattr(self, 'PoolId', getattr(self, 'PoolID')) - - def subscribe(self): - args = "subscription-manager attach --pool %s" % self.get_pool_id() - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - """ - - def __init__(self, module, consumed=False): - self.module = module - self.products = self._load_product_list(consumed) - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self, consumed=False): - """ - Loads list of all available or consumed pools for system in data structure - - Args: - consumed(bool): if True list consumed pools, else list available pools (default False) - """ - args = "subscription-manager list" - if consumed: - args += " --consumed" - else: - args += " --available" - lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of a output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - products[-1].__setattr__(key, value) - # FIXME - log some warning? 
- # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter_pools(self, regexp='^$'): - ''' - Return a list of RhsmPools whose pool id matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product.get_pool_id()): - yield product - - def filter_products(self, regexp='^$'): - ''' - Return a list of RhsmPools whose product name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product - - -class SysPurpose(object): - """ - This class is used for reading and writing to syspurpose.json file - """ - - SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json" - - ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons'] - - def __init__(self, path=None): - """ - Initialize class used for reading syspurpose json file - """ - self.path = path or self.SYSPURPOSE_FILE_PATH - - def update_syspurpose(self, new_syspurpose): - """ - Try to update current syspurpose with new attributes from new_syspurpose - """ - syspurpose = {} - syspurpose_changed = False - for key, value in new_syspurpose.items(): - if key in self.ALLOWED_ATTRIBUTES: - if value is not None: - syspurpose[key] = value - elif key == 'sync': - pass - else: - raise KeyError("Attribute: %s not in list of allowed attributes: %s" % - (key, self.ALLOWED_ATTRIBUTES)) - current_syspurpose = self._read_syspurpose() - if current_syspurpose != syspurpose: - syspurpose_changed = True - # Update current syspurpose with new values - current_syspurpose.update(syspurpose) - # When some key is not listed in new syspurpose, then delete it from current syspurpose - # and ignore custom attributes created by user (e.g. "foo": "bar") - for key in list(current_syspurpose): - if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose: - del current_syspurpose[key] - self._write_syspurpose(current_syspurpose) - return syspurpose_changed - - def _write_syspurpose(self, new_syspurpose): - """ - This function tries to update current new_syspurpose attributes to - json file. - """ - with open(self.path, "w") as fp: - fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True)) - - def _read_syspurpose(self): - """ - Read current syspurpuse from json file. - """ - current_syspurpose = {} - try: - with open(self.path, "r") as fp: - content = fp.read() - except IOError: - pass - else: - current_syspurpose = json.loads(content) - return current_syspurpose - - -def main(): - - # Load RHSM configuration from file - rhsm = Rhsm(None) - - # Note: the default values for parameters are: - # 'type': 'str', 'default': None, 'required': False - # So there is no need to repeat these values for each parameter. 
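# A minimal, runnable sketch (outside Ansible) of the read/merge/write cycle
# that SysPurpose.update_syspurpose() implements above. A NamedTemporaryFile
# stands in here for /etc/rhsm/syspurpose/syspurpose.json, and the strict
# KeyError validation of unknown attributes is omitted for brevity.
import json
import tempfile

ALLOWED = ['role', 'usage', 'service_level_agreement', 'addons']

def merge_syspurpose(path, new_attrs):
    # Read the current contents, tolerating a missing file.
    try:
        with open(path) as fp:
            current = json.load(fp)
    except IOError:
        current = {}
    desired = dict((k, v) for k, v in new_attrs.items()
                   if k in ALLOWED and v is not None)
    changed = current != desired
    # Forget managed keys that are no longer requested, but keep any custom
    # user-created keys (e.g. "foo": "bar"), exactly as the class above does.
    for key in list(current):
        if key in ALLOWED and key not in desired:
            del current[key]
    current.update(desired)
    with open(path, 'w') as fp:
        json.dump(current, fp, indent=2, sort_keys=True)
    return changed

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    tmp.write('{"role": "server", "foo": "bar"}')
print(merge_syspurpose(tmp.name, {'role': 'workstation', 'usage': 'dev'}))  # True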
- module = AnsibleModule( - argument_spec={ - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'username': {}, - 'password': {'no_log': True}, - 'server_hostname': {}, - 'server_insecure': {}, - 'server_prefix': {}, - 'server_port': {}, - 'rhsm_baseurl': {}, - 'rhsm_repo_ca_cert': {}, - 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'}, - 'activationkey': {'no_log': True}, - 'org_id': {}, - 'environment': {}, - 'pool': {'default': '^$'}, - 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, - 'consumer_type': {}, - 'consumer_name': {}, - 'consumer_id': {}, - 'force_register': {'default': False, 'type': 'bool'}, - 'server_proxy_hostname': {}, - 'server_proxy_port': {}, - 'server_proxy_user': {}, - 'server_proxy_password': {'no_log': True}, - 'release': {}, - 'syspurpose': { - 'type': 'dict', - 'options': { - 'role': {}, - 'usage': {}, - 'service_level_agreement': {}, - 'addons': {'type': 'list', 'elements': 'str'}, - 'sync': {'type': 'bool', 'default': False} - } - } - }, - required_together=[['username', 'password'], - ['server_proxy_hostname', 'server_proxy_port'], - ['server_proxy_user', 'server_proxy_password']], - mutually_exclusive=[['activationkey', 'username'], - ['activationkey', 'consumer_id'], - ['activationkey', 'environment'], - ['activationkey', 'auto_attach'], - ['pool', 'pool_ids']], - required_if=[['state', 'present', ['username', 'activationkey'], True]], - ) - - rhsm.module = module - state = module.params['state'] - username = module.params['username'] - password = module.params['password'] - server_hostname = module.params['server_hostname'] - server_insecure = module.params['server_insecure'] - server_prefix = module.params['server_prefix'] - server_port = module.params['server_port'] - rhsm_baseurl = module.params['rhsm_baseurl'] - rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] - auto_attach = module.params['auto_attach'] - activationkey = module.params['activationkey'] - org_id = module.params['org_id'] - if activationkey and not org_id: - module.fail_json(msg='org_id is required when using activationkey') - environment = module.params['environment'] - pool = module.params['pool'] - pool_ids = {} - for value in module.params['pool_ids']: - if isinstance(value, dict): - if len(value) != 1: - module.fail_json(msg='Unable to parse pool_ids option.') - pool_id, quantity = list(value.items())[0] - else: - pool_id, quantity = value, None - pool_ids[pool_id] = quantity - consumer_type = module.params["consumer_type"] - consumer_name = module.params["consumer_name"] - consumer_id = module.params["consumer_id"] - force_register = module.params["force_register"] - server_proxy_hostname = module.params['server_proxy_hostname'] - server_proxy_port = module.params['server_proxy_port'] - server_proxy_user = module.params['server_proxy_user'] - server_proxy_password = module.params['server_proxy_password'] - release = module.params['release'] - syspurpose = module.params['syspurpose'] - - global SUBMAN_CMD - SUBMAN_CMD = module.get_bin_path('subscription-manager', True) - - syspurpose_changed = False - if syspurpose is not None: - try: - syspurpose_changed = SysPurpose().update_syspurpose(syspurpose) - except Exception as err: - module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err)) - - # Ensure system is registered - if state == 'present': - - # Register system - if rhsm.is_registered and not force_register: - if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: - try: - 
rhsm.sync_syspurpose() - except Exception as e: - module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) - if pool != '^$' or pool_ids: - try: - if pool_ids: - result = rhsm.update_subscriptions_by_pool_ids(pool_ids) - else: - result = rhsm.update_subscriptions(pool) - except Exception as e: - module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) - else: - module.exit_json(**result) - else: - if syspurpose_changed is True: - module.exit_json(changed=True, msg="Syspurpose attributes changed.") - else: - module.exit_json(changed=False, msg="System already registered.") - else: - try: - rhsm.enable() - rhsm.configure(**module.params) - rhsm.register(username, password, auto_attach, activationkey, org_id, - consumer_type, consumer_name, consumer_id, force_register, - environment, rhsm_baseurl, server_insecure, server_hostname, - server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release) - if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: - rhsm.sync_syspurpose() - if pool_ids: - subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) - elif pool != '^$': - subscribed_pool_ids = rhsm.subscribe(pool) - else: - subscribed_pool_ids = [] - except Exception as e: - module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) - else: - module.exit_json(changed=True, - msg="System successfully registered to '%s'." % server_hostname, - subscribed_pool_ids=subscribed_pool_ids) - - # Ensure system is *not* registered - if state == 'absent': - if not rhsm.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - else: - try: - rhsm.unsubscribe() - rhsm.unregister() - except Exception as e: - module.fail_json(msg="Failed to unregister: %s" % to_native(e)) - else: - module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py deleted file mode 100644 index e3a1ae3098..0000000000 --- a/plugins/modules/packaging/os/rhn_channel.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) Vincent Van de Kussen -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: rhn_channel -short_description: Adds or removes Red Hat software channels -description: - - Adds or removes Red Hat software channels. -author: -- Vincent Van der Kussen (@vincentvdk) -notes: - - This module fetches the system id from RHN. - - This module doesn't support I(check_mode). -options: - name: - description: - - Name of the software channel. - required: true - type: str - sysname: - description: - - Name of the system as it is known in RHN/Satellite. - required: true - type: str - state: - description: - - Whether the channel should be present or not, taking action if the state is different from what is stated. - default: present - choices: [ present, absent ] - type: str - url: - description: - - The full URL to the RHN/Satellite API. - required: true - type: str - user: - description: - - RHN/Satellite login. - required: true - type: str - password: - description: - - RHN/Satellite password. 
- aliases: [pwd] - required: true - type: str - validate_certs: - description: - - If C(False), SSL certificates will not be validated. - - This should only set to C(False) when used on self controlled sites - using self-signed certificates, and you are absolutely sure that nobody - can modify traffic between the module and the site. - type: bool - default: true - version_added: '0.2.0' -''' - -EXAMPLES = ''' -- name: Add a Red Hat software channel - community.general.rhn_channel: - name: rhel-x86_64-server-v2vwin-6 - sysname: server01 - url: https://rhn.redhat.com/rpc/api - user: rhnuser - password: guessme - delegate_to: localhost -''' - -import ssl -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -def get_systemid(client, session, sysname): - systems = client.system.listUserSystems(session) - for system in systems: - if system.get('name') == sysname: - idres = system.get('id') - idd = int(idres) - return idd - - -def subscribe_channels(channelname, client, session, sysname, sys_id): - channels = base_channels(client, session, sys_id) - channels.append(channelname) - return client.system.setChildChannels(session, sys_id, channels) - - -def unsubscribe_channels(channelname, client, session, sysname, sys_id): - channels = base_channels(client, session, sys_id) - channels.remove(channelname) - return client.system.setChildChannels(session, sys_id, channels) - - -def base_channels(client, session, sys_id): - basechan = client.channel.software.listSystemChannels(session, sys_id) - try: - chans = [item['label'] for item in basechan] - except KeyError: - chans = [item['channel_label'] for item in basechan] - return chans - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - sysname=dict(type='str', required=True), - url=dict(type='str', required=True), - user=dict(type='str', required=True), - password=dict(type='str', required=True, aliases=['pwd'], no_log=True), - validate_certs=dict(type='bool', default=True), - ) - ) - - state = module.params['state'] - channelname = module.params['name'] - systname = module.params['sysname'] - saturl = module.params['url'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - - ssl_context = None - if not validate_certs: - try: # Python 2.7.9 and newer - ssl_context = ssl.create_unverified_context() - except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default - ssl_context = ssl._create_unverified_context() - else: # Python 2.7.8 and older - ssl._create_default_https_context = ssl._create_unverified_https_context - - # initialize connection - if ssl_context: - client = xmlrpc_client.ServerProxy(saturl, context=ssl_context) - else: - client = xmlrpc_client.Server(saturl) - - try: - session = client.auth.login(user, password) - except Exception as e: - module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e)) - - if not session: - module.fail_json(msg="Failed to establish session with Satellite server.") - - # get systemid - try: - sys_id = get_systemid(client, session, systname) - except Exception as e: - module.fail_json(msg="Unable to get system id: %s " % to_text(e)) - - if not sys_id: - module.fail_json(msg="Failed to get system id.") - - # get channels for system 
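# Stand-alone sketch of the XML-RPC conversation this module drives, assuming
# a reachable Satellite/Spacewalk endpoint; the URL, credentials and system id
# below are placeholders. The 'label'/'channel_label' fallback mirrors the
# KeyError handling in base_channels() above.
from ansible.module_utils.six.moves import xmlrpc_client

client = xmlrpc_client.ServerProxy('https://satellite.example.com/rpc/api')
session = client.auth.login('rhnuser', 'guessme')
try:
    for chan in client.channel.software.listSystemChannels(session, 1000010000):
        print(chan.get('label', chan.get('channel_label')))
finally:
    client.auth.logout(session)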
- try: - chans = base_channels(client, session, sys_id) - except Exception as e: - module.fail_json(msg="Unable to get channel information: %s " % to_text(e)) - - try: - if state == 'present': - if channelname in chans: - module.exit_json(changed=False, msg="Channel %s already exists" % channelname) - else: - subscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s added" % channelname) - - if state == 'absent': - if channelname not in chans: - module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname) - else: - unsubscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s removed" % channelname) - except Exception as e: - module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e))) - finally: - client.auth.logout(session) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rhn_register.py b/plugins/modules/packaging/os/rhn_register.py deleted file mode 100644 index 08e9a99e9a..0000000000 --- a/plugins/modules/packaging/os/rhn_register.py +++ /dev/null @@ -1,447 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) James Laska -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: rhn_register -short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command -description: - - Manage registration to the Red Hat Network. -author: -- James Laska (@jlaska) -notes: - - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead. - - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey. -requirements: - - rhnreg_ks - - either libxml2 or lxml -options: - state: - description: - - Whether to register (C(present)), or unregister (C(absent)) a system. - type: str - choices: [ absent, present ] - default: present - username: - description: - - Red Hat Network username. - type: str - password: - description: - - Red Hat Network password. - type: str - server_url: - description: - - Specify an alternative Red Hat Network server URL. - - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date). - type: str - activationkey: - description: - - Supply an activation key for use with registration. - type: str - profilename: - description: - - Supply an profilename for use with registration. - type: str - force: - description: - - Force registration, even if system is already registered. - type: bool - default: no - version_added: 2.0.0 - ca_cert: - description: - - Supply a custom ssl CA certificate file for use with registration. - type: path - aliases: [ sslcacert ] - systemorgid: - description: - - Supply an organizational id for use with registration. - type: str - channels: - description: - - Optionally specify a list of channels to subscribe to upon successful registration. - type: list - elements: str - default: [] - enable_eus: - description: - - If C(no), extended update support will be requested. - type: bool - default: no - nopackages: - description: - - If C(yes), the registered node will not upload its installed packages information to Satellite server. 
- type: bool - default: no -''' - -EXAMPLES = r''' -- name: Unregister system from RHN - community.general.rhn_register: - state: absent - username: joe_user - password: somepass - -- name: Register as user with password and auto-subscribe to available content - community.general.rhn_register: - state: present - username: joe_user - password: somepass - -- name: Register with activationkey and enable extended update support - community.general.rhn_register: - state: present - activationkey: 1-222333444 - enable_eus: yes - -- name: Register with activationkey and set a profilename which may differ from the hostname - community.general.rhn_register: - state: present - activationkey: 1-222333444 - profilename: host.example.com.custom - -- name: Register as user with password against a satellite server - community.general.rhn_register: - state: present - username: joe_user - password: somepass - server_url: https://xmlrpc.my.satellite/XMLRPC - -- name: Register as user with password and enable channels - community.general.rhn_register: - state: present - username: joe_user - password: somepass - channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 - -- name: Force-register as user with password to ensure registration is current on server - community.general.rhn_register: - state: present - username: joe_user - password: somepass - server_url: https://xmlrpc.my.satellite/XMLRPC - force: yes -''' - -RETURN = r''' -# Default return values -''' - -import os -import sys - -# Attempt to import rhn client tools -sys.path.insert(0, '/usr/share/rhn') -try: - import up2date_client - import up2date_client.config - HAS_UP2DATE_CLIENT = True -except ImportError: - HAS_UP2DATE_CLIENT = False - -# INSERT REDHAT SNIPPETS -from ansible_collections.community.general.plugins.module_utils import redhat -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import urllib, xmlrpc_client - - -class Rhn(redhat.RegistrationBase): - - def __init__(self, module=None, username=None, password=None): - redhat.RegistrationBase.__init__(self, module, username, password) - self.config = self.load_config() - self.server = None - self.session = None - - def logout(self): - if self.session is not None: - self.server.auth.logout(self.session) - - def load_config(self): - ''' - Read configuration from /etc/sysconfig/rhn/up2date - ''' - if not HAS_UP2DATE_CLIENT: - return None - - config = up2date_client.config.initUp2dateConfig() - - return config - - @property - def server_url(self): - return self.config['serverURL'] - - @property - def hostname(self): - ''' - Return the non-xmlrpc RHN hostname. This is a convenience method - used for displaying a more readable RHN hostname. - - Returns: str - ''' - url = urllib.parse.urlparse(self.server_url) - return url[1].replace('xmlrpc.', '') - - @property - def systemid(self): - systemid = None - xpath_str = "//member[name='system_id']/value/string" - - if os.path.isfile(self.config['systemIdPath']): - fd = open(self.config['systemIdPath'], 'r') - xml_data = fd.read() - fd.close() - - # Ugh, xml parsing time ... - # First, try parsing with libxml2 ... - if systemid is None: - try: - import libxml2 - doc = libxml2.parseDoc(xml_data) - ctxt = doc.xpathNewContext() - systemid = ctxt.xpathEval(xpath_str)[0].content - doc.freeDoc() - ctxt.xpathFreeContext() - except ImportError: - pass - - # m-kay, let's try with lxml now ... 
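# Self-contained illustration of the lxml fallback used just below: the
# systemid file is an XML-RPC serialized struct, and the xpath selects the
# 'system_id' member. The sample document is a simplified stand-in for the
# real /etc/sysconfig/rhn/systemid contents.
from lxml import etree  # the module accepts either libxml2 or lxml

sample = b"""<params><param><value><struct>
<member><name>system_id</name><value><string>ID-1000010000</string></value></member>
</struct></value></param></params>"""

root = etree.fromstring(sample)
systemid = root.xpath("//member[name='system_id']/value/string")[0].text
# Strip the 'ID-' prefix, as the property below does, and coerce to int.
print(int(systemid[3:] if systemid.startswith('ID-') else systemid))  # 1000010000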
- if systemid is None: - try: - from lxml import etree - root = etree.fromstring(xml_data) - systemid = root.xpath(xpath_str)[0].text - except ImportError: - raise Exception('"libxml2" or "lxml" is required for this module.') - - # Strip the 'ID-' prefix - if systemid is not None and systemid.startswith('ID-'): - systemid = systemid[3:] - - return int(systemid) - - @property - def is_registered(self): - ''' - Determine whether the current system is registered. - - Returns: True|False - ''' - return os.path.isfile(self.config['systemIdPath']) - - def configure_server_url(self, server_url): - ''' - Configure server_url for registration - ''' - - self.config.set('serverURL', server_url) - self.config.save() - - def enable(self): - ''' - Prepare the system for RHN registration. This includes ... - * enabling the rhnplugin yum plugin - * disabling the subscription-manager yum plugin - ''' - redhat.RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', True) - self.update_plugin_conf('subscription-manager', False) - - def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False): - ''' - Register system to RHN. If enable_eus=True, extended update - support will be requested. - ''' - register_cmd = ['/usr/sbin/rhnreg_ks', '--force'] - if self.username: - register_cmd.extend(['--username', self.username, '--password', self.password]) - if self.server_url: - register_cmd.extend(['--serverUrl', self.server_url]) - if enable_eus: - register_cmd.append('--use-eus-channel') - if nopackages: - register_cmd.append('--nopackages') - if activationkey is not None: - register_cmd.extend(['--activationkey', activationkey]) - if profilename is not None: - register_cmd.extend(['--profilename', profilename]) - if sslcacert is not None: - register_cmd.extend(['--sslCACert', sslcacert]) - if systemorgid is not None: - register_cmd.extend(['--systemorgid', systemorgid]) - rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True) - - def api(self, method, *args): - ''' - Convenience RPC wrapper - ''' - if self.server is None: - if self.hostname != 'rhn.redhat.com': - url = "https://%s/rpc/api" % self.hostname - else: - url = "https://xmlrpc.%s/rpc/api" % self.hostname - self.server = xmlrpc_client.ServerProxy(url) - self.session = self.server.auth.login(self.username, self.password) - - func = getattr(self.server, method) - return func(self.session, *args) - - def unregister(self): - ''' - Unregister a previously registered system - ''' - - # Initiate RPC connection - self.api('system.deleteSystems', [self.systemid]) - - # Remove systemid file - os.unlink(self.config['systemIdPath']) - - def subscribe(self, channels): - if not channels: - return - - if self._is_hosted(): - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - new_channels = [item['channel_label'] for item in current_channels] - new_channels.extend(channels) - return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels)) - - else: - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - current_channels = [item['label'] for item in current_channels] - new_base = None - new_childs = [] - for ch in channels: - if ch in current_channels: - continue - if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '': - new_base = ch - else: - if ch not in new_childs: - new_childs.append(ch) - out_base = 0 - out_childs = 0 - - if new_base: - out_base = 
self.api('system.setBaseChannel', self.systemid, new_base) - - if new_childs: - out_childs = self.api('system.setChildChannels', self.systemid, new_childs) - - return out_base and out_childs - - def _is_hosted(self): - ''' - Return True if we are running against Hosted (rhn.redhat.com) or - False otherwise (when running against Satellite or Spacewalk) - ''' - return 'rhn.redhat.com' in self.hostname - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - username=dict(type='str'), - password=dict(type='str', no_log=True), - server_url=dict(type='str'), - activationkey=dict(type='str', no_log=True), - profilename=dict(type='str'), - ca_cert=dict(type='path', aliases=['sslcacert']), - systemorgid=dict(type='str'), - enable_eus=dict(type='bool', default=False), - force=dict(type='bool', default=False), - nopackages=dict(type='bool', default=False), - channels=dict(type='list', elements='str', default=[]), - ), - # username/password is required for state=absent, or if channels is not empty - # (basically anything that uses self.api requires username/password) but it doesn't - # look like we can express that with required_if/required_together/mutually_exclusive - - # only username+password can be used for unregister - required_if=[['state', 'absent', ['username', 'password']]], - ) - - if not HAS_UP2DATE_CLIENT: - module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?") - - server_url = module.params['server_url'] - username = module.params['username'] - password = module.params['password'] - - state = module.params['state'] - force = module.params['force'] - activationkey = module.params['activationkey'] - profilename = module.params['profilename'] - sslcacert = module.params['ca_cert'] - systemorgid = module.params['systemorgid'] - channels = module.params['channels'] - enable_eus = module.params['enable_eus'] - nopackages = module.params['nopackages'] - - rhn = Rhn(module=module, username=username, password=password) - - # use the provided server url and persist it to the rhn config. - if server_url: - rhn.configure_server_url(server_url) - - if not rhn.server_url: - module.fail_json( - msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)" - ) - - # Ensure system is registered - if state == 'present': - - # Check for missing parameters ... - if not (activationkey or rhn.username or rhn.password): - module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, - rhn.password)) - if not activationkey and not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") - - # Register system - if rhn.is_registered and not force: - module.exit_json(changed=False, msg="System already registered.") - - try: - rhn.enable() - rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages) - rhn.subscribe(channels) - except Exception as exc: - module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc)) - finally: - rhn.logout() - - module.exit_json(changed=True, msg="System successfully registered to '%s'." 
% rhn.hostname) - - # Ensure system is *not* registered - if state == 'absent': - if not rhn.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - - if not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password") - - try: - rhn.unregister() - except Exception as exc: - module.fail_json(msg="Failed to unregister: %s" % exc) - finally: - rhn.logout() - - module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/packaging/os/rhsm_release.py deleted file mode 100644 index 4b76cee274..0000000000 --- a/plugins/modules/packaging/os/rhsm_release.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2018, Sean Myers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: rhsm_release -short_description: Set or Unset RHSM Release version -description: - - Sets or unsets the release version used by RHSM repositories. -notes: - - This module will fail on an unregistered system. - Use the C(redhat_subscription) module to register a system - prior to setting the RHSM release. -requirements: - - Red Hat Enterprise Linux 6+ with subscription-manager installed -options: - release: - description: - - RHSM release version to use (use null to unset) - required: true - type: str -author: - - Sean Myers (@seandst) -''' - -EXAMPLES = ''' -# Set release version to 7.1 -- name: Set RHSM release version - community.general.rhsm_release: - release: "7.1" - -# Set release version to 6Server -- name: Set RHSM release version - community.general.rhsm_release: - release: "6Server" - -# Unset release version -- name: Unset RHSM release release - community.general.rhsm_release: - release: null -''' - -RETURN = ''' -current_release: - description: The current RHSM release version value - returned: success - type: str -''' - -from ansible.module_utils.basic import AnsibleModule - -import re - -# Matches release-like values such as 7.2, 5.10, 6Server, 8 -# but rejects unlikely values, like 100Server, 1.100, 7server etc. -release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b') - - -def _sm_release(module, *args): - # pass args to s-m release, e.g. 
_sm_release(module, '--set', '0.1') becomes - # "subscription-manager release --set 0.1" - sm_bin = module.get_bin_path('subscription-manager', required=True) - cmd = '{0} release {1}'.format(sm_bin, " ".join(args)) - # delegate nonzero rc handling to run_command - return module.run_command(cmd, check_rc=True) - - -def get_release(module): - # Get the current release version, or None if release unset - rc, out, err = _sm_release(module, '--show') - try: - match = release_matcher.findall(out)[0] - except IndexError: - # 0'th index did not exist; no matches - match = None - - return match - - -def set_release(module, release): - # Set current release version, or unset if release is None - if release is None: - args = ('--unset',) - else: - args = ('--set', release) - - return _sm_release(module, *args) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - release=dict(type='str', required=True), - ), - supports_check_mode=True - ) - - target_release = module.params['release'] - - # sanity check: the target release at least looks like a valid release - if target_release and not release_matcher.findall(target_release): - module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release)) - - # Will fail with useful error from s-m if system not subscribed - current_release = get_release(module) - - changed = (target_release != current_release) - if not module.check_mode and changed: - set_release(module, target_release) - # If setting the release fails, then a fail_json would have exited with - # the s-m error, e.g. "No releases match '7.20'...". If not, then the - # current release is now set to the target release (job's done) - current_release = target_release - - module.exit_json(current_release=current_release, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/packaging/os/rhsm_repository.py deleted file mode 100644 index b103ea621a..0000000000 --- a/plugins/modules/packaging/os/rhsm_repository.py +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: rhsm_repository -short_description: Manage RHSM repositories using the subscription-manager command -description: - - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription - Management entitlement platform using the C(subscription-manager) command. -author: Giovanni Sciortino (@giovannisciortino) -notes: - - In order to manage RHSM repositories the system must be already registered - to RHSM manually or using the Ansible C(redhat_subscription) module. - -requirements: - - subscription-manager -options: - state: - description: - - If state is equal to present or disabled, indicates the desired - repository state. - choices: [present, enabled, absent, disabled] - default: "enabled" - type: str - name: - description: - - The ID of repositories to enable. - - To operate on several repositories this can accept a comma separated - list or a YAML list. - required: True - type: list - elements: str - purge: - description: - - Disable all currently enabled repositories that are not not specified in C(name). - Only set this to C(True) if passing in a list of repositories to the C(name) field. 
- Using this with C(loop) will most likely not have the desired result. - type: bool - default: no -''' - -EXAMPLES = ''' -- name: Enable a RHSM repository - community.general.rhsm_repository: - name: rhel-7-server-rpms - -- name: Disable all RHSM repositories - community.general.rhsm_repository: - name: '*' - state: disabled - -- name: Enable all repositories starting with rhel-6-server - community.general.rhsm_repository: - name: rhel-6-server* - state: enabled - -- name: Disable all repositories except rhel-7-server-rpms - community.general.rhsm_repository: - name: rhel-7-server-rpms - purge: True -''' - -RETURN = ''' -repositories: - description: - - The list of RHSM repositories with their states. - - When this module is used to change the repository states, this list contains the updated states after the changes. - returned: success - type: list -''' - -import re -import os -from fnmatch import fnmatch -from copy import deepcopy -from ansible.module_utils.basic import AnsibleModule - - -def run_subscription_manager(module, arguments): - # Execute subscription-manager with arguments and manage common errors - rhsm_bin = module.get_bin_path('subscription-manager') - if not rhsm_bin: - module.fail_json(msg='The executable file subscription-manager was not found in PATH') - - lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env) - - if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0): - module.fail_json(msg='The executable file subscription-manager must be run using root privileges') - elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n': - module.fail_json(msg='This system has no repositories available through subscriptions') - elif rc == 1: - module.fail_json(msg='subscription-manager failed with the following error: %s' % err) - else: - return rc, out, err - - -def get_repository_list(module, list_parameter): - # Generate RHSM repository list and return a list of dict - if list_parameter == 'list_enabled': - rhsm_arguments = ['repos', '--list-enabled'] - elif list_parameter == 'list_disabled': - rhsm_arguments = ['repos', '--list-disabled'] - elif list_parameter == 'list': - rhsm_arguments = ['repos', '--list'] - rc, out, err = run_subscription_manager(module, rhsm_arguments) - - skip_lines = [ - '+----------------------------------------------------------+', - ' Available Repositories in /etc/yum.repos.d/redhat.repo' - ] - repo_id_re = re.compile(r'Repo ID:\s+(.*)') - repo_name_re = re.compile(r'Repo Name:\s+(.*)') - repo_url_re = re.compile(r'Repo URL:\s+(.*)') - repo_enabled_re = re.compile(r'Enabled:\s+(.*)') - - repo_id = '' - repo_name = '' - repo_url = '' - repo_enabled = '' - - repo_result = [] - for line in out.splitlines(): - if line == '' or line in skip_lines: - continue - - repo_id_match = repo_id_re.match(line) - if repo_id_match: - repo_id = repo_id_match.group(1) - continue - - repo_name_match = repo_name_re.match(line) - if repo_name_match: - repo_name = repo_name_match.group(1) - continue - - repo_url_match = repo_url_re.match(line) - if repo_url_match: - repo_url = repo_url_match.group(1) - continue - - repo_enabled_match = repo_enabled_re.match(line) - if repo_enabled_match: - repo_enabled = repo_enabled_match.group(1) - - repo = { - "id": repo_id, - "name": repo_name, - "url": repo_url, - "enabled": True if repo_enabled == '1' else False - } - - repo_result.append(repo) - 
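# The block parser above can be exercised against canned 'subscription-manager
# repos --list' output; this sketch replays its key/value regex walk over one
# record (the repository id and URL are illustrative only).
import re

SAMPLE = """\
Repo ID:   rhel-7-server-rpms
Repo Name: Red Hat Enterprise Linux 7 Server (RPMs)
Repo URL:  https://cdn.example.com/content/dist/rhel/server/7/os
Enabled:   1
"""

repo = {}
for line in SAMPLE.splitlines():
    match = re.match(r'(Repo ID|Repo Name|Repo URL|Enabled):\s+(.*)', line)
    if match:
        repo[match.group(1)] = match.group(2)
print(repo['Repo ID'], repo['Enabled'] == '1')  # rhel-7-server-rpms True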
- return repo_result - - -def repository_modify(module, state, name, purge=False): - name = set(name) - current_repo_list = get_repository_list(module, 'list') - updated_repo_list = deepcopy(current_repo_list) - matched_existing_repo = {} - for repoid in name: - matched_existing_repo[repoid] = [] - for idx, repo in enumerate(current_repo_list): - if fnmatch(repo['id'], repoid): - matched_existing_repo[repoid].append(repo) - # Update current_repo_list to return it as result variable - updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False - - changed = False - results = [] - diff_before = "" - diff_after = "" - rhsm_arguments = ['repos'] - - for repoid in matched_existing_repo: - if len(matched_existing_repo[repoid]) == 0: - results.append("%s is not a valid repository ID" % repoid) - module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid) - for repo in matched_existing_repo[repoid]: - if state in ['disabled', 'absent']: - if repo['enabled']: - changed = True - diff_before += "Repository '%s' is enabled for this system\n" % repo['id'] - diff_after += "Repository '%s' is disabled for this system\n" % repo['id'] - results.append("Repository '%s' is disabled for this system" % repo['id']) - rhsm_arguments += ['--disable', repo['id']] - elif state in ['enabled', 'present']: - if not repo['enabled']: - changed = True - diff_before += "Repository '%s' is disabled for this system\n" % repo['id'] - diff_after += "Repository '%s' is enabled for this system\n" % repo['id'] - results.append("Repository '%s' is enabled for this system" % repo['id']) - rhsm_arguments += ['--enable', repo['id']] - - # Disable all enabled repos on the system that are not in the task and not - # marked as disabled by the task - if purge: - enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled']) - matched_repoids_set = set(matched_existing_repo.keys()) - difference = enabled_repo_ids.difference(matched_repoids_set) - if len(difference) > 0: - for repoid in difference: - changed = True - diff_before.join("Repository '{repoid}'' is enabled for this system\n".format(repoid=repoid)) - diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)) - results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid)) - rhsm_arguments.extend(['--disable', repoid]) - - diff = {'before': diff_before, - 'after': diff_after, - 'before_header': "RHSM repositories", - 'after_header': "RHSM repositories"} - - if not module.check_mode and changed: - rc, out, err = run_subscription_manager(module, rhsm_arguments) - results = out.splitlines() - module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='list', elements='str', required=True), - state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'), - purge=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - name = module.params['name'] - state = module.params['state'] - purge = module.params['purge'] - - repository_modify(module, state, name, purge) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rpm_ostree_pkg.py b/plugins/modules/packaging/os/rpm_ostree_pkg.py deleted file mode 100644 index 38e2486ddc..0000000000 --- a/plugins/modules/packaging/os/rpm_ostree_pkg.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, 
Dusty Mabe -# Copyright: (c) 2018, Ansible Project -# Copyright: (c) 2021, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rpm_ostree_pkg -short_description: Install or uninstall overlay additional packages -version_added: "2.0.0" -description: - - Install or uninstall overlay additional packages using C(rpm-ostree) command. -options: - name: - description: - - Name of overlay package to install or remove. - required: true - type: list - elements: str - aliases: [ pkg ] - state: - description: - - State of the overlay package. - - C(present) simply ensures that a desired package is installed. - - C(absent) removes the specified package. - choices: [ 'absent', 'present' ] - default: 'present' - type: str -author: -- Dusty Mabe (@dustymabe) -- Abhijeet Kasurde (@Akasurde) -notes: -- Does not support C(check_mode). -''' - -EXAMPLES = r''' -- name: Install overlay package - community.general.rpm_ostree_pkg: - name: nfs-utils - state: present - -- name: Remove overlay package - community.general.rpm_ostree_pkg: - name: nfs-utils - state: absent -''' - -RETURN = r''' -rc: - description: Return code of rpm-ostree command. - returned: always - type: int - sample: 0 -changed: - description: State changes. - returned: always - type: bool - sample: True -action: - description: Action performed. - returned: always - type: str - sample: 'install' -packages: - description: A list of packages specified. - returned: always - type: list - sample: ['nfs-utils'] -stdout: - description: Stdout of rpm-ostree command. - returned: always - type: str - sample: 'Staging deployment...done\n...' -stderr: - description: Stderr of rpm-ostree command. - returned: always - type: str - sample: '' -cmd: - description: Full command used for performed action. - returned: always - type: str - sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils' -''' - -from ansible.module_utils.basic import AnsibleModule - - -class RpmOstreePkg: - def __init__(self, module): - self.module = module - self.params = module.params - self.state = module.params['state'] - - def ensure(self): - results = dict( - rc=0, - changed=False, - action='', - packages=[], - stdout='', - stderr='', - cmd='', - ) - - # Ensure rpm-ostree command exists - cmd = [self.module.get_bin_path('rpm-ostree', required=True)] - - # Decide action to perform - if self.state in ('present'): - results['action'] = 'install' - cmd.append('install') - elif self.state in ('absent'): - results['action'] = 'uninstall' - cmd.append('uninstall') - - # Additional parameters - cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77']) - for pkg in self.params['name']: - cmd.append(pkg) - results['packages'].append(pkg) - - rc, out, err = self.module.run_command(cmd) - - results.update(dict( - rc=rc, - cmd=' '.join(cmd), - stdout=out, - stderr=err, - )) - - # A few possible options: - # - rc=0 - succeeded in making a change - # - rc=77 - no change was needed - # - rc=? 
- error - if rc == 0: - results['changed'] = True - elif rc == 77: - results['changed'] = False - results['rc'] = 0 - else: - self.module.fail_json(msg='non-zero return code', **results) - - self.module.exit_json(**results) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict( - default="present", - choices=['absent', 'present'] - ), - name=dict( - aliases=["pkg"], - required=True, - type='list', - elements='str', - ), - ), - ) - - rpm_ostree_pkg = RpmOstreePkg(module) - rpm_ostree_pkg.ensure() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/slackpkg.py b/plugins/modules/packaging/os/slackpkg.py deleted file mode 100644 index b556d8be3d..0000000000 --- a/plugins/modules/packaging/os/slackpkg.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Kim Nørgaard -# Written by Kim Nørgaard -# Based on pkgng module written by bleader -# that was based on pkgin module written by Shaun Zinck -# that was based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: slackpkg -short_description: Package manager for Slackware >= 12.2 -description: - - Manage binary packages for Slackware using 'slackpkg' which - is available in versions after 12.2. -options: - name: - description: - - name of package to install/remove - required: true - type: list - elements: str - aliases: [pkg] - - state: - description: - - state of the package, you can use "installed" as an alias for C(present) and removed as one for C(absent). - choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ] - required: false - default: present - type: str - - update_cache: - description: - - update the package database first - - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. 
- required: false - default: false - type: bool - aliases: [update-cache] - -author: Kim Nørgaard (@KimNorgaard) -requirements: [ "Slackware >= 12.2" ] -''' - -EXAMPLES = ''' -- name: Install package foo - community.general.slackpkg: - name: foo - state: present - -- name: Remove packages foo and bar - community.general.slackpkg: - name: foo,bar - state: absent - -- name: Make sure that it is the most updated package - community.general.slackpkg: - name: foo - state: latest -''' - -from ansible.module_utils.basic import AnsibleModule - - -def query_package(module, slackpkg_path, name): - - import platform - import os - import re - - machine = platform.machine() - # Exception for kernel-headers package on x86_64 - if name == 'kernel-headers' and machine == 'x86_64': - machine = 'x86' - pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine))) - packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)] - - if len(packages) > 0: - return True - - return False - - -def remove_packages(module, slackpkg_path, packages): - - remove_c = 0 - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, slackpkg_path, package): - continue - - if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - remove %s" % (slackpkg_path, - package)) - - if not module.check_mode and query_package(module, slackpkg_path, - package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, slackpkg_path, packages): - - install_c = 0 - - for package in packages: - if query_package(module, slackpkg_path, package): - continue - - if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - install %s" % (slackpkg_path, - package)) - - if not module.check_mode and not query_package(module, slackpkg_path, - package): - module.fail_json(msg="failed to install %s: %s" % (package, out), - stderr=err) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" - % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def upgrade_packages(module, slackpkg_path, packages): - install_c = 0 - - for package in packages: - if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - upgrade %s" % (slackpkg_path, - package)) - - if not module.check_mode and not query_package(module, slackpkg_path, - package): - module.fail_json(msg="failed to install %s: %s" % (package, out), - stderr=err) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" - % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def update_cache(module, slackpkg_path): - rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path)) - if rc != 0: - module.fail_json(msg="Could not update package cache") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']), - name=dict(aliases=["pkg"], required=True, type='list', elements='str'), - update_cache=dict( - default=False, 
aliases=["update-cache"], type='bool', - deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), - ), - supports_check_mode=True) - - slackpkg_path = module.get_bin_path('slackpkg', True) - - p = module.params - - pkgs = p['name'] - - if p["update_cache"]: - update_cache(module, slackpkg_path) - - if p['state'] == 'latest': - upgrade_packages(module, slackpkg_path, pkgs) - - elif p['state'] in ['present', 'installed']: - install_packages(module, slackpkg_path, pkgs) - - elif p["state"] in ['removed', 'absent']: - remove_packages(module, slackpkg_path, pkgs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py deleted file mode 100644 index 578abe215c..0000000000 --- a/plugins/modules/packaging/os/snap.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Alexei Znamensky (russoz) -# Copyright: (c) 2018, Stanislas Lange (angristan) -# Copyright: (c) 2018, Victor Carceler - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: snap -short_description: Manages snaps -description: - - "Manages snaps packages." -options: - name: - description: - - Name of the snaps. - required: true - type: list - elements: str - state: - description: - - Desired state of the package. - required: false - default: present - choices: [ absent, present, enabled, disabled ] - type: str - classic: - description: - - Confinement policy. The classic confinement allows a snap to have - the same level of access to the system as "classic" packages, - like those managed by APT. This option corresponds to the --classic argument. - This option can only be specified if there is a single snap in the task. - type: bool - required: false - default: no - channel: - description: - - Define which release of a snap is installed and tracked for updates. - This option can only be specified if there is a single snap in the task. 
- type: str - required: false - default: stable - -author: - - Victor Carceler (@vcarceler) - - Stanislas Lange (@angristan) - -seealso: - - module: community.general.snap_alias -''' - -EXAMPLES = ''' -# Install "foo" and "bar" snap -- name: Install foo - community.general.snap: - name: - - foo - - bar - -# Remove "foo" snap -- name: Remove foo - community.general.snap: - name: foo - state: absent - -# Install a snap with classic confinement -- name: Install "foo" with option --classic - community.general.snap: - name: foo - classic: yes - -# Install a snap with from a specific channel -- name: Install "foo" with option --channel=latest/edge - community.general.snap: - name: foo - channel: latest/edge -''' - -RETURN = ''' -classic: - description: Whether or not the snaps were installed with the classic confinement - type: bool - returned: When snaps are installed -channel: - description: The channel the snaps were installed from - type: str - returned: When snaps are installed -cmd: - description: The command that was executed on the host - type: str - returned: When changed is true -snaps_installed: - description: The list of actually installed snaps - type: list - returned: When any snaps have been installed -snaps_removed: - description: The list of actually removed snaps - type: list - returned: When any snaps have been removed -''' - -import re - -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - CmdStateModuleHelper, ArgFormat, ModuleHelperException -) - - -__state_map = dict( - present='install', - absent='remove', - enabled='enable', - disabled='disable', - info='info', # not public - list='list', # not public -) - - -def _state_map(value): - return [__state_map[value]] - - -class Snap(CmdStateModuleHelper): - __disable_re = re.compile(r'(?:\S+\s+){5}(?P\S+)') - module = dict( - argument_spec={ - 'name': dict(type='list', elements='str', required=True), - 'state': dict(type='str', default='present', - choices=['absent', 'present', 'enabled', 'disabled']), - 'classic': dict(type='bool', default=False), - 'channel': dict(type='str', default='stable'), - }, - supports_check_mode=True, - ) - command = "snap" - command_args_formats = dict( - actionable_snaps=dict(fmt=lambda v: v), - state=dict(fmt=_state_map), - classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN), - channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]), - ) - check_rc = False - - @staticmethod - def _first_non_zero(a): - for elem in a: - if elem != 0: - return elem - - return 0 - - def _run_multiple_commands(self, commands): - outputs = [(c,) + self.run_command(params=c) for c in commands] - results = ([], [], [], []) - for output in outputs: - for i in range(4): - results[i].append(output[i]) - - return [ - '; '.join([to_native(x) for x in results[0]]), - self._first_non_zero(results[1]), - '\n'.join(results[2]), - '\n'.join(results[3]), - ] - - def is_snap_installed(self, snap_name): - return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0] - - def is_snap_enabled(self, snap_name): - rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': snap_name}]) - if rc != 0: - return None - result = out.splitlines()[1] - match = self.__disable_re.match(result) - if not match: - raise ModuleHelperException(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out)) - notes = match.group('notes') - return "disabled" not in notes.split(',') 
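# How the __disable_re pattern above reads a 'snap list <name>' row: five
# whitespace-separated columns (Name, Version, Rev, Tracking, Publisher) are
# skipped and the sixth, Notes, is captured as a named group (the group name
# '<notes>' was lost in the patch rendering above). The sample row is an
# assumption based on typical snap output.
import re

disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
row = 'hello-world  6.4  29  latest/stable  canonical*  disabled'
print('disabled' in disable_re.match(row).group('notes').split(','))  # True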
- - def state_present(self): - self.vars.meta('classic').set(output=True) - self.vars.meta('channel').set(output=True) - actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] - if not actionable_snaps: - return - self.changed = True - self.vars.snaps_installed = actionable_snaps - if self.module.check_mode: - return - params = ['state', 'classic', 'channel'] # get base cmd parts - has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' - has_multiple_snaps = len(actionable_snaps) > 1 - if has_one_pkg_params and has_multiple_snaps: - commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps] - else: - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - - classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P\w+)"' - r' was published using classic confinement') - match = classic_snap_pattern.match(err) - if match: - err_pkg = match.group('package_name') - msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) - else: - msg = "Ooops! Snap installation failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - raise ModuleHelperException(msg=msg) - - def _generic_state_action(self, actionable_func, actionable_var, params=None): - actionable_snaps = [s for s in self.vars.name if actionable_func(s)] - if not actionable_snaps: - return - self.changed = True - self.vars[actionable_var] = actionable_snaps - if self.module.check_mode: - return - if params is None: - params = ['state'] - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - msg = "Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - raise ModuleHelperException(msg=msg) - - def state_absent(self): - self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state']) - - def state_enabled(self): - self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) - - def state_disabled(self): - self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) - - -def main(): - snap = Snap() - snap.run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/snap_alias.py b/plugins/modules/packaging/os/snap_alias.py deleted file mode 100644 index 036be12004..0000000000 --- a/plugins/modules/packaging/os/snap_alias.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Alexei Znamensky (russoz) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: snap_alias -short_description: Manages snap aliases -version_added: 4.0.0 -description: - - "Manages snaps aliases." -options: - state: - description: - - Desired state of the alias. - type: str - choices: [ absent, present ] - default: present - name: - description: - - Name of the snap. - type: str - alias: - description: - - Aliases to be created or removed. 
-    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>[\w-]+)\s+(?P<alias>[\w-]+)\s+.*$")
-
-    module = dict(
-        argument_spec={
-            'state': dict(type='str', choices=['absent', 'present'], default='present'),
-            'name': dict(type='str'),
-            'alias': dict(type='list', elements='str', aliases=['aliases']),
-        },
-        required_if=[
-            ('state', 'present', ['name', 'alias']),
-            ('state', 'absent', ['name', 'alias'], True),
-        ],
-        supports_check_mode=True,
-    )
-    command = "snap"
-    command_args_formats = dict(
-        _alias=dict(fmt=lambda v: [v]),
-        state=dict(fmt=lambda v: [_state_map[v]]),
-    )
-    check_rc = False
-
-    def _aliases(self):
-        n = self.vars.name
-        return {n: self._get_aliases_for(n)} if n else self._get_aliases()
-
-    def __init_module__(self):
-        self.vars.set("snap_aliases", self._aliases(), change=True, diff=True)
-
-    def __quit_module__(self):
-        self.vars.snap_aliases = self._aliases()
-
-    def _get_aliases(self):
-        def process_get_aliases(rc, out, err):
-            if err:
-                return {}
-            aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]]
-            snap_alias_list = [(entry.group("snap"), entry.group("alias")) for entry in aliases]
-            results = {}
-            for snap, alias in snap_alias_list:
-                results[snap] = results.get(snap, []) + [alias]
-            return results
-
-        return self.run_command(params=[{'state': 'info'}, 'name'], check_rc=True,
-                                publish_rc=False, publish_out=False, publish_err=False, publish_cmd=False,
-                                process_output=process_get_aliases)
-
-    def _get_aliases_for(self, name):
-        return self._get_aliases().get(name, [])
-
-    def _has_alias(self, name=None, alias=None):
-        if name:
-            if name not in self.vars.snap_aliases:
-                return False
-            if alias is None:
-                return bool(self.vars.snap_aliases[name])
-            return alias in self.vars.snap_aliases[name]
-
-        return any(alias in aliases for aliases in self.vars.snap_aliases.values())
-
-    def state_present(self):
-        for alias in self.vars.alias:
-            if not self._has_alias(self.vars.name, alias):
-                self.changed = True
-                if not self.module.check_mode:
-                    self.run_command(params=['state', 'name', {'_alias': alias}])
-
-    def state_absent(self):
-        if not self.vars.alias:
-            if self._has_alias(self.vars.name):
-                self.changed = True
-                if not self.module.check_mode:
-                    self.run_command(params=['state', 'name'])
-        else:
-            for alias in self.vars.alias:
-                if self._has_alias(self.vars.name, alias):
-                    self.changed = True
-                    if not self.module.check_mode:
-                        self.run_command(params=['state', {'_alias': 
alias}]) - - -def main(): - SnapAlias.execute() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/sorcery.py b/plugins/modules/packaging/os/sorcery.py deleted file mode 100644 index 347413fc9d..0000000000 --- a/plugins/modules/packaging/os/sorcery.py +++ /dev/null @@ -1,644 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015-2016, Vlad Glagolev -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sorcery -short_description: Package manager for Source Mage GNU/Linux -description: - - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain -author: "Vlad Glagolev (@vaygr)" -notes: - - When all three components are selected, the update goes by the sequence -- - Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. - - grimoire handling (i.e. add/remove, including SCM/rsync versions) is not - yet supported. -requirements: - - bash -options: - name: - description: - - Name of the spell - - multiple names can be given, separated by commas - - special value '*' in conjunction with states C(latest) or - C(rebuild) will update or rebuild the whole system respectively - aliases: ["spell"] - type: list - elements: str - - state: - description: - - Whether to cast, dispel or rebuild a package - - state C(cast) is an equivalent of C(present), not C(latest) - - state C(latest) always triggers C(update_cache=yes) - - state C(rebuild) implies cast of all specified spells, not only - those existed before - choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] - default: "present" - type: str - - depends: - description: - - Comma-separated list of _optional_ dependencies to build a spell - (or make sure it is built) with; use +/- in front of dependency - to turn it on/off ('+' is optional though) - - this option is ignored if C(name) parameter is equal to '*' or - contains more than one spell - - providers must be supplied in the form recognized by Sorcery, e.g. 
- 'openssl(SSL)' - type: str - - update: - description: - - Whether or not to update sorcery scripts at the very first stage - type: bool - default: no - - update_cache: - description: - - Whether or not to update grimoire collection before casting spells - type: bool - default: no - aliases: ["update_codex"] - - cache_valid_time: - description: - - Time in seconds to invalidate grimoire collection on update - - especially useful for SCM and rsync grimoires - - makes sense only in pair with C(update_cache) - type: int -''' - - -EXAMPLES = ''' -- name: Make sure spell foo is installed - community.general.sorcery: - spell: foo - state: present - -- name: Make sure spells foo, bar and baz are removed - community.general.sorcery: - spell: foo,bar,baz - state: absent - -- name: Make sure spell foo with dependencies bar and baz is installed - community.general.sorcery: - spell: foo - depends: bar,baz - state: present - -- name: Make sure spell foo with bar and without baz dependencies is installed - community.general.sorcery: - spell: foo - depends: +bar,-baz - state: present - -- name: Make sure spell foo with libressl (providing SSL) dependency is installed - community.general.sorcery: - spell: foo - depends: libressl(SSL) - state: present - -- name: Make sure spells with/without required dependencies (if any) are installed - community.general.sorcery: - name: "{{ item.spell }}" - depends: "{{ item.depends | default(None) }}" - state: present - loop: - - { spell: 'vifm', depends: '+file,-gtk+2' } - - { spell: 'fwknop', depends: 'gpgme' } - - { spell: 'pv,tnftp,tor' } - -- name: Install the latest version of spell foo using regular glossary - community.general.sorcery: - name: foo - state: latest - -- name: Rebuild spell foo - community.general.sorcery: - spell: foo - state: rebuild - -- name: Rebuild the whole system, but update Sorcery and Codex first - community.general.sorcery: - spell: '*' - state: rebuild - update: yes - update_cache: yes - -- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias - community.general.sorcery: - update_codex: yes - cache_valid_time: 86400 - -- name: Update only Sorcery itself - community.general.sorcery: - update: yes -''' - - -RETURN = ''' -''' - - -import datetime -import fileinput -import os -import re -import shutil -import sys - - -# auto-filled at module init -SORCERY = { - 'sorcery': None, - 'scribe': None, - 'cast': None, - 'dispel': None, - 'gaze': None -} - -SORCERY_LOG_DIR = "/var/log/sorcery" -SORCERY_STATE_DIR = "/var/state/sorcery" - - -def get_sorcery_ver(module): - """ Get Sorcery version. """ - - cmd_sorcery = "%s --version" % SORCERY['sorcery'] - - rc, stdout, stderr = module.run_command(cmd_sorcery) - - if rc != 0 or not stdout: - module.fail_json(msg="unable to get Sorcery version") - - return stdout.strip() - - -def codex_fresh(codex, module): - """ Check if grimoire collection is fresh enough. """ - - if not module.params['cache_valid_time']: - return False - - timedelta = datetime.timedelta(seconds=module.params['cache_valid_time']) - - for grimoire in codex: - lastupdate_path = os.path.join(SORCERY_STATE_DIR, - grimoire + ".lastupdate") - - try: - mtime = os.stat(lastupdate_path).st_mtime - except Exception: - return False - - lastupdate_ts = datetime.datetime.fromtimestamp(mtime) - - # if any grimoire is not fresh, we invalidate the Codex - if lastupdate_ts + timedelta < datetime.datetime.now(): - return False - - return True - - -def codex_list(module): - """ List valid grimoire collection. 
""" - - codex = {} - - cmd_scribe = "%s index" % SORCERY['scribe'] - - rc, stdout, stderr = module.run_command(cmd_scribe) - - if rc != 0: - module.fail_json(msg="unable to list grimoire collection, fix your Codex") - - rex = re.compile(r"^\s*\[\d+\] : (?P[\w\-+.]+) : [\w\-+./]+(?: : (?P[\w\-+.]+))?\s*$") - - # drop 4-line header and empty trailing line - for line in stdout.splitlines()[4:-1]: - match = rex.match(line) - - if match: - codex[match.group('grim')] = match.group('ver') - - if not codex: - module.fail_json(msg="no grimoires to operate on; add at least one") - - return codex - - -def update_sorcery(module): - """ Update sorcery scripts. - - This runs 'sorcery update' ('sorcery -u'). Check mode always returns a - positive change value. - - """ - - changed = False - - if module.check_mode: - if not module.params['name'] and not module.params['update_cache']: - module.exit_json(changed=True, msg="would have updated Sorcery") - else: - sorcery_ver = get_sorcery_ver(module) - - cmd_sorcery = "%s update" % SORCERY['sorcery'] - - rc, stdout, stderr = module.run_command(cmd_sorcery) - - if rc != 0: - module.fail_json(msg="unable to update Sorcery: " + stdout) - - if sorcery_ver != get_sorcery_ver(module): - changed = True - - if not module.params['name'] and not module.params['update_cache']: - module.exit_json(changed=changed, - msg="successfully updated Sorcery") - - -def update_codex(module): - """ Update grimoire collections. - - This runs 'scribe update'. Check mode always returns a positive change - value when 'cache_valid_time' is used. - - """ - - params = module.params - - changed = False - - codex = codex_list(module) - fresh = codex_fresh(codex, module) - - if module.check_mode: - if not params['name']: - if not fresh: - changed = True - - module.exit_json(changed=changed, msg="would have updated Codex") - elif not fresh or params['name'] and params['state'] == 'latest': - # SILENT is required as a workaround for query() in libgpg - module.run_command_environ_update.update(dict(SILENT='1')) - - cmd_scribe = "%s update" % SORCERY['scribe'] - - rc, stdout, stderr = module.run_command(cmd_scribe) - - if rc != 0: - module.fail_json(msg="unable to update Codex: " + stdout) - - if codex != codex_list(module): - changed = True - - if not params['name']: - module.exit_json(changed=changed, - msg="successfully updated Codex") - - -def match_depends(module): - """ Check for matching dependencies. - - This inspects spell's dependencies with the desired states and returns - 'False' if a recast is needed to match them. It also adds required lines - to the system-wide depends file for proper recast procedure. 
-    rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
-
-    # drop 4-line header and empty trailing line
-    for line in stdout.splitlines()[4:-1]:
-        match = rex.match(line)
-
-        if match:
-            codex[match.group('grim')] = match.group('ver')
-
-    if not codex:
-        module.fail_json(msg="no grimoires to operate on; add at least one")
-
-    return codex
-
-
-def update_sorcery(module):
-    """ Update sorcery scripts.
-
-    This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
-    positive change value.
-
-    """
-
-    changed = False
-
-    if module.check_mode:
-        if not module.params['name'] and not module.params['update_cache']:
-            module.exit_json(changed=True, msg="would have updated Sorcery")
-    else:
-        sorcery_ver = get_sorcery_ver(module)
-
-        cmd_sorcery = "%s update" % SORCERY['sorcery']
-
-        rc, stdout, stderr = module.run_command(cmd_sorcery)
-
-        if rc != 0:
-            module.fail_json(msg="unable to update Sorcery: " + stdout)
-
-        if sorcery_ver != get_sorcery_ver(module):
-            changed = True
-
-        if not module.params['name'] and not module.params['update_cache']:
-            module.exit_json(changed=changed,
-                             msg="successfully updated Sorcery")
-
-
-def update_codex(module):
-    """ Update grimoire collections.
-
-    This runs 'scribe update'. Check mode always returns a positive change
-    value when 'cache_valid_time' is used.
-
-    """
-
-    params = module.params
-
-    changed = False
-
-    codex = codex_list(module)
-    fresh = codex_fresh(codex, module)
-
-    if module.check_mode:
-        if not params['name']:
-            if not fresh:
-                changed = True
-
-            module.exit_json(changed=changed, msg="would have updated Codex")
-    elif not fresh or params['name'] and params['state'] == 'latest':
-        # SILENT is required as a workaround for query() in libgpg
-        module.run_command_environ_update.update(dict(SILENT='1'))
-
-        cmd_scribe = "%s update" % SORCERY['scribe']
-
-        rc, stdout, stderr = module.run_command(cmd_scribe)
-
-        if rc != 0:
-            module.fail_json(msg="unable to update Codex: " + stdout)
-
-        if codex != codex_list(module):
-            changed = True
-
-        if not params['name']:
-            module.exit_json(changed=changed,
-                             msg="successfully updated Codex")
-
-
-def match_depends(module):
-    """ Check for matching dependencies.
-
-    This inspects spell's dependencies with the desired states and returns
-    'False' if a recast is needed to match them. It also adds required lines
-    to the system-wide depends file for proper recast procedure.
-
-    """
-
-    params = module.params
-    spells = params['name']
-
-    depends = {}
-
-    depends_ok = True
-
-    if len(spells) > 1 or not params['depends']:
-        return depends_ok
-
-    spell = spells[0]
-
-    if module.check_mode:
-        sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
-        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
-
-        try:
-            shutil.copy2(sorcery_depends_orig, sorcery_depends)
-        except IOError:
-            module.fail_json(msg="failed to copy depends.check file")
-    else:
-        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
-
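-    # Illustrative note: each comma-separated token of the `depends` parameter
-    # is matched against the regex below: an optional leading '+' or '-'
-    # (dependency on/off), the dependency name, and an optional provider spec
-    # in parentheses, e.g. "+bar", "-baz" or "libressl(SSL)" as shown in
-    # EXAMPLES above.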
-    rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
-
-    for d in params['depends'].split(','):
-        match = rex.match(d)
-
-        if not match:
-            module.fail_json(msg="wrong depends line for spell '%s'" % spell)
-
-        # normalize status
-        if not match.group('status') or match.group('status') == '+':
-            status = 'on'
-        else:
-            status = 'off'
-
-        depends[match.group('depend')] = status
-
-    # drop providers spec
-    depends_list = [s.split('(')[0] for s in depends]
-
-    cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
-
-    rc, stdout, stderr = module.run_command(cmd_gaze)
-
-    if rc != 0:
-        module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
-
-    fi = fileinput.input(sorcery_depends, inplace=True)
-
-    try:
-        try:
-            for line in fi:
-                if line.startswith(spell + ':'):
-                    match = None
-
-                    for d in depends:
-                        # when local status is 'off' and dependency is provider,
-                        # use only provider value
-                        d_offset = d.find('(')
-
-                        if d_offset == -1:
-                            d_p = ''
-                        else:
-                            d_p = re.escape(d[d_offset:])
-
-                        # .escape() is needed mostly for the spells like 'libsigc++'
-                        rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
-                                         (re.escape(spell), re.escape(d), d_p))
-
-                        match = rex.match(line)
-
-                        # we matched the line "spell:dependency:on|off:optional:"
-                        if match:
-                            # if we also matched the local status, mark dependency
-                            # as empty and put it back into depends file
-                            if match.group('lstatus') == depends[d]:
-                                depends[d] = None
-
-                                sys.stdout.write(line)
-
-                            # status is not that we need, so keep this dependency
-                            # in the list for further reverse switching;
-                            # stop and process the next line in both cases
-                            break
-
-                    if not match:
-                        sys.stdout.write(line)
-                else:
-                    sys.stdout.write(line)
-        except IOError:
-            module.fail_json(msg="I/O error on the depends file")
-    finally:
-        fi.close()
-
-    depends_new = [v for v in depends if depends[v]]
-
-    if depends_new:
-        try:
-            try:
-                fl = open(sorcery_depends, 'a')
-
-                for k in depends_new:
-                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
-            except IOError:
-                module.fail_json(msg="I/O error on the depends file")
-        finally:
-            fl.close()
-
-        depends_ok = False
-
-    if module.check_mode:
-        try:
-            os.remove(sorcery_depends)
-        except IOError:
-            module.fail_json(msg="failed to clean up depends.backup file")
-
-    return depends_ok
-
-
-def manage_spells(module):
-    """ Cast or dispel spells.
-
-    This manages the whole system ('*'), list or a single spell. Command 'cast'
-    is used to install or rebuild spells, while 'dispel' takes care of their
-    removal from the system.
-
-    """
-
-    params = module.params
-    spells = params['name']
-
-    sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
-
-    if spells == '*':
-        if params['state'] == 'latest':
-            # back up original queue
-            try:
-                os.rename(sorcery_queue, sorcery_queue + ".backup")
-            except IOError:
-                module.fail_json(msg="failed to backup the update queue")
-
-            # see update_codex()
-            module.run_command_environ_update.update(dict(SILENT='1'))
-
-            cmd_sorcery = "%s queue" % SORCERY['sorcery']
-
-            rc, stdout, stderr = module.run_command(cmd_sorcery)
-
-            if rc != 0:
-                module.fail_json(msg="failed to generate the update queue")
-
-            try:
-                queue_size = os.stat(sorcery_queue).st_size
-            except Exception:
-                module.fail_json(msg="failed to read the update queue")
-
-            if queue_size != 0:
-                if module.check_mode:
-                    try:
-                        os.rename(sorcery_queue + ".backup", sorcery_queue)
-                    except IOError:
-                        module.fail_json(msg="failed to restore the update queue")
-
-                    module.exit_json(changed=True, msg="would have updated the system")
-
-                cmd_cast = "%s --queue" % SORCERY['cast']
-
-                rc, stdout, stderr = module.run_command(cmd_cast)
-
-                if rc != 0:
-                    module.fail_json(msg="failed to update the system")
-
-                module.exit_json(changed=True, msg="successfully updated the system")
-            else:
-                module.exit_json(changed=False, msg="the system is already up to date")
-        elif params['state'] == 'rebuild':
-            if module.check_mode:
-                module.exit_json(changed=True, msg="would have rebuilt the system")
-
-            cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
-
-            rc, stdout, stderr = module.run_command(cmd_sorcery)
-
-            if rc != 0:
-                module.fail_json(msg="failed to rebuild the system: " + stdout)
-
-            module.exit_json(changed=True, msg="successfully rebuilt the system")
-        else:
-            module.fail_json(msg="unsupported operation on '*' name value")
-    else:
-        if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
-            # extract versions from the 'gaze' command
-            cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
-
-            rc, stdout, stderr = module.run_command(cmd_gaze)
-
-            # fail if any of spells cannot be found
-            if rc != 0:
-                module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
-                                 ', '.join(spells))
-
-            cast_queue = []
-            dispel_queue = []
-
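-            # Illustrative note: `gaze -q version` appears to emit
-            # pipe-delimited rows; the regex below takes the 3rd, 4th and 5th
-            # fields as spell name, grimoire version and installed version
-            # ("-" when the spell is not installed, per the checks below).
-            # A row might look roughly like "stable|utils|foo|1.2|-"
-            # (sample for illustration only).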
-            rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
-
-            # drop 2-line header and empty trailing line
-            for line in stdout.splitlines()[2:-1]:
-                match = rex.match(line)
-
-                cast = False
-
-                if params['state'] == 'present':
-                    # spell is not installed..
-                    if match.group('inst_ver') == '-':
-                        # ..so set up depends reqs for it
-                        match_depends(module)
-
-                        cast = True
-                    # spell is installed..
-                    else:
-                        # ..but does not conform depends reqs
-                        if not match_depends(module):
-                            cast = True
-                elif params['state'] == 'latest':
-                    # grimoire and installed versions do not match..
-                    if match.group('grim_ver') != match.group('inst_ver'):
-                        # ..so check for depends reqs first and set them up
-                        match_depends(module)
-
-                        cast = True
-                    # grimoire and installed versions match..
-                    else:
-                        # ..but the spell does not conform depends reqs
-                        if not match_depends(module):
-                            cast = True
-                elif params['state'] == 'rebuild':
-                    cast = True
-                # 'absent'
-                else:
-                    if match.group('inst_ver') != '-':
-                        dispel_queue.append(match.group('spell'))
-
-                if cast:
-                    cast_queue.append(match.group('spell'))
-
-            if cast_queue:
-                if module.check_mode:
-                    module.exit_json(changed=True, msg="would have cast spell(s)")
-
-                cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
-
-                rc, stdout, stderr = module.run_command(cmd_cast)
-
-                if rc != 0:
-                    module.fail_json(msg="failed to cast spell(s): " + stdout)
-
-                module.exit_json(changed=True, msg="successfully cast spell(s)")
-            elif params['state'] != 'absent':
-                module.exit_json(changed=False, msg="spell(s) are already cast")
-
-            if dispel_queue:
-                if module.check_mode:
-                    module.exit_json(changed=True, msg="would have dispelled spell(s)")
-
-                cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
-
-                rc, stdout, stderr = module.run_command(cmd_dispel)
-
-                if rc != 0:
-                    module.fail_json(msg="failed to dispel spell(s): " + stdout)
-
-                module.exit_json(changed=True, msg="successfully dispelled spell(s)")
-            else:
-                module.exit_json(changed=False, msg="spell(s) are already dispelled")
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(default=None, aliases=['spell'], type='list', elements='str'),
-            state=dict(default='present', choices=['present', 'latest',
-                                                   'absent', 'cast', 'dispelled', 'rebuild']),
-            depends=dict(default=None),
-            update=dict(default=False, type='bool'),
-            update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
-            cache_valid_time=dict(default=0, type='int')
-        ),
-        required_one_of=[['name', 'update', 'update_cache']],
-        supports_check_mode=True
-    )
-
-    if os.geteuid() != 0:
-        module.fail_json(msg="root privileges are required for this operation")
-
-    for c in SORCERY:
-        SORCERY[c] = module.get_bin_path(c, True)
-
-    # prepare environment: run sorcery commands without asking questions
-    module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
-
-    params = module.params
-
-    # normalize 'state' parameter
-    if params['state'] in ('present', 'cast'):
-        params['state'] = 'present'
-    elif params['state'] in ('absent', 'dispelled'):
-        params['state'] = 'absent'
-
-    if params['update']:
-        update_sorcery(module)
-
-    if params['update_cache'] or params['state'] == 'latest':
-        update_codex(module)
-
-    if params['name']:
-        manage_spells(module)
-
-
-# import module snippets
-from ansible.module_utils.basic import AnsibleModule
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/packaging/os/svr4pkg.py
deleted file mode 100644
index aa7a5c2e52..0000000000
--- a/plugins/modules/packaging/os/svr4pkg.py
+++ /dev/null
@@ -1,262 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012, Boyd Adamson
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: svr4pkg
-short_description: Manage Solaris SVR4 packages
-description:
-    - Manages SVR4 packages on Solaris 10 and 11.
-    - These were the native packages on Solaris <= 10 and are available
-      as a legacy feature in Solaris 11.
-    - Note that this is a very basic packaging system. It will not enforce
-      dependencies on install or remove.
-author: "Boyd Adamson (@brontitall)" -options: - name: - description: - - Package name, e.g. C(SUNWcsr) - required: true - type: str - - state: - description: - - Whether to install (C(present)), or remove (C(absent)) a package. - - If the package is to be installed, then I(src) is required. - - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. - required: true - choices: ["present", "absent"] - type: str - - src: - description: - - Specifies the location to install the package from. Required when C(state=present). - - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." - - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there. - type: str - proxy: - description: - - HTTP[s] proxy to be used if C(src) is a URL. - type: str - response_file: - description: - - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) - required: false - type: str - zone: - description: - - Whether to install the package only in the current zone, or install it into all zones. - - The installation into all zones works only if you are working with the global zone. - required: false - default: "all" - choices: ["current", "all"] - type: str - category: - description: - - Install/Remove category instead of a single package. - required: false - type: bool - default: false -''' - -EXAMPLES = ''' -- name: Install a package from an already copied file - community.general.svr4pkg: - name: CSWcommon - src: /tmp/cswpkgs.pkg - state: present - -- name: Install a package directly from an http site - community.general.svr4pkg: - name: CSWpkgutil - src: 'http://get.opencsw.org/now' - state: present - zone: current - -- name: Install a package with a response file - community.general.svr4pkg: - name: CSWggrep - src: /tmp/third-party.pkg - response_file: /tmp/ggrep.response - state: present - -- name: Ensure that a package is not installed - community.general.svr4pkg: - name: SUNWgnome-sound-recorder - state: absent - -- name: Ensure that a category is not installed - community.general.svr4pkg: - name: FIREFOX - state: absent - category: true -''' - - -import os -import tempfile - -from ansible.module_utils.basic import AnsibleModule - - -def package_installed(module, name, category): - cmd = [module.get_bin_path('pkginfo', True), '-q'] - if category: - cmd.append('-c') - cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - - -def create_admin_file(): - (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) - fullauto = b''' -mail= -instance=unique -partial=nocheck -runlevel=quit -idepend=nocheck -rdepend=nocheck -space=quit -setuid=nocheck -conflict=nocheck -action=nocheck -networktimeout=60 -networkretries=3 -authentication=quit -keystore=/var/sadm/security -proxy= -basedir=default -''' - os.write(desc, fullauto) - os.close(desc) - return filename - - -def run_command(module, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) - - -def package_install(module, name, src, proxy, response_file, zone, category): - adminfile = create_admin_file() - cmd = ['pkgadd', '-n'] - if zone == 'current': - cmd += ['-G'] - cmd += ['-a', adminfile, '-d', src] - if proxy is not None: - cmd += ['-x', 
proxy] - if response_file is not None: - cmd += ['-r', response_file] - if category: - cmd += ['-Y'] - cmd.append(name) - (rc, out, err) = run_command(module, cmd) - os.unlink(adminfile) - return (rc, out, err) - - -def package_uninstall(module, name, src, category): - adminfile = create_admin_file() - if category: - cmd = ['pkgrm', '-na', adminfile, '-Y', name] - else: - cmd = ['pkgrm', '-na', adminfile, name] - (rc, out, err) = run_command(module, cmd) - os.unlink(adminfile) - return (rc, out, err) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=True, choices=['present', 'absent']), - src=dict(default=None), - proxy=dict(default=None), - response_file=dict(default=None), - zone=dict(required=False, default='all', choices=['current', 'all']), - category=dict(default=False, type='bool') - ), - supports_check_mode=True - ) - state = module.params['state'] - name = module.params['name'] - src = module.params['src'] - proxy = module.params['proxy'] - response_file = module.params['response_file'] - zone = module.params['zone'] - category = module.params['category'] - rc = None - out = '' - err = '' - result = {} - result['name'] = name - result['state'] = state - - if state == 'present': - if src is None: - module.fail_json(name=name, - msg="src is required when state=present") - if not package_installed(module, name, category): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) - # Stdout is normally empty but for some packages can be - # very long and is not often useful - if len(out) > 75: - out = out[:75] + '...' - - elif state == 'absent': - if package_installed(module, name, category): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_uninstall(module, name, src, category) - out = out[:75] - - # Returncodes as per pkgadd(1m) - # 0 Successful completion - # 1 Fatal error. - # 2 Warning. - # 3 Interruption. - # 4 Administration. - # 5 Administration. Interaction is required. Do not use pkgadd -n. - # 10 Reboot after installation of all packages. - # 20 Reboot after installation of this package. 
- # 99 (observed) pkgadd: ERROR: could not process datastream from - if rc in (0, 2, 3, 10, 20): - result['changed'] = True - # no install nor uninstall, or failed - else: - result['changed'] = False - - # rc will be none when the package already was installed and no action took place - # Only return failed=False when the returncode is known to be good as there may be more - # undocumented failure return codes - if rc not in (None, 0, 2, 10, 20): - result['failed'] = True - else: - result['failed'] = False - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/swdepot.py b/plugins/modules/packaging/os/swdepot.py deleted file mode 100644 index 7e9db8353b..0000000000 --- a/plugins/modules/packaging/os/swdepot.py +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Raul Melo -# Written by Raul Melo -# Based on yum module written by Seth Vidal -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: swdepot -short_description: Manage packages with swdepot package manager (HP-UX) -description: - - Will install, upgrade and remove packages with swdepot package manager (HP-UX) -notes: [] -author: "Raul Melo (@melodous)" -options: - name: - description: - - package name. - aliases: [pkg] - required: true - type: str - state: - description: - - whether to install (C(present), C(latest)), or remove (C(absent)) a package. - required: true - choices: [ 'present', 'latest', 'absent'] - type: str - depot: - description: - - The source repository from which install or upgrade a package. - type: str -''' - -EXAMPLES = ''' -- name: Install a package - community.general.swdepot: - name: unzip-6.0 - state: present - depot: 'repository:/path' - -- name: Install the latest version of a package - community.general.swdepot: - name: unzip - state: latest - depot: 'repository:/path' - -- name: Remove a package - community.general.swdepot: - name: unzip - state: absent -''' - -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -def compare_package(version1, version2): - """ Compare version packages. - Return values: - -1 first minor - 0 equal - 1 first greater """ - - def normalize(v): - return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] - normalized_version1 = normalize(version1) - normalized_version2 = normalize(version2) - if normalized_version1 == normalized_version2: - rc = 0 - elif normalized_version1 < normalized_version2: - rc = -1 - else: - rc = 1 - return rc - - -def query_package(module, name, depot=None): - """ Returns whether a package is installed or not and version. """ - - cmd_list = '/usr/sbin/swlist -a revision -l product' - if depot: - rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)), - use_unsafe_shell=True) - else: - rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True) - if rc == 0: - version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1] - else: - version = None - - return rc, version - - -def remove_package(module, name): - """ Uninstall package if installed. 
""" - - cmd_remove = '/usr/sbin/swremove' - rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) - - if rc == 0: - return rc, stdout - else: - return rc, stderr - - -def install_package(module, depot, name): - """ Install package if not already installed """ - - cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' - rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) - if rc == 0: - return rc, stdout - else: - return rc, stderr - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['pkg'], required=True), - state=dict(choices=['present', 'absent', 'latest'], required=True), - depot=dict(default=None, required=False) - ), - supports_check_mode=True - ) - name = module.params['name'] - state = module.params['state'] - depot = module.params['depot'] - - changed = False - msg = "No changed" - rc = 0 - if (state == 'present' or state == 'latest') and depot is None: - output = "depot parameter is mandatory in present or latest task" - module.fail_json(name=name, msg=output, rc=rc) - - # Check local version - rc, version_installed = query_package(module, name) - if not rc: - installed = True - msg = "Already installed" - - else: - installed = False - - if (state == 'present' or state == 'latest') and installed is False: - if module.check_mode: - module.exit_json(changed=True) - rc, output = install_package(module, depot, name) - - if not rc: - changed = True - msg = "Package installed" - - else: - module.fail_json(name=name, msg=output, rc=rc) - - elif state == 'latest' and installed is True: - # Check depot version - rc, version_depot = query_package(module, name, depot) - - if not rc: - if compare_package(version_installed, version_depot) == -1: - if module.check_mode: - module.exit_json(changed=True) - # Install new version - rc, output = install_package(module, depot, name) - - if not rc: - msg = "Package upgraded, Before " + version_installed + " Now " + version_depot - changed = True - - else: - module.fail_json(name=name, msg=output, rc=rc) - - else: - output = "Software package not in repository " + depot - module.fail_json(name=name, msg=output, rc=rc) - - elif state == 'absent' and installed is True: - if module.check_mode: - module.exit_json(changed=True) - rc, output = remove_package(module, name) - if not rc: - changed = True - msg = "Package removed" - else: - module.fail_json(name=name, msg=output, rc=rc) - - if module.check_mode: - module.exit_json(changed=False) - - module.exit_json(changed=changed, name=name, state=state, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/swupd.py b/plugins/modules/packaging/os/swupd.py deleted file mode 100644 index 6ededcad02..0000000000 --- a/plugins/modules/packaging/os/swupd.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Alberto Murillo -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: swupd -short_description: Manages updates and bundles in ClearLinux systems. -description: - - Manages updates and bundles with the swupd bundle manager, which is used by the - Clear Linux Project for Intel Architecture. -author: Alberto Murillo (@albertomurillo) -options: - contenturl: - description: - - URL pointing to the contents of available bundles. 
- If not specified, the contents are retrieved from clearlinux.org. - type: str - format: - description: - - The format suffix for version file downloads. For example [1,2,3,staging,etc]. - If not specified, the default format is used. - type: str - manifest: - description: - - The manifest contains information about the bundles at certain version of the OS. - Specify a Manifest version to verify against that version or leave unspecified to - verify against the current version. - aliases: [release, version] - type: int - name: - description: - - Name of the (I)bundle to install or remove. - aliases: [bundle] - type: str - state: - description: - - Indicates the desired (I)bundle state. C(present) ensures the bundle - is installed while C(absent) ensures the (I)bundle is not installed. - default: present - choices: [present, absent] - type: str - update: - description: - - Updates the OS to the latest version. - type: bool - default: false - url: - description: - - Overrides both I(contenturl) and I(versionurl). - type: str - verify: - description: - - Verify content for OS version. - type: bool - default: false - versionurl: - description: - - URL for version string download. - type: str -''' - -EXAMPLES = ''' -- name: Update the OS to the latest version - community.general.swupd: - update: yes - -- name: Installs the "foo" bundle - community.general.swupd: - name: foo - state: present - -- name: Removes the "foo" bundle - community.general.swupd: - name: foo - state: absent - -- name: Check integrity of filesystem - community.general.swupd: - verify: yes - -- name: Downgrade OS to release 12920 - community.general.swupd: - verify: yes - manifest: 12920 -''' - -RETURN = ''' -stdout: - description: stdout of swupd - returned: always - type: str -stderr: - description: stderr of swupd - returned: always - type: str -''' - -import os -from ansible.module_utils.basic import AnsibleModule - - -class Swupd(object): - FILES_NOT_MATCH = "files did not match" - FILES_REPLACED = "missing files were replaced" - FILES_FIXED = "files were fixed" - FILES_DELETED = "files were deleted" - - def __init__(self, module): - # Fail if swupd is not found - self.module = module - self.swupd_cmd = module.get_bin_path("swupd", False) - if not self.swupd_cmd: - module.fail_json(msg="Could not find swupd.") - - # Initialize parameters - for key in module.params.keys(): - setattr(self, key, module.params[key]) - - # Initialize return values - self.changed = False - self.failed = False - self.msg = None - self.rc = None - self.stderr = "" - self.stdout = "" - - def _run_cmd(self, cmd): - self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False) - - def _get_cmd(self, command): - cmd = "%s %s" % (self.swupd_cmd, command) - - if self.format: - cmd += " --format=%s" % self.format - if self.manifest: - cmd += " --manifest=%s" % self.manifest - if self.url: - cmd += " --url=%s" % self.url - else: - if self.contenturl and command != "check-update": - cmd += " --contenturl=%s" % self.contenturl - if self.versionurl: - cmd += " --versionurl=%s" % self.versionurl - - return cmd - - def _is_bundle_installed(self, bundle): - try: - os.stat("/usr/share/clear/bundles/%s" % bundle) - except OSError: - return False - - return True - - def _needs_update(self): - cmd = self._get_cmd("check-update") - self._run_cmd(cmd) - - if self.rc == 0: - return True - - if self.rc == 1: - return False - - self.failed = True - self.msg = "Failed to check for updates" - - def _needs_verify(self): - cmd = 
self._get_cmd("verify")
-        self._run_cmd(cmd)
-
-        if self.rc != 0:
-            self.failed = True
-            self.msg = "Failed to check for filesystem inconsistencies."
-
-        if self.FILES_NOT_MATCH in self.stdout:
-            return True
-
-        return False
-
-    def install_bundle(self, bundle):
-        """Installs a bundle with `swupd bundle-add bundle`"""
-        if self.module.check_mode:
-            self.module.exit_json(changed=not self._is_bundle_installed(bundle))
-
-        if self._is_bundle_installed(bundle):
-            self.msg = "Bundle %s is already installed" % bundle
-            return
-
-        cmd = self._get_cmd("bundle-add %s" % bundle)
-        self._run_cmd(cmd)
-
-        if self.rc == 0:
-            self.changed = True
-            self.msg = "Bundle %s installed" % bundle
-            return
-
-        self.failed = True
-        self.msg = "Failed to install bundle %s" % bundle
-
-    def remove_bundle(self, bundle):
-        """Removes a bundle with `swupd bundle-remove bundle`"""
-        if self.module.check_mode:
-            self.module.exit_json(changed=self._is_bundle_installed(bundle))
-
-        if not self._is_bundle_installed(bundle):
-            self.msg = "Bundle %s not installed" % bundle
-            return
-
-        cmd = self._get_cmd("bundle-remove %s" % bundle)
-        self._run_cmd(cmd)
-
-        if self.rc == 0:
-            self.changed = True
-            self.msg = "Bundle %s removed" % bundle
-            return
-
-        self.failed = True
-        self.msg = "Failed to remove bundle %s" % bundle
-
-    def update_os(self):
-        """Updates the OS with `swupd update`"""
-        if self.module.check_mode:
-            self.module.exit_json(changed=self._needs_update())
-
-        if not self._needs_update():
-            self.msg = "There are no updates available"
-            return
-
-        cmd = self._get_cmd("update")
-        self._run_cmd(cmd)
-
-        if self.rc == 0:
-            self.changed = True
-            self.msg = "Update successful"
-            return
-
-        self.failed = True
-        self.msg = "Failed to update the OS"
-
-    def verify_os(self):
-        """Verifies filesystem against specified or current version"""
-        if self.module.check_mode:
-            self.module.exit_json(changed=self._needs_verify())
-
-        if not self._needs_verify():
-            self.msg = "No files were changed"
-            return
-
-        cmd = self._get_cmd("verify --fix")
-        self._run_cmd(cmd)
-
-        if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
-            self.changed = True
-            self.msg = "Fix successful"
-            return
-
-        self.failed = True
-        self.msg = "Failed to verify the OS"
-
-
-def main():
-    """The main function."""
-    module = AnsibleModule(
-        argument_spec=dict(
-            contenturl=dict(type="str"),
-            format=dict(type="str"),
-            manifest=dict(aliases=["release", "version"], type="int"),
-            name=dict(aliases=["bundle"], type="str"),
-            state=dict(default="present", choices=["present", "absent"], type="str"),
-            update=dict(default=False, type="bool"),
-            url=dict(type="str"),
-            verify=dict(default=False, type="bool"),
-            versionurl=dict(type="str"),
-        ),
-        required_one_of=[["name", "update", "verify"]],
-        mutually_exclusive=[["name", "update", "verify"]],
-        supports_check_mode=True
-    )
-
-    swupd = Swupd(module)
-
-    name = module.params["name"]
-    state = module.params["state"]
-    update = module.params["update"]
-    verify = module.params["verify"]
-
-    if update:
-        swupd.update_os()
-    elif verify:
-        swupd.verify_os()
-    elif state == "present":
-        swupd.install_bundle(name)
-    elif state == "absent":
-        swupd.remove_bundle(name)
-    else:
-        swupd.failed = True
-
-    if swupd.failed:
-        module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
-    else:
-        module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
-
-
-if __name__ == '__main__':
-    main()
diff --git 
a/plugins/modules/packaging/os/urpmi.py b/plugins/modules/packaging/os/urpmi.py deleted file mode 100644 index 47c22ffb93..0000000000 --- a/plugins/modules/packaging/os/urpmi.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Philippe Makowski -# Written by Philippe Makowski -# Based on apt module written by Matthew Williams - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: urpmi -short_description: Urpmi manager -description: - - Manages packages with I(urpmi) (such as for Mageia or Mandriva) -options: - name: - description: - - A list of package names to install, upgrade or remove. - required: yes - aliases: [ package, pkg ] - type: list - elements: str - state: - description: - - Indicates the desired package state. - choices: [ absent, present, installed, removed ] - default: present - type: str - update_cache: - description: - - Update the package database first C(urpmi.update -a). - - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. - type: bool - default: no - aliases: ['update-cache'] - no_recommends: - description: - - Corresponds to the C(--no-recommends) option for I(urpmi). - - Alias C(no-recommends) has been deprecated and will be removed in community.general 5.0.0. - type: bool - default: yes - aliases: ['no-recommends'] - force: - description: - - Assume "yes" is the answer to any question urpmi has to ask. - Corresponds to the C(--force) option for I(urpmi). - type: bool - default: yes - root: - description: - - Specifies an alternative install root, relative to which all packages will be installed. - Corresponds to the C(--root) option for I(urpmi). 
-    aliases: [ installroot ]
-    type: str
-author:
-- Philippe Makowski (@pmakowski)
-'''
-
-EXAMPLES = '''
-- name: Install package foo
  community.general.urpmi:
-    pkg: foo
-    state: present
-
-- name: Remove package foo
-  community.general.urpmi:
-    pkg: foo
-    state: absent
-
-- name: Remove packages foo and bar
-  community.general.urpmi:
-    pkg: foo,bar
-    state: absent
-
-- name: Update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
-  community.general.urpmi:
-    name: bar
-    state: present
-    update_cache: yes
-'''
-
-
-import os
-import shlex
-import sys
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def query_package(module, name, root):
-    # rpm -q returns 0 if the package is installed,
-    # 1 if it is not installed
-    rpm_path = module.get_bin_path("rpm", True)
-    cmd = "%s -q %s %s" % (rpm_path, name, root_option(root))
-    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-    if rc == 0:
-        return True
-    else:
-        return False
-
-
-def query_package_provides(module, name, root):
-    # rpm -q returns 0 if the package is installed,
-    # 1 if it is not installed
-    rpm_path = module.get_bin_path("rpm", True)
-    cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root))
-    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-    return rc == 0
-
-
-def update_package_db(module):
-
-    urpmiupdate_path = module.get_bin_path("urpmi.update", True)
-    cmd = "%s -a -q" % (urpmiupdate_path,)
-    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-    if rc != 0:
-        module.fail_json(msg="could not update package db")
-
-
-def remove_packages(module, packages, root):
-
-    remove_c = 0
-    # Using a for loop in case of error, we can report the package that failed
-    for package in packages:
-        # Query the package first, to see if we even need to remove
-        if not query_package(module, package, root):
-            continue
-
-        urpme_path = module.get_bin_path("urpme", True)
-        cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package)
-        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
-
-        if rc != 0:
-            module.fail_json(msg="failed to remove %s" % (package))
-
-        remove_c += 1
-
-    if remove_c > 0:
-
-        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
-
-    module.exit_json(changed=False, msg="package(s) already absent")
-
-
-def install_packages(module, pkgspec, root, force=True, no_recommends=True):
-
-    packages = ""
-    for package in pkgspec:
-        if not query_package_provides(module, package, root):
-            packages += "'%s' " % package
-
-    if len(packages) != 0:
-        if no_recommends:
-            no_recommends_yes = '--no-recommends'
-        else:
-            no_recommends_yes = ''
-
-        if force:
-            force_yes = '--force'
-        else:
-            force_yes = ''
-
-        urpmi_path = module.get_bin_path("urpmi", True)
-        cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes,
-                                                  no_recommends_yes,
-                                                  root_option(root),
-                                                  packages))
-
-        rc, out, err = module.run_command(cmd)
-
-        for package in pkgspec:
-            if not query_package_provides(module, package, root):
-                module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
-
-        # urpmi always has exit code 0 if --force is used
-        if rc:
-            module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
-        else:
-            module.exit_json(changed=True, msg="%s present(s)" % packages)
-    else:
-        module.exit_json(changed=False)
-
-
-def root_option(root):
-    if (root):
-        return "--root=%s" % (root)
-    else:
-        return ""
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(type='str', 
default='present', - choices=['absent', 'installed', 'present', 'removed']), - update_cache=dict( - type='bool', default=False, aliases=['update-cache'], - deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), - force=dict(type='bool', default=True), - no_recommends=dict( - type='bool', default=True, aliases=['no-recommends'], - deprecated_aliases=[dict(name='no-recommends', version='5.0.0', collection_name='community.general')]), - name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']), - root=dict(type='str', aliases=['installroot']), - ), - ) - - p = module.params - - if p['update_cache']: - update_package_db(module) - - if p['state'] in ['installed', 'present']: - install_packages(module, p['name'], p['root'], p['force'], p['no_recommends']) - - elif p['state'] in ['removed', 'absent']: - remove_packages(module, p['name'], p['root']) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/xbps.py b/plugins/modules/packaging/os/xbps.py deleted file mode 100644 index 8d314ea859..0000000000 --- a/plugins/modules/packaging/os/xbps.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2016 Dino Occhialini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: xbps -short_description: Manage packages with XBPS -description: - - Manage packages with the XBPS package manager. -author: - - "Dino Occhialini (@dinoocch)" - - "Michael Aldridge (@the-maldridge)" -options: - name: - description: - - Name of the package to install, upgrade, or remove. - aliases: [pkg,package] - type: list - elements: str - state: - description: - - Desired state of the package. - default: "present" - choices: ["present", "absent", "latest", "installed", "removed"] - type: str - recurse: - description: - - When removing a package, also remove its dependencies, provided - that they are not required by other packages and were not - explicitly installed by a user. - type: bool - default: no - update_cache: - description: - - Whether or not to refresh the master package lists. This can be - run as part of a package installation or as a separate step. - - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. - aliases: ['update-cache'] - type: bool - default: yes - upgrade: - description: - - Whether or not to upgrade whole system - type: bool - default: no - upgrade_xbps: - description: - - Whether or not to upgrade the xbps package when necessary. - Before installing new packages, - xbps requires the user to update the xbps package itself. - Thus when this option is set to C(no), - upgrades and installations will fail when xbps is not up to date. 
- type: bool - default: yes - version_added: '0.2.0' -''' - -EXAMPLES = ''' -- name: Install package foo (automatically updating the xbps package if needed) - community.general.xbps: name=foo state=present - -- name: Upgrade package foo - community.general.xbps: name=foo state=latest update_cache=yes - -- name: Remove packages foo and bar - community.general.xbps: name=foo,bar state=absent - -- name: Recursively remove package foo - community.general.xbps: name=foo state=absent recurse=yes - -- name: Update package cache - community.general.xbps: update_cache=yes - -- name: Upgrade packages - community.general.xbps: upgrade=yes - -- name: Install a package, failing if the xbps package is out of date - community.general.xbps: - name: foo - state: present - upgrade_xbps: no -''' - -RETURN = ''' -msg: - description: Message about results - returned: success - type: str - sample: "System Upgraded" -packages: - description: Packages that are affected/would be affected - type: list - sample: ["ansible"] - returned: success -''' - - -import os - -from ansible.module_utils.basic import AnsibleModule - - -def is_installed(xbps_output): - """Returns package install state""" - return bool(len(xbps_output)) - - -def query_package(module, xbps_path, name, state="present"): - """Returns Package info""" - if state == "present": - lcmd = "%s %s" % (xbps_path['query'], name) - lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) - if not is_installed(lstdout): - # package is not installed locally - return False, False - - rcmd = "%s -Sun" % (xbps_path['install']) - rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) - if rrc == 0 or rrc == 17: - """Return True to indicate that the package is installed locally, - and the result of the version number comparison to determine if the - package is up-to-date""" - return True, name not in rstdout - - return False, False - - -def update_package_db(module, xbps_path): - """Returns True if update_package_db changed""" - cmd = "%s -S" % (xbps_path['install']) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="Could not update package db") - if "avg rate" in stdout: - return True - else: - return False - - -def upgrade_xbps(module, xbps_path, exit_on_success=False): - cmdupgradexbps = "%s -uy xbps" % (xbps_path['install']) - rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False) - if rc != 0: - module.fail_json(msg='Could not upgrade xbps itself') - - -def upgrade(module, xbps_path): - """Returns true is full upgrade succeeds""" - cmdupgrade = "%s -uy" % (xbps_path['install']) - cmdneedupgrade = "%s -un" % (xbps_path['install']) - - rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False) - if rc == 0: - if(len(stdout.splitlines()) == 0): - module.exit_json(changed=False, msg='Nothing to upgrade') - elif module.check_mode: - module.exit_json(changed=True, msg='Would have performed upgrade') - else: - rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) - if rc == 0: - module.exit_json(changed=True, msg='System upgraded') - elif rc == 16 and module.params['upgrade_xbps']: - upgrade_xbps(module, xbps_path) - # avoid loops by not trying self-upgrade again - module.params['upgrade_xbps'] = False - upgrade(module, xbps_path) - else: - module.fail_json(msg="Could not upgrade") - else: - module.fail_json(msg="Could not upgrade") - - -def remove_packages(module, xbps_path, packages): - """Returns true if package removal succeeds""" - changed_packages = 
[] - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - installed, updated = query_package(module, xbps_path, package) - if not installed: - continue - - cmd = "%s -y %s" % (xbps_path['remove'], package) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - changed_packages.append(package) - - if len(changed_packages) > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % - len(changed_packages), packages=changed_packages) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, xbps_path, state, packages): - """Returns true if package install succeeds.""" - toInstall = [] - for i, package in enumerate(packages): - """If the package is installed and state == present or state == latest - and is up-to-date then skip""" - installed, updated = query_package(module, xbps_path, package) - if installed and (state == 'present' or - (state == 'latest' and updated)): - continue - - toInstall.append(package) - - if len(toInstall) == 0: - module.exit_json(changed=False, msg="Nothing to Install") - - cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall)) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 16 and module.params['upgrade_xbps']: - upgrade_xbps(module, xbps_path) - # avoid loops by not trying self-update again - module.params['upgrade_xbps'] = False - install_packages(module, xbps_path, state, packages) - elif rc != 0 and not (state == 'latest' and rc == 17): - module.fail_json(msg="failed to install %s" % (package)) - - module.exit_json(changed=True, msg="installed %s package(s)" - % (len(toInstall)), - packages=toInstall) - - -def check_packages(module, xbps_path, packages, state): - """Returns change status of command""" - would_be_changed = [] - for package in packages: - installed, updated = query_package(module, xbps_path, package) - if ((state in ["present", "latest"] and not installed) or - (state == "absent" and installed) or - (state == "latest" and not updated)): - would_be_changed.append(package) - if would_be_changed: - if state == "absent": - state = "removed" - module.exit_json(changed=True, msg="%s package(s) would be %s" % ( - len(would_be_changed), state), - packages=would_be_changed) - else: - module.exit_json(changed=False, msg="package(s) already %s" % state, - packages=[]) - - -def update_cache(module, xbps_path, upgrade_planned): - """Update package cache""" - if module.check_mode: - if upgrade_planned: - return - module.exit_json( - changed=True, msg='Would have updated the package cache' - ) - changed = update_package_db(module, xbps_path) - if not upgrade_planned: - module.exit_json(changed=changed, msg=( - 'Updated the package master lists' if changed - else 'Package list already up to date' - )) - - -def main(): - """Returns, calling appropriate command""" - - module = AnsibleModule( - argument_spec=dict( - name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'), - state=dict(default='present', choices=['present', 'installed', - 'latest', 'absent', - 'removed']), - recurse=dict(default=False, type='bool'), - upgrade=dict(default=False, type='bool'), - update_cache=dict( - default=True, aliases=['update-cache'], type='bool', - deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), - 
upgrade_xbps=dict(default=True, type='bool')
-        ),
-        required_one_of=[['name', 'update_cache', 'upgrade']],
-        supports_check_mode=True)
-
-    xbps_path = dict()
-    xbps_path['install'] = module.get_bin_path('xbps-install', True)
-    xbps_path['query'] = module.get_bin_path('xbps-query', True)
-    xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
-
-    if not os.path.exists(xbps_path['install']):
-        module.fail_json(msg="cannot find xbps, in path %s"
-                         % (xbps_path['install']))
-
-    p = module.params
-
-    # normalize the state parameter
-    if p['state'] in ['present', 'installed']:
-        p['state'] = 'present'
-    elif p['state'] in ['absent', 'removed']:
-        p['state'] = 'absent'
-
-    if p['update_cache']:
-        update_cache(module, xbps_path, (p['name'] or p['upgrade']))
-
-    if p['upgrade']:
-        upgrade(module, xbps_path)
-
-    if p['name']:
-        pkgs = p['name']
-
-        if module.check_mode:
-            check_packages(module, xbps_path, pkgs, p['state'])
-
-        if p['state'] in ['present', 'latest']:
-            install_packages(module, xbps_path, p['state'], pkgs)
-        elif p['state'] == 'absent':
-            remove_packages(module, xbps_path, pkgs)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/plugins/modules/packaging/os/yum_versionlock.py b/plugins/modules/packaging/os/yum_versionlock.py
deleted file mode 100644
index 62110bf00a..0000000000
--- a/plugins/modules/packaging/os/yum_versionlock.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Florian Paul Hoberg
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: yum_versionlock
-version_added: 2.0.0
-short_description: Locks / unlocks installed packages from being updated by the yum package manager
-description:
-    - This module adds installed packages to yum versionlock to prevent the package(s) from being updated.
-options:
-    name:
-        description:
-            - Package name or a list of packages.
-        type: list
-        required: true
-        elements: str
-    state:
-        description:
-            - If state is C(present), package(s) will be added to yum versionlock list.
-            - If state is C(absent), package(s) will be removed from yum versionlock list.
-        choices: [ 'absent', 'present' ]
-        type: str
-        default: present
-notes:
-    - Requires yum-plugin-versionlock package on the remote node.
-    - Supports C(check_mode).
-requirements:
-- yum
-- yum-versionlock
-author:
-    - Florian Paul Hoberg (@florianpaulhoberg)
-    - Amin Vakil (@aminvakil)
-'''
-
-EXAMPLES = r'''
-- name: Prevent Apache / httpd from being updated
-  community.general.yum_versionlock:
-    state: present
-    name: httpd
-
-- name: Prevent multiple packages from being updated
-  community.general.yum_versionlock:
-    state: present
-    name:
-    - httpd
-    - nginx
-    - haproxy
-    - curl
-
-- name: Remove lock from Apache / httpd to be updated again
-  community.general.yum_versionlock:
-    state: absent
-    name: httpd
-'''
-
-RETURN = r'''
-packages:
-    description: A list of package(s) in versionlock list.
-    returned: success
-    type: list
-    elements: str
-    sample: [ 'httpd' ]
-state:
-    description: State of package(s).
-    returned: success
-    type: str
-    sample: present
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from fnmatch import fnmatch
-
-
-class YumVersionLock:
-    def __init__(self, module):
-        self.module = module
-        self.params = module.params
-        self.yum_bin = module.get_bin_path('yum', required=True)
-
-    def get_versionlock_packages(self):
-        """ Get an overview of all packages on yum versionlock """
-        rc, out, err = self.module.run_command([self.yum_bin, "versionlock", "list"])
-        if rc == 0:
-            return out
-        elif rc == 1 and 'o such command:' in err:
-            self.module.fail_json(msg="Error: Please install rpm package yum-plugin-versionlock : " + to_native(err) + to_native(out))
-        self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
-
-    def ensure_state(self, packages, command):
-        """ Ensure packages state """
-        rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages)
-        if rc == 0:
-            return True
-        self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
-
-
-def main():
-    """ start main program to add/remove a package to yum versionlock"""
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(default='present', choices=['present', 'absent']),
-            name=dict(required=True, type='list', elements='str'),
-        ),
-        supports_check_mode=True
-    )
-
-    state = module.params['state']
-    packages = module.params['name']
-    changed = False
-
-    yum_v = YumVersionLock(module)
-
-    # Get an overview of all packages that have a version lock
-    versionlock_packages = yum_v.get_versionlock_packages()
-
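-    # Illustrative note: `yum versionlock list` typically prints one entry per
-    # line in an epoch-prefixed glob form, e.g. "0:httpd-2.4.6-93.el7.*" (the
-    # sample entry is an assumption for illustration). The pkg.split(":", 1)
-    # below strips that epoch prefix before the fnmatch comparison against the
-    # bare package name.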
- returned: success - type: str - sample: present -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from fnmatch import fnmatch - - -class YumVersionLock: - def __init__(self, module): - self.module = module - self.params = module.params - self.yum_bin = module.get_bin_path('yum', required=True) - - def get_versionlock_packages(self): - """ Get an overview of all packages on yum versionlock """ - rc, out, err = self.module.run_command([self.yum_bin, "versionlock", "list"]) - if rc == 0: - return out - elif rc == 1 and 'o such command:' in err: - self.module.fail_json(msg="Error: Please install rpm package yum-plugin-versionlock : " + to_native(err) + to_native(out)) - self.module.fail_json(msg="Error: " + to_native(err) + to_native(out)) - - def ensure_state(self, packages, command): - """ Ensure packages state """ - rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages) - if rc == 0: - return True - self.module.fail_json(msg="Error: " + to_native(err) + to_native(out)) - - -def main(): - """ start main program to add/remove a package to yum versionlock""" - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, type='list', elements='str'), - ), - supports_check_mode=True - ) - - state = module.params['state'] - packages = module.params['name'] - changed = False - - yum_v = YumVersionLock(module) - - # Get an overview of all packages that have a version lock - versionlock_packages = yum_v.get_versionlock_packages() - - # Ensure versionlock state of packages - packages_list = [] - if state in ('present'): - command = 'add' - for single_pkg in packages: - if not any(fnmatch(pkg.split(":", 1)[-1], single_pkg) for pkg in versionlock_packages.split()): - packages_list.append(single_pkg) - if packages_list: - if module.check_mode: - changed = True - else: - changed = yum_v.ensure_state(packages_list, command) - elif state in ('absent'): - command = 'delete' - for single_pkg in packages: - if any(fnmatch(pkg, single_pkg) for pkg in versionlock_packages.split()): - packages_list.append(single_pkg) - if packages_list: - if module.check_mode: - changed = True - else: - changed = yum_v.ensure_state(packages_list, command) - - module.exit_json( - changed=changed, - meta={ - "packages": packages, - "state": state - } - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py deleted file mode 100644 index 2295b5a566..0000000000 --- a/plugins/modules/packaging/os/zypper.py +++ /dev/null @@ -1,569 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Callahan -# based on -# openbsd_pkg -# (c) 2013 -# Patrik Lundin -# -# yum -# (c) 2012, Red Hat, Inc -# Written by Seth Vidal -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zypper -author: - - "Patrick Callahan (@dirtyharrycallahan)" - - "Alexander Gubin (@alxgu)" - - "Thomas O'Donnell (@andytom)" - - "Robin Roth (@robinro)" - - "Andrii Radyk (@AnderEnder)" -short_description: Manage packages on SUSE and openSUSE -description: - - Manage packages on SUSE and openSUSE using the zypper and rpm tools. 
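In the yum_versionlock code above, entries returned by `yum versionlock list` carry an `epoch:` prefix (for example `0:httpd-2.4.6-93.el7.*`), which is why the present-state check strips everything up to the first colon before the fnmatch comparison. A small self-contained illustration; the lock entries are made up, not real yum output:

```python
from fnmatch import fnmatch

locked = ["0:httpd-2.4.6-93.el7.*", "1:nginx-1.20.1-1.el7.*"]

def is_locked(pattern, versionlock_entries):
    # Drop the "epoch:" prefix, then glob-match against the requested pattern.
    return any(fnmatch(entry.split(":", 1)[-1], pattern)
               for entry in versionlock_entries)

print(is_locked("httpd*", locked))  # True
print(is_locked("curl*", locked))   # False
```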
-  - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run).
-options:
-    name:
-        description:
-        - Package name C(name) or package specifier or a list of either.
-        - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
-          update the package within the version range given.
-        - You can also pass a URL or a local path to an RPM file.
-        - When using state=latest, this can be '*', which updates all installed packages.
-        required: true
-        aliases: [ 'pkg' ]
-        type: list
-        elements: str
-    state:
-        description:
-          - C(present) will make sure the package is installed.
-            C(latest) will make sure the latest version of the package is installed.
-            C(absent) will make sure the specified package is not installed.
-            C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
-          - When using C(dist-upgrade), I(name) should be C('*').
-        required: false
-        choices: [ present, latest, absent, dist-upgrade, installed, removed ]
-        default: "present"
-        type: str
-    type:
-        description:
-          - The type of package to be operated on.
-        required: false
-        choices: [ package, patch, pattern, product, srcpackage, application ]
-        default: "package"
-        type: str
-    extra_args_precommand:
-        required: false
-        description:
-          - Add additional global target options to C(zypper).
-          - Options should be supplied in a single line as if given in the command line.
-        type: str
-    disable_gpg_check:
-        description:
-          - Whether to disable GPG signature checking of the package
-            being installed. Has an effect only if state is
-            I(present) or I(latest).
-        required: false
-        default: "no"
-        type: bool
-    disable_recommends:
-        description:
-          - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) passes C(--no-recommends) and suppresses
-            recommended packages; C(no) lets zypper install them.
-        required: false
-        default: "yes"
-        type: bool
-    force:
-        description:
-          - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
-        required: false
-        default: "no"
-        type: bool
-    force_resolution:
-        description:
-          - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver chooses a solution).
-        required: false
-        default: "no"
-        type: bool
-        version_added: '0.2.0'
-    update_cache:
-        description:
-          - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
-        required: false
-        default: "no"
-        type: bool
-        aliases: [ "refresh" ]
-    oldpackage:
-        description:
-          - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
-            version is specified as part of the package name.
-        required: false
-        default: "no"
-        type: bool
-    extra_args:
-        required: false
-        description:
-          - Add additional options to C(zypper) command.
-          - Options should be supplied in a single line as if given in the command line.
-        type: str
-    allow_vendor_change:
-        type: bool
-        required: false
-        default: false
-        description:
-          - Adds C(--allow_vendor_change) option to I(zypper) dist-upgrade command.
-        version_added: '0.2.0'
-    replacefiles:
-        type: bool
-        required: false
-        default: false
-        description:
-          - Adds C(--replacefiles) option to I(zypper) install/update command.
- version_added: '0.2.0' -notes: - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. -# informational: requirements for nodes -requirements: - - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" - - python-xml - - rpm -''' - -EXAMPLES = ''' -- name: Install nmap - community.general.zypper: - name: nmap - state: present - -- name: Install apache2 with recommended packages - community.general.zypper: - name: apache2 - state: present - disable_recommends: no - -- name: Apply a given patch - community.general.zypper: - name: openSUSE-2016-128 - state: present - type: patch - -- name: Remove the nmap package - community.general.zypper: - name: nmap - state: absent - -- name: Install the nginx rpm from a remote repo - community.general.zypper: - name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' - state: present - -- name: Install local rpm file - community.general.zypper: - name: /tmp/fancy-software.rpm - state: present - -- name: Update all packages - community.general.zypper: - name: '*' - state: latest - -- name: Apply all available patches - community.general.zypper: - name: '*' - state: latest - type: patch - -- name: Perform a dist-upgrade with additional arguments - community.general.zypper: - name: '*' - state: dist-upgrade - allow_vendor_change: true - extra_args: '--allow-arch-change' - -- name: Perform a installaion of nmap with the install option replacefiles - community.general.zypper: - name: 'nmap' - state: latest - replacefiles: true - -- name: Refresh repositories and update package openssl - community.general.zypper: - name: openssl - state: present - update_cache: yes - -- name: "Install specific version (possible comparisons: <, >, <=, >=, =)" - community.general.zypper: - name: 'docker>=1.10' - state: present - -- name: Wait 20 seconds to acquire the lock before failing - community.general.zypper: - name: mosh - state: present - environment: - ZYPP_LOCK_TIMEOUT: 20 -''' - -import os.path -import xml -import re -from xml.dom.minidom import parseString as parseXML -from ansible.module_utils.common.text.converters import to_native - -# import module snippets -from ansible.module_utils.basic import AnsibleModule - - -class Package: - def __init__(self, name, prefix, version): - self.name = name - self.prefix = prefix - self.version = version - self.shouldinstall = (prefix == '+') - - def __str__(self): - return self.prefix + self.name + self.version - - -def split_name_version(name): - """splits of the package name and desired version - - example formats: - - docker>=1.10 - - apache=2.4 - - Allowed version specifiers: <, >, <=, >=, = - Allowed version format: [0-9.-]* - - Also allows a prefix indicating remove "-", "~" or install "+" - """ - - prefix = '' - if name[0] in ['-', '~', '+']: - prefix = name[0] - name = name[1:] - if prefix == '~': - prefix = '-' - - version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') - try: - reres = version_check.match(name) - name, version = reres.groups() - if version is None: - version = '' - return prefix, name, version - except Exception: - return prefix, name, '' - - -def get_want_state(names, remove=False): - packages = [] - urls = [] - for name in names: - if '://' in name or name.endswith('.rpm'): - urls.append(name) - else: - prefix, pname, version = split_name_version(name) - if prefix not in ['-', '+']: - if remove: - prefix = '-' - else: - 
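The regular expression and prefix normalization in split_name_version above can be exercised on their own; the loop below is only a demonstration, with the pattern copied from the function:

```python
import re

version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')

for spec in ('docker>=1.10', 'apache=2.4', '+vim', '~emacs', 'nmap'):
    prefix, name = '', spec
    if name[0] in ('-', '~', '+'):
        prefix, name = name[0], name[1:]
        if prefix == '~':
            prefix = '-'  # '~' is normalized to "remove", as in split_name_version
    pkg, version = version_check.match(name).groups()
    print(prefix, pkg, version or '')
```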
prefix = '+' - packages.append(Package(pname, prefix, version)) - return packages, urls - - -def get_installed_state(m, packages): - "get installed state of packages" - - cmd = get_cmd(m, 'search') - cmd.extend(['--match-exact', '--details', '--installed-only']) - cmd.extend([p.name for p in packages]) - return parse_zypper_xml(m, cmd, fail_not_found=False)[0] - - -def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - try: - dom = parseXML(stdout) - except xml.parsers.expat.ExpatError as exc: - m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc), - rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) - - if rc == 104: - # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) - if fail_not_found: - errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data - m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) - else: - return {}, rc, stdout, stderr - elif rc in [0, 106, 103]: - # zypper exit codes - # 0: success - # 106: signature verification failed - # 103: zypper was upgraded, run same command again - if packages is None: - firstrun = True - packages = {} - solvable_list = dom.getElementsByTagName('solvable') - for solvable in solvable_list: - name = solvable.getAttribute('name') - packages[name] = {} - packages[name]['version'] = solvable.getAttribute('edition') - packages[name]['oldversion'] = solvable.getAttribute('edition-old') - status = solvable.getAttribute('status') - packages[name]['installed'] = status == "installed" - packages[name]['group'] = solvable.parentNode.nodeName - if rc == 103 and firstrun: - # if this was the first run and it failed with 103 - # run zypper again with the same command to complete update - return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) - - return packages, rc, stdout, stderr - m.fail_json(msg='Zypper run command failed with return code %s.' 
% rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) - - -def get_cmd(m, subcommand): - "puts together the basic zypper command arguments with those passed to the module" - is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] - is_refresh = subcommand == 'refresh' - cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout'] - if transactional_updates(): - cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd - if m.params['extra_args_precommand']: - args_list = m.params['extra_args_precommand'].split() - cmd.extend(args_list) - # add global options before zypper command - if (is_install or is_refresh) and m.params['disable_gpg_check']: - cmd.append('--no-gpg-checks') - - if subcommand == 'search': - cmd.append('--disable-repositories') - - cmd.append(subcommand) - if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh: - cmd.extend(['--type', m.params['type']]) - if m.check_mode and subcommand != 'search': - cmd.append('--dry-run') - if is_install: - cmd.append('--auto-agree-with-licenses') - if m.params['disable_recommends']: - cmd.append('--no-recommends') - if m.params['force']: - cmd.append('--force') - if m.params['force_resolution']: - cmd.append('--force-resolution') - if m.params['oldpackage']: - cmd.append('--oldpackage') - if m.params['replacefiles']: - cmd.append('--replacefiles') - if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']: - cmd.append('--allow-vendor-change') - if m.params['extra_args']: - args_list = m.params['extra_args'].split(' ') - cmd.extend(args_list) - - return cmd - - -def set_diff(m, retvals, result): - # TODO: if there is only one package, set before/after to version numbers - packages = {'installed': [], 'removed': [], 'upgraded': []} - if result: - for p in result: - group = result[p]['group'] - if group == 'to-upgrade': - versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' - packages['upgraded'].append(p + versions) - elif group == 'to-install': - packages['installed'].append(p) - elif group == 'to-remove': - packages['removed'].append(p) - - output = '' - for state in packages: - if packages[state]: - output += state + ': ' + ', '.join(packages[state]) + '\n' - if 'diff' not in retvals: - retvals['diff'] = {} - if 'prepared' not in retvals['diff']: - retvals['diff']['prepared'] = output - else: - retvals['diff']['prepared'] += '\n' + output - - -def package_present(m, name, want_latest): - "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} - packages, urls = get_want_state(name) - - # add oldpackage flag when a version is given to allow downgrades - if any(p.version for p in packages): - m.params['oldpackage'] = True - - if not want_latest: - # for state=present: filter out already installed packages - # if a version is given leave the package in to let zypper handle the version - # resolution - packageswithoutversion = [p for p in packages if not p.version] - prerun_state = get_installed_state(m, packageswithoutversion) - # generate lists of packages to install or remove - packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] - - if not packages and not urls: - # nothing to install/remove and nothing to update - return None, retvals - - # zypper install also updates packages - cmd = get_cmd(m, 'install') - cmd.append('--') - cmd.extend(urls) - 
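package_present above deliberately issues a single zypper transaction: after the `--` separator, specs prefixed with `+` or `-` are installed and removed together, so the resolver sees the whole picture (for example removing exim while installing postfix without first tearing out packages that depend on a mail server). A trimmed sketch of the command that gets built, with the flag set reduced from what get_cmd produces:

```python
base = ['zypper', '--quiet', '--non-interactive', '--xmlout',
        'install', '--auto-agree-with-licenses']
packages = ['-exim', '+postfix', 'docker>=1.10']
cmd = base + ['--'] + packages
print(' '.join(cmd))
# zypper --quiet --non-interactive --xmlout install --auto-agree-with-licenses -- -exim +postfix docker>=1.10
```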
# pass packages to zypper - # allow for + or - prefixes in install/remove lists - # also add version specifier if given - # do this in one zypper run to allow for dependency-resolution - # for example "-exim postfix" runs without removing packages depending on mailserver - cmd.extend([str(p) for p in packages]) - - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) - - return result, retvals - - -def package_update_all(m): - "run update or patch on all available packages" - - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} - if m.params['type'] == 'patch': - cmdname = 'patch' - elif m.params['state'] == 'dist-upgrade': - cmdname = 'dist-upgrade' - else: - cmdname = 'update' - - cmd = get_cmd(m, cmdname) - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) - return result, retvals - - -def package_absent(m, name): - "remove the packages in name" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} - # Get package state - packages, urls = get_want_state(name, remove=True) - if any(p.prefix == '+' for p in packages): - m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") - if urls: - m.fail_json(msg="Can not remove via URL.") - if m.params['type'] == 'patch': - m.fail_json(msg="Can not remove patches.") - prerun_state = get_installed_state(m, packages) - packages = [p for p in packages if p.name in prerun_state] - - if not packages: - return None, retvals - - cmd = get_cmd(m, 'remove') - cmd.extend([p.name + p.version for p in packages]) - - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) - return result, retvals - - -def repo_refresh(m): - "update the repositories" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} - - cmd = get_cmd(m, 'refresh') - - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) - - return retvals - - -def transactional_updates(): - return os.path.exists('/var/lib/misc/transactional-update.state') - -# =========================================== -# Main control flow - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['pkg'], type='list', elements='str'), - state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), - type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), - extra_args_precommand=dict(required=False, default=None), - disable_gpg_check=dict(required=False, default=False, type='bool'), - disable_recommends=dict(required=False, default=True, type='bool'), - force=dict(required=False, default=False, type='bool'), - force_resolution=dict(required=False, default=False, type='bool'), - update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'), - oldpackage=dict(required=False, default=False, type='bool'), - extra_args=dict(required=False, default=None), - allow_vendor_change=dict(required=False, default=False, type='bool'), - replacefiles=dict(required=False, default=False, type='bool') - ), - supports_check_mode=True - ) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - - name = module.params['name'] - state = module.params['state'] - update_cache = module.params['update_cache'] - - # remove empty strings from package list - name = list(filter(None, name)) - - # 
Refresh repositories - if update_cache and not module.check_mode: - retvals = repo_refresh(module) - - if retvals['rc'] != 0: - module.fail_json(msg="Zypper refresh run failed.", **retvals) - - # Perform requested action - if name == ['*'] and state in ['latest', 'dist-upgrade']: - packages_changed, retvals = package_update_all(module) - elif name != ['*'] and state == 'dist-upgrade': - module.fail_json(msg="Can not dist-upgrade specific packages.") - else: - if state in ['absent', 'removed']: - packages_changed, retvals = package_absent(module, name) - elif state in ['installed', 'present', 'latest']: - packages_changed, retvals = package_present(module, name, state == 'latest') - - retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed) - - if module._diff: - set_diff(module, retvals, packages_changed) - - if retvals['rc'] != 0: - module.fail_json(msg="Zypper run failed.", **retvals) - - if not retvals['changed']: - del retvals['stdout'] - del retvals['stderr'] - - module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py deleted file mode 100644 index a29650a17e..0000000000 --- a/plugins/modules/packaging/os/zypper_repository.py +++ /dev/null @@ -1,466 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matthias Vogelgesang -# (c) 2014, Justin Lecher -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zypper_repository -author: "Matthias Vogelgesang (@matze)" -short_description: Add and remove Zypper repositories -description: - - Add or remove Zypper repositories on SUSE and openSUSE -options: - name: - description: - - A name for the repository. Not required when adding repofiles. - type: str - repo: - description: - - URI of the repository or .repo file. Required when state=present. - type: str - state: - description: - - A source string state. - choices: [ "absent", "present" ] - default: "present" - type: str - description: - description: - - A description of the repository - type: str - disable_gpg_check: - description: - - Whether to disable GPG signature checking of - all packages. Has an effect only if state is - I(present). - - Needs zypper version >= 1.6.2. - type: bool - default: no - autorefresh: - description: - - Enable autorefresh of the repository. - type: bool - default: yes - aliases: [ "refresh" ] - priority: - description: - - Set priority of repository. Packages will always be installed - from the repository with the smallest priority number. - - Needs zypper version >= 1.12.25. - type: int - overwrite_multiple: - description: - - Overwrite multiple repository entries, if repositories with both name and - URL already exist. - type: bool - default: no - auto_import_keys: - description: - - Automatically import the gpg signing key of the new or changed repository. - - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent). - - Implies runrefresh. - - Only works with C(.repo) files if `name` is given explicitly. - type: bool - default: no - runrefresh: - description: - - Refresh the package list of the given repository. - - Can be used with repo=* to refresh all repositories. 
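The dispatch in the zypper module's main() above reduces to a small decision table. A condensed restatement, with the three callables standing in for package_update_all, package_absent and package_present:

```python
def dispatch(name, state, update_all, remove, install):
    if name == ['*'] and state in ('latest', 'dist-upgrade'):
        return update_all()
    if name != ['*'] and state == 'dist-upgrade':
        raise ValueError('Can not dist-upgrade specific packages.')
    if state in ('absent', 'removed'):
        return remove(name)
    # remaining states: installed, present, latest
    return install(name, want_latest=(state == 'latest'))

print(dispatch(['*'], 'latest',
               update_all=lambda: 'update all',
               remove=lambda n: 'remove %s' % n,
               install=lambda n, want_latest: 'install %s' % n))
```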
- type: bool - default: no - enabled: - description: - - Set repository to enabled (or disabled). - type: bool - default: yes - - -requirements: - - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" - - python-xml -''' - -EXAMPLES = ''' -- name: Add NVIDIA repository for graphics drivers - community.general.zypper_repository: - name: nvidia-repo - repo: 'ftp://download.nvidia.com/opensuse/12.2' - state: present - -- name: Remove NVIDIA repository - community.general.zypper_repository: - name: nvidia-repo - repo: 'ftp://download.nvidia.com/opensuse/12.2' - state: absent - -- name: Add python development repository - community.general.zypper_repository: - repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo' - -- name: Refresh all repos - community.general.zypper_repository: - repo: '*' - runrefresh: yes - -- name: Add a repo and add its gpg key - community.general.zypper_repository: - repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/' - auto_import_keys: yes - -- name: Force refresh of a repository - community.general.zypper_repository: - repo: 'http://my_internal_ci_repo/repo' - name: my_ci_repo - state: present - runrefresh: yes -''' - -import traceback - -XML_IMP_ERR = None -try: - from xml.dom.minidom import parseString as parseXML - HAS_XML = True -except ImportError: - XML_IMP_ERR = traceback.format_exc() - HAS_XML = False - -from distutils.version import LooseVersion - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six.moves import configparser, StringIO -from io import open - -REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] - - -def _get_cmd(module, *args): - """Combines the non-interactive zypper command with arguments/subcommands""" - cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive'] - cmd.extend(args) - - return cmd - - -def _parse_repos(module): - """parses the output of zypper --xmlout repos and return a parse repo dictionary""" - cmd = _get_cmd(module, '--xmlout', 'repos') - - if not HAS_XML: - module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc == 0: - repos = [] - dom = parseXML(stdout) - repo_list = dom.getElementsByTagName('repo') - for repo in repo_list: - opts = {} - for o in REPO_OPTS: - opts[o] = repo.getAttribute(o) - opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data - # A repo can be uniquely identified by an alias + url - repos.append(opts) - return repos - # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined) - elif rc == 6: - return [] - else: - module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr) - - -def _repo_changes(module, realrepo, repocmp): - "Check whether the 2 given repos have different settings." 
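_parse_repos above relies on the structure of `zypper --xmlout repos`. A standalone sketch of the same minidom walk, run against a canned document; the XML shape is inferred from the parser itself, not captured from a live system:

```python
from xml.dom.minidom import parseString

REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']

xml_out = """<stream>
  <repo-list>
    <repo alias="repo-oss" name="Main Repository" enabled="1"
          autorefresh="1" gpgcheck="1" priority="99">
      <url>http://download.opensuse.org/distribution/leap/15.5/repo/oss/</url>
    </repo>
  </repo-list>
</stream>"""

repos = []
dom = parseString(xml_out)
for repo in dom.getElementsByTagName('repo'):
    opts = {o: repo.getAttribute(o) for o in REPO_OPTS}
    opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
    repos.append(opts)
print(repos)
```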
- for k in repocmp: - if repocmp[k] and k not in realrepo: - return True - - for k, v in realrepo.items(): - if k in repocmp and repocmp[k]: - valold = str(repocmp[k] or "") - valnew = v or "" - if k == "url": - if '$releasever' in valold or '$releasever' in valnew: - cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release'] - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - valnew = valnew.replace('$releasever', stdout) - valold = valold.replace('$releasever', stdout) - if '$basearch' in valold or '$basearch' in valnew: - cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release'] - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - valnew = valnew.replace('$basearch', stdout) - valold = valold.replace('$basearch', stdout) - valold, valnew = valold.rstrip("/"), valnew.rstrip("/") - if valold != valnew: - return True - return False - - -def repo_exists(module, repodata, overwrite_multiple): - """Check whether the repository already exists. - - returns (exists, mod, old_repos) - exists: whether a matching (name, URL) repo exists - mod: whether there are changes compared to the existing repo - old_repos: list of matching repos - """ - existing_repos = _parse_repos(module) - - # look for repos that have matching alias or url to the one searched - repos = [] - for kw in ['alias', 'url']: - name = repodata[kw] - for oldr in existing_repos: - if repodata[kw] == oldr[kw] and oldr not in repos: - repos.append(oldr) - - if len(repos) == 0: - # Repo does not exist yet - return (False, False, None) - elif len(repos) == 1: - # Found an existing repo, look for changes - has_changes = _repo_changes(module, repos[0], repodata) - return (True, has_changes, repos) - elif len(repos) >= 2: - if overwrite_multiple: - # Found two repos and want to overwrite_multiple - return (True, True, repos) - else: - errmsg = 'More than one repo matched "%s": "%s".' % (name, repos) - errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten' - module.fail_json(msg=errmsg) - - -def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): - "Adds the repo, removes old repos before, that would conflict." - repo = repodata['url'] - cmd = _get_cmd(module, 'addrepo', '--check') - if repodata['name']: - cmd.extend(['--name', repodata['name']]) - - # priority on addrepo available since 1.12.25 - # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336 - if repodata['priority']: - if zypper_version >= LooseVersion('1.12.25'): - cmd.extend(['--priority', str(repodata['priority'])]) - else: - warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.") - - if repodata['enabled'] == '0': - cmd.append('--disable') - - # gpgcheck available since 1.6.2 - # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449 - # the default changed in the past, so don't assume a default here and show warning for old zypper versions - if zypper_version >= LooseVersion('1.6.2'): - if repodata['gpgcheck'] == '1': - cmd.append('--gpgcheck') - else: - cmd.append('--no-gpgcheck') - else: - warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. 
Using zypper default value.") - - if repodata['autorefresh'] == '1': - cmd.append('--refresh') - - cmd.append(repo) - - if not repo.endswith('.repo'): - cmd.append(repodata['alias']) - - if old_repos is not None: - for oldrepo in old_repos: - remove_repo(module, oldrepo['url']) - - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - return rc, stdout, stderr - - -def remove_repo(module, repo): - "Removes the repo." - cmd = _get_cmd(module, 'removerepo', repo) - - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - return rc, stdout, stderr - - -def get_zypper_version(module): - rc, stdout, stderr = module.run_command([module.get_bin_path('zypper', required=True), '--version']) - if rc != 0 or not stdout.startswith('zypper '): - return LooseVersion('1.0') - return LooseVersion(stdout.split()[1]) - - -def runrefreshrepo(module, auto_import_keys=False, shortname=None): - "Forces zypper to refresh repo metadata." - if auto_import_keys: - cmd = _get_cmd(module, '--gpg-auto-import-keys', 'refresh', '--force') - else: - cmd = _get_cmd(module, 'refresh', '--force') - if shortname is not None: - cmd.extend(['-r', shortname]) - - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - return rc, stdout, stderr - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=False), - repo=dict(required=False), - state=dict(choices=['present', 'absent'], default='present'), - runrefresh=dict(required=False, default=False, type='bool'), - description=dict(required=False), - disable_gpg_check=dict(required=False, default=False, type='bool'), - autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']), - priority=dict(required=False, type='int'), - enabled=dict(required=False, default=True, type='bool'), - overwrite_multiple=dict(required=False, default=False, type='bool'), - auto_import_keys=dict(required=False, default=False, type='bool'), - ), - supports_check_mode=False, - required_one_of=[['state', 'runrefresh']], - ) - - repo = module.params['repo'] - alias = module.params['name'] - state = module.params['state'] - overwrite_multiple = module.params['overwrite_multiple'] - auto_import_keys = module.params['auto_import_keys'] - runrefresh = module.params['runrefresh'] - - zypper_version = get_zypper_version(module) - warnings = [] # collect warning messages for final output - - repodata = { - 'url': repo, - 'alias': alias, - 'name': module.params['description'], - 'priority': module.params['priority'], - } - # rewrite bools in the language that zypper lr -x provides for easier comparison - if module.params['enabled']: - repodata['enabled'] = '1' - else: - repodata['enabled'] = '0' - if module.params['disable_gpg_check']: - repodata['gpgcheck'] = '0' - else: - repodata['gpgcheck'] = '1' - if module.params['autorefresh']: - repodata['autorefresh'] = '1' - else: - repodata['autorefresh'] = '0' - - def exit_unchanged(): - module.exit_json(changed=False, repodata=repodata, state=state) - - # Check run-time module parameters - if repo == '*' or alias == '*': - if runrefresh: - runrefreshrepo(module, auto_import_keys) - module.exit_json(changed=False, runrefresh=True) - else: - module.fail_json(msg='repo=* can only be used with the runrefresh option.') - - if state == 'present' and not repo: - module.fail_json(msg='Module option state=present requires repo') - if state == 'absent' and not repo and not alias: - module.fail_json(msg='Alias or repo parameter required when state=absent') - - if repo and repo.endswith('.repo'): - if alias: 
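addmodify_repo above gates flags on the detected zypper version: --priority needs >= 1.12.25 and explicit gpgcheck handling needs >= 1.6.2, collecting a warning when the binary is too old. A minimal sketch of that gating; LooseVersion is the same (now deprecated) comparator the module imports:

```python
from distutils.version import LooseVersion

def repo_flags(zypper_version, priority=None, warnings=None):
    warnings = [] if warnings is None else warnings
    flags = []
    if priority is not None:
        if zypper_version >= LooseVersion('1.12.25'):
            flags += ['--priority', str(priority)]
        else:
            warnings.append('Setting priority only available for zypper >= 1.12.25.')
    return flags, warnings

print(repo_flags(LooseVersion('1.14.11'), priority=90))  # (['--priority', '90'], [])
print(repo_flags(LooseVersion('1.8.3'), priority=90))    # ([], [warning])
```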
- module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files') - else: - if not alias and state == "present": - module.fail_json(msg='Name required when adding non-repo files.') - - # Download / Open and parse .repo file to ensure idempotency - if repo and repo.endswith('.repo'): - if repo.startswith(('http://', 'https://')): - response, info = fetch_url(module=module, url=repo, force=True) - if not response or info['status'] != 200: - module.fail_json(msg='Error downloading .repo file from provided URL') - repofile_text = to_text(response.read(), errors='surrogate_or_strict') - else: - try: - with open(repo, encoding='utf-8') as file: - repofile_text = file.read() - except IOError: - module.fail_json(msg='Error opening .repo file from provided path') - - repofile = configparser.ConfigParser() - try: - repofile.readfp(StringIO(repofile_text)) - except configparser.Error: - module.fail_json(msg='Invalid format, .repo file could not be parsed') - - # No support for .repo file with zero or more than one repository - if len(repofile.sections()) != 1: - err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections()) - module.fail_json(msg=err) - - section = repofile.sections()[0] - repofile_items = dict(repofile.items(section)) - # Only proceed if at least baseurl is available - if 'baseurl' not in repofile_items: - module.fail_json(msg='No baseurl found in .repo file') - - # Set alias (name) and url based on values from .repo file - alias = section - repodata['alias'] = section - repodata['url'] = repofile_items['baseurl'] - - # If gpgkey is part of the .repo file, auto import key - if 'gpgkey' in repofile_items: - auto_import_keys = True - - # Map additional values, if available - if 'name' in repofile_items: - repodata['name'] = repofile_items['name'] - if 'enabled' in repofile_items: - repodata['enabled'] = repofile_items['enabled'] - if 'autorefresh' in repofile_items: - repodata['autorefresh'] = repofile_items['autorefresh'] - if 'gpgcheck' in repofile_items: - repodata['gpgcheck'] = repofile_items['gpgcheck'] - - exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple) - - if alias: - shortname = alias - else: - shortname = repo - - if state == 'present': - if exists and not mod: - if runrefresh: - runrefreshrepo(module, auto_import_keys, shortname) - exit_unchanged() - rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings) - if rc == 0 and (runrefresh or auto_import_keys): - runrefreshrepo(module, auto_import_keys, shortname) - elif state == 'absent': - if not exists: - exit_unchanged() - rc, stdout, stderr = remove_repo(module, shortname) - - if rc == 0: - module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings) - else: - module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py deleted file mode 120000 index 572b6386e4..0000000000 --- a/plugins/modules/packet_device.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/packet/packet_device.py \ No newline at end of file diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py new file mode 100644 index 0000000000..575d377b56 --- /dev/null +++ b/plugins/modules/packet_device.py @@ -0,0 +1,671 @@ +#!/usr/bin/python +# Copyright (c) 2016, Tomas Karasek +# Copyright (c) 2016, Matt 
Baldwin +# Copyright (c) 2016, Thibaud Morel l'Horset +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: packet_device + +short_description: Manage a bare metal server in the Packet Host + +description: + - Manage a bare metal server in the Packet Host (a "device" in the API terms). + - When the machine is created it can optionally wait for public IP address, or for active state. + - This module has a dependency on packet >= 1.0. + - API is documented at U(https://www.packet.net/developers/api/devices). +author: + - Tomas Karasek (@t0mk) + - Matt Baldwin (@baldwinSPC) + - Thibaud Morel l'Horset (@teebes) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + auth_token: + description: + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + type: str + + count: + description: + - The number of devices to create. Count number can be included in hostname using the C(%d) string formatter. + default: 1 + type: int + + count_offset: + description: + - From which number to start the count. + default: 1 + type: int + + device_ids: + description: + - List of device IDs on which to operate. + type: list + elements: str + + tags: + description: + - List of device tags. + - Currently implemented only for device creation. + type: list + elements: str + version_added: '0.2.0' + + facility: + description: + - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). + type: str + + features: + description: + - Dict with "features" for device creation. See Packet API docs for details. + type: dict + + hostnames: + description: + - A hostname of a device, or a list of hostnames. + - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from O(count). + - If only one hostname, it might be expanded to list if O(count)>1. + aliases: [name] + type: list + elements: str + + locked: + description: + - Whether to lock a created device. + default: false + aliases: [lock] + type: bool + + operating_system: + description: + - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). + type: str + + plan: + description: + - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/). + type: str + + project_id: + description: + - ID of project of the device. + required: true + type: str + + state: + description: + - Desired state of the device. + - If set to V(present) (the default), the module call returns immediately after the device-creating HTTP request successfully + returns. + - If set to V(active), the module call blocks until all the specified devices are in state active due to the Packet + API, or until O(wait_timeout). + choices: [present, absent, active, inactive, rebooted] + default: present + type: str + + user_data: + description: + - Userdata blob made available to the machine. + type: str + + wait_for_public_IPv: + description: + - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. + - If set to V(4), it waits until IPv4 is assigned to the instance. + - If set to V(6), it waits until public IPv6 is assigned to the instance. 
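The O(count)/O(count_offset) expansion described above turns one templated hostname into many. A trimmed sketch mirroring get_hostname_list, which appears later in this file:

```python
import re

def expand_hostnames(spec, count=1, count_offset=1):
    count_range = range(count_offset, count_offset + count)
    if re.search(r"%\d{0,2}d", spec):
        return [spec % i for i in count_range]
    if count > 1:
        # No formatter given: append a default %02d, as the module does.
        return [('%s%%02d' % spec) % i for i in count_range]
    return [spec]

print(expand_hostnames('server-%02d', count=3))  # ['server-01', 'server-02', 'server-03']
print(expand_hostnames('web', count=2))          # ['web01', 'web02']
```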
+ choices: [4, 6] + type: int + + wait_timeout: + description: + - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the V(active) state. + - If O(wait_for_public_IPv) is set and O(state=active), the module waits for both events consequently, applying the + timeout twice. + default: 900 + type: int + + ipxe_script_url: + description: + - URL of custom iPXE script for provisioning. + - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). + type: str + default: '' + + always_pxe: + description: + - Persist PXE as the first boot option. + - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE. + default: false + type: bool + + +requirements: + - "packet-python >= 1.35" +""" + +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in environment variable PACKET_API_TOKEN. +# You can also pass it to the auth_token parameter of the module instead. + +# Creating devices + +- name: Create 1 device + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + tags: ci-xyz + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +# Create the same device and wait until it is in state "active", (when it is +# ready for other API operations). Fail if the device is not "active" in +# 10 minutes. + +- name: Create device and wait up to 10 minutes for active state + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + wait_timeout: 600 + +- name: Create 3 ubuntu devices called server-01, server-02 and server-03 + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: server-%02d + count: 3 + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH + hosts: localhost + tasks: + - name: Create 3 devices and register their facts + community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_stable + plan: baremetal_0 + facility: ewr1 + locked: true + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + wait_for_public_IPv: 4 + user_data: | + #cloud-config + ssh_authorized_keys: + - {{ lookup('file', 'my_packet_sshkey') }} + coreos: + etcd: + discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + register: newhosts + + - name: Wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + with_items: "{{ newhosts.devices }}" + + +# Other states of devices + +- name: Remove 3 devices by uuid + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + state: absent + device_ids: + - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 + - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 + - 6bb4faf8-a638-4ac7-8f47-86fe514c301f +""" + +RETURN = r""" +devices: + description: Information about each device that was processed. 
+  type: list
+  sample:
+    - "hostname": "my-server.com"
+      "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7"
+      "public_ipv4": "147.229.15.12"
+      "private_ipv4": "10.0.15.12"
+      "tags": []
+      "locked": false
+      "state": "provisioning"
+      "public_ipv6": "2604:1380:2:5200::3"
+  returned: success
+"""
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+HAS_PACKET_SDK = True
+try:
+    import packet
+except ImportError:
+    HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+MAX_DEVICES = 100
+
+PACKET_DEVICE_STATES = (
+    'queued',
+    'provisioning',
+    'failed',
+    'powering_on',
+    'active',
+    'powering_off',
+    'inactive',
+    'rebooting',
+)
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
+
+
+def serialize_device(device):
+    """
+    Standard representation for a device as returned by various tasks::
+
+        {
+            'id': 'device_id',
+            'hostname': 'device_hostname',
+            'tags': [],
+            'locked': false,
+            'state': 'provisioning',
+            'ip_addresses': [
+                {
+                    "address": "147.75.194.227",
+                    "address_family": 4,
+                    "public": true
+                },
+                {
+                    "address": "2604:1380:2:5200::3",
+                    "address_family": 6,
+                    "public": true
+                },
+                {
+                    "address": "10.100.11.129",
+                    "address_family": 4,
+                    "public": false
+                }
+            ],
+            "private_ipv4": "10.100.11.129",
+            "public_ipv4": "147.75.194.227",
+            "public_ipv6": "2604:1380:2:5200::3",
+        }
+
+    """
+    device_data = {}
+    device_data['id'] = device.id
+    device_data['hostname'] = device.hostname
+    device_data['tags'] = device.tags
+    device_data['locked'] = device.locked
+    device_data['state'] = device.state
+    device_data['ip_addresses'] = [
+        {
+            'address': addr_data['address'],
+            'address_family': addr_data['address_family'],
+            'public': addr_data['public'],
+        }
+        for addr_data in device.ip_addresses
+    ]
+    # Also include each IP address as a top-level key for easier lookup in roles.
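A standalone sketch of the flattening performed just below: each entry of ip_addresses is sorted into a public_ipv4 / public_ipv6 / private_ipv4 / private_ipv6 top-level key. The sample addresses are illustrative:

```python
def flatten_ips(ip_addresses):
    flat = {}
    for ip in ip_addresses:
        scope = 'public' if ip['public'] else 'private'
        flat['%s_ipv%d' % (scope, ip['address_family'])] = ip['address']
    return flat

print(flatten_ips([
    {'address': '147.75.194.227', 'address_family': 4, 'public': True},
    {'address': '10.100.11.129', 'address_family': 4, 'public': False},
]))
# {'public_ipv4': '147.75.194.227', 'private_ipv4': '10.100.11.129'}
```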
+ # Key names: + # - public_ipv4 + # - public_ipv6 + # - private_ipv4 + # - private_ipv6 (if there is one) + for ipdata in device_data['ip_addresses']: + if ipdata['public']: + if ipdata['address_family'] == 6: + device_data['public_ipv6'] = ipdata['address'] + elif ipdata['address_family'] == 4: + device_data['public_ipv4'] = ipdata['address'] + elif not ipdata['public']: + if ipdata['address_family'] == 6: + # Packet doesn't give public ipv6 yet, but maybe one + # day they will + device_data['private_ipv6'] = ipdata['address'] + elif ipdata['address_family'] == 4: + device_data['private_ipv4'] = ipdata['address'] + return device_data + + +def is_valid_hostname(hostname): + return re.match(HOSTNAME_RE, hostname) is not None + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def listify_string_name_or_id(s): + if ',' in s: + return s.split(',') + else: + return [s] + + +def get_hostname_list(module): + # hostname is a list-typed param, so I guess it should return list + # (and it does, in Ansible 2.2.1) but in order to be defensive, + # I keep here the code to convert an eventual string to list + hostnames = module.params.get('hostnames') + count = module.params.get('count') + count_offset = module.params.get('count_offset') + if isinstance(hostnames, str): + hostnames = listify_string_name_or_id(hostnames) + if not isinstance(hostnames, list): + raise Exception("name %s is not convertible to list" % hostnames) + + # at this point, hostnames is a list + hostnames = [h.strip() for h in hostnames] + + if len(hostnames) > 1 and count > 1: + _msg = ("If you set count>1, you should only specify one hostname " + "with the %d formatter, not a list of hostnames.") + raise Exception(_msg) + + if len(hostnames) == 1 and count > 0: + hostname_spec = hostnames[0] + count_range = range(count_offset, count_offset + count) + if re.search(r"%\d{0,2}d", hostname_spec): + hostnames = [hostname_spec % i for i in count_range] + elif count > 1: + hostname_spec = '%s%%02d' % hostname_spec + hostnames = [hostname_spec % i for i in count_range] + + for hn in hostnames: + if not is_valid_hostname(hn): + raise Exception("Hostname '%s' does not seem to be valid" % hn) + + if len(hostnames) > MAX_DEVICES: + raise Exception("You specified too many hostnames, max is %d" % + MAX_DEVICES) + return hostnames + + +def get_device_id_list(module): + device_ids = module.params.get('device_ids') + + if isinstance(device_ids, str): + device_ids = listify_string_name_or_id(device_ids) + + device_ids = [di.strip() for di in device_ids] + + for di in device_ids: + if not is_valid_uuid(di): + raise Exception("Device ID '%s' does not seem to be valid" % di) + + if len(device_ids) > MAX_DEVICES: + raise Exception("You specified too many devices, max is %d" % + MAX_DEVICES) + return device_ids + + +def create_single_device(module, packet_conn, hostname): + + for param in ('hostnames', 'operating_system', 'plan'): + if not module.params.get(param): + raise Exception("%s parameter is required for new device." 
+ % param) + project_id = module.params.get('project_id') + plan = module.params.get('plan') + tags = module.params.get('tags') + user_data = module.params.get('user_data') + facility = module.params.get('facility') + operating_system = module.params.get('operating_system') + locked = module.params.get('locked') + ipxe_script_url = module.params.get('ipxe_script_url') + always_pxe = module.params.get('always_pxe') + if operating_system != 'custom_ipxe': + for param in ('ipxe_script_url', 'always_pxe'): + if module.params.get(param): + raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param) + + device = packet_conn.create_device( + project_id=project_id, + hostname=hostname, + tags=tags, + plan=plan, + facility=facility, + operating_system=operating_system, + userdata=user_data, + locked=locked, + ipxe_script_url=ipxe_script_url, + always_pxe=always_pxe) + return device + + +def refresh_device_list(module, packet_conn, devices): + device_ids = [d.id for d in devices] + new_device_list = get_existing_devices(module, packet_conn) + return [d for d in new_device_list if d.id in device_ids] + + +def wait_for_devices_active(module, packet_conn, watched_devices): + wait_timeout = module.params.get('wait_timeout') + wait_timeout = time.time() + wait_timeout + refreshed = watched_devices + while wait_timeout > time.time(): + refreshed = refresh_device_list(module, packet_conn, watched_devices) + if all(d.state == 'active' for d in refreshed): + return refreshed + time.sleep(5) + raise Exception("Waiting for state \"active\" timed out for devices: %s" + % [d.hostname for d in refreshed if d.state != "active"]) + + +def wait_for_public_IPv(module, packet_conn, created_devices): + + def has_public_ip(addr_list, ip_v): + return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) + + def all_have_public_ip(ds, ip_v): + return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) + + address_family = module.params.get('wait_for_public_IPv') + + wait_timeout = module.params.get('wait_timeout') + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + refreshed = refresh_device_list(module, packet_conn, created_devices) + if all_have_public_ip(refreshed, address_family): + return refreshed + time.sleep(5) + + raise Exception("Waiting for IPv%d address timed out. 
Hostnames: %s" + % (address_family, [d.hostname for d in created_devices])) + + +def get_existing_devices(module, packet_conn): + project_id = module.params.get('project_id') + return packet_conn.list_devices( + project_id, params={ + 'per_page': MAX_DEVICES}) + + +def get_specified_device_identifiers(module): + if module.params.get('device_ids'): + device_id_list = get_device_id_list(module) + return {'ids': device_id_list, 'hostnames': []} + elif module.params.get('hostnames'): + hostname_list = get_hostname_list(module) + return {'hostnames': hostname_list, 'ids': []} + + +def act_on_devices(module, packet_conn, target_state): + specified_identifiers = get_specified_device_identifiers(module) + existing_devices = get_existing_devices(module, packet_conn) + changed = False + create_hostnames = [] + if target_state in ['present', 'active', 'rebooted']: + # states where we might create non-existing specified devices + existing_devices_names = [ed.hostname for ed in existing_devices] + create_hostnames = [hn for hn in specified_identifiers['hostnames'] + if hn not in existing_devices_names] + + process_devices = [d for d in existing_devices + if (d.id in specified_identifiers['ids']) or + (d.hostname in specified_identifiers['hostnames'])] + + if target_state != 'present': + _absent_state_map = {} + for s in PACKET_DEVICE_STATES: + _absent_state_map[s] = packet.Device.delete + + state_map = { + 'absent': _absent_state_map, + 'active': {'inactive': packet.Device.power_on, + 'provisioning': None, 'rebooting': None + }, + 'inactive': {'active': packet.Device.power_off}, + 'rebooted': {'active': packet.Device.reboot, + 'inactive': packet.Device.power_on, + 'provisioning': None, 'rebooting': None + }, + } + + # First do non-creation actions, it might be faster + for d in process_devices: + if d.state == target_state: + continue + if d.state in state_map[target_state]: + api_operation = state_map[target_state].get(d.state) + if api_operation is not None: + api_operation(d) + changed = True + else: + _msg = ( + "I don't know how to process existing device %s from state %s " + "to state %s" % + (d.hostname, d.state, target_state)) + raise Exception(_msg) + + # At last create missing devices + created_devices = [] + if create_hostnames: + created_devices = [create_single_device(module, packet_conn, n) + for n in create_hostnames] + if module.params.get('wait_for_public_IPv'): + created_devices = wait_for_public_IPv( + module, packet_conn, created_devices) + changed = True + + processed_devices = created_devices + process_devices + if target_state == 'active': + processed_devices = wait_for_devices_active( + module, packet_conn, processed_devices) + + return { + 'changed': changed, + 'devices': [serialize_device(d) for d in processed_devices] + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), + no_log=True), + count=dict(type='int', default=1), + count_offset=dict(type='int', default=1), + device_ids=dict(type='list', elements='str'), + facility=dict(), + features=dict(type='dict'), + hostnames=dict(type='list', elements='str', aliases=['name']), + tags=dict(type='list', elements='str'), + locked=dict(type='bool', default=False, aliases=['lock']), + operating_system=dict(), + plan=dict(), + project_id=dict(required=True), + state=dict(choices=ALLOWED_STATES, default='present'), + user_data=dict(), + wait_for_public_IPv=dict(type='int', choices=[4, 6]), + wait_timeout=dict(type='int', default=900), + 
ipxe_script_url=dict(default=''), + always_pxe=dict(type='bool', default=False), + ), + required_one_of=[('device_ids', 'hostnames',)], + mutually_exclusive=[ + ('hostnames', 'device_ids'), + ('count', 'device_ids'), + ('count_offset', 'device_ids'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable %s, " + "the auth_token parameter is required" % + PACKET_API_TOKEN_ENV_VAR) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + try: + module.exit_json(**act_on_devices(module, packet_conn, state)) + except Exception as e: + module.fail_json(msg='failed to set device state %s, error: %s' % + (state, to_native(e)), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py deleted file mode 120000 index 1d1288f1f3..0000000000 --- a/plugins/modules/packet_ip_subnet.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/packet/packet_ip_subnet.py \ No newline at end of file diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py new file mode 100644 index 0000000000..e3b0204158 --- /dev/null +++ b/plugins/modules/packet_ip_subnet.py @@ -0,0 +1,327 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: packet_ip_subnet + +short_description: Assign IP subnet to a bare metal server + +description: + - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. + - IPv4 subnets must come from already reserved block. + - IPv6 subnets must come from publicly routable /56 block from your project. + - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + auth_token: + description: + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + type: str + + hostname: + description: + - A hostname of a device to/from which to assign/remove a subnet. + required: false + type: str + + device_id: + description: + - UUID of a device to/from which to assign/remove a subnet. + required: false + type: str + + project_id: + description: + - UUID of a project of the device to/from which to assign/remove a subnet. + type: str + + device_count: + description: + - The number of devices to retrieve from the project. The max allowed value is 1000. + - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info. + default: 100 + type: int + + cidr: + description: + - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host. + aliases: [name] + type: str + required: true + + state: + description: + - Desired state of the IP subnet on the specified device. 
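The O(cidr) value documented above is validated by parse_subnet_cidr, defined further down in this module; the function is reproduced here in runnable form with a small demonstration:

```python
def parse_subnet_cidr(cidr):
    if "/" not in cidr:
        raise Exception("CIDR expression in wrong format, must be address/prefix_len")
    addr, prefixlen = cidr.split("/")
    try:
        prefixlen = int(prefixlen)
    except ValueError:
        raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr))
    return addr, prefixlen

print(parse_subnet_cidr("147.75.201.78/32"))  # ('147.75.201.78', 32)
```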
+ - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR is then assigned + to the specified device. + - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet is then removed from specified + devices. + - If you leave both O(hostname) and O(device_id) empty, the subnet is then removed from any device it is assigned to. + choices: ['present', 'absent'] + default: 'present' + type: str + +requirements: + - "packet-python >= 1.35" +""" + +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass it to the auth_token parameter of the module instead. + +- name: Create 1 device and assign an arbitrary public IPv4 subnet to it + hosts: localhost + tasks: + + - packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + +# Pick an IPv4 address from a block allocated to your project. + + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostname: myserver + cidr: "147.75.201.78/32" + +# Release IP address 147.75.201.78 + +- name: Unassign IP address from any device in your project + hosts: localhost + tasks: + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + cidr: "147.75.201.78/32" + state: absent +""" + +RETURN = r""" +device_id: + type: str + description: UUID of the device associated with the specified IP address. + returned: success + +subnet: + description: Dict with data about the handled IP subnet. + type: dict + sample: + address: 147.75.90.241 + address_family: 4 + assigned_to: {href: /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0} + cidr: 31 + created_at: '2017-08-07T15:15:30Z' + enabled: true + gateway: 147.75.90.240 + href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f + id: 1eda960-0a16-4c0f-b196-f3dc4928529f + manageable: true + management: true + netmask: 255.255.255.254 + network: 147.75.90.240 + public: true + returned: success +""" + + +import uuid +import re + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') +HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) +PROJECT_MAX_DEVICES = 100 + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +ALLOWED_STATES = ['absent', 'present'] + + +def is_valid_hostname(hostname): + return re.match(HOSTNAME_RE, hostname) is not None + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_existing_devices(module, packet_conn): + project_id = module.params.get('project_id') + if not is_valid_uuid(project_id): + raise Exception("Project ID {0} does not seem to be valid".format(project_id)) + + per_page = module.params.get('device_count') + return packet_conn.list_devices( + project_id, params={'per_page': per_page}) + + +def get_specified_device_identifiers(module): + if module.params.get('device_id'): + _d_id = module.params.get('device_id') + if not is_valid_uuid(_d_id): + raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id)) + return {'device_id': _d_id, 'hostname': None} + elif module.params.get('hostname'): + _hn = 
module.params.get('hostname') + if not is_valid_hostname(_hn): + raise Exception("Hostname '{0}' does not seem to be valid".format(_hn)) + return {'hostname': _hn, 'device_id': None} + else: + return {'hostname': None, 'device_id': None} + + +def parse_subnet_cidr(cidr): + if "/" not in cidr: + raise Exception("CIDR expression is in the wrong format; it must be address/prefix_len") + addr, prefixlen = cidr.split("/") + try: + prefixlen = int(prefixlen) + except ValueError: + raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr)) + return addr, prefixlen + + +def act_on_assignment(target_state, module, packet_conn): + return_dict = {'changed': False} + specified_cidr = module.params.get("cidr") + address, prefixlen = parse_subnet_cidr(specified_cidr) + + specified_identifier = get_specified_device_identifiers(module) + + if module.check_mode: + return return_dict + + if (specified_identifier['hostname'] is None) and ( + specified_identifier['device_id'] is None): + if target_state == 'absent': + # The special case to release the IP from any assignment + for d in get_existing_devices(module, packet_conn): + for ia in d.ip_addresses: + if address == ia['address'] and prefixlen == ia['cidr']: + packet_conn.call_api(ia['href'], "DELETE") + return_dict['changed'] = True + return_dict['subnet'] = ia + return_dict['device_id'] = d.id + return return_dict + raise Exception("If you assign an address, you must specify either " + "target device ID or target unique hostname.") + + if specified_identifier['device_id'] is not None: + device = packet_conn.get_device(specified_identifier['device_id']) + else: + all_devices = get_existing_devices(module, packet_conn) + hn = specified_identifier['hostname'] + matching_devices = [d for d in all_devices if d.hostname == hn] + if len(matching_devices) > 1: + raise Exception("More than one device matches the given hostname {0}".format(hn)) + if len(matching_devices) == 0: + raise Exception("No device matches the given hostname {0}".format(hn)) + device = matching_devices[0] + + return_dict['device_id'] = device.id + assignment_dicts = [i for i in device.ip_addresses + if i['address'] == address and i['cidr'] == prefixlen] + if len(assignment_dicts) > 1: + raise Exception("IP address {0} is assigned more than once for device {1}".format( + specified_cidr, device.hostname)) + + if target_state == "absent": + if len(assignment_dicts) == 1: + packet_conn.call_api(assignment_dicts[0]['href'], "DELETE") + return_dict['subnet'] = assignment_dicts[0] + return_dict['changed'] = True + elif target_state == "present": + if len(assignment_dicts) == 0: + new_assignment = packet_conn.call_api( + "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)}) + return_dict['changed'] = True + return_dict['subnet'] = new_assignment + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + device_id=dict(type='str'), + hostname=dict(type='str'), + project_id=dict(type='str'), + device_count=dict(type='int', default=PROJECT_MAX_DEVICES), + cidr=dict(type='str', required=True, aliases=['name']), + state=dict(choices=ALLOWED_STATES, default='present'), + ), + supports_check_mode=True, + mutually_exclusive=[('hostname', 'device_id')], + required_one_of=[['hostname', 'device_id', 'project_id']], + required_by=dict( + hostname=('project_id',), + ), + ) + + if not HAS_PACKET_SDK: +
module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + try: + module.exit_json(**act_on_assignment(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py deleted file mode 120000 index 2a5fdd9d6b..0000000000 --- a/plugins/modules/packet_project.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/packet/packet_project.py \ No newline at end of file diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py new file mode 100644 index 0000000000..be69c3b5b8 --- /dev/null +++ b/plugins/modules/packet_project.py @@ -0,0 +1,242 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2019, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: packet_project + +short_description: Create/delete a project in Packet host + +description: + - Create/delete a project in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#projects). +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + + payment_method: + description: + - Payment method is name of one of the payment methods available to your user. + - When blank, the API assumes the default payment method. + type: str + + auth_token: + description: + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + type: str + + name: + description: + - Name for/of the project. + type: str + + org_id: + description: + - UUID of the organization to create a project for. + - When blank, the API assumes the default organization. + type: str + + id: + description: + - UUID of the project which you want to remove. + type: str + + custom_data: + description: + - Custom data about the project to create. + type: str + +requirements: + - "packet-python >= 1.40" +""" + +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. 
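+# When id is not given, the project name serves as the idempotent identifier: a project is only created if no existing project already has that name.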
+ +- name: Create new project + hosts: localhost + tasks: + community.general.packet_project: + name: "new project" + +- name: Create new project within non-default organization + hosts: localhost + tasks: + community.general.packet_project: + name: "my org project" + org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0 + +- name: Remove project by id + hosts: localhost + tasks: + community.general.packet_project: + state: absent + id: eef49903-7a09-4ca1-af67-4087c29ab5b6 + +- name: Create new project with non-default billing method + hosts: localhost + tasks: + community.general.packet_project: + name: "newer project" + payment_method: "the other visa" +""" + +RETURN = r""" +name: + description: Name of addressed project. + type: str + returned: success + +id: + description: UUID of addressed project. + type: str + returned: success +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +def act_on_project(target_state, module, packet_conn): + result_dict = {'changed': False} + given_id = module.params.get('id') + given_name = module.params.get('name') + if given_id: + matching_projects = [ + p for p in packet_conn.list_projects() if given_id == p.id] + else: + matching_projects = [ + p for p in packet_conn.list_projects() if given_name == p.name] + + if target_state == 'present': + if len(matching_projects) == 0: + org_id = module.params.get('org_id') + custom_data = module.params.get('custom_data') + payment_method = module.params.get('payment_method') + + if not org_id: + params = { + "name": given_name, + "payment_method_id": payment_method, + "customdata": custom_data + } + new_project_data = packet_conn.call_api("projects", "POST", params) + new_project = packet.Project(new_project_data, packet_conn) + else: + new_project = packet_conn.create_organization_project( + org_id=org_id, + name=given_name, + payment_method_id=payment_method, + customdata=custom_data + ) + + result_dict['changed'] = True + matching_projects.append(new_project) + + result_dict['name'] = matching_projects[0].name + result_dict['id'] = matching_projects[0].id + else: + if len(matching_projects) > 1: + _msg = ("More than one project matched for module call with state = absent: " + "{0}".format(to_native(matching_projects))) + module.fail_json(msg=_msg) + + if len(matching_projects) == 1: + p = matching_projects[0] + result_dict['name'] = p.name + result_dict['id'] = p.id + result_dict['changed'] = True + try: + p.delete() + except Exception as e: + _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format( + p.name, p.id, to_native(e))) + module.fail_json(msg=_msg) + return result_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + name=dict(type='str'), + id=dict(type='str'), + org_id=dict(type='str'), + payment_method=dict(type='str'), + custom_data=dict(type='str'), + ), + supports_check_mode=True, + required_one_of=[("name", "id",)], + mutually_exclusive=[ + ('name', 'id'), + ] + ) + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " +
"the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in ['present', 'absent']: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json(**act_on_project(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set project state {0}: {1}".format(state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py deleted file mode 120000 index dd323eb867..0000000000 --- a/plugins/modules/packet_sshkey.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/packet/packet_sshkey.py \ No newline at end of file diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py new file mode 100644 index 0000000000..a6962e34c0 --- /dev/null +++ b/plugins/modules/packet_sshkey.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# Copyright 2016 Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: packet_sshkey +short_description: Create/delete an SSH key in Packet host +description: + - Create/delete an SSH key in Packet host. + - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). +author: "Tomas Karasek (@t0mk) " +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + auth_token: + description: + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + type: str + label: + description: + - Label for the key. If you keep it empty, it is read from key string. + type: str + aliases: [name] + id: + description: + - UUID of the key which you want to remove. + type: str + fingerprint: + description: + - Fingerprint of the key which you want to remove. + type: str + key: + description: + - Public Key string (V({type} {base64 encoded key} {description})). + type: str + key_file: + description: + - File with the public key. + type: path + +requirements: + - packet-python +""" + +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. + +- name: Create sshkey from string + hosts: localhost + tasks: + community.general.packet_sshkey: + key: "{{ lookup('file', 'my_packet_sshkey.pub') }}" + +- name: Create sshkey from file + hosts: localhost + tasks: + community.general.packet_sshkey: + label: key from file + key_file: ~/ff.pub + +- name: Remove sshkey by id + hosts: localhost + tasks: + community.general.packet_sshkey: + state: absent + id: eef49903-7a09-4ca1-af67-4087c29ab5b6 +""" + +RETURN = r""" +sshkeys: + description: Information about sshkeys that were created/removed. + type: list + sample: + [ + { + "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", + "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", + "key": "ssh-dss AAAAB3NzaC1kc3MAAACBA ... 
MdDxfmcsCslJKgoRKSmQpCwXQtN2g== user@server", + "label": "mynewkey33" + } + ] + returned: always +""" + +import os +import uuid + +from ansible.module_utils.basic import AnsibleModule + +HAS_PACKET_SDK = True +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +def serialize_sshkey(sshkey): + sshkey_data = {} + copy_keys = ['id', 'key', 'label', 'fingerprint'] + for name in copy_keys: + sshkey_data[name] = getattr(sshkey, name) + return sshkey_data + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def load_key_string(key_str): + ret_dict = {} + key_str = key_str.strip() + ret_dict['key'] = key_str + cut_key = key_str.split() + if len(cut_key) in [2, 3]: + if len(cut_key) == 3: + ret_dict['label'] = cut_key[2] + else: + raise Exception("Public key %s is in the wrong format" % key_str) + return ret_dict + + +def get_sshkey_selector(module): + key_id = module.params.get('id') + if key_id: + if not is_valid_uuid(key_id): + raise Exception("sshkey ID %s is not a valid UUID" % key_id) + selecting_fields = ['label', 'fingerprint', 'id', 'key'] + select_dict = {} + for f in selecting_fields: + if module.params.get(f) is not None: + select_dict[f] = module.params.get(f) + + if module.params.get('key_file'): + with open(module.params.get('key_file')) as _file: + loaded_key = load_key_string(_file.read()) + select_dict['key'] = loaded_key['key'] + if module.params.get('label') is None: + if loaded_key.get('label'): + select_dict['label'] = loaded_key['label'] + + def selector(k): + if 'key' in select_dict: + # if key string is specified, compare only the key strings + return k.key == select_dict['key'] + else: + # if key string not specified, all the fields must match + return all(select_dict[f] == getattr(k, f) for f in select_dict) + return selector + + +def act_on_sshkeys(target_state, module, packet_conn): + selector = get_sshkey_selector(module) + existing_sshkeys = packet_conn.list_ssh_keys() + matching_sshkeys = list(filter(selector, existing_sshkeys)) + changed = False + if target_state == 'present': + if matching_sshkeys == []: + # there is no key matching the fields from module call + # => create the key, assembling its label and key string first + newkey = {} + if module.params.get('key_file'): + with open(module.params.get('key_file')) as f: + newkey = load_key_string(f.read()) + if module.params.get('key'): + newkey = load_key_string(module.params.get('key')) + if module.params.get('label'): + newkey['label'] = module.params.get('label') + for param in ('label', 'key'): + if param not in newkey: + _msg = ("If you want to ensure a key is present, you must " + "supply both a label and a key string, either in " + "module params, or in a key file. 
%s is missing" + % param) + raise Exception(_msg) + matching_sshkeys = [] + new_key_response = packet_conn.create_ssh_key( + newkey['label'], newkey['key']) + changed = True + + matching_sshkeys.append(new_key_response) + else: + # state is 'absent' => delete matching keys + for k in matching_sshkeys: + try: + k.delete() + changed = True + except Exception as e: + _msg = ("while trying to remove sshkey %s, id %s %s, " + "got error: %s" % + (k.label, k.id, target_state, e)) + raise Exception(_msg) + + return { + 'changed': changed, + 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys] + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), + no_log=True), + label=dict(type='str', aliases=['name']), + id=dict(type='str'), + fingerprint=dict(type='str'), + key=dict(type='str', no_log=True), + key_file=dict(type='path'), + ), + mutually_exclusive=[ + ('label', 'id'), + ('label', 'fingerprint'), + ('id', 'fingerprint'), + ('key', 'fingerprint'), + ('key', 'id'), + ('key_file', 'key'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable %s, " + "the auth_token parameter is required" % + PACKET_API_TOKEN_ENV_VAR) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in ['present', 'absent']: + try: + module.exit_json(**act_on_sshkeys(state, module, packet_conn)) + except Exception as e: + module.fail_json(msg='failed to set sshkey state: %s' % str(e)) + else: + module.fail_json(msg='%s is not a valid state for this module' % state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packet_volume.py b/plugins/modules/packet_volume.py deleted file mode 120000 index 1828666e1b..0000000000 --- a/plugins/modules/packet_volume.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/packet/packet_volume.py \ No newline at end of file diff --git a/plugins/modules/packet_volume.py b/plugins/modules/packet_volume.py new file mode 100644 index 0000000000..826d9bc854 --- /dev/null +++ b/plugins/modules/packet_volume.py @@ -0,0 +1,324 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: packet_volume + +short_description: Create/delete a volume in Packet host + +description: + - Create/delete a volume in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#volumes). +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Desired state of the volume. + default: present + choices: ['present', 'absent'] + type: str + + project_id: + description: + - ID of project of the device. + required: true + type: str + + auth_token: + description: + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). 
+ type: str + + name: + description: + - Selector for API-generated name of the volume. + type: str + + description: + description: + - User-defined description attribute for Packet volume. + - It is used as an idempotent identifier - if a volume with the given description exists, a new one is not created. + type: str + + id: + description: + - UUID of a volume. + type: str + + plan: + description: + - V(storage_1) for standard tier, V(storage_2) for premium (performance) tier. + - Tiers are described at U(https://www.packet.com/cloud/storage/). + choices: ['storage_1', 'storage_2'] + default: 'storage_1' + type: str + + facility: + description: + - Location of the volume. + - Volumes can only be attached to a device in the same location. + type: str + + size: + description: + - Size of the volume in gigabytes. + type: int + + locked: + description: + - Create new volume locked. + type: bool + default: false + + billing_cycle: + description: + - Billing cycle for new volume. + choices: ['hourly', 'monthly'] + default: 'hourly' + type: str + + snapshot_policy: + description: + - Snapshot policy for new volume. + type: dict + + suboptions: + snapshot_count: + description: + - How many snapshots to keep, a positive integer. + required: true + type: int + + snapshot_frequency: + description: + - Frequency of snapshots. + required: true + choices: ["15min", "1hour", "1day", "1week", "1month", "1year"] + type: str + +requirements: + - "packet-python >= 1.35" +""" + +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. + +- hosts: localhost + vars: + volname: testvol123 + project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b + + tasks: + - name: Create volume + community.general.packet_volume: + description: "{{ volname }}" + project_id: "{{ project_id }}" + facility: 'ewr1' + plan: 'storage_1' + state: present + size: 10 + snapshot_policy: + snapshot_count: 10 + snapshot_frequency: 1day + register: result_create + + - name: Delete volume + community.general.packet_volume: + id: "{{ result_create.id }}" + project_id: "{{ project_id }}" + state: absent +""" + +RETURN = r""" +id: + description: UUID of specified volume. + type: str + returned: success + sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c +name: + description: The API-generated name of the volume resource. + type: str + returned: if volume is attached/detached to/from some device + sample: "volume-a91dc506" +description: + description: The user-defined description of the volume resource. 
+ type: str + returned: success + sample: "Just another volume" +""" + +import uuid + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + +VOLUME_PLANS = ["storage_1", "storage_2"] +VOLUME_STATES = ["present", "absent"] +BILLING = ["hourly", "monthly"] + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_volume_selector(module): + if module.params.get('id'): + i = module.params.get('id') + if not is_valid_uuid(i): + raise Exception("Volume ID '{0}' is not a valid UUID".format(i)) + return lambda v: v['id'] == i + elif module.params.get('name'): + n = module.params.get('name') + return lambda v: v['name'] == n + elif module.params.get('description'): + d = module.params.get('description') + return lambda v: v['description'] == d + + +def get_or_fail(params, key): + item = params.get(key) + if item is None: + raise Exception("{0} must be specified for new volume".format(key)) + return item + + +def act_on_volume(target_state, module, packet_conn): + return_dict = {'changed': False} + s = get_volume_selector(module) + project_id = module.params.get("project_id") + api_method = "projects/{0}/storage".format(project_id) + all_volumes = packet_conn.call_api(api_method, "GET")['volumes'] + matching_volumes = [v for v in all_volumes if s(v)] + + if target_state == "present": + if len(matching_volumes) == 0: + params = { + "description": get_or_fail(module.params, "description"), + "size": get_or_fail(module.params, "size"), + "plan": get_or_fail(module.params, "plan"), + "facility": get_or_fail(module.params, "facility"), + "locked": get_or_fail(module.params, "locked"), + "billing_cycle": get_or_fail(module.params, "billing_cycle"), + "snapshot_policies": module.params.get("snapshot_policy"), + } + + new_volume_data = packet_conn.call_api(api_method, "POST", params) + return_dict['changed'] = True + for k in ['id', 'name', 'description']: + return_dict[k] = new_volume_data[k] + + else: + for k in ['id', 'name', 'description']: + return_dict[k] = matching_volumes[0][k] + + else: + if len(matching_volumes) > 1: + _msg = ("More than one volume matches in module call for absent state: {0}".format( + to_native(matching_volumes))) + module.fail_json(msg=_msg) + + if len(matching_volumes) == 1: + volume = matching_volumes[0] + packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE") + return_dict['changed'] = True + for k in ['id', 'name', 'description']: + return_dict[k] = volume[k] + + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str'), + description=dict(type="str"), + name=dict(type='str'), + state=dict(choices=VOLUME_STATES, default="present"), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + project_id=dict(required=True), + plan=dict(choices=VOLUME_PLANS, default="storage_1"), + facility=dict(type="str"), + size=dict(type="int"), + locked=dict(type="bool", default=False), + snapshot_policy=dict(type='dict'), + billing_cycle=dict(type='str', choices=BILLING, default="hourly"), + ), + supports_check_mode=True, + required_one_of=[("name", "id", "description")], + mutually_exclusive=[ + ('name', 'id'), + ('id', 'description'), + ('name', 
'description'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in VOLUME_STATES: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json(**act_on_volume(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set volume state {0}: {1}".format( + state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py deleted file mode 120000 index afe16752fd..0000000000 --- a/plugins/modules/packet_volume_attachment.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/packet/packet_volume_attachment.py \ No newline at end of file diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py new file mode 100644 index 0000000000..4308233bc1 --- /dev/null +++ b/plugins/modules/packet_volume_attachment.py @@ -0,0 +1,301 @@ +#!/usr/bin/python + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: packet_volume_attachment + +short_description: Attach/detach a volume to a device in the Packet host + +description: + - Attach/detach a volume to a device in the Packet host. + - API is documented at U(https://www.packet.com/developers/api/volumes/). + - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you + have to run the Attach Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux). +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Indicate desired state of the attachment. + default: present + choices: ['present', 'absent'] + type: str + + auth_token: + description: + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + type: str + + project_id: + description: + - UUID of the project to which the device and volume belong. + type: str + required: true + + volume: + description: + - Selector for the volume. + - It can be a UUID, an API-generated volume name, or user-defined description string. + - 'Example values: V(4a347482-b546-4f67-8300-fb5018ef0c5), V(volume-4a347482), V(my volume).' + type: str + required: true + + device: + description: + - Selector for the device. + - It can be a UUID of the device, or a hostname. + - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device".' + type: str + +requirements: + - "packet-python >= 1.35" +""" + +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. 
+# You can also pass the api token in module param auth_token. + +- hosts: localhost + + vars: + volname: testvol + devname: testdev + project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b + + tasks: + - name: Create volume + packet_volume: + description: "{{ volname }}" + project_id: "{{ project_id }}" + facility: ewr1 + plan: storage_1 + state: present + size: 10 + snapshot_policy: + snapshot_count: 10 + snapshot_frequency: 1day + + - name: Create a device + packet_device: + project_id: "{{ project_id }}" + hostnames: "{{ devname }}" + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: ewr1 + state: present + + - name: Attach testvol to testdev + community.general.packet_volume_attachment: + project_id: "{{ project_id }}" + volume: "{{ volname }}" + device: "{{ devname }}" + + - name: Detach testvol from testdev + community.general.packet_volume_attachment: + project_id: "{{ project_id }}" + volume: "{{ volname }}" + device: "{{ devname }}" + state: absent +""" + +RETURN = r""" +volume_id: + description: UUID of volume addressed by the module call. + type: str + returned: success + +device_id: + description: UUID of device addressed by the module call. + type: str + returned: success +""" + +import uuid + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + +STATES = ["present", "absent"] + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_volume_selector(spec): + if is_valid_uuid(spec): + return lambda v: v['id'] == spec + else: + return lambda v: v['name'] == spec or v['description'] == spec + + +def get_device_selector(spec): + if is_valid_uuid(spec): + return lambda v: v['id'] == spec + else: + return lambda v: v['hostname'] == spec + + +def do_attach(packet_conn, vol_id, dev_id): + api_method = "storage/{0}/attachments".format(vol_id) + packet_conn.call_api( + api_method, + params={"device_id": dev_id}, + type="POST") + + +def do_detach(packet_conn, vol, dev_id=None): + def dev_match(a): + return (dev_id is None) or (a['device']['id'] == dev_id) + for a in vol['attachments']: + if dev_match(a): + packet_conn.call_api(a['href'], type="DELETE") + + +def validate_selected(l, resource_type, spec): + if len(l) > 1: + _msg = ("more than one {0} matches specification {1}: {2}".format( + resource_type, spec, l)) + raise Exception(_msg) + if len(l) == 0: + _msg = "no {0} matches specification: {1}".format(resource_type, spec) + raise Exception(_msg) + + +def get_attached_dev_ids(volume_dict): + if len(volume_dict['attachments']) == 0: + return [] + else: + return [a['device']['id'] for a in volume_dict['attachments']] + + +def act_on_volume_attachment(target_state, module, packet_conn): + return_dict = {'changed': False} + volspec = module.params.get("volume") + devspec = module.params.get("device") + if devspec is None and target_state == 'present': + raise Exception("If you want to attach a volume, you must specify a device.") + project_id = module.params.get("project_id") + volumes_api_method = "projects/{0}/storage".format(project_id) + volumes = packet_conn.call_api(volumes_api_method, + params={'include': 'facility,attachments.device'})['volumes'] + v_match = get_volume_selector(volspec) + matching_volumes = [v for v in volumes if v_match(v)] + 
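# validate_selected() below raises unless exactly one volume matched the selector +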
validate_selected(matching_volumes, "volume", volspec) + volume = matching_volumes[0] + return_dict['volume_id'] = volume['id'] + + device = None + if devspec is not None: + devices_api_method = "projects/{0}/devices".format(project_id) + devices = packet_conn.call_api(devices_api_method)['devices'] + d_match = get_device_selector(devspec) + matching_devices = [d for d in devices if d_match(d)] + validate_selected(matching_devices, "device", devspec) + device = matching_devices[0] + return_dict['device_id'] = device['id'] + + attached_device_ids = get_attached_dev_ids(volume) + + if target_state == "present": + if len(attached_device_ids) == 0: + do_attach(packet_conn, volume['id'], device['id']) + return_dict['changed'] = True + elif device['id'] not in attached_device_ids: + # Don't reattach volume which is attached to a different device. + # Rather fail than force remove a device on state == 'present'. + raise Exception("volume {0} is already attached to device {1}".format( + volume, attached_device_ids)) + else: + if device is None: + if len(attached_device_ids) > 0: + do_detach(packet_conn, volume) + return_dict['changed'] = True + elif device['id'] in attached_device_ids: + do_detach(packet_conn, volume, device['id']) + return_dict['changed'] = True + + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=STATES, default="present"), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + volume=dict(type="str", required=True), + project_id=dict(type="str", required=True), + device=dict(type="str"), + ), + supports_check_mode=True, + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in STATES: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json( + **act_on_volume_attachment(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py deleted file mode 120000 index 50b731f31e..0000000000 --- a/plugins/modules/pacman.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/pacman.py \ No newline at end of file diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py new file mode 100644 index 0000000000..49d6c9a571 --- /dev/null +++ b/plugins/modules/pacman.py @@ -0,0 +1,848 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Afterburn +# Copyright (c) 2013, Aaron Bull Schaefer +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2022, Jean Raby +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pacman +short_description: Manage packages with I(pacman) +description: + - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. 
+author: + - Indrajit Raychaudhuri (@indrajitr) + - Aaron Bull Schaefer (@elasticdog) + - Maxime de Roucy (@tchernomax) + - Jean Raby (@jraby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + description: + - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with + O(upgrade). + aliases: [package, pkg] + type: list + elements: str + + state: + description: + - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package. + - V(present) and V(installed) simply ensure that a desired package is installed. + - V(latest) updates the specified package if it is not of the latest available version. + - V(absent) and V(removed) remove the specified package. + default: present + choices: [absent, installed, latest, present, removed] + type: str + + force: + description: + - When removing packages, forcefully remove them, without any checks. Same as O(extra_args="--nodeps --nodeps"). + - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh + --refresh"). + default: false + type: bool + + remove_nosave: + description: + - When removing packages, do not save modified configuration files as C(.pacsave) files (passes C(--nosave) to pacman). + version_added: 4.6.0 + default: false + type: bool + + executable: + description: + - Path of the binary to use. This can either be C(pacman) or a pacman-compatible AUR helper. + - Pacman compatibility is unfortunately ill-defined; in particular, this module makes extensive use of the C(--print-format) + directive, which is known not to be implemented by some AUR helpers (notably, C(yay)). + - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. + default: pacman + type: str + version_added: 3.1.0 + + extra_args: + description: + - Additional option to pass to pacman when enforcing O(state). + default: '' + type: str + + update_cache: + description: + - Whether or not to refresh the master package lists. + - This can be run as part of a package installation or as a separate step. + - If not specified, it defaults to V(false). + - Please note that before community.general 5.0.0 this option only had an influence on the module's C(changed) state + if O(name) and O(upgrade) are not specified. See the examples for how to keep the old behavior. + type: bool + + update_cache_extra_args: + description: + - Additional option to pass to pacman when enforcing O(update_cache). + default: '' + type: str + + upgrade: + description: + - Whether or not to upgrade the whole system. Cannot be used in combination with O(name). + - If not specified, it defaults to V(false). + type: bool + + upgrade_extra_args: + description: + - Additional option to pass to pacman when enforcing O(upgrade). + default: '' + type: str + + reason: + description: + - The install reason to set for the packages. + choices: [dependency, explicit] + type: str + version_added: 5.4.0 + + reason_for: + description: + - Set the install reason for V(all) packages or only for V(new) packages. + - In case of O(state=latest), already installed packages which are updated to a newer version are not counted as V(new). 
+ default: new + choices: [all, new] + type: str + version_added: 5.4.0 + +notes: + - When used with a C(loop:), each package is processed individually; it is much more efficient to pass the list directly + to the O(name) option. + - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated + build user with permissions to install packages could be necessary. + - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error: + C(error: target not found: ). This is caused by an incompatibility of yay with the arguments passed by this module. + See L(yay bug #1744 report for details, https://github.com/Jguer/yay/issues/1744).' + - The common return values C(stdout) and C(stderr) are returned upon success, when needed, since community.general 4.1.0. +""" + +RETURN = r""" +packages: + description: + - A list of packages that have been changed. + - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes + omitted when the package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified + or O(upgrade=true). + returned: success and O(name) is specified or O(upgrade=true) + type: list + elements: str + sample: ["package", "other-package"] + +cache_updated: + description: + - The changed status of C(pacman -Sy). + - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true). + returned: success, when O(update_cache=true) + type: bool + sample: false + version_added: 4.6.0 +""" + +EXAMPLES = r""" +- name: Install package foo from repo + community.general.pacman: + name: foo + state: present + +- name: Install package bar from file + community.general.pacman: + name: ~/bar-1.0-1-any.pkg.tar.xz + state: present + +- name: Install package foo from repo and bar from file + community.general.pacman: + name: + - foo + - ~/bar-1.0-1-any.pkg.tar.xz + state: present + +- name: Install package from AUR using a Pacman-compatible AUR helper + community.general.pacman: + name: foo + state: present + executable: yay + extra_args: --builddir /var/cache/yay + +- name: Upgrade package foo + # The 'changed' state of this call will indicate whether the cache was + # updated *or* whether foo was installed/upgraded. + community.general.pacman: + name: foo + state: latest + update_cache: true + +- name: Remove packages foo and bar + community.general.pacman: + name: + - foo + - bar + state: absent + +- name: Recursively remove package baz + community.general.pacman: + name: baz + state: absent + extra_args: --recursive + +- name: Run the equivalent of "pacman -Sy" as a separate step + community.general.pacman: + update_cache: true + +- name: Run the equivalent of "pacman -Su" as a separate step + community.general.pacman: + upgrade: true + +- name: Run the equivalent of "pacman -Syu" as a separate step + # Since community.general 5.0.0 the 'changed' state of this call + # will be 'true' in case the cache was updated, or when a package + # was updated. + # + # The previous behavior was to only indicate whether something was + # upgraded. 
To keep the old behavior, add the following to the task: + # + # register: result + # changed_when: result.packages | length > 0 + community.general.pacman: + update_cache: true + upgrade: true + +- name: Run the equivalent of "pacman -Rdd", force remove package baz + community.general.pacman: + name: baz + state: absent + force: true + +- name: Install foo as dependency and leave reason untouched if already installed + community.general.pacman: + name: foo + state: present + reason: dependency + reason_for: new + +- name: Run the equivalent of "pacman -S --asexplicit", mark foo as explicit and install it if not present + community.general.pacman: + name: foo + state: present + reason: explicit + reason_for: all +""" + +import re +import shlex +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict, namedtuple + + +class Package(object): + def __init__(self, name, source, source_is_URL=False): + self.name = name + self.source = source + self.source_is_URL = source_is_URL + + def __eq__(self, o): + return self.name == o.name and self.source == o.source and self.source_is_URL == o.source_is_URL + + def __lt__(self, o): + return self.name < o.name + + def __repr__(self): + return 'Package("%s", "%s", %s)' % (self.name, self.source, self.source_is_URL) + + +VersionTuple = namedtuple("VersionTuple", ["current", "latest"]) + + +class Pacman(object): + def __init__(self, module): + self.m = module + + self.m.run_command_environ_update = dict(LC_ALL="C") + p = self.m.params + + self._msgs = [] + self._stdouts = [] + self._stderrs = [] + self.changed = False + self.exit_params = {} + + self.pacman_path = self.m.get_bin_path(p["executable"], True) + + self._cached_database = None + + # Normalize for old configs + if p["state"] == "installed": + self.target_state = "present" + elif p["state"] == "removed": + self.target_state = "absent" + else: + self.target_state = p["state"] + + def add_exit_infos(self, msg=None, stdout=None, stderr=None): + if msg: + self._msgs.append(msg) + if stdout: + self._stdouts.append(stdout) + if stderr: + self._stderrs.append(stderr) + + def _set_mandatory_exit_params(self): + msg = "\n".join(self._msgs) + stdouts = "\n".join(self._stdouts) + stderrs = "\n".join(self._stderrs) + if stdouts: + self.exit_params["stdout"] = stdouts + if stderrs: + self.exit_params["stderr"] = stderrs + self.exit_params["msg"] = msg # mandatory, but might be empty + + def fail(self, msg=None, stdout=None, stderr=None, **kwargs): + self.add_exit_infos(msg, stdout, stderr) + self._set_mandatory_exit_params() + if kwargs: + self.exit_params.update(**kwargs) + self.m.fail_json(**self.exit_params) + + def success(self): + self._set_mandatory_exit_params() + self.m.exit_json(changed=self.changed, **self.exit_params) + + def run(self): + if self.m.params["update_cache"]: + self.update_package_db() + + if not (self.m.params["name"] or self.m.params["upgrade"]): + self.success() + + self.inventory = self._build_inventory() + if self.m.params["upgrade"]: + self.upgrade() + self.success() + + if self.m.params["name"]: + pkgs = self.package_list() + + if self.target_state == "absent": + self.remove_packages(pkgs) + self.success() + else: + self.install_packages(pkgs) + self.success() + + # This happens if an empty list has been provided for name + self.add_exit_infos(msg='Nothing to do') + self.success() + + def install_packages(self, pkgs): + pkgs_to_install = [] + pkgs_to_install_from_url = [] + pkgs_to_set_reason = [] + for p in pkgs: + if self.m.params["reason"] and 
( + p.name not in self.inventory["pkg_reasons"] + or self.m.params["reason_for"] == "all" + and self.inventory["pkg_reasons"][p.name] != self.m.params["reason"] + ): + pkgs_to_set_reason.append(p.name) + if p.source_is_URL: + # URL packages bypass the latest / upgradable_pkgs test + # They go through the dry-run to let pacman decide if they will be installed + pkgs_to_install_from_url.append(p) + continue + if ( + p.name not in self.inventory["installed_pkgs"] + or self.target_state == "latest" + and p.name in self.inventory["upgradable_pkgs"] + ): + pkgs_to_install.append(p) + + if len(pkgs_to_install) == 0 and len(pkgs_to_install_from_url) == 0 and len(pkgs_to_set_reason) == 0: + self.exit_params["packages"] = [] + self.add_exit_infos("package(s) already installed") + return + + cmd_base = [ + self.pacman_path, + "--noconfirm", + "--noprogressbar", + "--needed", + ] + if self.m.params["extra_args"]: + cmd_base.extend(self.m.params["extra_args"]) + + def _build_install_diff(pacman_verb, pkglist): + # Dry run to build the installation diff + + cmd = cmd_base + [pacman_verb, "--print-format", "%n %v"] + [p.source for p in pkglist] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to list package(s) to install", cmd=cmd, stdout=stdout, stderr=stderr) + + name_ver = [l.strip() for l in stdout.splitlines()] + before = [] + after = [] + to_be_installed = [] + for p in name_ver: + # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that. + # When installing from URLs, pacman can also output a 'nothing to do' message. strip that too. + if "loading packages" in p or "there is nothing to do" in p or 'Avoid running' in p: + continue + name, version = p.split() + if name in self.inventory["installed_pkgs"]: + before.append("%s-%s-%s" % (name, self.inventory["installed_pkgs"][name], self.inventory["pkg_reasons"][name])) + if name in pkgs_to_set_reason: + after.append("%s-%s-%s" % (name, version, self.m.params["reason"])) + elif name in self.inventory["pkg_reasons"]: + after.append("%s-%s-%s" % (name, version, self.inventory["pkg_reasons"][name])) + else: + after.append("%s-%s" % (name, version)) + to_be_installed.append(name) + + return (to_be_installed, before, after) + + before = [] + after = [] + installed_pkgs = [] + + if pkgs_to_install: + p, b, a = _build_install_diff("--sync", pkgs_to_install) + installed_pkgs.extend(p) + before.extend(b) + after.extend(a) + if pkgs_to_install_from_url: + p, b, a = _build_install_diff("--upgrade", pkgs_to_install_from_url) + installed_pkgs.extend(p) + before.extend(b) + after.extend(a) + + if len(installed_pkgs) == 0 and len(pkgs_to_set_reason) == 0: + # This can happen with URL packages if pacman decides there's nothing to do + self.exit_params["packages"] = [] + self.add_exit_infos("package(s) already installed") + return + + self.changed = True + + self.exit_params["diff"] = { + "before": "\n".join(sorted(before)) + "\n" if before else "", + "after": "\n".join(sorted(after)) + "\n" if after else "", + } + + changed_reason_pkgs = [p for p in pkgs_to_set_reason if p not in installed_pkgs] + + if self.m.check_mode: + self.add_exit_infos("Would have installed %d packages" % (len(installed_pkgs) + len(changed_reason_pkgs))) + self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs) + return + + # actually do it + def _install_packages_for_real(pacman_verb, pkglist): + cmd = cmd_base + [pacman_verb] + [p.source for p in pkglist] + rc, stdout, stderr = 
self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self.add_exit_infos(stdout=stdout, stderr=stderr) + self._invalidate_database() + + if pkgs_to_install: + _install_packages_for_real("--sync", pkgs_to_install) + if pkgs_to_install_from_url: + _install_packages_for_real("--upgrade", pkgs_to_install_from_url) + + # set reason + if pkgs_to_set_reason: + cmd = [self.pacman_path, "--noconfirm", "--database"] + if self.m.params["reason"] == "dependency": + cmd.append("--asdeps") + else: + cmd.append("--asexplicit") + cmd.extend(pkgs_to_set_reason) + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self.add_exit_infos(stdout=stdout, stderr=stderr) + + self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs) + self.add_exit_infos("Installed %d package(s)" % (len(installed_pkgs) + len(changed_reason_pkgs))) + + def remove_packages(self, pkgs): + # filter out pkgs that are already absent + pkg_names_to_remove = [p.name for p in pkgs if p.name in self.inventory["installed_pkgs"]] + + if len(pkg_names_to_remove) == 0: + self.exit_params["packages"] = [] + self.add_exit_infos("package(s) already absent") + return + + # There's something to do, set this in advance + self.changed = True + + cmd_base = [self.pacman_path, "--remove", "--noconfirm", "--noprogressbar"] + cmd_base += self.m.params["extra_args"] + cmd_base += ["--nodeps", "--nodeps"] if self.m.params["force"] else [] + # nosave_args conflicts with --print-format. Added later. + # https://github.com/ansible-collections/community.general/issues/4315 + + # This is a bit of a TOCTOU but it is better than parsing the output of + # pacman -R, which is different depending on the user config (VerbosePkgLists) + # Start by gathering what would be removed + cmd = cmd_base + ["--print-format", "%n-%v"] + pkg_names_to_remove + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("failed to list package(s) to remove", cmd=cmd, stdout=stdout, stderr=stderr) + + removed_pkgs = stdout.split() + self.exit_params["packages"] = removed_pkgs + self.exit_params["diff"] = { + "before": "\n".join(removed_pkgs) + "\n", # trailing \n to avoid diff complaints + "after": "", + } + + if self.m.check_mode: + self.exit_params["packages"] = removed_pkgs + self.add_exit_infos("Would have removed %d packages" % len(removed_pkgs)) + return + + nosave_args = ["--nosave"] if self.m.params["remove_nosave"] else [] + cmd = cmd_base + nosave_args + pkg_names_to_remove + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("failed to remove package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self._invalidate_database() + self.exit_params["packages"] = removed_pkgs + self.add_exit_infos("Removed %d package(s)" % len(removed_pkgs), stdout=stdout, stderr=stderr) + + def upgrade(self): + """Runs pacman --sync --sysupgrade if there are upgradable packages""" + + if len(self.inventory["upgradable_pkgs"]) == 0: + self.add_exit_infos("Nothing to upgrade") + return + + self.changed = True # there are upgrades, so there will be changes + + # Build diff based on inventory first. 
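+ # upgradable_pkgs maps each package name to VersionTuple(current, latest), as assembled by _build_inventory()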
+ diff = {"before": "", "after": ""} + for pkg, versions in self.inventory["upgradable_pkgs"].items(): + diff["before"] += "%s-%s\n" % (pkg, versions.current) + diff["after"] += "%s-%s\n" % (pkg, versions.latest) + self.exit_params["diff"] = diff + self.exit_params["packages"] = self.inventory["upgradable_pkgs"].keys() + + if self.m.check_mode: + self.add_exit_infos( + "%d packages would have been upgraded" % (len(self.inventory["upgradable_pkgs"])) + ) + else: + cmd = [ + self.pacman_path, + "--sync", + "--sysupgrade", + "--quiet", + "--noconfirm", + ] + if self.m.params["upgrade_extra_args"]: + cmd += self.m.params["upgrade_extra_args"] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + self._invalidate_database() + if rc == 0: + self.add_exit_infos("System upgraded", stdout=stdout, stderr=stderr) + else: + self.fail("Could not upgrade", cmd=cmd, stdout=stdout, stderr=stderr) + + def _list_database(self): + """runs pacman --sync --list with some caching""" + if self._cached_database is None: + dummy, packages, dummy = self.m.run_command([self.pacman_path, '--sync', '--list'], check_rc=True) + self._cached_database = packages.splitlines() + return self._cached_database + + def _invalidate_database(self): + """invalidates the pacman --sync --list cache""" + self._cached_database = None + + def update_package_db(self): + """runs pacman --sync --refresh""" + if self.m.check_mode: + self.add_exit_infos("Would have updated the package db") + self.changed = True + self.exit_params["cache_updated"] = True + return + + cmd = [ + self.pacman_path, + "--sync", + "--refresh", + ] + if self.m.params["update_cache_extra_args"]: + cmd += self.m.params["update_cache_extra_args"] + if self.m.params["force"]: + cmd += ["--refresh"] + else: + # Dump package database to get contents before update + pre_state = sorted(self._list_database()) + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + self._invalidate_database() + + if self.m.params["force"]: + # Always changed when force=true + self.exit_params["cache_updated"] = True + else: + # Dump package database to get contents after update + post_state = sorted(self._list_database()) + # If contents changed, set changed=true + self.exit_params["cache_updated"] = pre_state != post_state + if self.exit_params["cache_updated"]: + self.changed = True + + if rc == 0: + self.add_exit_infos("Updated package db", stdout=stdout, stderr=stderr) + else: + self.fail("could not update package db", cmd=cmd, stdout=stdout, stderr=stderr) + + def package_list(self): + """Takes the input package list and resolves packages groups to their package list using the inventory, + extracts package names from packages given as files or URLs using calls to pacman + + Returns the expanded/resolved list as a list of Package + """ + pkg_list = [] + for pkg in self.m.params["name"]: + if not pkg: + continue + + is_URL = False + if pkg in self.inventory["available_groups"]: + # Expand group members + for group_member in self.inventory["available_groups"][pkg]: + pkg_list.append(Package(name=group_member, source=group_member)) + elif pkg in self.inventory["available_pkgs"] or pkg in self.inventory["installed_pkgs"]: + # Just a regular pkg, either available in the repositories, + # or locally installed, which we need to know for absent state + pkg_list.append(Package(name=pkg, source=pkg)) + else: + # Last resort, call out to pacman to extract the info, + # pkg is possibly in the / format, or a filename or a URL + + # Start with / case + cmd = [self.pacman_path, 
"--sync", "--print-format", "%n", pkg] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + # fallback to filename / URL + cmd = [self.pacman_path, "--upgrade", "--print-format", "%n", pkg] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + if self.target_state == "absent": + continue # Don't bark for unavailable packages when trying to remove them + else: + self.fail( + msg="Failed to list package %s" % (pkg), + cmd=cmd, + stdout=stdout, + stderr=stderr, + rc=rc, + ) + # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs " filename_without_extension downloading..." if the URL is unseen. + # In all cases, pacman outputs "loading packages..." on stdout. strip both + stdout = stdout.splitlines()[-1] + is_URL = True + pkg_name = stdout.strip() + pkg_list.append(Package(name=pkg_name, source=pkg, source_is_URL=is_URL)) + + return pkg_list + + def _build_inventory(self): + """Build a cache datastructure used for all pkg lookups + Returns a dict: + { + "installed_pkgs": {pkgname: version}, + "installed_groups": {groupname: set(pkgnames)}, + "available_pkgs": {pkgname: version}, + "available_groups": {groupname: set(pkgnames)}, + "upgradable_pkgs": {pkgname: (current_version,latest_version)}, + "pkg_reasons": {pkgname: reason}, + } + + Fails the module if a package requested for install cannot be found + """ + + installed_pkgs = {} + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + query_re = re.compile(r'^\s*(?P\S+)\s+(?P\S+)\s*$') + for l in stdout.splitlines(): + query_match = query_re.match(l) + if not query_match: + continue + pkg, ver = query_match.groups() + installed_pkgs[pkg] = ver + + installed_groups = defaultdict(set) + dummy, stdout, dummy = self.m.run_command( + [self.pacman_path, "--query", "--groups"], check_rc=True + ) + # Format of lines: + # base-devel file + # base-devel findutils + # ... + query_groups_re = re.compile(r'^\s*(?P\S+)\s+(?P\S+)\s*$') + for l in stdout.splitlines(): + query_groups_match = query_groups_re.match(l) + if not query_groups_match: + continue + group, pkgname = query_groups_match.groups() + installed_groups[group].add(pkgname) + + available_pkgs = {} + database = self._list_database() + # Format of a line: "core pacman 6.0.1-2" + for l in database: + l = l.strip() + if not l: + continue + repo, pkg, ver = l.split()[:3] + available_pkgs[pkg] = ver + + available_groups = defaultdict(set) + dummy, stdout, dummy = self.m.run_command( + [self.pacman_path, "--sync", "--groups", "--groups"], check_rc=True + ) + # Format of lines: + # vim-plugins vim-airline + # vim-plugins vim-airline-themes + # vim-plugins vim-ale + # ... 
+ sync_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$') + for l in stdout.splitlines(): + sync_groups_match = sync_groups_re.match(l) + if not sync_groups_match: + continue + group, pkg = sync_groups_match.groups() + available_groups[group].add(pkg) + + upgradable_pkgs = {} + rc, stdout, stderr = self.m.run_command( + [self.pacman_path, "--query", "--upgrades"], check_rc=False + ) + + stdout = stdout.splitlines() + if stdout and "Avoid running" in stdout[0]: + stdout = stdout[1:] + stdout = "\n".join(stdout) + + # non-zero exit with nothing in stdout -> nothing to upgrade, all good + # stderr can have warnings, so not checked here + if rc == 1 and not stdout: + pass # nothing to upgrade + elif rc == 0: + # Format of lines: + # strace 5.14-1 -> 5.15-1 + # systemd 249.7-1 -> 249.7-2 [ignored] + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + if "[ignored]" in l or "Avoid running" in l: + continue + s = l.split() + if len(s) != 4: + self.fail(msg="Invalid line: %s" % l) + + pkg = s[0] + current = s[1] + latest = s[3] + upgradable_pkgs[pkg] = VersionTuple(current=current, latest=latest) + else: + # stuff in stdout but rc!=0, abort + self.fail( + "Couldn't get list of packages available for upgrade", + stdout=stdout, + stderr=stderr, + rc=rc, + ) + + pkg_reasons = {} + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--explicit"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg = l.split()[0] + pkg_reasons[pkg] = "explicit" + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--deps"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg = l.split()[0] + pkg_reasons[pkg] = "dependency" + + return dict( + installed_pkgs=installed_pkgs, + installed_groups=installed_groups, + available_pkgs=available_pkgs, + available_groups=available_groups, + upgradable_pkgs=upgradable_pkgs, + pkg_reasons=pkg_reasons, + ) + + +def setup_module(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="list", elements="str", aliases=["pkg", "package"]), + state=dict( + type="str", + default="present", + choices=["present", "installed", "latest", "absent", "removed"], + ), + force=dict(type="bool", default=False), + remove_nosave=dict(type="bool", default=False), + executable=dict(type="str", default="pacman"), + extra_args=dict(type="str", default=""), + upgrade=dict(type="bool"), + upgrade_extra_args=dict(type="str", default=""), + update_cache=dict(type="bool"), + update_cache_extra_args=dict(type="str", default=""), + reason=dict(type="str", choices=["explicit", "dependency"]), + reason_for=dict(type="str", default="new", choices=["new", "all"]), + ), + required_one_of=[["name", "update_cache", "upgrade"]], + mutually_exclusive=[["name", "upgrade"]], + supports_check_mode=True, + ) + + # Split extra_args as the shell would for easier handling later + for str_args in ["extra_args", "upgrade_extra_args", "update_cache_extra_args"]: + module.params[str_args] = shlex.split(module.params[str_args]) + + return module + + +def main(): + + Pacman(setup_module()).run() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py deleted file mode 120000 index ac0f448232..0000000000 --- a/plugins/modules/pacman_key.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pacman_key.py \ No newline at end of file diff --git
a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py new file mode 100644 index 0000000000..001416e855 --- /dev/null +++ b/plugins/modules/pacman_key.py @@ -0,0 +1,369 @@ +#!/usr/bin/python + +# Copyright (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacman_key +author: + - George Rawlinson (@grawlinson) +version_added: "3.2.0" +short_description: Manage pacman's list of trusted keys +description: + - Add or remove gpg keys from the pacman keyring. +notes: + - Use full-length key ID (40 characters). + - Keys are verified when using O(data), O(file), or O(url) unless O(verify) is overridden. + - Keys are locally signed after being imported into the keyring. + - If the key ID exists in the keyring, the key is not added unless O(force_update) is specified. + - O(data), O(file), O(url), and O(keyserver) are mutually exclusive. +requirements: + - gpg + - pacman-key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID; instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: path + url: + description: + - The URL to retrieve the keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve the key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against the specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, the module uses pacman's default (V(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is V(present) (added) or V(absent) (revoked). + default: present + choices: [absent, present] + type: str + ensure_trusted: + description: + - Ensure that the key is trusted (signed by the Pacman machine key and not expired).
+ type: bool + default: false + version_added: 11.0.0 +""" + +EXAMPLES = r""" +- name: Import a key via local file + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + data: "{{ lookup('file', 'keyfile.asc') }}" + state: present + +- name: Import a key via remote file + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + state: present + +- name: Import a key via url + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + url: https://domain.tld/keys/keyfile.asc + state: present + +- name: Import a key via keyserver + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + keyserver: keyserver.domain.tld + +- name: Import a key into an alternative keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + keyring: /etc/pacman.d/gnupg-alternative + +- name: Remove a key from the keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + state: absent +""" + +RETURN = r""" # """ + +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_native + + +class GpgListResult(object): + """Wraps gpg --list-* output.""" + + def __init__(self, line): + self._parts = line.split(':') + + @property + def kind(self): + return self._parts[0] + + @property + def valid(self): + return self._parts[1] + + @property + def is_fully_valid(self): + return self.valid == 'f' + + @property + def key(self): + return self._parts[4] + + @property + def user_id(self): + return self._parts[9] + + +def gpg_get_first_attr_of_kind(lines, kind, attr): + for line in lines: + glr = GpgListResult(line) + if glr.kind == kind: + return getattr(glr, attr) + + +def gpg_get_all_attrs_of_kind(lines, kind, attr): + result = [] + for line in lines: + glr = GpgListResult(line) + if glr.kind == kind: + result.append(getattr(glr, attr)) + return result + + +class PacmanKey(object): + def __init__(self, module): + self.module = module + # obtain binary paths for gpg & pacman-key + self.gpg_binary = module.get_bin_path('gpg', required=True) + self.pacman_key_binary = module.get_bin_path('pacman-key', required=True) + + # obtain module parameters + keyid = module.params['id'] + url = module.params['url'] + data = module.params['data'] + file = module.params['file'] + keyserver = module.params['keyserver'] + verify = module.params['verify'] + force_update = module.params['force_update'] + keyring = module.params['keyring'] + state = module.params['state'] + ensure_trusted = module.params['ensure_trusted'] + self.keylength = 40 + + # sanitise key ID & check if key exists in the keyring + keyid = self.sanitise_keyid(keyid) + key_validity = self.key_validity(keyring, keyid) + key_present = len(key_validity) > 0 + key_valid = any(key_validity) + + # check mode + if module.check_mode: + if state == 'present': + changed = (key_present and force_update) or not key_present + if not changed and ensure_trusted: + changed = not (key_valid and self.key_is_trusted(keyring, keyid)) + module.exit_json(changed=changed) + if state == 'absent': + module.exit_json(changed=key_present) + + if state == 'present': + trusted = key_valid and self.key_is_trusted(keyring, keyid) + if not force_update and key_present and (not ensure_trusted or trusted): + module.exit_json(changed=False) + changed = False + if data: + file = 
self.save_key(data) + self.add_key(keyring, file, keyid, verify) + changed = True + elif file: + self.add_key(keyring, file, keyid, verify) + changed = True + elif url: + data = self.fetch_key(url) + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + changed = True + elif keyserver: + self.recv_key(keyring, keyid, keyserver) + changed = True + if changed or (ensure_trusted and not trusted): + self.lsign_key(keyring=keyring, keyid=keyid) + changed = True + module.exit_json(changed=changed) + elif state == 'absent': + if key_present: + self.remove_key(keyring, keyid) + module.exit_json(changed=True) + module.exit_json(changed=False) + + def gpg(self, args, keyring=None, **kwargs): + cmd = [self.gpg_binary] + if keyring: + cmd.append('--homedir={keyring}'.format(keyring=keyring)) + cmd.extend(['--no-permission-warning', '--with-colons', '--quiet', '--batch', '--no-tty']) + return self.module.run_command(cmd + args, **kwargs) + + def pacman_key(self, args, keyring, **kwargs): + return self.module.run_command( + [self.pacman_key_binary, '--gpgdir', keyring] + args, + **kwargs) + + def pacman_machine_key(self, keyring): + unused_rc, stdout, unused_stderr = self.gpg(['--list-secret-key'], keyring=keyring) + return gpg_get_first_attr_of_kind(stdout.splitlines(), 'sec', 'key') + + def is_hexadecimal(self, string): + """Check if a given string is valid hexadecimal""" + try: + int(string, 16) + except ValueError: + return False + return True + + def sanitise_keyid(self, keyid): + """Sanitise given key ID. + + Strips whitespace, uppercases all characters, and strips leading `0X`. + """ + sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') + if len(sanitised_keyid) != self.keylength: + self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) + if not self.is_hexadecimal(sanitised_keyid): + self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) + return sanitised_keyid + + def fetch_key(self, url): + """Downloads a key from url""" + response, info = fetch_url(self.module, url) + if info['status'] != 200: + self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) + return to_native(response.read()) + + def recv_key(self, keyring, keyid, keyserver): + """Receives key via keyserver""" + self.pacman_key(['--keyserver', keyserver, '--recv-keys', keyid], keyring=keyring, check_rc=True) + + def lsign_key(self, keyring, keyid): + """Locally sign key""" + self.pacman_key(['--lsign-key', keyid], keyring=keyring, check_rc=True) + + def save_key(self, data): + "Saves key data to a temporary file" + tmpfd, tmpname = tempfile.mkstemp() + self.module.add_cleanup_file(tmpname) + tmpfile = os.fdopen(tmpfd, "w") + tmpfile.write(data) + tmpfile.close() + return tmpname + + def add_key(self, keyring, keyfile, keyid, verify): + """Add key to pacman's keyring""" + if verify: + self.verify_keyfile(keyfile, keyid) + self.pacman_key(['--add', keyfile], keyring=keyring, check_rc=True) + + def remove_key(self, keyring, keyid): + """Remove key from pacman's keyring""" + self.pacman_key(['--delete', keyid], keyring=keyring, check_rc=True) + + def verify_keyfile(self, keyfile, keyid): + """Verify that keyfile matches the specified key ID""" + if keyfile is None: + self.module.fail_json(msg="expected a key, got none") + elif keyid is None: + self.module.fail_json(msg="expected a key ID, got none") + + rc, stdout, stderr = self.gpg( + ['--with-fingerprint', '--show-keys', keyfile], + check_rc=True, + ) + + 
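# In gpg --with-colons output, an 'fpr' record carries the full fingerprint + # in its tenth field, which GpgListResult exposes as user_id; it is compared + # against the expected key ID below. +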
extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), 'fpr', 'user_id') + if extracted_keyid != keyid: + self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid)) + + def key_validity(self, keyring, keyid): + "Check if the key ID is in pacman's keyring and not expired" + rc, stdout, stderr = self.gpg(['--no-default-keyring', '--list-keys', keyid], keyring=keyring, check_rc=False) + if rc != 0: + if stderr.find("No public key") >= 0: + return [] + else: + self.module.fail_json(msg="gpg returned an error: %s" % stderr) + return gpg_get_all_attrs_of_kind(stdout.splitlines(), 'uid', 'is_fully_valid') + + def key_is_trusted(self, keyring, keyid): + """Check if key is signed and not expired.""" + unused_rc, stdout, unused_stderr = self.gpg(['--check-signatures', keyid], keyring=keyring) + return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), 'sig', 'key') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str', required=True), + data=dict(type='str'), + file=dict(type='path'), + url=dict(type='str'), + keyserver=dict(type='str'), + verify=dict(type='bool', default=True), + force_update=dict(type='bool', default=False), + keyring=dict(type='path', default='/etc/pacman.d/gnupg'), + ensure_trusted=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), + required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], + ) + PacmanKey(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py deleted file mode 120000 index 0cf046e652..0000000000 --- a/plugins/modules/pagerduty.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/pagerduty.py \ No newline at end of file diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py new file mode 100644 index 0000000000..2219d87928 --- /dev/null +++ b/plugins/modules/pagerduty.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pagerduty +short_description: Create PagerDuty maintenance windows +description: + - This module lets you create PagerDuty maintenance windows. +author: + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" + - "Bruce Pennypacker (@bpennypacker)" +requirements: + - PagerDuty API access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Create a maintenance window or get a list of ongoing windows. + required: true + choices: ["running", "started", "ongoing", "absent"] + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + user: + type: str + description: + - PagerDuty user ID. Obsolete. Please use O(token) for authorization. + token: + type: str + description: + - A PagerDuty token, generated on the PagerDuty site. It is used for authorization. + required: true + requester_id: + type: str + description: + - ID of the user making the request. Only needed when creating a maintenance window.
+ service: + type: list + elements: str + description: + - A comma-separated list of PagerDuty service IDs. + aliases: [services] + window_id: + type: str + description: + - ID of the maintenance window. Only needed when removing a maintenance window. + hours: + type: str + description: + - Length of maintenance window in hours. + default: '1' + minutes: + type: str + description: + - Maintenance window in minutes (this is added to the hours). + default: '0' + desc: + type: str + description: + - Short description of the maintenance window. + default: Created by Ansible + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: List ongoing maintenance windows using a token + community.general.pagerduty: + name: companyabc + token: xxxxxxxxxxxxxx + state: ongoing + +- name: Create a 1 hour maintenance window for service FOO123 + community.general.pagerduty: + name: companyabc + user: example@example.com + token: yourtoken + state: running + service: FOO123 + +- name: Create a 5 minute maintenance window for service FOO123 + community.general.pagerduty: + name: companyabc + token: xxxxxxxxxxxxxx + hours: 0 + minutes: 5 + state: running + service: FOO123 + + +- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment" + community.general.pagerduty: + name: companyabc + user: example@example.com + state: running + service: FOO123 + hours: 4 + desc: deployment + register: pd_window + +- name: Delete the previous maintenance window + community.general.pagerduty: + name: companyabc + user: example@example.com + state: absent + window_id: '{{ pd_window.result.maintenance_window.id }}' + +# Delete a maintenance window in a separate playbook from the one that created it, +# if it is the only existing maintenance window +- name: Check + community.general.pagerduty: + requester_id: XXXXXXX + token: yourtoken + state: ongoing + register: pd_window + +- name: Delete + community.general.pagerduty: + requester_id: XXXXXXX + token: yourtoken + state: absent + window_id: "{{ pd_window.result.maintenance_windows[0].id }}" +""" + +import datetime +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + + +class PagerDutyRequest(object): + def __init__(self, module, name, user, token): + self.module = module + self.name = name + self.user = user + self.token = token + self.headers = { + 'Content-Type': 'application/json', + "Authorization": self._auth_header(), + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + def ongoing(self, http_call=fetch_url): + url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing" + headers = dict(self.headers) + + response, info = http_call(self.module, url, headers=headers) + if info['status'] != 200: + self.module.fail_json(msg="failed to look up the ongoing window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, False + + def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url): + if not requester_id: + self.module.fail_json(msg="requester_id is required when a maintenance window should be created") + + url = 'https://api.pagerduty.com/maintenance_windows' + + headers = dict(self.headers) + headers.update({'From': requester_id}) + +
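# The maintenance_windows endpoint expects ISO8601 start and end timestamps; + # they are derived below from the hours and minutes parameters. +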
start, end = self._compute_start_end_time(hours, minutes) + services = self._create_services_payload(service) + + request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}} + + data = json.dumps(request_data) + response, info = http_call(self.module, url, data=data, headers=headers, method='POST') + if info['status'] != 201: + self.module.fail_json(msg="failed to create the window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, True + + def _create_services_payload(self, service): + if isinstance(service, list): + return [{'id': s, 'type': 'service_reference'} for s in service] + else: + return [{'id': service, 'type': 'service_reference'}] + + def _compute_start_end_time(self, hours, minutes): + now_t = now() + later = now_t + datetime.timedelta(hours=int(hours), minutes=int(minutes)) + start = now_t.strftime("%Y-%m-%dT%H:%M:%SZ") + end = later.strftime("%Y-%m-%dT%H:%M:%SZ") + return start, end + + def absent(self, window_id, http_call=fetch_url): + url = "https://api.pagerduty.com/maintenance_windows/" + window_id + headers = dict(self.headers) + + response, info = http_call(self.module, url, headers=headers, method='DELETE') + if info['status'] != 204: + self.module.fail_json(msg="failed to delete the window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, True + + def _auth_header(self): + return "Token token=%s" % self.token + + def _read_response(self, response): + try: + return json.loads(response.read()) + except Exception: + return "" + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), + name=dict(), + user=dict(), + token=dict(required=True, no_log=True), + service=dict(type='list', elements='str', aliases=["services"]), + window_id=dict(), + requester_id=dict(), + hours=dict(default='1'), # @TODO change to int? + minutes=dict(default='0'), # @TODO change to int? 
+ desc=dict(default='Created by Ansible'), + validate_certs=dict(default=True, type='bool'), + ) + ) + + state = module.params['state'] + name = module.params['name'] + user = module.params['user'] + service = module.params['service'] + window_id = module.params['window_id'] + hours = module.params['hours'] + minutes = module.params['minutes'] + token = module.params['token'] + desc = module.params['desc'] + requester_id = module.params['requester_id'] + + pd = PagerDutyRequest(module, name, user, token) + + if state == "running" or state == "started": + if not service: + module.fail_json(msg="service not specified") + (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc) + if rc == 0: + changed = True + + if state == "ongoing": + (rc, out, changed) = pd.ongoing() + + if state == "absent": + (rc, out, changed) = pd.absent(window_id) + + if rc != 0: + module.fail_json(msg="failed", result=out) + + module.exit_json(msg="success", result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py deleted file mode 120000 index b8d516074b..0000000000 --- a/plugins/modules/pagerduty_alert.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/pagerduty_alert.py \ No newline at end of file diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py new file mode 100644 index 0000000000..215ad2d821 --- /dev/null +++ b/plugins/modules/pagerduty_alert.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pagerduty_alert +short_description: Trigger, acknowledge or resolve PagerDuty incidents +description: + - This module lets you trigger, acknowledge or resolve a PagerDuty incident by sending events. +author: + - "Amanpreet Singh (@ApsOps)" + - "Xiao Shen (@xshen1)" +requirements: + - PagerDuty API access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + api_key: + type: str + description: + - The PagerDuty API key (read-only access), generated on the PagerDuty site. + - Required if O(api_version=v1). + integration_key: + type: str + description: + - The GUID of one of your 'Generic API' services. + - This is the 'integration key' listed on the 'Integrations' tab of a PagerDuty service. + service_id: + type: str + description: + - ID of PagerDuty service when incidents are triggered, acknowledged or resolved. + - Required if O(api_version=v1). + service_key: + type: str + description: + - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key). + state: + type: str + description: + - Type of event to be sent. + required: true + choices: + - 'triggered' + - 'acknowledged' + - 'resolved' + api_version: + type: str + description: + - The API version we want to use to run the module. + - V1 is more limited in the options it can provide when triggering an incident. + - V2 supports more fields, for example O(severity), O(source), O(custom_details), and so on.
+ default: 'v1' + choices: + - 'v1' + - 'v2' + version_added: 7.4.0 + client: + type: str + description: + - The name of the monitoring client that is triggering this event. + client_url: + type: str + description: + - The URL of the monitoring client that is triggering this event. + component: + type: str + description: + - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). + version_added: 7.4.0 + custom_details: + type: dict + description: + - Additional details about the event and affected system. + - A dictionary with custom keys and values. + version_added: 7.4.0 + desc: + type: str + description: + - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated + version) is used when generating phone calls, SMS messages and alert emails. It also appears on the incidents tables + in the PagerDuty UI. The maximum length is 1024 characters. + - For O(state=acknowledged) or O(state=resolved) - Text that appears in the incident's log associated with this event. + default: Created via Ansible + incident_class: + type: str + description: + - The class/type of the event, for example C(ping failure) or C(cpu load). + version_added: 7.4.0 + incident_key: + type: str + description: + - Identifies the incident to which this O(state) should be applied. + - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one is created. + If there is already an open incident with a matching key, this event is appended to that incident's log. The event + key provides an easy way to 'de-dup' problem reports. If no O(incident_key) is provided, then it is generated by PagerDuty. + - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident + was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents are discarded. + link_url: + type: str + description: + - Relevant link URL to the alert. For example, the website or the job link. + version_added: 7.4.0 + link_text: + type: str + description: + - A short description of the O(link_url). + version_added: 7.4.0 + source: + type: str + description: + - The unique location of the affected system, preferably a hostname or FQDN. + - Required if O(state=triggered) and O(api_version=v2). + version_added: 7.4.0 + severity: + type: str + description: + - The perceived severity of the status the event is describing with respect to the affected system. + - Required if O(state=triggered) and O(api_version=v2).
+ default: 'critical' + choices: + - 'critical' + - 'warning' + - 'error' + - 'info' + version_added: 7.4.0 +""" + +EXAMPLES = r""" +- name: Trigger an incident with just the basic options + community.general.pagerduty_alert: + name: companyabc + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + +- name: Trigger an incident with more options + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + incident_key: somekey + client: Sample Monitoring Service + client_url: http://service.example.com + +- name: Acknowledge an incident based on incident_key + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: acknowledged + incident_key: somekey + desc: "some text for incident's log" + +- name: Resolve an incident based on incident_key + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: resolved + incident_key: somekey + desc: "some text for incident's log" + +- name: Trigger a v2 incident with just the basic options + community.general.pagerduty_alert: + integration_key: xxx + api_version: v2 + source: My Ansible Script + state: triggered + desc: problem that led to this trigger + +- name: Trigger a v2 incident with more options + community.general.pagerduty_alert: + integration_key: xxx + api_version: v2 + source: My Ansible Script + state: triggered + desc: problem that led to this trigger + incident_key: somekey + client: Sample Monitoring Service + client_url: http://service.example.com + component: mysql + incident_class: ping failure + link_url: https://pagerduty.com + link_text: PagerDuty + +- name: Acknowledge an incident based on incident_key using v2 + community.general.pagerduty_alert: + api_version: v2 + integration_key: xxx + incident_key: somekey + state: acknowledged + +- name: Resolve an incident based on incident_key + community.general.pagerduty_alert: + api_version: v2 + integration_key: xxx + incident_key: somekey + state: resolved +""" +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from datetime import datetime +from urllib.parse import urlparse, urlencode, urlunparse + + +def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): + url = 'https://api.pagerduty.com/incidents' + headers = { + "Content-type": "application/json", + "Authorization": "Token token=%s" % api_key, + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + params = { + 'service_ids[]': service_id, + 'sort_by': 'incident_number:desc', + 'time_zone': 'UTC' + } + if incident_key: + params['incident_key'] = incident_key + + url_parts = list(urlparse(url)) + url_parts[4] = urlencode(params, True) + + url = urlunparse(url_parts) + + response, info = http_call(module, url, method='get', headers=headers) + + if info['status'] != 200: + module.fail_json(msg="failed to check current incident status. "
+ "Reason: %s" % info['msg']) + + incidents = json.loads(response.read())["incidents"] + msg = "No corresponding incident" + + if len(incidents) == 0: + if state in ('acknowledged', 'resolved'): + return msg, False + return msg, True + elif state != incidents[0]["status"]: + return incidents[0], True + + return incidents[0], False + + +def send_event_v1(module, service_key, event_type, desc, + incident_key=None, client=None, client_url=None): + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + headers = { + "Content-type": "application/json" + } + + data = { + "service_key": service_key, + "event_type": event_type, + "incident_key": incident_key, + "description": desc, + "client": client, + "client_url": client_url + } + + response, info = fetch_url(module, url, method='post', + headers=headers, data=json.dumps(data)) + if info['status'] != 200: + module.fail_json(msg="failed to %s. Reason: %s" % + (event_type, info['msg'])) + json_out = json.loads(response.read()) + return json_out + + +def send_event_v2(module, service_key, event_type, payload, link, + incident_key=None, client=None, client_url=None): + url = "https://events.pagerduty.com/v2/enqueue" + headers = { + "Content-type": "application/json" + } + data = { + "routing_key": service_key, + "event_action": event_type, + "payload": payload, + "client": client, + "client_url": client_url, + } + if link: + data["links"] = [link] + if incident_key: + data["dedup_key"] = incident_key + if event_type != "trigger": + data.pop("payload") + response, info = fetch_url(module, url, method="post", + headers=headers, data=json.dumps(data)) + if info["status"] != 202: + module.fail_json(msg="failed to %s. Reason: %s" % + (event_type, info['msg'])) + json_out = json.loads(response.read()) + return json_out, True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(), + api_key=dict(no_log=True), + integration_key=dict(no_log=True), + service_id=dict(), + service_key=dict(no_log=True), + state=dict( + required=True, choices=['triggered', 'acknowledged', 'resolved'] + ), + api_version=dict(type='str', default='v1', choices=['v1', 'v2']), + client=dict(), + client_url=dict(), + component=dict(), + custom_details=dict(type='dict'), + desc=dict(default='Created via Ansible'), + incident_class=dict(), + incident_key=dict(no_log=False), + link_url=dict(), + link_text=dict(), + source=dict(), + severity=dict( + default='critical', choices=['critical', 'warning', 'error', 'info'] + ), + ), + required_if=[ + ('api_version', 'v1', ['service_id', 'api_key']), + ('state', 'acknowledged', ['incident_key']), + ('state', 'resolved', ['incident_key']), + ], + required_one_of=[('service_key', 'integration_key')], + supports_check_mode=True, + ) + + name = module.params['name'] + service_id = module.params.get('service_id') + integration_key = module.params.get('integration_key') + service_key = module.params.get('service_key') + api_key = module.params.get('api_key') + state = module.params.get('state') + client = module.params.get('client') + client_url = module.params.get('client_url') + desc = module.params.get('desc') + incident_key = module.params.get('incident_key') + payload = { + 'summary': desc, + 'source': module.params.get('source'), + 'timestamp': datetime.now().isoformat(), + 'severity': module.params.get('severity'), + 'component': module.params.get('component'), + 'class': module.params.get('incident_class'), + 'custom_details': module.params.get('custom_details'), + } + link = {} + if 
module.params.get('link_url'): + link['href'] = module.params.get('link_url') + if module.params.get('link_text'): + link['text'] = module.params.get('link_text') + if integration_key is None: + integration_key = service_key + module.warn( + '"service_key" is an obsolete parameter and will be removed.' + ' Please use "integration_key" instead.' + ) + + state_event_dict = { + 'triggered': 'trigger', + 'acknowledged': 'acknowledge', + 'resolved': 'resolve', + } + + event_type = state_event_dict[state] + if module.params.get('api_version') == 'v1': + out, changed = check(module, name, state, service_id, + integration_key, api_key, incident_key) + if not module.check_mode and changed is True: + out = send_event_v1(module, integration_key, event_type, desc, + incident_key, client, client_url) + else: + changed = True + if event_type == 'trigger' and not payload['source']: + module.fail_json(msg='"source" is a required variable for the v2 API endpoint.') + out, changed = send_event_v2( + module, + integration_key, + event_type, + payload, + link, + incident_key, + client, + client_url, + ) + + module.exit_json(result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py deleted file mode 120000 index 3c1ee64719..0000000000 --- a/plugins/modules/pagerduty_change.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/pagerduty_change.py \ No newline at end of file diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py new file mode 100644 index 0000000000..2cd33a0da8 --- /dev/null +++ b/plugins/modules/pagerduty_change.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# Copyright (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pagerduty_change +short_description: Track a code or infrastructure change as a PagerDuty change event +version_added: 1.3.0 +description: + - This module lets you create a PagerDuty change event each time the module is run. + - This is not an idempotent action and a new change event is created each time it is run. +author: + - Adam Vaughan (@adamvaughan) +requirements: + - PagerDuty integration key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + details: + - Check mode does nothing except return C(changed=true) if the O(url) seems to be correct. + diff_mode: + support: none +options: + integration_key: + description: + - The integration key that identifies the service the change was made to. This can be found by adding an integration + to a service in PagerDuty. + required: true + type: str + summary: + description: + - A short description of the change that occurred. + required: true + type: str + source: + description: + - The source of the change event. + default: Ansible + type: str + user: + description: + - The name of the user or process that triggered this deployment. + type: str + repo: + description: + - The URL of the project repository. + required: false + type: str + revision: + description: + - An identifier of the revision being deployed, typically a number or SHA from a version control system. + required: false + type: str + environment: + description: + - The environment name, typically V(production), V(staging), and so on.
+ required: false + type: str + link_url: + description: + - A URL where more information about the deployment can be obtained. + required: false + type: str + link_text: + description: + - Descriptive text for a URL where more information about the deployment can be obtained. + required: false + type: str + url: + description: + - URL to submit the change event to. + required: false + default: https://events.pagerduty.com/v2/change/enqueue + type: str + validate_certs: + description: + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. + required: false + default: true + type: bool +""" + +EXAMPLES = r""" +- name: Track the deployment as a PagerDuty change event + community.general.pagerduty_change: + integration_key: abc123abc123abc123abc123abc123ab + summary: The application was deployed + +- name: Track the deployment as a PagerDuty change event with more details + community.general.pagerduty_change: + integration_key: abc123abc123abc123abc123abc123ab + summary: The application was deployed + source: Ansible Deploy + user: ansible + repo: github.com/ansible/ansible + revision: '4.2' + environment: production + link_url: https://github.com/ansible-collections/community.general/pull/1269 + link_text: View changes on GitHub +""" + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + integration_key=dict(required=True, type='str', no_log=True), + summary=dict(required=True, type='str'), + source=dict(default='Ansible', type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + environment=dict(type='str'), + link_url=dict(type='str'), + link_text=dict(type='str'), + url=dict(default='https://events.pagerduty.com/v2/change/enqueue', type='str'), + validate_certs=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/ + + url = module.params['url'] + headers = {'Content-Type': 'application/json'} + + if module.check_mode: + _response, info = fetch_url( + module, url, headers=headers, method='POST') + + if info['status'] == 400: + module.exit_json(changed=True) + else: + module.fail_json( + msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status'])) + + custom_details = {} + + if module.params['user']: + custom_details['user'] = module.params['user'] + + if module.params['repo']: + custom_details['repo'] = module.params['repo'] + + if module.params['revision']: + custom_details['revision'] = module.params['revision'] + + if module.params['environment']: + custom_details['environment'] = module.params['environment'] + + timestamp = now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + + payload = { + 'summary': module.params['summary'], + 'source': module.params['source'], + 'timestamp': timestamp, + 'custom_details': custom_details + } + + event = { + 'routing_key': module.params['integration_key'], + 'payload': payload + } + + if module.params['link_url']: + link = { + 'href': module.params['link_url'] + } + + if module.params['link_text']: + link['text'] = module.params['link_text'] + + event['links'] = [link] + + _response, info = fetch_url( + module, url, data=module.jsonify(event), headers=headers, 
method='POST') + + if info['status'] == 202: + module.exit_json(changed=True) + else: + module.fail_json( + msg='Creating PagerDuty change event failed with %d' % (info['status'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py deleted file mode 120000 index bcf41f3fde..0000000000 --- a/plugins/modules/pagerduty_user.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/pagerduty_user.py \ No newline at end of file diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py new file mode 100644 index 0000000000..7e000f1e8f --- /dev/null +++ b/plugins/modules/pagerduty_user.py @@ -0,0 +1,251 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pagerduty_user +short_description: Manage a user account on PagerDuty +description: + - This module manages the creation/removal of a user account on PagerDuty. +version_added: '1.3.0' +author: Zainab Alsaffar (@zanssa) +requirements: + - pdpyras python module = 4.1.1 + - PagerDuty API Access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + access_token: + description: + - An API access token to authenticate with the PagerDuty REST API. + required: true + type: str + pd_user: + description: + - Name of the user in PagerDuty. + required: true + type: str + pd_email: + description: + - The user's email address. + - O(pd_email) is the unique identifier used and cannot be updated using this module. + required: true + type: str + pd_role: + description: + - The user's role. + choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] + default: 'responder' + type: str + state: + description: + - State of the user. + - On V(present), it creates a user if the user does not exist. + - On V(absent), it removes a user if the account exists. + choices: ['present', 'absent'] + default: 'present' + type: str + pd_teams: + description: + - The teams to which the user belongs. + - Required if O(state=present). 
+ type: list + elements: str +""" + +EXAMPLES = r""" +- name: Create a user account on PagerDuty + community.general.pagerduty_user: + access_token: 'Your_Access_token' + pd_user: user_full_name + pd_email: user_email + pd_role: user_pd_role + pd_teams: user_pd_teams + state: "present" + +- name: Remove a user account from PagerDuty + community.general.pagerduty_user: + access_token: 'Your_Access_token' + pd_user: user_full_name + pd_email: user_email + state: "absent" +""" + +RETURN = r""" # """ + +from os import path +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("pdpyras", url="https://github.com/PagerDuty/pdpyras"): + from pdpyras import APISession, PDClientError + + +class PagerDutyUser(object): + def __init__(self, module, session): + self._module = module + self._apisession = session + + # check if the user exists + def does_user_exist(self, pd_email): + for user in self._apisession.iter_all('users'): + if user['email'] == pd_email: + return user['id'] + + # create a user account on PD + def add_pd_user(self, pd_name, pd_email, pd_role): + try: + user = self._apisession.persist('users', 'email', { + "name": pd_name, + "email": pd_email, + "type": "user", + "role": pd_role, + }) + return user + + except PDClientError as e: + if e.response.status_code == 400: + self._module.fail_json( + msg="Failed to add %s due to invalid argument" % (pd_name)) + if e.response.status_code == 401: + self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name)) + if e.response.status_code == 402: + self._module.fail_json( + msg="Failed to add %s due to inability to perform the action within the API token" % (pd_name)) + if e.response.status_code == 403: + self._module.fail_json( + msg="Failed to add %s due to inability to review the requested resource within the API token" % (pd_name)) + if e.response.status_code == 429: + self._module.fail_json( + msg="Failed to add %s due to reaching the limit of making requests" % (pd_name)) + + # delete a user account from PD + def delete_user(self, pd_user_id, pd_name): + try: + user_path = path.join('/users/', pd_user_id) + self._apisession.rdelete(user_path) + + except PDClientError as e: + if e.response.status_code == 404: + self._module.fail_json( + msg="Failed to remove %s as user was not found" % (pd_name)) + if e.response.status_code == 403: + self._module.fail_json( + msg="Failed to remove %s due to inability to review the requested resource within the API token" % (pd_name)) + if e.response.status_code == 401: + # print out the list of incidents + pd_incidents = self.get_incidents_assigned_to_user(pd_user_id) + self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents)) + if e.response.status_code == 429: + self._module.fail_json( + msg="Failed to remove %s due to reaching the limit of making requests" % (pd_name)) + + # get incidents assigned to a user + def get_incidents_assigned_to_user(self, pd_user_id): + incident_info = {} + incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]}) + + for incident in incidents: + incident_info = { + 'title': incident['title'], + 'key': incident['incident_key'], + 'status': incident['status'] + } + return incident_info + + # add a user to a team/teams + def add_user_to_teams(self, pd_user_id, pd_teams, pd_role): + updated_team = None + for team in pd_teams: + team_info = self._apisession.find('teams', team, 
attribute='name') + if team_info is not None: + try: + updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={ + 'role': pd_role + }) + except PDClientError: + updated_team = None + return updated_team + + +def main(): + module = AnsibleModule( + argument_spec=dict( + access_token=dict(type='str', required=True, no_log=True), + pd_user=dict(type='str', required=True), + pd_email=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + pd_role=dict(type='str', default='responder', + choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), + pd_teams=dict(type='list', elements='str')), + required_if=[['state', 'present', ['pd_teams']], ], + supports_check_mode=True, + ) + + deps.validate(module) + + access_token = module.params['access_token'] + pd_user = module.params['pd_user'] + pd_email = module.params['pd_email'] + state = module.params['state'] + pd_role = module.params['pd_role'] + pd_teams = module.params['pd_teams'] + + if pd_role: + pd_role_gui_value = { + 'global_admin': 'admin', + 'manager': 'user', + 'responder': 'limited_user', + 'observer': 'observer', + 'stakeholder': 'read_only_user', + 'limited_stakeholder': 'read_only_limited_user', + 'restricted_access': 'restricted_access' + } + pd_role = pd_role_gui_value[pd_role] + + # authenticate with PD API + try: + session = APISession(access_token) + except PDClientError as e: + module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e) + + user = PagerDutyUser(module, session) + + user_exists = user.does_user_exist(pd_email) + + if user_exists: + if state == "absent": + # remove user + if not module.check_mode: + user.delete_user(user_exists, pd_user) + module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user) + else: + module.exit_json(changed=False, result="User %s already exists." % pd_user) + + # in case that the user does not exist + else: + if state == "absent": + module.exit_json(changed=False, result="User %s was not found." % pd_user) + + else: + # add user, adds user with the default notification rule and contact info (email) + if not module.check_mode: + user.add_pd_user(pd_user, pd_email, pd_role) + # get user's id + pd_user_id = user.does_user_exist(pd_email) + # add a user to the team/s + user.add_user_to_teams(pd_user_id, pd_teams, pd_role) + module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py deleted file mode 120000 index 9e03a5467c..0000000000 --- a/plugins/modules/pam_limits.py +++ /dev/null @@ -1 +0,0 @@ -./system/pam_limits.py \ No newline at end of file diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py new file mode 100644 index 0000000000..33a4eb7909 --- /dev/null +++ b/plugins/modules/pam_limits.py @@ -0,0 +1,359 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Sebastien Rohaut +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pam_limits +author: + - "Sebastien Rohaut (@usawa)" +short_description: Modify Linux PAM limits +description: + - The M(community.general.pam_limits) module modifies PAM limits. 
+ - The default file is V(/etc/security/limits.conf). + - For the full documentation, see C(man 5 limits.conf). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 2.0.0 + diff_mode: + support: full + version_added: 2.0.0 +options: + domain: + type: str + description: + - A username, @groupname, wildcard, UID/GID range. + required: true + limit_type: + type: str + description: + - Limit type, see C(man 5 limits.conf) for an explanation. + required: true + choices: ["hard", "soft", "-"] + limit_item: + type: str + description: + - The limit to be set. + required: true + choices: + - "core" + - "data" + - "fsize" + - "memlock" + - "nofile" + - "rss" + - "stack" + - "cpu" + - "nproc" + - "as" + - "maxlogins" + - "maxsyslogins" + - "priority" + - "locks" + - "sigpending" + - "msgqueue" + - "nice" + - "rtprio" + - "chroot" + value: + type: str + description: + - The value of the limit. + - Value must either be V(unlimited), V(infinity) or V(-1), all of which indicate no limit, or a limit of 0 or larger. + - Value must be a number in the range -20 to 19 inclusive, if O(limit_item) is set to V(nice) or V(priority). + - Refer to the C(man 5 limits.conf) manual pages for more details. + required: true + backup: + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. + required: false + type: bool + default: false + use_min: + description: + - If set to V(true), the minimal value is used or conserved. + - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content + is not modified. + required: false + type: bool + default: false + use_max: + description: + - If set to V(true), the maximal value is used or conserved. + - If the specified value is superior to the value in the file, file content is replaced with the new value, else content + is not modified. + required: false + type: bool + default: false + dest: + type: str + description: + - Modify the limits.conf path. + required: false + default: "/etc/security/limits.conf" + comment: + type: str + description: + - Comment associated with the limit. + required: false + default: '' +notes: + - If O(dest) file does not exist, it is created. +""" + +EXAMPLES = r""" +- name: Add or modify nofile soft limit for the user joe + community.general.pam_limits: + domain: joe + limit_type: soft + limit_item: nofile + value: 64000 + +- name: Add or modify fsize hard limit for the user smith. 
Keep or set the maximal value + community.general.pam_limits: + domain: smith + limit_type: hard + limit_item: fsize + value: 1000000 + use_max: true + +- name: Add or modify memlock, both soft and hard, limit for the user james with a comment + community.general.pam_limits: + domain: james + limit_type: '-' + limit_item: memlock + value: unlimited + comment: unlimited memory lock for james + +- name: Add or modify hard nofile limits for wildcard domain + community.general.pam_limits: + domain: '*' + limit_type: hard + limit_item: nofile + value: 39693561 +""" + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def _assert_is_valid_value(module, item, value, prefix=''): + if item in ['nice', 'priority']: + try: + valid = -20 <= int(value) <= 19 + except ValueError: + valid = False + if not valid: + module.fail_json(msg="%s Value of %r for item %r is invalid. Value must be a number in the range -20 to 19 inclusive. " + "Refer to the limits.conf(5) manual pages for more details." % (prefix, value, item)) + elif not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()): + module.fail_json(msg="%s Value of %r for item %r is invalid. Value must either be 'unlimited', 'infinity' or -1, all of " + "which indicate no limit, or a limit of 0 or larger. Refer to the limits.conf(5) manual pages for " + "more details." % (prefix, value, item)) + + +def main(): + pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', + 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot'] + + pam_types = ['soft', 'hard', '-'] + + limits_conf = '/etc/security/limits.conf' + + module = AnsibleModule( + argument_spec=dict( + domain=dict(required=True, type='str'), + limit_type=dict(required=True, type='str', choices=pam_types), + limit_item=dict(required=True, type='str', choices=pam_items), + value=dict(required=True, type='str'), + use_max=dict(default=False, type='bool'), + use_min=dict(default=False, type='bool'), + backup=dict(default=False, type='bool'), + dest=dict(default=limits_conf, type='str'), + comment=dict(default='', type='str') + ), + supports_check_mode=True, + ) + + domain = module.params['domain'] + limit_type = module.params['limit_type'] + limit_item = module.params['limit_item'] + value = module.params['value'] + use_max = module.params['use_max'] + use_min = module.params['use_min'] + backup = module.params['backup'] + limits_conf = module.params['dest'] + new_comment = module.params['comment'] + + changed = False + does_not_exist = False + + if os.path.isfile(limits_conf): + if not os.access(limits_conf, os.W_OK): + module.fail_json(msg="%s is not writable. 
Use sudo" % limits_conf) + else: + limits_conf_dir = os.path.dirname(limits_conf) + if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK): + does_not_exist = True + changed = True + else: + module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir) + + if use_max and use_min: + module.fail_json(msg="Cannot use use_min and use_max at the same time.") + + _assert_is_valid_value(module, limit_item, value) + + # Backup + if backup: + backup_file = module.backup_local(limits_conf) + + space_pattern = re.compile(r'\s+') + + if does_not_exist: + lines = [] + else: + with open(limits_conf, 'rb') as f: + lines = list(f) + + message = '' + # Tempfile + nf = tempfile.NamedTemporaryFile(mode='w+') + + found = False + new_value = value + + for line in lines: + line = to_native(line, errors='surrogate_or_strict') + if line.startswith('#'): + nf.write(line) + continue + + newline = re.sub(space_pattern, ' ', line).strip() + if not newline: + nf.write(line) + continue + + # Remove comment in line + newline = newline.split('#', 1)[0] + try: + old_comment = line.split('#', 1)[1] + except Exception: + old_comment = '' + + newline = newline.rstrip() + + if not new_comment: + new_comment = old_comment + + line_fields = newline.split(' ') + + if len(line_fields) != 4: + nf.write(line) + continue + + line_domain = line_fields[0] + line_type = line_fields[1] + line_item = line_fields[2] + actual_value = line_fields[3] + + _assert_is_valid_value(module, line_item, actual_value, + prefix="Invalid configuration found in '%s'." % limits_conf) + + # Found the line + if line_domain == domain and line_type == limit_type and line_item == limit_item: + found = True + if value == actual_value: + message = line + nf.write(line) + continue + + if line_type not in ['nice', 'priority']: + actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1'] + value_unlimited = value in ['unlimited', 'infinity', '-1'] + else: + actual_value_unlimited = value_unlimited = False + + if use_max: + if actual_value_unlimited: + new_value = actual_value + elif value_unlimited: + new_value = value + else: + new_value = str(max(int(value), int(actual_value))) + + if use_min: + if actual_value_unlimited and value_unlimited: + new_value = actual_value + elif actual_value_unlimited: + new_value = value + elif value_unlimited: + new_value = actual_value + else: + new_value = str(min(int(value), int(actual_value))) + + # Change line only if value has changed + if new_value != actual_value: + changed = True + if new_comment: + new_comment = "\t#" + new_comment + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" + message = new_limit + nf.write(new_limit) + else: + message = line + nf.write(line) + else: + nf.write(line) + + if not found: + changed = True + if new_comment: + new_comment = "\t#" + new_comment + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" + message = new_limit + nf.write(new_limit) + + nf.flush() + + with open(nf.name, 'r') as content: + content_new = content.read() + + if not module.check_mode: + if does_not_exist: + with open(limits_conf, 'a'): + pass + + # Move tempfile to newfile + module.atomic_move(os.path.abspath(nf.name), os.path.abspath(limits_conf)) + + try: + nf.close() + except Exception: + pass + + res_args = dict( + changed=changed, + msg=message, + diff=dict(before=b''.join(lines), after=content_new), + ) + + if backup: + 
res_args['backup_file'] = backup_file + + module.exit_json(**res_args) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py deleted file mode 120000 index 82544bdf07..0000000000 --- a/plugins/modules/pamd.py +++ /dev/null @@ -1 +0,0 @@ -./system/pamd.py \ No newline at end of file diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py new file mode 100644 index 0000000000..bf3bc40ef3 --- /dev/null +++ b/plugins/modules/pamd.py @@ -0,0 +1,838 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Kenneth D. Evensen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pamd +author: + - Kenneth D. Evensen (@kevensen) +short_description: Manage PAM Modules +description: + - Edit PAM service's type, control, module path and module arguments. + - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d + for details. +notes: + - This module does not handle authselect profiles. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name generally refers to the PAM service file to change, for example system-auth. + type: str + required: true + type: + description: + - The type of the PAM rule being modified. + - The O(type), O(control), and O(module_path) options all must match a rule to be modified. + type: str + required: true + choices: [account, -account, auth, -auth, password, -password, session, -session] + control: + description: + - The control of the PAM rule being modified. + - This may be a complicated control with brackets. If this is the case, be sure to put "[bracketed controls]" in quotes. + - The O(type), O(control), and O(module_path) options all must match a rule to be modified. + type: str + required: true + module_path: + description: + - The module path of the PAM rule being modified. + - The O(type), O(control), and O(module_path) options all must match a rule to be modified. + type: str + required: true + new_type: + description: + - The new type to assign to the new rule. + type: str + choices: [account, -account, auth, -auth, password, -password, session, -session] + new_control: + description: + - The new control to assign to the new rule. + type: str + new_module_path: + description: + - The new module path to be assigned to the new rule. + type: str + module_arguments: + description: + - When O(state=updated), the O(module_arguments) replace existing module_arguments. + - When O(state=args_absent) args matching those listed in O(module_arguments) are removed. + - When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule. + - Furthermore, if the module argument takes a value denoted by C(=), the value changes to that specified in module_arguments. + type: list + elements: str + state: + description: + - The default of V(updated) modifies an existing rule if type, control and module_path all match an existing rule. + - With V(before), the new rule is inserted before a rule matching type, control and module_path. + - Similarly, with V(after), the new rule is inserted after an existing rulematching type, control and module_path. 
+ - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified. + - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) are ignored. + - State V(absent) removes the rule. + type: str + choices: [absent, before, after, args_absent, args_present, updated] + default: updated + path: + description: + - This is the path to the PAM service files. + type: path + default: /etc/pam.d + backup: + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Update pamd rule's control in /etc/pam.d/system-auth + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + new_control: sufficient + +- name: Update pamd rule's complex control in /etc/pam.d/system-auth + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + new_control: '[success=2 default=ignore]' + +- name: Insert a new rule before an existing rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + new_type: auth + new_control: sufficient + new_module_path: pam_faillock.so + state: before + +- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so + community.general.pamd: + name: su + type: auth + control: sufficient + module_path: pam_rootok.so + new_type: auth + new_control: required + new_module_path: pam_wheel.so + module_arguments: 'use_uid' + state: after + +- name: Remove module arguments from an existing rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: '' + state: updated + +- name: Replace all module arguments in an existing rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: 'preauth silent deny=3 unlock_time=604800 fail_interval=900' + state: updated + +- name: Remove specific arguments from a rule + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: crond,quiet + state: args_absent + +- name: Ensure specific arguments are present in a rule + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: crond,quiet + state: args_present + +- name: Ensure specific arguments are present in a rule (alternative) + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: + - crond + - quiet + state: args_present + +- name: Module arguments requiring commas must be listed as a Yaml list + community.general.pamd: + name: special-module + type: account + control: required + module_path: pam_access.so + module_arguments: + - listsep=, + state: args_present + +- name: Update specific argument value in a rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: 'fail_interval=300' + state: args_present + +- name: Add pam common-auth rule for duo + community.general.pamd: + name: common-auth + new_type: auth + new_control: 
'[success=1 default=ignore]'
+    new_module_path: '/lib64/security/pam_duo.so'
+    state: after
+    type: auth
+    module_path: pam_sss.so
+    control: 'requisite'
+"""
+
+RETURN = r"""
+change_count:
+  description: How many rules were changed.
+  type: int
+  sample: 1
+  returned: success
+backupdest:
+  description:
+    - The file name of the backup file, if created.
+  returned: success
+  type: str
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+from tempfile import NamedTemporaryFile
+from datetime import datetime
+
+
+RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
+                        (?P<control>\[.*\]|\S*)\s+
+                        (?P<path>\S*)\s*
+                        (?P<args>.*)\s*""", re.X)
+RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)")
+
+VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
+
+
+class PamdLine(object):
+
+    def __init__(self, line):
+        self.line = line
+        self.prev = None
+        self.next = None
+
+    @property
+    def is_valid(self):
+        if self.line.strip() == '':
+            return True
+        return False
+
+    def validate(self):
+        if not self.is_valid:
+            return False, "Rule is not valid " + self.line
+        return True, "Rule is valid " + self.line
+
+    # Method to check if a rule matches the type, control and path.
+    def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+        return False
+
+    def __str__(self):
+        return str(self.line)
+
+
+class PamdEmptyLine(PamdLine):
+    pass
+
+
+class PamdComment(PamdLine):
+
+    def __init__(self, line):
+        super(PamdComment, self).__init__(line)
+
+    @property
+    def is_valid(self):
+        if self.line.startswith('#'):
+            return True
+        return False
+
+
+class PamdInclude(PamdLine):
+    def __init__(self, line):
+        super(PamdInclude, self).__init__(line)
+
+    @property
+    def is_valid(self):
+        if self.line.startswith('@include'):
+            return True
+        return False
+
+
+class PamdRule(PamdLine):
+
+    valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
+    valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
+                            'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
+                            'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
+                            'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
+                            'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
+                            'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
+                            'incomplete', 'default']
+    valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
+
+    def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
+        self.prev = None
+        self.next = None
+        self._control = None
+        self._args = None
+        self.rule_type = rule_type
+        self.rule_control = rule_control
+
+        self.rule_path = rule_path
+        self.rule_args = rule_args
+
+    # Method to check if a rule matches the type, control and path.
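+    # For illustration (hypothetical rule): an instance parsed from the line
+    # "auth required pam_unix.so" satisfies
+    #     rule.matches('auth', 'required', 'pam_unix.so')  # -> True
+    # while any differing type/control/path returns False; rule_args are
+    # deliberately ignored by the comparison.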
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None): + return (rule_type == self.rule_type and + rule_control == self.rule_control and + rule_path == self.rule_path) + + @classmethod + def rule_from_string(cls, line): + rule_match = RULE_REGEX.search(line) + rule_args = parse_module_arguments(rule_match.group('args')) + return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args) + + def __str__(self): + if self.rule_args: + return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args)) + return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path) + + @property + def rule_control(self): + if isinstance(self._control, list): + return '[' + ' '.join(self._control) + ']' + return self._control + + @rule_control.setter + def rule_control(self, control): + if control.startswith('['): + control = control.replace(' = ', '=').replace('[', '').replace(']', '') + self._control = control.split(' ') + else: + self._control = control + + @property + def rule_args(self): + if not self._args: + return [] + return self._args + + @rule_args.setter + def rule_args(self, args): + self._args = parse_module_arguments(args) + + @property + def line(self): + return str(self) + + @classmethod + def is_action_unsigned_int(cls, string_num): + number = 0 + try: + number = int(string_num) + except ValueError: + return False + + if number >= 0: + return True + return False + + @property + def is_valid(self): + return self.validate()[0] + + def validate(self): + # Validate the rule type + if self.rule_type not in VALID_TYPES: + return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line + # Validate the rule control + if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls: + return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line + elif isinstance(self._control, list): + for control in self._control: + value, action = control.split("=") + if value not in PamdRule.valid_control_values: + return False, "Rule control value, " + value + ", is not valid in rule " + self.line + if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action): + return False, "Rule control action, " + action + ", is not valid in rule " + self.line + + # TODO: Validate path + + return True, "Rule is valid " + self.line + + +# PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this +# as a doubly linked list. 
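+# For illustration: a parsed service with three rules is held as
+#     self._head <-> rule1 <-> rule2 <-> rule3 == self._tail
+# with every PamdLine carrying .prev/.next pointers, so once a matching rule
+# has been located, inserting or removing a neighbour is a constant-time
+# pointer update rather than a rewrite of the whole structure.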
+class PamdService(object): + + def __init__(self, content): + self._head = None + self._tail = None + for line in content.splitlines(): + if line.lstrip().startswith('#'): + pamd_line = PamdComment(line) + elif line.lstrip().startswith('@include'): + pamd_line = PamdInclude(line) + elif line.strip() == '': + pamd_line = PamdEmptyLine(line) + else: + pamd_line = PamdRule.rule_from_string(line) + + self.append(pamd_line) + + def append(self, pamd_line): + if self._head is None: + self._head = self._tail = pamd_line + else: + pamd_line.prev = self._tail + pamd_line.next = None + self._tail.next = pamd_line + self._tail = pamd_line + + def remove(self, rule_type, rule_control, rule_path): + current_line = self._head + changed = 0 + + while current_line is not None: + if current_line.matches(rule_type, rule_control, rule_path): + if current_line.prev is not None: + current_line.prev.next = current_line.next + if current_line.next is not None: + current_line.next.prev = current_line.prev + else: + self._head = current_line.next + current_line.next.prev = None + changed += 1 + + current_line = current_line.next + return changed + + def get(self, rule_type, rule_control, rule_path): + lines = [] + current_line = self._head + while current_line is not None: + + if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path): + lines.append(current_line) + + current_line = current_line.next + + return lines + + def has_rule(self, rule_type, rule_control, rule_path): + if self.get(rule_type, rule_control, rule_path): + return True + return False + + def update_rule(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + new_args = parse_module_arguments(new_args, return_none=True) + + changes = 0 + for current_rule in rules_to_find: + rule_changed = False + if new_type: + if current_rule.rule_type != new_type: + rule_changed = True + current_rule.rule_type = new_type + if new_control: + if current_rule.rule_control != new_control: + rule_changed = True + current_rule.rule_control = new_control + if new_path: + if current_rule.rule_path != new_path: + rule_changed = True + current_rule.rule_path = new_path + if new_args is not None: + if current_rule.rule_args != new_args: + rule_changed = True + current_rule.rule_args = new_args + + if rule_changed: + changes += 1 + + return changes + + def insert_before(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + changes = 0 + # There are two cases to consider. + # 1. The new rule doesn't exist before the existing rule + # 2. The new rule exists + + for current_rule in rules_to_find: + # Create a new rule + new_rule = PamdRule(new_type, new_control, new_path, new_args) + # First we'll get the previous rule. + previous_rule = current_rule.prev + + # Next we may have to loop backwards if the previous line is a comment. If it + # is, we'll get the previous "rule's" previous. + while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)): + previous_rule = previous_rule.prev + # Next we'll see if the previous rule matches what we are trying to insert. 
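+            # If it already matches, nothing is inserted below, which is what
+            # keeps state=before idempotent across repeated runs.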
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path): + # First set the original previous rule's next to the new_rule + previous_rule.next = new_rule + # Second, set the new_rule's previous to the original previous + new_rule.prev = previous_rule + # Third, set the new rule's next to the current rule + new_rule.next = current_rule + # Fourth, set the current rule's previous to the new_rule + current_rule.prev = new_rule + + changes += 1 + + # Handle the case where it is the first rule in the list. + elif previous_rule is None: + # This is the case where the current rule is not only the first rule + # but the first line as well. So we set the head to the new rule + if current_rule.prev is None: + self._head = new_rule + # This case would occur if the previous line was a comment. + else: + current_rule.prev.next = new_rule + new_rule.prev = current_rule.prev + new_rule.next = current_rule + current_rule.prev = new_rule + changes += 1 + + return changes + + def insert_after(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + changes = 0 + # There are two cases to consider. + # 1. The new rule doesn't exist after the existing rule + # 2. The new rule exists + for current_rule in rules_to_find: + # First we'll get the next rule. + next_rule = current_rule.next + # Next we may have to loop forwards if the next line is a comment. If it + # is, we'll get the next "rule's" next. + while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)): + next_rule = next_rule.next + + # First we create a new rule + new_rule = PamdRule(new_type, new_control, new_path, new_args) + if next_rule is not None and not next_rule.matches(new_type, new_control, new_path): + # If the previous rule doesn't match we'll insert our new rule. 
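+                # (The comparison above is against the next effective rule: if
+                # that rule already equals the new one, this branch is skipped
+                # and repeated runs stay idempotent.)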
+ + # Second set the original next rule's previous to the new_rule + next_rule.prev = new_rule + # Third, set the new_rule's next to the original next rule + new_rule.next = next_rule + # Fourth, set the new rule's previous to the current rule + new_rule.prev = current_rule + # Fifth, set the current rule's next to the new_rule + current_rule.next = new_rule + + changes += 1 + + # This is the case where the current_rule is the last in the list + elif next_rule is None: + new_rule.prev = self._tail + new_rule.next = None + self._tail.next = new_rule + self._tail = new_rule + + current_rule.next = new_rule + changes += 1 + + return changes + + def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + args_to_add = parse_module_arguments(args_to_add) + + changes = 0 + + for current_rule in rules_to_find: + rule_changed = False + + # create some structures to evaluate the situation + simple_new_args = set() + key_value_new_args = dict() + + for arg in args_to_add: + if arg.startswith("["): + continue + elif "=" in arg: + key, value = arg.split("=") + key_value_new_args[key] = value + else: + simple_new_args.add(arg) + + key_value_new_args_set = set(key_value_new_args) + + simple_current_args = set() + key_value_current_args = dict() + + for arg in current_rule.rule_args: + if arg.startswith("["): + continue + elif "=" in arg: + key, value = arg.split("=") + key_value_current_args[key] = value + else: + simple_current_args.add(arg) + + key_value_current_args_set = set(key_value_current_args) + + new_args_to_add = list() + + # Handle new simple arguments + if simple_new_args.difference(simple_current_args): + for arg in simple_new_args.difference(simple_current_args): + new_args_to_add.append(arg) + + # Handle new key value arguments + if key_value_new_args_set.difference(key_value_current_args_set): + for key in key_value_new_args_set.difference(key_value_current_args_set): + new_args_to_add.append(key + '=' + key_value_new_args[key]) + + if new_args_to_add: + current_rule.rule_args += new_args_to_add + rule_changed = True + + # Handle existing key value arguments when value is not equal + if key_value_new_args_set.intersection(key_value_current_args_set): + for key in key_value_new_args_set.intersection(key_value_current_args_set): + if key_value_current_args[key] != key_value_new_args[key]: + arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key]) + current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key]) + rule_changed = True + + if rule_changed: + changes += 1 + + return changes + + def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + args_to_remove = parse_module_arguments(args_to_remove) + + changes = 0 + + for current_rule in rules_to_find: + if not args_to_remove: + args_to_remove = [] + + # Let's check to see if there are any args to remove by finding the intersection + # of the rule's current args and the args_to_remove lists + if not list(set(current_rule.rule_args) & set(args_to_remove)): + continue + + # There are args to remove, so we create a list of new_args absent the args + # to remove. 
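+            # For example (hypothetical values): rule_args of
+            # ['preauth', 'silent', 'deny=3'] with args_to_remove of
+            # ['silent'] leaves ['preauth', 'deny=3'].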
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove] + + changes += 1 + + return changes + + def validate(self): + current_line = self._head + + while current_line is not None: + curr_validate = current_line.validate() + if not curr_validate[0]: + return curr_validate + current_line = current_line.next + return True, "Module is valid" + + def __str__(self): + lines = [] + current_line = self._head + + mark = "# Updated by Ansible - %s" % datetime.now().isoformat() + while current_line is not None: + lines.append(str(current_line)) + current_line = current_line.next + + if len(lines) <= 1: + lines.insert(0, "") + lines.insert(1, mark) + else: + if lines[1].startswith("# Updated by Ansible"): + lines[1] = mark + else: + lines.insert(1, mark) + + return '\n'.join(lines) + '\n' + + +def parse_module_arguments(module_arguments, return_none=False): + # If args is None, return empty list by default. + # But if return_none is True, then return None + if module_arguments is None: + return None if return_none else [] + if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: + return [] + + if not isinstance(module_arguments, list): + module_arguments = [module_arguments] + + # From this point on, module_arguments is guaranteed to be a list, empty or not + parsed_args = [] + + re_clear_spaces = re.compile(r"\s*=\s*") + for arg in module_arguments: + for item in filter(None, RULE_ARG_REGEX.findall(arg)): + if not item.startswith("["): + re_clear_spaces.sub("=", item) + parsed_args.append(item) + + return parsed_args + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + type=dict(type='str', required=True, choices=VALID_TYPES), + control=dict(type='str', required=True), + module_path=dict(type='str', required=True), + new_type=dict(type='str', choices=VALID_TYPES), + new_control=dict(type='str'), + new_module_path=dict(type='str'), + module_arguments=dict(type='list', elements='str'), + state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']), + path=dict(type='path', default='/etc/pam.d'), + backup=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_if=[ + ("state", "args_present", ["module_arguments"]), + ("state", "args_absent", ["module_arguments"]), + ("state", "before", ["new_control", "new_type", "new_module_path"]), + ("state", "after", ["new_control", "new_type", "new_module_path"]), + ], + ) + content = str() + fname = os.path.join(module.params["path"], module.params["name"]) + + # Open the file and read the content or fail + try: + with open(fname, 'r') as service_file_obj: + content = service_file_obj.read() + except IOError as e: + # If unable to read the file, fail out + module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' 
% (fname, str(e))) + + # Assuming we didn't fail, create the service + service = PamdService(content) + # Set the action + action = module.params['state'] + + changes = 0 + + # Take action + if action == 'updated': + changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'before': + changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'after': + changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'args_absent': + changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], + module.params['module_arguments']) + elif action == 'args_present': + if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]: + module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.") + + changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], + module.params['module_arguments']) + elif action == 'absent': + changes = service.remove(module.params['type'], module.params['control'], module.params['module_path']) + + valid, msg = service.validate() + + # If the module is not valid (meaning one of the rules is invalid), we will fail + if not valid: + module.fail_json(msg=msg) + + result = dict( + changed=(changes > 0), + change_count=changes, + backupdest='', + ) + + # If not check mode and something changed, backup the original if necessary then write out the file or fail + if not module.check_mode and result['changed']: + # First, create a backup if desired. 
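+        # backup_local() stores a timestamped copy next to the original file
+        # and returns its path, which is reported to the user as 'backupdest'.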
+ if module.params['backup']: + result['backupdest'] = module.backup_local(fname) + try: + temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False) + with open(temp_file.name, 'w') as fd: + fd.write(str(service)) + + except IOError: + module.fail_json(msg='Unable to create temporary file %s' % temp_file) + + module.atomic_move(temp_file.name, os.path.realpath(fname)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py deleted file mode 120000 index e08a57d28a..0000000000 --- a/plugins/modules/parted.py +++ /dev/null @@ -1 +0,0 @@ -./system/parted.py \ No newline at end of file diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py new file mode 100644 index 0000000000..11e4577667 --- /dev/null +++ b/plugins/modules/parted.py @@ -0,0 +1,792 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Fabrizio Colonna +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - Fabrizio Colonna (@ColOfAbRiX) +module: parted +short_description: Configure block device partitions +description: + - This module allows configuring block device partition using the C(parted) command line tool. For a full description of + the fields and the options check the GNU parted manual. +requirements: + - This module requires C(parted) version 1.8.3 and above. + - Option O(align) (except V(undefined)) requires C(parted) 2.1 or above. + - If the version of C(parted) is below 3.1, it requires a Linux version running the C(sysfs) file system C(/sys/). + - Requires the C(resizepart) command when using the O(resize) parameter. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + device: + description: + - The block device (disk) where to operate. + - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily + access its partitions. + type: str + required: true + align: + description: + - Set alignment for newly created partitions. Use V(undefined) for parted default alignment. + type: str + choices: [cylinder, minimal, none, optimal, undefined] + default: optimal + number: + description: + - The partition number being affected. + - Required when performing any action on the disk, except fetching information. + type: int + unit: + description: + - Selects the current default unit that Parted uses to display locations and capacities on the disk and to interpret + those given by the user if they are not suffixed by an unit. + - When fetching information about a disk, it is recommended to always specify a unit. + type: str + choices: [s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact] + default: KiB + label: + description: + - Disk label type or partition table to use. + - If O(device) already contains a different label, it is changed to O(label) and any previous partitions are lost. + - A O(name) must be specified for a V(gpt) partition table. + type: str + choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun] + default: msdos + part_type: + description: + - May be specified only with O(label=msdos) or O(label=dvh). + - Neither O(part_type) nor O(name) may be used with O(label=sun). 
+ type: str + choices: [extended, logical, primary] + default: primary + part_start: + description: + - Where the partition starts as offset from the beginning of the disk, that is, the "distance" from the start of the + disk. Negative numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for + example V(10GiB), V(15%). + - Using negative values may require setting of O(fs_type) (see notes). + type: str + default: 0% + part_end: + description: + - Where the partition ends as offset from the beginning of the disk, that is, the "distance" from the start of the disk. + Negative numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for + example V(10GiB), V(15%). + type: str + default: 100% + name: + description: + - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only). + type: str + flags: + description: A list of the flags that has to be set on the partition. + type: list + elements: str + state: + description: + - Whether to create or delete a partition. + - If set to V(info) the module only returns the device information. + type: str + choices: [absent, present, info] + default: info + fs_type: + description: + - If specified and the partition does not exist, sets filesystem type to given partition. + - Parameter optional, but see notes below about negative O(part_start) values. + type: str + version_added: '0.2.0' + resize: + description: + - Call C(resizepart) on existing partitions to match the size specified by O(part_end). + type: bool + default: false + version_added: '1.3.0' + +notes: + - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, + the module queries the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not + supported. + - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If + you want to use negative O(part_start), specify O(fs_type) as well or make sure your system contains newer parted. +""" + +RETURN = r""" +partition_info: + description: Current partition information. + returned: success + type: complex + contains: + disk: + description: Generic device information. + type: dict + partitions: + description: List of device partitions. + type: list + script: + description: Parted script executed by module. 
+      type: str
+  sample:
+    "disk":
+      "dev": "/dev/sdb"
+      "logical_block": 512
+      "model": "VMware Virtual disk"
+      "physical_block": 512
+      "size": 5.0
+      "table": "msdos"
+      "unit": "gib"
+    "partitions":
+      - "begin": 0.0
+        "end": 1.0
+        "flags": ["boot", "lvm"]
+        "fstype": ""
+        "name": ""
+        "num": 1
+        "size": 1.0
+      - "begin": 1.0
+        "end": 5.0
+        "flags": []
+        "fstype": ""
+        "name": ""
+        "num": 2
+        "size": 4.0
+    "script": "unit KiB print "
+"""
+
+EXAMPLES = r"""
+- name: Create a new ext4 primary partition
+  community.general.parted:
+    device: /dev/sdb
+    number: 1
+    state: present
+    fs_type: ext4
+
+- name: Remove partition number 1
+  community.general.parted:
+    device: /dev/sdb
+    number: 1
+    state: absent
+
+- name: Create a new primary partition with a size of 1GiB
+  community.general.parted:
+    device: /dev/sdb
+    number: 1
+    state: present
+    part_end: 1GiB
+
+- name: Create a new primary partition for LVM
+  community.general.parted:
+    device: /dev/sdb
+    number: 2
+    flags: [lvm]
+    state: present
+    part_start: 1GiB
+
+- name: Create a new primary partition with a size of 1GiB at disk's end
+  community.general.parted:
+    device: /dev/sdb
+    number: 3
+    state: present
+    fs_type: ext3
+    part_start: -1GiB
+
+# Example on how to read info and reuse it in subsequent task
+- name: Read device information (always use unit when probing)
+  community.general.parted: device=/dev/sdb unit=MiB
+  register: sdb_info
+
+- name: Remove all partitions from disk
+  community.general.parted:
+    device: /dev/sdb
+    number: '{{ item.num }}'
+    state: absent
+  loop: '{{ sdb_info.partitions }}'
+
+- name: Extend an existing partition to fill all available space
+  community.general.parted:
+    device: /dev/sdb
+    number: "{{ sdb_info.partitions | length }}"
+    part_end: "100%"
+    resize: true
+    state: present
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+import math
+import re
+import os
+
+
+# Reference prefixes (International System of Units and IEC)
+units_si = ['B', 'KB', 'MB', 'GB', 'TB']
+units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
+parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
+
+
+def parse_unit(size_str, unit=''):
+    """
+    Parses a string containing a size or boundary information
+    """
+    matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
+    if matches is None:
+        # "<cylinder>,<head>,<sector>" format
+        matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
+        if matches is None:
+            module.fail_json(
+                msg="Error interpreting parted size output: '%s'" % size_str
+            )
+
+        size = {
+            'cylinder': int(matches.group(1)),
+            'head': int(matches.group(2)),
+            'sector': int(matches.group(3))
+        }
+        unit = 'chs'
+
+    else:
+        # Normal format: "<number>[<unit>]"
+        if matches.group(2) is not None:
+            unit = matches.group(2)
+
+        size = float(matches.group(1))
+
+    return size, unit
+
+
+def parse_partition_info(parted_output, unit):
+    """
+    Parses the output of parted and transforms the data into
+    a dictionary.
+
+    Parted Machine Parseable Output:
+    See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00
+    0573.html
+    - All lines end with a semicolon (;)
+    - The first line indicates the units in which the output is expressed.
+      CHS, CYL and BYT stand for CHS, Cylinder and Bytes respectively.
+    - The second line is made of disk information in the following format:
+      "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
+      e":"partition-table-type":"model-name";
+    - If the first line was either CYL or CHS, the next line will contain
+      information on no. of cylinders, heads, sectors and cylinder size.
+ - Partition information begins from the next line. This is of the format: + (for BYT) + "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s + et"; + (for CHS/CYL) + "number":"begin":"end":"filesystem-type":"partition-name":"flags-set"; + """ + lines = [x for x in parted_output.split('\n') if x.strip() != ''] + + # Generic device info + generic_params = lines[1].rstrip(';').split(':') + + # The unit is read once, because parted always returns the same unit + size, unit = parse_unit(generic_params[1], unit) + + generic = { + 'dev': generic_params[0], + 'size': size, + 'unit': unit.lower(), + 'table': generic_params[5], + 'model': generic_params[6], + 'logical_block': int(generic_params[3]), + 'physical_block': int(generic_params[4]) + } + + # CYL and CHS have an additional line in the output + if unit in ['cyl', 'chs']: + chs_info = lines[2].rstrip(';').split(':') + cyl_size, cyl_unit = parse_unit(chs_info[3]) + generic['chs_info'] = { + 'cylinders': int(chs_info[0]), + 'heads': int(chs_info[1]), + 'sectors': int(chs_info[2]), + 'cyl_size': cyl_size, + 'cyl_size_unit': cyl_unit.lower() + } + lines = lines[1:] + + parts = [] + for line in lines[2:]: + part_params = line.rstrip(';').split(':') + + # CHS use a different format than BYT, but contrary to what stated by + # the author, CYL is the same as BYT. I've tested this undocumented + # behaviour down to parted version 1.8.3, which is the first version + # that supports the machine parseable output. + if unit != 'chs': + size = parse_unit(part_params[3])[0] + fstype = part_params[4] + name = part_params[5] + flags = part_params[6] + + else: + size = "" + fstype = part_params[3] + name = part_params[4] + flags = part_params[5] + + parts.append({ + 'num': int(part_params[0]), + 'begin': parse_unit(part_params[1])[0], + 'end': parse_unit(part_params[2])[0], + 'size': size, + 'fstype': fstype, + 'name': name, + 'flags': [f.strip() for f in flags.split(', ') if f != ''], + 'unit': unit.lower(), + }) + + return {'generic': generic, 'partitions': parts} + + +def format_disk_size(size_bytes, unit): + """ + Formats a size in bytes into a different unit, like parted does. It doesn't + manage CYL and CHS formats, though. 
+ This function has been adapted from https://github.com/Distrotech/parted/blo + b/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c + """ + global units_si, units_iec # pylint: disable=global-variable-not-assigned + + unit = unit.lower() + + # Shortcut + if size_bytes == 0: + return 0.0, 'b' + + # Cases where we default to 'compact' + if unit in ['', 'compact', 'cyl', 'chs']: + index = max(0, int( + (math.log10(size_bytes) - 1.0) / 3.0 + )) + unit = 'b' + if index < len(units_si): + unit = units_si[index] + + # Find the appropriate multiplier + multiplier = 1.0 + if unit in units_si: + multiplier = 1000.0 ** units_si.index(unit) + elif unit in units_iec: + multiplier = 1024.0 ** units_iec.index(unit) + + output = size_bytes // multiplier * (1 + 1E-16) + + # Corrections to round up as per IEEE754 standard + if output < 10: + w = output + 0.005 + elif output < 100: + w = output + 0.05 + else: + w = output + 0.5 + + if w < 10: + precision = 2 + elif w < 100: + precision = 1 + else: + precision = 0 + + # Round and return + return round(output, precision), unit + + +def convert_to_bytes(size_str, unit): + size = float(size_str) + multiplier = 1.0 + if unit in units_si: + multiplier = 1000.0 ** units_si.index(unit) + elif unit in units_iec: + multiplier = 1024.0 ** (units_iec.index(unit) + 1) + elif unit in ['', 'compact', 'cyl', 'chs']: + # As per format_disk_size, default to compact, which defaults to megabytes + multiplier = 1000.0 ** units_si.index("MB") + + output = size * multiplier + return int(output) + + +def get_unlabeled_device_info(device, unit): + """ + Fetches device information directly from the kernel and it is used when + parted cannot work because of a missing label. It always returns a 'unknown' + label. + """ + device_name = os.path.basename(device) + base = "/sys/block/%s" % device_name + + vendor = read_record(base + "/device/vendor", "Unknown") + model = read_record(base + "/device/model", "model") + logic_block = int(read_record(base + "/queue/logical_block_size", 0)) + phys_block = int(read_record(base + "/queue/physical_block_size", 0)) + size_bytes = int(read_record(base + "/size", 0)) * logic_block + + size, unit = format_disk_size(size_bytes, unit) + + return { + 'generic': { + 'dev': device, + 'table': "unknown", + 'size': size, + 'unit': unit, + 'logical_block': logic_block, + 'physical_block': phys_block, + 'model': "%s %s" % (vendor, model), + }, + 'partitions': [] + } + + +def get_device_info(device, unit): + """ + Fetches information about a disk and its partitions and it returns a + dictionary. + """ + global module, parted_exec # pylint: disable=global-variable-not-assigned + + # If parted complains about missing labels, it means there are no partitions. + # In this case only, use a custom function to fetch information and emulate + # parted formats for the unit. + label_needed = check_parted_label(device) + if label_needed: + return get_unlabeled_device_info(device, unit) + + command = [parted_exec, "-s", "-m", device, "--", "unit", unit, "print"] + rc, out, err = module.run_command(command) + if rc != 0 and 'unrecognised disk label' not in err: + module.fail_json(msg=( + "Error while getting device information with parted " + "script: '%s'" % " ".join(command)), + rc=rc, out=out, err=err + ) + + return parse_partition_info(out, unit) + + +def check_parted_label(device): + """ + Determines if parted needs a label to complete its duties. Versions prior + to 3.1 don't return data when there is no label. 
For more information see: + http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html + """ + global parted_exec # pylint: disable=global-variable-not-assigned + + # Check the version + parted_major, parted_minor, dummy = parted_version() + if (parted_major == 3 and parted_minor >= 1) or parted_major > 3: + return False + + # Older parted versions return a message in the stdout and RC > 0. + rc, out, err = module.run_command([parted_exec, "-s", "-m", device, "print"]) + if rc != 0 and 'unrecognised disk label' in out.lower(): + return True + + return False + + +def parse_parted_version(out): + """ + Returns version tuple from the output of "parted --version" command + """ + lines = [x for x in out.split('\n') if x.strip() != ''] + if len(lines) == 0: + return None, None, None + + # Sample parted versions (see as well test unit): + # parted (GNU parted) 3.3 + # parted (GNU parted) 3.4.5 + # parted (GNU parted) 3.3.14-dfc61 + matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip()) + + if matches is None: + return None, None, None + + # Convert version to numbers + major = int(matches.group(1)) + minor = int(matches.group(2)) + rev = 0 + if matches.group(3) is not None: + rev = int(matches.group(3)) + + return major, minor, rev + + +def parted_version(): + """ + Returns the major and minor version of parted installed on the system. + """ + global module, parted_exec # pylint: disable=global-variable-not-assigned + + rc, out, err = module.run_command([parted_exec, "--version"]) + if rc != 0: + module.fail_json( + msg="Failed to get parted version.", rc=rc, out=out, err=err + ) + + (major, minor, rev) = parse_parted_version(out) + if major is None: + module.fail_json(msg="Failed to get parted version.", rc=0, out=out) + + return major, minor, rev + + +def parted(script, device, align): + """ + Runs a parted script. + """ + global module, parted_exec # pylint: disable=global-variable-not-assigned + + align_option = ['-a', align] + if align == 'undefined': + align_option = [] + + """ + Use option --fix (-f) if available. Versions prior + to 3.4.64 don't have it. For more information see: + http://savannah.gnu.org/news/?id=10114 + """ + if parted_version() >= (3, 4, 64): + script_option = ['-s', '-f'] + else: + script_option = ['-s'] + + if script and not module.check_mode: + command = [parted_exec] + script_option + ['-m'] + align_option + [device, '--'] + script + rc, out, err = module.run_command(command) + + if rc != 0: + module.fail_json( + msg="Error while running parted script: %s" % " ".join(command).strip(), + rc=rc, out=out, err=err + ) + + +def read_record(file_path, default=None): + """ + Reads the first line of a file and returns it. + """ + try: + with open(file_path, 'r') as f: + return f.readline().strip() + except IOError: + return default + + +def part_exists(partitions, attribute, number): + """ + Looks if a partition that has a specific value for a specific attribute + actually exists. 
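+    For example (hypothetical data): with partitions [{'num': 1}, {'num': 2}],
+    part_exists(partitions, 'num', 2) is True while
+    part_exists(partitions, 'num', 3) is False.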
+    """
+    return any(part.get(attribute) == number for part in partitions)
+
+
+def check_size_format(size_str):
+    """
+    Checks if the input string is an allowed size
+    """
+    size, unit = parse_unit(size_str)
+    return unit in parted_units
+
+
+def main():
+    global module, units_si, units_iec, parted_exec  # pylint: disable=global-variable-not-assigned
+
+    changed = False
+    output_script = []
+    script = []
+    module = AnsibleModule(
+        argument_spec=dict(
+            device=dict(type='str', required=True),
+            align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']),
+            number=dict(type='int'),
+
+            # unit <unit> command
+            unit=dict(type='str', default='KiB', choices=parted_units),
+
+            # mklabel <label-type> command
+            label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
+
+            # mkpart <part-type> [<fs-type>] <start> <end> command
+            part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
+            part_start=dict(type='str', default='0%'),
+            part_end=dict(type='str', default='100%'),
+            fs_type=dict(type='str'),
+
+            # name <partition> <name> command
+            name=dict(type='str'),
+
+            # set <partition> <flag> <state> command
+            flags=dict(type='list', elements='str'),
+
+            # rm/mkpart <partition> command
+            state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
+
+            # resize part
+            resize=dict(type='bool', default=False),
+        ),
+        required_if=[
+            ['state', 'present', ['number']],
+            ['state', 'absent', ['number']],
+        ],
+        supports_check_mode=True,
+    )
+    module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
+
+    # Data extraction
+    device = module.params['device']
+    align = module.params['align']
+    number = module.params['number']
+    unit = module.params['unit']
+    label = module.params['label']
+    part_type = module.params['part_type']
+    part_start = module.params['part_start']
+    part_end = module.params['part_end']
+    name = module.params['name']
+    state = module.params['state']
+    flags = module.params['flags']
+    fs_type = module.params['fs_type']
+    resize = module.params['resize']
+
+    # Parted executable
+    parted_exec = module.get_bin_path('parted', True)
+
+    # Conditioning
+    if number is not None and number < 1:
+        module.fail_json(msg="The partition number must be greater than 0.")
+    if not check_size_format(part_start):
+        module.fail_json(
+            msg="The argument 'part_start' doesn't respect required format. "
+                "The size unit is case sensitive.",
+            err=parse_unit(part_start)
+        )
+    if not check_size_format(part_end):
+        module.fail_json(
+            msg="The argument 'part_end' doesn't respect required format. "
+ "The size unit is case sensitive.", + err=parse_unit(part_end) + ) + + # Read the current disk information + current_device = get_device_info(device, unit) + current_parts = current_device['partitions'] + + if state == 'present': + + # Assign label if required + mklabel_needed = current_device['generic'].get('table', None) != label + if mklabel_needed: + script += ["mklabel", label] + + # Create partition if required + if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)): + script += ["mkpart"] + script += [part_type] + if fs_type is not None: + script += [fs_type] + script += [part_start, part_end] + + # Set the unit of the run + if unit and script: + script = ["unit", unit] + script + + # If partition exists, try to resize + if resize and part_exists(current_parts, 'num', number): + # Ensure new end is different to current + partition = [p for p in current_parts if p['num'] == number][0] + current_part_end = convert_to_bytes(partition['end'], unit) + + size, parsed_unit = parse_unit(part_end, unit) + if parsed_unit == "%": + size = int((int(current_device['generic']['size']) * size) / 100) + parsed_unit = unit + + desired_part_end = convert_to_bytes(size, parsed_unit) + + if current_part_end != desired_part_end: + script += ["resizepart", str(number), part_end] + + # Execute the script and update the data structure. + # This will create the partition for the next steps + if script: + output_script += script + parted(script, device, align) + changed = True + script = [] + + if not module.check_mode: + current_parts = get_device_info(device, unit)['partitions'] + + if part_exists(current_parts, 'num', number) or module.check_mode: + if changed and module.check_mode: + partition = {'flags': []} # Empty structure for the check-mode + else: + partition = [p for p in current_parts if p['num'] == number][0] + + # Assign name to the partition + if name is not None and partition.get('name', None) != name: + # The double quotes need to be included in the arg passed to parted + script += ['name', str(number), '"%s"' % name] + + # Manage flags + if flags: + # Parted infers boot with esp, if you assign esp, boot is set + # and if boot is unset, esp is also unset. 
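+                # Mirroring that behaviour here keeps the flag comparison
+                # below idempotent: requesting flags=['esp'] on a partition
+                # that already reports ['boot', 'esp'] yields no flag changes.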
+ if 'esp' in flags and 'boot' not in flags: + flags.append('boot') + + # Compute only the changes in flag status + flags_off = list(set(partition['flags']) - set(flags)) + flags_on = list(set(flags) - set(partition['flags'])) + + for f in flags_on: + script += ["set", str(number), f, "on"] + + for f in flags_off: + script += ["set", str(number), f, "off"] + + # Set the unit of the run + if unit and script: + script = ["unit", unit] + script + + # Execute the script + if script: + output_script += script + changed = True + parted(script, device, align) + + elif state == 'absent': + # Remove the partition + if part_exists(current_parts, 'num', number) or module.check_mode: + script = ["rm", str(number)] + output_script += script + changed = True + parted(script, device, align) + + elif state == 'info': + output_script = ["unit", unit, "print"] + # Final status of the device + final_device_status = get_device_info(device, unit) + module.exit_json( + changed=changed, + disk=final_device_status['generic'], + partitions=final_device_status['partitions'], + script=output_script + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py deleted file mode 120000 index bb3e952942..0000000000 --- a/plugins/modules/pear.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/pear.py \ No newline at end of file diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py new file mode 100644 index 0000000000..c31845cf54 --- /dev/null +++ b/plugins/modules/pear.py @@ -0,0 +1,323 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Afterburn +# Copyright (c) 2013, Aaron Bull Schaefer +# Copyright (c) 2015, Jonathan Lestrelin +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pear +short_description: Manage pear/pecl packages +description: + - Manage PHP packages with the pear package manager. +author: + - Jonathan Lestrelin (@jle64) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the package to install, upgrade, or remove. + required: true + aliases: [pkg] + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "installed", "latest", "absent", "removed"] + executable: + type: path + description: + - Path to the pear executable. + prompts: + description: + - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected + question. + - Prompts are processed in the same order as the packages list. + - You can optionally specify an answer to any question in the list. + - If no answer is provided, the list item must contain only the regular expression. + - "To specify an answer, the item must be a dictionary with the regular expression as the key and the answer as the value C(my_regular_expression: + 'an_answer')." + - You can provide a list containing items with or without an answer. + - A prompt list can be shorter or longer than the packages list, but a length mismatch issues a warning. + - If you want to specify that a package does not need prompts in the middle of a list, use V(null).
+ type: list + elements: raw + version_added: 0.2.0 +""" + +EXAMPLES = r""" +- name: Install pear package + community.general.pear: + name: Net_URL2 + state: present + +- name: Install pecl package + community.general.pear: + name: pecl/json_post + state: present + +- name: Install pecl package with expected prompt + community.general.pear: + name: pecl/apcu + state: present + prompts: + - (.*)Enable internal debugging in APCu \[no\] + +- name: Install pecl package with expected prompt and an answer + community.general.pear: + name: pecl/apcu + state: present + prompts: + - (.*)Enable internal debugging in APCu \[no\]: "yes" + +- name: Install multiple pear/pecl packages at once with prompts. Prompts are processed in the same order as the packages. + If there are more prompts than packages, the extra prompts are ignored. If there are more packages than prompts, the + extra packages are installed without any prompt expected. + community.general.pear: + name: pecl/gnupg, pecl/apcu + state: present + prompts: + - I am a test prompt because gnupg doesn't ask anything + - (.*)Enable internal debugging in APCu \[no\]: "yes" + +- name: Install multiple pear/pecl packages at once, skipping the first prompt. Prompts are processed in the same order + as the packages. If there are more prompts than packages, the extra prompts are ignored. If there are more packages + than prompts, the extra packages are installed without any prompt expected. + community.general.pear: + name: pecl/gnupg, pecl/apcu + state: present + prompts: + - null + - (.*)Enable internal debugging in APCu \[no\]: "yes" + +- name: Upgrade package + community.general.pear: + name: Net_URL2 + state: latest + +- name: Remove packages + community.general.pear: + name: Net_URL2,pecl/json_post + state: absent +""" + +import os + +from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.basic import AnsibleModule + + +def get_local_version(pear_output): + """Take pear remote-info output and get the installed version""" + lines = pear_output.split('\n') + for line in lines: + if 'Installed ' in line: + installed = line.rsplit(None, 1)[-1].strip() + if installed == '-': + continue + return installed + return None + + +def _get_pear_path(module): + if module.params['executable'] and os.path.isfile(module.params['executable']): + result = module.params['executable'] + else: + result = module.get_bin_path('pear', True, [module.params['executable']]) + return result + + +def get_repository_version(pear_output): + """Take pear remote-info output and get the latest version""" + lines = pear_output.split('\n') + for line in lines: + if 'Latest ' in line: + return line.rsplit(None, 1)[-1].strip() + return None + + +def query_package(module, name): + """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed, + and a second boolean to indicate if the package is up-to-date.""" + lcmd = [_get_pear_path(module), "info", name] + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + rcmd = [_get_pear_path(module), "remote-info", name] + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + + # get the version installed locally (if any) + lversion = get_local_version(rstdout) + + # get the version in the repository + rversion = get_repository_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, + # and the result of the version number comparison + # to determine if the package is up-to-date. + return True, (lversion == rversion) + + return False, False + + +def remove_packages(module, packages): + remove_c = 0 + # Use a for loop so that, in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove it + installed, updated = query_package(module, package) + if not installed: + continue + + cmd = [_get_pear_path(module), "uninstall", package] + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr))) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, state, packages, prompts): + install_c = 0 + has_prompt = bool(prompts) + default_stdin = "\n" + + if has_prompt: + nb_prompts = len(prompts) + nb_packages = len(packages) + + if nb_prompts > 0 and (nb_prompts != nb_packages): + if nb_prompts > nb_packages: + diff = nb_prompts - nb_packages + msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff)) + else: + diff = nb_packages - nb_prompts + msg = "%s packages to install but only %s prompts to expect.
%s packages won't be expected to have a prompt" \ + % (to_text(nb_packages), to_text(nb_prompts), to_text(diff)) + module.warn(msg) + + # Prepare the prompt answers according to item type + tmp_prompts = [] + for _item in prompts: + # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer + # We also expect here that the dict only has ONE key and the first key will be taken + if isinstance(_item, dict): + key = list(_item.keys())[0] + answer = _item[key] + "\n" + + tmp_prompts.append((key, answer)) + elif not _item: + tmp_prompts.append((None, default_stdin)) + else: + tmp_prompts.append((_item, default_stdin)) + prompts = tmp_prompts + for i, package in enumerate(packages): + # if the package is installed and state == present + # or state == latest and is up-to-date then skip + installed, updated = query_package(module, package) + if installed and (state == 'present' or (state == 'latest' and updated)): + continue + + if state == 'present': + command = 'install' + + if state == 'latest': + command = 'upgrade' + + if has_prompt and i < len(prompts): + prompt_regex = prompts[i][0] + data = prompts[i][1] + else: + prompt_regex = None + data = default_stdin + + cmd = [_get_pear_path(module), command, package] + rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True) + if rc != 0: + module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr))) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already installed") + + +def check_packages(module, packages, state): + would_be_changed = [] + for package in packages: + installed, updated = query_package(module, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state)) + else: + module.exit_json(changed=False, msg="package(s) already %s" % state) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['pkg'], required=True), + state=dict(default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']), + executable=dict(type='path'), + prompts=dict(type='list', elements='raw'), + ), + supports_check_mode=True) + + p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p['name']: + pkgs = p['name'].split(',') + + pkg_files = [] + for i, pkg in enumerate(pkgs): + pkg_files.append(None) + + if module.check_mode: + check_packages(module, pkgs, p['state']) + + if p['state'] in ['present', 'latest']: + install_packages(module, p['state'], pkgs, p['prompts']) + elif p['state'] == 'absent': + remove_packages(module, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py deleted file mode 120000 index 1cd5e3c07b..0000000000 --- a/plugins/modules/pids.py +++ /dev/null @@ -1 +0,0 @@ -./system/pids.py \ No newline at end of file diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py new file mode 100644 index 0000000000..aa5f772201 --- /dev/null +++
b/plugins/modules/pids.py @@ -0,0 +1,231 @@ +#!/usr/bin/python +# Copyright (c) 2019, Saranya Sridharan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pids +description: "Retrieves a list of PIDs of a given process name on Ansible controller/controlled machines. Returns an empty list + if no process with that name exists." +short_description: Retrieves the list of process IDs of a running process, otherwise returns an empty list +author: + - Saranya Sridharan (@saranyasridharan) +requirements: + - psutil (Python module) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: The name of the process(es) you want to get PID(s) for. + type: str + pattern: + description: The pattern (regular expression) to match the process(es) you want to get PID(s) for. + type: str + version_added: 3.0.0 + ignore_case: + description: Ignore case in pattern if using the O(pattern) option. + type: bool + default: false + version_added: 3.0.0 +""" + +EXAMPLES = r""" +# Pass the process name +- name: Getting process IDs of the process + community.general.pids: + name: python + register: pids_of_python + +- name: Printing the process IDs obtained + ansible.builtin.debug: + msg: "PIDs of python: {{ pids_of_python.pids | join(',') }}" + +- name: Getting process IDs of processes matching pattern + community.general.pids: + pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py + register: myapp_pids +""" + +RETURN = r""" +pids: + description: Process IDs of the given process (none, one, or more process IDs). + returned: always + type: list + sample: [100, 200] +""" + +import abc +import re +from os.path import basename + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +with deps.declare("psutil"): + import psutil + + +class PSAdapterError(Exception): + pass + + +class PSAdapter(object, metaclass=abc.ABCMeta): + NAME_ATTRS = ('name', 'cmdline') + PATTERN_ATTRS = ('name', 'exe', 'cmdline') + + def __init__(self, psutil): + self._psutil = psutil + + @staticmethod + def from_package(psutil): + version = LooseVersion(psutil.__version__) + if version < LooseVersion('2.0.0'): + return PSAdapter100(psutil) + elif version < LooseVersion('5.3.0'): + return PSAdapter200(psutil) + else: + return PSAdapter530(psutil) + + def get_pids_by_name(self, name): + return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)] + + def _process_iter(self, *attrs): + return self._psutil.process_iter() + + def _has_name(self, proc, name): + attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS) + return (compare_lower(attributes['name'], name) or + attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) + + def _get_proc_attributes(self, proc, *attributes): + return {attribute: self._get_attribute_from_proc(proc, attribute) for attribute in attributes} + + @staticmethod + @abc.abstractmethod + def _get_attribute_from_proc(proc, attribute): + pass + + def get_pids_by_pattern(self, pattern, ignore_case): + flags = 0 + if ignore_case: + flags |= re.I + + try: + regex =
re.compile(pattern, flags) + except re.error as e: + raise PSAdapterError("'%s' is not a valid regular expression: %s" % (pattern, to_native(e))) + + return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)] + + def _matches_regex(self, proc, regex): + # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information + attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS) + matches_name = regex.search(to_native(attributes['name'])) + matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe']))) + matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline']))) + + return any([matches_name, matches_exe, matches_cmd]) + + +class PSAdapter100(PSAdapter): + def __init__(self, psutil): + super(PSAdapter100, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return getattr(proc, attribute) + + +class PSAdapter200(PSAdapter): + def __init__(self, psutil): + super(PSAdapter200, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + method = getattr(proc, attribute) + return method() + + +class PSAdapter530(PSAdapter): + def __init__(self, psutil): + super(PSAdapter530, self).__init__(psutil) + + def _process_iter(self, *attrs): + return self._psutil.process_iter(attrs=attrs) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return proc.info[attribute] + + +def compare_lower(a, b): + if a is None or b is None: + # this could just be "return False" but would lead to surprising behavior if both a and b are None + return a == b + + return a.lower() == b.lower() + + +class Pids(object): + def __init__(self, module): + + deps.validate(module) + + self._ps = PSAdapter.from_package(psutil) + + self._module = module + self._name = module.params['name'] + self._pattern = module.params['pattern'] + self._ignore_case = module.params['ignore_case'] + + self._pids = [] + + def execute(self): + if self._name is not None: + self._pids = self._ps.get_pids_by_name(self._name) + else: + try: + self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case) + except PSAdapterError as e: + self._module.fail_json(msg=to_native(e)) + + return self._module.exit_json(**self.result) + + @property + def result(self): + return { + 'pids': self._pids, + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="str"), + pattern=dict(type="str"), + ignore_case=dict(type="bool", default=False), + ), + required_one_of=[ + ('name', 'pattern') + ], + mutually_exclusive=[ + ('name', 'pattern') + ], + supports_check_mode=True, + ) + + Pids(module).execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py deleted file mode 120000 index 590d78000f..0000000000 --- a/plugins/modules/pingdom.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/pingdom.py \ No newline at end of file diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py new file mode 100644 index 0000000000..5c6ad6f88c --- /dev/null +++ b/plugins/modules/pingdom.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pingdom +short_description: Pause/unpause Pingdom alerts +description: + - This 
module lets you pause/unpause Pingdom alerts. +author: + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" +requirements: + - "This pingdom python library: U(https://github.com/mbabineau/pingdom-python)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Define whether or not the check should be running or paused. + required: true + choices: ["running", "paused", "started", "stopped"] + checkid: + type: str + description: + - Pingdom ID of the check. + required: true + uid: + type: str + description: + - Pingdom user ID. + required: true + passwd: + type: str + description: + - Pingdom user password. + required: true + key: + type: str + description: + - Pingdom API key. + required: true +notes: + - This module does not yet have support to add/remove checks. +""" + +EXAMPLES = r""" +- name: Pause the check with the ID of 12345 + community.general.pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: paused + +- name: Unpause the check with the ID of 12345 + community.general.pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: running +""" + +import traceback + +PINGDOM_IMP_ERR = None +try: + import pingdom + HAS_PINGDOM = True +except Exception: + PINGDOM_IMP_ERR = traceback.format_exc() + HAS_PINGDOM = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def pause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=True) + check = c.get_check(checkid) + name = check.name + result = check.status + # if result != "paused": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def unpause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=False) + check = c.get_check(checkid) + name = check.name + result = check.status + # if result != "up": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), + checkid=dict(required=True), + uid=dict(required=True), + passwd=dict(required=True, no_log=True), + key=dict(required=True, no_log=True), + ) + ) + + if not HAS_PINGDOM: + module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR) + + checkid = module.params['checkid'] + state = module.params['state'] + uid = module.params['uid'] + passwd = module.params['passwd'] + key = module.params['key'] + + if state == "paused" or state == "stopped": + (rc, name, result) = pause(checkid, uid, passwd, key) + + if state == "running" or state == "started": + (rc, name, result) = unpause(checkid, uid, passwd, key) + + if rc != 0: + module.fail_json(checkid=checkid, name=name, status=result) + + module.exit_json(checkid=checkid, name=name, status=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py deleted file mode 120000 index cce6cdef23..0000000000 --- a/plugins/modules/pip_package_info.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/pip_package_info.py \ No newline at end of file diff --git a/plugins/modules/pip_package_info.py 
b/plugins/modules/pip_package_info.py new file mode 100644 index 0000000000..bcb4d45753 --- /dev/null +++ b/plugins/modules/pip_package_info.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# started out with AWX's scan_packages module + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pip_package_info +short_description: Pip package information +description: + - Return information about installed pip packages. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + clients: + description: + - A list of the pip executables that are used to get the packages. They can be supplied with the full path or just the + executable name, for example V(pip3.7). + default: ['pip'] + required: false + type: list + elements: path +requirements: + - pip >= 20.3b1 (necessary for the C(--format) option) + - The requested C(pip) executables must be installed on the target. +author: + - Matthew Jones (@matburt) + - Brian Coca (@bcoca) + - Adam Miller (@maxamillion) +""" + +EXAMPLES = r""" +- name: Just get the list from default pip + community.general.pip_package_info: + +- name: Get the facts for default pip, pip2 and pip3.6 + community.general.pip_package_info: + clients: ['pip', 'pip2', 'pip3.6'] + +- name: Get from specific paths (virtualenvs?) + community.general.pip_package_info: + clients: '/home/me/projec42/python/pip3.5' +""" + +RETURN = r""" +packages: + description: A dictionary of installed package data. + returned: always + type: dict + contains: + python: + description: A dictionary with each pip client which then contains a list of dicts with python package information. 
+ returned: always + type: dict + sample: + { + "packages": { + "pip": { + "Babel": [ + { + "name": "Babel", + "source": "pip", + "version": "2.6.0" + } + ], + "Flask": [ + { + "name": "Flask", + "source": "pip", + "version": "1.0.2" + } + ], + "Flask-SQLAlchemy": [ + { + "name": "Flask-SQLAlchemy", + "source": "pip", + "version": "2.3.2" + } + ], + "Jinja2": [ + { + "name": "Jinja2", + "source": "pip", + "version": "2.10" + } + ] + } + } + } +""" + +import json +import os + +from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.facts.packages import CLIMgr + + +class PIP(CLIMgr): + + def __init__(self, pip, module): + + self.CLI = pip + self.module = module + + def list_installed(self): + rc, out, err = self.module.run_command([self._cli, 'list', '-l', '--format=json']) + if rc != 0: + raise Exception("Unable to list packages rc=%s : %s" % (rc, err)) + return json.loads(out) + + def get_package_details(self, package): + package['source'] = self.CLI + return package + + +def main(): + + # start work + module = AnsibleModule( + argument_spec=dict( + clients=dict(type='list', elements='path', default=['pip']), + ), + supports_check_mode=True) + packages = {} + results = {'packages': {}} + clients = module.params['clients'] + + found = 0 + for pip in clients: + + if not os.path.basename(pip).startswith('pip'): + module.warn('Skipping invalid pip client: %s' % (pip)) + continue + try: + pip_mgr = PIP(pip, module) + if pip_mgr.is_available(): + found += 1 + packages[pip] = pip_mgr.get_packages() + except Exception as e: + module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e))) + continue + + if found == 0: + module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients) + + # return info + results['packages'] = packages + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py deleted file mode 120000 index 7dc9c9878c..0000000000 --- a/plugins/modules/pipx.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/language/pipx.py \ No newline at end of file diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py new file mode 100644 index 0000000000..4d4f7227f7 --- /dev/null +++ b/plugins/modules/pipx.py @@ -0,0 +1,441 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pipx +short_description: Manages applications installed with pipx +version_added: 3.8.0 +description: + - Manage Python applications installed in isolated virtualenvs using pipx. +extends_documentation_fragment: + - community.general.attributes + - community.general.pipx +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + type: str + choices: + - present + - absent + - install + - install_all + - uninstall + - uninstall_all + - inject + - uninject + - upgrade + - upgrade_shared + - upgrade_all + - reinstall + - reinstall_all + - latest + - pin + - unpin + default: install + description: + - Desired state for the application. + - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. + - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). 
It was added + in community.general 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), + make sure to have a compatible version when using this option. These states have been added in community.general 9.4.0. + name: + type: str + description: + - The name of the application and also the name of the Python package being installed. + - In C(pipx) documentation it is also referred to as the name of the virtual environment where the application is installed. + - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name + to be installed. + - Starting in community.general 10.7.0, you can use package specifiers when O(state=present) or O(state=install). For + example, O(name=tox<4.0.0) or O(name=tox>3.0.27). + - Please note that when you use O(state=present) and O(name) with version specifiers, contrary to the behavior of C(pipx), + this module honors the version specifier and installs a version of the application that satisfies it. If you want + to ensure the reinstallation of the application even when the version specifier is met, then you must use O(force=true), + or perhaps use O(state=upgrade) instead. + - Use O(source) for installing from URLs or directories. + source: + type: str + description: + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other + states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed + file. + - The value of this option is passed as-is to C(pipx). + - O(name) is still required when using O(source) to establish the application name without fetching the package from + a remote source. + - The module is not idempotent when using O(source). + install_apps: + description: + - Add apps from the injected packages. + - Only used when O(state=inject). + type: bool + default: false + version_added: 6.5.0 + install_deps: + description: + - Include applications of dependent packages. + - Only used when O(state=install), O(state=latest), or O(state=inject). + type: bool + default: false + inject_packages: + description: + - Packages to be injected into an existing virtual environment. + - Only used when O(state=inject). + type: list + elements: str + force: + description: + - Force modification of the application's virtual environment. See C(pipx) for details. + - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). + - The module is not idempotent when O(force=true). + type: bool + default: false + include_injected: + description: + - Upgrade the injected packages along with the application. + - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). + - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. + type: bool + default: false + index_url: + description: + - Base URL of Python Package Index. + - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). + type: str + python: + description: + - Python version to be used when creating the application virtual environment. Must be 3.6+. + - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). + type: str + system_site_packages: + description: + - Give application virtual environment access to the system site-packages directory. 
+ - Only used when O(state=install) or O(state=latest). + type: bool + default: false + version_added: 6.6.0 + editable: + description: + - Install the project in editable mode. + type: bool + default: false + version_added: 4.6.0 + pip_args: + description: + - Arbitrary arguments to pass directly to C(pip). + type: str + version_added: 4.6.0 + suffix: + description: + - Optional suffix for virtual environment and executable names. + - B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change. + type: str + version_added: 9.3.0 + global: + version_added: 9.4.0 + spec_metadata: + description: + - Spec metadata file for O(state=install_all). + - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) + with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). + type: path + version_added: 9.4.0 +requirements: + - When using O(name) with version specifiers, the Python package C(packaging) is required. + - If the package C(packaging) is at a version lesser than C(22.0.0), it fails silently when processing invalid specifiers, + like C(tox<<<<4.0). +author: + - "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = r""" +- name: Install tox + community.general.pipx: + name: tox + +- name: Install tox from git repository + community.general.pipx: + name: tox + source: git+https://github.com/tox-dev/tox.git + +- name: Upgrade tox + community.general.pipx: + name: tox + state: upgrade + +- name: Install or upgrade tox with extra 'docs' + community.general.pipx: + name: tox + source: tox[docs] + state: latest + +- name: Reinstall black with specific Python version + community.general.pipx: + name: black + state: reinstall + python: 3.7 + +- name: Uninstall pycowsay + community.general.pipx: + name: pycowsay + state: absent + +- name: Install multiple packages from list + vars: + pipx_packages: + - pycowsay + - black + - tox + community.general.pipx: + name: "{{ item }}" + state: latest + with_items: "{{ pipx_packages }}" +""" + +RETURN = r""" +version: + description: Version of pipx. 
+ type: str + returned: always + sample: "1.7.1" + version_added: 10.1.0 +""" + + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.pkg_req import PackageRequirement +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible.module_utils.facts.compat import ansible_facts + + +def _make_name(name, suffix): + return name if suffix is None else "{0}{1}".format(name, suffix) + + +class PipX(StateModuleHelper): + output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] + argument_spec = dict( + state=dict(type='str', default='install', + choices=[ + 'present', 'absent', 'install', 'install_all', 'uninstall', 'uninstall_all', 'inject', 'uninject', + 'upgrade', 'upgrade_shared', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest', 'pin', 'unpin', + ]), + name=dict(type='str'), + source=dict(type='str'), + install_apps=dict(type='bool', default=False), + install_deps=dict(type='bool', default=False), + inject_packages=dict(type='list', elements='str'), + force=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + index_url=dict(type='str'), + python=dict(type='str'), + system_site_packages=dict(type='bool', default=False), + editable=dict(type='bool', default=False), + pip_args=dict(type='str'), + suffix=dict(type='str'), + spec_metadata=dict(type='path'), + ) + argument_spec.update(pipx_common_argspec) + + module = dict( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['name']), + ('state', 'install', ['name']), + ('state', 'install_all', ['spec_metadata']), + ('state', 'absent', ['name']), + ('state', 'uninstall', ['name']), + ('state', 'upgrade', ['name']), + ('state', 'reinstall', ['name']), + ('state', 'latest', ['name']), + ('state', 'inject', ['name', 'inject_packages']), + ('state', 'pin', ['name']), + ('state', 'unpin', ['name']), + ], + required_by=dict( + suffix="name", + ), + supports_check_mode=True, + ) + + def _retrieve_installed(self): + output_process = make_process_dict(include_injected=True) + installed, dummy = self.runner('_list global', output_process=output_process).run() + + if self.app_name is None: + return installed + + return {k: v for k, v in installed.items() if k == self.app_name} + + def __init_module__(self): + if self.vars.executable: + self.command = [self.vars.executable] + else: + facts = ansible_facts(self.module, gather_subset=['python']) + self.command = [facts['python']['executable'], '-m', 'pipx'] + self.runner = pipx_runner(self.module, self.command) + + pkg_req = PackageRequirement(self.module, self.vars.name) + self.parsed_name = pkg_req.parsed_name + self.parsed_req = pkg_req.requirement + self.app_name = _make_name(self.parsed_name, self.vars.suffix) + + self.vars.set('application', self._retrieve_installed(), change=True, diff=True) + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + if LooseVersion(self.vars.version) < LooseVersion("1.7.0"): + self.do_raise("The pipx tool must be at least at version 1.7.0") + + def __quit_module__(self): + self.vars.application = self._retrieve_installed() + + def _capture_results(self, ctx): + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + 
self.vars.set('run_info', ctx.run_info, verbosity=4) + + def state_install(self): + # If we have a version spec and no source, use the version spec as source + if self.parsed_req and not self.vars.source: + self.vars.source = self.vars.name + + if self.vars.application.get(self.app_name): + is_installed = True + version_match = self.vars.application[self.app_name]['version'] in self.parsed_req.specifier if self.parsed_req else True + force = self.vars.force or (not version_match) + else: + is_installed = False + version_match = False + force = self.vars.force + + if is_installed and version_match and not force: + return + + self.changed = True + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: + ctx.run(name_source=[self.parsed_name, self.vars.source], force=force) + self._capture_results(ctx) + + state_present = state_install + + def state_install_all(self): + self.changed = True + with self.runner('state global index_url force python system_site_packages editable pip_args spec_metadata', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to upgrade a non-existent application: {0}".format(name)) + if self.vars.force: + self.changed = True + + with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninstall(self): + if self.vars.application: + name = _make_name(self.vars.name, self.vars.suffix) + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + state_absent = state_uninstall + + def state_reinstall(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to reinstall a non-existent application: {0}".format(name)) + self.changed = True + with self.runner('state global name python', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_inject(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to inject packages into a non-existent application: {0}".format(name)) + if self.vars.force: + self.changed = True + with self.runner('state global index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninject(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to uninject packages into a non-existent application: {0}".format(name)) + with self.runner('state global name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninstall_all(self): + with self.runner('state global', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_reinstall_all(self): + with self.runner('state global python', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade_all(self): + if self.vars.force: + self.changed = True + with self.runner('state global include_injected force', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def 
state_upgrade_shared(self): + with self.runner('state global pip_args', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_latest(self): + if not self.vars.application or self.vars.force: + self.changed = True + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: + ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) + self._capture_results(ctx) + + with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + ctx.run(state='upgrade') + self._capture_results(ctx) + + def state_pin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_unpin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + +def main(): + PipX.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py new file mode 100644 index 0000000000..85d094c837 --- /dev/null +++ b/plugins/modules/pipx_info.py @@ -0,0 +1,197 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pipx_info +short_description: Retrieves information about applications installed with pipx +version_added: 5.6.0 +description: + - Retrieve details about Python applications installed in isolated virtualenvs using pipx. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.pipx +options: + name: + description: + - Name of an application installed with C(pipx). + type: str + include_deps: + description: + - Include dependent packages in the output. + type: bool + default: false + include_injected: + description: + - Include injected packages in the output. + type: bool + default: false + include_raw: + description: + - Returns the raw output of C(pipx list --json). + - The raw output is not affected by O(include_deps) or O(include_injected). + type: bool + default: false + global: + version_added: 9.3.0 +author: + - "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = r""" +- name: Retrieve all installed applications + community.general.pipx_info: {} + +- name: Retrieve all installed applications, include dependencies and injected packages + community.general.pipx_info: + include_deps: true + include_injected: true + +- name: Retrieve application tox + community.general.pipx_info: + name: tox + include_deps: true + +- name: Retrieve application ansible-lint, include dependencies + community.general.pipx_info: + name: ansible-lint + include_deps: true +""" + +RETURN = r""" +application: + description: The list of installed applications. + returned: success + type: list + elements: dict + contains: + name: + description: The name of the installed application. + returned: success + type: str + sample: "tox" + version: + description: The version of the installed application. + returned: success + type: str + sample: "3.24.0" + dependencies: + description: The dependencies of the installed application, when O(include_deps=true).
+ returned: success + type: list + elements: str + sample: ["virtualenv"] + injected: + description: The injected packages for the installed application, when O(include_injected=true). + returned: success + type: dict + sample: + licenses: "0.6.1" + pinned: + description: + - Whether the installed application is pinned or not. + - When using C(pipx<=1.6.0), this returns C(null). + returned: success + type: bool + sample: + pinned: true + version_added: 10.0.0 + +raw_output: + description: The raw output of the C(pipx list) command, when O(include_raw=true). Used for debugging. + returned: success + type: dict + +cmd: + description: Command executed to obtain the list of installed applications. + returned: success + type: list + elements: str + sample: + [ + "/usr/bin/python3.10", + "-m", + "pipx", + "list", + "--include-injected", + "--json" + ] + +version: + description: Version of pipx. + type: str + returned: always + sample: "1.7.1" + version_added: 10.1.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible.module_utils.facts.compat import ansible_facts + + +class PipXInfo(ModuleHelper): + output_params = ['name'] + argument_spec = dict( + name=dict(type='str'), + include_deps=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + include_raw=dict(type='bool', default=False), + ) + argument_spec.update(pipx_common_argspec) + module = dict( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + def __init_module__(self): + if self.vars.executable: + self.command = [self.vars.executable] + else: + facts = ansible_facts(self.module, gather_subset=['python']) + self.command = [facts['python']['executable'], '-m', 'pipx'] + self.runner = pipx_runner(self.module, self.command) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + if LooseVersion(self.vars.version) < LooseVersion("1.7.0"): + self.do_raise("The pipx tool must be at least at version 1.7.0") + + def __run__(self): + output_process = make_process_dict(self.vars.include_injected, self.vars.include_deps) + with self.runner('_list global', output_process=output_process) as ctx: + applications, raw_data = ctx.run() + if self.vars.include_raw: + self.vars.raw_output = raw_data + + if self.vars.name: + self.vars.application = [ + v + for k, v in applications.items() + if k == self.vars.name + ] + else: + self.vars.application = list(applications.values()) + self._capture_results(ctx) + + def _capture_results(self, ctx): + self.vars.cmd = ctx.cmd + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + +def main(): + PipXInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pkg5.py b/plugins/modules/pkg5.py deleted file mode 120000 index 18493d3a3e..0000000000 --- a/plugins/modules/pkg5.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/pkg5.py \ No newline at end of file diff --git a/plugins/modules/pkg5.py b/plugins/modules/pkg5.py new file mode 100644 index 0000000000..1055d9090f --- /dev/null +++ b/plugins/modules/pkg5.py @@ -0,0 +1,196 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Peter Oliver +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pkg5 +author: + - Peter Oliver (@mavit) +short_description: Manages packages with the Solaris 11 Image Packaging System +description: + - IPS packages are the native packages in Solaris 11 and higher. +notes: + - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An FMRI of the package(s) to be installed/removed/updated. + - Multiple packages may be specified, separated by V(,). + required: true + type: list + elements: str + state: + description: + - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. + choices: [absent, latest, present, installed, removed, uninstalled] + default: present + type: str + accept_licenses: + description: + - Accept any licenses. + type: bool + default: false + aliases: [accept, accept_licences] + be_name: + description: + - Creates a new boot environment with the given name. + type: str + refresh: + description: + - Refresh publishers before execution. + type: bool + default: true + verbose: + description: + - Set to V(true) to disable quiet execution. + type: bool + default: false + version_added: 9.0.0 +""" +EXAMPLES = r""" +- name: Install Vim + community.general.pkg5: + name: editor/vim + +- name: Install Vim without refreshing publishers + community.general.pkg5: + name: editor/vim + refresh: false + +- name: Remove finger daemon + community.general.pkg5: + name: service/network/finger + state: absent + +- name: Install several packages at once + community.general.pkg5: + name: + - /file/gnu-findutils + - /text/gnu-grep +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']), + accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']), + be_name=dict(type='str'), + refresh=dict(type='bool', default=True), + verbose=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + params = module.params + packages = [] + + # pkg(5) FMRIs include a comma before the release number, but + # AnsibleModule will have split this into multiple items for us. + # Try to spot where this has happened and fix it.
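Because AnsibleModule splits list options on commas, an FMRI such as C(editor/vim@8.2,5.11-0) arrives as two fragments, and the loop that follows stitches them back together. Here is a standalone sketch of that behavior; `rejoin_fmri_fragments` is an invented name for illustration, not part of the module:

```python
import re


# Standalone sketch of the rejoin loop below (hypothetical helper name).
def rejoin_fmri_fragments(fragments):
    packages = []
    for fragment in fragments:
        # A fragment starting with a dotted number that follows an '@version'
        # fragment is the tail of an FMRI that was split on ','.
        if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)
    return packages


print(rejoin_fmri_fragments(['editor/vim@8.2', '5.11-0']))
# -> ['editor/vim@8.2,5.11-0']
```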
+ for fragment in params['name']: + if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]): + packages[-1] += ',' + fragment + else: + packages.append(fragment) + + if params['state'] in ['present', 'installed']: + ensure(module, 'present', packages, params) + elif params['state'] in ['latest']: + ensure(module, 'latest', packages, params) + elif params['state'] in ['absent', 'uninstalled', 'removed']: + ensure(module, 'absent', packages, params) + + +def ensure(module, state, packages, params): + response = { + 'results': [], + 'msg': '', + } + behaviour = { + 'present': { + 'filter': lambda p: not is_installed(module, p), + 'subcommand': 'install', + }, + 'latest': { + 'filter': lambda p: ( + not is_installed(module, p) or not is_latest(module, p) + ), + 'subcommand': 'install', + }, + 'absent': { + 'filter': lambda p: is_installed(module, p), + 'subcommand': 'uninstall', + }, + } + + if module.check_mode: + dry_run = ['-n'] + else: + dry_run = [] + + if params['accept_licenses']: + accept_licenses = ['--accept'] + else: + accept_licenses = [] + + if params['be_name']: + beadm = ['--be-name=' + module.params['be_name']] + else: + beadm = [] + + if params['refresh']: + no_refresh = [] + else: + no_refresh = ['--no-refresh'] + + if params['verbose']: + verbosity = [] + else: + verbosity = ['-q'] + + to_modify = list(filter(behaviour[state]['filter'], packages)) + if to_modify: + rc, out, err = module.run_command( + ['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + verbosity + ['--'] + to_modify) + response['rc'] = rc + response['results'].append(out) + response['msg'] += err + response['changed'] = True + if rc == 4: + response['changed'] = False + response['failed'] = False + elif rc != 0: + module.fail_json(**response) + + module.exit_json(**response) + + +def is_installed(module, package): + rc, out, err = module.run_command(['pkg', 'list', '--', package]) + return not bool(int(rc)) + + +def is_latest(module, package): + rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) + return bool(int(rc)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py deleted file mode 120000 index 15cd6aadf9..0000000000 --- a/plugins/modules/pkg5_publisher.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/pkg5_publisher.py \ No newline at end of file diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py new file mode 100644 index 0000000000..8ff9463c6b --- /dev/null +++ b/plugins/modules/pkg5_publisher.py @@ -0,0 +1,203 @@ +#!/usr/bin/python + +# Copyright 2014 Peter Oliver +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pkg5_publisher +author: "Peter Oliver (@mavit)" +short_description: Manages Solaris 11 Image Packaging System publishers +description: + - IPS packages are the native packages in Solaris 11 and higher. + - This module configures which publishers a client downloads IPS packages from. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The publisher's name. + required: true + aliases: [publisher] + type: str + state: + description: + - Whether to ensure that a publisher is present or absent. 
+ default: present + choices: [present, absent] + type: str + sticky: + description: + - Packages installed from a sticky repository can only receive updates from that repository. + type: bool + enabled: + description: + - Is the repository enabled or disabled? + type: bool + origin: + description: + - A path or URL to the repository. + - Multiple values may be provided. + type: list + elements: str + mirror: + description: + - A path or URL to the repository mirror. + - Multiple values may be provided. + type: list + elements: str +""" +EXAMPLES = r""" +- name: Fetch packages for the solaris publisher direct from Oracle + community.general.pkg5_publisher: + name: solaris + sticky: true + origin: https://pkg.oracle.com/solaris/support/ + +- name: Configure a publisher for locally-produced packages + community.general.pkg5_publisher: + name: site + origin: 'https://pkg.example.com/site/' +""" + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['publisher']), + state=dict(default='present', choices=['present', 'absent']), + sticky=dict(type='bool'), + enabled=dict(type='bool'), + # search_after=dict(), + # search_before=dict(), + origin=dict(type='list', elements='str'), + mirror=dict(type='list', elements='str'), + ) + ) + + for option in ['origin', 'mirror']: + if module.params[option] == ['']: + module.params[option] = [] + + if module.params['state'] == 'present': + modify_publisher(module, module.params) + else: + unset_publisher(module, module.params['name']) + + +def modify_publisher(module, params): + name = params['name'] + existing = get_publishers(module) + + if name in existing: + for option in ['origin', 'mirror', 'sticky', 'enabled']: + if params[option] is not None: + if params[option] != existing[name][option]: + return set_publisher(module, params) + else: + return set_publisher(module, params) + + module.exit_json() + + +def set_publisher(module, params): + name = params['name'] + args = [] + + if params['origin'] is not None: + args.append('--remove-origin=*') + args.extend(['--add-origin=' + u for u in params['origin']]) + if params['mirror'] is not None: + args.append('--remove-mirror=*') + args.extend(['--add-mirror=' + u for u in params['mirror']]) + + if params['sticky'] is not None and params['sticky']: + args.append('--sticky') + elif params['sticky'] is not None: + args.append('--non-sticky') + + if params['enabled'] is not None and params['enabled']: + args.append('--enable') + elif params['enabled'] is not None: + args.append('--disable') + + rc, out, err = module.run_command( + ["pkg", "set-publisher"] + args + [name], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + if rc != 0: + module.fail_json(**response) + module.exit_json(**response) + + +def unset_publisher(module, publisher): + if publisher not in get_publishers(module): + module.exit_json() + + rc, out, err = module.run_command( + ["pkg", "unset-publisher", publisher], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + if rc != 0: + module.fail_json(**response) + module.exit_json(**response) + + +def get_publishers(module): + rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True) + + lines = out.splitlines() + keys = lines.pop(0).lower().split("\t") + + publishers = {} + for line in lines: + values = dict(zip(keys, map(unstringify, line.split("\t")))) + name = 
values['publisher'] + + if name not in publishers: + publishers[name] = {k: values[k] for k in ['sticky', 'enabled']} + publishers[name]['origin'] = [] + publishers[name]['mirror'] = [] + + if values['type'] is not None: + publishers[name][values['type']].append(values['uri']) + + return publishers + + +def unstringify(val): + if val == "-" or val == '': + return None + elif val == "true": + return True + elif val == "false": + return False + else: + return val + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py deleted file mode 120000 index 75630aced2..0000000000 --- a/plugins/modules/pkgin.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/pkgin.py \ No newline at end of file diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py new file mode 100644 index 0000000000..e350f977ef --- /dev/null +++ b/plugins/modules/pkgin.py @@ -0,0 +1,395 @@ +#!/usr/bin/python + +# Copyright (c) 2013 Shaun Zinck +# Copyright (c) 2015 Lawrence Leonard Gilbert +# Copyright (c) 2016 Jasper Lievisse Adriaanse +# +# Written by Shaun Zinck +# Based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pkgin +short_description: Package manager for SmartOS, NetBSD, et al +description: + - 'The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/)).' +author: + - "Larry Gilbert (@L2G)" + - "Shaun Zinck (@szinck)" + - "Jasper Lievisse Adriaanse (@jasperla)" +notes: + - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package is silently + removed as well.' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of package to install/remove; + - Multiple names may be given, separated by commas. + aliases: [pkg] + type: list + elements: str + state: + description: + - Intended state of the package. + choices: ['present', 'absent'] + default: present + type: str + update_cache: + description: + - Update repository database. Can be run with other steps or on its own. + type: bool + default: false + upgrade: + description: + - Upgrade main packages to their newer versions. + type: bool + default: false + full_upgrade: + description: + - Upgrade all packages to their newer versions. + type: bool + default: false + clean: + description: + - Clean packages cache. + type: bool + default: false + force: + description: + - Force package reinstall. 
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Install package foo + community.general.pkgin: + name: foo + state: present + +- name: Install specific version of foo package + community.general.pkgin: + name: foo-2.0.1 + state: present + +- name: Update cache and install foo package + community.general.pkgin: + name: foo + update_cache: true + +- name: Remove package foo + community.general.pkgin: + name: foo + state: absent + +- name: Remove packages foo and bar + community.general.pkgin: + name: foo,bar + state: absent + +- name: Update repositories as a separate step + community.general.pkgin: + update_cache: true + +- name: Upgrade main packages (equivalent to pkgin upgrade) + community.general.pkgin: + upgrade: true + +- name: Upgrade all packages (equivalent to pkgin full-upgrade) + community.general.pkgin: + full_upgrade: true + +- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade) + community.general.pkgin: + full_upgrade: true + force: true + +- name: Clean packages cache (equivalent to pkgin clean) + community.general.pkgin: + clean: true +""" + + +import re + +from ansible.module_utils.basic import AnsibleModule + + +class PackageState(object): + PRESENT = 1 + NOT_INSTALLED = 2 + OUTDATED = 4 + NOT_FOUND = 8 + + +def query_package(module, name): + """Search for the package by name and return state of the package. + """ + + # test whether '-p' (parsable) flag is supported. + rc, out, err = module.run_command([PKGIN_PATH, "-p", "-v"]) + + if rc == 0: + pflag = ['-p'] + splitchar = ';' + else: + pflag = [] + splitchar = ' ' + + # Use "pkgin search" to find the package. The regular expression will + # only match on the complete name. + rc, out, err = module.run_command([PKGIN_PATH] + pflag + ["search", "^%s$" % name]) + + # rc will not be 0 unless the search was a success + if rc == 0: + + # Search results may contain more than one line (e.g., 'emacs'), so iterate + # through each line to see if we have a match. + packages = out.split('\n') + + for package in packages: + + # Break up line at spaces. The first part will be the package with its + # version (e.g. 
'gcc47-libs-4.7.2nb4'), and the second will be the state + # of the package: + # '' - not installed + # '<' - installed but out of date + # '=' - installed and up to date + # '>' - installed but newer than the repository version + + if (package in ('reading local summary...', + 'processing local summary...', + 'downloading pkg_summary.xz done.')) or \ + (package.startswith('processing remote summary (')): + continue + + pkgname_with_version, raw_state = package.split(splitchar)[0:2] + + # Search for package, stripping version + # (results in sth like 'gcc47-libs' or 'emacs24-nox11') + pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) + + # Do not proceed unless we have a match + if not pkg_search_obj: + continue + + # Grab matched string + pkgname_without_version = pkg_search_obj.group(1) + + if name not in (pkgname_with_version, pkgname_without_version): + continue + + # The package was found; now return its state + if raw_state == '<': + return PackageState.OUTDATED + elif raw_state == '=' or raw_state == '>': + return PackageState.PRESENT + else: + # Package found but not installed + return PackageState.NOT_INSTALLED + # no fall-through + + # No packages were matched + return PackageState.NOT_FOUND + + # Search failed + return PackageState.NOT_FOUND + + +def format_action_message(module, action, count): + vars = {"actioned": action, + "count": count} + + if module.check_mode: + message = "would have %(actioned)s %(count)d package" % vars + else: + message = "%(actioned)s %(count)d package" % vars + + if count == 1: + return message + else: + return message + "s" + + +def format_pkgin_command(module, command, package=None): + # Not all commands take a package argument, so cover this up by passing + # an empty string. Some commands (e.g. 'update') will ignore extra + # arguments, however this behaviour cannot be relied on for others. 
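+    # A couple of illustrative results (assuming PKGIN_PATH resolved to
+    # /opt/local/bin/pkgin and check mode is off):
+    #   format_pkgin_command(module, "update")         -> ['/opt/local/bin/pkgin', '-y', 'update']
+    #   format_pkgin_command(module, "install", "foo") -> ['/opt/local/bin/pkgin', '-y', 'install', 'foo']
+    # With force=true, '-F' is inserted after '-y'; in check mode '-n' replaces '-y'.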
+ if package is None: + packages = [] + else: + packages = [package] + + if module.params["force"]: + force = ["-F"] + else: + force = [] + + if module.check_mode: + return [PKGIN_PATH, "-n", command] + packages + else: + return [PKGIN_PATH, "-y"] + force + [command] + packages + + +def remove_packages(module, packages): + + remove_c = 0 + + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]: + continue + + rc, out, err = module.run_command( + format_pkgin_command(module, "remove", package)) + + if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: + module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=out, stderr=err) + + remove_c += 1 + + if remove_c > 0: + module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c)) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, packages): + + install_c = 0 + + for package in packages: + query_result = query_package(module, package) + if query_result in [PackageState.PRESENT, PackageState.OUTDATED]: + continue + elif query_result is PackageState.NOT_FOUND: + module.fail_json(msg="failed to find package %s for installation" % package) + + rc, out, err = module.run_command( + format_pkgin_command(module, "install", package)) + + if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: + module.fail_json(msg="failed to install %s: %s" % (package, out), stdout=out, stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err) + + module.exit_json(changed=False, msg="package(s) already present") + + +def update_package_db(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "update")) + + if rc == 0: + if re.search('database for.*is up-to-date\n$', out): + return False, "database is up-to-date" + else: + return True, "updated repository database" + else: + module.fail_json(msg="could not update package db", stdout=out, stderr=err) + + +def do_upgrade_packages(module, full=False): + if full: + cmd = "full-upgrade" + else: + cmd = "upgrade" + + rc, out, err = module.run_command( + format_pkgin_command(module, cmd)) + + if rc == 0: + if re.search('^(.*\n|)nothing to do.\n$', out): + module.exit_json(changed=False, msg="nothing left to upgrade") + else: + module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err) + + +def upgrade_packages(module): + do_upgrade_packages(module) + + +def full_upgrade_packages(module): + do_upgrade_packages(module, True) + + +def clean_cache(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "clean")) + + if rc == 0: + # There's no indication if 'clean' actually removed anything, + # so assume it did. 
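+        # A stricter implementation could, for example, compare the contents of the
+        # cache directory (commonly /var/db/pkgin/cache, though the path varies by
+        # platform) before and after the run, at the cost of an extra scan.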
+        module.exit_json(changed=True, msg="cleaned caches")
+    else:
+        module.fail_json(msg="could not clean package cache", stdout=out, stderr=err)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default="present", choices=["present", "absent"]),
+            name=dict(aliases=["pkg"], type='list', elements='str'),
+            update_cache=dict(default=False, type='bool'),
+            upgrade=dict(default=False, type='bool'),
+            full_upgrade=dict(default=False, type='bool'),
+            clean=dict(default=False, type='bool'),
+            force=dict(default=False, type='bool')),
+        required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+        supports_check_mode=True)
+
+    global PKGIN_PATH
+    PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+    p = module.params
+
+    if p["update_cache"]:
+        c, msg = update_package_db(module)
+        if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+            module.exit_json(changed=c, msg=msg)
+
+    if p["upgrade"]:
+        upgrade_packages(module)
+        if not p['name']:
+            module.exit_json(changed=True, msg='upgraded packages')
+
+    if p["full_upgrade"]:
+        full_upgrade_packages(module)
+        if not p['name']:
+            module.exit_json(changed=True, msg='upgraded all packages')
+
+    if p["clean"]:
+        clean_cache(module)
+        if not p['name']:
+            module.exit_json(changed=True, msg='cleaned caches')
+
+    pkgs = p["name"]
+
+    if p["state"] == "present":
+        install_packages(module, pkgs)
+
+    elif p["state"] == "absent":
+        remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py
deleted file mode 120000
index b40904a228..0000000000
--- a/plugins/modules/pkgng.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/pkgng.py
\ No newline at end of file
diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py
new file mode 100644
index 0000000000..fe559940a7
--- /dev/null
+++ b/plugins/modules/pkgng.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, bleader
+# Written by bleader
+# Based on pkgin module written by Shaun Zinck
+# that was based on pacman module written by Afterburn
+# that was based on apt module written by Matthew Williams
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+  - Manage binary packages for FreeBSD using C(pkgng), which is available in FreeBSD versions 9.0 and later.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name or list of names of packages to install/remove.
+      - With O(name=*), O(state=latest) operates, but O(state=present) and O(state=absent) are no-ops.
+    required: true
+    aliases: [pkg]
+    type: list
+    elements: str
+  state:
+    description:
+      - State of the package.
+    choices: ['present', 'latest', 'absent']
+    required: false
+    default: present
+    type: str
+  cached:
+    description:
+      - Use local package base instead of fetching an updated one.
+    type: bool
+    required: false
+    default: false
+  annotation:
+    description:
+      - A list of key/value pairs of the form C(<+/-/:><key>[=<value>]). A V(+) denotes adding an annotation, a V(-) denotes
+        removing an annotation, and V(:) denotes modifying an annotation. 
If setting or modifying annotations, a value must
+        be provided.
+    required: false
+    type: list
+    elements: str
+  pkgsite:
+    description:
+      - For C(pkgng) versions before 1.1.4, specify C(packagesite) to use for downloading packages. If not specified, use
+        settings from C(/usr/local/etc/pkg.conf).
+      - For newer C(pkgng) versions, specify the name of a repository configured in C(/usr/local/etc/pkg/repos).
+    required: false
+    type: str
+  rootdir:
+    description:
+      - For C(pkgng) versions 1.5 and later, pkg installs all packages within the specified root directory.
+      - Cannot be used together with O(chroot) or O(jail) options.
+    required: false
+    type: path
+  chroot:
+    description:
+      - Pkg chroots in the specified environment.
+      - Cannot be used together with O(rootdir) or O(jail) options.
+    required: false
+    type: path
+  jail:
+    description:
+      - Pkg executes in the given jail name or ID.
+      - Cannot be used together with O(chroot) or O(rootdir) options.
+    type: str
+  autoremove:
+    description:
+      - Remove automatically installed packages which are no longer needed.
+    required: false
+    type: bool
+    default: false
+  ignore_osver:
+    description:
+      - Ignore FreeBSD OS version check, useful on C(-STABLE) and C(-CURRENT) branches.
+      - Defines the E(IGNORE_OSVERSION) environment variable.
+    required: false
+    type: bool
+    default: false
+    version_added: 1.3.0
+  use_globs:
+    description:
+      - Treat the package names as shell glob patterns.
+    required: false
+    type: bool
+    default: true
+    version_added: 9.3.0
+author: "bleader (@bleader)"
+notes:
+  - When using pkgsite, be aware that packages already in the cache are not downloaded again.
+  - When used with a C(loop:), each package is processed individually; it is much more efficient to pass the list directly
+    to the O(name) option.
+"""
+
+EXAMPLES = r"""
+- name: Install package foo
+  community.general.pkgng:
+    name: foo
+    state: present
+
+- name: Annotate package foo and bar
+  community.general.pkgng:
+    name:
+      - foo
+      - bar
+    annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+  community.general.pkgng:
+    name:
+      - foo
+      - bar
+    state: absent
+
+- name: Upgrade package baz
+  community.general.pkgng:
+    name: baz
+    state: latest
+
+- name: Upgrade all installed packages (see warning for the name option first!)
+  community.general.pkgng:
+    name: "*"
+    state: latest
+
+- name: Upgrade foo/bar
+  community.general.pkgng:
+    name: foo/bar
+    state: latest
+    use_globs: false
+"""
+
+
+from collections import defaultdict
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, run_pkgng, name):
+
+    rc, out, err = run_pkgng('info', '-e', name)
+
+    return rc == 0
+
+
+def query_update(module, run_pkgng, name):
+
+    # Check to see if a package upgrade is available.
+    # rc = 0, no updates available or package not installed
+    # rc = 1, updates available
+    rc, out, err = run_pkgng('upgrade', '-n', name)
+
+    return rc == 1
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+    rc, out, err = module.run_command([pkgng_path, '-v'])
+    version = [int(x) for x in re.split(r'[\._]', out)]
+
+    i = 0
+    new_pkgng = True
+    while compare_version[i] == version[i]:
+        i += 1
+        if i == min(len(compare_version), len(version)):
+            break
+    else:
+        if compare_version[i] > version[i]:
+            new_pkgng = False
+    return not new_pkgng
+
+
+def upgrade_packages(module, run_pkgng):
+    # Run a 'pkg upgrade', updating all packages.
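+    # The count below is parsed from pkg's plan summary, e.g. (illustrative output):
+    #   Number of packages to be upgraded: 3
+    #   Number of packages to be reinstalled: 1
+    # The regex sums every such line into upgraded_c.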
+ upgraded_c = 0 + + pkgng_args = ['upgrade'] + pkgng_args.append('-n' if module.check_mode else '-y') + rc, out, err = run_pkgng(*pkgng_args, check_rc=(not module.check_mode)) + + matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE) + for match in matches: + upgraded_c += int(match) + + if upgraded_c > 0: + return (True, "updated %s package(s)" % upgraded_c, out, err) + return (False, "no packages need upgrades", out, err) + + +def remove_packages(module, run_pkgng, packages): + remove_c = 0 + stdout = "" + stderr = "" + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, run_pkgng, package): + continue + + if not module.check_mode: + rc, out, err = run_pkgng('delete', '-y', package) + stdout += out + stderr += err + + if not module.check_mode and query_package(module, run_pkgng, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr) + + remove_c += 1 + + if remove_c > 0: + return (True, "removed %s package(s)" % remove_c, stdout, stderr) + + return (False, "package(s) already absent", stdout, stderr) + + +def install_packages(module, run_pkgng, packages, cached, state): + action_queue = defaultdict(list) + action_count = defaultdict(int) + stdout = "" + stderr = "" + + if not module.check_mode and not cached: + rc, out, err = run_pkgng('update') + stdout += out + stderr += err + if rc != 0: + module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr) + + for package in packages: + already_installed = query_package(module, run_pkgng, package) + if already_installed and state == "present": + continue + + if ( + already_installed and state == "latest" + and not query_update(module, run_pkgng, package) + ): + continue + + if already_installed: + action_queue["upgrade"].append(package) + else: + action_queue["install"].append(package) + + # install/upgrade all named packages with one pkg command + for (action, package_list) in action_queue.items(): + if module.check_mode: + # Do nothing, but count up how many actions + # would be performed so that the changed/msg + # is correct. 
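+                # (Illustrative) with two packages missing and one outdated, check
+                # mode reports e.g. "installed 2 packages; upgraded 1 package".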
+            action_count[action] += len(package_list)
+            continue
+
+        pkgng_args = [action, '-U', '-y'] + package_list
+        rc, out, err = run_pkgng(*pkgng_args)
+        stdout += out
+        stderr += err
+
+        # individually verify packages are in requested state
+        for package in package_list:
+            verified = False
+            if action == 'install':
+                verified = query_package(module, run_pkgng, package)
+            elif action == 'upgrade':
+                verified = not query_update(module, run_pkgng, package)
+
+            if verified:
+                action_count[action] += 1
+            else:
+                module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr)
+
+    if sum(action_count.values()) > 0:
+        past_tense = {'install': 'installed', 'upgrade': 'upgraded'}
+        messages = []
+        for (action, count) in action_count.items():
+            messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else ""))
+
+        return (True, '; '.join(messages), stdout, stderr)
+
+    return (False, "package(s) already %s" % (state), stdout, stderr)
+
+
+def annotation_query(module, run_pkgng, package, tag):
+    rc, out, err = run_pkgng('info', '-A', package)
+    match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+    if match:
+        return match.group('value')
+    return False
+
+
+def annotation_add(module, run_pkgng, package, tag, value):
+    _value = annotation_query(module, run_pkgng, package, tag)
+    if not _value:
+        # Annotation does not exist, add it.
+        if not module.check_mode:
+            rc, out, err = run_pkgng('annotate', '-y', '-A', package, tag, data=value, binary_data=True)
+            if rc != 0:
+                module.fail_json(msg="could not annotate %s: %s"
+                                 % (package, out), stderr=err)
+        return True
+    elif _value != value:
+        # Annotation exists, but value differs
+        module.fail_json(
+            msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+            % (package, tag, _value, value))
+        return False
+    else:
+        # Annotation exists, nothing to do
+        return False
+
+
+def annotation_delete(module, run_pkgng, package, tag, value):
+    _value = annotation_query(module, run_pkgng, package, tag)
+    if _value:
+        if not module.check_mode:
+            rc, out, err = run_pkgng('annotate', '-y', '-D', package, tag)
+            if rc != 0:
+                module.fail_json(msg="could not delete annotation to %s: %s"
+                                 % (package, out), stderr=err)
+        return True
+    return False
+
+
+def annotation_modify(module, run_pkgng, package, tag, value):
+    _value = annotation_query(module, run_pkgng, package, tag)
+    if not _value:
+        # No such tag
+        module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+                         % (package, tag))
+    elif _value == value:
+        # No change in value
+        return False
+    else:
+        if not module.check_mode:
+            rc, out, err = run_pkgng('annotate', '-y', '-M', package, tag, data=value, binary_data=True)
+
+            # pkg sometimes exits with rc == 1, even though the modification succeeded
+            # Check the output for a success message
+            if (
+                rc != 0
+                and re.search(r'^%s-[^:]+: Modified annotation tagged: %s' % (package, tag), out, flags=re.MULTILINE) is None
+            ):
+                module.fail_json(msg="failed to annotate %s, could not change annotation %s to %s: %s"
+                                 % (package, tag, value, out), stderr=err)
+        return True
+
+
+def annotate_packages(module, run_pkgng, packages, annotations):
+    annotate_c = 0
+    if len(annotations) == 1:
+        # Split on commas with optional trailing whitespace,
+        # to support the old style of multiple annotations
+        # on a single line, rather than YAML list syntax
+        annotations = re.split(r'\s*,\s*', annotations[0])
+
+    operation = {
+        '+': annotation_add,
+        '-': annotation_delete,
+        ':': annotation_modify
+    }
+
+    for package in packages:
+        for annotation_string in annotations:
+            # Note to future maintainers: A dash (-) in a regex character class ([-+:] below)
+            # must appear as the first character in the class, or it will be interpreted
+            # as a range of characters.
+            annotation = \
+                re.match(r'(?P<operation>[-+:])(?P<tag>[^=]+)(=(?P<value>.+))?', annotation_string)
+
+            if annotation is None:
+                module.fail_json(
+                    msg="failed to annotate %s, invalid annotate string: %s"
+                    % (package, annotation_string)
+                )
+
+            annotation = annotation.groupdict()
+            if operation[annotation['operation']](module, run_pkgng, package, annotation['tag'], annotation['value']):
+                annotate_c += 1
+
+    if annotate_c > 0:
+        return (True, "added %s annotations." % annotate_c)
+    return (False, "changed no annotations")
+
+
+def autoremove_packages(module, run_pkgng):
+    stdout = ""
+    stderr = ""
+    rc, out, err = run_pkgng('autoremove', '-n')
+
+    autoremove_c = 0
+
+    match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+    if match:
+        autoremove_c = int(match.group(1))
+
+    if autoremove_c == 0:
+        return (False, "no package(s) to autoremove", stdout, stderr)
+
+    if not module.check_mode:
+        rc, out, err = run_pkgng('autoremove', '-y')
+        stdout += out
+        stderr += err
+
+    return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default="present", choices=["present", "latest", "absent"]),
+            name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+            cached=dict(default=False, type='bool'),
+            ignore_osver=dict(default=False, type='bool'),
+            annotation=dict(type='list', elements='str'),
+            pkgsite=dict(),
+            rootdir=dict(type='path'),
+            chroot=dict(type='path'),
+            jail=dict(type='str'),
+            autoremove=dict(default=False, type='bool'),
+            use_globs=dict(default=True, type='bool'),
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+    pkgng_path = module.get_bin_path('pkg', True)
+
+    p = module.params
+
+    pkgs = p["name"]
+
+    changed = False
+    msgs = []
+    stdout = ""
+    stderr = ""
+    dir_arg = None
+
+    if p["rootdir"] is not None:
+        rootdir_not_supported = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+        if rootdir_not_supported:
+            module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+        else:
+            dir_arg = "--rootdir=%s" % (p["rootdir"])
+
+    if p["ignore_osver"]:
+        ignore_osver_not_supported = pkgng_older_than(module, pkgng_path, [1, 11, 0])
+        if ignore_osver_not_supported:
+            module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater")
+
+    if p["chroot"] is not None:
+        dir_arg = '--chroot=%s' % (p["chroot"])
+
+    if p["jail"] is not None:
+        dir_arg = '--jail=%s' % (p["jail"])
+
+    # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+    # in /usr/local/etc/pkg/repos
+    repo_flag_not_supported = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+
+    def run_pkgng(action, *args, **kwargs):
+        cmd = [pkgng_path, dir_arg, action]
+
+        if p["use_globs"] and action in ('info', 'install', 'upgrade',):
+            args = ('-g',) + args
+
+        pkgng_env = {'BATCH': 'yes'}
+
+        if p["ignore_osver"]:
+            pkgng_env['IGNORE_OSVERSION'] = 'yes'
+
+        if p['pkgsite'] is not None and action in ('update', 'install', 'upgrade',):
+            if repo_flag_not_supported:
+                pkgng_env['PACKAGESITE'] = p['pkgsite']
+            else:
+                cmd.append('--repository=%s' % (p['pkgsite'],))
+
+        # If environ_update 
is specified to be "passed through" + # to module.run_command, then merge its values into pkgng_env + pkgng_env.update(kwargs.pop('environ_update', dict())) + + return module.run_command(cmd + list(args), environ_update=pkgng_env, **kwargs) + + if pkgs == ['*'] and p["state"] == 'latest': + # Operate on all installed packages. Only state: latest makes sense here. + _changed, _msg, _stdout, _stderr = upgrade_packages(module, run_pkgng) + changed = changed or _changed + stdout += _stdout + stderr += _stderr + msgs.append(_msg) + + # Operate on named packages + if len(pkgs) == 1: + # The documentation used to show multiple packages specified in one line + # with comma or space delimiters. That doesn't result in a YAML list, and + # wrong actions (install vs upgrade) can be reported if those + # comma- or space-delimited strings make it to the pkg command line. + pkgs = re.split(r'[,\s]', pkgs[0]) + named_packages = [pkg for pkg in pkgs if pkg != '*'] + if p["state"] in ("present", "latest") and named_packages: + _changed, _msg, _out, _err = install_packages(module, run_pkgng, named_packages, + p["cached"], p["state"]) + stdout += _out + stderr += _err + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent" and named_packages: + _changed, _msg, _out, _err = remove_packages(module, run_pkgng, named_packages) + stdout += _out + stderr += _err + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg, _stdout, _stderr = autoremove_packages(module, run_pkgng) + changed = changed or _changed + stdout += _stdout + stderr += _stderr + msgs.append(_msg) + + if p["annotation"] is not None: + _changed, _msg = annotate_packages(module, run_pkgng, pkgs, p["annotation"]) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py deleted file mode 120000 index 79dbb6aa14..0000000000 --- a/plugins/modules/pkgutil.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/pkgutil.py \ No newline at end of file diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py new file mode 100644 index 0000000000..3d4616bbcb --- /dev/null +++ b/plugins/modules/pkgutil.py @@ -0,0 +1,299 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Alexander Winkler +# based on svr4pkg by +# Boyd Adamson (2012) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pkgutil +short_description: OpenCSW package management on Solaris +description: + - This module installs, updates and removes packages from the OpenCSW project for Solaris. + - Unlike the M(community.general.svr4pkg) module, it resolves and downloads dependencies. + - See U(https://www.opencsw.org/) for more information about the project. +author: + - Alexander Winkler (@dermute) + - David Ponessa (@scathatheworm) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + details: + - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even + in check mode. + diff_mode: + support: none +options: + name: + description: + - The name of the package. 
+ - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil. + type: list + required: true + elements: str + aliases: [pkg] + site: + description: + - The repository path to install the package from. + - Its global definition is in C(/etc/opt/csw/pkgutil.conf). + required: false + type: str + state: + description: + - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages. + - The upgrade (V(latest)) operation updates/installs the packages to the latest version available. + type: str + required: true + choices: [absent, installed, latest, present, removed] + update_catalog: + description: + - If you always want to refresh your catalog from the mirror, even when it is not stale, set this to V(true). + type: bool + default: false + force: + description: + - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true). + - This is useful for rolling back to stable from testing, or similar operations. + type: bool + default: false + version_added: 1.2.0 +""" + +EXAMPLES = r""" +- name: Install a package + community.general.pkgutil: + name: CSWcommon + state: present + +- name: Install a package from a specific repository + community.general.pkgutil: + name: CSWnrpe + site: ftp://myinternal.repo/opencsw/kiel + state: latest + +- name: Remove a package + community.general.pkgutil: + name: CSWtop + state: absent + +- name: Install several packages + community.general.pkgutil: + name: + - CSWsudo + - CSWtop + state: present + +- name: Update all packages + community.general.pkgutil: + name: '*' + state: latest + +- name: Update all packages and force versions to match latest in catalog + community.general.pkgutil: + name: '*' + state: latest + force: true +""" + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule + + +def packages_not_installed(module, names): + ''' Check if each package is installed and return list of the ones absent ''' + pkgs = [] + for pkg in names: + rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) + if rc != 0: + pkgs.append(pkg) + return pkgs + + +def packages_installed(module, names): + ''' Check if each package is installed and return list of the ones present ''' + pkgs = [] + for pkg in names: + if not pkg.startswith('CSW'): + continue + rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) + if rc == 0: + pkgs.append(pkg) + return pkgs + + +def packages_not_latest(module, names, site, update_catalog): + ''' Check status of each package and return list of the ones with an upgrade available ''' + cmd = ['pkgutil'] + if update_catalog: + cmd.append('-U') + cmd.append('-c') + if site is not None: + cmd.extend(['-t', site]) + if names != ['*']: + cmd.extend(names) + rc, out, err = run_command(module, cmd) + + # Find packages in the catalog which are not up to date + packages = [] + for line in out.split('\n')[1:-1]: + if 'catalog' not in line and 'SAME' not in line: + packages.append(line.split(' ')[0]) + + # Remove duplicates + return list(set(packages)) + + +def run_command(module, cmd, **kwargs): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin']) + return module.run_command(cmd, **kwargs) + + +def package_install(module, state, pkgs, site, update_catalog, force): + cmd = ['pkgutil'] + if module.check_mode: + cmd.append('-n') + cmd.append('-iy') + if update_catalog: + cmd.append('-U') + if site is not None: + cmd.extend(['-t', site]) + if force: + cmd.append('-f') + 
cmd.extend(pkgs) + return run_command(module, cmd) + + +def package_upgrade(module, pkgs, site, update_catalog, force): + cmd = ['pkgutil'] + if module.check_mode: + cmd.append('-n') + cmd.append('-uy') + if update_catalog: + cmd.append('-U') + if site is not None: + cmd.extend(['-t', site]) + if force: + cmd.append('-f') + cmd += pkgs + return run_command(module, cmd) + + +def package_uninstall(module, pkgs): + cmd = ['pkgutil'] + if module.check_mode: + cmd.append('-n') + cmd.append('-ry') + cmd.extend(pkgs) + return run_command(module, cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True, aliases=['pkg']), + state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']), + site=dict(type='str'), + update_catalog=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + name = module.params['name'] + state = module.params['state'] + site = module.params['site'] + update_catalog = module.params['update_catalog'] + force = module.params['force'] + + rc = None + out = '' + err = '' + result = dict( + name=name, + state=state, + ) + + if state in ['installed', 'present']: + # Fail with an explicit error when trying to "install" '*' + if name == ['*']: + module.fail_json(msg="Can not use 'state: present' with name: '*'") + + # Build list of packages that are actually not installed from the ones requested + pkgs = packages_not_installed(module, name) + + # If the package list is empty then all packages are already present + if pkgs == []: + module.exit_json(changed=False) + + (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force) + if rc != 0: + module.fail_json(msg=(err or out)) + + elif state in ['latest']: + # When using latest for * + if name == ['*']: + # Check for packages that are actually outdated + pkgs = packages_not_latest(module, name, site, update_catalog) + + # If the package list comes up empty, everything is already up to date + if pkgs == []: + module.exit_json(changed=False) + + # If there are packages to update, just empty the list and run the command without it + # pkgutil logic is to update all when run without packages names + pkgs = [] + (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force) + if rc != 0: + module.fail_json(msg=(err or out)) + else: + # Build list of packages that are either outdated or not installed + pkgs = packages_not_installed(module, name) + pkgs += packages_not_latest(module, name, site, update_catalog) + + # If the package list is empty that means all packages are installed and up to date + if pkgs == []: + module.exit_json(changed=False) + + (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force) + if rc != 0: + module.fail_json(msg=(err or out)) + + elif state in ['absent', 'removed']: + # Build list of packages requested for removal that are actually present + pkgs = packages_installed(module, name) + + # If the list is empty, no packages need to be removed + if pkgs == []: + module.exit_json(changed=False) + + (rc, out, err) = package_uninstall(module, pkgs) + if rc != 0: + module.fail_json(msg=(err or out)) + + if rc is None: + # pkgutil was not executed because the package was already present/absent/up to date + result['changed'] = False + elif rc == 0: + result['changed'] = True + else: + result['changed'] = False + result['failed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = 
err
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/pmem.py b/plugins/modules/pmem.py
new file mode 100644
index 0000000000..527c94cb98
--- /dev/null
+++ b/plugins/modules/pmem.py
@@ -0,0 +1,634 @@
+#!/usr/bin/python
+# Copyright (c) 2022, Masayoshi Mizuma
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author:
+  - Masayoshi Mizuma (@mizumm)
+module: pmem
+short_description: Configure Intel Optane Persistent Memory modules
+version_added: 4.5.0
+description:
+  - This module allows configuring Intel Optane Persistent Memory modules (PMem) using C(ipmctl) and C(ndctl) command line
+    tools.
+requirements:
+  - C(ipmctl) and C(ndctl) command line tools
+  - xmltodict
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  appdirect:
+    description:
+      - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)).
+      - Create AppDirect capacity utilizing hardware interleaving across the requested PMem modules if applicable given the
+        specified target.
+      - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100).
+    type: int
+  appdirect_interleaved:
+    description:
+      - Create AppDirect capacity that is interleaved with any other PMem modules.
+    type: bool
+    required: false
+    default: true
+  memorymode:
+    description:
+      - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)).
+    type: int
+  reserved:
+    description:
+      - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) is not mapped into the system physical address space
+        and is presented as reserved capacity with Show Device and Show Memory Resources Commands.
+      - O(reserved) is set automatically if this is not configured.
+    type: int
+    required: false
+  socket:
+    description:
+      - This enables setting the configuration for each socket by using the socket ID.
+      - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket.
+    type: list
+    elements: dict
+    suboptions:
+      id:
+        description: The socket ID of the PMem module.
+        type: int
+        required: true
+      appdirect:
+        description:
+          - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID.
+        type: int
+        required: true
+      appdirect_interleaved:
+        description:
+          - Create AppDirect capacity that is interleaved with any other PMem modules within the socket ID.
+        type: bool
+        required: false
+        default: true
+      memorymode:
+        description:
+          - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID.
+        type: int
+        required: true
+      reserved:
+        description:
+          - Percentage of the capacity to reserve (V(0)-V(100)) within the socket ID.
+        type: int
+  namespace:
+    description:
+      - This enables setting the configuration for the namespaces of the PMem.
+    type: list
+    elements: dict
+    suboptions:
+      mode:
+        description:
+          - The mode of the namespace. The details of the mode are in the man page of ndctl-create-namespace.
+        type: str
+        required: true
+        choices: ['raw', 'sector', 'fsdax', 'devdax']
+      type:
+        description:
+          - The type of the namespace. The details of the type are in the man page of ndctl-create-namespace.
+        type: str
+        required: false
+        choices: ['pmem', 'blk']
+      size:
+        description:
+          - The size of the namespace. 
This option supports the suffixes V(k) or V(K) or V(KB) for KiB, V(m) or V(M) or V(MB)
+            for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB.
+          - This option is required if multiple namespaces are configured.
+          - If this option is not set, all of the available space of a region is configured.
+        type: str
+        required: false
+  namespace_append:
+    description:
+      - Enable appending the new namespaces to the system.
+      - The default is V(false), so all existing namespaces not listed in O(namespace) are removed.
+    type: bool
+    default: false
+    required: false
+"""
+
+RETURN = r"""
+reboot_required:
+  description: Indicates that a system reboot is required to complete the PMem configuration.
+  returned: success
+  type: bool
+  sample: true
+result:
+  description:
+    - Shows the value of AppDirect, Memory Mode and Reserved size in bytes.
+    - If O(socket) argument is provided, shows the values in each socket with C(socket), which contains the socket ID.
+    - If O(namespace) argument is provided, shows the details of each namespace.
+  returned: success
+  type: list
+  elements: dict
+  contains:
+    appdirect:
+      description: AppDirect size in bytes.
+      type: int
+    memorymode:
+      description: Memory Mode size in bytes.
+      type: int
+    reserved:
+      description: Reserved size in bytes.
+      type: int
+    socket:
+      description: The socket ID to be configured.
+      type: int
+    namespace:
+      description: The list of the details of the namespaces.
+      type: list
+  sample:
+    [
+      {
+        "appdirect": 111669149696,
+        "memorymode": 970662608896,
+        "reserved": 3626500096,
+        "socket": 0
+      },
+      {
+        "appdirect": 111669149696,
+        "memorymode": 970662608896,
+        "reserved": 3626500096,
+        "socket": 1
+      }
+    ]
+"""
+
+EXAMPLES = r"""
+- name: Configure the PMem as AppDirect 10, Memory Mode 70, and Reserved 20 percent.
+  community.general.pmem:
+    appdirect: 10
+    memorymode: 70
+
+- name: Configure the PMem as AppDirect 10, Memory Mode 80, and Reserved 10 percent.
+  community.general.pmem:
+    appdirect: 10
+    memorymode: 80
+    reserved: 10
+
+- name: Configure the PMem as non-interleaved AppDirect 10, Memory Mode 70, and Reserved 20 percent.
+  community.general.pmem:
+    appdirect: 10
+    appdirect_interleaved: false
+    memorymode: 70
+
+- name: Configure the PMem for each socket.
+  community.general.pmem:
+    socket:
+      - id: 0
+        appdirect: 10
+        appdirect_interleaved: false
+        memorymode: 70
+        reserved: 20
+      - id: 1
+        appdirect: 10
+        memorymode: 80
+        reserved: 10
+
+- name: Configure the two namespaces. 
+ community.general.pmem: + namespace: + - size: 1GB + type: pmem + mode: raw + - size: 320MB + type: pmem + mode: sector +""" + +import json +import re +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, human_to_bytes + +try: + import xmltodict +except ImportError: + HAS_XMLTODICT_LIBRARY = False + XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc() +else: + HAS_XMLTODICT_LIBRARY = True + XMLTODICT_LIBRARY_IMPORT_ERROR = None + + +class PersistentMemory(object): + def __init__(self): + module = AnsibleModule( + argument_spec=dict( + appdirect=dict(type='int'), + appdirect_interleaved=dict(type='bool', default=True), + memorymode=dict(type='int'), + reserved=dict(type='int'), + socket=dict( + type='list', elements='dict', + options=dict( + id=dict(required=True, type='int'), + appdirect=dict(required=True, type='int'), + appdirect_interleaved=dict(type='bool', default=True), + memorymode=dict(required=True, type='int'), + reserved=dict(type='int'), + ), + ), + namespace=dict( + type='list', elements='dict', + options=dict( + mode=dict(required=True, type='str', choices=['raw', 'sector', 'fsdax', 'devdax']), + type=dict(type='str', choices=['pmem', 'blk']), + size=dict(type='str'), + ), + ), + namespace_append=dict(type='bool', default=False), + ), + required_together=( + ['appdirect', 'memorymode'], + ), + required_one_of=( + ['appdirect', 'memorymode', 'socket', 'namespace'], + ), + mutually_exclusive=( + ['appdirect', 'socket'], + ['memorymode', 'socket'], + ['appdirect', 'namespace'], + ['memorymode', 'namespace'], + ['socket', 'namespace'], + ['appdirect', 'namespace_append'], + ['memorymode', 'namespace_append'], + ['socket', 'namespace_append'], + ), + ) + + if not HAS_XMLTODICT_LIBRARY: + module.fail_json( + msg=missing_required_lib('xmltodict'), + exception=XMLTODICT_LIBRARY_IMPORT_ERROR) + + self.ipmctl_exec = module.get_bin_path('ipmctl', True) + self.ndctl_exec = module.get_bin_path('ndctl', True) + + self.appdirect = module.params['appdirect'] + self.interleaved = module.params['appdirect_interleaved'] + self.memmode = module.params['memorymode'] + self.reserved = module.params['reserved'] + self.socket = module.params['socket'] + self.namespace = module.params['namespace'] + self.namespace_append = module.params['namespace_append'] + + self.module = module + self.changed = False + self.result = [] + + def pmem_run_command(self, command, returnCheck=True): + # in case command[] has number + cmd = [str(part) for part in command] + + self.module.log(msg='pmem_run_command: execute: %s' % cmd) + + rc, out, err = self.module.run_command(cmd) + + self.module.log(msg='pmem_run_command: result: %s' % out) + + if returnCheck and rc != 0: + self.module.fail_json(msg='Error while running: %s' % + cmd, rc=rc, out=out, err=err) + + return out + + def pmem_run_ipmctl(self, command, returnCheck=True): + + command = [self.ipmctl_exec] + command + + return self.pmem_run_command(command, returnCheck) + + def pmem_run_ndctl(self, command, returnCheck=True): + + command = [self.ndctl_exec] + command + + return self.pmem_run_command(command, returnCheck) + + def pmem_is_dcpmm_installed(self): + # To check this system has dcpmm + command = ['show', '-system', '-capabilities'] + return self.pmem_run_ipmctl(command) + + def pmem_get_region_align_size(self, region): + aligns = [] + for rg in region: + if rg['align'] not in aligns: + aligns.append(rg['align']) + + return aligns + + def pmem_get_available_region_size(self, region): + available_size = [] + 
for rg in region:
+            available_size.append(rg['available_size'])
+
+        return available_size
+
+    def pmem_get_available_region_type(self, region):
+        types = []
+        for rg in region:
+            if rg['type'] not in types:
+                types.append(rg['type'])
+
+        return types
+
+    def pmem_argument_check(self):
+        def namespace_check(self):
+            command = ['list', '-R']
+            out = self.pmem_run_ndctl(command)
+            if not out:
+                return 'No available region(s) in this system.'
+            region = json.loads(out)
+
+            aligns = self.pmem_get_region_align_size(region)
+            if len(aligns) != 1:
+                return 'Regions whose alignment sizes differ are not supported.'
+
+            available_size = self.pmem_get_available_region_size(region)
+            types = self.pmem_get_available_region_type(region)
+            for ns in self.namespace:
+                if ns['size']:
+                    try:
+                        size_byte = human_to_bytes(ns['size'])
+                    except ValueError:
+                        return 'The format of size: NNN TB|GB|MB|KB|T|G|M|K|B'
+
+                    if size_byte % aligns[0] != 0:
+                        return 'size: %s should be aligned with %d' % (ns['size'], aligns[0])
+
+                    is_space_enough = False
+                    for i, avail in enumerate(available_size):
+                        if avail > size_byte:
+                            available_size[i] -= size_byte
+                            is_space_enough = True
+                            break
+
+                    if is_space_enough is False:
+                        return 'There is no available region for size: %s' % ns['size']
+
+                    ns['size_byte'] = size_byte
+
+                elif len(self.namespace) != 1:
+                    return 'size option is required to configure multiple namespaces'
+
+                if ns['type'] not in types:
+                    return 'type %s is not supported in this system. Supported type: %s' % (ns['type'], types)
+
+            return None
+
+        def percent_check(self, appdirect, memmode, reserved=None):
+            if appdirect is None or (appdirect < 0 or appdirect > 100):
+                return 'appdirect percent should be from 0 to 100.'
+            if memmode is None or (memmode < 0 or memmode > 100):
+                return 'memorymode percent should be from 0 to 100.'
+
+            if reserved is None:
+                if appdirect + memmode > 100:
+                    return 'Total percent should be less than or equal to 100.'
+            else:
+                if reserved < 0 or reserved > 100:
+                    return 'reserved percent should be from 0 to 100.'
+                if appdirect + memmode + reserved != 100:
+                    return 'Total percent should be 100.'
+
+        def socket_id_check(self):
+            command = ['show', '-o', 'nvmxml', '-socket']
+            out = self.pmem_run_ipmctl(command)
+            sockets_dict = xmltodict.parse(out, dict_constructor=dict)['SocketList']['Socket']
+            socket_ids = []
+            for sl in sockets_dict:
+                socket_ids.append(int(sl['SocketID'], 16))
+
+            for skt in self.socket:
+                if skt['id'] not in socket_ids:
+                    return 'Invalid socket number: %d' % skt['id']
+
+            return None
+
+        if self.namespace:
+            return namespace_check(self)
+        elif self.socket is None:
+            return percent_check(self, self.appdirect, self.memmode, self.reserved)
+        else:
+            ret = socket_id_check(self)
+            if ret is not None:
+                return ret
+
+            for skt in self.socket:
+                ret = percent_check(
+                    self, skt['appdirect'], skt['memorymode'], skt['reserved'])
+                if ret is not None:
+                    return ret
+
+            return None
+
+    def pmem_remove_namespaces(self):
+        command = ['list', '-N']
+        out = self.pmem_run_ndctl(command)
+
+        # There are no namespaces in this system. Nothing to do. 
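+        # Illustrative (assumed) shape of the 'ndctl list -N' JSON parsed below:
+        #   [{"dev": "namespace0.0", "mode": "fsdax", "size": 1073741824}, ...]
+        # Only the 'dev' field is used, to disable and then destroy each namespace.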
+        if not out:
+            return
+
+        namespaces = json.loads(out)
+
+        # Disable and destroy all namespaces
+        for ns in namespaces:
+            command = ['disable-namespace', ns['dev']]
+            self.pmem_run_ndctl(command)
+
+            command = ['destroy-namespace', ns['dev']]
+            self.pmem_run_ndctl(command)
+
+        return
+
+    def pmem_delete_goal(self):
+        # delete the goal request
+        command = ['delete', '-goal']
+        self.pmem_run_ipmctl(command)
+
+    def pmem_init_env(self):
+        if self.namespace is None or (self.namespace and self.namespace_append is False):
+            self.pmem_remove_namespaces()
+        if self.namespace is None:
+            self.pmem_delete_goal()
+
+    def pmem_get_capacity(self, skt=None):
+        command = ['show', '-d', 'Capacity', '-u', 'B', '-o', 'nvmxml', '-dimm']
+        if skt:
+            command += ['-socket', skt['id']]
+        out = self.pmem_run_ipmctl(command)
+
+        dimm_list = xmltodict.parse(out, dict_constructor=dict)['DimmList']['Dimm']
+        capacity = 0
+        for entry in dimm_list:
+            for key, v in entry.items():
+                if key == 'Capacity':
+                    capacity += int(v.split()[0])
+
+        return capacity
+
+    def pmem_create_memory_allocation(self, skt=None):
+        def build_ipmctl_creation_opts(self, skt=None):
+            ipmctl_opts = []
+
+            if skt:
+                appdirect = skt['appdirect']
+                memmode = skt['memorymode']
+                reserved = skt['reserved']
+                socket_id = skt['id']
+                ipmctl_opts += ['-socket', socket_id]
+            else:
+                appdirect = self.appdirect
+                memmode = self.memmode
+                reserved = self.reserved
+
+            if reserved is None:
+                res = 100 - memmode - appdirect
+                ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % res]
+            else:
+                ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % reserved]
+
+            if self.interleaved:
+                ipmctl_opts += ['PersistentMemoryType=AppDirect']
+            else:
+                ipmctl_opts += ['PersistentMemoryType=AppDirectNotInterleaved']
+
+            return ipmctl_opts
+
+        def is_allocation_good(self, ipmctl_out, command):
+            warning = re.compile('WARNING')
+            error = re.compile('.*Error.*')
+            # Match the interactive-prompt artifact literally; as a bare regex the
+            # '?' and '[y/n]' would be treated as metacharacters.
+            ignore_error = re.compile(
+                re.escape('Do you want to continue? [y/n] Error: Invalid data input.'))
+
+            errmsg = ''
+            rc = True
+            for line in ipmctl_out.splitlines():
+                if warning.match(line):
+                    errmsg = '%s (command: %s)' % (line, command)
+                    rc = False
+                    break
+                elif error.match(line):
+                    if not ignore_error.search(line):
+                        errmsg = '%s (command: %s)' % (line, command)
+                        rc = False
+                        break
+
+            return rc, errmsg
+
+        def get_allocation_result(self, goal, skt=None):
+            ret = {'appdirect': 0, 'memorymode': 0}
+
+            if skt:
+                ret['socket'] = skt['id']
+
+            out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal']
+            for entry in out:
+
+                # It is probably a bug in ipmctl that it shows the goal for sockets
+                # which are not specified by the -socket option.
+                # Anyway, filter the noise out here:
+                if skt and skt['id'] != int(entry['SocketID'], 16):
+                    continue
+
+                for key, v in entry.items():
+                    if key == 'MemorySize':
+                        ret['memorymode'] += int(v.split()[0])
+                    elif key == 'AppDirect1Size' or key == 'AppDirect2Size':
+                        ret['appdirect'] += int(v.split()[0])
+
+            capacity = self.pmem_get_capacity(skt)
+            ret['reserved'] = capacity - ret['appdirect'] - ret['memorymode']
+
+            return ret
+
+        reboot_required = False
+
+        ipmctl_opts = build_ipmctl_creation_opts(self, skt)
+
+        # First, do a dry run of the ipmctl create command to check for errors and warnings. 
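+        # e.g. (illustrative): ipmctl create -goal memorymode=70 reserved=20 PersistentMemoryType=AppDirect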
+ command = ['create', '-goal'] + ipmctl_opts + out = self.pmem_run_ipmctl(command, returnCheck=False) + rc, errmsg = is_allocation_good(self, out, command) + if rc is False: + return reboot_required, {}, errmsg + + # Run actual creation here + command = ['create', '-u', 'B', '-o', 'nvmxml', '-force', '-goal'] + ipmctl_opts + goal = self.pmem_run_ipmctl(command) + ret = get_allocation_result(self, goal, skt) + reboot_required = True + + return reboot_required, ret, '' + + def pmem_config_namespaces(self, namespace): + command = ['create-namespace', '-m', namespace['mode']] + if namespace['type']: + command += ['-t', namespace['type']] + if 'size_byte' in namespace: + command += ['-s', namespace['size_byte']] + + self.pmem_run_ndctl(command) + + return None + + +def main(): + + pmem = PersistentMemory() + + pmem.pmem_is_dcpmm_installed() + + error = pmem.pmem_argument_check() + if error: + pmem.module.fail_json(msg=error) + + pmem.pmem_init_env() + pmem.changed = True + + if pmem.namespace: + for ns in pmem.namespace: + pmem.pmem_config_namespaces(ns) + + command = ['list', '-N'] + out = pmem.pmem_run_ndctl(command) + all_ns = json.loads(out) + + pmem.result = all_ns + reboot_required = False + elif pmem.socket is None: + reboot_required, ret, errmsg = pmem.pmem_create_memory_allocation() + if errmsg: + pmem.module.fail_json(msg=errmsg) + pmem.result.append(ret) + else: + for skt in pmem.socket: + skt_reboot_required, skt_ret, skt_errmsg = pmem.pmem_create_memory_allocation(skt) + + if skt_errmsg: + pmem.module.fail_json(msg=skt_errmsg) + + if skt_reboot_required: + reboot_required = True + + pmem.result.append(skt_ret) + + pmem.module.exit_json( + changed=pmem.changed, + reboot_required=reboot_required, + result=pmem.result + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py new file mode 100644 index 0000000000..8d11c83077 --- /dev/null +++ b/plugins/modules/pnpm.py @@ -0,0 +1,457 @@ +#!/usr/bin/python + +# Copyright (c) 2023 Aritra Sen +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pnpm +short_description: Manage Node.js packages with C(pnpm) +version_added: 7.4.0 +description: + - Manage Node.js packages with the L(pnpm package manager, https://pnpm.io/). +author: + - "Aritra Sen (@aretrosen)" + - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of a Node.js library to install. + - All packages in C(package.json) are installed if not provided. + type: str + required: false + alias: + description: + - Alias of the Node.js library. + type: str + required: false + path: + description: + - The base path to install the Node.js libraries. + type: path + required: false + version: + description: + - The version of the library to be installed, in semver format. + type: str + required: false + global: + description: + - Install the Node.js library globally. + required: false + default: false + type: bool + executable: + description: + - The executable location for pnpm. + - The default location it searches for is E(PATH), fails if not set. 
+ type: path + required: false + ignore_scripts: + description: + - Use the C(--ignore-scripts) flag when installing. + required: false + type: bool + default: false + no_optional: + description: + - Do not install optional packages, equivalent to C(--no-optional). + required: false + type: bool + default: false + production: + description: + - Install dependencies in production mode. + - Pnpm ignores any dependencies under C(devDependencies) in package.json. + required: false + type: bool + default: false + dev: + description: + - Install dependencies in development mode. + - Pnpm ignores any regular dependencies in C(package.json). + required: false + default: false + type: bool + optional: + description: + - Install dependencies in optional mode. + required: false + default: false + type: bool + state: + description: + - Installation state of the named Node.js library. + - If V(absent) is selected, a name option must be provided. + type: str + required: false + default: present + choices: ["present", "absent", "latest"] +requirements: + - Pnpm executable present in E(PATH). +""" + +EXAMPLES = r""" +- name: Install "tailwindcss" Node.js package. + community.general.pnpm: + name: tailwindcss + path: /app/location + +- name: Install "tailwindcss" Node.js package on version 3.3.2 + community.general.pnpm: + name: tailwindcss + version: 3.3.2 + path: /app/location + +- name: Install "tailwindcss" Node.js package globally. + community.general.pnpm: + name: tailwindcss + global: true + +- name: Install "tailwindcss" Node.js package as dev dependency. + community.general.pnpm: + name: tailwindcss + path: /app/location + dev: true + +- name: Install "tailwindcss" Node.js package as optional dependency. + community.general.pnpm: + name: tailwindcss + path: /app/location + optional: true + +- name: Install "tailwindcss" Node.js package version 0.1.3 as tailwind-1 + community.general.pnpm: + name: tailwindcss + alias: tailwind-1 + version: 0.1.3 + path: /app/location + +- name: Remove the globally-installed package "tailwindcss". + community.general.pnpm: + name: tailwindcss + global: true + state: absent + +- name: Install packages based on package.json. + community.general.pnpm: + path: /app/location + +- name: Update all packages in package.json to their latest version. 
+ community.general.pnpm: + path: /app/location + state: latest +""" + +import json +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Pnpm(object): + def __init__(self, module, **kwargs): + self.module = module + self.name = kwargs["name"] + self.alias = kwargs["alias"] + self.version = kwargs["version"] + self.path = kwargs["path"] + self.globally = kwargs["globally"] + self.executable = kwargs["executable"] + self.ignore_scripts = kwargs["ignore_scripts"] + self.no_optional = kwargs["no_optional"] + self.production = kwargs["production"] + self.dev = kwargs["dev"] + self.optional = kwargs["optional"] + + self.alias_name_ver = None + + if self.alias is not None: + self.alias_name_ver = self.alias + "@npm:" + + if self.name is not None: + self.alias_name_ver = (self.alias_name_ver or "") + self.name + if self.version is not None: + self.alias_name_ver = self.alias_name_ver + "@" + str(self.version) + else: + self.alias_name_ver = self.alias_name_ver + "@latest" + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = self.executable + args + + if self.globally: + cmd.append("-g") + + if self.ignore_scripts: + cmd.append("--ignore-scripts") + + if self.no_optional: + cmd.append("--no-optional") + + if self.production: + cmd.append("-P") + + if self.dev: + cmd.append("-D") + + if self.name and self.optional: + cmd.append("-O") + + # If path is specified, cd into that path and run the command. + cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + + if not os.path.isdir(self.path): + self.module.fail_json(msg="Path %s is not a directory" % self.path) + + if not self.alias_name_ver and not os.path.isfile( + os.path.join(self.path, "package.json") + ): + self.module.fail_json( + msg="package.json does not exist in provided path" + ) + + cwd = self.path + + _rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out, err + + return None, None + + def missing(self): + if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")): + return True + + cmd = ["list", "--json"] + + if self.name is not None: + cmd.append(self.name) + + try: + out, err = self._exec(cmd, True, False) + if err is not None and err != "": + raise Exception(out) + + data = json.loads(out) + except Exception as e: + self.module.fail_json( + msg="Failed to parse pnpm output with error %s" % to_native(e) + ) + + if "error" in data: + return True + + data = data[0] + + for typedep in [ + "dependencies", + "devDependencies", + "optionalDependencies", + "unsavedDependencies", + ]: + if typedep not in data: + continue + + for dep, prop in data[typedep].items(): + if self.alias is not None and self.alias != dep: + continue + + name = prop["from"] if self.alias is not None else dep + if self.name != name: + continue + + if self.version is None or self.version == prop["version"]: + return False + + break + + return True + + def install(self): + if self.alias_name_ver is not None: + return self._exec(["add", self.alias_name_ver]) + return self._exec(["install"]) + + def update(self): + return self._exec(["update", "--latest"]) + + def uninstall(self): + if self.alias is not None: + return self._exec(["remove", self.alias]) + return self._exec(["remove", self.name]) + + def list_outdated(self): + if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")): + return list() + + 
cmd = ["outdated", "--format", "json"]
+        try:
+            out, err = self._exec(cmd, True, False)
+
+            # BUG: pnpm does not always report errors cleanly; plain text
+            # output can be intermingled with the JSON object
+            if err is not None and err != "":
+                raise Exception(out)
+
+            # HACK: to work around the above bug, keep only the lines between
+            # the opening and closing braces of the JSON object
+            data_lines = out.splitlines(True)
+
+            out = None
+            for line in data_lines:
+                if len(line) > 0 and line[0] == "{":
+                    out = line
+                    continue
+
+                if len(line) > 0 and line[0] == "}":
+                    out += line
+                    break
+
+                if out is not None:
+                    out += line
+
+            data = json.loads(out)
+        except Exception as e:
+            self.module.fail_json(
+                msg="Failed to parse pnpm output with error %s" % to_native(e)
+            )
+
+        return data.keys()
+
+
+def main():
+    arg_spec = dict(
+        name=dict(),
+        alias=dict(),
+        path=dict(type="path"),
+        version=dict(),
+        executable=dict(type="path"),
+        ignore_scripts=dict(default=False, type="bool"),
+        no_optional=dict(default=False, type="bool"),
+        production=dict(default=False, type="bool"),
+        dev=dict(default=False, type="bool"),
+        optional=dict(default=False, type="bool"),
+        state=dict(default="present", choices=["present", "absent", "latest"]),
+    )
+    arg_spec["global"] = dict(default=False, type="bool")
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+    name = module.params["name"]
+    alias = module.params["alias"]
+    path = module.params["path"]
+    version = module.params["version"]
+    globally = module.params["global"]
+    ignore_scripts = module.params["ignore_scripts"]
+    no_optional = module.params["no_optional"]
+    production = module.params["production"]
+    dev = module.params["dev"]
+    optional = module.params["optional"]
+    state = module.params["state"]
+
+    if module.params["executable"]:
+        executable = module.params["executable"].split(" ")
+    else:
+        executable = [module.get_bin_path("pnpm", True)]
+
+    if name is None and version is not None:
+        module.fail_json(msg="version is meaningless when name is not provided")
+
+    if name is None and alias is not None:
+        module.fail_json(msg="alias is meaningless when name is not provided")
+
+    if path is None and not globally:
+        module.fail_json(msg="path must be specified when not using global")
+    elif path is not None and globally:
+        module.fail_json(msg="Cannot specify path when doing global installation")
+
+    if globally and (production or dev or optional):
+        module.fail_json(
+            msg="Options production, dev, and optional are meaningless when installing packages globally"
+        )
+
+    if name is not None and path is not None and globally:
+        module.fail_json(msg="path should not be specified when installing globally")
+
+    if production and dev and optional:
+        module.fail_json(
+            msg="Options production, dev, and optional don't go together"
+        )
+
+    if production and dev:
+        module.fail_json(msg="Options production and dev don't go together")
+
+    if production and optional:
+        module.fail_json(msg="Options production and optional don't go together")
+
+    if dev and optional:
+        module.fail_json(msg="Options dev and optional don't go together")
+
+    if name is not None and name[0:4] == "http" and version is not None:
+        module.fail_json(msg="Semver is not supported on remote URL downloads")
+
+    if name is None and optional:
+        module.fail_json(
+            msg="optional is not available when no package name is provided; use no_optional instead"
+        )
+
+    if state == "absent" and name is None:
+        module.fail_json(msg="Package name is required for uninstalling")
+
+    if globally:
+        _rc, out, _err = module.run_command(executable + ["root", "-g"], check_rc=True)
+        path, _tail = os.path.split(out.strip())
+
+    pnpm = Pnpm(
+        module,
+        name=name,
+        alias=alias,
+        path=path,
+        version=version,
+        globally=globally,
+        executable=executable,
+        ignore_scripts=ignore_scripts,
+        no_optional=no_optional,
+        production=production,
+        dev=dev,
+        optional=optional,
+    )
+
+    changed = False
+    out = ""
+    err = ""
+    if state == "present":
+        if pnpm.missing():
+            changed = True
+            out, err = pnpm.install()
+    elif state == "latest":
+        outdated = pnpm.list_outdated()
+        if name is not None:
+            if pnpm.missing() or name in outdated:
+                changed = True
+                out, err = pnpm.install()
+        elif len(outdated):
+            changed = True
+            out, err = pnpm.update()
+    else:  # absent
+        if not pnpm.missing():
+            changed = True
+            out, err = pnpm.uninstall()
+
+    module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py
deleted file mode 120000
index eba586290e..0000000000
--- a/plugins/modules/portage.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/portage.py
\ No newline at end of file
diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py
new file mode 100644
index 0000000000..752960c042
--- /dev/null
+++ b/plugins/modules/portage.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016, William L Thomson Jr
+# Copyright (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann
+# Modified by William L. Thomson Jr.
+# Based on apt module written by Matthew Williams
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: portage
+short_description: Package manager for Gentoo
+description:
+  - Manages Gentoo packages.
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  package:
+    description:
+      - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world).
+    aliases: [name]
+    type: list
+    elements: str
+
+  state:
+    description:
+      - State of the package atom.
+    default: "present"
+    choices: ["present", "installed", "emerged", "absent", "removed", "unmerged", "latest"]
+    type: str
+
+  update:
+    description:
+      - Update packages to the best version available (C(--update)).
+    type: bool
+    default: false
+
+  backtrack:
+    description:
+      - Set backtrack value (C(--backtrack)).
+    type: int
+    version_added: 5.8.0
+
+  deep:
+    description:
+      - Consider the entire dependency tree of packages (C(--deep)).
+    type: bool
+    default: false
+
+  newuse:
+    description:
+      - Include installed packages where USE flags have changed (C(--newuse)).
+    type: bool
+    default: false
+
+  changed_use:
+    description:
+      - Include installed packages where USE flags have changed, except when the flags that the user has not enabled were
+        added or removed (C(--changed-use)).
+    type: bool
+    default: false
+
+  oneshot:
+    description:
+      - Do not add the packages to the world file (C(--oneshot)).
+    type: bool
+    default: false
+
+  noreplace:
+    description:
+      - Do not re-emerge installed packages (C(--noreplace)).
+    type: bool
+    default: true
+
+  nodeps:
+    description:
+      - Only merge packages but not their dependencies (C(--nodeps)).
+    type: bool
+    default: false
+
+  onlydeps:
+    description:
+      - Only merge packages' dependencies but not the packages (C(--onlydeps)).
+    type: bool
+    default: false
+
+  depclean:
+    description:
+      - Remove packages not needed by explicitly merged packages (C(--depclean)).
+      - If no package is specified, clean up the world's dependencies.
+      - Otherwise, C(--depclean) serves as a dependency-aware version of C(--unmerge).
+    type: bool
+    default: false
+
+  quiet:
+    description:
+      - Run emerge in quiet mode (C(--quiet)).
+    type: bool
+    default: false
+
+  verbose:
+    description:
+      - Run emerge in verbose mode (C(--verbose)).
+    type: bool
+    default: false
+
+  select:
+    description:
+      - If set to V(true), explicitly add the package to the world file.
+      - Please note that this option is not used for idempotency; it is only used when actually installing a package.
+    type: bool
+    version_added: 8.6.0
+
+  sync:
+    description:
+      - Sync package repositories first.
+      - If V(yes), perform C(emerge --sync).
+      - If V(web), perform C(emerge-webrsync).
+    choices: ["web", "yes", "no"]
+    type: str
+
+  getbinpkgonly:
+    description:
+      - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+    type: bool
+    default: false
+    version_added: 1.3.0
+
+  getbinpkg:
+    description:
+      - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+    type: bool
+    default: false
+
+  usepkgonly:
+    description:
+      - Merge only binaries (no compiling).
+    type: bool
+    default: false
+
+  usepkg:
+    description:
+      - Tries to use the binary package(s) in the locally available packages directory.
+    type: bool
+    default: false
+
+  keepgoing:
+    description:
+      - Continue as much as possible after an error.
+    type: bool
+    default: false
+
+  jobs:
+    description:
+      - Specifies the number of packages to build simultaneously.
+      - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--jobs) setting values.'
+    type: int
+
+  loadavg:
+    description:
+      - Specifies that no new builds should be started if there are other builds running and the load average is at least
+        the given value.
+      - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--load-average) setting values.'
+    type: float
+
+  withbdeps:
+    description:
+      - Specifies that build-time dependencies should be installed.
+    type: bool
+    version_added: 5.8.0
+
+  quietbuild:
+    description:
+      - Redirect all build output to logs alone, and do not display it on stdout (C(--quiet-build)).
+    type: bool
+    default: false
+
+  quietfail:
+    description:
+      - Suppresses display of the build log on stdout (C(--quiet-fail)).
+      - Only the die message and the path of the build log are displayed on stdout.
+ type: bool + default: false + +author: + - "William L Thomson Jr (@wltjr)" + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare (@Tatsh)" +""" + +EXAMPLES = r""" +- name: Make sure package foo is installed + community.general.portage: + package: foo + state: present + +- name: Make sure package foo is not installed + community.general.portage: + package: foo + state: absent + +- name: Update package foo to the latest version (os specific alternative to latest) + community.general.portage: + package: foo + update: true + +- name: Install package foo using PORTAGE_BINHOST setup + community.general.portage: + package: foo + getbinpkg: true + +- name: Re-install world from binary packages only and do not allow any compiling + community.general.portage: + package: '@world' + usepkgonly: true + +- name: Sync repositories and update world + community.general.portage: + package: '@world' + update: true + deep: true + sync: true + +- name: Remove unneeded packages + community.general.portage: + depclean: true + +- name: Remove package foo if it is not explicitly needed + community.general.portage: + package: foo + state: absent + depclean: true +""" + +import os +import re +import sys +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.respawn import has_respawned, respawn_module +from ansible.module_utils.common.text.converters import to_native + + +try: + from portage.dbapi import vartree + from portage.exception import InvalidAtom + HAS_PORTAGE = True + PORTAGE_IMPORT_ERROR = None +except ImportError: + HAS_PORTAGE = False + PORTAGE_IMPORT_ERROR = traceback.format_exc() + + +def query_package(module, package, action): + if package.startswith('@'): + return query_set(module, package, action) + return query_atom(module, package, action) + + +def query_atom(module, atom, action): + vdb = vartree.vardbapi() + try: + exists = vdb.match(atom) + except InvalidAtom: + return False + return bool(exists) + + +def query_set(module, set_, action): + system_sets = [ + '@live-rebuild', + '@module-rebuild', + '@preserved-rebuild', + '@security', + '@selected', + '@system', + '@world', + '@x11-module-rebuild', + ] + + if set_ in system_sets: + if action == 'unmerge': + module.fail_json(msg='set %s cannot be removed' % set_) + return False + + world_sets_path = '/var/lib/portage/world_sets' + if not os.path.exists(world_sets_path): + return False + + cmd = ['grep', set_, world_sets_path] + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def sync_repositories(module, webrsync=False): + if module.check_mode: + module.exit_json(msg='check mode not supported by sync') + + if webrsync: + webrsync_path = module.get_bin_path('emerge-webrsync', required=True) + cmd = [webrsync_path, '--quiet'] + else: + cmd = [module.emerge_path, '--sync', '--quiet', '--ask=n'] + + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg='could not sync package repositories') + + +# Note: In the 3 functions below, package querying is done one-by-one, +# but emerge is done in one go. If that is not desirable, split the +# packages into multiple tasks instead of joining them together with +# comma. 
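+#
+# For example (illustrative tasks with hypothetical package names), instead
+# of:
+#
+#   - community.general.portage:
+#       package: foo,bar
+#
+# prefer one task per package:
+#
+#   - community.general.portage:
+#       package: foo
+#
+#   - community.general.portage:
+#       package: bar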
+ + +def emerge_packages(module, packages): + """Run emerge command against given list of atoms.""" + p = module.params + + if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'): + for package in packages: + if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not query_package(module, package, 'emerge'): + break + else: + module.exit_json(changed=False, msg='Packages already present.') + if module.check_mode: + module.exit_json(changed=True, msg='Packages would be installed.') + + args = [] + emerge_flags = { + 'update': '--update', + 'deep': '--deep', + 'newuse': '--newuse', + 'changed_use': '--changed-use', + 'oneshot': '--oneshot', + 'noreplace': '--noreplace', + 'nodeps': '--nodeps', + 'onlydeps': '--onlydeps', + 'quiet': '--quiet', + 'verbose': '--verbose', + 'getbinpkgonly': '--getbinpkgonly', + 'getbinpkg': '--getbinpkg', + 'usepkgonly': '--usepkgonly', + 'usepkg': '--usepkg', + 'keepgoing': '--keep-going', + 'quietbuild': '--quiet-build', + 'quietfail': '--quiet-fail', + } + for flag, arg in emerge_flags.items(): + if p[flag]: + args.append(arg) + + if p['state'] and p['state'] == 'latest': + args.append("--update") + + emerge_flags = { + 'jobs': '--jobs', + 'loadavg': '--load-average', + 'backtrack': '--backtrack', + 'withbdeps': '--with-bdeps', + 'select': '--select', + } + + for flag, arg in emerge_flags.items(): + flag_val = p[flag] + + if flag_val is None: + """Fallback to default: don't use this argument at all.""" + continue + + """Add the --flag=value pair.""" + if isinstance(flag_val, bool): + args.extend((arg, to_native('y' if flag_val else 'n'))) + elif not flag_val: + """If the value is 0 or 0.0: add the flag, but not the value.""" + args.append(arg) + else: + args.extend((arg, to_native(flag_val))) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not installed.', + ) + + # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite + # this error + if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \ + and 'Permission denied (publickey).' in err: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Please check your PORTAGE_BINHOST configuration in make.conf ' + 'and your SSH authorized_keys file', + ) + + changed = True + for line in out.splitlines(): + if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): + msg = 'Packages installed.' + break + elif module.check_mode and re.match(r'\[(binary|ebuild)', line): + msg = 'Packages would be installed.' + break + else: + changed = False + msg = 'No packages installed.' 
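+
+    # The loop above infers the result from emerge output: a ">>> Emerging"
+    # line means at least one package was actually merged, while in check
+    # mode a line starting with "[binary" or "[ebuild" means one would be.
+    # If neither pattern matches, the for/else branch reports no change.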
+ + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg=msg, + ) + + +def unmerge_packages(module, packages): + p = module.params + + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--unmerge'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not removed.', + ) + + module.exit_json( + changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages removed.', + ) + + +def cleanup_packages(module, packages): + p = module.params + + if packages: + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--depclean'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) + + removed = 0 + for line in out.splitlines(): + if not line.startswith('Number removed:'): + continue + parts = line.split(':') + removed = int(parts[1].strip()) + changed = removed > 0 + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Depclean completed.', + ) + + +def run_emerge(module, packages, *args): + args = list(args) + + args.append('--ask=n') + if module.check_mode: + args.append('--pretend') + + cmd = [module.emerge_path] + args + packages + return cmd, module.run_command(cmd) + + +portage_present_states = ['present', 'emerged', 'installed', 'latest'] +portage_absent_states = ['absent', 'unmerged', 'removed'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + package=dict(type='list', elements='str', aliases=['name']), + state=dict( + default=portage_present_states[0], + choices=portage_present_states + portage_absent_states, + ), + update=dict(default=False, type='bool'), + backtrack=dict(type='int'), + deep=dict(default=False, type='bool'), + newuse=dict(default=False, type='bool'), + changed_use=dict(default=False, type='bool'), + oneshot=dict(default=False, type='bool'), + noreplace=dict(default=True, type='bool'), + nodeps=dict(default=False, type='bool'), + onlydeps=dict(default=False, type='bool'), + depclean=dict(default=False, type='bool'), + select=dict(type='bool'), + quiet=dict(default=False, type='bool'), + verbose=dict(default=False, type='bool'), + sync=dict(choices=['yes', 'web', 'no']), + getbinpkgonly=dict(default=False, type='bool'), + getbinpkg=dict(default=False, type='bool'), + usepkgonly=dict(default=False, type='bool'), + usepkg=dict(default=False, type='bool'), + keepgoing=dict(default=False, type='bool'), + jobs=dict(type='int'), + loadavg=dict(type='float'), + withbdeps=dict(type='bool'), + quietbuild=dict(default=False, type='bool'), + quietfail=dict(default=False, type='bool'), + ), + required_one_of=[['package', 'sync', 'depclean']], + mutually_exclusive=[ + ['nodeps', 'onlydeps'], + ['quiet', 'verbose'], + ['quietbuild', 'verbose'], + ['quietfail', 'verbose'], + ['oneshot', 'select'], + ], + supports_check_mode=True, + ) + + if not HAS_PORTAGE: + if sys.executable != '/usr/bin/python' and not has_respawned(): + respawn_module('/usr/bin/python') + else: + module.fail_json(msg=missing_required_lib('portage'), + 
exception=PORTAGE_IMPORT_ERROR) + + module.emerge_path = module.get_bin_path('emerge', required=True) + + p = module.params + + if p['sync'] and p['sync'].strip() != 'no': + sync_repositories(module, webrsync=(p['sync'] == 'web')) + if not p['package']: + module.exit_json(msg='Sync successfully finished.') + + packages = [] + if p['package']: + packages.extend(p['package']) + + if p['depclean']: + if packages and p['state'] not in portage_absent_states: + module.fail_json( + msg='Depclean can only be used with package when the state is ' + 'one of: %s' % portage_absent_states, + ) + + cleanup_packages(module, packages) + + elif p['state'] in portage_present_states: + emerge_packages(module, packages) + + elif p['state'] in portage_absent_states: + unmerge_packages(module, packages) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/portinstall.py b/plugins/modules/portinstall.py deleted file mode 120000 index 83fd0c8340..0000000000 --- a/plugins/modules/portinstall.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/portinstall.py \ No newline at end of file diff --git a/plugins/modules/portinstall.py b/plugins/modules/portinstall.py new file mode 100644 index 0000000000..8598294a68 --- /dev/null +++ b/plugins/modules/portinstall.py @@ -0,0 +1,213 @@ +#!/usr/bin/python + +# Copyright (c) 2013, berenddeboer +# Written by berenddeboer +# Based on pkgng module written by bleader +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: portinstall +short_description: Installing packages from FreeBSD's ports system +description: + - Manage packages for FreeBSD using C(portinstall). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of package to install/remove. + aliases: [pkg] + required: true + type: str + state: + description: + - State of the package. + choices: ['present', 'absent'] + required: false + default: present + type: str + use_packages: + description: + - Use packages instead of ports whenever available. + type: bool + required: false + default: true +author: "berenddeboer (@berenddeboer)" +""" + +EXAMPLES = r""" +- name: Install package foo + community.general.portinstall: + name: foo + state: present + +- name: Install package security/cyrus-sasl2-saslauthd + community.general.portinstall: + name: security/cyrus-sasl2-saslauthd + state: present + +- name: Remove packages foo and bar + community.general.portinstall: + name: foo,bar + state: absent +""" + +import re + +from shlex import quote as shlex_quote +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, name): + + pkg_info_path = module.get_bin_path('pkg_info', False) + + # Assume that if we have pkg_info, we haven't upgraded to pkgng + if pkg_info_path: + pkgng = False + pkg_glob_path = module.get_bin_path('pkg_glob', True) + # TODO: convert run_comand() argument to list! 
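+        # pkg_glob expands the (possibly inexact) package name to the exact
+        # installed package name(s); the backticks feed that list to
+        # pkg_info -e.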
+        rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True)
+        pkg_info_path = [pkg_info_path]
+    else:
+        pkgng = True
+        pkg_info_path = [module.get_bin_path('pkg', True), "info"]
+        rc, out, err = module.run_command(pkg_info_path + [name])
+
+    found = rc == 0
+
+    if not found:
+        # databases/mysql55-client installs as mysql-client, so try solving
+        # that the ugly way. Pity FreeBSD doesn't have a foolproof way of
+        # checking whether a package is installed
+        name_without_digits = re.sub('[0-9]', '', name)
+        if name != name_without_digits:
+            rc, out, err = module.run_command(pkg_info_path + [name_without_digits])
+
+            found = rc == 0
+
+    return found
+
+
+def matching_packages(module, name):
+
+    ports_glob_path = module.get_bin_path('ports_glob', True)
+    rc, out, err = module.run_command([ports_glob_path, name])
+    # counts the number of packages found
+    occurrences = out.count('\n')
+    if occurrences == 0:
+        name_without_digits = re.sub('[0-9]', '', name)
+        if name != name_without_digits:
+            rc, out, err = module.run_command([ports_glob_path, name_without_digits])
+            occurrences = out.count('\n')
+    return occurrences
+
+
+def remove_packages(module, packages):
+
+    remove_c = 0
+    pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+    # If pkg_delete not found, we assume pkgng
+    pkg_delete_path = module.get_bin_path('pkg_delete', False)
+    if not pkg_delete_path:
+        pkg_delete_path = module.get_bin_path('pkg', True)
+        pkg_delete_path = pkg_delete_path + " delete -y"
+
+    # Use a for loop so that, in case of error, we can report which package failed
+    for package in packages:
+        # Query the package first, to see if we even need to remove it
+        if not query_package(module, package):
+            continue
+
+        # TODO: convert run_command() argument to list!
+        rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+        if query_package(module, package):
+            name_without_digits = re.sub('[0-9]', '', package)
+            # TODO: convert run_command() argument to list!
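+            # Retry with all digits stripped from the name, mirroring the
+            # fallback in query_package() (for example, mysql55-client is
+            # installed as mysql-client).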
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, + shlex_quote(name_without_digits)), + use_unsafe_shell=True) + if query_package(module, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, packages, use_packages): + + install_c = 0 + + # If portinstall not found, automagically install + portinstall_path = module.get_bin_path('portinstall', False) + if not portinstall_path: + pkg_path = module.get_bin_path('pkg', False) + if pkg_path: + module.run_command([pkg_path, "install", "-y", "portupgrade"]) + portinstall_path = module.get_bin_path('portinstall', True) + + if use_packages: + portinstall_params = ["--use-packages"] + else: + portinstall_params = [] + + for package in packages: + if query_package(module, package): + continue + + # TODO: check how many match + matches = matching_packages(module, package) + if matches == 1: + rc, out, err = module.run_command([portinstall_path, "--batch"] + portinstall_params + [package]) + if not query_package(module, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + elif matches == 0: + module.fail_json(msg="no matches for package %s" % (package)) + else: + module.fail_json(msg="%s matches found for package name %s" % (matches, package)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"]), + name=dict(aliases=["pkg"], required=True), + use_packages=dict(type='bool', default=True))) + + p = module.params + + pkgs = p["name"].split(",") + + if p["state"] == "present": + install_packages(module, pkgs, p["use_packages"]) + + elif p["state"] == "absent": + remove_packages(module, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py deleted file mode 120000 index 3e45ac224d..0000000000 --- a/plugins/modules/pritunl_org.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/pritunl/pritunl_org.py \ No newline at end of file diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py new file mode 100644 index 0000000000..241d0cb08f --- /dev/null +++ b/plugins/modules/pritunl_org.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pritunl_org +author: Florian Dambrine (@Lowess) +version_added: 2.5.0 +short_description: Manages Pritunl Organizations using the Pritunl API +description: + - A module to manage Pritunl organizations using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + required: true + aliases: + - org + description: + - The name of the organization to manage in Pritunl. 
+ force: + type: bool + default: false + description: + - If O(force) is V(true) and O(state) is V(absent), the module deletes the organization, no matter if it contains users + or not. By default O(force) is V(false), which causes the module to fail the deletion of the organization when it + contains users. + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If V(present), the module adds organization O(name) to Pritunl. If V(absent), attempt to delete the organization from + Pritunl (please read about O(force) usage). +""" + +EXAMPLES = r""" +- name: Ensure the organization named MyOrg exists + community.general.pritunl_org: + state: present + name: MyOrg + +- name: Ensure the organization named MyOrg does not exist + community.general.pritunl_org: + state: absent + name: MyOrg +""" + +RETURN = r""" +response: + description: JSON representation of a Pritunl Organization. + returned: success + type: dict + sample: + { + "auth_api": false, + "name": "Foo", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3" + } +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + delete_pritunl_organization, + post_pritunl_organization, + list_pritunl_organizations, + get_pritunl_settings, + pritunl_argument_spec, +) + + +def add_pritunl_organization(module): + result = {} + + org_name = module.params.get("name") + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name}}, + ) + ) + + # If the organization already exists + if len(org_obj_list) > 0: + result["changed"] = False + result["response"] = org_obj_list[0] + else: + # Otherwise create it + response = post_pritunl_organization( + **dict_merge( + get_pritunl_settings(module), + {"organization_name": org_name}, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def remove_pritunl_organization(module): + result = {} + + org_name = module.params.get("name") + force = module.params.get("force") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + { + "filters": {"name": org_name}, + }, + ) + ) + + # No organization found + if len(org_obj_list) == 0: + result["changed"] = False + result["response"] = {} + + else: + # Otherwise attempt to delete it + org = org_obj_list[0] + + # Only accept deletion under specific conditions + if force or org["user_count"] == 0: + response = delete_pritunl_organization( + **dict_merge( + get_pritunl_settings(module), + {"organization_id": org["id"]}, + ) + ) + result["changed"] = True + result["response"] = response + else: + module.fail_json( + msg=( + "Can not remove organization '%s' with %d attached users. 
" + "Either set 'force' option to true or remove active users " + "from the organization" + ) + % (org_name, org["user_count"]) + ) + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + name=dict(required=True, type="str", aliases=["org"]), + force=dict(type="bool", default=False), + state=dict(choices=["present", "absent"], default="present"), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params.get("state") + + try: + if state == "present": + add_pritunl_organization(module) + elif state == "absent": + remove_pritunl_organization(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py deleted file mode 120000 index 45ca579db2..0000000000 --- a/plugins/modules/pritunl_org_info.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/pritunl/pritunl_org_info.py \ No newline at end of file diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py new file mode 100644 index 0000000000..a98fcd9f4d --- /dev/null +++ b/plugins/modules/pritunl_org_info.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pritunl_org_info +author: Florian Dambrine (@Lowess) +version_added: 2.5.0 +short_description: List Pritunl Organizations using the Pritunl API +description: + - A module to list Pritunl organizations using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl + - community.general.attributes + - community.general.attributes.info_module +options: + organization: + type: str + required: false + aliases: + - org + default: null + description: + - Name of the Pritunl organization to search for. If none provided, the module returns all Pritunl organizations. +""" + +EXAMPLES = r""" +- name: List all existing Pritunl organizations + community.general.pritunl_org_info: + +- name: Search for an organization named MyOrg + community.general.pritunl_user_info: + organization: MyOrg +""" + +RETURN = r""" +organizations: + description: List of Pritunl organizations. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "auth_api": false, + "name": "FooOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3" + }, + { + "auth_api": false, + "name": "MyOrg", + "auth_token": null, + "user_count": 3, + "auth_secret": null, + "id": "58070daee63f3b2e6e472c36" + }, + { + "auth_api": false, + "name": "BarOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "v1sncsxxybnsylc8gpqg85pg" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + get_pritunl_settings, + list_pritunl_organizations, + pritunl_argument_spec, +) + + +def get_pritunl_organizations(module): + org_name = module.params.get("organization") + + organizations = [] + + organizations = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name} if org_name else None}, + ) + ) + + if org_name and len(organizations) == 0: + # When an org_name is provided but no organization match return an error + module.fail_json(msg="Organization '%s' does not exist" % org_name) + + result = {} + result["changed"] = False + result["organizations"] = organizations + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(type="str", aliases=["org"]) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + get_pritunl_organizations(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py deleted file mode 120000 index 25a91db66b..0000000000 --- a/plugins/modules/pritunl_user.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/pritunl/pritunl_user.py \ No newline at end of file diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py new file mode 100644 index 0000000000..ff5ed479e6 --- /dev/null +++ b/plugins/modules/pritunl_user.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pritunl_user +author: "Florian Dambrine (@Lowess)" +version_added: 2.3.0 +short_description: Manage Pritunl Users using the Pritunl API +description: + - A module to manage Pritunl users using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If V(present), the module adds user O(user_name) to the Pritunl O(organization). If V(absent), removes the user O(user_name) + from the Pritunl O(organization). + user_name: + type: str + required: true + default: + description: + - Name of the user to create or delete from Pritunl. 
+ user_email: + type: str + required: false + default: + description: + - Email address associated with the user O(user_name). + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user O(user_name). + user_groups: + type: list + elements: str + required: false + default: + description: + - List of groups associated with the user O(user_name). + user_disabled: + type: bool + required: false + default: + description: + - Enable/Disable the user O(user_name). + user_gravatar: + type: bool + required: false + default: + description: + - Enable/Disable Gravatar usage for the user O(user_name). + user_mac_addresses: + type: list + elements: str + description: + - Allowed MAC addresses for the user O(user_name). + version_added: 5.0.0 +""" + +EXAMPLES = r""" +- name: Create the user Foo with email address foo@bar.com in MyOrg + community.general.pritunl_user: + state: present + organization: MyOrg + user_name: Foo + user_email: foo@bar.com + user_mac_addresses: + - "00:00:00:00:00:99" + +- name: Disable the user Foo but keep it in Pritunl + community.general.pritunl_user: + state: present + organization: MyOrg + user_name: Foo + user_email: foo@bar.com + user_disabled: true + +- name: Make sure the user Foo is not part of MyOrg anymore + community.general.pritunl_user: + state: absent + organization: MyOrg + user_name: Foo +""" + +RETURN = r""" +response: + description: JSON representation of Pritunl Users. + returned: success + type: dict + sample: + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", + "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [] + } +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + delete_pritunl_user, + get_pritunl_settings, + list_pritunl_organizations, + list_pritunl_users, + post_pritunl_user, + pritunl_argument_spec, +) + + +def add_or_update_pritunl_user(module): + result = {} + + org_name = module.params.get("organization") + user_name = module.params.get("user_name") + + user_params = { + "name": user_name, + "email": module.params.get("user_email"), + "groups": module.params.get("user_groups"), + "disabled": module.params.get("user_disabled"), + "gravatar": module.params.get("user_gravatar"), + "mac_addresses": module.params.get("user_mac_addresses"), + "type": module.params.get("user_type"), + } + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name}}, + ) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not add user to organization '%s' which does not exist" % org_name + ) + + org_id = org_obj_list[0]["id"] + + # Grab existing users from this org + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": {"name": user_name}, + }, + ) + ) 
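+
+    # dict_merge() above overlays the per-call arguments (organization_id and
+    # the name filter) onto the shared connection settings returned by
+    # get_pritunl_settings(), so the API helper receives one flat kwargs dict.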
+ + # Check if the pritunl user already exists + if len(users) > 0: + # Compare remote user params with local user_params and trigger update if needed + user_params_changed = False + for key in user_params.keys(): + # When a param is not specified grab existing ones to prevent from changing it with the PUT request + if user_params[key] is None: + user_params[key] = users[0][key] + + # 'groups' and 'mac_addresses' are list comparison + if key == "groups" or key == "mac_addresses": + if set(users[0][key]) != set(user_params[key]): + user_params_changed = True + + # otherwise it is either a boolean or a string + else: + if users[0][key] != user_params[key]: + user_params_changed = True + + # Trigger a PUT on the API to update the current user if settings have changed + if user_params_changed: + response = post_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_id": users[0]["id"], + "user_data": user_params, + }, + ) + ) + + result["changed"] = True + result["response"] = response + else: + result["changed"] = False + result["response"] = users + else: + response = post_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_data": user_params, + }, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def remove_pritunl_user(module): + result = {} + + org_name = module.params.get("organization") + user_name = module.params.get("user_name") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + { + "filters": {"name": org_name}, + }, + ) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not remove user '%s' from a non existing organization '%s'" + % (user_name, org_name) + ) + + org_id = org_obj_list[0]["id"] + + # Grab existing users from this org + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": {"name": user_name}, + }, + ) + ) + + # Check if the pritunl user exists, if not, do nothing + if len(users) == 0: + result["changed"] = False + result["response"] = {} + + # Otherwise remove the org from Pritunl + else: + response = delete_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_id": users[0]["id"], + }, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=True, type="str", aliases=["org"]), + state=dict(choices=["present", "absent"], default="present"), + user_name=dict(required=True, type="str"), + user_type=dict(choices=["client", "server"], default="client"), + user_email=dict(type="str"), + user_groups=dict(type="list", elements="str"), + user_disabled=dict(type="bool"), + user_gravatar=dict(type="bool"), + user_mac_addresses=dict(type="list", elements="str"), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params.get("state") + + try: + if state == "present": + add_or_update_pritunl_user(module) + elif state == "absent": + remove_pritunl_user(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py deleted file mode 120000 index bfabbe0c8c..0000000000 --- 
a/plugins/modules/pritunl_user_info.py
+++ /dev/null
@@ -1 +0,0 @@
-net_tools/pritunl/pritunl_user_info.py
\ No newline at end of file
diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py
new file mode 100644
index 0000000000..99a91eaad3
--- /dev/null
+++ b/plugins/modules/pritunl_user_info.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2021, Florian Dambrine
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: pritunl_user_info
+author: "Florian Dambrine (@Lowess)"
+version_added: 2.3.0
+short_description: List Pritunl Users using the Pritunl API
+description:
+  - A module to list Pritunl users using the Pritunl API.
+extends_documentation_fragment:
+  - community.general.pritunl
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  organization:
+    type: str
+    required: true
+    aliases:
+      - org
+    description:
+      - The name of the organization the user is part of.
+  user_name:
+    type: str
+    required: false
+    description:
+      - Name of the user to filter on Pritunl.
+  user_type:
+    type: str
+    required: false
+    default: client
+    choices:
+      - client
+      - server
+    description:
+      - Type of the user O(user_name).
+"""
+
+EXAMPLES = r"""
+- name: List all existing users that are part of the organization MyOrg
+  community.general.pritunl_user_info:
+    organization: MyOrg
+
+- name: Search for the user named Florian who is part of the organization MyOrg
+  community.general.pritunl_user_info:
+    organization: MyOrg
+    user_name: Florian
+"""
+
+RETURN = r"""
+users:
+  description: List of Pritunl users.
+ returned: success + type: list + elements: dict + sample: + [ + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", + "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [] + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + get_pritunl_settings, + list_pritunl_organizations, + list_pritunl_users, + pritunl_argument_spec, +) + + +def get_pritunl_user(module): + user_name = module.params.get("user_name") + user_type = module.params.get("user_type") + org_name = module.params.get("organization") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge(get_pritunl_settings(module), {"filters": {"name": org_name}}) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not list users from the organization '%s' which does not exist" + % org_name + ) + + org_id = org_obj_list[0]["id"] + + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": ( + {"type": user_type} + if user_name is None + else {"name": user_name, "type": user_type} + ), + }, + ) + ) + + result = {} + result["changed"] = False + result["users"] = users + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=True, type="str", aliases=["org"]), + user_name=dict(type="str"), + user_type=dict(choices=["client", "server"], default="client"), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + get_pritunl_user(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/profitbricks.py b/plugins/modules/profitbricks.py deleted file mode 120000 index efead21367..0000000000 --- a/plugins/modules/profitbricks.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/profitbricks/profitbricks.py \ No newline at end of file diff --git a/plugins/modules/profitbricks_datacenter.py b/plugins/modules/profitbricks_datacenter.py deleted file mode 120000 index 7260b21830..0000000000 --- a/plugins/modules/profitbricks_datacenter.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/profitbricks/profitbricks_datacenter.py \ No newline at end of file diff --git a/plugins/modules/profitbricks_nic.py b/plugins/modules/profitbricks_nic.py deleted file mode 120000 index 6acdd15511..0000000000 --- a/plugins/modules/profitbricks_nic.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/profitbricks/profitbricks_nic.py \ No newline at end of file diff --git a/plugins/modules/profitbricks_volume.py b/plugins/modules/profitbricks_volume.py deleted file mode 120000 index 0099158fc8..0000000000 --- a/plugins/modules/profitbricks_volume.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/profitbricks/profitbricks_volume.py \ No newline at end of file diff --git 
a/plugins/modules/profitbricks_volume_attachments.py b/plugins/modules/profitbricks_volume_attachments.py deleted file mode 120000 index d031db4c36..0000000000 --- a/plugins/modules/profitbricks_volume_attachments.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/profitbricks/profitbricks_volume_attachments.py \ No newline at end of file diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py deleted file mode 120000 index cdc81bc3d9..0000000000 --- a/plugins/modules/proxmox.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/proxmox.py \ No newline at end of file diff --git a/plugins/modules/proxmox_domain_info.py b/plugins/modules/proxmox_domain_info.py deleted file mode 120000 index a14a61a33b..0000000000 --- a/plugins/modules/proxmox_domain_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_domain_info.py \ No newline at end of file diff --git a/plugins/modules/proxmox_group_info.py b/plugins/modules/proxmox_group_info.py deleted file mode 120000 index f9b760742b..0000000000 --- a/plugins/modules/proxmox_group_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_group_info.py \ No newline at end of file diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py deleted file mode 120000 index 3af8641f1b..0000000000 --- a/plugins/modules/proxmox_kvm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/proxmox_kvm.py \ No newline at end of file diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py deleted file mode 120000 index 88756ab636..0000000000 --- a/plugins/modules/proxmox_nic.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_nic.py \ No newline at end of file diff --git a/plugins/modules/proxmox_snap.py b/plugins/modules/proxmox_snap.py deleted file mode 120000 index d2e3b2b8de..0000000000 --- a/plugins/modules/proxmox_snap.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/proxmox_snap.py \ No newline at end of file diff --git a/plugins/modules/proxmox_storage_info.py b/plugins/modules/proxmox_storage_info.py deleted file mode 120000 index 8128300547..0000000000 --- a/plugins/modules/proxmox_storage_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_storage_info.py \ No newline at end of file diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py deleted file mode 120000 index 34343b8539..0000000000 --- a/plugins/modules/proxmox_tasks_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_tasks_info.py \ No newline at end of file diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py deleted file mode 120000 index d959f27f23..0000000000 --- a/plugins/modules/proxmox_template.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/proxmox_template.py \ No newline at end of file diff --git a/plugins/modules/proxmox_user_info.py b/plugins/modules/proxmox_user_info.py deleted file mode 120000 index a713ac8ddf..0000000000 --- a/plugins/modules/proxmox_user_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_user_info.py \ No newline at end of file diff --git a/plugins/modules/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py deleted file mode 120000 index d1d75c2232..0000000000 --- a/plugins/modules/pubnub_blocks.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/pubnub/pubnub_blocks.py \ No newline at end of file diff --git a/plugins/modules/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py new file mode 100644 index 0000000000..9f2135de20 --- /dev/null +++ b/plugins/modules/pubnub_blocks.py @@ -0,0 +1,612 @@ +#!/usr/bin/python +# +# PubNub Real-time Cloud-Hosted Push API and Push Notification 
Client +# Frameworks +# Copyright (C) 2016 PubNub Inc. +# http://www.pubnub.com/ +# http://www.pubnub.com/terms +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pubnub_blocks +short_description: PubNub blocks management module +description: + - 'This module allows Ansible to interface with the PubNub BLOCKS infrastructure by providing the following operations: + create / remove, start / stop and rename for blocks and create / modify / remove for event handlers.' +author: + - PubNub (@pubnub) + - Sergey Mamontov (@parfeon) +requirements: + - "pubnub_blocks_client >= 1.0" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + email: + description: + - Email from account for which new session should be started. + - Not required if O(cache) contains result of previous module call (in same play). + required: false + type: str + default: '' + password: + description: + - Password which match to account to which specified O(email) belong. + - Not required if O(cache) contains result of previous module call (in same play). + required: false + type: str + default: '' + cache: + description: >- + In case if single play use blocks management module few times it is preferred to enabled 'caching' by making previous + module to share gathered artifacts and pass them to this parameter. + required: false + type: dict + default: {} + account: + description: + - Name of PubNub account for from which O(application) is used to manage blocks. + - User's account is used if value not set or empty. + type: str + default: '' + application: + description: + - Name of target PubNub application for which blocks configuration on specific O(keyset) is done. + type: str + required: true + keyset: + description: + - Name of application's keys set which is bound to managed blocks. + type: str + required: true + state: + description: + - Intended block state after event handlers creation / update process is completed. + required: false + default: 'present' + choices: ['started', 'stopped', 'present', 'absent'] + type: str + name: + description: + - Name of managed block which is later visible on admin.pubnub.com. + required: true + type: str + description: + description: + - Short block description which is later visible on U(https://admin.pubnub.com). + - Used only if block does not exists and does not change description for existing block. + required: false + type: str + event_handlers: + description: + - List of event handlers which should be updated for specified block O(name). + - 'Each entry for new event handler should contain: V(name), V(src), V(channels), V(event). V(name) used as event handler + name which can be used later to make changes to it.' + - C(src) is full path to file with event handler code. + - V(channels) is name of channel from which event handler is waiting for events. + - 'V(event) is type of event which is able to trigger event handler: V(js-before-publish), V(js-after-publish), V(js-after-presence).' + - Each entry for existing handlers should contain C(name) (so target handler can be identified). Rest parameters (C(src), + C(channels) and C(event)) can be added if changes required for them. 
+  changes:
+    description:
+      - List of fields which should be changed for the block itself (this does not affect any event handlers).
+      - 'The only field which can currently be changed is: O(name).'
+    required: false
+    default: {}
+    type: dict
+  validate_certs:
+    description:
+      - This key allows skipping the certificate check when performing REST API calls. Sometimes a host may have issues
+        with its certificates, and this causes problems when calling the PubNub REST API.
+      - If the check should be ignored, V(false) should be passed in this parameter.
+    required: false
+    default: true
+    type: bool
+"""
+
+EXAMPLES = r"""
+# Event handler create example.
+- name: Create single event handler
+  community.general.pubnub_blocks:
+    email: '{{ email }}'
+    password: '{{ password }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    event_handlers:
+      - src: '{{ path_to_handler_source }}'
+        name: '{{ handler_name }}'
+        event: 'js-before-publish'
+        channels: '{{ handler_channel }}'
+
+# Change event handler trigger event type.
+- name: Change event handler 'event'
+  community.general.pubnub_blocks:
+    email: '{{ email }}'
+    password: '{{ password }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    event_handlers:
+      - name: '{{ handler_name }}'
+        event: 'js-after-publish'
+
+# Stop block and event handlers.
+- name: Stopping block
+  community.general.pubnub_blocks:
+    email: '{{ email }}'
+    password: '{{ password }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    state: stopped
+
+# Multiple module calls with cached result passing
+- name: Create '{{ block_name }}' block
+  register: module_cache
+  community.general.pubnub_blocks:
+    email: '{{ email }}'
+    password: '{{ password }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    state: present
+- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
+  register: module_cache
+  community.general.pubnub_blocks:
+    cache: '{{ module_cache }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    state: present
+    event_handlers:
+      - src: '{{ path_to_handler_1_source }}'
+        name: '{{ event_handler_1_name }}'
+        channels: '{{ event_handler_1_channel }}'
+        event: 'js-before-publish'
+- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
+  register: module_cache
+  community.general.pubnub_blocks:
+    cache: '{{ module_cache }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    state: present
+    event_handlers:
+      - src: '{{ path_to_handler_2_source }}'
+        name: '{{ event_handler_2_name }}'
+        channels: '{{ event_handler_2_channel }}'
+        event: 'js-before-publish'
+- name: Start '{{ block_name }}' block
+  register: module_cache
+  community.general.pubnub_blocks:
+    cache: '{{ module_cache }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    state: started
+"""
+
+RETURN = r"""
+module_cache:
+  description:
+    - Cached account information. If the module is used several times within a single play, it is better to pass the
+      cached data to the next module calls to speed up the process.
+  type: dict
+  returned: always
+"""
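The EXAMPLES above thread a registered `module_cache` result from one task into the `cache` option of the next. A rough standalone sketch of that contract (no PubNub dependency; `resolve_session` and the dictionary shapes are invented for illustration, loosely mirroring the `pubnub_user` helper below):

```python
# Standalone sketch of the caching contract documented above: a call either
# authenticates with email/password or restores state from a previous call's
# returned module_cache.
def resolve_session(params):
    cache = params.get("cache") or {}
    if cache.get("module_cache"):
        # Second and later calls: reuse the session cached by the first call.
        return {"restored": True, "user": cache["module_cache"]["pnm_user"]}
    if params.get("email") and params.get("password"):
        # First call: start a fresh session from credentials.
        return {"restored": False, "user": {"email": params["email"]}}
    raise ValueError("Pass email/password or the previous call's module_cache.")


first = resolve_session({"email": "ops@example.com", "password": "secret"})
# The module returns this structure under the 'module_cache' key ...
module_cache = {"pnm_user": first["user"]}
# ... and the next task passes it back in through the 'cache' option.
second = resolve_session({"cache": {"module_cache": module_cache}})
assert second["restored"]
```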
+import copy
+import os
+
+try:
+    # Import PubNub BLOCKS client.
+    from pubnub_blocks_client import User
+    from pubnub_blocks_client import Block, EventHandler
+    from pubnub_blocks_client import exceptions
+    HAS_PUBNUB_BLOCKS_CLIENT = True
+except ImportError:
+    HAS_PUBNUB_BLOCKS_CLIENT = False
+    User = None
+    Account = None
+    Owner = None
+    Application = None
+    Keyset = None
+    Block = None
+    EventHandler = None
+    exceptions = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+
+
+def pubnub_user(module):
+    """Create and configure user model if possible.
+
+    :type module:  AnsibleModule
+    :param module: Reference on module which contains module launch
+                   information and status report methods.
+
+    :rtype:  User
+    :return: Reference on initialized and ready to use user, or 'None' in
+             case not all required information has been passed.
+    """
+    user = None
+    params = module.params
+
+    if params.get('cache') and params['cache'].get('module_cache'):
+        cache = params['cache']['module_cache']
+        user = User()
+        user.restore(cache=copy.deepcopy(cache['pnm_user']))
+    elif params.get('email') and params.get('password'):
+        user = User(email=params.get('email'), password=params.get('password'))
+    else:
+        err_msg = 'It looks like no account credentials have been passed, ' \
+                  'or the \'cache\' field does not contain the result of ' \
+                  'a previous module call.'
+        module.fail_json(msg='Missing account credentials.',
+                         description=err_msg, changed=False)
+
+    return user
+
+
+def pubnub_account(module, user):
+    """Create and configure account if possible.
+
+    :type module:  AnsibleModule
+    :param module: Reference on module which contains module launch
+                   information and status report methods.
+    :type user:  User
+    :param user: Reference on authorized user for which one of the accounts
+                 should be used during manipulations with the block.
+
+    :rtype:  Account
+    :return: Reference on initialized and ready to use account, or 'None' in
+             case not all required information has been passed.
+    """
+    params = module.params
+    if params.get('account'):
+        account_name = params.get('account')
+        account = user.account(name=params.get('account'))
+        if account is None:
+            err_frmt = 'It looks like there is no \'{0}\' account for the ' \
+                       'authorized user. Please make sure the correct ' \
+                       'name has been passed during module configuration.'
+            module.fail_json(msg='Missing account.',
+                             description=err_frmt.format(account_name),
+                             changed=False)
+    else:
+        account = user.accounts()[0]
+
+    return account
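The try/except import block near the top of this module follows the collection's usual guarded-import convention. A generic sketch of that pattern, using a hypothetical dependency name together with the real `missing_required_lib` helper from `ansible.module_utils.basic` (the same pattern the pushbullet module later in this patch uses):

```python
# Guarded-import sketch: capture the traceback at import time and report it
# via missing_required_lib() only when the module actually runs.
import traceback

LIB_IMP_ERR = None
try:
    import some_client_library  # hypothetical third-party dependency
    HAS_LIB = True
except ImportError:
    LIB_IMP_ERR = traceback.format_exc()
    HAS_LIB = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib


def run_module():
    module = AnsibleModule(argument_spec={}, supports_check_mode=True)
    if not HAS_LIB:
        # fail_json() reports the dependency problem to the user cleanly
        # instead of letting the ImportError crash the module process.
        module.fail_json(msg=missing_required_lib('some_client_library'),
                         exception=LIB_IMP_ERR)
```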
+ """ + application = None + params = module.params + try: + application = account.application(params['application']) + except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc: + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, + module_cache=dict(account)) + + if application is None: + err_fmt = 'There is no \'{0}\' application for {1}. Make sure what ' \ + 'correct application name has been passed. If application ' \ + 'doesn\'t exist you can create it on admin.pubnub.com.' + email = account.owner.email + module.fail_json(msg=err_fmt.format(params['application'], email), + changed=account.changed, module_cache=dict(account)) + + return application + + +def pubnub_keyset(module, account, application): + """Retrieve reference on target keyset from application model. + + NOTE: In case if there is no keyset with specified name, module will + exit with error. + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model which will be + used in case of error to export cached data. + :type application: Application + :param application: Reference on PubNub application model from which + reference on keyset should be fetched. + + :rtype: Keyset + :return: Reference on initialized and ready to use keyset model. + """ + params = module.params + keyset = application.keyset(params['keyset']) + if keyset is None: + err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \ + 'sure what correct keyset name has been passed. If keyset ' \ + 'doesn\'t exist you can create it on admin.pubnub.com.' + module.fail_json(msg=err_fmt.format(params['keyset'], + application.name), + changed=account.changed, module_cache=dict(account)) + + return keyset + + +def pubnub_block(module, account, keyset): + """Retrieve reference on target keyset from application model. + + NOTE: In case if there is no block with specified name and module + configured to start/stop it, module will exit with error. + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model which will be used in + case of error to export cached data. + :type keyset: Keyset + :param keyset: Reference on keyset model from which reference on block + should be fetched. + + :rtype: Block + :return: Reference on initialized and ready to use keyset model. + """ + block = None + params = module.params + try: + block = keyset.block(params['name']) + except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc: + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, module_cache=dict(account)) + + # Report error because block doesn't exists and at the same time + # requested to start/stop. 
+
+
+def pubnub_block(module, account, keyset):
+    """Retrieve reference on target block from keyset model.
+
+    NOTE: If there is no block with the specified name and the module is
+    configured to start/stop it, the module exits with an error.
+    :type module:  AnsibleModule
+    :param module: Reference on module which contains module launch
+                   information and status report methods.
+    :type account:  Account
+    :param account: Reference on PubNub account model which is used in
+                    case of error to export cached data.
+    :type keyset:  Keyset
+    :param keyset: Reference on keyset model from which the reference on
+                   the block should be fetched.
+
+    :rtype:  Block
+    :return: Reference on initialized and ready to use block model.
+    """
+    block = None
+    params = module.params
+    try:
+        block = keyset.block(params['name'])
+    except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+        exc_msg = _failure_title_from_exception(exc)
+        exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+        module.fail_json(msg=exc_msg, description=exc_descr,
+                         changed=account.changed, module_cache=dict(account))
+
+    # Report an error because the block does not exist while it was
+    # requested to be started/stopped.
+    if block is None and params['state'] in ['started', 'stopped']:
+        block_name = params.get('name')
+        module.fail_json(msg="'{0}' block doesn't exist.".format(block_name),
+                         changed=account.changed, module_cache=dict(account))
+
+    if block is None and params['state'] == 'present':
+        block = Block(name=params.get('name'),
+                      description=params.get('description'))
+        keyset.add_block(block)
+
+    if block:
+        # Update block information if required.
+        if params.get('changes') and params['changes'].get('name'):
+            block.name = params['changes']['name']
+        if params.get('description'):
+            block.description = params.get('description')
+
+    return block
+
+
+def pubnub_event_handler(block, data):
+    """Retrieve reference on target event handler from block model.
+
+    :type block:  Block
+    :param block: Reference on block model from which the reference on
+                  event handlers should be fetched.
+    :type data:  dict
+    :param data: Reference on dictionary which contains information about
+                 the event handler and whether it should be created or not.
+
+    :rtype:  EventHandler
+    :return: Reference on initialized and ready to use event handler model.
+             'None' is returned in case there is no handler with the
+             specified name and no request to create it.
+    """
+    event_handler = block.event_handler(data['name'])
+
+    # Prepare payload for event handler update.
+    changed_name = (data.pop('changes').get('name')
+                    if 'changes' in data else None)
+    name = data.get('name') or changed_name
+    channels = data.get('channels')
+    event = data.get('event')
+    code = _content_of_file_at_path(data.get('src'))
+    state = data.get('state') or 'present'
+
+    # Create event handler if required.
+    if event_handler is None and state == 'present':
+        event_handler = EventHandler(name=name, channels=channels, event=event,
+                                     code=code)
+        block.add_event_handler(event_handler)
+
+    # Update event handler if required.
+    if event_handler is not None and state == 'present':
+        if name is not None:
+            event_handler.name = name
+        if channels is not None:
+            event_handler.channels = channels
+        if event is not None:
+            event_handler.event = event
+        if code is not None:
+            event_handler.code = code
+
+    return event_handler
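`pubnub_event_handler` above is essentially an upsert: look the handler up, create it when absent and `state=present`, then overwrite only the fields that were supplied. A dependency-free sketch of that shape (the `upsert` helper and dictionary layout are invented for illustration; as in the module, removal is left to the caller):

```python
# Generic "fetch -> create if missing -> update supplied fields" sketch.
def upsert(collection, data):
    """collection: dict of name -> attributes; data: desired entry."""
    changes = data.pop("changes", {})
    name = data.get("name")
    desired_state = data.get("state", "present")

    entry = collection.get(name)
    if entry is None and desired_state == "present":
        entry = collection[name] = {}
    if entry is not None and desired_state == "present":
        # Only overwrite fields the caller actually provided.
        for key in ("channels", "event", "code"):
            if data.get(key) is not None:
                entry[key] = data[key]
        new_name = changes.get("name")
        if new_name:
            collection[new_name] = collection.pop(name)
    return entry


handlers = {}
upsert(handlers, {"name": "h1", "channels": "chat", "event": "js-before-publish"})
upsert(handlers, {"name": "h1", "changes": {"name": "h1-renamed"}})
assert "h1-renamed" in handlers and handlers["h1-renamed"]["channels"] == "chat"
```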
+
+
+def _failure_title_from_exception(exception):
+    """Compose a human-readable title for a module error.
+
+    The title is based on status codes if they have been provided.
+    :type exception:  exceptions.GeneralPubNubError
+    :param exception: Reference on exception for which the title should be
+                      composed.
+
+    :rtype:  str
+    :return: Reference on error title which should be shown on module
+             failure.
+    """
+    title = 'General REST API access error.'
+    if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+        title = 'Authorization error: missing credentials.'
+    elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+        title = 'Authorization error: wrong credentials.'
+    elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+        title = 'API access error: insufficient access rights.'
+    elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+        title = 'API access error: time token expired.'
+    elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+        title = 'Block creation failed: a block with the same name already exists.'
+    elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+        title = 'Unable to fetch the list of blocks for the keyset.'
+    elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+        title = 'Block creation failed.'
+    elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+        title = 'Block update failed.'
+    elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+        title = 'Block removal failed.'
+    elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+        title = 'Block start/stop failed.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+        title = 'Event handler creation failed: missing fields.'
+    elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+        title = 'Event handler creation failed: an event handler with the same name already exists.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+        title = 'Event handler creation failed.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+        title = 'Event handler update failed.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+        title = 'Event handler removal failed.'
+
+    return title
+
+
+def _content_of_file_at_path(path):
+    """Read file content.
+
+    Try to read the content of the file at the specified path.
+    :type path:  str
+    :param path: Full path to the location of the file which should be read.
+    :rtype:  str
+    :return: File content or 'None'.
+    """
+    content = None
+    if path and os.path.exists(path):
+        with open(path, mode="rt") as opened_file:
+            b_content = opened_file.read()
+            try:
+                content = to_text(b_content, errors='surrogate_or_strict')
+            except UnicodeError:
+                pass
+
+    return content
+
+
+def main():
+    fields = dict(
+        email=dict(default='', type='str'),
+        password=dict(default='', type='str', no_log=True),
+        account=dict(default='', type='str'),
+        application=dict(required=True, type='str'),
+        keyset=dict(required=True, type='str', no_log=False),
+        state=dict(default='present', type='str',
+                   choices=['started', 'stopped', 'present', 'absent']),
+        name=dict(required=True, type='str'),
+        description=dict(type='str'),
+        event_handlers=dict(default=list(), type='list', elements='dict'),
+        changes=dict(default=dict(), type='dict'),
+        cache=dict(default=dict(), type='dict'),
+        validate_certs=dict(default=True, type='bool'))
+    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+    if not HAS_PUBNUB_BLOCKS_CLIENT:
+        module.fail_json(msg='pubnub_blocks_client is required for this module.')
+
+    params = module.params
+
+    # Authorize user.
+    user = pubnub_user(module)
+    # Initialize PubNub account instance.
+    account = pubnub_account(module, user=user)
+    # Try to fetch the application the module should work with.
+    application = pubnub_application(module, account=account)
+    # Try to fetch the keyset the module should work with.
+    keyset = pubnub_keyset(module, account=account, application=application)
+    # Try to fetch the block the module should work with.
+    block = pubnub_block(module, account=account, keyset=keyset)
+    is_new_block = block is not None and block.uid == -1
+
+    # Check whether the block should be removed or not.
+    if block is not None and params['state'] == 'absent':
+        keyset.remove_block(block)
+        block = None
+
+    if block is not None:
+        # Update block information if required.
+        if params.get('changes') and params['changes'].get('name'):
+            block.name = params['changes']['name']
+
+        # Process changes to event handlers.
+        for event_handler_data in params.get('event_handlers') or list():
+            state = event_handler_data.get('state') or 'present'
+            event_handler = pubnub_event_handler(data=event_handler_data,
+                                                 block=block)
+            if state == 'absent' and event_handler:
+                block.delete_event_handler(event_handler)
+
+    # Update block operation state if required.
+ if block and not is_new_block: + if params['state'] == 'started': + block.start() + elif params['state'] == 'stopped': + block.stop() + + # Save current account state. + if not module.check_mode: + try: + account.save() + except (exceptions.APIAccessError, exceptions.KeysetError, + exceptions.BlockError, exceptions.EventHandlerError, + exceptions.GeneralPubNubError) as exc: + module_cache = dict(account) + module_cache.update(dict(pnm_user=dict(user))) + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, + module_cache=module_cache) + + # Report module execution results. + module_cache = dict(account) + module_cache.update(dict(pnm_user=dict(user))) + changed_will_change = account.changed or account.will_change + module.exit_json(changed=changed_will_change, module_cache=module_cache) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pulp_repo.py b/plugins/modules/pulp_repo.py deleted file mode 120000 index b121617b55..0000000000 --- a/plugins/modules/pulp_repo.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/pulp_repo.py \ No newline at end of file diff --git a/plugins/modules/pulp_repo.py b/plugins/modules/pulp_repo.py new file mode 100644 index 0000000000..5486c56231 --- /dev/null +++ b/plugins/modules/pulp_repo.py @@ -0,0 +1,720 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Joe Adams <@sysadmind> +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pulp_repo +author: "Joe Adams (@sysadmind)" +short_description: Add or remove Pulp repos from a remote host +description: + - Add or remove Pulp repos from a remote host. + - Note, this is for Pulp 2 only. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + add_export_distributor: + description: + - Whether or not to add the export distributor to new C(rpm) repositories. + type: bool + default: false + feed: + description: + - Upstream feed URL to receive updates from. + type: str + force_basic_auth: + description: + - C(httplib2), the library used by the M(ansible.builtin.uri) module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + fail. This option forces the sending of the Basic authentication header upon initial request. + type: bool + default: false + generate_sqlite: + description: + - Boolean flag to indicate whether sqlite files should be generated during a repository publish. + required: false + type: bool + default: false + feed_ca_cert: + description: + - CA certificate string used to validate the feed source SSL certificate. This can be the file content or the path to + the file. + type: str + aliases: [importer_ssl_ca_cert] + feed_client_cert: + description: + - Certificate used as the client certificate when synchronizing the repository. This is used to communicate authentication + information to the feed source. The value to this option must be the full path to the certificate. The specified file + may be the certificate itself or a single file containing both the certificate and private key. This can be the file + content or the path to the file. 
+ type: str + aliases: [importer_ssl_client_cert] + feed_client_key: + description: + - Private key to the certificate specified in O(feed_client_cert), assuming it is not included in the certificate file + itself. This can be the file content or the path to the file. + type: str + aliases: [importer_ssl_client_key] + name: + description: + - Name of the repo to add or remove. This correlates to repo-id in Pulp. + required: true + type: str + aliases: [repo] + proxy_host: + description: + - Proxy URL setting for the pulp repository importer. This is in the format V(scheme://host). + required: false + default: + type: str + proxy_port: + description: + - Proxy port setting for the pulp repository importer. + required: false + default: + type: str + proxy_username: + description: + - Proxy username for the pulp repository importer. + required: false + default: + type: str + proxy_password: + description: + - Proxy password for the pulp repository importer. + required: false + default: + type: str + publish_distributor: + description: + - Distributor to use when O(state=publish). The default is to publish all distributors. + type: str + pulp_host: + description: + - URL of the pulp server to connect to. + default: https://127.0.0.1 + type: str + relative_url: + description: + - Relative URL for the local repository. It's required when state=present. + type: str + repo_type: + description: + - Repo plugin type to use (that is, V(rpm), V(docker)). + default: rpm + type: str + repoview: + description: + - Whether to generate repoview files for a published repository. Setting this to V(true) automatically activates O(generate_sqlite). + required: false + type: bool + default: false + serve_http: + description: + - Make the repo available over HTTP. + type: bool + default: false + serve_https: + description: + - Make the repo available over HTTPS. + type: bool + default: true + state: + description: + - The repo state. A state of V(sync) queues a sync of the repo. This is asynchronous but not delayed like a scheduled + sync. A state of V(publish) uses the repository's distributor to publish the content. + default: present + choices: ["present", "absent", "sync", "publish"] + type: str + url_password: + description: + - The password for use in HTTP basic authentication to the pulp API. If the O(url_username) parameter is not specified, + the O(url_password) parameter is not used. + url_username: + description: + - The username for use in HTTP basic authentication to the pulp API. + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true + wait_for_completion: + description: + - Wait for asynchronous tasks to complete before returning. + type: bool + default: false +notes: + - This module can currently only create distributors and importers on rpm repositories. Contributions to support other repo + types are welcome. 
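The three `feed_*` certificate options above accept either literal PEM content or a path to a file holding it. The module inlines this normalization later in `main()`; a small sketch of the idea as a standalone helper (the name `content_or_path` is invented for illustration):

```python
import os


def content_or_path(value):
    """Return file content when 'value' is an existing path, else 'value'."""
    if value is None:
        return None
    candidate = os.path.abspath(value)
    if os.path.isfile(candidate):
        with open(candidate) as handle:
            return handle.read()
    # Not a file on disk: treat the value as the content itself
    # (for example, an inline PEM blob).
    return value
```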
+extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Create a new repo with name 'my_repo' + community.general.pulp_repo: + name: my_repo + relative_url: my/repo + state: present + +- name: Create a repo with a feed and a relative URL + community.general.pulp_repo: + name: my_centos_updates + repo_type: rpm + feed: http://mirror.centos.org/centos/6/updates/x86_64/ + relative_url: centos/6/updates + url_username: admin + url_password: admin + force_basic_auth: true + state: present + +- name: Remove a repo from the pulp server + community.general.pulp_repo: + name: my_old_repo + repo_type: rpm + state: absent +""" + +RETURN = r""" +repo: + description: Name of the repo that the action was performed on. + returned: success + type: str + sample: my_repo +""" + +import json +import os +from time import sleep + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.urls import url_argument_spec + + +class pulp_server(object): + """ + Class to interact with a Pulp server + """ + + def __init__(self, module, pulp_host, repo_type, wait_for_completion=False): + self.module = module + self.host = pulp_host + self.repo_type = repo_type + self.repo_cache = dict() + self.wait_for_completion = wait_for_completion + + def check_repo_exists(self, repo_id): + try: + self.get_repo_config_by_id(repo_id) + except IndexError: + return False + else: + return True + + def compare_repo_distributor_config(self, repo_id, **kwargs): + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + for key, value in kwargs.items(): + if key not in distributor['config'].keys(): + return False + + if not distributor['config'][key] == value: + return False + + return True + + def compare_repo_importer_config(self, repo_id, **kwargs): + repo_config = self.get_repo_config_by_id(repo_id) + + for importer in repo_config['importers']: + for key, value in kwargs.items(): + if value is not None: + if key not in importer['config'].keys(): + return False + + if not importer['config'][key] == value: + return False + + return True + + def create_repo( + self, + repo_id, + relative_url, + feed=None, + generate_sqlite=False, + serve_http=False, + serve_https=True, + proxy_host=None, + proxy_port=None, + proxy_username=None, + proxy_password=None, + repoview=False, + ssl_ca_cert=None, + ssl_client_cert=None, + ssl_client_key=None, + add_export_distributor=False + ): + url = "%s/pulp/api/v2/repositories/" % self.host + data = dict() + data['id'] = repo_id + data['distributors'] = [] + + if self.repo_type == 'rpm': + yum_distributor = dict() + yum_distributor['distributor_id'] = "yum_distributor" + yum_distributor['distributor_type_id'] = "yum_distributor" + yum_distributor['auto_publish'] = True + yum_distributor['distributor_config'] = dict() + yum_distributor['distributor_config']['http'] = serve_http + yum_distributor['distributor_config']['https'] = serve_https + yum_distributor['distributor_config']['relative_url'] = relative_url + yum_distributor['distributor_config']['repoview'] = repoview + yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview + data['distributors'].append(yum_distributor) + + if add_export_distributor: + export_distributor = dict() + export_distributor['distributor_id'] = "export_distributor" + export_distributor['distributor_type_id'] = "export_distributor" + 
export_distributor['auto_publish'] = False + export_distributor['distributor_config'] = dict() + export_distributor['distributor_config']['http'] = serve_http + export_distributor['distributor_config']['https'] = serve_https + export_distributor['distributor_config']['relative_url'] = relative_url + export_distributor['distributor_config']['repoview'] = repoview + export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview + data['distributors'].append(export_distributor) + + data['importer_type_id'] = "yum_importer" + data['importer_config'] = dict() + + if feed: + data['importer_config']['feed'] = feed + + if proxy_host: + data['importer_config']['proxy_host'] = proxy_host + + if proxy_port: + data['importer_config']['proxy_port'] = proxy_port + + if proxy_username: + data['importer_config']['proxy_username'] = proxy_username + + if proxy_password: + data['importer_config']['proxy_password'] = proxy_password + + if ssl_ca_cert: + data['importer_config']['ssl_ca_cert'] = ssl_ca_cert + + if ssl_client_cert: + data['importer_config']['ssl_client_cert'] = ssl_client_cert + + if ssl_client_key: + data['importer_config']['ssl_client_key'] = ssl_client_key + + data['notes'] = { + "_repo-type": "rpm-repo" + } + + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 201: + self.module.fail_json( + msg="Failed to create repo.", + status_code=info['status'], + response=info['msg'], + url=url) + else: + return True + + def delete_repo(self, repo_id): + url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id) + response, info = fetch_url(self.module, url, data='', method='DELETE') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to delete repo.", + status_code=info['status'], + response=info['msg'], + url=url) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def get_repo_config_by_id(self, repo_id): + if repo_id not in self.repo_cache.keys(): + repo_array = [x for x in self.repo_list if x['id'] == repo_id] + self.repo_cache[repo_id] = repo_array[0] + + return self.repo_cache[repo_id] + + def publish_repo(self, repo_id, publish_distributor): + url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id) + + # If there's no distributor specified, we will publish them all + if publish_distributor is None: + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + data = dict() + data['id'] = distributor['id'] + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to publish the repo.", + status_code=info['status'], + response=info['msg'], + url=url, + distributor=distributor['id']) + else: + data = dict() + data['id'] = publish_distributor + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to publish the repo", + status_code=info['status'], + response=info['msg'], + url=url, + distributor=publish_distributor) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def sync_repo(self, repo_id): + url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id) + response, info = fetch_url(self.module, url, data='', method='POST') + + if info['status'] != 202: + self.module.fail_json( + 
msg="Failed to schedule a sync of the repo.", + status_code=info['status'], + response=info['msg'], + url=url) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def update_repo_distributor_config(self, repo_id, **kwargs): + url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id) + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + distributor_url = "%s%s/" % (url, distributor['id']) + data = dict() + data['distributor_config'] = dict() + + for key, value in kwargs.items(): + data['distributor_config'][key] = value + + response, info = fetch_url( + self.module, + distributor_url, + data=json.dumps(data), + method='PUT') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to set the relative url for the repository.", + status_code=info['status'], + response=info['msg'], + url=url) + + def update_repo_importer_config(self, repo_id, **kwargs): + url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id) + data = dict() + importer_config = dict() + + for key, value in kwargs.items(): + if value is not None: + importer_config[key] = value + + data['importer_config'] = importer_config + + if self.repo_type == 'rpm': + data['importer_type_id'] = "yum_importer" + + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to set the repo importer configuration", + status_code=info['status'], + response=info['msg'], + importer_config=importer_config, + url=url) + + def set_repo_list(self): + url = "%s/pulp/api/v2/repositories/?details=true" % self.host + response, info = fetch_url(self.module, url, method='GET') + + if info['status'] != 200: + self.module.fail_json( + msg="Request failed", + status_code=info['status'], + response=info['msg'], + url=url) + + self.repo_list = json.load(response) + + def verify_tasks_completed(self, response_dict): + for task in response_dict['spawned_tasks']: + task_url = "%s%s" % (self.host, task['_href']) + + while True: + response, info = fetch_url( + self.module, + task_url, + data='', + method='GET') + + if info['status'] != 200: + self.module.fail_json( + msg="Failed to check async task status.", + status_code=info['status'], + response=info['msg'], + url=task_url) + + task_dict = json.load(response) + + if task_dict['state'] == 'finished': + return True + + if task_dict['state'] == 'error': + self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error']) + + sleep(2) + + +def main(): + argument_spec = url_argument_spec() + argument_spec.update( + add_export_distributor=dict(default=False, type='bool'), + feed=dict(), + generate_sqlite=dict(default=False, type='bool'), + feed_ca_cert=dict(aliases=['importer_ssl_ca_cert']), + feed_client_cert=dict(aliases=['importer_ssl_client_cert']), + feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True), + name=dict(required=True, aliases=['repo']), + proxy_host=dict(), + proxy_port=dict(), + proxy_username=dict(), + proxy_password=dict(no_log=True), + publish_distributor=dict(), + pulp_host=dict(default="https://127.0.0.1"), + relative_url=dict(), + repo_type=dict(default="rpm"), + repoview=dict(default=False, type='bool'), + serve_http=dict(default=False, type='bool'), + serve_https=dict(default=True, type='bool'), + state=dict( + default="present", + choices=['absent', 'present', 'sync', 'publish']), + 
wait_for_completion=dict(default=False, type="bool")) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + add_export_distributor = module.params['add_export_distributor'] + feed = module.params['feed'] + generate_sqlite = module.params['generate_sqlite'] + importer_ssl_ca_cert = module.params['feed_ca_cert'] + importer_ssl_client_cert = module.params['feed_client_cert'] + importer_ssl_client_key = module.params['feed_client_key'] + proxy_host = module.params['proxy_host'] + proxy_port = module.params['proxy_port'] + proxy_username = module.params['proxy_username'] + proxy_password = module.params['proxy_password'] + publish_distributor = module.params['publish_distributor'] + pulp_host = module.params['pulp_host'] + relative_url = module.params['relative_url'] + repo = module.params['name'] + repo_type = module.params['repo_type'] + repoview = module.params['repoview'] + serve_http = module.params['serve_http'] + serve_https = module.params['serve_https'] + state = module.params['state'] + wait_for_completion = module.params['wait_for_completion'] + + if (state == 'present') and (not relative_url): + module.fail_json(msg="When state is present, relative_url is required.") + + # Ensure that the importer_ssl_* is the content and not a file path + if importer_ssl_ca_cert is not None: + importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) + if os.path.isfile(importer_ssl_ca_cert_file_path): + with open(importer_ssl_ca_cert_file_path, 'r') as importer_ssl_ca_cert_file_object: + importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() + + if importer_ssl_client_cert is not None: + importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) + if os.path.isfile(importer_ssl_client_cert_file_path): + with open(importer_ssl_client_cert_file_path, 'r') as importer_ssl_client_cert_file_object: + importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() + + if importer_ssl_client_key is not None: + importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) + if os.path.isfile(importer_ssl_client_key_file_path): + with open(importer_ssl_client_key_file_path, 'r') as importer_ssl_client_key_file_object: + importer_ssl_client_key = importer_ssl_client_key_file_object.read() + + server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) + server.set_repo_list() + repo_exists = server.check_repo_exists(repo) + + changed = False + + if state == 'absent' and repo_exists: + if not module.check_mode: + server.delete_repo(repo) + + changed = True + + if state == 'sync': + if not repo_exists: + module.fail_json(msg="Repository was not found. The repository can not be synced.") + + if not module.check_mode: + server.sync_repo(repo) + + changed = True + + if state == 'publish': + if not repo_exists: + module.fail_json(msg="Repository was not found. 
The repository can not be published.") + + if not module.check_mode: + server.publish_repo(repo, publish_distributor) + + changed = True + + if state == 'present': + if not repo_exists: + if not module.check_mode: + server.create_repo( + repo_id=repo, + relative_url=relative_url, + feed=feed, + generate_sqlite=generate_sqlite, + serve_http=serve_http, + serve_https=serve_https, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + repoview=repoview, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key, + add_export_distributor=add_export_distributor) + + changed = True + + else: + # Check to make sure all the settings are correct + # The importer config gets overwritten on set and not updated, so + # we set the whole config at the same time. + if not server.compare_repo_importer_config( + repo, + feed=feed, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key + ): + if not module.check_mode: + server.update_repo_importer_config( + repo, + feed=feed, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key) + + changed = True + + if relative_url is not None: + if not server.compare_repo_distributor_config( + repo, + relative_url=relative_url + ): + if not module.check_mode: + server.update_repo_distributor_config( + repo, + relative_url=relative_url) + + changed = True + + if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite): + if not module.check_mode: + server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite) + + changed = True + + if not server.compare_repo_distributor_config(repo, repoview=repoview): + if not module.check_mode: + server.update_repo_distributor_config(repo, repoview=repoview) + + changed = True + + if not server.compare_repo_distributor_config(repo, http=serve_http): + if not module.check_mode: + server.update_repo_distributor_config(repo, http=serve_http) + + changed = True + + if not server.compare_repo_distributor_config(repo, https=serve_https): + if not module.check_mode: + server.update_repo_distributor_config(repo, https=serve_https) + + changed = True + + module.exit_json(changed=changed, repo=repo) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py deleted file mode 120000 index 44bb61dc79..0000000000 --- a/plugins/modules/puppet.py +++ /dev/null @@ -1 +0,0 @@ -./system/puppet.py \ No newline at end of file diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py new file mode 100644 index 0000000000..60500f2831 --- /dev/null +++ b/plugins/modules/puppet.py @@ -0,0 +1,302 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: puppet +short_description: Runs puppet +description: + - Runs C(puppet) agent or apply in a reliable manner. 
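Before moving on: the `state=present` branch of the `pulp_repo` module above repeats one idempotency pattern per setting, comparing the live distributor or importer configuration with the desired values and only issuing an update, skipped in check mode, when they differ. A dependency-free sketch of that loop (names invented for illustration):

```python
# Compare-then-update idempotency sketch, mirroring the pulp_repo pattern.
def reconcile(live_config, desired, check_mode=False):
    """Return (changed, updates) for the keys that need changing."""
    updates = {}
    for key, value in desired.items():
        if live_config.get(key) != value:
            updates[key] = value
    if updates and not check_mode:
        live_config.update(updates)  # stand-in for the PUT request
    return bool(updates), updates


live = {"relative_url": "centos/6", "http": False, "https": True}
changed, delta = reconcile(live, {"relative_url": "centos/7", "https": True})
assert changed and delta == {"relative_url": "centos/7"}
```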
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + timeout: + description: + - How long to wait for C(puppet) to finish. + type: str + default: 30m + puppetmaster: + description: + - The hostname of the puppetmaster to contact. + type: str + modulepath: + description: + - Path to an alternate location for puppet modules. + type: str + manifest: + description: + - Path to the manifest file to run puppet apply on. + type: str + noop: + description: + - Override puppet.conf noop mode. + - When V(true), run Puppet agent with C(--noop) switch set. + - When V(false), run Puppet agent with C(--no-noop) switch set. + - When unset (default), use default or puppet.conf value if defined. + type: bool + facts: + description: + - A dict of values to pass in as persistent external facter facts. + type: dict + facter_basename: + description: + - Basename of the facter output file. + type: str + default: ansible + environment: + description: + - Puppet environment to be used. + type: str + confdir: + description: + - Path to the directory containing the puppet.conf file. + type: str + version_added: 5.1.0 + logdest: + description: + - Where the puppet logs should go, if puppet apply is being used. + - V(all) goes to both C(console) and C(syslog). + - V(stdout) is deprecated and replaced by C(console). + type: str + choices: [all, stdout, syslog] + default: stdout + certname: + description: + - The name to use when handling certificates. + type: str + tags: + description: + - A list of puppet tags to be used. + type: list + elements: str + skip_tags: + description: + - A list of puppet tags to be excluded. + type: list + elements: str + version_added: 6.6.0 + execute: + description: + - Execute a specific piece of Puppet code. + - It has no effect with a puppetmaster. + type: str + use_srv_records: + description: + - Toggles use_srv_records flag. + type: bool + summarize: + description: + - Whether to print a transaction summary. + type: bool + default: false + waitforlock: + description: + - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting. + - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes + and V(h) for hours. + type: str + version_added: 9.0.0 + verbose: + description: + - Print extra information. + type: bool + default: false + debug: + description: + - Enable full debugging. + type: bool + default: false + show_diff: + description: + - Whether to print file changes details. + type: bool + default: false + environment_lang: + description: + - The lang environment to use when running the puppet agent. + - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output. + - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the + selected locale is supported on the system the puppet agent runs on. + - Starting with community.general 9.1.0, you can use the value V(auto) and the module tries to determine the best parseable + locale to use. 
+ type: str + default: C + version_added: 8.6.0 +requirements: + - puppet +author: + - Monty Taylor (@emonty) +""" + +EXAMPLES = r""" +- name: Run puppet agent and fail if anything goes wrong + community.general.puppet: + +- name: Run puppet and timeout in 5 minutes + community.general.puppet: + timeout: 5m + +- name: Run puppet using a different environment + community.general.puppet: + environment: testing + +- name: Run puppet using a specific certname + community.general.puppet: + certname: agent01.example.com + +- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster + community.general.puppet: + execute: include ::mymodule + +- name: Run puppet using a specific tags + community.general.puppet: + tags: + - update + - nginx + skip_tags: + - service + +- name: Wait 30 seconds for any current puppet runs to finish + community.general.puppet: + waitforlock: 30 + +- name: Wait 5 minutes for any current puppet runs to finish + community.general.puppet: + waitforlock: 5m + +- name: Run puppet agent in noop mode + community.general.puppet: + noop: true + +- name: Run a manifest with debug, log to both syslog and console, specify module path + community.general.puppet: + modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules + logdest: all + manifest: /var/lib/example/puppet_step_config.pp +""" + +import json +import os +import stat + +import ansible_collections.community.general.plugins.module_utils.puppet as puppet_utils + +from ansible.module_utils.basic import AnsibleModule + + +def _write_structured_data(basedir, basename, data): + if not os.path.exists(basedir): + os.makedirs(basedir) + file_path = os.path.join(basedir, "{0}.json".format(basename)) + # This is more complex than you might normally expect because we want to + # open the file with only u+rw set. Also, we use the stat constants + # because ansible still supports python 2.4 and the octal syntax changed + out_file = os.fdopen( + os.open( + file_path, os.O_CREAT | os.O_WRONLY, + stat.S_IRUSR | stat.S_IWUSR), 'w') + out_file.write(json.dumps(data).encode('utf8')) + out_file.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + timeout=dict(type='str', default='30m'), + puppetmaster=dict(type='str'), + modulepath=dict(type='str'), + manifest=dict(type='str'), + confdir=dict(type='str'), + noop=dict(type='bool'), + logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']), + # The following is not related to Ansible's diff; see https://github.com/ansible-collections/community.general/pull/3980#issuecomment-1005666154 + show_diff=dict(type='bool', default=False), + facts=dict(type='dict'), + facter_basename=dict(type='str', default='ansible'), + environment=dict(type='str'), + certname=dict(type='str'), + tags=dict(type='list', elements='str'), + skip_tags=dict(type='list', elements='str'), + execute=dict(type='str'), + summarize=dict(type='bool', default=False), + waitforlock=dict(type='str'), + debug=dict(type='bool', default=False), + verbose=dict(type='bool', default=False), + use_srv_records=dict(type='bool'), + environment_lang=dict(type='str', default='C'), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('puppetmaster', 'manifest'), + ('puppetmaster', 'manifest', 'execute'), + ('puppetmaster', 'modulepath'), + ], + ) + p = module.params + + if p['manifest']: + if not os.path.exists(p['manifest']): + module.fail_json( + msg="Manifest file %(manifest)s not found." 
% dict( + manifest=p['manifest'])) + + # Check if puppet is disabled here + if not p['manifest']: + puppet_utils.ensure_agent_enabled(module) + + if module.params['facts'] and not module.check_mode: + _write_structured_data( + puppet_utils.get_facter_dir(), + module.params['facter_basename'], + module.params['facts']) + + runner = puppet_utils.puppet_runner(module) + + if not p['manifest'] and not p['execute']: + args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records waitforlock" + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run() + else: + args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose waitforlock" + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) + + if rc == 0: + # success + module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr) + elif rc == 1: + # rc==1 could be because it is disabled + # rc==1 could also mean there was a compilation failure + disabled = "administratively disabled" in stdout + if disabled: + msg = "puppet is disabled" + else: + msg = "puppet did not run" + module.exit_json( + rc=rc, disabled=disabled, msg=msg, + error=True, stdout=stdout, stderr=stderr) + elif rc == 2: + # success with changes + module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr) + elif rc == 124: + # timeout + module.exit_json( + rc=rc, msg="%s timed out" % ctx.cmd, stdout=stdout, stderr=stderr) + else: + # failure + module.fail_json( + rc=rc, msg="%s failed with return code: %d" % (ctx.cmd, rc), + stdout=stdout, stderr=stderr) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py deleted file mode 120000 index e197f3fb8b..0000000000 --- a/plugins/modules/pushbullet.py +++ /dev/null @@ -1 +0,0 @@ -./notification/pushbullet.py \ No newline at end of file diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py new file mode 100644 index 0000000000..6c0d0d8770 --- /dev/null +++ b/plugins/modules/pushbullet.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: "Willy Barro (@willybarro)" +requirements: [pushbullet.py] +module: pushbullet +short_description: Sends notifications to Pushbullet +description: + - This module sends push notifications through Pushbullet to channels or devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + type: str + description: + - Push bullet API token. + required: true + channel: + type: str + description: + - The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet + page. + device: + type: str + description: + - The device NAME you wish to send a push notification, as seen on the Pushbullet main page. + push_type: + type: str + description: + - Thing you wish to push. + default: note + choices: ["note", "link"] + title: + type: str + description: + - Title of the notification. + required: true + body: + type: str + description: + - Body of the notification, for example details of the fault you are alerting. 
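Looking back at the exit-code handling that closes the `puppet` module above: it maps the agent's return codes onto Ansible results. A standalone sketch of that mapping (the function name is invented; the code meanings follow the module's own branches):

```python
# Table of the puppet module's exit-code interpretation:
#   0   = run succeeded with no changes
#   2   = run succeeded and applied changes
#   1   = agent disabled, or the run/compilation failed
#   124 = the external timeout wrapper fired (timeout(1) convention)
def interpret_puppet_rc(rc, stdout):
    if rc == 0:
        return {"changed": False}
    if rc == 2:
        return {"changed": True}
    if rc == 1:
        disabled = "administratively disabled" in stdout
        return {"error": True,
                "msg": "puppet is disabled" if disabled else "puppet did not run"}
    if rc == 124:
        return {"failed": True, "msg": "puppet run timed out"}
    return {"failed": True, "msg": "puppet failed with return code %d" % rc}


assert interpret_puppet_rc(2, "")["changed"] is True
assert "disabled" in interpret_puppet_rc(1, "administratively disabled")["msg"]
```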
+ url: + type: str + description: + - URL field, used when O(push_type=link). +notes: + - Requires C(pushbullet.py) Python package on the remote host. You can install it through C(pip) with C(pip install pushbullet.py). + - See U(https://github.com/randomchars/pushbullet.py). +""" + +EXAMPLES = r""" +- name: Sends a push notification to a device + community.general.pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + title: "You may see this on Google Chrome" + +- name: Sends a link to a device + community.general.pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + device: Chrome + push_type: link + title: Ansible Documentation + body: https://docs.ansible.com/ + +- name: Sends a push notification to a channel + community.general.pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + channel: my-awesome-channel + title: "Broadcasting a message to the #my-awesome-channel folks" + +- name: Sends a push notification with title and body to a channel + community.general.pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + channel: my-awesome-channel + title: ALERT! Signup service is down + body: Error rate on signup service is over 90% for more than 2 minutes +""" + +import traceback + +PUSHBULLET_IMP_ERR = None +try: + from pushbullet import PushBullet + from pushbullet.errors import InvalidKeyError, PushError +except ImportError: + PUSHBULLET_IMP_ERR = traceback.format_exc() + pushbullet_found = False +else: + pushbullet_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +# =========================================== +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(type='str', required=True, no_log=True), + channel=dict(type='str'), + device=dict(type='str'), + push_type=dict(type='str', default="note", choices=['note', 'link']), + title=dict(type='str', required=True), + body=dict(type='str'), + url=dict(type='str'), + ), + mutually_exclusive=( + ['channel', 'device'], + ), + supports_check_mode=True + ) + + api_key = module.params['api_key'] + channel = module.params['channel'] + device = module.params['device'] + push_type = module.params['push_type'] + title = module.params['title'] + body = module.params['body'] + url = module.params['url'] + + if not pushbullet_found: + module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR) + + # Init pushbullet + try: + pb = PushBullet(api_key) + target = None + except InvalidKeyError: + module.fail_json(msg="Invalid api_key") + + # Checks for channel/device + if device is None and channel is None: + module.fail_json(msg="You need to provide a channel or a device.") + + # Search for given device + if device is not None: + devices_by_nickname = {} + for d in pb.devices: + devices_by_nickname[d.nickname] = d + + if device in devices_by_nickname: + target = devices_by_nickname[device] + else: + module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) + + # Search for given channel + if channel is not None: + channels_by_tag = {} + for c in pb.channels: + channels_by_tag[c.channel_tag] = c + + if channel in channels_by_tag: + target = channels_by_tag[channel] + else: + module.fail_json(msg="Channel '%s' not found. 
Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) + + # If in check mode, exit saying that we succeeded + if module.check_mode: + module.exit_json(changed=False, msg="OK") + + # Send push notification + try: + if push_type == "link": + target.push_link(title, url, body) + else: + target.push_note(title, body) + module.exit_json(changed=False, msg="OK") + except PushError as e: + module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) + + module.fail_json(msg="An unknown error has occurred") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pushover.py b/plugins/modules/pushover.py deleted file mode 120000 index 2c596d76cc..0000000000 --- a/plugins/modules/pushover.py +++ /dev/null @@ -1 +0,0 @@ -./notification/pushover.py \ No newline at end of file diff --git a/plugins/modules/pushover.py b/plugins/modules/pushover.py new file mode 100644 index 0000000000..483eeae863 --- /dev/null +++ b/plugins/modules/pushover.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# Copyright (c) 2012, Jim Richardson +# Copyright (c) 2019, Bernd Arnold +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pushover +short_description: Send notifications through U(https://pushover.net) +description: + - Send notifications through pushover to subscriber list of devices and email addresses. Requires pushover app on devices. +notes: + - You need a pushover.net account to use this module. But no account is required to receive messages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + msg: + type: str + description: + - What message you wish to send. + required: true + app_token: + type: str + description: + - Pushover issued token identifying your pushover app. + required: true + user_key: + type: str + description: + - Pushover issued authentication key for your user. + required: true + title: + type: str + description: + - Message title. + required: false + pri: + type: str + description: + - Message priority (see U(https://pushover.net) for details). + required: false + default: '0' + choices: ['-2', '-1', '0', '1', '2'] + device: + type: str + description: + - A device the message should be sent to. Multiple devices can be specified, separated by a comma. + required: false + version_added: 1.2.0 + +author: + - "Jim Richardson (@weaselkeeper)" + - "Bernd Arnold (@wopfel)" +""" + +EXAMPLES = r""" +- name: Send notifications via pushover.net + community.general.pushover: + msg: '{{ inventory_hostname }} is acting strange ...' + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + delegate_to: localhost + +- name: Send notifications via pushover.net + community.general.pushover: + title: 'Alert!' 
+    msg: '{{ inventory_hostname }} has exploded in flames. It is now time to panic'
+    pri: 1
+    app_token: wxfdksl
+    user_key: baa5fe97f2c5ab3ca8f0bb59
+  delegate_to: localhost
+
+- name: Send notifications via pushover.net to a specific device
+  community.general.pushover:
+    msg: '{{ inventory_hostname }} has been lost somewhere'
+    app_token: wxfdksl
+    user_key: baa5fe97f2c5ab3ca8f0bb59
+    device: admins-iPhone
+  delegate_to: localhost
+"""
+
+from urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class Pushover(object):
+    ''' Small client object used to send notifications through the Pushover API '''
+    base_uri = 'https://api.pushover.net'
+
+    def __init__(self, module, user, token):
+        self.module = module
+        self.user = user
+        self.token = token
+
+    def run(self, priority, msg, title, device):
+        ''' Send the notification to the Pushover messages endpoint. '''
+
+        url = '%s/1/messages.json' % (self.base_uri)
+
+        # Build the POST payload for the Pushover message API
+        options = dict(user=self.user,
+                       token=self.token,
+                       priority=priority,
+                       message=msg)
+
+        if title is not None:
+            options = dict(options,
+                           title=title)
+
+        if device is not None:
+            options = dict(options,
+                           device=device)
+
+        data = urlencode(options)
+
+        headers = {"Content-type": "application/x-www-form-urlencoded"}
+        r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+        if info['status'] != 200:
+            raise Exception(info)
+
+        return r.read()
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            title=dict(type='str'),
+            msg=dict(required=True),
+            app_token=dict(required=True, no_log=True),
+            user_key=dict(required=True, no_log=True),
+            pri=dict(default='0', choices=['-2', '-1', '0', '1', '2']),
+            device=dict(type='str'),
+        ),
+    )
+
+    msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+    try:
+        response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device'])
+    except Exception:
+        module.fail_json(msg='Unable to send msg via pushover')
+
+    module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/python_requirements_info.py b/plugins/modules/python_requirements_info.py
deleted file mode 120000
index 51c1c85000..0000000000
--- a/plugins/modules/python_requirements_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/python_requirements_info.py
\ No newline at end of file
diff --git a/plugins/modules/python_requirements_info.py b/plugins/modules/python_requirements_info.py
new file mode 100644
index 0000000000..5409b848e4
--- /dev/null
+++ b/plugins/modules/python_requirements_info.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+  - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  dependencies:
+    type: list
+    elements: str
+    description:
+      - 'A list of version-likes or module names to check for installation.
Supported operators: C(<), C(>), C(<=), C(>=), + or C(==).' + - The bare module name like V(ansible), the module with a specific version like V(boto3==1.6.1), or a partial version + like V(requests>2) are all valid specifications. + default: [] +author: + - Will Thames (@willthames) + - Ryan Scott Brown (@ryansb) +""" + +EXAMPLES = r""" +- name: Show python lib/site paths + community.general.python_requirements_info: + +- name: Check for modern boto3 and botocore versions + community.general.python_requirements_info: + dependencies: + - boto3>1.6 + - botocore<2 +""" + +RETURN = r""" +python: + description: Path to the Python interpreter used. + returned: always + type: str + sample: /usr/local/opt/python@2/bin/python2.7 +python_version: + description: Version of Python. + returned: always + type: str + sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]" +python_version_info: + description: Breakdown version of Python. + returned: always + type: dict + contains: + major: + description: The C(major) component of the python interpreter version. + returned: always + type: int + sample: 3 + minor: + description: The C(minor) component of the python interpreter version. + returned: always + type: int + sample: 8 + micro: + description: The C(micro) component of the python interpreter version. + returned: always + type: int + sample: 10 + releaselevel: + description: The C(releaselevel) component of the python interpreter version. + returned: always + type: str + sample: final + serial: + description: The C(serial) component of the python interpreter version. + returned: always + type: int + sample: 0 + version_added: 4.2.0 +python_system_path: + description: List of paths Python is looking for modules in. + returned: always + type: list + sample: + - /usr/local/opt/python@2/site-packages/ + - /usr/lib/python/site-packages/ +valid: + description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) + is V(null). + returned: always + type: dict + sample: + boto3: + desired: + installed: 1.7.60 + botocore: + desired: botocore<2 + installed: 1.10.60 +mismatched: + description: A dictionary of dependencies that did not satisfy the desired version. + returned: always + type: dict + sample: + botocore: + desired: botocore>2 + installed: 1.10.60 +not_found: + description: A list of packages that could not be imported at all, and are not installed. 
+ returned: always + type: list + sample: + - boto4 + - requests +""" + +import re +import sys +import operator + +HAS_DISTUTILS = False +try: + import pkg_resources + from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + HAS_DISTUTILS = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule + +operations = { + '<=': operator.le, + '>=': operator.ge, + '<': operator.lt, + '>': operator.gt, + '==': operator.eq, +} + +python_version_info = dict( + major=sys.version_info[0], + minor=sys.version_info[1], + micro=sys.version_info[2], + releaselevel=sys.version_info[3], + serial=sys.version_info[4], +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dependencies=dict(type='list', elements='str', default=[]) + ), + supports_check_mode=True, + ) + if not HAS_DISTUTILS: + module.fail_json( + msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.', + python=sys.executable, + python_version=sys.version, + python_version_info=python_version_info, + python_system_path=sys.path, + ) + pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(?:(==|[><]=?)([0-9.]+))?$') + + results = dict( + not_found=[], + mismatched={}, + valid={}, + ) + + for dep in module.params['dependencies']: + match = pkg_dep_re.match(dep) + if not match: + module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep)) + pkg, op, version = match.groups() + if op is not None and op not in operations: + module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep)) + try: + existing = pkg_resources.get_distribution(pkg).version + except pkg_resources.DistributionNotFound: + # not there + results['not_found'].append(pkg) + continue + if op is None and version is None: + results['valid'][pkg] = { + 'installed': existing, + 'desired': None, + } + elif operations[op](LooseVersion(existing), LooseVersion(version)): + results['valid'][pkg] = { + 'installed': existing, + 'desired': dep, + } + else: + results['mismatched'][pkg] = { + 'installed': existing, + 'desired': dep, + } + + module.exit_json( + python=sys.executable, + python_version=sys.version, + python_version_info=python_version_info, + python_system_path=sys.path, + **results + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rax.py b/plugins/modules/rax.py deleted file mode 120000 index fac7e8b9c9..0000000000 --- a/plugins/modules/rax.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax.py \ No newline at end of file diff --git a/plugins/modules/rax_cbs.py b/plugins/modules/rax_cbs.py deleted file mode 120000 index 3427746f2a..0000000000 --- a/plugins/modules/rax_cbs.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_cbs.py \ No newline at end of file diff --git a/plugins/modules/rax_cbs_attachments.py b/plugins/modules/rax_cbs_attachments.py deleted file mode 120000 index f38201831e..0000000000 --- a/plugins/modules/rax_cbs_attachments.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_cbs_attachments.py \ No newline at end of file diff --git a/plugins/modules/rax_cdb.py b/plugins/modules/rax_cdb.py deleted file mode 120000 index 12f2c870b6..0000000000 --- a/plugins/modules/rax_cdb.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_cdb.py \ No newline at end of file diff --git a/plugins/modules/rax_cdb_database.py b/plugins/modules/rax_cdb_database.py deleted file mode 120000 index 888532e3fe..0000000000 
--- a/plugins/modules/rax_cdb_database.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_cdb_database.py \ No newline at end of file diff --git a/plugins/modules/rax_cdb_user.py b/plugins/modules/rax_cdb_user.py deleted file mode 120000 index 5d087a5610..0000000000 --- a/plugins/modules/rax_cdb_user.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_cdb_user.py \ No newline at end of file diff --git a/plugins/modules/rax_clb.py b/plugins/modules/rax_clb.py deleted file mode 120000 index efd291f024..0000000000 --- a/plugins/modules/rax_clb.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_clb.py \ No newline at end of file diff --git a/plugins/modules/rax_clb_nodes.py b/plugins/modules/rax_clb_nodes.py deleted file mode 120000 index b7b6e54f55..0000000000 --- a/plugins/modules/rax_clb_nodes.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_clb_nodes.py \ No newline at end of file diff --git a/plugins/modules/rax_clb_ssl.py b/plugins/modules/rax_clb_ssl.py deleted file mode 120000 index 996be59f6d..0000000000 --- a/plugins/modules/rax_clb_ssl.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_clb_ssl.py \ No newline at end of file diff --git a/plugins/modules/rax_dns.py b/plugins/modules/rax_dns.py deleted file mode 120000 index 4653e988f4..0000000000 --- a/plugins/modules/rax_dns.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_dns.py \ No newline at end of file diff --git a/plugins/modules/rax_dns_record.py b/plugins/modules/rax_dns_record.py deleted file mode 120000 index 38ce5b24ac..0000000000 --- a/plugins/modules/rax_dns_record.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_dns_record.py \ No newline at end of file diff --git a/plugins/modules/rax_facts.py b/plugins/modules/rax_facts.py deleted file mode 120000 index 49c2a762af..0000000000 --- a/plugins/modules/rax_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_facts.py \ No newline at end of file diff --git a/plugins/modules/rax_files.py b/plugins/modules/rax_files.py deleted file mode 120000 index 984fdbef4a..0000000000 --- a/plugins/modules/rax_files.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_files.py \ No newline at end of file diff --git a/plugins/modules/rax_files_objects.py b/plugins/modules/rax_files_objects.py deleted file mode 120000 index 7aa4c08f6e..0000000000 --- a/plugins/modules/rax_files_objects.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_files_objects.py \ No newline at end of file diff --git a/plugins/modules/rax_identity.py b/plugins/modules/rax_identity.py deleted file mode 120000 index b70ddf27a7..0000000000 --- a/plugins/modules/rax_identity.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_identity.py \ No newline at end of file diff --git a/plugins/modules/rax_keypair.py b/plugins/modules/rax_keypair.py deleted file mode 120000 index 3c0ac90ffb..0000000000 --- a/plugins/modules/rax_keypair.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_keypair.py \ No newline at end of file diff --git a/plugins/modules/rax_meta.py b/plugins/modules/rax_meta.py deleted file mode 120000 index 7d7d2b4fcc..0000000000 --- a/plugins/modules/rax_meta.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_meta.py \ No newline at end of file diff --git a/plugins/modules/rax_mon_alarm.py b/plugins/modules/rax_mon_alarm.py deleted file mode 120000 index 19988f8dc0..0000000000 --- a/plugins/modules/rax_mon_alarm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_mon_alarm.py \ No newline at end of file diff --git a/plugins/modules/rax_mon_check.py 
b/plugins/modules/rax_mon_check.py deleted file mode 120000 index eeb6c7f079..0000000000 --- a/plugins/modules/rax_mon_check.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_mon_check.py \ No newline at end of file diff --git a/plugins/modules/rax_mon_entity.py b/plugins/modules/rax_mon_entity.py deleted file mode 120000 index 4861e16f3a..0000000000 --- a/plugins/modules/rax_mon_entity.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_mon_entity.py \ No newline at end of file diff --git a/plugins/modules/rax_mon_notification.py b/plugins/modules/rax_mon_notification.py deleted file mode 120000 index 5a70dc3d16..0000000000 --- a/plugins/modules/rax_mon_notification.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_mon_notification.py \ No newline at end of file diff --git a/plugins/modules/rax_mon_notification_plan.py b/plugins/modules/rax_mon_notification_plan.py deleted file mode 120000 index 542fd3daf2..0000000000 --- a/plugins/modules/rax_mon_notification_plan.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_mon_notification_plan.py \ No newline at end of file diff --git a/plugins/modules/rax_network.py b/plugins/modules/rax_network.py deleted file mode 120000 index 60cda1c182..0000000000 --- a/plugins/modules/rax_network.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_network.py \ No newline at end of file diff --git a/plugins/modules/rax_queue.py b/plugins/modules/rax_queue.py deleted file mode 120000 index f40e412992..0000000000 --- a/plugins/modules/rax_queue.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_queue.py \ No newline at end of file diff --git a/plugins/modules/rax_scaling_group.py b/plugins/modules/rax_scaling_group.py deleted file mode 120000 index d38cb37430..0000000000 --- a/plugins/modules/rax_scaling_group.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_scaling_group.py \ No newline at end of file diff --git a/plugins/modules/rax_scaling_policy.py b/plugins/modules/rax_scaling_policy.py deleted file mode 120000 index da4f52fdc7..0000000000 --- a/plugins/modules/rax_scaling_policy.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/rackspace/rax_scaling_policy.py \ No newline at end of file diff --git a/plugins/modules/read_csv.py b/plugins/modules/read_csv.py deleted file mode 120000 index c51916e72d..0000000000 --- a/plugins/modules/read_csv.py +++ /dev/null @@ -1 +0,0 @@ -./files/read_csv.py \ No newline at end of file diff --git a/plugins/modules/read_csv.py b/plugins/modules/read_csv.py new file mode 100644 index 0000000000..e195029d03 --- /dev/null +++ b/plugins/modules/read_csv.py @@ -0,0 +1,219 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: read_csv +short_description: Read a CSV file +description: + - Read a CSV file and return a list or a dictionary, containing one dictionary per row. +author: + - Dag Wieers (@dagwieers) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - The CSV filename to read data from. + type: path + required: true + aliases: [filename] + key: + description: + - The column name used as a key for the resulting dictionary. + - If O(key) is unset, the module returns a list of dictionaries, where each dictionary is a row in the CSV file. 
+ type: str + dialect: + description: + - The CSV dialect to use when parsing the CSV file. + - Possible values include V(excel), V(excel-tab) or V(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + elements: str + unique: + description: + - Whether the O(key) used is expected to be unique. + type: bool + default: true + delimiter: + description: + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool +seealso: + - plugin: ansible.builtin.csvfile + plugin_type: lookup + description: Can be used to do selective lookups in CSV files from Jinja. +""" + +EXAMPLES = r""" +# Example CSV file with header +# +# name,uid,gid +# dag,500,500 +# jeroen,501,500 + +# Read a CSV file and access user 'dag' +- name: Read users from CSV file and return a dictionary + community.general.read_csv: + path: users.csv + key: name + register: users + delegate_to: localhost + +- ansible.builtin.debug: + msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}' + +# Read a CSV file and access the first item +- name: Read users from CSV file and return a list + community.general.read_csv: + path: users.csv + register: users + delegate_to: localhost + +- ansible.builtin.debug: + msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}' + +# Example CSV file without header and semi-colon delimiter +# +# dag;500;500 +# jeroen;501;500 + +# Read a CSV file without headers +- name: Read users from CSV file and return a list + community.general.read_csv: + path: users.csv + fieldnames: name,uid,gid + delimiter: ';' + register: users + delegate_to: localhost +""" + +RETURN = r""" +dict: + description: The CSV content as a dictionary. + returned: success + type: dict + sample: + dag: + name: dag + uid: 500 + gid: 500 + jeroen: + name: jeroen + uid: 501 + gid: 500 +list: + description: The CSV content as a list. 
+  returned: success
+  type: list
+  sample:
+    - name: dag
+      uid: 500
+      gid: 500
+    - name: jeroen
+      uid: 501
+      gid: 500
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
+                                                                            DialectNotAvailableError,
+                                                                            CustomDialectFailureError)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='path', required=True, aliases=['filename']),
+            dialect=dict(type='str', default='excel'),
+            key=dict(type='str', no_log=False),
+            fieldnames=dict(type='list', elements='str'),
+            unique=dict(type='bool', default=True),
+            delimiter=dict(type='str'),
+            skipinitialspace=dict(type='bool'),
+            strict=dict(type='bool'),
+        ),
+        supports_check_mode=True,
+    )
+
+    path = module.params['path']
+    dialect = module.params['dialect']
+    key = module.params['key']
+    fieldnames = module.params['fieldnames']
+    unique = module.params['unique']
+
+    dialect_params = {
+        "delimiter": module.params['delimiter'],
+        "skipinitialspace": module.params['skipinitialspace'],
+        "strict": module.params['strict'],
+    }
+
+    try:
+        dialect = initialize_dialect(dialect, **dialect_params)
+    except (CustomDialectFailureError, DialectNotAvailableError) as e:
+        module.fail_json(msg=to_native(e))
+
+    try:
+        with open(path, 'rb') as f:
+            data = f.read()
+    except (IOError, OSError) as e:
+        module.fail_json(msg="Unable to open file: %s" % to_native(e))
+
+    reader = read_csv(data, dialect, fieldnames)
+
+    if key and key not in reader.fieldnames:
+        module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+    data_dict = dict()
+    data_list = list()
+
+    if key is None:
+        try:
+            for row in reader:
+                data_list.append(row)
+        except CSVError as e:
+            module.fail_json(msg="Unable to process file: %s" % to_native(e))
+    else:
+        try:
+            for row in reader:
+                if unique and row[key] in data_dict:
+                    module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+                data_dict[row[key]] = row
+        except CSVError as e:
+            module.fail_json(msg="Unable to process file: %s" % to_native(e))
+
+    module.exit_json(dict=data_dict, list=data_list)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py
deleted file mode 120000
index 1d8cdd3110..0000000000
--- a/plugins/modules/redfish_command.py
+++ /dev/null
@@ -1 +0,0 @@
-./remote_management/redfish/redfish_command.py
\ No newline at end of file
diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py
new file mode 100644
index 0000000000..736d38d6c4
--- /dev/null
+++ b/plugins/modules/redfish_command.py
@@ -0,0 +1,1159 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+  - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action.
+  - Manages OOB controllers, for example reboot and log management.
+  - Manages OOB controller users, for example add, remove, and update.
+  - Manages system power, for example on, off, graceful and forced reboot.
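As context for the description above: the action URIs this module builds follow the standard DMTF Redfish pattern, so a power command ultimately reduces to a single authenticated POST against the system's C(ComputerSystem.Reset) action. Below is a minimal sketch of that raw exchange, assuming a placeholder BMC address, credentials, and system ID; the module itself adds discovery, session handling, and error reporting on top.

```python
# Hypothetical sketch of the raw Redfish action behind a graceful restart.
# BASEURI, SYSTEM_ID, and the credentials are placeholders, not module code.
import base64
import json
import ssl
import urllib.request

BASEURI = "bmc.example.com"   # placeholder OOB controller address
SYSTEM_ID = "437XR1138R2"     # placeholder system resource ID

url = "https://%s/redfish/v1/Systems/%s/Actions/ComputerSystem.Reset" % (BASEURI, SYSTEM_ID)
payload = json.dumps({"ResetType": "GracefulRestart"}).encode()
auth = base64.b64encode(b"USERNAME:PASSWORD").decode()

req = urllib.request.Request(
    url,
    data=payload,
    method="POST",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Basic " + auth,
    },
)

# Lab BMCs often present self-signed certificates; production callers should
# verify them instead (compare the validate_certs and ca_path options).
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

with urllib.request.urlopen(req, context=ctx) as resp:
    print(resp.status)  # 2xx means the service accepted the reset action
```

The same pattern, with different action names and payloads, underlies the manager and chassis commands listed further down.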
+extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + session_uri: + description: + - URI of the session resource. + type: str + version_added: 2.3.0 + id: + required: false + aliases: [account_id] + description: + - ID of account to delete/modify. + - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the + POST request. + type: str + new_username: + required: false + aliases: [account_username] + description: + - Username of account to add/delete/modify. + type: str + new_password: + required: false + aliases: [account_password] + description: + - New password of account to add/modify. + type: str + roleid: + required: false + aliases: [account_roleid] + description: + - Role of account to add/modify. + type: str + account_types: + required: false + aliases: [account_accounttypes] + description: + - Array of account types to apply to a user account. + type: list + elements: str + version_added: '7.2.0' + oem_account_types: + required: false + aliases: [account_oemaccounttypes] + description: + - Array of OEM account types to apply to a user account. + type: list + elements: str + version_added: '7.2.0' + bootdevice: + required: false + description: + - Boot device when setting boot configuration. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. + type: int + default: 60 + boot_override_mode: + description: + - Boot mode when using an override. + type: str + choices: [Legacy, UEFI] + version_added: 3.5.0 + uefi_target: + required: false + description: + - UEFI boot target when bootdevice is "UefiTarget". + type: str + boot_next: + required: false + description: + - BootNext target when bootdevice is "UefiBootNext". + type: str + update_username: + required: false + aliases: [account_updatename] + description: + - New user name for updating account_username. + type: str + version_added: '0.2.0' + account_properties: + required: false + description: + - Properties of account service to update. + type: dict + default: {} + version_added: '0.2.0' + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + update_image_uri: + required: false + description: + - URI of the image for the update. + type: str + version_added: '0.2.0' + update_image_file: + required: false + description: + - Filename, with optional path, of the image for the update. + type: path + version_added: '7.1.0' + update_protocol: + required: false + description: + - Protocol for the update. 
+ type: str + version_added: '0.2.0' + update_targets: + required: false + description: + - List of target resource URIs to apply the update to. + type: list + elements: str + default: [] + version_added: '0.2.0' + update_creds: + required: false + description: + - Credentials for retrieving the update image. + type: dict + version_added: '0.2.0' + suboptions: + username: + required: false + description: + - Username for retrieving the update image. + type: str + password: + required: false + description: + - Password for retrieving the update image. + type: str + update_apply_time: + required: false + description: + - Time when to apply the update. + type: str + choices: + - Immediate + - OnReset + - AtMaintenanceWindowStart + - InMaintenanceWindowOnReset + - OnStartUpdateRequest + version_added: '6.1.0' + update_oem_params: + required: false + description: + - Properties for HTTP Multipart Push Updates. + type: dict + version_added: '7.5.0' + update_handle: + required: false + description: + - Handle to check the status of an update in progress. + type: str + version_added: '6.1.0' + update_custom_oem_header: + required: false + description: + - Optional OEM header, sent as separate form-data for the Multipart HTTP push update. + - The header shall start with "Oem" according to DMTF Redfish spec 12.6.2.2. + - For more details, see U(https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html). + - If set, then O(update_custom_oem_params) is required too. + type: str + version_added: '10.1.0' + update_custom_oem_params: + required: false + description: + - Custom OEM properties for HTTP Multipart Push updates. + - If set, then O(update_custom_oem_header) is required too. + - The properties are passed raw without any validation or conversion by Ansible. This means the content can be a file, + a string, or any other data. If the content is a dictionary that should be converted to JSON, then the content must + be converted to JSON before passing it to this module using the P(ansible.builtin.to_json#filter) filter. + type: raw + version_added: '10.1.0' + update_custom_oem_mime_type: + required: false + description: + - MIME Type for custom OEM properties for HTTP Multipart Push updates. + type: str + version_added: '10.1.0' + virtual_media: + required: false + description: + - Options for VirtualMedia commands. + type: dict + version_added: '0.2.0' + suboptions: + media_types: + required: false + description: + - List of media types appropriate for the image. + type: list + elements: str + default: [] + image_url: + required: false + description: + - URL of the image to insert or eject. + type: str + inserted: + required: false + description: + - Indicates that the image is treated as inserted on command completion. + type: bool + default: true + write_protected: + required: false + description: + - Indicates that the media is treated as write-protected. + type: bool + default: true + username: + required: false + description: + - Username for accessing the image URL. + type: str + password: + required: false + description: + - Password for accessing the image URL. + type: str + transfer_protocol_type: + required: false + description: + - Network protocol to use with the image. + type: str + transfer_method: + required: false + description: + - Transfer method to use with the image. + type: str + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header of C(PATCH) requests. 
+      - Only use this option to resolve a bad vendor implementation where C(If-Match) only matches the unquoted etag string.
+    type: bool
+    default: false
+    version_added: 3.7.0
+  bios_attributes:
+    required: false
+    description:
+      - BIOS attributes that need to be verified on the given server.
+    type: dict
+    version_added: 6.4.0
+  reset_to_defaults_mode:
+    description:
+      - Mode to apply when resetting to defaults.
+    type: str
+    choices: [ResetAll, PreserveNetworkAndUsers, PreserveNetwork]
+    version_added: 8.6.0
+  wait:
+    required: false
+    description:
+      - Block until the service is ready again.
+    type: bool
+    default: false
+    version_added: 9.1.0
+  wait_timeout:
+    required: false
+    description:
+      - How long to block until the service is ready again before giving up.
+    type: int
+    default: 120
+    version_added: 9.1.0
+  ciphers:
+    version_added: 9.2.0
+  validate_certs:
+    version_added: 10.6.0
+  ca_path:
+    version_added: 10.6.0
+
+author:
+  - "Jose Delarosa (@jose-delarosa)"
+  - "T S Kushal (@TSKushal)"
+"""
+
+EXAMPLES = r"""
+- name: Restart system power gracefully
+  community.general.redfish_command:
+    category: Systems
+    command: PowerGracefulRestart
+    resource_id: 437XR1138R2
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Turn system power off
+  community.general.redfish_command:
+    category: Systems
+    command: PowerForceOff
+    resource_id: 437XR1138R2
+
+- name: Restart system power forcefully
+  community.general.redfish_command:
+    category: Systems
+    command: PowerForceRestart
+    resource_id: 437XR1138R2
+
+- name: Shutdown system power gracefully
+  community.general.redfish_command:
+    category: Systems
+    command: PowerGracefulShutdown
+    resource_id: 437XR1138R2
+
+- name: Turn system power on
+  community.general.redfish_command:
+    category: Systems
+    command: PowerOn
+    resource_id: 437XR1138R2
+
+- name: Reboot system power
+  community.general.redfish_command:
+    category: Systems
+    command: PowerReboot
+    resource_id: 437XR1138R2
+
+- name: Set one-time boot device to {{ bootdevice }}
+  community.general.redfish_command:
+    category: Systems
+    command: SetOneTimeBoot
+    resource_id: 437XR1138R2
+    bootdevice: "{{ bootdevice }}"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+  community.general.redfish_command:
+    category: Systems
+    command: SetOneTimeBoot
+    resource_id: 437XR1138R2
+    bootdevice: "UefiTarget"
+    uefi_target: "/0x31/0x33/0x01/0x01"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Set one-time boot device to BootNext target of "Boot0001"
+  community.general.redfish_command:
+    category: Systems
+    command: SetOneTimeBoot
+    resource_id: 437XR1138R2
+    bootdevice: "UefiBootNext"
+    boot_next: "Boot0001"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Set persistent boot device override
+  community.general.redfish_command:
+    category: Systems
+    command: EnableContinuousBootOverride
+    resource_id: 437XR1138R2
+    bootdevice: "{{ bootdevice }}"
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Set one-time boot to BiosSetup
+  community.general.redfish_command:
+    category: Systems
+    command: SetOneTimeBoot
+    boot_next: BiosSetup
+    boot_override_mode: Legacy
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Disable persistent boot device override
+
community.general.redfish_command: + category: Systems + command: DisableBootOverride + +- name: Set system indicator LED to blink using security token for auth + community.general.redfish_command: + category: Systems + command: IndicatorLedBlink + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + +- name: Add user + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + +- name: Add user with specified account types + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + account_types: + - Redfish + - WebUI + +- name: Add user using new option aliases + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + account_roleid: "{{ account_roleid }}" + +- name: Delete user + community.general.redfish_command: + category: Accounts + command: DeleteUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + +- name: Disable user + community.general.redfish_command: + category: Accounts + command: DisableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + +- name: Enable user + community.general.redfish_command: + category: Accounts + command: EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + +- name: Add and enable user + community.general.redfish_command: + category: Accounts + command: AddUser,EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + +- name: Update user password + community.general.redfish_command: + category: Accounts + command: UpdateUserPassword + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + +- name: Update user role + community.general.redfish_command: + category: Accounts + command: UpdateUserRole + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + roleid: "{{ roleid }}" + +- name: Update user name + community.general.redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_updatename: "{{ account_updatename }}" + +- name: Update user name + community.general.redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + update_username: "{{ update_username }}" + +- name: Update AccountService properties + community.general.redfish_command: + category: 
Accounts
+    command: UpdateAccountServiceProperties
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    account_properties:
+      AccountLockoutThreshold: 5
+      AccountLockoutDuration: 600
+
+- name: Update user AccountTypes
+  community.general.redfish_command:
+    category: Accounts
+    command: UpdateUserAccountTypes
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    account_username: "{{ account_username }}"
+    account_types:
+      - Redfish
+      - WebUI
+
+- name: Clear Manager Logs with a timeout of 20 seconds
+  community.general.redfish_command:
+    category: Manager
+    command: ClearLogs
+    resource_id: BMC
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    timeout: 20
+
+- name: Create session
+  community.general.redfish_command:
+    category: Sessions
+    command: CreateSession
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+  register: result
+
+- name: Set chassis indicator LED to blink using security token for auth
+  community.general.redfish_command:
+    category: Chassis
+    command: IndicatorLedBlink
+    resource_id: 1U
+    baseuri: "{{ baseuri }}"
+    auth_token: "{{ result.session.token }}"
+
+- name: Delete session using security token created by CreateSession above
+  community.general.redfish_command:
+    category: Sessions
+    command: DeleteSession
+    baseuri: "{{ baseuri }}"
+    auth_token: "{{ result.session.token }}"
+    session_uri: "{{ result.session.uri }}"
+
+- name: Clear Sessions
+  community.general.redfish_command:
+    category: Sessions
+    command: ClearSessions
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+
+- name: Simple update
+  community.general.redfish_command:
+    category: Update
+    command: SimpleUpdate
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    update_image_uri: https://example.com/myupdate.img
+
+- name: Simple update with additional options
+  community.general.redfish_command:
+    category: Update
+    command: SimpleUpdate
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    update_image_uri: //example.com/myupdate.img
+    update_protocol: FTP
+    update_targets:
+      - /redfish/v1/UpdateService/FirmwareInventory/BMC
+    update_creds:
+      username: operator
+      password: supersecretpwd
+
+- name: Multipart HTTP push update; timeout is 600 seconds to allow for a large image transfer
+  community.general.redfish_command:
+    category: Update
+    command: MultipartHTTPPushUpdate
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    timeout: 600
+    update_image_file: ~/images/myupdate.img
+
+- name: Multipart HTTP push with additional options; timeout is 600 seconds to allow for a large image transfer
+  community.general.redfish_command:
+    category: Update
+    command: MultipartHTTPPushUpdate
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    timeout: 600
+    update_image_file: ~/images/myupdate.img
+    update_targets:
+      - /redfish/v1/UpdateService/FirmwareInventory/BMC
+    update_oem_params:
+      PreserveConfiguration: false
+
+- name: Multipart HTTP push with custom OEM options
+  vars:
+    oem_payload:
+      ImageType: BMC
+  community.general.redfish_command:
+    category: Update
+    command: MultipartHTTPPushUpdate
+    baseuri: "{{ baseuri }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    update_image_file: ~/images/myupdate.img
+    update_targets:
+      - /redfish/v1/UpdateService/FirmwareInventory/BMC
+
update_custom_oem_header: OemParameters + update_custom_oem_mime_type: "application/json" + update_custom_oem_params: "{{ oem_payload | to_json }}" + +- name: Perform requested operations to continue the update + community.general.redfish_command: + category: Update + command: PerformRequestedOperations + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 + +- name: Insert Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: 1 + +- name: Insert Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: BMC + +- name: Eject Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: 1 + +- name: Eject Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: BMC + +- name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Restart manager power gracefully and wait for it to be available + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + wait: true + +- name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulRestart + resource_id: BMC + +- name: Turn manager power off + community.general.redfish_command: + category: Manager + command: PowerForceOff + resource_id: BMC + +- name: Restart manager power forcefully + community.general.redfish_command: + category: Manager + command: PowerForceRestart + resource_id: BMC + +- name: Shutdown manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulShutdown + resource_id: BMC + +- name: Turn manager power on + community.general.redfish_command: + category: Manager + command: PowerOn + resource_id: BMC + +- name: Reboot manager power + community.general.redfish_command: + category: Manager + command: PowerReboot + resource_id: BMC + +- name: Factory reset manager to defaults + community.general.redfish_command: + category: Manager + command: ResetToDefaults + resource_id: BMC + reset_to_defaults_mode: ResetAll + +- name: Verify BIOS attributes + community.general.redfish_command: + category: Systems + command: VerifyBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + bios_attributes: + SubNumaClustering: "Disabled" + WorkloadProfile: 
"Virtualization-MaxPerformance" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. + returned: on success + type: dict + version_added: 6.1.0 + sample: + { + "update_status": { + "handle": "/redfish/v1/TaskService/TaskMonitors/735", + "messages": [], + "resets_requested": [], + "ret": true, + "status": "New" + } + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", + "PowerGracefulShutdown", "PowerReboot", "PowerCycle", "PowerFullPowerCycle", + "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", + "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", + "VirtualMediaEject", "VerifyBiosAttributes"], + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], + "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", + "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", + "UpdateUserAccountTypes", "UpdateAccountServiceProperties"], + "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], + "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", + "ResetToDefaults", + "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", + "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], + "Update": ["SimpleUpdate", "MultipartHTTPPushUpdate", "PerformRequestedOperations"], +} + + +def main(): + result = {} + return_values = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + session_uri=dict(), + id=dict(aliases=["account_id"]), + new_username=dict(aliases=["account_username"]), + new_password=dict(aliases=["account_password"], no_log=True), + roleid=dict(aliases=["account_roleid"]), + account_types=dict(type='list', elements='str', aliases=["account_accounttypes"]), + oem_account_types=dict(type='list', elements='str', aliases=["account_oemaccounttypes"]), + update_username=dict(type='str', aliases=["account_updatename"]), + account_properties=dict(type='dict', default={}), + bootdevice=dict(), + timeout=dict(type='int', default=60), + uefi_target=dict(), + boot_next=dict(), + boot_override_mode=dict(choices=['Legacy', 'UEFI']), + resource_id=dict(), + update_image_uri=dict(), + update_image_file=dict(type='path'), + update_protocol=dict(), + update_targets=dict(type='list', elements='str', default=[]), + update_oem_params=dict(type='dict'), + update_custom_oem_header=dict(type='str'), + update_custom_oem_mime_type=dict(type='str'), + update_custom_oem_params=dict(type='raw'), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict(no_log=True) + ) + ), + update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart', + 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']), + update_handle=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', 
elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) + ), + strip_etag_quotes=dict(type='bool', default=False), + reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), + bios_attributes=dict(type="dict"), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=120), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ('update_custom_oem_header', 'update_custom_oem_params'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # user to add/modify/delete + user = { + 'account_id': module.params['id'], + 'account_username': module.params['new_username'], + 'account_password': module.params['new_password'], + 'account_roleid': module.params['roleid'], + 'account_accounttypes': module.params['account_types'], + 'account_oemaccounttypes': module.params['oem_account_types'], + 'account_updatename': module.params['update_username'], + 'account_properties': module.params['account_properties'], + 'account_passwordchangerequired': None, + } + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # update options + update_opts = { + 'update_image_uri': module.params['update_image_uri'], + 'update_image_file': module.params['update_image_file'], + 'update_protocol': module.params['update_protocol'], + 'update_targets': module.params['update_targets'], + 'update_creds': module.params['update_creds'], + 'update_apply_time': module.params['update_apply_time'], + 'update_oem_params': module.params['update_oem_params'], + 'update_custom_oem_header': module.params['update_custom_oem_header'], + 'update_custom_oem_params': module.params['update_custom_oem_params'], + 'update_custom_oem_mime_type': module.params['update_custom_oem_mime_type'], + 'update_handle': module.params['update_handle'], + } + + # Boot override options + boot_opts = { + 'bootdevice': module.params['bootdevice'], + 'uefi_target': module.params['uefi_target'], + 'boot_next': module.params['boot_next'], + 'boot_override_mode': module.params['boot_override_mode'], + } + + # VirtualMedia options + virtual_media = module.params['virtual_media'] + + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + + # BIOS Attributes options + bios_attributes = module.params['bios_attributes'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. 
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Accounts": + ACCOUNTS_COMMANDS = { + "AddUser": rf_utils.add_user, + "EnableUser": rf_utils.enable_user, + "DeleteUser": rf_utils.delete_user, + "DisableUser": rf_utils.disable_user, + "UpdateUserRole": rf_utils.update_user_role, + "UpdateUserPassword": rf_utils.update_user_password, + "UpdateUserName": rf_utils.update_user_name, + "UpdateUserAccountTypes": rf_utils.update_user_accounttypes, + "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties + } + + # execute only if we find an Account service resource + result = rf_utils._find_accountservice_resource() + if result['ret'] is False: + # If a password change is required and the user is attempting to + # modify their password, try to proceed. + user['account_passwordchangerequired'] = rf_utils.check_password_change_required(result) + if len(command_list) == 1 and command_list[0] == "UpdateUserPassword" and user['account_passwordchangerequired']: + result = rf_utils.update_user_password(user) + else: + module.fail_json(msg=to_native(result['msg'])) + else: + for command in command_list: + result = ACCOUNTS_COMMANDS[command](user) + + elif category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command.startswith('Power'): + result = rf_utils.manage_system_power(command) + elif command == "SetOneTimeBoot": + boot_opts['override_enabled'] = 'Once' + result = rf_utils.set_boot_override(boot_opts) + elif command == "EnableContinuousBootOverride": + boot_opts['override_enabled'] = 'Continuous' + result = rf_utils.set_boot_override(boot_opts) + elif command == "DisableBootOverride": + boot_opts['override_enabled'] = 'Disabled' + result = rf_utils.set_boot_override(boot_opts) + elif command.startswith('IndicatorLed'): + result = rf_utils.manage_system_indicator_led(command) + elif command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media, category) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media, category) + elif command == 'VerifyBiosAttributes': + result = rf_utils.verify_bios_attributes(bios_attributes) + + elif category == "Chassis": + result = rf_utils._find_chassis_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] + + # Check if more than one led_command is present + num_led_commands = sum([command in led_commands for command in command_list]) + if num_led_commands > 1: + result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} + else: + for command in command_list: + if command in led_commands: + result = rf_utils.manage_chassis_indicator_led(command) + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == 
"ClearSessions": + result = rf_utils.clear_sessions() + elif command == "CreateSession": + result = rf_utils.create_session() + elif command == "DeleteSession": + result = rf_utils.delete_session(module.params['session_uri']) + + elif category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + # standardize on the Power* commands, but allow the legacy + # GracefulRestart command + if command == 'GracefulRestart': + command = 'PowerGracefulRestart' + + if command.startswith('Power'): + result = rf_utils.manage_manager_power(command, module.params['wait'], module.params['wait_timeout']) + elif command == 'ClearLogs': + result = rf_utils.clear_logs() + elif command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media, category) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media, category) + elif command == 'ResetToDefaults': + result = rf_utils.manager_reset_to_defaults(module.params['reset_to_defaults_mode']) + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "SimpleUpdate": + result = rf_utils.simple_update(update_opts) + if 'update_status' in result: + return_values['update_status'] = result['update_status'] + elif command == "MultipartHTTPPushUpdate": + result = rf_utils.multipath_http_push_update(update_opts) + if 'update_status' in result: + return_values['update_status'] = result['update_status'] + elif command == "PerformRequestedOperations": + result = rf_utils.perform_requested_update_operations(update_opts['update_handle']) + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + module.exit_json(changed=changed, session=session, + msg='Action was successful', + return_values=return_values) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py deleted file mode 120000 index 3682f63928..0000000000 --- a/plugins/modules/redfish_config.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/redfish/redfish_config.py \ No newline at end of file diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py new file mode 100644 index 0000000000..a804baab8e --- /dev/null +++ b/plugins/modules/redfish_config.py @@ -0,0 +1,588 @@ +#!/usr/bin/python + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redfish_config +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to set or update a configuration attribute. + - Manages BIOS configuration settings. + - Manages OOB controller configuration settings. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + bios_attributes: + required: false + description: + - Dictionary of BIOS attributes to update. + default: {} + type: dict + version_added: '0.2.0' + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. + type: int + default: 60 + boot_order: + required: false + description: + - List of BootOptionReference strings specifying the BootOrder. + default: [] + type: list + elements: str + version_added: '0.2.0' + network_protocols: + required: false + description: + - Setting dict of manager services to update. + type: dict + default: {} + version_added: '0.2.0' + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + service_id: + required: false + description: + - ID of the manager to update. + type: str + version_added: '8.4.0' + nic_addr: + required: false + description: + - EthernetInterface Address string on OOB controller. + default: 'null' + type: str + version_added: '0.2.0' + nic_config: + required: false + description: + - Setting dict of EthernetInterface on OOB controller. + type: dict + default: {} + version_added: '0.2.0' + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where C(If-Match) only matches the unquoted etag string. + type: bool + default: false + version_added: 3.7.0 + hostinterface_config: + required: false + description: + - Setting dict of HostInterface on OOB controller. + type: dict + default: {} + version_added: '4.1.0' + hostinterface_id: + required: false + description: + - Redfish HostInterface instance ID if multiple HostInterfaces are present. + type: str + version_added: '4.1.0' + sessions_config: + required: false + description: + - Setting dict of Sessions. + type: dict + default: {} + version_added: '5.7.0' + storage_subsystem_id: + required: false + description: + - ID of the Storage Subsystem on which the volume is to be created. + type: str + default: '' + version_added: '7.3.0' + storage_none_volume_deletion: + required: false + description: + - Indicates if all non-RAID volumes are automatically deleted prior to creating the new volume. + type: bool + default: false + version_added: '9.5.0' + volume_ids: + required: false + description: + - List of IDs of volumes to be deleted. + type: list + default: [] + elements: str + version_added: '7.3.0' + secure_boot_enable: + required: false + description: + - Setting parameter to enable or disable SecureBoot. 
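+      # Only consulted by the SetSecureBoot command; EnableSecureBoot does not
+      # read it (see the command handling in main() below).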
+ type: bool + default: true + version_added: '7.5.0' + volume_details: + required: false + description: + - Setting dictionary of volume to be created. + - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume is determined by the Redfish service. + It is possible the size is not the maximum available size. + type: dict + default: {} + version_added: '7.5.0' + power_restore_policy: + description: + - The desired power state of the system when power is restored after a power loss. + type: str + choices: + - AlwaysOn + - AlwaysOff + - LastState + version_added: '10.5.0' + ciphers: + version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + +author: + - "Jose Delarosa (@jose-delarosa)" + - "T S Kushal (@TSKushal)" +""" + +EXAMPLES = r""" +- name: Set BootMode to UEFI + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Uefi" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set multiple BootMode attributes + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Enable PXE Boot for NIC1 + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + PxeDev1EnDis: Enabled + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set BIOS default settings with a timeout of 20 seconds + community.general.redfish_config: + category: Systems + command: SetBiosDefaultSettings + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + +- name: Set boot order + community.general.redfish_config: + category: Systems + command: SetBootOrder + boot_order: + - Boot0002 + - Boot0001 + - Boot0000 + - Boot0003 + - Boot0004 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set boot order to the default + community.general.redfish_config: + category: Systems + command: SetDefaultBootOrder + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set Manager Network Protocols + community.general.redfish_config: + category: Manager + command: SetNetworkProtocols + network_protocols: + SNMP: + ProtocolEnabled: true + Port: 161 + HTTP: + ProtocolEnabled: false + Port: 8080 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set Manager NIC + community.general.redfish_config: + category: Manager + command: SetManagerNic + nic_config: + DHCPv4: + DHCPEnabled: false + IPv4StaticAddresses: + Address: 192.168.1.3 + Gateway: 192.168.1.1 + SubnetMask: 255.255.255.0 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Disable Host Interface + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: false + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Enable Host Interface for HostInterface resource ID '2' + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + 
InterfaceEnabled: true + hostinterface_id: "2" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set SessionService Session Timeout to 30 minutes + community.general.redfish_config: + category: Sessions + command: SetSessionService + sessions_config: + SessionTimeout: 1800 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Enable SecureBoot + community.general.redfish_config: + category: Systems + command: EnableSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set SecureBoot + community.general.redfish_config: + category: Systems + command: SetSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + secure_boot_enable: true + +- name: Delete All Volumes + community.general.redfish_config: + category: Systems + command: DeleteVolumes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + storage_subsystem_id: "DExxxxxx" + volume_ids: ["volume1", "volume2"] + +- name: Create Volume + community.general.redfish_config: + category: Systems + command: CreateVolume + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + storage_subsystem_id: "DExxxxxx" + volume_details: + Name: "MR Volume" + RAIDType: "RAID0" + Drives: + - "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1" + +- name: Set PowerRestorePolicy + community.general.redfish_config: + category: Systems + command: SetPowerRestorePolicy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + power_restore_policy: "AlwaysOff" + +- name: Set service identification to {{ service_id }} + community.general.redfish_config: + category: Manager + command: SetServiceIdentification + service_id: "{{ service_id }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", + "SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume", + "SetPowerRestorePolicy"], + "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface", "SetServiceIdentification"], + "Sessions": ["SetSessionService"], +} + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + bios_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=60), + boot_order=dict(type='list', elements='str', default=[]), + network_protocols=dict( + type='dict', + default={} + ), + resource_id=dict(), + service_id=dict(), + nic_addr=dict(default='null'), + nic_config=dict( + type='dict', + default={} + ), + strip_etag_quotes=dict(type='bool', default=False), + hostinterface_config=dict(type='dict', default={}), + hostinterface_id=dict(), + sessions_config=dict(type='dict', default={}), + storage_subsystem_id=dict(type='str', default=''), + storage_none_volume_deletion=dict(type='bool', default=False), + volume_ids=dict(type='list', default=[], elements='str'), + secure_boot_enable=dict(type='bool', default=True), + volume_details=dict(type='dict', default={}), + power_restore_policy=dict(choices=['AlwaysOn', 'AlwaysOff', 'LastState']), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # BIOS attributes to update + bios_attributes = module.params['bios_attributes'] + + # boot order + boot_order = module.params['boot_order'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # manager nic + nic_addr = module.params['nic_addr'] + nic_config = module.params['nic_config'] + + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + + # HostInterface config options + hostinterface_config = module.params['hostinterface_config'] + + # HostInterface instance ID + hostinterface_id = module.params['hostinterface_id'] + + # Service Identification + service_id = module.params['service_id'] + + # Sessions config options + sessions_config = module.params['sessions_config'] + + # Volume deletion options + storage_subsystem_id = module.params['storage_subsystem_id'] + volume_ids = module.params['volume_ids'] + + # Set SecureBoot options + secure_boot_enable = module.params['secure_boot_enable'] + + # Volume creation options + volume_details = module.params['volume_details'] + 
storage_subsystem_id = module.params['storage_subsystem_id'] + storage_none_volume_deletion = module.params['storage_none_volume_deletion'] + + # Power Restore Policy + power_restore_policy = module.params['power_restore_policy'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetBiosDefaultSettings": + result = rf_utils.set_bios_default_settings() + elif command == "SetBiosAttributes": + result = rf_utils.set_bios_attributes(bios_attributes) + elif command == "SetBootOrder": + result = rf_utils.set_boot_order(boot_order) + elif command == "SetDefaultBootOrder": + result = rf_utils.set_default_boot_order() + elif command == "EnableSecureBoot": + result = rf_utils.enable_secure_boot() + elif command == "SetSecureBoot": + result = rf_utils.set_secure_boot(secure_boot_enable) + elif command == "DeleteVolumes": + result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids) + elif command == "CreateVolume": + result = rf_utils.create_volume(volume_details, storage_subsystem_id, storage_none_volume_deletion) + elif command == "SetPowerRestorePolicy": + result = rf_utils.set_power_restore_policy(power_restore_policy) + + elif category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetNetworkProtocols": + result = rf_utils.set_network_protocols(module.params['network_protocols']) + elif command == "SetManagerNic": + result = rf_utils.set_manager_nic(nic_addr, nic_config) + elif command == "SetHostInterface": + result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id) + elif command == "SetServiceIdentification": + result = rf_utils.set_service_identification(service_id) + + elif category == "Sessions": + # execute only if we find a Sessions resource + result = rf_utils._find_sessionservice_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetSessionService": + result = rf_utils.set_session_service(sessions_config) + + # Return data back or fail with proper message + if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py deleted file mode 120000 index 
08c689f284..0000000000 --- a/plugins/modules/redfish_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/redfish/redfish_info.py \ No newline at end of file diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py new file mode 100644 index 0000000000..af1b3af319 --- /dev/null +++ b/plugins/modules/redfish_info.py @@ -0,0 +1,642 @@ +#!/usr/bin/python + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redfish_info +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to get information back. + - Information retrieved is placed in a location specified by the user. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.redfish +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + category: + required: false + description: + - List of categories to execute on OOB controller. + default: ['Systems'] + type: list + elements: str + command: + required: false + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + manager: + description: + - Name of manager on OOB controller to target. + type: str + version_added: '8.3.0' + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. + type: int + default: 60 + update_handle: + required: false + description: + - Handle to check the status of an update in progress. 
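+      # This is usually the task/monitor URI surfaced by redfish_command's
+      # SimpleUpdate or MultipartHTTPPushUpdate in return_values.update_status;
+      # see the GetUpdateStatus example below, which polls
+      # /redfish/v1/TaskService/TaskMonitors/735.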
+ type: str + version_added: '6.1.0' + ciphers: + version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + +author: "Jose Delarosa (@jose-delarosa)" +""" + +EXAMPLES = r""" +- name: Get CPU inventory + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" + +- name: Get CPU model + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" + +- name: Get memory inventory + community.general.redfish_info: + category: Systems + command: GetMemoryInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Get fan inventory with a timeout of 20 seconds + community.general.redfish_info: + category: Chassis + command: GetFanInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + +- name: Get Virtual Media information + community.general.redfish_info: + category: Manager + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + +- name: Get Virtual Media information from Systems + community.general.redfish_info: + category: Systems + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + +- name: Get Volume Inventory + community.general.redfish_info: + category: Systems + command: GetVolumeInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" + +- name: Get Session information + community.general.redfish_info: + category: Sessions + command: GetSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" + +- name: Get default inventory information + community.general.redfish_info: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts | to_nice_json }}" + +- name: Get several inventories + community.general.redfish_info: + category: Systems + command: GetNicInventory,GetBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get configuration of the AccountService + community.general.redfish_info: + category: Accounts + command: GetAccountServiceConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get default system 
inventory and user information + community.general.redfish_info: + category: Systems,Accounts + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get default system, user and firmware information + community.general.redfish_info: + category: ["Systems", "Accounts", "Update"] + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get Manager NIC inventory information + community.general.redfish_info: + category: Manager + command: GetManagerNicInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get boot override information + community.general.redfish_info: + category: Systems + command: GetBootOverride + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get chassis inventory + community.general.redfish_info: + category: Chassis + command: GetChassisInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get all information available in the Manager category + community.general.redfish_info: + category: Manager + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get firmware update capability information + community.general.redfish_info: + category: Update + command: GetFirmwareUpdateCapabilities + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get firmware inventory + community.general.redfish_info: + category: Update + command: GetFirmwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get service identification + community.general.redfish_info: + category: Manager + command: GetServiceIdentification + manager: "{{ manager }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get software inventory + community.general.redfish_info: + category: Update + command: GetSoftwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get the status of an update operation + community.general.redfish_info: + category: Update + command: GetUpdateStatus + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 + +- name: Get Manager Services + community.general.redfish_info: + category: Manager + command: GetNetworkProtocols + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get all information available in all categories + community.general.redfish_info: + category: all + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get system health report + community.general.redfish_info: + category: Systems + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get chassis health report + community.general.redfish_info: + category: Chassis + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get manager health report + community.general.redfish_info: + category: Manager + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get manager Redfish Host Interface inventory + community.general.redfish_info: + category: Manager + command: 
GetHostInterfaces + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get Manager Inventory + community.general.redfish_info: + category: Manager + command: GetManagerInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get HPE Thermal Config + community.general.redfish_info: + category: Chassis + command: GetHPEThermalConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get HPE Fan Percent Minimum + community.general.redfish_info: + category: Chassis + command: GetHPEFanPercentMin + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get BIOS registry + community.general.redfish_info: + category: Systems + command: GetBiosRegistries + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get power restore policy + community.general.redfish_info: + category: Systems + command: GetPowerRestorePolicy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Check the availability of the service with a timeout of 5 seconds + community.general.redfish_info: + category: Service + command: CheckAvailability + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 5 + register: result +""" + +RETURN = r""" +result: + description: Different results depending on task. + returned: always + type: dict + sample: List of CPUs on system +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC + +CATEGORY_COMMANDS_ALL = { + "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", + "GetMemoryInventory", "GetNicInventory", "GetHealthReport", + "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", + "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries", + "GetPowerRestorePolicy"], + "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", + "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"], + "Accounts": ["ListUsers", "GetAccountServiceConfig"], + "Sessions": ["GetSessions"], + "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory", + "GetUpdateStatus"], + "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", + "GetHealthReport", "GetHostInterfaces", "GetManagerInventory", "GetServiceIdentification"], + "Service": ["CheckAvailability"], +} + +CATEGORY_COMMANDS_DEFAULT = { + "Systems": "GetSystemInventory", + "Chassis": "GetFanInventory", + "Accounts": "ListUsers", + "Update": "GetFirmwareInventory", + "Sessions": "GetSessions", + "Manager": "GetManagerNicInventory", + "Service": "CheckAvailability", +} + + +def main(): + result = {} + category_list = [] + argument_spec = dict( + category=dict(type='list', elements='str', default=['Systems']), + command=dict(type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=60), + update_handle=dict(), + manager=dict(), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + 
('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True, + ) + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # update handle + update_handle = module.params['update_handle'] + + # manager + manager = module.params['manager'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module) + + # Build Category list + if "all" in module.params['category']: + for entry in CATEGORY_COMMANDS_ALL: + category_list.append(entry) + else: + # one or more categories specified + category_list = module.params['category'] + + for category in category_list: + command_list = [] + # Build Command list for each Category + if category in CATEGORY_COMMANDS_ALL: + if not module.params['command']: + # True if we don't specify a command --> use default + command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) + elif "all" in module.params['command']: + for entry in range(len(CATEGORY_COMMANDS_ALL[category])): + command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) + # one or more commands + else: + command_list = module.params['command'] + # Verify that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg="Invalid Command: %s" % cmd) + else: + # Fail if even one category given is invalid + module.fail_json(msg="Invalid Category: %s" % category) + + # Organize by Categories / Commands + if category == "Service": + # service-level commands are always available + for command in command_list: + if command == "CheckAvailability": + result["service"] = rf_utils.check_service_availability() + + elif category == "Systems": + # execute only if we find a Systems resource + resource = rf_utils._find_systems_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSystemInventory": + result["system"] = rf_utils.get_multi_system_inventory() + elif command == "GetCpuInventory": + result["cpu"] = rf_utils.get_multi_cpu_inventory() + elif command == "GetMemoryInventory": + result["memory"] = rf_utils.get_multi_memory_inventory() + elif command == "GetNicInventory": + result["nic"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetStorageControllerInventory": + result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() + elif command == "GetDiskInventory": + result["disk"] = rf_utils.get_multi_disk_inventory() + elif command == "GetVolumeInventory": + result["volume"] = rf_utils.get_multi_volume_inventory() + elif command == "GetBiosAttributes": + result["bios_attribute"] = rf_utils.get_multi_bios_attributes() + elif command == "GetBootOrder": + result["boot_order"] = rf_utils.get_multi_boot_order() + elif command == "GetBootOverride": + result["boot_override"] = rf_utils.get_multi_boot_override() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_system_health_report() + elif command == "GetVirtualMedia": + result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) + elif command == "GetBiosRegistries": + result["bios_registries"] = rf_utils.get_bios_registries() + elif command == "GetPowerRestorePolicy": + result["power_restore_policy"] = 
rf_utils.get_multi_power_restore_policy() + + elif category == "Chassis": + # execute only if we find Chassis resource + resource = rf_utils._find_chassis_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFanInventory": + result["fan"] = rf_utils.get_fan_inventory() + elif command == "GetPsuInventory": + result["psu"] = rf_utils.get_psu_inventory() + elif command == "GetChassisThermals": + result["thermals"] = rf_utils.get_chassis_thermals() + elif command == "GetChassisPower": + result["chassis_power"] = rf_utils.get_chassis_power() + elif command == "GetChassisInventory": + result["chassis"] = rf_utils.get_chassis_inventory() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_chassis_health_report() + elif command == "GetHPEThermalConfig": + result["hpe_thermal_config"] = rf_utils.get_hpe_thermal_config() + elif command == "GetHPEFanPercentMin": + result["hpe_fan_percent_min"] = rf_utils.get_hpe_fan_percent_min() + + elif category == "Accounts": + # execute only if we find an Account service resource + resource = rf_utils._find_accountservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "ListUsers": + result["user"] = rf_utils.list_users() + elif command == "GetAccountServiceConfig": + result["accountservice_config"] = rf_utils.get_accountservice_properties() + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFirmwareInventory": + result["firmware"] = rf_utils.get_firmware_inventory() + elif command == "GetSoftwareInventory": + result["software"] = rf_utils.get_software_inventory() + elif command == "GetFirmwareUpdateCapabilities": + result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() + elif command == "GetUpdateStatus": + result["update_status"] = rf_utils.get_update_status(update_handle) + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSessions": + result["session"] = rf_utils.get_sessions() + + elif category == "Manager": + # execute only if we find a Manager service resource + resource = rf_utils._find_managers_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetManagerNicInventory": + result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetVirtualMedia": + result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) + elif command == "GetLogs": + result["log"] = rf_utils.get_logs() + elif command == "GetNetworkProtocols": + result["network_protocols"] = rf_utils.get_network_protocols() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_manager_health_report() + elif command == "GetHostInterfaces": + result["host_interfaces"] = rf_utils.get_hostinterfaces() + elif command == "GetManagerInventory": + result["manager"] = rf_utils.get_multi_manager_inventory() + elif command == "GetServiceIdentification": + result["service_id"] = rf_utils.get_service_identification(manager) + + # 
Return data back + module.exit_json(redfish_facts=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py deleted file mode 120000 index fde2f19c6f..0000000000 --- a/plugins/modules/redhat_subscription.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/redhat_subscription.py \ No newline at end of file diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py new file mode 100644 index 0000000000..c2b76fe8ac --- /dev/null +++ b/plugins/modules/redhat_subscription.py @@ -0,0 +1,1167 @@ +#!/usr/bin/python + +# Copyright (c) James Laska (jlaska@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: redhat_subscription +short_description: Manage registration and subscriptions to RHSM using C(subscription-manager) +description: + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) + command, registering using D-Bus if possible. +author: "Barnaby Court (@barnabycourt)" +notes: + - 'The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) to register, starting from community.general + 6.5.0: this is done so credentials (username, password, activation keys) can be passed to C(rhsm) in a secure way. C(subscription-manager) + itself gets credentials only as arguments of command line parameters, which is I(not) secure, as they can be easily stolen + by checking the process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), the module does I(not) + use D-Bus for registration when trying either to register using O(token), or when specifying O(environment), or when the + system is old (typically RHEL 7 older than 7.4, RHEL 6, and older).' + - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an + Organization ID. + - Since 2.5 values for O(server_hostname), O(server_insecure), O(rhsm_baseurl), O(server_proxy_hostname), O(server_proxy_port), + O(server_proxy_user) and O(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) config file and + default to V(null). + - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully + run this module. + - Since community.general 6.5.0, credentials (that is, O(username) and O(password), O(activationkey), or O(token)) are needed + only in case the system is not registered, or O(force_register) is specified; this makes it possible to use the module + to tweak an already registered system, for example attaching pools to it (using O(pool_ids)), and modifying the C(syspurpose) + attributes (using O(syspurpose)). +requirements: + - subscription-manager + - Optionally the C(dbus) Python library; this is usually included in the OS as it is used by C(subscription-manager). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Whether to register and subscribe (V(present)), or unregister (V(absent)) a system. + choices: ["present", "absent"] + default: "present" + type: str + username: + description: + - Access.redhat.com or Red Hat Satellite or Katello username. 
+ type: str + password: + description: + - Access.redhat.com or Red Hat Satellite or Katello password. + type: str + token: + description: + - Sso.redhat.com API access token. + type: str + version_added: 6.3.0 + server_hostname: + description: + - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + server_insecure: + description: + - Enable or disable https server certificate verification when connecting to O(server_hostname). + type: str + server_prefix: + description: + - Specify the prefix when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + server_port: + description: + - Specify the port when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + rhsm_baseurl: + description: + - Specify CDN baseurl. + type: str + rhsm_repo_ca_cert: + description: + - Specify an alternative location for a CA certificate for CDN. + type: str + server_proxy_hostname: + description: + - Specify an HTTP proxy hostname. + type: str + server_proxy_scheme: + description: + - Specify an HTTP proxy scheme, for example V(http) or V(https). + type: str + version_added: 6.2.0 + server_proxy_port: + description: + - Specify an HTTP proxy port. + type: str + server_proxy_user: + description: + - Specify a user for HTTP proxy with basic authentication. + type: str + server_proxy_password: + description: + - Specify a password for HTTP proxy with basic authentication. + type: str + auto_attach: + description: + - Upon successful registration, auto-consume available subscriptions. + - Please note that the alias O(ignore:autosubscribe) was removed in community.general 9.0.0. + type: bool + activationkey: + description: + - Supply an activation key for use with registration. + type: str + org_id: + description: + - Organization ID to use in conjunction with activationkey. + type: str + environment: + description: + - Register with a specific environment in the destination org. Used with Red Hat Satellite or Katello. + type: str + pool_ids: + description: + - Specify subscription pool IDs to consume. + - 'A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), or + as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef: + 2). If the quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this).' + default: [] + type: list + elements: raw + consumer_type: + description: + - The type of unit to register, defaults to system. + type: str + consumer_name: + description: + - Name of the system to register, defaults to the hostname. + type: str + consumer_id: + description: + - References an existing consumer ID to resume using a previous registration for this system. If the system's identity + certificate is lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The + default is to not specify a consumer ID so a new ID is created. + type: str + force_register: + description: + - Register the system even if it is already registered. + type: bool + default: false + release: + description: + - Set a release version. + type: str + syspurpose: + description: + - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) and synchronize these attributes with RHSM + server. 
Syspurpose attributes help attach the most appropriate subscriptions to the system automatically. When C(syspurpose.json) + file already contains some attributes, then new attributes overwrite existing attributes. When some attribute is not + listed in the new list of attributes, the existing attribute is removed from C(syspurpose.json) file. Unknown attributes + are ignored. + type: dict + suboptions: + usage: + description: Syspurpose attribute usage. + type: str + role: + description: Syspurpose attribute role. + type: str + service_level_agreement: + description: Syspurpose attribute service_level_agreement. + type: str + addons: + description: Syspurpose attribute addons. + type: list + elements: str + sync: + description: + - When this option is V(true), then syspurpose attributes are synchronized with RHSM server immediately. When this + option is V(false), then syspurpose attributes are synchronized with RHSM server by rhsmcertd daemon. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + +- name: Same as above but subscribe to a specific pool by ID. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: 0123456789abcdef0123456789abcdef + +- name: Register and subscribe to multiple pools. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef + - 1123456789abcdef0123456789abcdef + +- name: Same as above but consume multiple entitlements. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef: 2 + - 1123456789abcdef0123456789abcdef: 4 + +- name: Register and pull existing system data. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +- name: Register as user credentials into given environment (against Red Hat Satellite or Katello), and auto-subscribe. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + environment: Library + auto_attach: true + +- name: Register as user (joe_user) with password (somepass) and a specific release + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + release: 7.4 + +- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + syspurpose: + usage: "Production" + role: "Red Hat Enterprise Server" + service_level_agreement: "Premium" + addons: + - addon1 + - addon2 + sync: true +""" + +RETURN = r""" +subscribed_pool_ids: + description: List of pool IDs to which system is now subscribed. 
+ returned: success + type: dict + sample: {"8a85f9815ab905d3015ab928c7005de4": "1"} +""" + +from os.path import isfile +from os import getuid, unlink +import configparser +import re +import shutil +import tempfile +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils import distro + + +SUBMAN_CMD = None + + +class Rhsm(object): + + REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" + + def __init__(self, module): + self.module = module + + def update_plugin_conf(self, plugin, enabled=True): + plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin + + if isfile(plugin_conf): + tmpfd, tmpfile = tempfile.mkstemp() + shutil.copy2(plugin_conf, tmpfile) + cfg = configparser.ConfigParser() + cfg.read([tmpfile]) + + if enabled: + cfg.set('main', 'enabled', '1') + else: + cfg.set('main', 'enabled', '0') + + with open(tmpfile, 'w+') as fd: + cfg.write(fd) + self.module.atomic_move(tmpfile, plugin_conf) + + def enable(self): + ''' + Enable the system to receive updates from subscription-manager. + This involves updating affected yum plugins and removing any + conflicting yum repositories. + ''' + # Remove any existing redhat.repo + if isfile(self.REDHAT_REPO): + unlink(self.REDHAT_REPO) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', True) + + def configure(self, **kwargs): + ''' + Configure the system as directed for registration with RHSM + Raises: + * Exception - if error occurs while running command + ''' + + args = [SUBMAN_CMD, 'config'] + + # Pass supplied **kwargs as parameters to subscription-manager. Ignore + # non-configuration parameters and replace '_' with '.'. For example, + # 'server_hostname' becomes '--server.hostname'. + options = [] + for k, v in sorted(kwargs.items()): + if re.search(r'^(server|rhsm)_', k) and v is not None: + options.append('--%s=%s' % (k.replace('_', '.', 1), v)) + + # When there is nothing to configure, then it is not necessary + # to run config command, because it only returns current + # content of current configuration file + if len(options) == 0: + return + + args.extend(options) + + self.module.run_command(args, check_rc=True) + + @property + def is_registered(self): + ''' + Determine whether the current system + Returns: + * Boolean - whether the current system is currently registered to + RHSM. + ''' + + args = [SUBMAN_CMD, 'identity'] + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + if rc == 0: + return True + else: + return False + + def _has_dbus_interface(self): + """ + Checks whether subscription-manager has a D-Bus interface. + + :returns: bool -- whether subscription-manager has a D-Bus interface. + """ + + def str2int(s, default=0): + try: + return int(s) + except ValueError: + return default + + distro_id = distro.id() + distro_version = tuple(str2int(p) for p in distro.version_parts()) + + # subscription-manager in any supported Fedora version has the interface. + if distro_id == 'fedora': + return True + # Any other distro: assume it is EL; + # the D-Bus interface was added to subscription-manager in RHEL 7.4. + return (distro_version[0] == 7 and distro_version[1] >= 4) or \ + distro_version[0] >= 8 + + def _can_connect_to_dbus(self): + """ + Checks whether it is possible to connect to the system D-Bus bus. + + :returns: bool -- whether it is possible to connect to the system D-Bus bus. 
+ """ + + try: + # Technically speaking, subscription-manager uses dbus-python + # as D-Bus library, so this ought to work; better be safe than + # sorry, I guess... + import dbus + except ImportError: + self.module.debug('dbus Python module not available, will use CLI') + return False + + try: + bus = dbus.SystemBus() + msg = dbus.lowlevel.SignalMessage('/', 'com.example', 'test') + bus.send_message(msg) + bus.flush() + + except dbus.exceptions.DBusException as e: + self.module.debug('Failed to connect to system D-Bus bus, will use CLI: %s' % e) + return False + + self.module.debug('Verified system D-Bus bus as usable') + return True + + def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, environment, + release): + ''' + Register the current system to the provided RHSM or Red Hat Satellite + or Katello server + + Raises: + * Exception - if any error occurs during the registration + ''' + # There is no support for token-based registration in the D-Bus API + # of rhsm, so always use the CLI in that case; + # also, since the specified environments are names, and the D-Bus APIs + # require IDs for the environments, use the CLI also in that case + if (not token and not environment and self._has_dbus_interface() and + self._can_connect_to_dbus()): + self._register_using_dbus(was_registered, username, password, auto_attach, + activationkey, org_id, consumer_type, + consumer_name, consumer_id, + force_register, environment, release) + return + self._register_using_cli(username, password, token, auto_attach, + activationkey, org_id, consumer_type, + consumer_name, consumer_id, + force_register, environment, release) + + def _register_using_cli(self, username, password, token, auto_attach, + activationkey, org_id, consumer_type, consumer_name, + consumer_id, force_register, environment, release): + ''' + Register using the 'subscription-manager' command + + Raises: + * Exception - if error occurs while running command + ''' + args = [SUBMAN_CMD, 'register'] + + # Generate command arguments + if force_register: + args.extend(['--force']) + + if org_id: + args.extend(['--org', org_id]) + + if auto_attach: + args.append('--auto-attach') + + if consumer_type: + args.extend(['--type', consumer_type]) + + if consumer_name: + args.extend(['--name', consumer_name]) + + if consumer_id: + args.extend(['--consumerid', consumer_id]) + + if environment: + args.extend(['--environment', environment]) + + if activationkey: + args.extend(['--activationkey', activationkey]) + elif token: + args.extend(['--token', token]) + else: + if username: + args.extend(['--username', username]) + if password: + args.extend(['--password', password]) + + if release: + args.extend(['--release', release]) + + rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + def _register_using_dbus(self, was_registered, username, password, auto_attach, + activationkey, org_id, consumer_type, consumer_name, + consumer_id, force_register, environment, release): + ''' + Register using D-Bus (connecting to the rhsm service) + + Raises: + * Exception - if error occurs during the D-Bus communication + ''' + import dbus + + SUBSCRIPTION_MANAGER_LOCALE = 'C' + # Seconds to wait for Registration to complete over DBus; + # 10 minutes should be a pretty generous timeout. 
+        REGISTRATION_TIMEOUT = 600
+
+        def str2int(s, default=0):
+            try:
+                return int(s)
+            except ValueError:
+                return default
+
+        distro_id = distro.id()
+        distro_version_parts = distro.version_parts()
+        distro_version = tuple(str2int(p) for p in distro_version_parts)
+
+        # Stop the rhsm service when using systemd (which means Fedora or
+        # RHEL 7+): this is because the service may not use new configuration bits
+        # - with subscription-manager < 1.26.5-1 (in RHEL < 8.2);
+        #   fixed later by https://github.com/candlepin/subscription-manager/pull/2175
+        # - sporadically: https://bugzilla.redhat.com/show_bug.cgi?id=2049296
+        if distro_id == 'fedora' or distro_version[0] >= 7:
+            cmd = ['systemctl', 'stop', 'rhsm']
+            self.module.run_command(cmd, check_rc=True, expand_user_and_vars=False)
+
+        # While there is a 'force' option for the registration, it is actually
+        # not implemented (and thus it does not work)
+        # - in RHEL 7 and earlier
+        # - in RHEL 8 before 8.8: https://bugzilla.redhat.com/show_bug.cgi?id=2118486
+        # - in RHEL 9 before 9.2: https://bugzilla.redhat.com/show_bug.cgi?id=2121350
+        # Hence, use it only when implemented, manually unregistering otherwise.
+        # Match it on RHEL, since we know about it; other distributions
+        # will need their own logic.
+        dbus_force_option_works = False
+        if (distro_id == 'rhel' and
+                ((distro_version[0] == 8 and distro_version[1] >= 8) or
+                 (distro_version[0] == 9 and distro_version[1] >= 2) or
+                 distro_version[0] > 9)):
+            dbus_force_option_works = True
+        # We need to use the 'enable_content' D-Bus option to ensure that
+        # content is enabled; sadly the option is available depending on the
+        # version of the distro, and also depending on which API/method is used
+        # for registration.
+        dbus_has_enable_content_option = False
+        if activationkey:
+            def supports_enable_content_for_activation_keys():
+                # subscription-manager in Fedora >= 41 has the new option.
+                if distro_id == 'fedora' and distro_version[0] >= 41:
+                    return True
+                # Assume EL distros here.
+                if distro_version[0] >= 10:
+                    return True
+                return False
+            dbus_has_enable_content_option = supports_enable_content_for_activation_keys()
+        else:
+            def supports_enable_content_for_credentials():
+                # subscription-manager in any supported Fedora version
+                # has the new option.
+                if distro_id == 'fedora':
+                    return True
+                # Check for RHEL 8 >= 8.6, or RHEL >= 9.
+                if distro_id == 'rhel' and \
+                        ((distro_version[0] == 8 and distro_version[1] >= 6) or
+                         distro_version[0] >= 9):
+                    return True
+                # CentOS: similar checks as for RHEL, with one extra bit:
+                # if the 2nd part of the version is empty, it means it is
+                # CentOS Stream, and thus we can assume it has the latest
+                # version of subscription-manager.
+                if distro_id == 'centos' and \
+                        ((distro_version[0] == 8 and
+                          (distro_version[1] >= 6 or distro_version_parts[1] == '')) or
+                         distro_version[0] >= 9):
+                    return True
+                # Unknown or old distro: assume it does not support
+                # the new option.
+                return False
+            dbus_has_enable_content_option = supports_enable_content_for_credentials()
+
+        if force_register and not dbus_force_option_works and was_registered:
+            self.unregister()
+
+        register_opts = {}
+        if consumer_type:
+            # The option for the consumer type used to be 'type' in versions
+            # of RHEL before 9 & in RHEL 9 before 9.2, and then it changed to
+            # 'consumer_type'; since the Register*() D-Bus functions reject
+            # unknown options, we have to pass the right option depending on
+            # the version -- funky.
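+            # For example, RHEL 9.0/9.1 accept only {'type': ...}, while
+            # RHEL 9.2+ and current Fedora accept {'consumer_type': ...}.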
+ def supports_option_consumer_type(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 9 >= 9.2, or RHEL >= 10. + if distro_id == 'rhel' and \ + ((distro_version[0] == 9 and distro_version[1] >= 2) or + distro_version[0] >= 10): + return True + # CentOS: since the change was only done in EL 9, then there is + # only CentOS Stream for 9, and thus we can assume it has the + # latest version of subscription-manager. + if distro_id == 'centos' and distro_version[0] >= 9: + return True + # Unknown or old distro: assume it does not support + # the new option. + return False + + consumer_type_key = 'type' + if supports_option_consumer_type(): + consumer_type_key = 'consumer_type' + register_opts[consumer_type_key] = consumer_type + if consumer_name: + register_opts['name'] = consumer_name + if consumer_id: + register_opts['consumerid'] = consumer_id + if environment: + # The option for environments used to be 'environment' in versions + # of RHEL before 8.6, and then it changed to 'environments'; since + # the Register*() D-Bus functions reject unknown options, we have + # to pass the right option depending on the version -- funky. + def supports_option_environments(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 8 >= 8.6, or RHEL >= 9. + if distro_id == 'rhel' and \ + ((distro_version[0] == 8 and distro_version[1] >= 6) or + distro_version[0] >= 9): + return True + # CentOS: similar checks as for RHEL, with one extra bit: + # if the 2nd part of the version is empty, it means it is + # CentOS Stream, and thus we can assume it has the latest + # version of subscription-manager. + if distro_id == 'centos' and \ + ((distro_version[0] == 8 and + (distro_version[1] >= 6 or distro_version_parts[1] == '')) or + distro_version[0] >= 9): + return True + # Unknown or old distro: assume it does not support + # the new option. 
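+                # (For example, CentOS Stream 8 typically reports
+                # distro.version_parts() as ('8', '', ''); the empty second
+                # field is what marks Stream in the check above.)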
+                return False
+
+            environment_key = 'environment'
+            if supports_option_environments():
+                environment_key = 'environments'
+            register_opts[environment_key] = environment
+        if force_register and dbus_force_option_works and was_registered:
+            register_opts['force'] = True
+        if dbus_has_enable_content_option:
+            register_opts['enable_content'] = "1"
+        # Wrap it as proper D-Bus dict
+        register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1)
+
+        connection_opts = {}
+        # Wrap it as proper D-Bus dict
+        connection_opts = dbus.Dictionary(connection_opts, signature='sv', variant_level=1)
+
+        bus = dbus.SystemBus()
+        register_server = bus.get_object('com.redhat.RHSM1',
+                                         '/com/redhat/RHSM1/RegisterServer')
+        address = register_server.Start(
+            SUBSCRIPTION_MANAGER_LOCALE,
+            dbus_interface='com.redhat.RHSM1.RegisterServer',
+        )
+
+        try:
+            # Use the private bus to register the system
+            self.module.debug('Connecting to the private DBus')
+            private_bus = dbus.connection.Connection(address)
+
+            try:
+                if activationkey:
+                    args = (
+                        org_id,
+                        [activationkey],
+                        register_opts,
+                        connection_opts,
+                        SUBSCRIPTION_MANAGER_LOCALE,
+                    )
+                    private_bus.call_blocking(
+                        'com.redhat.RHSM1',
+                        '/com/redhat/RHSM1/Register',
+                        'com.redhat.RHSM1.Register',
+                        'RegisterWithActivationKeys',
+                        'sasa{sv}a{sv}s',
+                        args,
+                        timeout=REGISTRATION_TIMEOUT,
+                    )
+                else:
+                    args = (
+                        org_id or '',
+                        username,
+                        password,
+                        register_opts,
+                        connection_opts,
+                        SUBSCRIPTION_MANAGER_LOCALE,
+                    )
+                    private_bus.call_blocking(
+                        'com.redhat.RHSM1',
+                        '/com/redhat/RHSM1/Register',
+                        'com.redhat.RHSM1.Register',
+                        'Register',
+                        'sssa{sv}a{sv}s',
+                        args,
+                        timeout=REGISTRATION_TIMEOUT,
+                    )
+
+            except dbus.exceptions.DBusException as e:
+                # Sometimes we get NoReply but the registration has succeeded.
+                # Check the registration status before deciding if this is an error.
+                if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply':
+                    if not self.is_registered:
+                        # Host is not registered so re-raise the error
+                        raise
+                else:
+                    raise
+                # Host was registered so continue
+        finally:
+            # Always shut down the private bus
+            self.module.debug('Shutting down private DBus instance')
+            register_server.Stop(
+                SUBSCRIPTION_MANAGER_LOCALE,
+                dbus_interface='com.redhat.RHSM1.RegisterServer',
+            )
+
+        # Make sure to refresh all the local data: this will fetch all the
+        # certificates, update redhat.repo, etc.
+        self.module.run_command([SUBMAN_CMD, 'refresh'],
+                                check_rc=True, expand_user_and_vars=False)
+
+        if auto_attach:
+            args = [SUBMAN_CMD, 'attach', '--auto']
+            self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+        # There is no support for setting the release via D-Bus, so invoke
+        # the CLI for this.
+        if release:
+            args = [SUBMAN_CMD, 'release', '--set', release]
+            self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+    def unsubscribe(self, serials=None):
+        '''
+        Unsubscribe a system from subscribed channels
+        Args:
+          serials(list or None): list of serials to unsubscribe. If serials
+                                 is None, all subscribed channels are
+                                 removed; an empty list removes nothing.
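+        Example (illustrative; the serial is a placeholder):
+            rhsm.unsubscribe(serials=['1234567890'])  # remove one pool
+            rhsm.unsubscribe()                        # remove all pools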
+        Raises:
+          * Exception - if error occurs while running command
+        '''
+        items = []
+        if serials is None:
+            items = ["--all"]
+        elif serials:
+            items = ["--serial=%s" % s for s in serials]
+
+        if items:
+            args = [SUBMAN_CMD, 'remove'] + items
+            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        return serials
+
+    def unregister(self):
+        '''
+        Unregister a currently registered system
+        Raises:
+          * Exception - if error occurs while running command
+        '''
+        args = [SUBMAN_CMD, 'unregister']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        self.update_plugin_conf('rhnplugin', False)
+        self.update_plugin_conf('subscription-manager', False)
+
+    def subscribe_by_pool_ids(self, pool_ids):
+        """
+        Try to subscribe to the list of pool IDs
+        """
+        available_pools = RhsmPools(self.module)
+
+        available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+        for pool_id, quantity in sorted(pool_ids.items()):
+            if pool_id in available_pool_ids:
+                args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+                if quantity is not None:
+                    args.extend(['--quantity', to_native(quantity)])
+                rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+            else:
+                self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+        return pool_ids
+
+    def update_subscriptions_by_pool_ids(self, pool_ids):
+        changed = False
+        consumed_pools = RhsmPools(self.module, consumed=True)
+
+        existing_pools = {}
+        serials_to_remove = []
+        for p in consumed_pools:
+            pool_id = p.get_pool_id()
+            quantity_used = p.get_quantity_used()
+            existing_pools[pool_id] = quantity_used
+
+            quantity = pool_ids.get(pool_id, 0)
+            if quantity is not None and quantity != quantity_used:
+                serials_to_remove.append(p.Serial)
+
+        serials = self.unsubscribe(serials=serials_to_remove)
+
+        missing_pools = {}
+        for pool_id, quantity in sorted(pool_ids.items()):
+            quantity_used = existing_pools.get(pool_id, 0)
+            if (quantity is None and quantity_used == 0) or quantity not in (None, 0, quantity_used):
+                missing_pools[pool_id] = quantity
+
+        self.subscribe_by_pool_ids(missing_pools)
+
+        if missing_pools or serials:
+            changed = True
+        return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),
+                'unsubscribed_serials': serials}
+
+    def sync_syspurpose(self):
+        """
+        Try to synchronize syspurpose attributes with server
+        """
+        # Running 'subscription-manager status' synchronizes the syspurpose
+        # attributes with the server as a side effect of querying the status.
+        args = [SUBMAN_CMD, 'status']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+    '''
+    Convenience class for housing subscription information
+    '''
+
+    def __init__(self, module, **kwargs):
+        self.module = module
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+    def __str__(self):
+        return str(self._name)
+
+    def get_pool_id(self):
+        # The attribute is named 'PoolId' or 'PoolID' depending on the
+        # subscription-manager version; check for each in turn.
+        pool_id = getattr(self, 'PoolId', None)
+        return pool_id if pool_id is not None else getattr(self, 'PoolID')
+
+    def get_quantity_used(self):
+        return int(getattr(self, 'QuantityUsed'))
+
+    def subscribe(self):
+        args = "subscription-manager attach --pool %s" % self.get_pool_id()
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        return rc == 0
+
+
+class RhsmPools(object):
+    """
+    This class is used for manipulating pools subscriptions with RHSM
+    """
+
+    def __init__(self, module, consumed=False):
+        self.module = module
+        self.products = self._load_product_list(consumed)
+
+    def __iter__(self):
+        return self.products.__iter__()
+
+    def _load_product_list(self, consumed=False):
+        """
+        Loads a list of all available or consumed pools for the system into a
+        data structure.
+
+        Args:
+            consumed(bool): if True list consumed pools, else list available pools (default False)
+        """
+        args = "subscription-manager list"
+        if consumed:
+            args += " --consumed"
+        else:
+            args += " --available"
+        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
+
+        products = []
+        for line in stdout.split('\n'):
+            # Remove leading+trailing whitespace
+            line = line.strip()
+            # An empty line implies the end of an output group
+            if len(line) == 0:
+                continue
+            # If a colon ':' is found, parse
+            elif ':' in line:
+                (key, value) = line.split(':', 1)
+                key = key.strip().replace(" ", "")  # Normalize key names by removing spaces
+                value = value.strip()
+                if key in ['ProductName', 'SubscriptionName']:
+                    # Remember the name for later processing
+                    products.append(RhsmPool(self.module, _name=value, key=value))
+                elif products:
+                    # Associate value with most recently recorded product
+                    setattr(products[-1], key, value)
+                # FIXME - log some warning?
+                # else:
+                #    warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+        return products
+
+    def filter_pools(self, regexp='^$'):
+        '''
+        Yield the RhsmPools whose pool ID matches the provided regular expression
+        '''
+        r = re.compile(regexp)
+        for product in self.products:
+            if r.search(product.get_pool_id()):
+                yield product
+
+    def filter_products(self, regexp='^$'):
+        '''
+        Yield the RhsmPools whose product name matches the provided regular expression
+        '''
+        r = re.compile(regexp)
+        for product in self.products:
+            if r.search(product._name):
+                yield product
+
+
+class SysPurpose(object):
+    """
+    This class is used for reading and writing to the syspurpose.json file
+    """
+
+    SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+    ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
+
+    def __init__(self, path=None):
+        """
+        Initialize class used for reading syspurpose json file
+        """
+        self.path = path or self.SYSPURPOSE_FILE_PATH
+
+    def update_syspurpose(self, new_syspurpose):
+        """
+        Try to update current syspurpose with new attributes from new_syspurpose
+        """
+        syspurpose = {}
+        syspurpose_changed = False
+        for key, value in new_syspurpose.items():
+            if key in self.ALLOWED_ATTRIBUTES:
+                if value is not None:
+                    syspurpose[key] = value
+            elif key == 'sync':
+                pass
+            else:
+                raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+                               (key, self.ALLOWED_ATTRIBUTES))
+        current_syspurpose = self._read_syspurpose()
+        if current_syspurpose != syspurpose:
+            syspurpose_changed = True
+            # Update current syspurpose with new values
+            current_syspurpose.update(syspurpose)
+            # When some key is not listed in new syspurpose, then delete it from current syspurpose
+            # and ignore custom attributes created by user (e.g. "foo": "bar")
+            for key in list(current_syspurpose):
+                if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+                    del current_syspurpose[key]
+            self._write_syspurpose(current_syspurpose)
+        return syspurpose_changed
+
+    def _write_syspurpose(self, new_syspurpose):
+        """
+        Write the new syspurpose attributes to the JSON file.
+        """
+        with open(self.path, "w") as fp:
+            fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+    def _read_syspurpose(self):
+        """
+        Read the current syspurpose from the JSON file.
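+
+        Returns a dict such as (illustrative values):
+            {"role": "Red Hat Enterprise Linux Server", "usage": "Production"}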
+ """ + current_syspurpose = {} + try: + with open(self.path, "r") as fp: + content = fp.read() + except IOError: + pass + else: + current_syspurpose = json.loads(content) + return current_syspurpose + + +def main(): + + # Note: the default values for parameters are: + # 'type': 'str', 'default': None, 'required': False + # So there is no need to repeat these values for each parameter. + module = AnsibleModule( + argument_spec={ + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'username': {}, + 'password': {'no_log': True}, + 'token': {'no_log': True}, + 'server_hostname': {}, + 'server_insecure': {}, + 'server_prefix': {}, + 'server_port': {}, + 'rhsm_baseurl': {}, + 'rhsm_repo_ca_cert': {}, + 'auto_attach': {'type': 'bool'}, + 'activationkey': {'no_log': True}, + 'org_id': {}, + 'environment': {}, + 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, + 'consumer_type': {}, + 'consumer_name': {}, + 'consumer_id': {}, + 'force_register': {'default': False, 'type': 'bool'}, + 'server_proxy_hostname': {}, + 'server_proxy_scheme': {}, + 'server_proxy_port': {}, + 'server_proxy_user': {}, + 'server_proxy_password': {'no_log': True}, + 'release': {}, + 'syspurpose': { + 'type': 'dict', + 'options': { + 'role': {}, + 'usage': {}, + 'service_level_agreement': {}, + 'addons': {'type': 'list', 'elements': 'str'}, + 'sync': {'type': 'bool', 'default': False} + } + } + }, + required_together=[['username', 'password'], + ['server_proxy_hostname', 'server_proxy_port'], + ['server_proxy_user', 'server_proxy_password']], + mutually_exclusive=[['activationkey', 'username'], + ['activationkey', 'token'], + ['token', 'username'], + ['activationkey', 'consumer_id'], + ['activationkey', 'environment'], + ['activationkey', 'auto_attach']], + required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]], + ) + + if getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + # Load RHSM configuration from file + rhsm = Rhsm(module) + + state = module.params['state'] + username = module.params['username'] + password = module.params['password'] + token = module.params['token'] + server_hostname = module.params['server_hostname'] + server_insecure = module.params['server_insecure'] + server_prefix = module.params['server_prefix'] + server_port = module.params['server_port'] + rhsm_baseurl = module.params['rhsm_baseurl'] + rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] + auto_attach = module.params['auto_attach'] + activationkey = module.params['activationkey'] + org_id = module.params['org_id'] + if activationkey and not org_id: + module.fail_json(msg='org_id is required when using activationkey') + environment = module.params['environment'] + pool_ids = {} + for value in module.params['pool_ids']: + if isinstance(value, dict): + if len(value) != 1: + module.fail_json(msg='Unable to parse pool_ids option.') + pool_id, quantity = list(value.items())[0] + else: + pool_id, quantity = value, None + pool_ids[pool_id] = quantity + consumer_type = module.params["consumer_type"] + consumer_name = module.params["consumer_name"] + consumer_id = module.params["consumer_id"] + force_register = module.params["force_register"] + server_proxy_hostname = module.params['server_proxy_hostname'] + server_proxy_port = module.params['server_proxy_port'] + server_proxy_user = module.params['server_proxy_user'] + server_proxy_password = module.params['server_proxy_password'] + release = 
module.params['release'] + syspurpose = module.params['syspurpose'] + + global SUBMAN_CMD + SUBMAN_CMD = module.get_bin_path('subscription-manager', True) + + syspurpose_changed = False + if syspurpose is not None: + try: + syspurpose_changed = SysPurpose().update_syspurpose(syspurpose) + except Exception as err: + module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err)) + + # Ensure system is registered + if state == 'present': + + # Cache the status of the system before the changes + was_registered = rhsm.is_registered + + # Register system + if was_registered and not force_register: + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + try: + rhsm.sync_syspurpose() + except Exception as e: + module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) + if pool_ids: + try: + result = rhsm.update_subscriptions_by_pool_ids(pool_ids) + except Exception as e: + module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(**result) + else: + if syspurpose_changed is True: + module.exit_json(changed=True, msg="Syspurpose attributes changed.") + else: + module.exit_json(changed=False, msg="System already registered.") + else: + if not username and not activationkey and not token: + module.fail_json(msg="state is present but any of the following are missing: username, activationkey, token") + try: + rhsm.enable() + rhsm.configure(**module.params) + rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, + environment, release) + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + rhsm.sync_syspurpose() + if pool_ids: + subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) + else: + subscribed_pool_ids = [] + except Exception as e: + module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(changed=True, + msg="System successfully registered to '%s'." % server_hostname, + subscribed_pool_ids=subscribed_pool_ids) + + # Ensure system is *not* registered + if state == 'absent': + if not rhsm.is_registered: + module.exit_json(changed=False, msg="System already unregistered.") + else: + try: + rhsm.unregister() + except Exception as e: + module.fail_json(msg="Failed to unregister: %s" % to_native(e)) + else: + module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py deleted file mode 120000 index d9b9f4ddab..0000000000 --- a/plugins/modules/redis.py +++ /dev/null @@ -1 +0,0 @@ -./database/misc/redis.py \ No newline at end of file diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py new file mode 100644 index 0000000000..f442599368 --- /dev/null +++ b/plugins/modules/redis.py @@ -0,0 +1,338 @@ +#!/usr/bin/python + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redis +short_description: Various redis commands, replica and flush +description: + - Unified utility to interact with redis instances. 
+extends_documentation_fragment:
+  - community.general.redis
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  command:
+    description:
+      - The selected redis command.
+      - V(config) ensures a configuration setting on an instance.
+      - V(flush) flushes the whole instance or a specified db.
+      - V(replica) sets a redis instance in replica or master mode. (V(slave) is an alias for V(replica)).
+    choices: [config, flush, replica, slave]
+    type: str
+  tls:
+    default: false
+    version_added: 4.6.0
+  login_user:
+    version_added: 4.6.0
+  validate_certs:
+    version_added: 4.6.0
+  ca_certs:
+    version_added: 4.6.0
+  master_host:
+    description:
+      - The host of the master instance [replica command].
+    type: str
+  master_port:
+    description:
+      - The port of the master instance [replica command].
+    type: int
+  replica_mode:
+    description:
+      - The mode of the redis instance [replica command].
+      - V(slave) is an alias for V(replica).
+    default: replica
+    choices: [master, replica, slave]
+    type: str
+    aliases:
+      - slave_mode
+  db:
+    description:
+      - The database to flush (used in DB mode) [flush command].
+    type: int
+  flush_mode:
+    description:
+      - Type of flush (all the DBs in a redis instance or a specific one) [flush command].
+    default: all
+    choices: [all, db]
+    type: str
+  name:
+    description:
+      - A redis config key.
+    type: str
+  value:
+    description:
+      - A redis config value. When memory size is needed, it is possible to specify it in the usual form of 1KB, 2M, 400MB
+        where the base is 1024. Units are case insensitive, in other words 1m = 1mb = 1M = 1MB.
+    type: str
+
+notes:
+  - Requires the C(redis-py) Python package on the remote host. You can install it with pip (C(pip install redis)) or with
+    a package manager. U(https://github.com/andymccurdy/redis-py).
+  - If the redis master instance that you are making a replica of is password protected, this needs to be set in the C(redis.conf)
+    file in the C(masterauth) variable.
+seealso: + - module: community.general.redis_info +requirements: [redis] +author: "Xabier Larrakoetxea (@slok)" +""" + +EXAMPLES = r""" +- name: Set local redis instance to be a replica of melee.island on port 6377 + community.general.redis: + command: replica + master_host: melee.island + master_port: 6377 + +- name: Deactivate replica mode + community.general.redis: + command: replica + replica_mode: master + +- name: Flush all the redis db + community.general.redis: + command: flush + flush_mode: all + +- name: Flush only one db in a redis instance + community.general.redis: + command: flush + db: 1 + flush_mode: db + +- name: Configure local redis to have 10000 max clients + community.general.redis: + command: config + name: maxclients + value: 10000 + +- name: Configure local redis maxmemory to 4GB + community.general.redis: + command: config + name: maxmemory + value: 4GB + +- name: Configure local redis to have lua time limit of 100 ms + community.general.redis: + command: config + name: lua-time-limit + value: 100 + +- name: Connect using TLS and certificate authentication + community.general.redis: + command: config + name: lua-time-limit + value: 100 + tls: true + ca_certs: /etc/redis/certs/ca.crt + client_cert_file: /etc/redis/certs/redis.crt + client_key_file: /etc/redis/certs/redis.key +""" + +import traceback + +REDIS_IMP_ERR = None +try: + import redis +except ImportError: + REDIS_IMP_ERR = traceback.format_exc() + redis_found = False +else: + redis_found = True + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, redis_auth_params) +import re + + +# Redis module specific support methods. +def set_replica_mode(client, master_host, master_port): + try: + return client.slaveof(master_host, master_port) + except Exception: + return False + + +def set_master_mode(client): + try: + return client.slaveof() + except Exception: + return False + + +def flush(client, db=None): + try: + if not isinstance(db, int): + return client.flushall() + else: + # The passed client has been connected to the database already + return client.flushdb() + except Exception: + return False + + +# Module execution. 
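+#
+# The helper functions above are thin wrappers over redis-py primitives; an
+# illustrative equivalent session (the host and port are assumptions):
+#
+#     r = redis.StrictRedis(host='localhost', port=6379)
+#     r.slaveof('melee.island', 6377)   # what set_replica_mode() issues
+#     r.slaveof()                       # what set_master_mode() issues
+#     r.flushall()                      # flush(client) without a db
+#     r.flushdb()                       # flush(client, db) on the connected db
+#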
+def main(): + redis_auth_args = redis_auth_argument_spec(tls_default=False) + module_args = dict( + command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), + master_host=dict(type='str'), + master_port=dict(type='int'), + replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], + aliases=["slave_mode"]), + db=dict(type='int'), + flush_mode=dict(type='str', default='all', choices=['all', 'db']), + name=dict(type='str'), + value=dict(type='str'), + ) + module_args.update(redis_auth_args) + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + fail_imports(module, module.params['tls']) + + redis_params = redis_auth_params(module) + + command = module.params['command'] + if command == "slave": + command = "replica" + + # Replica Command section ----------- + if command == "replica": + master_host = module.params['master_host'] + master_port = module.params['master_port'] + mode = module.params['replica_mode'] + if mode == "slave": + mode = "replica" + + # Check if we have all the data + if mode == "replica": # Only need data if we want to be replica + if not master_host: + module.fail_json(msg='In replica mode master host must be provided') + + if not master_port: + module.fail_json(msg='In replica mode master port must be provided') + + # Connect and check + r = redis.StrictRedis(**redis_params) + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + # Check if we are already in the mode that we want + info = r.info() + if mode == "master" and info["role"] == "master": + module.exit_json(changed=False, mode=mode) + + elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: + status = dict( + status=mode, + master_host=master_host, + master_port=master_port, + ) + module.exit_json(changed=False, mode=status) + else: + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "replica": + if module.check_mode or set_replica_mode(r, master_host, master_port): + info = r.info() + status = { + 'status': mode, + 'master_host': master_host, + 'master_port': master_port, + } + module.exit_json(changed=True, mode=status) + else: + module.fail_json(msg='Unable to set replica mode') + + else: + if module.check_mode or set_master_mode(r): + module.exit_json(changed=True, mode=mode) + else: + module.fail_json(msg='Unable to set master mode') + + # flush Command section ----------- + elif command == "flush": + db = module.params['db'] + mode = module.params['flush_mode'] + + # Check if we have all the data + if mode == "db": + if db is None: + module.fail_json(msg="In db mode the db number must be provided") + + # Connect and check + r = redis.StrictRedis(db=db, **redis_params) + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "all": + if module.check_mode or flush(r): + module.exit_json(changed=True, flushed=True) + else: # Flush never fails :) + module.fail_json(msg="Unable to flush all databases") + + else: + if module.check_mode or flush(r, db): + module.exit_json(changed=True, flushed=True, db=db) + else: # Flush never fails :) + module.fail_json(msg="Unable to 
flush '%d' database" % db) + elif command == 'config': + name = module.params['name'] + + try: # try to parse the value as if it were the memory size + if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()): + value = str(human_to_bytes(module.params['value'].upper())) + else: + value = module.params['value'] + except ValueError: + value = module.params['value'] + + r = redis.StrictRedis(**redis_params) + + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + try: + old_value = r.config_get(name)[name] + except Exception as e: + module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc()) + changed = old_value != value + + if module.check_mode or not changed: + module.exit_json(changed=changed, name=name, value=value) + else: + try: + r.config_set(name, value) + except Exception as e: + module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc()) + module.exit_json(changed=changed, name=name, value=value) + else: + module.fail_json(msg='A valid command must be provided') + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redis_data.py b/plugins/modules/redis_data.py deleted file mode 120000 index 07cbc80a81..0000000000 --- a/plugins/modules/redis_data.py +++ /dev/null @@ -1 +0,0 @@ -./database/misc/redis_data.py \ No newline at end of file diff --git a/plugins/modules/redis_data.py b/plugins/modules/redis_data.py new file mode 100644 index 0000000000..dfca11c898 --- /dev/null +++ b/plugins/modules/redis_data.py @@ -0,0 +1,253 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redis_data +short_description: Set key value pairs in Redis +version_added: 3.7.0 +description: + - Set key value pairs in Redis database. +author: "Andreas Botzner (@paginabianca)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + description: + - Database key. + required: true + type: str + value: + description: + - Value that key should be set to. + required: false + type: str + expiration: + description: + - Expiration time in milliseconds. Setting this option always results in a change in the database. + required: false + type: int + non_existing: + description: + - Only set key if it does not already exist. + required: false + type: bool + existing: + description: + - Only set key if it already exists. + required: false + type: bool + keep_ttl: + description: + - Retain the time to live associated with the key. + required: false + type: bool + state: + description: + - State of the key. 
+ default: present + type: str + choices: + - present + - absent + +extends_documentation_fragment: + - community.general.redis.documentation + - community.general.attributes + +seealso: + - module: community.general.redis_data_incr + - module: community.general.redis_data_info + - module: community.general.redis +""" + +EXAMPLES = r""" +- name: Set key foo=bar on localhost with no username + community.general.redis_data: + login_host: localhost + login_password: supersecret + key: foo + value: bar + state: present + +- name: Set key foo=bar if non existing with expiration of 30s + community.general.redis_data: + login_host: localhost + login_password: supersecret + key: foo + value: bar + non_existing: true + expiration: 30000 + state: present + +- name: Set key foo=bar if existing and keep current TTL + community.general.redis_data: + login_host: localhost + login_password: supersecret + key: foo + value: bar + existing: true + keep_ttl: true + +- name: Set key foo=bar on redishost with custom ca-cert file + community.general.redis_data: + login_host: redishost + login_password: supersecret + login_user: someuser + validate_certs: true + ssl_ca_certs: /path/to/ca/certs + key: foo + value: bar + +- name: Delete key foo on localhost with no username + community.general.redis_data: + login_host: localhost + login_password: supersecret + key: foo + state: absent +""" + +RETURN = r""" +old_value: + description: Value of key before setting. + returned: on_success if O(state=present) and key exists in database. + type: str + sample: 'old_value_of_key' +value: + description: Value key was set to. + returned: on success if O(state=present). + type: str + sample: 'new_value_of_key' +msg: + description: A short message. + returned: always + type: str + sample: 'Set key: foo to bar' +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, RedisAnsible) + + +def main(): + redis_auth_args = redis_auth_argument_spec() + module_args = dict( + key=dict(type='str', required=True, no_log=False), + value=dict(type='str'), + expiration=dict(type='int'), + non_existing=dict(type='bool'), + existing=dict(type='bool'), + keep_ttl=dict(type='bool'), + state=dict(type='str', default='present', + choices=['present', 'absent']), + ) + module_args.update(redis_auth_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[('state', 'present', ('value',))], + mutually_exclusive=[['non_existing', 'existing'], + ['keep_ttl', 'expiration']],) + fail_imports(module) + + redis = RedisAnsible(module) + + key = module.params['key'] + value = module.params['value'] + px = module.params['expiration'] + nx = module.params['non_existing'] + xx = module.params['existing'] + keepttl = module.params['keep_ttl'] + state = module.params['state'] + set_args = {'name': key, 'value': value, 'px': px, + 'nx': nx, 'xx': xx, 'keepttl': keepttl} + + result = {'changed': False} + + old_value = None + try: + old_value = redis.connection.get(key) + except Exception as e: + msg = 'Failed to get value of key: {0} with exception: {1}'.format( + key, str(e)) + result['msg'] = msg + module.fail_json(**result) + + if state == 'absent': + if module.check_mode: + if old_value is None: + msg = 'Key: {0} not present'.format(key) + result['msg'] = msg + module.exit_json(**result) + else: + msg = 'Deleted key: {0}'.format(key) + result['msg'] = msg + module.exit_json(**result) + try: 
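+            # DEL returns the number of keys that were removed, so 0 means
+            # the key was not there in the first place.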
+            ret = redis.connection.delete(key)
+            if ret == 0:
+                msg = 'Key: {0} not present'.format(key)
+                result['msg'] = msg
+                module.exit_json(**result)
+            else:
+                msg = 'Deleted key: {0}'.format(key)
+                result['msg'] = msg
+                result['changed'] = True
+                module.exit_json(**result)
+        except Exception as e:
+            msg = 'Failed to delete key: {0} with exception: {1}'.format(
+                key, str(e))
+            result['msg'] = msg
+            module.fail_json(**result)
+
+    result['old_value'] = old_value
+    if old_value == value and keepttl is not False and px is None:
+        msg = 'Key {0} already has desired value'.format(key)
+        result['msg'] = msg
+        result['value'] = value
+        module.exit_json(**result)
+    if module.check_mode:
+        result['msg'] = 'Set key: {0}'.format(key)
+        result['value'] = value
+        module.exit_json(**result)
+    try:
+        ret = redis.connection.set(**set_args)
+        if ret is None:
+            if nx:
+                msg = 'Could not set key: {0}. Key already present.'.format(
+                    key)
+            else:
+                msg = 'Could not set key: {0}. Key not present.'.format(key)
+            result['msg'] = msg
+            module.fail_json(**result)
+        msg = 'Set key: {0}'.format(key)
+        result['msg'] = msg
+        result['changed'] = True
+        result['value'] = value
+        module.exit_json(**result)
+    except Exception as e:
+        msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e))
+        result['msg'] = msg
+        module.fail_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/redis_data_incr.py b/plugins/modules/redis_data_incr.py
deleted file mode 120000
index 07d54aa8af..0000000000
--- a/plugins/modules/redis_data_incr.py
+++ /dev/null
@@ -1 +0,0 @@
-./database/misc/redis_data_incr.py
\ No newline at end of file
diff --git a/plugins/modules/redis_data_incr.py b/plugins/modules/redis_data_incr.py
new file mode 100644
index 0000000000..f6c1b67401
--- /dev/null
+++ b/plugins/modules/redis_data_incr.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Andreas Botzner
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: redis_data_incr
+short_description: Increment keys in Redis
+version_added: 4.0.0
+description:
+  - Increment integer or float keys in a Redis database and get the new value.
+  - Default increment for all keys is V(1). For specific increments use the O(increment_int) and O(increment_float) options.
+author: "Andreas Botzner (@paginabianca)"
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - For C(check_mode) to work, the specified O(login_user) needs permission to run the C(GET) command on the key, otherwise
+        the module fails.
+      - When using C(check_mode) the module tries to calculate the value that Redis would return. If the key is not present,
+        V(0.0) is used as value.
+  diff_mode:
+    support: none
+options:
+  key:
+    description:
+      - Database key.
+    type: str
+    required: true
+  increment_int:
+    description:
+      - Integer amount to increment the key by.
+    required: false
+    type: int
+  increment_float:
+    description:
+      - Float amount to increment the key by.
+      - This only works with keys that contain float values in their string representation.
+ type: float + required: false + + +extends_documentation_fragment: + - community.general.redis.documentation + - community.general.attributes + +seealso: + - module: community.general.redis_data + - module: community.general.redis_data_info + - module: community.general.redis +""" + +EXAMPLES = r""" +- name: Increment integer key foo on localhost with no username and print new value + community.general.redis_data_incr: + login_host: localhost + login_password: supersecret + key: foo + increment_int: 1 + register: result +- name: Print new value + debug: + var: result.value + +- name: Increment float key foo by 20.4 + community.general.redis_data_incr: + login_host: redishost + login_user: redisuser + login_password: somepass + key: foo + increment_float: '20.4' +""" + +RETURN = r""" +value: + description: Incremented value of key. + returned: on success + type: float + sample: '4039.4' +msg: + description: A short message. + returned: always + type: str + sample: 'Incremented key: foo by 20.4 to 65.9' +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, RedisAnsible) + + +def main(): + redis_auth_args = redis_auth_argument_spec() + module_args = dict( + key=dict(type='str', required=True, no_log=False), + increment_int=dict(type='int'), + increment_float=dict(type='float'), + ) + module_args.update(redis_auth_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + mutually_exclusive=[['increment_int', 'increment_float']], + ) + fail_imports(module) + + redis = RedisAnsible(module) + key = module.params['key'] + increment_float = module.params['increment_float'] + increment_int = module.params['increment_int'] + increment = 1 + if increment_float is not None: + increment = increment_float + elif increment_int is not None: + increment = increment_int + + result = {'changed': False} + if module.check_mode: + value = 0.0 + try: + res = redis.connection.get(key) + if res is not None: + value = float(res) + except ValueError as e: + msg = 'Value: {0} of key: {1} is not incrementable(int or float)'.format( + res, key) + result['msg'] = msg + module.fail_json(**result) + except Exception as e: + msg = 'Failed to get value of key: {0} with exception: {1}'.format( + key, str(e)) + result['msg'] = msg + module.fail_json(**result) + msg = 'Incremented key: {0} by {1} to {2}'.format( + key, increment, value + increment) + result['msg'] = msg + result['value'] = float(value + increment) + module.exit_json(**result) + + if increment_float is not None: + try: + value = redis.connection.incrbyfloat(key, increment) + msg = 'Incremented key: {0} by {1} to {2}'.format( + key, increment, value) + result['msg'] = msg + result['value'] = float(value) + result['changed'] = True + module.exit_json(**result) + except Exception as e: + msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( + key, increment, str(e)) + result['msg'] = msg + module.fail_json(**result) + elif increment_int is not None: + try: + value = redis.connection.incrby(key, increment) + msg = 'Incremented key: {0} by {1} to {2}'.format( + key, increment, value) + result['msg'] = msg + result['value'] = float(value) + result['changed'] = True + module.exit_json(**result) + except Exception as e: + msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( + key, increment, str(e)) + result['msg'] = msg + module.fail_json(**result) + else: + try: + 
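+            # No explicit increment was requested: fall back to INCR, which
+            # adds 1 to an integer key (missing keys are created as 0 first).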
+            value = redis.connection.incr(key)
+            msg = 'Incremented key: {0} to {1}'.format(key, value)
+            result['msg'] = msg
+            result['value'] = float(value)
+            result['changed'] = True
+            module.exit_json(**result)
+        except Exception as e:
+            msg = 'Failed to increment key: {0} with exception: {1}'.format(
+                key, str(e))
+            result['msg'] = msg
+            module.fail_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/redis_data_info.py b/plugins/modules/redis_data_info.py
deleted file mode 120000
index 14c54fb2d3..0000000000
--- a/plugins/modules/redis_data_info.py
+++ /dev/null
@@ -1 +0,0 @@
-database/misc/redis_data_info.py
\ No newline at end of file
diff --git a/plugins/modules/redis_data_info.py b/plugins/modules/redis_data_info.py
new file mode 100644
index 0000000000..ad0ea943b4
--- /dev/null
+++ b/plugins/modules/redis_data_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Andreas Botzner
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: redis_data_info
+short_description: Get value of key in Redis database
+version_added: 3.7.0
+description:
+  - Get value of keys in Redis database.
+author: "Andreas Botzner (@paginabianca)"
+options:
+  key:
+    description:
+      - Database key.
+    type: str
+    required: true
+
+extends_documentation_fragment:
+  - community.general.redis
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+seealso:
+  - module: community.general.redis_data
+  - module: community.general.redis_data_incr
+  - module: community.general.redis_info
+  - module: community.general.redis
+"""
+
+EXAMPLES = r"""
+- name: Get key foo=bar from localhost with no username
+  community.general.redis_data_info:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+
+- name: Get key foo=bar on redishost with custom ca-cert file
+  community.general.redis_data_info:
+    login_host: redishost
+    login_password: supersecret
+    login_user: someuser
+    validate_certs: true
+    ssl_ca_certs: /path/to/ca/certs
+    key: foo
+"""
+
+RETURN = r"""
+exists:
+  description: If the key exists in the database.
+  returned: on success
+  type: bool
+value:
+  description: Value key was set to.
+  returned: if existing
+  type: str
+  sample: 'value_of_some_key'
+msg:
+  description: A short message.
+  returned: always
+  type: str
+  sample: 'Got key: foo with value: bar'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redis import (
+    fail_imports, redis_auth_argument_spec, RedisAnsible)
+
+
+def main():
+    redis_auth_args = redis_auth_argument_spec()
+    module_args = dict(
+        key=dict(type='str', required=True, no_log=False),
+    )
+    module_args.update(redis_auth_args)
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+    )
+    fail_imports(module)
+
+    redis = RedisAnsible(module)
+
+    key = module.params['key']
+    result = {'changed': False}
+
+    value = None
+    try:
+        value = redis.connection.get(key)
+    except Exception as e:
+        msg = 'Failed to get value of key "{0}" with exception: {1}'.format(
+            key, str(e))
+        result['msg'] = msg
+        module.fail_json(**result)
+
+    if value is None:
+        msg = 'Key "{0}" does not exist in database'.format(key)
+        result['exists'] = False
+    else:
+        msg = 'Got key "{0}"'.format(key)
+        result['value'] = value
+        result['exists'] = True
+    result['msg'] = msg
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py
deleted file mode 120000
index c7080146f4..0000000000
--- a/plugins/modules/redis_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./database/misc/redis_info.py
\ No newline at end of file
diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py
new file mode 100644
index 0000000000..f4327a121f
--- /dev/null
+++ b/plugins/modules/redis_info.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+
+# Copyright (c) 2020, Pavlo Bashynskyi (@levonet)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: redis_info
+short_description: Gather information about Redis servers
+version_added: '0.2.0'
+description:
+  - Gathers information and statistics about Redis servers.
+extends_documentation_fragment:
+  - community.general.redis
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  login_user:
+    version_added: 7.5.0
+  validate_certs:
+    version_added: 7.5.0
+  tls:
+    default: false
+    version_added: 7.5.0
+  ca_certs:
+    version_added: 7.5.0
+  cluster:
+    default: false
+    description: Get information about cluster status as RV(cluster).
+    type: bool
+    version_added: 9.1.0
+seealso:
+  - module: community.general.redis
+author: "Pavlo Bashynskyi (@levonet)"
+"""
+
+EXAMPLES = r"""
+- name: Get server information
+  community.general.redis_info:
+  register: result
+
+- name: Print server information
+  ansible.builtin.debug:
+    var: result.info
+
+- name: Get server cluster information
+  community.general.redis_info:
+    cluster: true
+  register: result
+
+- name: Print server cluster information
+  ansible.builtin.debug:
+    var: result.cluster_info
+"""
+
+RETURN = r"""
+info:
+  description: The default set of server information sections U(https://redis.io/commands/info).
+ returned: success + type: dict + sample: + { + "active_defrag_hits": 0, + "active_defrag_key_hits": 0, + "active_defrag_key_misses": 0, + "active_defrag_misses": 0, + "active_defrag_running": 0, + "allocator_active": 932409344, + "allocator_allocated": 932062792, + "allocator_frag_bytes": 346552, + "allocator_frag_ratio": 1.0, + "allocator_resident": 947253248, + "allocator_rss_bytes": 14843904, + "allocator_rss_ratio": 1.02, + "aof_current_rewrite_time_sec": -1, + "aof_enabled": 0, + "aof_last_bgrewrite_status": "ok", + "aof_last_cow_size": 0, + "aof_last_rewrite_time_sec": -1, + "aof_last_write_status": "ok", + "aof_rewrite_in_progress": 0, + "aof_rewrite_scheduled": 0, + "arch_bits": 64, + "atomicvar_api": "atomic-builtin", + "blocked_clients": 0, + "client_recent_max_input_buffer": 4, + "client_recent_max_output_buffer": 0, + "cluster_enabled": 0, + "config_file": "", + "configured_hz": 10, + "connected_clients": 4, + "connected_slaves": 0, + "db0": { + "avg_ttl": 1945628530, + "expires": 16, + "keys": 3341411 + }, + "evicted_keys": 0, + "executable": "/data/redis-server", + "expired_keys": 9, + "expired_stale_perc": 1.72, + "expired_time_cap_reached_count": 0, + "gcc_version": "9.2.0", + "hz": 10, + "instantaneous_input_kbps": 0.0, + "instantaneous_ops_per_sec": 0, + "instantaneous_output_kbps": 0.0, + "keyspace_hits": 0, + "keyspace_misses": 0, + "latest_fork_usec": 0, + "lazyfree_pending_objects": 0, + "loading": 0, + "lru_clock": 11603632, + "master_repl_offset": 118831417, + "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e", + "master_replid2": "0000000000000000000000000000000000000000", + "maxmemory": 0, + "maxmemory_human": "0B", + "maxmemory_policy": "noeviction", + "mem_allocator": "jemalloc-5.1.0", + "mem_aof_buffer": 0, + "mem_clients_normal": 49694, + "mem_clients_slaves": 0, + "mem_fragmentation_bytes": 12355480, + "mem_fragmentation_ratio": 1.01, + "mem_not_counted_for_evict": 0, + "mem_replication_backlog": 1048576, + "migrate_cached_sockets": 0, + "multiplexing_api": "epoll", + "number_of_cached_scripts": 0, + "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64", + "process_id": 1, + "pubsub_channels": 0, + "pubsub_patterns": 0, + "rdb_bgsave_in_progress": 0, + "rdb_changes_since_last_save": 671, + "rdb_current_bgsave_time_sec": -1, + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": -1, + "rdb_last_cow_size": 0, + "rdb_last_save_time": 1588702236, + "redis_build_id": "a31260535f820267", + "redis_git_dirty": 0, + "redis_git_sha1": 0, + "redis_mode": "standalone", + "redis_version": "999.999.999", + "rejected_connections": 0, + "repl_backlog_active": 1, + "repl_backlog_first_byte_offset": 118707937, + "repl_backlog_histlen": 123481, + "repl_backlog_size": 1048576, + "role": "master", + "rss_overhead_bytes": -3051520, + "rss_overhead_ratio": 1.0, + "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4", + "second_repl_offset": 118830003, + "slave_expires_tracked_keys": 0, + "sync_full": 0, + "sync_partial_err": 0, + "sync_partial_ok": 0, + "tcp_port": 6379, + "total_commands_processed": 885, + "total_connections_received": 10, + "total_net_input_bytes": 802709255, + "total_net_output_bytes": 31754, + "total_system_memory": 135029538816, + "total_system_memory_human": "125.76G", + "uptime_in_days": 53, + "uptime_in_seconds": 4631778, + "used_cpu_sys": 4.668282, + "used_cpu_sys_children": 0.002191, + "used_cpu_user": 4.21088, + "used_cpu_user_children": 0.0, + "used_memory": 931908760, + "used_memory_dataset": 910774306, + "used_memory_dataset_perc": 
"97.82%", + "used_memory_human": "888.74M", + "used_memory_lua": 37888, + "used_memory_lua_human": "37.00K", + "used_memory_overhead": 21134454, + "used_memory_peak": 932015216, + "used_memory_peak_human": "888.84M", + "used_memory_peak_perc": "99.99%", + "used_memory_rss": 944201728, + "used_memory_rss_human": "900.46M", + "used_memory_scripts": 0, + "used_memory_scripts_human": "0B", + "used_memory_startup": 791264 + } +cluster: + description: The default set of cluster information sections U(https://redis.io/commands/cluster-info). + returned: success if O(cluster=true) + version_added: 9.1.0 + type: dict + sample: + { + "cluster_state": "ok", + "cluster_slots_assigned": 16384, + "cluster_slots_ok": 16384, + "cluster_slots_pfail": 0, + "cluster_slots_fail": 0, + "cluster_known_nodes": 6, + "cluster_size": 3, + "cluster_current_epoch": 6, + "cluster_my_epoch": 2, + "cluster_stats_messages_sent": 1483972, + "cluster_stats_messages_received": 1483968, + "total_cluster_links_buffer_limit_exceeded": 0 + } +""" + +import traceback + +REDIS_IMP_ERR = None +try: + from redis import StrictRedis + HAS_REDIS_PACKAGE = True +except ImportError: + REDIS_IMP_ERR = traceback.format_exc() + HAS_REDIS_PACKAGE = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, redis_auth_params) + + +def redis_client(**client_params): + return StrictRedis(**client_params) + + +# Module execution. +def main(): + module_args = dict( + cluster=dict(type='bool', default=False), + ) + module_args.update(redis_auth_argument_spec(tls_default=False)) + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + fail_imports(module, module.params['tls']) + + redis_params = redis_auth_params(module) + cluster = module.params['cluster'] + + # Connect and check + client = redis_client(**redis_params) + try: + client.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + info = client.info() + + result = dict(changed=False, info=info) + + if cluster: + result['cluster_info'] = client.execute_command('CLUSTER INFO') + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/cobbler/cobbler_sync.py b/plugins/modules/remote_management/cobbler/cobbler_sync.py deleted file mode 100644 index 157208216b..0000000000 --- a/plugins/modules/remote_management/cobbler/cobbler_sync.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Dag Wieers (dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cobbler_sync -short_description: Sync Cobbler -description: -- Sync Cobbler to commit changes. -options: - host: - description: - - The name or IP address of the Cobbler system. - default: 127.0.0.1 - type: str - port: - description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). - type: int - username: - description: - - The username to log in to Cobbler. - default: cobbler - type: str - password: - description: - - The password to log in to Cobbler. 
- type: str - use_ssl: - description: - - If C(no), an HTTP connection will be used instead of the default HTTPS connection. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) when used on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -author: -- Dag Wieers (@dagwieers) -todo: -notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). -''' - -EXAMPLES = r''' -- name: Commit Cobbler changes - community.general.cobbler_sync: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - run_once: yes - delegate_to: localhost -''' - -RETURN = r''' -# Default return values -''' - -import datetime -import ssl - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils.common.text.converters import to_text - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', default='127.0.0.1'), - port=dict(type='int'), - username=dict(type='str', default='cobbler'), - password=dict(type='str', no_log=True), - use_ssl=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - - username = module.params['username'] - password = module.params['password'] - port = module.params['port'] - use_ssl = module.params['use_ssl'] - validate_certs = module.params['validate_certs'] - - module.params['proto'] = 'https' if use_ssl else 'http' - if not port: - module.params['port'] = '443' if use_ssl else '80' - - result = dict( - changed=True, - ) - - start = datetime.datetime.utcnow() - - ssl_context = None - if not validate_certs: - try: - ssl_context = ssl._create_unverified_context() - except AttributeError: - # Legacy Python that doesn't verify HTTPS certificates by default - pass - else: - # Handle target environment that doesn't support HTTPS verification - ssl._create_default_https_context = ssl._create_unverified_context - - url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) - if ssl_context: - conn = xmlrpc_client.ServerProxy(url, context=ssl_context) - else: - conn = xmlrpc_client.Server(url) - - try: - token = conn.login(username, password) - except xmlrpc_client.Fault as e: - module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) - except Exception as e: - module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e))) - - if not module.check_mode: - try: - conn.sync(token) - except Exception as e: - module.fail_json(msg="Failed to sync Cobbler. 
{error}".format(error=to_text(e))) - - elapsed = datetime.datetime.utcnow() - start - module.exit_json(elapsed=elapsed.seconds, **result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/cobbler/cobbler_system.py b/plugins/modules/remote_management/cobbler/cobbler_system.py deleted file mode 100644 index e97be01239..0000000000 --- a/plugins/modules/remote_management/cobbler/cobbler_system.py +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Dag Wieers (dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cobbler_system -short_description: Manage system objects in Cobbler -description: -- Add, modify or remove systems in Cobbler -options: - host: - description: - - The name or IP address of the Cobbler system. - default: 127.0.0.1 - type: str - port: - description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). - type: int - username: - description: - - The username to log in to Cobbler. - default: cobbler - type: str - password: - description: - - The password to log in to Cobbler. - type: str - use_ssl: - description: - - If C(no), an HTTP connection will be used instead of the default HTTPS connection. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) when used on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - name: - description: - - The system name to manage. - type: str - properties: - description: - - A dictionary with system properties. - type: dict - interfaces: - description: - - A list of dictionaries containing interface options. - type: dict - sync: - description: - - Sync on changes. - - Concurrently syncing Cobbler is bound to fail. - type: bool - default: no - state: - description: - - Whether the system should be present, absent or a query is made. - choices: [ absent, present, query ] - default: present - type: str -author: -- Dag Wieers (@dagwieers) -notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). -''' - -EXAMPLES = r''' -- name: Ensure the system exists in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - name: myhost - properties: - profile: CentOS6-x86_64 - name_servers: [ 2.3.4.5, 3.4.5.6 ] - name_servers_search: foo.com, bar.com - interfaces: - eth0: - macaddress: 00:01:02:03:04:05 - ipaddress: 1.2.3.4 - delegate_to: localhost - -- name: Enable network boot in Cobbler - community.general.cobbler_system: - host: bdsol-aci-cobbler-01 - username: cobbler - password: ins3965! 
- name: bdsol-aci51-apic1.cisco.com - properties: - netboot_enabled: yes - state: present - delegate_to: localhost - -- name: Query all systems in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - state: query - register: cobbler_systems - delegate_to: localhost - -- name: Query a specific system in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - name: '{{ inventory_hostname }}' - state: query - register: cobbler_properties - delegate_to: localhost - -- name: Ensure the system does not exist in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - name: myhost - state: absent - delegate_to: localhost -''' - -RETURN = r''' -systems: - description: List of systems - returned: C(state=query) and C(name) is not provided - type: list -system: - description: (Resulting) information about the system we are working with - returned: when C(name) is provided - type: dict -''' - -import copy -import datetime -import ssl - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils.common.text.converters import to_text - -IFPROPS_MAPPING = dict( - bondingopts='bonding_opts', - bridgeopts='bridge_opts', - connected_mode='connected_mode', - cnames='cnames', - dhcptag='dhcp_tag', - dnsname='dns_name', - ifgateway='if_gateway', - interfacetype='interface_type', - interfacemaster='interface_master', - ipaddress='ip_address', - ipv6address='ipv6_address', - ipv6defaultgateway='ipv6_default_gateway', - ipv6mtu='ipv6_mtu', - ipv6prefix='ipv6_prefix', - ipv6secondaries='ipv6_secondariesu', - ipv6staticroutes='ipv6_static_routes', - macaddress='mac_address', - management='management', - mtu='mtu', - netmask='netmask', - static='static', - staticroutes='static_routes', - virtbridge='virt_bridge', -) - - -def getsystem(conn, name, token): - system = dict() - if name: - # system = conn.get_system(name, token) - systems = conn.find_system(dict(name=name), token) - if systems: - system = systems[0] - return system - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', default='127.0.0.1'), - port=dict(type='int'), - username=dict(type='str', default='cobbler'), - password=dict(type='str', no_log=True), - use_ssl=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - name=dict(type='str'), - interfaces=dict(type='dict'), - properties=dict(type='dict'), - sync=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present', 'query']), - ), - supports_check_mode=True, - ) - - username = module.params['username'] - password = module.params['password'] - port = module.params['port'] - use_ssl = module.params['use_ssl'] - validate_certs = module.params['validate_certs'] - - name = module.params['name'] - state = module.params['state'] - - module.params['proto'] = 'https' if use_ssl else 'http' - if not port: - module.params['port'] = '443' if use_ssl else '80' - - result = dict( - changed=False, - ) - - start = datetime.datetime.utcnow() - - ssl_context = None - if not validate_certs: - try: - ssl_context = ssl._create_unverified_context() - except AttributeError: - # Legacy Python that doesn't verify HTTPS certificates by default - pass - else: - # Handle target environment that doesn't 
support HTTPS verification - ssl._create_default_https_context = ssl._create_unverified_context - - url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) - if ssl_context: - conn = xmlrpc_client.ServerProxy(url, context=ssl_context) - else: - conn = xmlrpc_client.Server(url) - - try: - token = conn.login(username, password) - except xmlrpc_client.Fault as e: - module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) - except Exception as e: - module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params)) - - system = getsystem(conn, name, token) - # result['system'] = system - - if state == 'query': - if name: - result['system'] = system - else: - # Turn it into a dictionary of dictionaries - # all_systems = conn.get_systems() - # result['systems'] = { system['name']: system for system in all_systems } - - # Return a list of dictionaries - result['systems'] = conn.get_systems() - - elif state == 'present': - - if system: - # Update existing entry - system_id = conn.get_system_handle(name, token) - - for key, value in iteritems(module.params['properties']): - if key not in system: - module.warn("Property '{0}' is not a valid system property.".format(key)) - if system[key] != value: - try: - conn.modify_system(system_id, key, value, token) - result['changed'] = True - except Exception as e: - module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) - - else: - # Create a new entry - system_id = conn.new_system(token) - conn.modify_system(system_id, 'name', name, token) - result['changed'] = True - - if module.params['properties']: - for key, value in iteritems(module.params['properties']): - try: - conn.modify_system(system_id, key, value, token) - except Exception as e: - module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) - - # Add interface properties - interface_properties = dict() - if module.params['interfaces']: - for device, values in iteritems(module.params['interfaces']): - for key, value in iteritems(values): - if key == 'name': - continue - if key not in IFPROPS_MAPPING: - module.warn("Property '{0}' is not a valid system property.".format(key)) - if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value: - result['changed'] = True - interface_properties['{0}-{1}'.format(key, device)] = value - - if result['changed'] is True: - conn.modify_system(system_id, "modify_interface", interface_properties, token) - - # Only save when the entry was changed - if not module.check_mode and result['changed']: - conn.save_system(system_id, token) - - elif state == 'absent': - - if system: - if not module.check_mode: - conn.remove_system(name, token) - result['changed'] = True - - if not module.check_mode and module.params['sync'] and result['changed']: - try: - conn.sync(token) - except Exception as e: - module.fail_json(msg="Failed to sync Cobbler. 
{0}".format(to_text(e))) - - if state in ('absent', 'present'): - result['system'] = getsystem(conn, name, token) - - if module._diff: - result['diff'] = dict(before=system, after=result['system']) - - elapsed = datetime.datetime.utcnow() - start - module.exit_json(elapsed=elapsed.seconds, **result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/hpilo/hpilo_boot.py b/plugins/modules/remote_management/hpilo/hpilo_boot.py deleted file mode 100644 index 1e37aee3b5..0000000000 --- a/plugins/modules/remote_management/hpilo/hpilo_boot.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: hpilo_boot -author: Dag Wieers (@dagwieers) -short_description: Boot system using specific media through HP iLO interface -description: -- "This module boots a system through its HP iLO interface. The boot media - can be one of: cdrom, floppy, hdd, network or usb." -- This module requires the hpilo python module. -options: - host: - description: - - The HP iLO hostname/address that is linked to the physical system. - required: true - login: - description: - - The login name to authenticate to the HP iLO interface. - default: Administrator - password: - description: - - The password to authenticate to the HP iLO interface. - default: admin - media: - description: - - The boot media to boot the system from - choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ] - image: - description: - - The URL of a cdrom, floppy or usb boot media image. - protocol://username:password@hostname:port/filename - - protocol is either 'http' or 'https' - - username:password is optional - - port is optional - state: - description: - - The state of the boot media. - - "no_boot: Do not boot from the device" - - "boot_once: Boot from the device once and then notthereafter" - - "boot_always: Boot from the device each time the server is rebooted" - - "connect: Connect the virtual media device and set to boot_always" - - "disconnect: Disconnects the virtual media device and set to no_boot" - - "poweroff: Power off the server" - default: boot_once - choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ] - force: - description: - - Whether to force a reboot (even when the system is already booted). - - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. - default: no - type: bool - ssl_version: - description: - - Change the ssl_version used. - default: TLSv1 - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] -requirements: -- python-hpilo -notes: -- To use a USB key image you need to specify floppy as boot media. -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). 
-''' - -EXAMPLES = r''' -- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server - community.general.hpilo_boot: - host: YOUR_ILO_ADDRESS - login: YOUR_ILO_LOGIN - password: YOUR_ILO_PASSWORD - media: cdrom - image: http://some-web-server/iso/boot.iso - when: cmdb_hwmodel.startswith('HP ') - delegate_to: localhost - -- name: Power off a server - community.general.hpilo_boot: - host: YOUR_ILO_HOST - login: YOUR_ILO_LOGIN - password: YOUR_ILO_PASSWORD - state: poweroff - delegate_to: localhost -''' - -RETURN = ''' -# Default return values -''' - -import time -import traceback -import warnings - -HPILO_IMP_ERR = None -try: - import hpilo - HAS_HPILO = True -except ImportError: - HPILO_IMP_ERR = traceback.format_exc() - HAS_HPILO = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -# Suppress warnings from hpilo -warnings.simplefilter('ignore') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', required=True), - login=dict(type='str', default='Administrator'), - password=dict(type='str', default='admin', no_log=True), - media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']), - image=dict(type='str'), - state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), - force=dict(type='bool', default=False), - ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), - ) - ) - - if not HAS_HPILO: - module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) - - host = module.params['host'] - login = module.params['login'] - password = module.params['password'] - media = module.params['media'] - image = module.params['image'] - state = module.params['state'] - force = module.params['force'] - ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) - - ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) - changed = False - status = {} - power_status = 'UNKNOWN' - - if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'): - - # Workaround for: Error communicating with iLO: Problem manipulating EV - try: - ilo.set_one_time_boot(media) - except hpilo.IloError: - time.sleep(60) - ilo.set_one_time_boot(media) - - # TODO: Verify if image URL exists/works - if image: - ilo.insert_virtual_media(media, image) - changed = True - - if media == 'cdrom': - ilo.set_vm_status('cdrom', state, True) - status = ilo.get_vm_status() - changed = True - elif media in ('floppy', 'usb'): - ilo.set_vf_status(state, True) - status = ilo.get_vf_status() - changed = True - - # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot - if state in ('boot_once', 'boot_always') or force: - - power_status = ilo.get_host_power_status() - - if not force and power_status == 'ON': - module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' 
% host) - - if power_status == 'ON': - ilo.warm_boot_server() -# ilo.cold_boot_server() - changed = True - else: - ilo.press_pwr_btn() -# ilo.reset_server() -# ilo.set_host_power(host_power=True) - changed = True - - elif state in ('poweroff'): - - power_status = ilo.get_host_power_status() - - if not power_status == 'OFF': - ilo.hold_pwr_btn() -# ilo.set_host_power(host_power=False) - changed = True - - module.exit_json(changed=changed, power=power_status, **status) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py deleted file mode 100644 index 2b6c30abd6..0000000000 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: hpilo_info -author: Dag Wieers (@dagwieers) -short_description: Gather information through an HP iLO interface -description: -- This module gathers information on a specific system using its HP iLO interface. - These information includes hardware and network related information useful - for provisioning (e.g. macaddress, uuid). -- This module requires the C(hpilo) python module. -- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)! -options: - host: - description: - - The HP iLO hostname/address that is linked to the physical system. - required: true - login: - description: - - The login name to authenticate to the HP iLO interface. - default: Administrator - password: - description: - - The password to authenticate to the HP iLO interface. - default: admin - ssl_version: - description: - - Change the ssl_version used. - default: TLSv1 - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] -requirements: -- hpilo -notes: -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' - -EXAMPLES = r''' -- name: Gather facts from a HP iLO interface only if the system is an HP server - community.general.hpilo_info: - host: YOUR_ILO_ADDRESS - login: YOUR_ILO_LOGIN - password: YOUR_ILO_PASSWORD - when: cmdb_hwmodel.startswith('HP ') - delegate_to: localhost - register: results - -- ansible.builtin.fail: - msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' 
- when: cmdb_serialno != results.hw_system_serial -''' - -RETURN = r''' -# Typical output of HP iLO_info for a physical system -hw_bios_date: - description: BIOS date - returned: always - type: str - sample: 05/05/2011 - -hw_bios_version: - description: BIOS version - returned: always - type: str - sample: P68 - -hw_ethX: - description: Interface information (for each interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:55 - macaddress_dash: 00-11-22-33-44-55 - -hw_eth_ilo: - description: Interface information (for the iLO network interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:BA - - macaddress_dash: 00-11-22-33-44-BA - -hw_product_name: - description: Product name - returned: always - type: str - sample: ProLiant DL360 G7 - -hw_product_uuid: - description: Product UUID - returned: always - type: str - sample: ef50bac8-2845-40ff-81d9-675315501dac - -hw_system_serial: - description: System serial number - returned: always - type: str - sample: ABC12345D6 - -hw_uuid: - description: Hardware UUID - returned: always - type: str - sample: 123456ABC78901D2 - -host_power_status: - description: - - Power status of host. - - Will be one of C(ON), C(OFF) and C(UNKNOWN). - returned: always - type: str - sample: ON - version_added: 3.5.0 -''' - -import re -import traceback -import warnings - -HPILO_IMP_ERR = None -try: - import hpilo - HAS_HPILO = True -except ImportError: - HPILO_IMP_ERR = traceback.format_exc() - HAS_HPILO = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -# Suppress warnings from hpilo -warnings.simplefilter('ignore') - - -def parse_flat_interface(entry, non_numeric='hw_eth_ilo'): - try: - infoname = 'hw_eth' + str(int(entry['Port']) - 1) - except Exception: - infoname = non_numeric - - info = { - 'macaddress': entry['MAC'].replace('-', ':'), - 'macaddress_dash': entry['MAC'] - } - return (infoname, info) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', required=True), - login=dict(type='str', default='Administrator'), - password=dict(type='str', default='admin', no_log=True), - ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), - ), - supports_check_mode=True, - ) - - if not HAS_HPILO: - module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) - - host = module.params['host'] - login = module.params['login'] - password = module.params['password'] - ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) - - ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) - - info = { - 'module_hw': True, - } - - # TODO: Count number of CPUs, DIMMs and total memory - try: - data = ilo.get_host_data() - power_state = ilo.get_host_power_status() - except hpilo.IloCommunicationError as e: - module.fail_json(msg=to_native(e)) - - for entry in data: - if 'type' not in entry: - continue - elif entry['type'] == 0: # BIOS Information - info['hw_bios_version'] = entry['Family'] - info['hw_bios_date'] = entry['Date'] - elif entry['type'] == 1: # System Information - info['hw_uuid'] = entry['UUID'] - info['hw_system_serial'] = entry['Serial Number'].rstrip() - info['hw_product_name'] = entry['Product Name'] - info['hw_product_uuid'] = entry['cUUID'] - elif entry['type'] == 209: # Embedded NIC MAC Assignment - if 'fields' in entry: - 
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: - if name.startswith('Port'): - try: - infoname = 'hw_eth' + str(int(value) - 1) - except Exception: - infoname = 'hw_eth_ilo' - elif name.startswith('MAC'): - info[infoname] = { - 'macaddress': value.replace('-', ':'), - 'macaddress_dash': value - } - else: - (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') - info[infoname] = entry_info - elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info - for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: - if name.startswith('Port'): - try: - infoname = 'hw_iscsi' + str(int(value) - 1) - except Exception: - infoname = 'hw_iscsi_ilo' - elif name.startswith('MAC'): - info[infoname] = { - 'macaddress': value.replace('-', ':'), - 'macaddress_dash': value - } - elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format) - (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') - info[infoname] = entry_info - - # Collect health (RAM/CPU data) - health = ilo.get_embedded_health() - info['hw_health'] = health - - memory_details_summary = health.get('memory', {}).get('memory_details_summary') - # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8 - if memory_details_summary: - info['hw_memory_details_summary'] = memory_details_summary - info['hw_memory_total'] = 0 - for cpu, details in memory_details_summary.items(): - cpu_total_memory_size = details.get('total_memory_size') - if cpu_total_memory_size: - ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size) - if ram: - if ram.group(2) == 'GB': - info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1)) - - # reformat into a text friendly format - info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total']) - - # Report host state - info['host_power_status'] = power_state or 'UNKNOWN' - - module.exit_json(**info) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/hpilo/hponcfg.py b/plugins/modules/remote_management/hpilo/hponcfg.py deleted file mode 100644 index 451e4b0613..0000000000 --- a/plugins/modules/remote_management/hpilo/hponcfg.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: hponcfg -author: Dag Wieers (@dagwieers) -short_description: Configure HP iLO interface using hponcfg -description: -- This modules configures the HP iLO interface using hponcfg. -options: - path: - description: - - The XML file as accepted by hponcfg. - required: true - aliases: ['src'] - minfw: - description: - - The minimum firmware level needed. - required: false - executable: - description: - - Path to the hponcfg executable (`hponcfg` which uses $PATH). - default: hponcfg - verbose: - description: - - Run hponcfg in verbose mode (-v). - default: no - type: bool -requirements: -- hponcfg tool -notes: -- You need a working hponcfg on the target system. 
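
As the note above says, everything happens through the hponcfg binary on the target; the module below only assembles a command line of the shape hponcfg -f <xml> [-v] [-m <minfw>] and inspects the return code. A standalone sketch of the same invocation (the XML path and firmware level are placeholder values):

    import subprocess

    # -f : RIBCL XML file to apply; -v : verbose; -m : minimum firmware level
    cmd = ['hponcfg', '-f', '/tmp/enable-ssh.xml', '-v', '-m', '2.30']
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError('Failed to run hponcfg: %s' % result.stderr)

The module itself goes through AnsibleModule.run_command() rather than subprocess, and reports changed=True unconditionally, since (as its own comment notes) the operation is not idempotent yet.
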
-''' - -EXAMPLES = r''' -- name: Example hponcfg configuration XML - ansible.builtin.copy: - content: | - - - - - - - - - - - - - - dest: /tmp/enable-ssh.xml - -- name: Configure HP iLO using enable-ssh.xml - community.general.hponcfg: - src: /tmp/enable-ssh.xml - -- name: Configure HP iLO on VMware ESXi hypervisor - community.general.hponcfg: - src: /tmp/enable-ssh.xml - executable: /opt/hp/tools/hponcfg -''' - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - src=dict(type='path', required=True, aliases=['path']), - minfw=dict(type='str'), - executable=dict(default='hponcfg', type='str'), - verbose=dict(default=False, type='bool'), - ) - ) - - # Consider every action a change (not idempotent yet!) - changed = True - - src = module.params['src'] - minfw = module.params['minfw'] - executable = module.params['executable'] - verbose = module.params['verbose'] - - options = ' -f %s' % src - - if verbose: - options += ' -v' - - if minfw: - options += ' -m %s' % minfw - - rc, stdout, stderr = module.run_command('%s %s' % (executable, options)) - - if rc != 0: - module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr) - - module.exit_json(changed=changed, stdout=stdout, stderr=stderr) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/imc/imc_rest.py b/plugins/modules/remote_management/imc/imc_rest.py deleted file mode 100644 index 239c76fab3..0000000000 --- a/plugins/modules/remote_management/imc/imc_rest.py +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: imc_rest -short_description: Manage Cisco IMC hardware through its REST API -description: -- Provides direct access to the Cisco IMC REST API. -- Perform any configuration changes and actions that the Cisco IMC supports. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -author: -- Dag Wieers (@dagwieers) -requirements: -- lxml -- xmljson >= 0.1.8 -options: - hostname: - description: - - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. - required: true - aliases: [ host, ip ] - type: str - username: - description: - - Username used to login to the switch. - default: admin - aliases: [ user ] - type: str - password: - description: - - The password to use for authentication. - default: password - type: str - path: - description: - - Name of the absolute path of the filename that includes the body - of the http request being sent to the Cisco IMC REST API. - - Parameter C(path) is mutual exclusive with parameter C(content). - aliases: [ 'src', 'config_file' ] - type: path - content: - description: - - When used instead of C(path), sets the content of the API requests directly. - - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. - - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, - the Cisco IMC output is subsequently merged. - - Parameter C(content) is mutual exclusive with parameter C(path). - type: str - protocol: - description: - - Connection protocol to use. 
- default: https - choices: [ http, https ] - type: str - timeout: - description: - - The socket level timeout in seconds. - - This is the time that every single connection (every fragment) can spend. - If this C(timeout) is reached, the module will fail with a - C(Connection failure) indicating that C(The read operation timed out). - default: 60 - type: int - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -notes: -- The XML fragments don't need an authentication cookie, this is injected by the module automatically. -- The Cisco IMC XML output is being translated to JSON using the Cobra convention. -- Any configConfMo change requested has a return status of 'modified', even if there was no actual change - from the previous configuration. As a result, this module will always report a change on subsequent runs. - In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt. -- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout) - parameter. Some XML fragments can take longer than the default timeout. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -''' - -EXAMPLES = r''' -- name: Power down server - community.general.imc_rest: - hostname: '{{ imc_hostname }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - delegate_to: localhost - -- name: Configure IMC using multiple XML fragments - community.general.imc_rest: - hostname: '{{ imc_hostname }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - timeout: 120 - content: | - - - - - - - - - - delegate_to: localhost - -- name: Enable PXE boot and power-cycle server - community.general.imc_rest: - hostname: '{{ imc_hostname }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - - - - - - - delegate_to: localhost - -- name: Reconfigure IMC to boot from storage - community.general.imc_rest: - hostname: '{{ imc_host }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - delegate_to: localhost - -- name: Add customer description to server - community.general.imc_rest: - hostname: '{{ imc_host }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - delegate_to: localhost - -- name: Disable HTTP and increase session timeout to max value 10800 secs - community.general.imc_rest: - hostname: '{{ imc_host }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - timeout: 120 - content: | - - - - - - - - delegate_to: localhost -''' - -RETURN = r''' -aaLogin: - description: Cisco IMC XML output for the login, translated to JSON using Cobra convention - returned: success - type: dict - sample: | - "attributes": { - "cookie": "", - "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a", - "outPriv": "admin", - "outRefreshPeriod": "600", - "outSessionId": "114", - "outVersion": "2.0(13e)", - "response": "yes" - } -configConfMo: - description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention - returned: success - type: dict - sample: | 
-elapsed: - description: Elapsed time in seconds - returned: always - type: int - sample: 31 -response: - description: HTTP response message, including content length - returned: always - type: str - sample: OK (729 bytes) -status: - description: The HTTP response status code - returned: always - type: dict - sample: 200 -error: - description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention - returned: failed - type: dict - sample: | - "attributes": { - "cookie": "", - "errorCode": "ERR-xml-parse-error", - "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ", - "invocationResult": "594", - "response": "yes" - } -error_code: - description: Cisco IMC error code - returned: failed - type: str - sample: ERR-xml-parse-error -error_text: - description: Cisco IMC error message - returned: failed - type: str - sample: | - XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. -input: - description: RAW XML input sent to the Cisco IMC, causing the error - returned: failed - type: str - sample: | - -output: - description: RAW XML output received from the Cisco IMC, with error details - returned: failed - type: str - sample: > - -''' - -import datetime -import itertools -import os -import traceback -from functools import partial - -LXML_ETREE_IMP_ERR = None -try: - import lxml.etree - HAS_LXML_ETREE = True -except ImportError: - LXML_ETREE_IMP_ERR = traceback.format_exc() - HAS_LXML_ETREE = False - -XMLJSON_COBRA_IMP_ERR = None -try: - from xmljson import cobra - HAS_XMLJSON_COBRA = True -except ImportError: - XMLJSON_COBRA_IMP_ERR = traceback.format_exc() - HAS_XMLJSON_COBRA = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.urls import fetch_url - - -def imc_response(module, rawoutput, rawinput=''): - ''' Handle IMC returned data ''' - xmloutput = lxml.etree.fromstring(rawoutput) - result = cobra.data(xmloutput) - - # Handle errors - if xmloutput.get('errorCode') and xmloutput.get('errorDescr'): - if rawinput: - result['input'] = rawinput - result['output'] = rawoutput - result['error_code'] = xmloutput.get('errorCode') - result['error_text'] = xmloutput.get('errorDescr') - module.fail_json(msg='Request failed: %(error_text)s' % result, **result) - - return result - - -def logout(module, url, cookie, timeout): - ''' Perform a logout, if needed ''' - data = '' % (cookie, cookie) - resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout) - - -def merge(one, two): - ''' Merge two complex nested datastructures into one''' - if isinstance(one, dict) and isinstance(two, dict): - copy = dict(one) - # copy.update({key: merge(one.get(key, None), two[key]) for key in two}) - copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two)) - return copy - - elif isinstance(one, list) and isinstance(two, list): - return [merge(alpha, beta) for (alpha, beta) in itertools.izip_longest(one, two)] - - return one if two is None else two - - -def main(): - module = AnsibleModule( - argument_spec=dict( - hostname=dict(type='str', required=True, aliases=['host', 'ip']), - username=dict(type='str', default='admin', aliases=['user']), - password=dict(type='str', default='password', no_log=True), - content=dict(type='str'), - path=dict(type='path', aliases=['config_file', 'src']), - protocol=dict(type='str', default='https', choices=['http', 'https']), 
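
Two readability casualties in this hunk are worth spelling out. First, the angle-bracketed XML literals did not survive extraction: logout() above and the login/fragment-wrapping code below all appear to format empty strings. Reconstructed from the IMC XML API convention the surrounding code depends on (the session cookie is read out of result['aaaLogin'], which pins down the element names), the payloads are of this shape:

    # login request POSTed to https://<imc>/nuova
    data = '<aaaLogin inName="%s" inPassword="%s" />' % (username, password)

    # logout request that invalidates the session cookie
    data = '<aaaLogout cookie="%s" inCookie="%s" />' % (cookie, cookie)

    # user-supplied fragments are wrapped so lxml parses a single document
    xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))

Second, merge() above calls itertools.izip_longest, which only exists on Python 2; on Python 3 the list branch raises AttributeError, because the function was renamed to itertools.zip_longest there.
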
- timeout=dict(type='int', default=60), - validate_certs=dict(type='bool', default=True), - ), - supports_check_mode=True, - mutually_exclusive=[['content', 'path']], - ) - - if not HAS_LXML_ETREE: - module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) - - if not HAS_XMLJSON_COBRA: - module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR) - - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - - content = module.params['content'] - path = module.params['path'] - - protocol = module.params['protocol'] - timeout = module.params['timeout'] - - result = dict( - failed=False, - changed=False, - ) - - # Report missing file - file_exists = False - if path: - if os.path.isfile(path): - file_exists = True - else: - module.fail_json(msg='Cannot find/access path:\n%s' % path) - - start = datetime.datetime.utcnow() - - # Perform login first - url = '%s://%s/nuova' % (protocol, hostname) - data = '' % (username, password) - resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) - if resp is None or auth['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result) - result.update(imc_response(module, resp.read())) - - # Store cookie for future requests - cookie = '' - try: - cookie = result['aaaLogin']['attributes']['outCookie'] - except Exception: - module.fail_json(msg='Could not find cookie in output', **result) - - try: - # Prepare request data - if content: - rawdata = content - elif file_exists: - with open(path, 'r') as config_object: - rawdata = config_object.read() - - # Wrap the XML documents in a element - xmldata = lxml.etree.fromstring('%s' % rawdata.replace('\n', '')) - - # Handle each XML document separately in the same session - for xmldoc in list(xmldata): - if xmldoc.tag is lxml.etree.Comment: - continue - # Add cookie to XML - xmldoc.set('cookie', cookie) - data = lxml.etree.tostring(xmldoc) - - # Perform actual request - resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) - if resp is None or info['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) - - # Merge results with previous results - rawoutput = resp.read() - result = merge(result, imc_response(module, rawoutput, rawinput=data)) - result['response'] = info['msg'] - result['status'] = info['status'] - - # Check for any changes - # NOTE: Unfortunately IMC API always report status as 'modified' - xmloutput = lxml.etree.fromstring(rawoutput) - results = xmloutput.xpath('/configConfMo/outConfig/*/@status') - result['changed'] = ('modified' in results) - - # Report success - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.exit_json(**result) - finally: - logout(module, url, cookie, timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/ipmi/ipmi_boot.py b/plugins/modules/remote_management/ipmi/ipmi_boot.py deleted file mode 100644 index f4bdbb2112..0000000000 --- a/plugins/modules/remote_management/ipmi/ipmi_boot.py +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ipmi_boot -short_description: Management of order of boot devices -description: - - Use this module to manage order of boot devices -options: - name: - description: - - Hostname or ip address of the BMC. - required: true - type: str - port: - description: - - Remote RMCP port. - default: 623 - type: int - user: - description: - - Username to use to connect to the BMC. - required: true - type: str - password: - description: - - Password to connect to the BMC. - required: true - type: str - bootdev: - description: - - Set boot device to use on next reboot - - "The choices for the device are: - - network -- Request network boot - - floppy -- Boot from floppy - - hd -- Boot from hard drive - - safe -- Boot from hard drive, requesting 'safe mode' - - optical -- boot from CD/DVD/BD drive - - setup -- Boot into setup utility - - default -- remove any IPMI directed boot device request" - required: true - choices: - - network - - floppy - - hd - - safe - - optical - - setup - - default - type: str - state: - description: - - Whether to ensure that boot devices is desired. - - "The choices for the state are: - - present -- Request system turn on - - absent -- Request system turn on" - default: present - choices: [ present, absent ] - type: str - persistent: - description: - - If set, ask that system firmware uses this device beyond next boot. - Be aware many systems do not honor this. - type: bool - default: 'no' - uefiboot: - description: - - If set, request UEFI boot explicitly. - Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. - In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. - type: bool - default: 'no' -requirements: - - "python >= 2.6" - - pyghmi -author: "Bulat Gaifullin (@bgaifullin) " -''' - -RETURN = ''' -bootdev: - description: The boot device name which will be used beyond next boot. - returned: success - type: str - sample: default -persistent: - description: If True, system firmware will use this device beyond next boot. - returned: success - type: bool - sample: false -uefimode: - description: If True, system firmware will use UEFI boot explicitly beyond next boot. 
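
The compare-and-set at the heart of this module comes down to two pyghmi calls; in isolation (BMC address and credentials are placeholders) the pattern in main() below looks like:

    from pyghmi.ipmi import command

    ipmi_cmd = command.Command(bmc='bmc.example.com', userid='admin',
                               password='secret', port=623)

    current = ipmi_cmd.get_bootdev()      # e.g. {'bootdev': 'default', ...}
    if current.get('bootdev') != 'hd':
        # persist=False requests the device for the next boot only
        response = ipmi_cmd.set_bootdev('hd', persist=False, uefiboot=False)
        if 'error' in response:
            raise RuntimeError(response['error'])
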
- returned: success - type: bool - sample: false -''' - -EXAMPLES = ''' -- name: Ensure bootdevice is HD - community.general.ipmi_boot: - name: test.testdomain.com - user: admin - password: password - bootdev: hd - -- name: Ensure bootdevice is not Network - community.general.ipmi_boot: - name: test.testdomain.com - user: admin - password: password - bootdev: network - state: absent -''' - -import traceback - -PYGHMI_IMP_ERR = None -try: - from pyghmi.ipmi import command -except ImportError: - PYGHMI_IMP_ERR = traceback.format_exc() - command = None - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - port=dict(default=623, type='int'), - user=dict(required=True, no_log=True), - password=dict(required=True, no_log=True), - state=dict(default='present', choices=['present', 'absent']), - bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']), - persistent=dict(default=False, type='bool'), - uefiboot=dict(default=False, type='bool') - ), - supports_check_mode=True, - ) - - if command is None: - module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) - - name = module.params['name'] - port = module.params['port'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - bootdev = module.params['bootdev'] - persistent = module.params['persistent'] - uefiboot = module.params['uefiboot'] - request = dict() - - if state == 'absent' and bootdev == 'default': - module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") - - # --- run command --- - try: - ipmi_cmd = command.Command( - bmc=name, userid=user, password=password, port=port - ) - module.debug('ipmi instantiated - name: "%s"' % name) - current = ipmi_cmd.get_bootdev() - # uefimode may not supported by BMC, so use desired value as default - current.setdefault('uefimode', uefiboot) - if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): - request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) - elif state == 'absent' and current['bootdev'] == bootdev: - request = dict(bootdev='default') - else: - module.exit_json(changed=False, **current) - - if module.check_mode: - response = dict(bootdev=request['bootdev']) - else: - response = ipmi_cmd.set_bootdev(**request) - - if 'error' in response: - module.fail_json(msg=response['error']) - - if 'persist' in request: - response['persistent'] = request['persist'] - if 'uefiboot' in request: - response['uefimode'] = request['uefiboot'] - - module.exit_json(changed=True, **response) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/ipmi/ipmi_power.py b/plugins/modules/remote_management/ipmi/ipmi_power.py deleted file mode 100644 index 8a88679697..0000000000 --- a/plugins/modules/remote_management/ipmi/ipmi_power.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ipmi_power -short_description: Power management for machine -description: - - Use this module for power management -options: - name: - description: - - 
Hostname or ip address of the BMC. - required: true - type: str - port: - description: - - Remote RMCP port. - default: 623 - type: int - user: - description: - - Username to use to connect to the BMC. - required: true - type: str - password: - description: - - Password to connect to the BMC. - required: true - type: str - state: - description: - - Whether to ensure that the machine in desired state. - - "The choices for state are: - - on -- Request system turn on - - off -- Request system turn off without waiting for OS to shutdown - - shutdown -- Have system request OS proper shutdown - - reset -- Request system reset without waiting for OS - - boot -- If system is off, then 'on', else 'reset'" - choices: ['on', 'off', shutdown, reset, boot] - required: true - type: str - timeout: - description: - - Maximum number of seconds before interrupt request. - default: 300 - type: int -requirements: - - "python >= 2.6" - - pyghmi -author: "Bulat Gaifullin (@bgaifullin) " -''' - -RETURN = ''' -powerstate: - description: The current power state of the machine. - returned: success - type: str - sample: on -''' - -EXAMPLES = ''' -- name: Ensure machine is powered on - community.general.ipmi_power: - name: test.testdomain.com - user: admin - password: password - state: on -''' - -import traceback - -PYGHMI_IMP_ERR = None -try: - from pyghmi.ipmi import command -except ImportError: - PYGHMI_IMP_ERR = traceback.format_exc() - command = None - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - port=dict(default=623, type='int'), - state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']), - user=dict(required=True, no_log=True), - password=dict(required=True, no_log=True), - timeout=dict(default=300, type='int'), - ), - supports_check_mode=True, - ) - - if command is None: - module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) - - name = module.params['name'] - port = module.params['port'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - timeout = module.params['timeout'] - - # --- run command --- - try: - ipmi_cmd = command.Command( - bmc=name, userid=user, password=password, port=port - ) - module.debug('ipmi instantiated - name: "%s"' % name) - - current = ipmi_cmd.get_power() - if current['powerstate'] != state: - response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout) - changed = True - else: - response = current - changed = False - - if 'error' in response: - module.fail_json(msg=response['error']) - - module.exit_json(changed=changed, **response) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py deleted file mode 100644 index f082f6cd5c..0000000000 --- a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py +++ /dev/null @@ -1,677 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: xcc_redfish_command -short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs -version_added: 2.4.0 
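
One implementation detail worth pulling forward before the Lenovo-specific options: the Raw category's raw_patch_resource (defined further down) decides changed by comparing the resource's @odata.etag before and after the PATCH, reading the new etag from the PATCH response body. The same idea in isolation, sketched with python-requests against a hypothetical BMC rather than the module's RedfishUtils plumbing:

    import requests

    base = 'https://bmc.example.com'
    auth = ('USERID', 'PASSW0RD')          # placeholder credentials

    resource = requests.get(base + '/redfish/v1/Systems/1',
                            auth=auth, verify=False).json()
    original_etag = resource['@odata.etag']

    resp = requests.patch(base + '/redfish/v1/Systems/1',
                          json={'AssetTag': 'new_asset_tag'},
                          auth=auth, verify=False)
    # XCC echoes the updated resource back; guard in case the body is empty
    current_etag = resp.json().get('@odata.etag', '') if resp.content else ''

    # a differing etag means the PATCH actually modified the resource
    changed = current_etag != original_etag
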
-description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action or get information back or update a configuration attribute. - - Manages virtual media. - - Supports getting information back via GET method. - - Supports updating a configuration attribute via PATCH method. - - Supports performing an action via POST method. -options: - category: - required: true - description: - - Category to execute on OOB controller. - type: str - command: - required: true - description: - - List of commands to execute on OOB controller. - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller. - type: str - username: - description: - - Username for authentication with OOB controller. - type: str - password: - description: - - Password for authentication with OOB controller. - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - timeout: - description: - - Timeout in seconds for URL requests to OOB controller. - default: 10 - type: int - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify. - type: str - virtual_media: - required: false - description: - - The options for VirtualMedia commands. - type: dict - suboptions: - media_types: - description: - - The list of media types appropriate for the image. - type: list - elements: str - image_url: - description: - - The URL of the image to insert or eject. - type: str - inserted: - description: - - Indicates if the image is treated as inserted on command completion. - type: bool - default: true - write_protected: - description: - - Indicates if the media is treated as write-protected. - type: bool - default: true - username: - description: - - The username for accessing the image URL. - type: str - password: - description: - - The password for accessing the image URL. - type: str - transfer_protocol_type: - description: - - The network protocol to use with the image. - type: str - transfer_method: - description: - - The transfer method to use with the image. - type: str - resource_uri: - required: false - description: - - The resource uri to get or patch or post. - type: str - request_body: - required: false - description: - - The request body to patch or post. 
- type: dict - -author: "Yuyan Pan (@panyy3)" -''' - -EXAMPLES = ''' - - name: Insert Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: "http://example.com/images/SomeLinux-current.iso" - media_types: - - CD - - DVD - resource_id: "1" - - - name: Eject Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: "http://example.com/images/SomeLinux-current.iso" - resource_id: "1" - - - name: Eject all Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_id: "1" - - - name: Get ComputeSystem Oem property SystemStatus via GetResource command - community.general.xcc_redfish_command: - category: Raw - command: GetResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1" - register: result - - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}" - - - name: Get Oem DNS setting via GetResource command - community.general.xcc_redfish_command: - category: Raw - command: GetResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data }}" - - - name: Get Lenovo FoD key collection resource via GetCollectionResource command - community.general.xcc_redfish_command: - category: Raw - command: GetCollectionResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data_list }}" - - - name: Update ComputeSystem property AssetTag via PatchResource command - community.general.xcc_redfish_command: - category: Raw - command: PatchResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1" - request_body: - AssetTag: "new_asset_tag" - - - name: Perform BootToBIOSSetup action via PostResource command - community.general.xcc_redfish_command: - category: Raw - command: PostResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup" - request_body: {} - - - name: Perform SecureBoot.ResetKeys action via PostResource command - community.general.xcc_redfish_command: - category: Raw - command: PostResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys" - request_body: - ResetKeysType: DeleteAllKeys - - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Update Manager DateTimeLocalOffset property using security token for auth - 
community.general.xcc_redfish_command: - category: Raw - command: PatchResource - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - resource_uri: "/redfish/v1/Managers/1" - request_body: - DateTimeLocalOffset: "+08:00" - - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" -''' - -RETURN = ''' -msg: - description: A message related to the performed action(s). - returned: when failure or action/update success - type: str - sample: "Action was successful" -redfish_facts: - description: Resource content. - returned: when command == GetResource or command == GetCollectionResource - type: dict - sample: '{ - "redfish_facts": { - "data": { - "@odata.etag": "\"3179bf00d69f25a8b3c\"", - "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", - "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", - "DDNS": [ - { - "DDNSEnable": true, - "DomainName": "", - "DomainNameSource": "DHCP" - } - ], - "DNSEnable": true, - "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", - "IPv4Address1": "10.103.62.178", - "IPv4Address2": "0.0.0.0", - "IPv4Address3": "0.0.0.0", - "IPv6Address1": "::", - "IPv6Address2": "::", - "IPv6Address3": "::", - "Id": "LenovoDNS", - "PreferredAddresstype": "IPv4" - }, - "ret": true - } - }' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils - - -class XCCRedfishUtils(RedfishUtils): - @staticmethod - def _find_empty_virt_media_slot(resources, media_types, - media_match_strict=True): - for uri, data in resources.items(): - # check MediaTypes - if 'MediaTypes' in data and media_types: - if not set(media_types).intersection(set(data['MediaTypes'])): - continue - else: - if media_match_strict: - continue - if 'RDOC' in uri: - continue - # if ejected, 'Inserted' should be False and 'ImageName' cleared - if (not data.get('Inserted', False) and - not data.get('ImageName')): - return uri, data - return None, None - - def virtual_media_eject_one(self, image_url): - # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} - virt_media_uri = data["VirtualMedia"]["@odata.id"] - response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: - return response - data = response['data'] - virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) - resources, headers = self._read_virt_media_resources(virt_media_list) - - # find the VirtualMedia resource to eject - uri, data, eject = self._find_virt_media_to_eject(resources, image_url) - if uri and eject: - if ('Actions' not in data or - '#VirtualMedia.EjectMedia' not in data['Actions']): - # try to eject via PATCH if no EjectMedia action found - h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: - # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "%s action not found and PATCH not 
allowed" - % '#VirtualMedia.EjectMedia'} - return self.virtual_media_eject_via_patch(uri) - else: - # POST to the EjectMedia Action - action = data['Actions']['#VirtualMedia.EjectMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI property missing from Action " - "#VirtualMedia.EjectMedia"} - action_uri = action['target'] - # empty payload for Eject action - payload = {} - # POST to action - response = self.post_request(self.root_uri + action_uri, - payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} - elif uri and not eject: - # already ejected: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': "VirtualMedia image '%s' already ejected" % - image_url} - else: - # return failure (no resources matching image_url found) - return {'ret': False, 'changed': False, - 'msg': "No VirtualMedia resource found with image '%s' " - "inserted" % image_url} - - def virtual_media_eject(self, options): - if options: - image_url = options.get('image_url') - if image_url: # eject specified one media - return self.virtual_media_eject_one(image_url) - - # eject all inserted media when no image_url specified - # read all the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} - virt_media_uri = data["VirtualMedia"]["@odata.id"] - response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: - return response - data = response['data'] - virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) - resources, headers = self._read_virt_media_resources(virt_media_list) - - # eject all inserted media one by one - ejected_media_list = [] - for uri, data in resources.items(): - if data.get('Image') and data.get('Inserted', True): - returndict = self.virtual_media_eject_one(data.get('Image')) - if not returndict['ret']: - return returndict - ejected_media_list.append(data.get('Image')) - - if len(ejected_media_list) == 0: - # no media inserted: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': "No VirtualMedia image inserted"} - else: - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia %s ejected" % str(ejected_media_list)} - - def raw_get_resource(self, resource_uri): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - data = response['data'] - return {'ret': True, 'data': data} - - def raw_get_collection_resource(self, resource_uri): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - if 'Members' not in response['data']: - return {'ret': False, 'msg': "Specified resource_uri doesn't have Members property"} - member_list = [i['@odata.id'] for i in response['data'].get('Members', [])] - - # get member resource one by one - data_list = [] - for member_uri in member_list: - uri = self.root_uri + member_uri - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - data_list.append(data) - - return {'ret': True, 'data_list': 
data_list} - - def raw_patch_resource(self, resource_uri, request_body): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - if request_body is None: - return {'ret': False, 'msg': "request_body is missing"} - # check whether resource_uri exists - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - original_etag = response['data']['@odata.etag'] - - # check validity of keys in request_body - data = response['data'] - for key in request_body.keys(): - if key not in data: - return {'ret': False, 'msg': "Key %s not found. Supported key list: %s" % (key, str(data.keys()))} - - # perform patch - response = self.patch_request(self.root_uri + resource_uri, request_body) - if response['ret'] is False: - return response - - # check whether changed or not - current_etag = '' - if 'data' in response and '@odata.etag' in response['data']: - current_etag = response['data']['@odata.etag'] - if current_etag != original_etag: - return {'ret': True, 'changed': True} - else: - return {'ret': True, 'changed': False} - - def raw_post_resource(self, resource_uri, request_body): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - if '/Actions/' not in resource_uri: - return {'ret': False, 'msg': "Bad uri %s. Keyword /Actions/ should be included in uri" % resource_uri} - if request_body is None: - return {'ret': False, 'msg': "request_body is missing"} - # get action base uri data for further checking - action_base_uri = resource_uri.split('/Actions/')[0] - response = self.get_request(self.root_uri + action_base_uri) - if response['ret'] is False: - return response - if 'Actions' not in response['data']: - return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri} - - # check resource_uri against the target uris found in the action base uri data - action_found = False - action_info_uri = None - action_target_uri_list = [] - for key in response['data']['Actions'].keys(): - if action_found: - break - if not key.startswith('#'): - continue - if 'target' in response['data']['Actions'][key]: - if resource_uri == response['data']['Actions'][key]['target']: - action_found = True - if '@Redfish.ActionInfo' in response['data']['Actions'][key]: - action_info_uri = response['data']['Actions'][key]['@Redfish.ActionInfo'] - else: - action_target_uri_list.append(response['data']['Actions'][key]['target']) - if not action_found and 'Oem' in response['data']['Actions']: - for key in response['data']['Actions']['Oem'].keys(): - if action_found: - break - if not key.startswith('#'): - continue - if 'target' in response['data']['Actions']['Oem'][key]: - if resource_uri == response['data']['Actions']['Oem'][key]['target']: - action_found = True - if '@Redfish.ActionInfo' in response['data']['Actions']['Oem'][key]: - action_info_uri = response['data']['Actions']['Oem'][key]['@Redfish.ActionInfo'] - else: - action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target']) - - if not action_found: - return {'ret': False, - 'msg': 'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. 
Supported uri: %s' - % (str(action_target_uri_list))} - - # check request_body with parameter name defined by @Redfish.ActionInfo - if action_info_uri is not None: - response = self.get_request(self.root_uri + action_info_uri) - if response['ret'] is False: - return response - for key in request_body.keys(): - key_found = False - for para in response['data']['Parameters']: - if key == para['Name']: - key_found = True - break - if not key_found: - return {'ret': False, - 'msg': 'Invalid property %s found in request_body. Please refer to @Redfish.ActionInfo Parameters: %s' - % (key, str(response['data']['Parameters']))} - - # perform post - response = self.post_request(self.root_uri + resource_uri, request_body) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True} - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Manager": ["VirtualMediaInsert", - "VirtualMediaEject"], - "Raw": ["GetResource", - "GetCollectionResource", - "PatchResource", - "PostResource"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict(), - virtual_media=dict( - type='dict', - options=dict( - media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - resource_uri=dict(), - request_body=dict( - type='dict', - ), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # VirtualMedia options - virtual_media = module.params['virtual_media'] - - # resource_uri - resource_uri = module.params['resource_uri'] - - # request_body - request_body = module.params['request_body'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = XCCRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == 'VirtualMediaInsert': - result = rf_utils.virtual_media_insert(virtual_media) - elif command == 'VirtualMediaEject': - result = rf_utils.virtual_media_eject(virtual_media) - elif category == "Raw": - for command in command_list: - if command == 'GetResource': - result = rf_utils.raw_get_resource(resource_uri) - elif command == 'GetCollectionResource': - result = rf_utils.raw_get_collection_resource(resource_uri) - elif command == 'PatchResource': - result = rf_utils.raw_patch_resource(resource_uri, request_body) - elif command == 'PostResource': - result = rf_utils.raw_post_resource(resource_uri, request_body) - - # Return data back or fail with proper message - if result['ret'] is True: - if command == 'GetResource' or command == 'GetCollectionResource': - module.exit_json(redfish_facts=result) - else: - changed = result.get('changed', True) - msg = result.get('msg', 'Action was successful') - module.exit_json(changed=changed, msg=msg) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/lxca/lxca_cmms.py b/plugins/modules/remote_management/lxca/lxca_cmms.py deleted file mode 100644 index b3bb6c2a8c..0000000000 --- a/plugins/modules/remote_management/lxca/lxca_cmms.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) -# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: - - Naval Patel (@navalkp) - - Prashant Bhosale (@prabhosa) -module: lxca_cmms -short_description: Custom module for lxca cmms inventory utility -description: - - This module returns/displays inventory details of CMMs - -options: - uuid: - description: - uuid of device, this is a string with length greater than 16. - type: str - - command_options: - description: - options to filter cmms information - default: cmms - choices: - - cmms - - cmms_by_uuid - - cmms_by_chassis_uuid - type: str - - chassis: - description: - uuid of chassis, this is a string with length greater than 16.
- type: str - -extends_documentation_fragment: -- community.general.lxca_common - -''' - -EXAMPLES = ''' -# get all cmms info -- name: Get nodes data from LXCA - community.general.lxca_cmms: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - -# get specific cmms info by uuid -- name: Get nodes data from LXCA - community.general.lxca_cmms: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - uuid: "3C737AA5E31640CE949B10C129A8B01F" - command_options: cmms_by_uuid - -# get specific cmms info by chassis uuid -- name: Get nodes data from LXCA - community.general.lxca_cmms: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - chassis: "3C737AA5E31640CE949B10C129A8B01F" - command_options: cmms_by_chassis_uuid - -''' - -RETURN = r''' -result: - description: cmms detail from lxca - returned: success - type: dict - sample: - cmmList: - - machineType: '' - model: '' - type: 'CMM' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - - machineType: '' - model: '' - type: 'CMM' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - # Multiple cmms details -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object -try: - from pylxca import cmms -except ImportError: - pass - - -UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.' -CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.' -SUCCESS_MSG = "Success %s result" - - -def _cmms(module, lxca_con): - return cmms(lxca_con) - - -def _cmms_by_uuid(module, lxca_con): - if not module.params['uuid']: - module.fail_json(msg=UUID_REQUIRED) - return cmms(lxca_con, module.params['uuid']) - - -def _cmms_by_chassis_uuid(module, lxca_con): - if not module.params['chassis']: - module.fail_json(msg=CHASSIS_UUID_REQUIRED) - return cmms(lxca_con, chassis=module.params['chassis']) - - -def setup_module_object(): - """ - this function merge argument spec and create ansible module object - :return: - """ - args_spec = dict(LXCA_COMMON_ARGS) - args_spec.update(INPUT_ARG_SPEC) - module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) - - return module - - -FUNC_DICT = { - 'cmms': _cmms, - 'cmms_by_uuid': _cmms_by_uuid, - 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid, -} - - -INPUT_ARG_SPEC = dict( - command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', - 'cmms_by_chassis_uuid']), - uuid=dict(default=None), - chassis=dict(default=None) -) - - -def execute_module(module): - """ - This function invoke commands - :param module: Ansible module object - """ - try: - with connection_object(module) as lxca_con: - result = FUNC_DICT[module.params['command_options']](module, lxca_con) - module.exit_json(changed=False, - msg=SUCCESS_MSG % module.params['command_options'], - result=result) - except Exception as exception: - error_msg = '; '.join((e) for e in exception.args) - module.fail_json(msg=error_msg, exception=traceback.format_exc()) - - -def main(): - module = setup_module_object() - has_pylxca(module) - execute_module(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/lxca/lxca_nodes.py b/plugins/modules/remote_management/lxca/lxca_nodes.py deleted file mode 100644 index 62b8e334d8..0000000000 --- 
a/plugins/modules/remote_management/lxca/lxca_nodes.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) -# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: - - Naval Patel (@navalkp) - - Prashant Bhosale (@prabhosa) -module: lxca_nodes -short_description: Custom module for lxca nodes inventory utility -description: - - This module returns/displays inventory details of nodes - -options: - uuid: - description: - uuid of device, this is a string with length greater than 16. - type: str - - command_options: - description: - options to filter nodes information - default: nodes - choices: - - nodes - - nodes_by_uuid - - nodes_by_chassis_uuid - - nodes_status_managed - - nodes_status_unmanaged - type: str - - chassis: - description: - uuid of chassis, this is a string with length greater than 16. - type: str - -extends_documentation_fragment: -- community.general.lxca_common - -''' - -EXAMPLES = ''' -# get all nodes info -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - command_options: nodes - -# get specific nodes info by uuid -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - uuid: "3C737AA5E31640CE949B10C129A8B01F" - command_options: nodes_by_uuid - -# get specific nodes info by chassis uuid -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - chassis: "3C737AA5E31640CE949B10C129A8B01F" - command_options: nodes_by_chassis_uuid - -# get managed nodes -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - command_options: nodes_status_managed - -# get unmanaged nodes -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - command_options: nodes_status_unmanaged - -''' - -RETURN = r''' -result: - description: nodes detail from lxca - returned: always - type: dict - sample: - nodeList: - - machineType: '6241' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - - machineType: '8871' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - # Multiple nodes details -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object -try: - from pylxca import nodes -except ImportError: - pass - - -UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.' -CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
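# A minimal sketch (hypothetical names, no pylxca dependency) of the command-dispatch
# pattern the lxca_* modules share: each command_options value maps to a small handler
# that validates its own required parameter before calling the backing inventory helper.
#
#     def _example_nodes_by_uuid(params, query):
#         if not params.get('uuid'):
#             raise ValueError(UUID_REQUIRED)  # the real module calls fail_json here
#         return query(params['uuid'])
#
#     EXAMPLE_FUNC_DICT = {'nodes_by_uuid': _example_nodes_by_uuid}
#     result = EXAMPLE_FUNC_DICT[params['command_options']](params, query)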
-SUCCESS_MSG = "Success %s result" - - -def _nodes(module, lxca_con): - return nodes(lxca_con) - - -def _nodes_by_uuid(module, lxca_con): - if not module.params['uuid']: - module.fail_json(msg=UUID_REQUIRED) - return nodes(lxca_con, module.params['uuid']) - - -def _nodes_by_chassis_uuid(module, lxca_con): - if not module.params['chassis']: - module.fail_json(msg=CHASSIS_UUID_REQUIRED) - return nodes(lxca_con, chassis=module.params['chassis']) - - -def _nodes_status_managed(module, lxca_con): - return nodes(lxca_con, status='managed') - - -def _nodes_status_unmanaged(module, lxca_con): - return nodes(lxca_con, status='unmanaged') - - -def setup_module_object(): - """ - this function merge argument spec and create ansible module object - :return: - """ - args_spec = dict(LXCA_COMMON_ARGS) - args_spec.update(INPUT_ARG_SPEC) - module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) - - return module - - -FUNC_DICT = { - 'nodes': _nodes, - 'nodes_by_uuid': _nodes_by_uuid, - 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid, - 'nodes_status_managed': _nodes_status_managed, - 'nodes_status_unmanaged': _nodes_status_unmanaged, -} - - -INPUT_ARG_SPEC = dict( - command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid', - 'nodes_by_chassis_uuid', - 'nodes_status_managed', - 'nodes_status_unmanaged']), - uuid=dict(default=None), chassis=dict(default=None) -) - - -def execute_module(module): - """ - This function invoke commands - :param module: Ansible module object - """ - try: - with connection_object(module) as lxca_con: - result = FUNC_DICT[module.params['command_options']](module, lxca_con) - module.exit_json(changed=False, - msg=SUCCESS_MSG % module.params['command_options'], - result=result) - except Exception as exception: - error_msg = '; '.join(exception.args) - module.fail_json(msg=error_msg, exception=traceback.format_exc()) - - -def main(): - module = setup_module_object() - has_pylxca(module) - execute_module(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py b/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py deleted file mode 100644 index d76c334259..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_alert_profiles - -short_description: Configuration of alert profiles for ManageIQ -extends_documentation_fragment: -- community.general.manageiq - -author: Elad Alfassa (@elad661) -description: - - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ. - -options: - state: - type: str - description: - - absent - alert profile should not exist, - - present - alert profile should exist, - choices: ['absent', 'present'] - default: 'present' - name: - type: str - description: - - The unique alert profile name in ManageIQ. - - Required when state is "absent" or "present". - resource_type: - type: str - description: - - The resource type for the alert profile in ManageIQ. Required when state is "present". 
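# Note: the "Required when state is ..." constraints on name and resource_type are
# enforced at runtime through the module's required_if rules in main(), not through
# per-option required flags.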
- choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', - 'ExtManagementSystem', 'MiddlewareServer'] - alerts: - type: list - elements: str - description: - - List of alert descriptions to assign to this profile. - - Required if state is "present" - notes: - type: str - description: - - Optional notes for this profile - -''' - -EXAMPLES = ''' -- name: Add an alert profile to ManageIQ - community.general.manageiq_alert_profiles: - state: present - name: Test profile - resource_type: ContainerNode - alerts: - - Test Alert 01 - - Test Alert 02 - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Delete an alert profile from ManageIQ - community.general.manageiq_alert_profiles: - state: absent - name: Test profile - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec - - -class ManageIQAlertProfiles(object): - """ Object to execute alert profile management operations in manageiq. - """ - - def __init__(self, manageiq): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url) - - def get_profiles(self): - """ Get all alert profiles from ManageIQ - """ - try: - response = self.client.get(self.url + '?expand=alert_definitions,resources') - except Exception as e: - self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e)) - return response.get('resources') or [] - - def get_alerts(self, alert_descriptions): - """ Get a list of alert hrefs from a list of alert descriptions - """ - alerts = [] - for alert_description in alert_descriptions: - alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", - description=alert_description) - alerts.append(alert['href']) - - return alerts - - def add_profile(self, profile): - """ Add a new alert profile to ManageIQ - """ - # find all alerts to add to the profile - # we do this first to fail early if one is missing. 
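# get_alerts() resolves every description through find_collection_resource_or_fail(),
# so an unknown alert aborts the run here, before the profile is created on the server.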
- alerts = self.get_alerts(profile['alerts']) - - # build the profile dict to send to the server - - profile_dict = dict(name=profile['name'], - description=profile['name'], - mode=profile['resource_type']) - if profile['notes']: - profile_dict['set_data'] = dict(notes=profile['notes']) - - # send it to the server - try: - result = self.client.post(self.url, resource=profile_dict, action="create") - except Exception as e: - self.module.fail_json(msg="Creating profile failed: {error}".format(error=e)) - - # now that it has been created, we can assign the alerts - self.assign_or_unassign(result['results'][0], alerts, "assign") - - msg = "Profile {name} created successfully" - msg = msg.format(name=profile['name']) - return dict(changed=True, msg=msg) - - def delete_profile(self, profile): - """ Delete an alert profile from ManageIQ - """ - try: - self.client.post(profile['href'], action="delete") - except Exception as e: - self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e)) - - msg = "Successfully deleted profile {name}".format(name=profile['name']) - return dict(changed=True, msg=msg) - - def get_alert_href(self, alert): - """ Get an absolute href for an alert - """ - return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id']) - - def assign_or_unassign(self, profile, resources, action): - """ Assign or unassign alerts to profile, and validate the result. - """ - alerts = [dict(href=href) for href in resources] - - subcollection_url = profile['href'] + '/alert_definitions' - try: - result = self.client.post(subcollection_url, resources=alerts, action=action) - if len(result['results']) != len(alerts): - msg = "Failed to {action} alerts to profile '{name}', " +\ - "expected {expected} alerts to be {action}ed, " +\ - "but only {changed} were {action}ed" - msg = msg.format(action=action, - name=profile['name'], - expected=len(alerts), - changed=len(result['results'])) - self.module.fail_json(msg=msg) - except Exception as e: - msg = "Failed to {action} alerts to profile '{name}': {error}" - msg = msg.format(action=action, name=profile['name'], error=e) - self.module.fail_json(msg=msg) - - return result['results'] - - def update_profile(self, old_profile, desired_profile): - """ Update alert profile in ManageIQ - """ - changed = False - # we need to use client.get to query the alert definitions - old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions') - - # figure out which alerts we need to assign / unassign - # alerts listed by the user: - desired_alerts = set(self.get_alerts(desired_profile['alerts'])) - - # alerts which currently exist in the profile - if 'alert_definitions' in old_profile: - # we use get_alert_href to have a direct href to the alert - existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']]) - else: - # no alerts in this profile - existing_alerts = set() - - to_add = list(desired_alerts - existing_alerts) - to_remove = list(existing_alerts - desired_alerts) - - # assign / unassign the alerts, if needed - - if to_remove: - self.assign_or_unassign(old_profile, to_remove, "unassign") - changed = True - if to_add: - self.assign_or_unassign(old_profile, to_add, "assign") - changed = True - - # update other properties - profile_dict = dict() - - if old_profile['mode'] != desired_profile['resource_type']: - # mode needs to be updated - profile_dict['mode'] = desired_profile['resource_type'] - - # check if notes need to be updated - old_notes = old_profile.get('set_data',
{}).get('notes') - - if desired_profile['notes'] != old_notes: - profile_dict['set_data'] = dict(notes=desired_profile['notes']) - - if profile_dict: - # if we have any updated values - changed = True - try: - result = self.client.post(old_profile['href'], - resource=profile_dict, - action="edit") - except Exception as e: - msg = "Updating profile '{name}' failed: {error}" - msg = msg.format(name=old_profile['name'], error=e) - self.module.fail_json(msg=msg, result=result) - - if changed: - msg = "Profile {name} updated successfully".format(name=desired_profile['name']) - else: - msg = "No update needed for profile {name}".format(name=desired_profile['name']) - return dict(changed=changed, msg=msg) - - -def main(): - argument_spec = dict( - name=dict(type='str'), - resource_type=dict(type='str', choices=['Vm', - 'ContainerNode', - 'MiqServer', - 'Host', - 'Storage', - 'EmsCluster', - 'ExtManagementSystem', - 'MiddlewareServer']), - alerts=dict(type='list', elements='str'), - notes=dict(type='str'), - state=dict(default='present', choices=['present', 'absent']), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - - module = AnsibleModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['name', 'resource_type']), - ('state', 'absent', ['name'])]) - - state = module.params['state'] - name = module.params['name'] - - manageiq = ManageIQ(module) - manageiq_alert_profiles = ManageIQAlertProfiles(manageiq) - - existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", - name=name) - - # we need to add or update the alert profile - if state == "present": - if not existing_profile: - # a profile with this name doesn't exist yet, let's create it - res_args = manageiq_alert_profiles.add_profile(module.params) - else: - # a profile with this name exists, we might need to update it - res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params) - - # this alert profile should not exist - if state == "absent": - # if we have an alert profile with this name, delete it - if existing_profile: - res_args = manageiq_alert_profiles.delete_profile(existing_profile) - else: - # This alert profile does not exist in ManageIQ, and that's okay - msg = "Alert profile '{name}' does not exist in ManageIQ" - msg = msg.format(name=name) - res_args = dict(changed=False, msg=msg) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_alerts.py b/plugins/modules/remote_management/manageiq/manageiq_alerts.py deleted file mode 100644 index 4f818a3a51..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_alerts.py +++ /dev/null @@ -1,349 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Red Hat Inc. 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_alerts - -short_description: Configuration of alerts in ManageIQ -extends_documentation_fragment: -- community.general.manageiq - -author: Elad Alfassa (@elad661) (based on manageiq_user.py by Daniel Korn) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <https://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' - -module: manageiq_group - -short_description: Management of groups in ManageIQ. -extends_documentation_fragment: -- community.general.manageiq - -author: Evert Mulder (@evertmulder) -description: - - The manageiq_group module supports adding, updating and deleting groups in ManageIQ. -requirements: -- manageiq-client - -options: - state: - type: str - description: - - absent - group should not exist, present - group should exist. - choices: ['absent', 'present'] - default: 'present' - description: - type: str - description: - - The group description. - required: true - default: null - role_id: - type: int - description: - - The group role id - required: false - default: null - role: - type: str - description: - - The group role name - - The C(role_id) has precedence over the C(role) when supplied. - required: false - default: null - tenant_id: - type: int - description: - - The tenant for the group identified by the tenant id. - required: false - default: null - tenant: - type: str - description: - - The tenant for the group identified by the tenant name. - - The C(tenant_id) has precedence over the C(tenant) when supplied. - - Tenant names are case sensitive. - required: false - default: null - managed_filters: - description: The tag values per category - type: dict - required: false - default: null - managed_filters_merge_mode: - type: str - description: - - In merge mode existing categories are kept or updated, new categories are added. - - In replace mode all categories will be replaced with the supplied C(managed_filters). - choices: [ merge, replace ] - default: replace - belongsto_filters: - description: A list of strings with a reference to the allowed host, cluster or folder - type: list - elements: str - required: false - default: null - belongsto_filters_merge_mode: - type: str - description: - - In merge mode existing settings are merged with the supplied C(belongsto_filters). - - In replace mode current values are replaced with the supplied C(belongsto_filters).
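# For example (hypothetical values): in merge mode an existing belongsto list is
# unioned with the supplied one, while replace keeps only the supplied entries. For
# managed_filters, merge keeps an existing category such as {department: [defense]}
# alongside a newly supplied {prov_max_cpu: ['1']}, while replace drops any category
# that is not supplied.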
- choices: [ merge, replace ] - default: replace -''' - -EXAMPLES = ''' -- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant' - community.general.manageiq_group: - description: 'MyGroup-user' - role: 'EvmRole-user' - tenant: 'my_tenant' - manageiq_connection: - url: 'https://manageiq_server' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4 - community.general.manageiq_group: - description: 'MyGroup-user' - role: 'EvmRole-user' - tenant_id: 4 - manageiq_connection: - url: 'https://manageiq_server' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: - - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant. - - Apply 3 prov_max_cpu and 2 department tags to the group. - - Limit access to a cluster for the group. - community.general.manageiq_group: - description: 'MyGroup-user' - role: 'EvmRole-user' - tenant: my_tenant - managed_filters: - prov_max_cpu: - - '1' - - '2' - - '4' - department: - - defense - - engineering - managed_filters_merge_mode: replace - belongsto_filters: - - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" - belongsto_filters_merge_mode: merge - manageiq_connection: - url: 'https://manageiq_server' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Delete a group in ManageIQ - community.general.manageiq_group: - state: 'absent' - description: 'MyGroup-user' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - -- name: Delete a group in ManageIQ using a token - community.general.manageiq_group: - state: 'absent' - description: 'MyGroup-user' - manageiq_connection: - url: 'http://127.0.0.1:3000' - token: 'sometoken' -''' - -RETURN = ''' -group: - description: The group. - returned: success - type: complex - contains: - description: - description: The group description - returned: success - type: str - id: - description: The group id - returned: success - type: int - group_type: - description: The group type, system or user - returned: success - type: str - role: - description: The group role name - returned: success - type: str - tenant: - description: The group tenant name - returned: success - type: str - managed_filters: - description: The tag values per category - returned: success - type: dict - belongsto_filters: - description: A list of strings with a reference to the allowed host, cluster or folder - returned: success - type: list - created_on: - description: Group creation date - returned: success - type: str - sample: "2018-08-12T08:37:55+00:00" - updated_on: - description: Group update date - returned: success - type: int - sample: "2018-08-12T08:37:55+00:00" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec - - -class ManageIQgroup(object): - """ - Object to execute group management operations in manageiq. - """ - - def __init__(self, manageiq): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - def group(self, description): - """ Search for group object by description. - Returns: - the group, or None if group was not found. 
- """ - groups = self.client.collections.groups.find_by(description=description) - if len(groups) == 0: - return None - else: - return groups[0] - - def tenant(self, tenant_id, tenant_name): - """ Search for tenant entity by name or id - Returns: - the tenant entity, None if no id or name was supplied - """ - - if tenant_id: - tenant = self.client.get_entity('tenants', tenant_id) - if not tenant: - self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id)) - return tenant - else: - if tenant_name: - tenant_res = self.client.collections.tenants.find_by(name=tenant_name) - if not tenant_res: - self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name) - if len(tenant_res) > 1: - self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name) - tenant = tenant_res[0] - return tenant - else: - # No tenant name or tenant id supplied - return None - - def role(self, role_id, role_name): - """ Search for a role object by name or id. - Returns: - the role entity, None no id or name was supplied - - the role, or send a module Fail signal if role not found. - """ - if role_id: - role = self.client.get_entity('roles', role_id) - if not role: - self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id)) - return role - else: - if role_name: - role_res = self.client.collections.roles.find_by(name=role_name) - if not role_res: - self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name) - if len(role_res) > 1: - self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name) - return role_res[0] - else: - # No role name or role id supplied - return None - - @staticmethod - def merge_dict_values(norm_current_values, norm_updated_values): - """ Create an merged update object for manageiq group filters. - - The input dict contain the tag values per category. - If the new values contain the category, all tags for that category are replaced - If the new values do not contain the category, the existing tags are kept - - Returns: - the nested array with the merged values, used in the update post body - """ - - # If no updated values are supplied, in merge mode, the original values must be returned - # otherwise the existing tag filters will be removed. - if norm_current_values and (not norm_updated_values): - return norm_current_values - - # If no existing tag filters exist, use the user supplied values - if (not norm_current_values) and norm_updated_values: - return norm_updated_values - - # start with norm_current_values's keys and values - res = norm_current_values.copy() - # replace res with norm_updated_values's keys and values - res.update(norm_updated_values) - return res - - def delete_group(self, group): - """ Deletes a group from manageiq. - - Returns: - a dict of: - changed: boolean indicating if the entity was updated. - msg: a short message describing the operation executed. 
- """ - try: - url = '%s/groups/%s' % (self.api_url, group['id']) - result = self.client.post(url, action='delete') - except Exception as e: - self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e))) - - if result['success'] is False: - self.module.fail_json(msg=result['message']) - - return dict( - changed=True, - msg="deleted group %s with id %s" % (group['description'], group['id'])) - - def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode): - """ Edit a manageiq group. - - Returns: - a dict of: - changed: boolean indicating if the entity was updated. - msg: a short message describing the operation executed. - """ - - if role or norm_managed_filters or belongsto_filters: - group.reload(attributes=['miq_user_role_name', 'entitlement']) - - try: - current_role = group['miq_user_role_name'] - except AttributeError: - current_role = None - - changed = False - resource = {} - - if description and group['description'] != description: - resource['description'] = description - changed = True - - if tenant and group['tenant_id'] != tenant['id']: - resource['tenant'] = dict(id=tenant['id']) - changed = True - - if role and current_role != role['name']: - resource['role'] = dict(id=role['id']) - changed = True - - if norm_managed_filters or belongsto_filters: - - # Only compare if filters are supplied - entitlement = group['entitlement'] - - if 'filters' not in entitlement: - # No existing filters exist, use supplied filters - managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) - resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} - changed = True - else: - current_filters = entitlement['filters'] - new_filters = self.edit_group_edit_filters(current_filters, - norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode) - if new_filters: - resource['filters'] = new_filters - changed = True - - if not changed: - return dict( - changed=False, - msg="group %s is not changed." % group['description']) - - # try to update group - try: - self.client.post(group['href'], action='edit', resource=resource) - changed = True - except Exception as e: - self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e))) - - return dict( - changed=changed, - msg="successfully updated the group %s with id %s" % (group['description'], group['id'])) - - def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode): - """ Edit a manageiq group filters. 
- - Returns: - None if no the group was not updated - If the group was updated the post body part for updating the group - """ - filters_updated = False - new_filters_resource = {} - - current_belongsto_set = current_filters.get('belongsto', set()) - - if belongsto_filters: - new_belongsto_set = set(belongsto_filters) - else: - new_belongsto_set = set() - - if current_belongsto_set == new_belongsto_set: - new_filters_resource['belongsto'] = current_filters['belongsto'] - else: - if belongsto_filters_merge_mode == 'merge': - current_belongsto_set.update(new_belongsto_set) - new_filters_resource['belongsto'] = list(current_belongsto_set) - else: - new_filters_resource['belongsto'] = list(new_belongsto_set) - filters_updated = True - - # Process belongsto managed filter tags - # The input is in the form dict with keys are the categories and the tags are supplied string array - # ManageIQ, the current_managed, uses an array of arrays. One array of categories. - # We normalize the user input from a dict with arrays to a dict of sorted arrays - # We normalize the current manageiq array of arrays also to a dict of sorted arrays so we can compare - norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters) - - if norm_current_filters == norm_managed_filters: - if 'managed' in current_filters: - new_filters_resource['managed'] = current_filters['managed'] - else: - if managed_filters_merge_mode == 'merge': - merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters) - new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict) - else: - new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) - filters_updated = True - - if not filters_updated: - return None - - return new_filters_resource - - def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters): - """ Creates the group in manageiq. - - Returns: - the created group id, name, created_on timestamp, - updated_on timestamp. 
- """ - # check for required arguments - for key, value in dict(description=description).items(): - if value in (None, ''): - self.module.fail_json(msg="missing required argument: %s" % key) - - url = '%s/groups' % self.api_url - - resource = {'description': description} - - if role is not None: - resource['role'] = dict(id=role['id']) - - if tenant is not None: - resource['tenant'] = dict(id=tenant['id']) - - if norm_managed_filters or belongsto_filters: - managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) - resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} - - try: - result = self.client.post(url, action='create', resource=resource) - except Exception as e: - self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e))) - - return dict( - changed=True, - msg="successfully created group %s" % description, - group_id=result['results'][0]['id'] - ) - - @staticmethod - def normalized_managed_tag_filters_to_miq(norm_managed_filters): - if not norm_managed_filters: - return None - - return list(norm_managed_filters.values()) - - @staticmethod - def manageiq_filters_to_sorted_dict(current_filters): - current_managed_filters = current_filters.get('managed') - if not current_managed_filters: - return None - - res = {} - for tag_list in current_managed_filters: - tag_list.sort() - key = tag_list[0].split('/')[2] - res[key] = tag_list - - return res - - @staticmethod - def normalize_user_managed_filters_to_sorted_dict(managed_filters, module): - if not managed_filters: - return None - - res = {} - for cat_key in managed_filters: - cat_array = [] - if not isinstance(managed_filters[cat_key], list): - module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key)) - for tags in managed_filters[cat_key]: - miq_managed_tag = "/managed/" + cat_key + "/" + tags - cat_array.append(miq_managed_tag) - # Do not add empty categories. 
ManageIQ will remove all categories that are not supplied - if cat_array: - cat_array.sort() - res[cat_key] = cat_array - return res - - @staticmethod - def create_result_group(group): - """ Creates the ansible result object from a manageiq group entity - - Returns: - a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on - """ - try: - role_name = group['miq_user_role_name'] - except AttributeError: - role_name = None - - managed_filters = None - belongsto_filters = None - if 'filters' in group['entitlement']: - filters = group['entitlement']['filters'] - belongsto_filters = filters.get('belongsto') - group_managed_filters = filters.get('managed') - if group_managed_filters: - managed_filters = {} - for tag_list in group_managed_filters: - key = tag_list[0].split('/')[2] - tags = [] - for t in tag_list: - tags.append(t.split('/')[3]) - managed_filters[key] = tags - - return dict( - id=group['id'], - description=group['description'], - role=role_name, - tenant=group['tenant']['name'], - managed_filters=managed_filters, - belongsto_filters=belongsto_filters, - group_type=group['group_type'], - created_on=group['created_on'], - updated_on=group['updated_on'], - ) - - -def main(): - argument_spec = dict( - description=dict(required=True, type='str'), - state=dict(choices=['absent', 'present'], default='present'), - role_id=dict(required=False, type='int'), - role=dict(required=False, type='str'), - tenant_id=dict(required=False, type='int'), - tenant=dict(required=False, type='str'), - managed_filters=dict(required=False, type='dict'), - managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), - belongsto_filters=dict(required=False, type='list', elements='str'), - belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - - module = AnsibleModule( - argument_spec=argument_spec - ) - - description = module.params['description'] - state = module.params['state'] - role_id = module.params['role_id'] - role_name = module.params['role'] - tenant_id = module.params['tenant_id'] - tenant_name = module.params['tenant'] - managed_filters = module.params['managed_filters'] - managed_filters_merge_mode = module.params['managed_filters_merge_mode'] - belongsto_filters = module.params['belongsto_filters'] - belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode'] - - manageiq = ManageIQ(module) - manageiq_group = ManageIQgroup(manageiq) - - group = manageiq_group.group(description) - - # group should not exist - if state == "absent": - # if we have a group, delete it - if group: - res_args = manageiq_group.delete_group(group) - # if we do not have a group, nothing to do - else: - res_args = dict( - changed=False, - msg="group '%s' does not exist in manageiq" % description) - - # group should exist - if state == "present": - - tenant = manageiq_group.tenant(tenant_id, tenant_name) - role = manageiq_group.role(role_id, role_name) - norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module) - # if we have a group, edit it - if group: - res_args = manageiq_group.edit_group(group, description, role, tenant, - norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode) - - # if we do not have a group, create it - else: - res_args = 
manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters) - group = manageiq.client.get_entity('groups', res_args['group_id']) - - group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement']) - res_args['group'] = manageiq_group.create_result_group(group) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_policies.py b/plugins/modules/remote_management/manageiq/manageiq_policies.py deleted file mode 100644 index 567833d7cc..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_policies.py +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Daniel Korn -# (c) 2017, Yaacov Zamir -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_policies - -short_description: Management of resource policy_profiles in ManageIQ. -extends_documentation_fragment: -- community.general.manageiq - -author: Daniel Korn (@dkorn) -description: - - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. - -options: - state: - type: str - description: - - absent - policy_profiles should not exist, - - present - policy_profiles should exist, - - list - list current policy_profiles and policies. - choices: ['absent', 'present', 'list'] - default: 'present' - policy_profiles: - type: list - elements: dict - description: - - list of dictionaries, each includes the policy_profile 'name' key. - - required if state is present or absent. - resource_type: - type: str - description: - - The type of the resource to which the profile should be [un]assigned. - required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] - resource_name: - type: str - description: - - The name of the resource to which the profile should be [un]assigned. - - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. - resource_id: - type: int - description: - - The ID of the resource to which the profile should be [un]assigned. - - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. 
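# When only resource_name is given, main() first resolves it to an id via
# manageiq.find_collection_resource_or_fail(resource_type, name=resource_name).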
- version_added: 2.2.0 -''' - -EXAMPLES = ''' -- name: Assign new policy_profile for a provider in ManageIQ - community.general.manageiq_policies: - resource_name: 'EngLab' - resource_type: 'provider' - policy_profiles: - - name: openscap profile - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Unassign a policy_profile for a provider in ManageIQ - community.general.manageiq_policies: - state: absent - resource_name: 'EngLab' - resource_type: 'provider' - policy_profiles: - - name: openscap profile - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: List current policy_profile and policies for a provider in ManageIQ - community.general.manageiq_policies: - state: list - resource_name: 'EngLab' - resource_type: 'provider' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False -''' - -RETURN = ''' -manageiq_policies: - description: - - List current policy_profile and policies for a provider in ManageIQ - returned: always - type: dict - sample: '{ - "changed": false, - "profiles": [ - { - "policies": [ - { - "active": true, - "description": "OpenSCAP", - "name": "openscap policy" - }, - { - "active": true, - "description": "Analyse incoming container images", - "name": "analyse incoming container images" - }, - { - "active": true, - "description": "Schedule compliance after smart state analysis", - "name": "schedule compliance after smart state analysis" - } - ], - "profile_description": "OpenSCAP profile", - "profile_name": "openscap profile" - } - ] - }' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities - - -class ManageIQPolicies(object): - """ - Object to execute policies management operations of manageiq resources. 
- """ - - def __init__(self, manageiq, resource_type, resource_id): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - self.resource_type = resource_type - self.resource_id = resource_id - self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( - api_url=self.api_url, - resource_type=resource_type, - resource_id=resource_id) - - def query_profile_href(self, profile): - """ Add or Update the policy_profile href field - - Example: - {name: STR, ...} => {name: STR, href: STR} - """ - resource = self.manageiq.find_collection_resource_or_fail( - "policy_profiles", **profile) - return dict(name=profile['name'], href=resource['href']) - - def query_resource_profiles(self): - """ Returns a set of the profile objects objects assigned to the resource - """ - url = '{resource_url}/policy_profiles?expand=resources' - try: - response = self.client.get(url.format(resource_url=self.resource_url)) - except Exception as e: - msg = "Failed to query {resource_type} policies: {error}".format( - resource_type=self.resource_type, - error=e) - self.module.fail_json(msg=msg) - - resources = response.get('resources', []) - - # clean the returned rest api profile object to look like: - # {profile_name: STR, profile_description: STR, policies: ARR} - profiles = [self.clean_profile_object(profile) for profile in resources] - - return profiles - - def query_profile_policies(self, profile_id): - """ Returns a set of the policy objects assigned to the resource - """ - url = '{api_url}/policy_profiles/{profile_id}?expand=policies' - try: - response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) - except Exception as e: - msg = "Failed to query {resource_type} policies: {error}".format( - resource_type=self.resource_type, - error=e) - self.module.fail_json(msg=msg) - - resources = response.get('policies', []) - - # clean the returned rest api policy object to look like: - # {name: STR, description: STR, active: BOOL} - policies = [self.clean_policy_object(policy) for policy in resources] - - return policies - - def clean_policy_object(self, policy): - """ Clean a policy object to have human readable form of: - { - name: STR, - description: STR, - active: BOOL - } - """ - name = policy.get('name') - description = policy.get('description') - active = policy.get('active') - - return dict( - name=name, - description=description, - active=active) - - def clean_profile_object(self, profile): - """ Clean a profile object to have human readable form of: - { - profile_name: STR, - profile_description: STR, - policies: ARR - } - """ - profile_id = profile['id'] - name = profile.get('name') - description = profile.get('description') - policies = self.query_profile_policies(profile_id) - - return dict( - profile_name=name, - profile_description=description, - policies=policies) - - def profiles_to_update(self, profiles, action): - """ Create a list of policies we need to update in ManageIQ. - - Returns: - Whether or not a change took place and a message describing the - operation executed. - """ - profiles_to_post = [] - assigned_profiles = self.query_resource_profiles() - - # make a list of assigned full profile names strings - # e.g. ['openscap profile', ...] 
- assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles]) - - for profile in profiles: - assigned = profile.get('name') in assigned_profiles_set - - if (action == 'unassign' and assigned) or (action == 'assign' and not assigned): - # add/update the policy profile href field - # {name: STR, ...} => {name: STR, href: STR} - profile = self.query_profile_href(profile) - profiles_to_post.append(profile) - - return profiles_to_post - - def assign_or_unassign_profiles(self, profiles, action): - """ Perform assign/unassign action - """ - # get a list of profiles needed to be changed - profiles_to_post = self.profiles_to_update(profiles, action) - if not profiles_to_post: - return dict( - changed=False, - msg="Profiles {profiles} already {action}ed, nothing to do".format( - action=action, - profiles=profiles)) - - # try to assign or unassign profiles to resource - url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url) - try: - response = self.client.post(url, action=action, resources=profiles_to_post) - except Exception as e: - msg = "Failed to {action} profile: {error}".format( - action=action, - error=e) - self.module.fail_json(msg=msg) - - # check all entities in result to be successful - for result in response['results']: - if not result['success']: - msg = "Failed to {action}: {message}".format( - action=action, - message=result['message']) - self.module.fail_json(msg=msg) - - # successfully changed all needed profiles - return dict( - changed=True, - msg="Successfully {action}ed profiles: {profiles}".format( - action=action, - profiles=profiles)) - - -def main(): - actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} - argument_spec = dict( - policy_profiles=dict(type='list', elements='dict'), - resource_id=dict(required=False, type='int'), - resource_name=dict(required=False, type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), - state=dict(required=False, type='str', - choices=['present', 'absent', 'list'], default='present'), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[["resource_id", "resource_name"]], - required_one_of=[["resource_id", "resource_name"]], - required_if=[ - ('state', 'present', ['policy_profiles']), - ('state', 'absent', ['policy_profiles']) - ], - ) - - policy_profiles = module.params['policy_profiles'] - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] - state = module.params['state'] - - # get the action and resource type - action = actions[state] - resource_type = manageiq_entities()[resource_type_key] - - manageiq = ManageIQ(module) - - # query resource id, fail if resource does not exist - if resource_id is None: - resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] - - manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id) - - if action == 'list': - # return a list of current profiles for this object - current_profiles = manageiq_policies.query_resource_profiles() - res_args = dict(changed=False, profiles=current_profiles) - else: - # assign or unassign the profiles - res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git 
diff --git a/plugins/modules/remote_management/manageiq/manageiq_provider.py b/plugins/modules/remote_management/manageiq/manageiq_provider.py
deleted file mode 100644
index f17cbec910..0000000000
--- a/plugins/modules/remote_management/manageiq/manageiq_provider.py
+++ /dev/null
@@ -1,928 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# (c) 2017, Daniel Korn
-# (c) 2017, Yaacov Zamir
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: manageiq_provider
-short_description: Management of providers in ManageIQ.
-extends_documentation_fragment:
-- community.general.manageiq
-
-author: Daniel Korn (@dkorn)
-description:
-  - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
-
-options:
-  state:
-    type: str
-    description:
-      - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
-    choices: ['absent', 'present', 'refresh']
-    default: 'present'
-  name:
-    type: str
-    description: The provider's name.
-    required: true
-  type:
-    type: str
-    description: The provider's type.
-    choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
-  zone:
-    type: str
-    description: The ManageIQ zone name that will manage the provider.
-    default: 'default'
-  provider_region:
-    type: str
-    description: The provider region name to connect to (e.g. AWS region for Amazon).
-  host_default_vnc_port_start:
-    type: str
-    description: The first port in the host VNC range. Defaults to None.
-  host_default_vnc_port_end:
-    type: str
-    description: The last port in the host VNC range. Defaults to None.
-  subscription:
-    type: str
-    description: Microsoft Azure subscription ID. Defaults to None.
-  project:
-    type: str
-    description: Google Compute Engine Project ID. Defaults to None.
-  azure_tenant_id:
-    type: str
-    description: Tenant ID. Defaults to None.
-    aliases: [ keystone_v3_domain_id ]
-  tenant_mapping_enabled:
-    type: bool
-    default: 'no'
-    description: Whether to enable mapping of existing tenants. Defaults to False.
-  api_version:
-    type: str
-    description: The OpenStack Keystone API version. Defaults to None.
-    choices: ['v2', 'v3']
-
-  provider:
-    description: Default endpoint connection information, required if I(state) is present.
-    suboptions:
-      hostname:
-        type: str
-        description: The provider's API hostname.
-        required: true
-      port:
-        type: int
-        description: The provider's API port.
-      userid:
-        type: str
-        description: Provider's API endpoint authentication userid. Defaults to None.
-      password:
-        type: str
-        description: Provider's API endpoint authentication password. Defaults to None.
-      auth_key:
-        type: str
-        description: Provider's API endpoint authentication bearer token. Defaults to None.
-      validate_certs:
-        description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
-        type: bool
-        default: 'yes'
-      security_protocol:
-        type: str
-        description: How SSL certificates should be used for HTTPS requests. Defaults to None.
-        choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
-      certificate_authority:
-        type: str
-        description: The CA bundle string with custom certificates. Defaults to None.
-
-  metrics:
-    description: Metrics endpoint connection information.
- suboptions: - hostname: - type: str - description: The provider's api hostname. - required: true - port: - type: int - description: The provider's api port. - userid: - type: str - description: Provider's api endpoint authentication userid. defaults to None. - password: - type: str - description: Provider's api endpoint authentication password. defaults to None. - auth_key: - type: str - description: Provider's api endpoint authentication bearer token. defaults to None. - validate_certs: - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. - type: bool - default: 'yes' - security_protocol: - type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] - description: How SSL certificates should be used for HTTPS requests. defaults to None. - certificate_authority: - type: str - description: The CA bundle string with custom certificates. defaults to None. - path: - type: str - description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history). - - alerts: - description: Alerts endpoint connection information. - suboptions: - hostname: - type: str - description: The provider's api hostname. - required: true - port: - type: int - description: The provider's api port. - userid: - type: str - description: Provider's api endpoint authentication userid. defaults to None. - password: - type: str - description: Provider's api endpoint authentication password. defaults to None. - auth_key: - type: str - description: Provider's api endpoint authentication bearer token. defaults to None. - validate_certs: - type: bool - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. - default: true - security_protocol: - type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl'] - description: How SSL certificates should be used for HTTPS requests. defaults to None. - certificate_authority: - type: str - description: The CA bundle string with custom certificates. defaults to None. - - ssh_keypair: - description: SSH key pair used for SSH connections to all hosts in this provider. - suboptions: - hostname: - type: str - description: Director hostname. - required: true - userid: - type: str - description: SSH username. - auth_key: - type: str - description: SSH private key. - validate_certs: - description: - - Whether certificates should be verified for connections. 
- type: bool - default: yes - aliases: [ verify_ssl ] -''' - -EXAMPLES = ''' -- name: Create a new provider in ManageIQ ('Hawkular' metrics) - community.general.manageiq_provider: - name: 'EngLab' - type: 'OpenShift' - state: 'present' - provider: - auth_key: 'topSecret' - hostname: 'example.com' - port: 8443 - validate_certs: true - security_protocol: 'ssl-with-validation-custom-ca' - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - metrics: - auth_key: 'topSecret' - role: 'hawkular' - hostname: 'example.com' - port: 443 - validate_certs: true - security_protocol: 'ssl-with-validation-custom-ca' - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - manageiq_connection: - url: 'https://127.0.0.1:80' - username: 'admin' - password: 'password' - validate_certs: true - - -- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics) - community.general.manageiq_provider: - name: 'EngLab' - type: 'Openshift' - state: 'present' - provider: - auth_key: 'topSecret' - hostname: 'next.example.com' - port: 8443 - validate_certs: true - security_protocol: 'ssl-with-validation-custom-ca' - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - 
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - metrics: - auth_key: 'topSecret' - hostname: 'next.example.com' - port: 443 - validate_certs: true - security_protocol: 'ssl-with-validation-custom-ca' - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - manageiq_connection: - url: 'https://127.0.0.1' - username: 'admin' - password: 'password' - validate_certs: true - - -- name: Delete a provider in ManageIQ - community.general.manageiq_provider: - name: 'EngLab' - type: 'Openshift' - state: 'absent' - manageiq_connection: - url: 'https://127.0.0.1' - username: 'admin' - password: 'password' - validate_certs: true - - -- name: Create a new Amazon provider in ManageIQ using token authentication - community.general.manageiq_provider: - name: 'EngAmazon' - type: 'Amazon' - state: 'present' - provider: - hostname: 'amazon.example.com' - userid: 'hello' - password: 'world' - manageiq_connection: - url: 'https://127.0.0.1' - token: 'VeryLongToken' - validate_certs: true - - -- name: Create a new oVirt provider in ManageIQ - community.general.manageiq_provider: - name: 'RHEV' - type: 'oVirt' - state: 'present' - provider: - hostname: 'rhev01.example.com' - userid: 'admin@internal' - password: 'password' - validate_certs: true - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - 
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - metrics: - hostname: 'metrics.example.com' - path: 'ovirt_engine_history' - userid: 'user_id_metrics' - password: 'password_metrics' - validate_certs: true - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - manageiq_connection: - url: 'https://127.0.0.1' - username: 'admin' - password: 'password' - validate_certs: true - -- name: Create a new VMware provider in ManageIQ - community.general.manageiq_provider: - name: 'EngVMware' - type: 'VMware' - state: 'present' - provider: - hostname: 'vcenter.example.com' - host_default_vnc_port_start: 5800 - host_default_vnc_port_end: 5801 - userid: 'root' - password: 'password' - manageiq_connection: - url: 'https://127.0.0.1' - token: 'VeryLongToken' - validate_certs: true - -- name: Create a new Azure provider in ManageIQ - community.general.manageiq_provider: - name: 'EngAzure' - type: 'Azure' - provider_region: 'northeurope' - subscription: 'e272bd74-f661-484f-b223-88dd128a4049' - azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048' - state: 'present' - provider: - hostname: 'azure.example.com' - userid: 'e272bd74-f661-484f-b223-88dd128a4049' - password: 'password' - manageiq_connection: - url: 'https://cf-6af0.rhpds.opentlc.com' - username: 'admin' - password: 'password' - validate_certs: false - -- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair - community.general.manageiq_provider: - name: 'EngDirector' - type: 'Director' - api_version: 'v3' - state: 'present' - provider: - hostname: 'director.example.com' - userid: 'admin' - password: 'password' - security_protocol: 'ssl-with-validation' - validate_certs: 'true' - certificate_authority: | - -----BEGIN CERTIFICATE----- - 
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - ssh_keypair: - hostname: director.example.com - userid: heat-admin - auth_key: 'SecretSSHPrivateKey' - -- name: Create a new OpenStack provider in ManageIQ with amqp metrics - community.general.manageiq_provider: - name: 'EngOpenStack' - type: 'OpenStack' - api_version: 'v3' - state: 'present' - provider_region: 'europe' - tenant_mapping_enabled: 'False' - keystone_v3_domain_id: 'mydomain' - provider: - hostname: 'openstack.example.com' - userid: 'admin' - password: 'password' - security_protocol: 'ssl-with-validation' - validate_certs: 'true' - certificate_authority: | - -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 - -----END CERTIFICATE----- - metrics: - role: amqp - hostname: 'amqp.example.com' - security_protocol: 'non-ssl' - port: 5666 - userid: admin - password: password - - -- name: Create a new GCE provider in ManageIQ - community.general.manageiq_provider: - name: 'EngGoogle' - type: 'GCE' - provider_region: 'europe-west1' - project: 'project1' - state: 'present' - provider: - hostname: 'gce.example.com' - auth_key: 'google_json_key' - validate_certs: 'false' -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec - - -def supported_providers(): - return dict( - Openshift=dict( - class_name='ManageIQ::Providers::Openshift::ContainerManager', - authtype='bearer', - 
default_role='default', - metrics_role='prometheus', - alerts_role='prometheus_alerts', - ), - Amazon=dict( - class_name='ManageIQ::Providers::Amazon::CloudManager', - ), - oVirt=dict( - class_name='ManageIQ::Providers::Redhat::InfraManager', - default_role='default', - metrics_role='metrics', - ), - VMware=dict( - class_name='ManageIQ::Providers::Vmware::InfraManager', - ), - Azure=dict( - class_name='ManageIQ::Providers::Azure::CloudManager', - ), - Director=dict( - class_name='ManageIQ::Providers::Openstack::InfraManager', - ssh_keypair_role="ssh_keypair" - ), - OpenStack=dict( - class_name='ManageIQ::Providers::Openstack::CloudManager', - ), - GCE=dict( - class_name='ManageIQ::Providers::Google::CloudManager', - ), - ) - - -def endpoint_list_spec(): - return dict( - provider=dict(type='dict', options=endpoint_argument_spec()), - metrics=dict(type='dict', options=endpoint_argument_spec()), - alerts=dict(type='dict', options=endpoint_argument_spec()), - ssh_keypair=dict(type='dict', options=endpoint_argument_spec(), no_log=False), - ) - - -def endpoint_argument_spec(): - return dict( - role=dict(), - hostname=dict(required=True), - port=dict(type='int'), - validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), - certificate_authority=dict(), - security_protocol=dict( - choices=[ - 'ssl-with-validation', - 'ssl-with-validation-custom-ca', - 'ssl-without-validation', - 'non-ssl', - ], - ), - userid=dict(), - password=dict(no_log=True), - auth_key=dict(no_log=True), - subscription=dict(no_log=True), - project=dict(), - uid_ems=dict(), - path=dict(), - ) - - -def delete_nulls(h): - """ Remove null entries from a hash - - Returns: - a hash without nulls - """ - if isinstance(h, list): - return [delete_nulls(i) for i in h] - if isinstance(h, dict): - return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None) - - return h - - -class ManageIQProvider(object): - """ - Object to execute provider management operations in manageiq. - """ - - def __init__(self, manageiq): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - def class_name_to_type(self, class_name): - """ Convert class_name to type - - Returns: - the type - """ - out = [k for k, v in supported_providers().items() if v['class_name'] == class_name] - if len(out) == 1: - return out[0] - - return None - - def zone_id(self, name): - """ Search for zone id by zone name. - - Returns: - the zone id, or send a module Fail signal if zone not found. - """ - zone = self.manageiq.find_collection_resource_by('zones', name=name) - if not zone: # zone doesn't exist - self.module.fail_json( - msg="zone %s does not exist in manageiq" % (name)) - - return zone['id'] - - def provider(self, name): - """ Search for provider object by name. - - Returns: - the provider, or None if provider not found. 
- """ - return self.manageiq.find_collection_resource_by('providers', name=name) - - def build_connection_configurations(self, provider_type, endpoints): - """ Build "connection_configurations" objects from - requested endpoints provided by user - - Returns: - the user requested provider endpoints list - """ - connection_configurations = [] - endpoint_keys = endpoint_list_spec().keys() - provider_defaults = supported_providers().get(provider_type, {}) - - # get endpoint defaults - endpoint = endpoints.get('provider') - default_auth_key = endpoint.get('auth_key') - - # build a connection_configuration object for each endpoint - for endpoint_key in endpoint_keys: - endpoint = endpoints.get(endpoint_key) - if endpoint: - # get role and authtype - role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default') - if role == 'default': - authtype = provider_defaults.get('authtype') or role - else: - authtype = role - - # set a connection_configuration - connection_configurations.append({ - 'endpoint': { - 'role': role, - 'hostname': endpoint.get('hostname'), - 'port': endpoint.get('port'), - 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)], - 'security_protocol': endpoint.get('security_protocol'), - 'certificate_authority': endpoint.get('certificate_authority'), - 'path': endpoint.get('path'), - }, - 'authentication': { - 'authtype': authtype, - 'userid': endpoint.get('userid'), - 'password': endpoint.get('password'), - 'auth_key': endpoint.get('auth_key') or default_auth_key, - } - }) - - return connection_configurations - - def delete_provider(self, provider): - """ Deletes a provider from manageiq. - - Returns: - a short message describing the operation executed. - """ - try: - url = '%s/providers/%s' % (self.api_url, provider['id']) - result = self.client.post(url, action='delete') - except Exception as e: - self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e))) - - return dict(changed=True, msg=result['message']) - - def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version): - """ Edit a provider from manageiq. - - Returns: - a short message describing the operation executed. - """ - url = '%s/providers/%s' % (self.api_url, provider['id']) - - resource = dict( - name=name, - zone={'id': zone_id}, - provider_region=provider_region, - connection_configurations=endpoints, - host_default_vnc_port_start=host_default_vnc_port_start, - host_default_vnc_port_end=host_default_vnc_port_end, - subscription=subscription, - project=project, - uid_ems=uid_ems, - tenant_mapping_enabled=tenant_mapping_enabled, - api_version=api_version, - ) - - # NOTE: we do not check for diff's between requested and current - # provider, we always submit endpoints with password or auth_keys, - # since we can not compare with current password or auth_key, - # every edit request is sent to ManageIQ API without comparing - # it to current state. 
- - # clean nulls, we do not send nulls to the api - resource = delete_nulls(resource) - - # try to update provider - try: - result = self.client.post(url, action='edit', resource=resource) - except Exception as e: - self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e))) - - return dict( - changed=True, - msg="successfully updated the provider %s: %s" % (provider['name'], result)) - - def create_provider(self, name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version): - """ Creates the provider in manageiq. - - Returns: - a short message describing the operation executed. - """ - resource = dict( - name=name, - zone={'id': zone_id}, - provider_region=provider_region, - host_default_vnc_port_start=host_default_vnc_port_start, - host_default_vnc_port_end=host_default_vnc_port_end, - subscription=subscription, - project=project, - uid_ems=uid_ems, - tenant_mapping_enabled=tenant_mapping_enabled, - api_version=api_version, - connection_configurations=endpoints, - ) - - # clean nulls, we do not send nulls to the api - resource = delete_nulls(resource) - - # try to create a new provider - try: - url = '%s/providers' % (self.api_url) - result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource) - except Exception as e: - self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e))) - - return dict( - changed=True, - msg="successfully created the provider %s: %s" % (name, result['results'])) - - def refresh(self, provider, name): - """ Trigger provider refresh. - - Returns: - a short message describing the operation executed. - """ - try: - url = '%s/providers/%s' % (self.api_url, provider['id']) - result = self.client.post(url, action='refresh') - except Exception as e: - self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e))) - - return dict( - changed=True, - msg="refreshing provider %s" % name) - - -def main(): - zone_id = None - endpoints = [] - argument_spec = dict( - state=dict(choices=['absent', 'present', 'refresh'], default='present'), - name=dict(required=True), - zone=dict(default='default'), - provider_region=dict(), - host_default_vnc_port_start=dict(), - host_default_vnc_port_end=dict(), - subscription=dict(), - project=dict(), - azure_tenant_id=dict(aliases=['keystone_v3_domain_id']), - tenant_mapping_enabled=dict(default=False, type='bool'), - api_version=dict(choices=['v2', 'v3']), - type=dict(choices=list(supported_providers().keys())), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - # add the endpoint arguments to the arguments - argument_spec.update(endpoint_list_spec()) - - module = AnsibleModule( - argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['provider']), - ('state', 'refresh', ['name'])], - required_together=[ - ['host_default_vnc_port_start', 'host_default_vnc_port_end'] - ], - ) - - name = module.params['name'] - zone_name = module.params['zone'] - provider_type = module.params['type'] - raw_endpoints = module.params - provider_region = module.params['provider_region'] - host_default_vnc_port_start = module.params['host_default_vnc_port_start'] - host_default_vnc_port_end = module.params['host_default_vnc_port_end'] - subscription = module.params['subscription'] - uid_ems = module.params['azure_tenant_id'] - project = 
module.params['project'] - tenant_mapping_enabled = module.params['tenant_mapping_enabled'] - api_version = module.params['api_version'] - state = module.params['state'] - - manageiq = ManageIQ(module) - manageiq_provider = ManageIQProvider(manageiq) - - provider = manageiq_provider.provider(name) - - # provider should not exist - if state == "absent": - # if we have a provider, delete it - if provider: - res_args = manageiq_provider.delete_provider(provider) - # if we do not have a provider, nothing to do - else: - res_args = dict( - changed=False, - msg="provider %s: does not exist in manageiq" % (name)) - - # provider should exist - if state == "present": - # get data user did not explicitly give - if zone_name: - zone_id = manageiq_provider.zone_id(zone_name) - - # if we do not have a provider_type, use the current provider_type - if provider and not provider_type: - provider_type = manageiq_provider.class_name_to_type(provider['type']) - - # check supported_providers types - if not provider_type: - manageiq_provider.module.fail_json( - msg="missing required argument: provider_type") - - # check supported_providers types - if provider_type not in supported_providers().keys(): - manageiq_provider.module.fail_json( - msg="provider_type %s is not supported" % (provider_type)) - - # build "connection_configurations" objects from user requested endpoints - # "provider" is a required endpoint, if we have it, we have endpoints - if raw_endpoints.get("provider"): - endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints) - - # if we have a provider, edit it - if provider: - res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version) - # if we do not have a provider, create it - else: - res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version) - - # refresh provider (trigger sync) - if state == "refresh": - if provider: - res_args = manageiq_provider.refresh(provider, name) - else: - res_args = dict( - changed=False, - msg="provider %s: does not exist in manageiq" % (name)) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_tags.py b/plugins/modules/remote_management/manageiq/manageiq_tags.py deleted file mode 100644 index 83ab60ac93..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_tags.py +++ /dev/null @@ -1,316 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Daniel Korn -# (c) 2017, Yaacov Zamir -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_tags - -short_description: Management of resource tags in ManageIQ. -extends_documentation_fragment: -- community.general.manageiq - -author: Daniel Korn (@dkorn) -description: - - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ. - -options: - state: - type: str - description: - - absent - tags should not exist, - - present - tags should exist, - - list - list current tags. 
- choices: ['absent', 'present', 'list'] - default: 'present' - tags: - type: list - elements: dict - description: - - tags - list of dictionaries, each includes 'name' and 'category' keys. - - required if state is present or absent. - resource_type: - type: str - description: - - The relevant resource type in manageiq. - required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] - resource_name: - type: str - description: - - The name of the resource at which tags will be controlled. - - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. - resource_id: - description: - - The ID of the resource at which tags will be controlled. - - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. - type: int - version_added: 2.2.0 -''' - -EXAMPLES = ''' -- name: Create new tags for a provider in ManageIQ - community.general.manageiq_tags: - resource_name: 'EngLab' - resource_type: 'provider' - tags: - - category: environment - name: prod - - category: owner - name: prod_ops - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Create new tags for a provider in ManageIQ - community.general.manageiq_tags: - resource_id: 23000000790497 - resource_type: 'provider' - tags: - - category: environment - name: prod - - category: owner - name: prod_ops - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Remove tags for a provider in ManageIQ - community.general.manageiq_tags: - state: absent - resource_name: 'EngLab' - resource_type: 'provider' - tags: - - category: environment - name: prod - - category: owner - name: prod_ops - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: List current tags for a provider in ManageIQ - community.general.manageiq_tags: - state: list - resource_name: 'EngLab' - resource_type: 'provider' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities - - -def query_resource_id(manageiq, resource_type, resource_name): - """ Query the resource name in ManageIQ. - - Returns: - the resource id if it exists in manageiq, Fail otherwise. - """ - resource = manageiq.find_collection_resource_by(resource_type, name=resource_name) - if resource: - return resource["id"] - else: - msg = "{resource_name} {resource_type} does not exist in manageiq".format( - resource_name=resource_name, resource_type=resource_type) - manageiq.module.fail_json(msg=msg) - - -class ManageIQTags(object): - """ - Object to execute tags management operations of manageiq resources. 
- """ - - def __init__(self, manageiq, resource_type, resource_id): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - self.resource_type = resource_type - self.resource_id = resource_id - self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( - api_url=self.api_url, - resource_type=resource_type, - resource_id=resource_id) - - def full_tag_name(self, tag): - """ Returns the full tag name in manageiq - """ - return '/managed/{tag_category}/{tag_name}'.format( - tag_category=tag['category'], - tag_name=tag['name']) - - def clean_tag_object(self, tag): - """ Clean a tag object to have human readable form of: - { - full_name: STR, - name: STR, - display_name: STR, - category: STR - } - """ - full_name = tag.get('name') - categorization = tag.get('categorization', {}) - - return dict( - full_name=full_name, - name=categorization.get('name'), - display_name=categorization.get('display_name'), - category=categorization.get('category', {}).get('name')) - - def query_resource_tags(self): - """ Returns a set of the tag objects assigned to the resource - """ - url = '{resource_url}/tags?expand=resources&attributes=categorization' - try: - response = self.client.get(url.format(resource_url=self.resource_url)) - except Exception as e: - msg = "Failed to query {resource_type} tags: {error}".format( - resource_type=self.resource_type, - error=e) - self.module.fail_json(msg=msg) - - resources = response.get('resources', []) - - # clean the returned rest api tag object to look like: - # {full_name: STR, name: STR, display_name: STR, category: STR} - tags = [self.clean_tag_object(tag) for tag in resources] - - return tags - - def tags_to_update(self, tags, action): - """ Create a list of tags we need to update in ManageIQ. - - Returns: - Whether or not a change took place and a message describing the - operation executed. - """ - tags_to_post = [] - assigned_tags = self.query_resource_tags() - - # make a list of assigned full tag names strings - # e.g. ['/managed/environment/prod', ...] 
-    def tags_to_update(self, tags, action):
-        """ Create a list of tags we need to update in ManageIQ.
-
-        Returns:
-            a list of the tag objects that have to be posted to perform
-            the requested action.
-        """
-        tags_to_post = []
-        assigned_tags = self.query_resource_tags()
-
-        # make a list of assigned full tag names strings
-        # e.g. ['/managed/environment/prod', ...]
-        assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
-
-        for tag in tags:
-            assigned = self.full_tag_name(tag) in assigned_tags_set
-
-            if assigned and action == 'unassign':
-                tags_to_post.append(tag)
-            elif (not assigned) and action == 'assign':
-                tags_to_post.append(tag)
-
-        return tags_to_post
-
-    def assign_or_unassign_tags(self, tags, action):
-        """ Perform assign/unassign action
-        """
-        # get a list of tags needed to be changed
-        tags_to_post = self.tags_to_update(tags, action)
-        if not tags_to_post:
-            return dict(
-                changed=False,
-                msg="Tags already {action}ed, nothing to do".format(action=action))
-
-        # try to assign or unassign only the tags that need changing
-        url = '{resource_url}/tags'.format(resource_url=self.resource_url)
-        try:
-            response = self.client.post(url, action=action, resources=tags_to_post)
-        except Exception as e:
-            msg = "Failed to {action} tag: {error}".format(
-                action=action,
-                error=e)
-            self.module.fail_json(msg=msg)
-
-        # check all entities in result to be successful
-        for result in response['results']:
-            if not result['success']:
-                msg = "Failed to {action}: {message}".format(
-                    action=action,
-                    message=result['message'])
-                self.module.fail_json(msg=msg)
-
-        # successfully changed all needed tags
-        return dict(
-            changed=True,
-            msg="Successfully {action}ed tags".format(action=action))
-
-
-def main():
-    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
-    argument_spec = dict(
-        tags=dict(type='list', elements='dict'),
-        resource_id=dict(required=False, type='int'),
-        resource_name=dict(required=False, type='str'),
-        resource_type=dict(required=True, type='str',
-                           choices=list(manageiq_entities().keys())),
-        state=dict(required=False, type='str',
-                   choices=['present', 'absent', 'list'], default='present'),
-    )
-    # add the manageiq connection arguments to the arguments
-    argument_spec.update(manageiq_argument_spec())
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[["resource_id", "resource_name"]],
-        required_one_of=[["resource_id", "resource_name"]],
-        required_if=[
-            ('state', 'present', ['tags']),
-            ('state', 'absent', ['tags'])
-        ],
-    )
-
-    tags = module.params['tags']
-    resource_id = module.params['resource_id']
-    resource_type_key = module.params['resource_type']
-    resource_name = module.params['resource_name']
-    state = module.params['state']
-
-    # get the action and resource type
-    action = actions[state]
-    resource_type = manageiq_entities()[resource_type_key]
-
-    manageiq = ManageIQ(module)
-
-    # query resource id, fail if resource does not exist
-    if resource_id is None:
-        resource_id = query_resource_id(manageiq, resource_type, resource_name)
-
-    manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
-
-    if action == 'list':
-        # return a list of current tags for this object
-        current_tags = manageiq_tags.query_resource_tags()
-        res_args = dict(changed=False, tags=current_tags)
-    else:
-        # assign or unassign the tags
-        res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
-
-    module.exit_json(**res_args)
-
-
-if __name__ == "__main__":
-    main()
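The tags module above and the policies module earlier in this diff share the same state-to-action mapping; condensed into a sketch (the names come from the deleted code, while the dispatch helper itself is hypothetical):

    # Condensed restatement of the dispatch in manageiq_tags.main() /
    # manageiq_policies.main(); 'dispatch' is illustrative only.
    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}

    def dispatch(state, manageiq_tags, tags):
        action = actions[state]
        if action == 'list':
            return dict(changed=False, tags=manageiq_tags.query_resource_tags())
        return manageiq_tags.assign_or_unassign_tags(tags, action)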
diff --git a/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/plugins/modules/remote_management/manageiq/manageiq_tenant.py
deleted file mode 100644
index 58c2e1ed71..0000000000
--- a/plugins/modules/remote_management/manageiq/manageiq_tenant.py
+++ /dev/null
@@ -1,558 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Evert Mulder (based on manageiq_user.py by Daniel Korn)
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
-
-module: manageiq_tenant
-
-short_description: Management of tenants in ManageIQ.
-extends_documentation_fragment:
-- community.general.manageiq
-
-author: Evert Mulder (@evertmulder)
-description:
-  - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
-requirements:
-- manageiq-client
-options:
-  state:
-    type: str
-    description:
-      - absent - tenant should not exist, present - tenant should be present.
-    choices: ['absent', 'present']
-    default: 'present'
-  name:
-    type: str
-    description:
-      - The tenant name.
-    required: true
-    default: null
-  description:
-    type: str
-    description:
-      - The tenant description.
-    required: true
-    default: null
-  parent_id:
-    type: int
-    description:
-      - The id of the parent tenant. If not supplied the root tenant is used.
-      - The C(parent_id) takes precedence over C(parent) when supplied.
-    required: false
-    default: null
-  parent:
-    type: str
-    description:
-      - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
-    required: false
-    default: null
-  quotas:
-    type: dict
-    description:
-      - The tenant quotas.
-      - All parameters case sensitive.
-      - 'Valid attributes are:'
-      - ' - C(cpu_allocated) (int): use null to remove the quota.'
-      - ' - C(mem_allocated) (GB): use null to remove the quota.'
-      - ' - C(storage_allocated) (GB): use null to remove the quota.'
-      - ' - C(vms_allocated) (int): use null to remove the quota.'
-      - ' - C(templates_allocated) (int): use null to remove the quota.'
-    required: false
-    default: null
-'''
-
-EXAMPLES = '''
-- name: Update the root tenant in ManageIQ
-  community.general.manageiq_tenant:
-    name: 'My Company'
-    description: 'My company name'
-    manageiq_connection:
-      url: 'http://127.0.0.1:3000'
-      username: 'admin'
-      password: 'smartvm'
-      validate_certs: False
-
-- name: Create a tenant in ManageIQ
-  community.general.manageiq_tenant:
-    name: 'Dep1'
-    description: 'Manufacturing department'
-    parent_id: 1
-    manageiq_connection:
-      url: 'http://127.0.0.1:3000'
-      username: 'admin'
-      password: 'smartvm'
-      validate_certs: False
-
-- name: Delete a tenant in ManageIQ
-  community.general.manageiq_tenant:
-    state: 'absent'
-    name: 'Dep1'
-    parent_id: 1
-    manageiq_connection:
-      url: 'http://127.0.0.1:3000'
-      username: 'admin'
-      password: 'smartvm'
-      validate_certs: False
-
-- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
-  community.general.manageiq_tenant:
-    name: 'Dep1'
-    parent_id: 1
-    quotas:
-      cpu_allocated: 100
-      mem_allocated: 50
-      vms_allocated: null
-    manageiq_connection:
-      url: 'http://127.0.0.1:3000'
-      username: 'admin'
-      password: 'smartvm'
-      validate_certs: False
-
-
-- name: Delete a tenant in ManageIQ using a token
-  community.general.manageiq_tenant:
-    state: 'absent'
-    name: 'Dep1'
-    parent_id: 1
-    manageiq_connection:
-      url: 'http://127.0.0.1:3000'
-      token: 'sometoken'
-      validate_certs: False
-'''
-
-RETURN = '''
-tenant:
-  description: The tenant.
-  returned: success
-  type: complex
-  contains:
-    id:
-      description: The tenant id
-      returned: success
-      type: int
-    name:
-      description: The tenant name
-      returned: success
-      type: str
-    description:
-      description: The tenant description
-      returned: success
-      type: str
-    parent_id:
-      description: The id of the parent tenant
-      returned: success
-      type: int
-    quotas:
-      description: List of tenant quotas
-      returned: success
-      type: list
-      sample:
-        cpu_allocated: 100
-        mem_allocated: 50
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
-
-
-class ManageIQTenant(object):
-    """
-    Object to execute tenant management operations in manageiq.
-    """
-
-    def __init__(self, manageiq):
-        self.manageiq = manageiq
-
-        self.module = self.manageiq.module
-        self.api_url = self.manageiq.api_url
-        self.client = self.manageiq.client
-
-    def tenant(self, name, parent_id, parent):
-        """ Search for tenant object by name and parent_id or parent
-        or the root tenant if no parent or parent_id is supplied.
-        Returns:
-            the parent tenant, None for the root tenant
-            the tenant or None if tenant was not found.
-        """
-
-        if parent_id:
-            parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
-            if not parent_tenant_res:
-                self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
-            parent_tenant = parent_tenant_res[0]
-            tenants = self.client.collections.tenants.find_by(name=name)
-
-            for tenant in tenants:
-                try:
-                    ancestry = tenant['ancestry']
-                except AttributeError:
-                    ancestry = None
-
-                if ancestry:
-                    tenant_parent_id = int(ancestry.split("/")[-1])
-                    if tenant_parent_id == parent_id:
-                        return parent_tenant, tenant
-
-            return parent_tenant, None
-        else:
-            if parent:
-                parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
-                if not parent_tenant_res:
-                    self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
-
-                if len(parent_tenant_res) > 1:
-                    self.module.fail_json(msg="Multiple parent tenants found in manageiq with name '%s'" % parent)
-
-                parent_tenant = parent_tenant_res[0]
-                parent_id = int(parent_tenant['id'])
-                tenants = self.client.collections.tenants.find_by(name=name)
-
-                for tenant in tenants:
-                    try:
-                        ancestry = tenant['ancestry']
-                    except AttributeError:
-                        ancestry = None
-
-                    if ancestry:
-                        tenant_parent_id = int(ancestry.split("/")[-1])
-                        if tenant_parent_id == parent_id:
-                            return parent_tenant, tenant
-
-                return parent_tenant, None
-            else:
-                # No parent or parent id supplied we select the root tenant
-                return None, self.client.collections.tenants.find_by(ancestry=None)[0]
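The parent lookup in tenant() above keys off the tenant's ancestry attribute, which ManageIQ stores as a '/'-separated path of ancestor ids; the direct parent is the last element. A minimal sketch with a hypothetical value:

    # Illustrative only: parsing the ancestry path as done in tenant().
    ancestry = '1/5/12'  # root tenant id ... direct parent id
    parent_id = int(ancestry.split('/')[-1])
    assert parent_id == 12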
- """ - - if parent_id: - parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id) - if not parent_tenant_res: - self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id)) - parent_tenant = parent_tenant_res[0] - tenants = self.client.collections.tenants.find_by(name=name) - - for tenant in tenants: - try: - ancestry = tenant['ancestry'] - except AttributeError: - ancestry = None - - if ancestry: - tenant_parent_id = int(ancestry.split("/")[-1]) - if int(tenant_parent_id) == parent_id: - return parent_tenant, tenant - - return parent_tenant, None - else: - if parent: - parent_tenant_res = self.client.collections.tenants.find_by(name=parent) - if not parent_tenant_res: - self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent) - - if len(parent_tenant_res) > 1: - self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent) - - parent_tenant = parent_tenant_res[0] - parent_id = int(parent_tenant['id']) - tenants = self.client.collections.tenants.find_by(name=name) - - for tenant in tenants: - try: - ancestry = tenant['ancestry'] - except AttributeError: - ancestry = None - - if ancestry: - tenant_parent_id = int(ancestry.split("/")[-1]) - if tenant_parent_id == parent_id: - return parent_tenant, tenant - - return parent_tenant, None - else: - # No parent or parent id supplied we select the root tenant - return None, self.client.collections.tenants.find_by(ancestry=None)[0] - - def compare_tenant(self, tenant, name, description): - """ Compare tenant fields with new field values. - - Returns: - false if tenant fields have some difference from new fields, true o/w. - """ - found_difference = ( - (name and tenant['name'] != name) or - (description and tenant['description'] != description) - ) - - return not found_difference - - def delete_tenant(self, tenant): - """ Deletes a tenant from manageiq. - - Returns: - dict with `msg` and `changed` - """ - try: - url = '%s/tenants/%s' % (self.api_url, tenant['id']) - result = self.client.post(url, action='delete') - except Exception as e: - self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e))) - - if result['success'] is False: - self.module.fail_json(msg=result['message']) - - return dict(changed=True, msg=result['message']) - - def edit_tenant(self, tenant, name, description): - """ Edit a manageiq tenant. - - Returns: - dict with `msg` and `changed` - """ - resource = dict(name=name, description=description, use_config_for_attributes=False) - - # check if we need to update ( compare_tenant is true is no difference found ) - if self.compare_tenant(tenant, name, description): - return dict( - changed=False, - msg="tenant %s is not changed." % tenant['name'], - tenant=tenant['_data']) - - # try to update tenant - try: - result = self.client.post(tenant['href'], action='edit', resource=resource) - except Exception as e: - self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e))) - - return dict( - changed=True, - msg="successfully updated the tenant with id %s" % (tenant['id'])) - - def create_tenant(self, name, description, parent_tenant): - """ Creates the tenant in manageiq. 
-    def create_tenant(self, name, description, parent_tenant):
-        """ Creates the tenant in manageiq.
-
-        Returns:
-            dict with `msg`, `changed` and `tenant_id`
-        """
-        parent_id = parent_tenant['id']
-        # check for required arguments
-        for key, value in dict(name=name, description=description, parent_id=parent_id).items():
-            if value in (None, ''):
-                self.module.fail_json(msg="missing required argument: %s" % key)
-
-        url = '%s/tenants' % self.api_url
-
-        resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
-
-        try:
-            result = self.client.post(url, action='create', resource=resource)
-            tenant_id = result['results'][0]['id']
-        except Exception as e:
-            self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
-
-        return dict(
-            changed=True,
-            msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
-            tenant_id=tenant_id)
-
-    def tenant_quota(self, tenant, quota_key):
-        """ Search for tenant quota object by tenant and quota_key.
-        Returns:
-            the quota for the tenant, or None if the tenant quota was not found.
-        """
-
-        tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
-
-        return tenant_quotas['resources']
-
-    def tenant_quotas(self, tenant):
-        """ Search for tenant quotas object by tenant.
-        Returns:
-            the quotas for the tenant, or None if no tenant quotas were found.
-        """
-
-        tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
-
-        return tenant_quotas['resources']
-
-    def update_tenant_quotas(self, tenant, quotas):
-        """ Creates, updates or removes the tenant quotas in manageiq.
-
-        Returns:
-            dict with `msg` and `changed`
-        """
-
-        changed = False
-        messages = []
-        for quota_key, quota_value in quotas.items():
-            current_quota_filtered = self.tenant_quota(tenant, quota_key)
-            if current_quota_filtered:
-                current_quota = current_quota_filtered[0]
-            else:
-                current_quota = None
-
-            if quota_value:
-                # convert the GB values to bytes for the byte based quotas
-                if quota_key in ['storage_allocated', 'mem_allocated']:
-                    quota_value_int = int(quota_value) * 1024 * 1024 * 1024
-                else:
-                    quota_value_int = int(quota_value)
-                if current_quota:
-                    res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
-                else:
-                    res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
-            else:
-                if current_quota:
-                    res = self.delete_tenant_quota(tenant, current_quota)
-                else:
-                    res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
-
-            if res['changed']:
-                changed = True
-
-            messages.append(res['msg'])
-
-        return dict(
-            changed=changed,
-            msg=', '.join(messages))
-
-    def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
-        """ Updates the tenant quota in manageiq.
-
-        Returns:
-            result
-        """
-
-        if current_quota['value'] == quota_value:
-            return dict(
                changed=False,
-                msg="tenant quota %s already has value %s" % (quota_key, quota_value))
-        else:
-
-            url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
-            resource = {'value': quota_value}
-            try:
-                self.client.post(url, action='edit', resource=resource)
-            except Exception as e:
-                self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
-
-            return dict(
-                changed=True,
-                msg="successfully updated tenant quota %s" % quota_key)
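Note that update_tenant_quotas above converts the GB-denominated quotas (mem_allocated, storage_allocated) to bytes before creating or editing them; a quick check of that conversion with hypothetical numbers:

    # Illustrative only: GB -> bytes conversion applied to byte-based quotas.
    quota_value_gb = 50
    quota_value_bytes = quota_value_gb * 1024 * 1024 * 1024
    assert quota_value_bytes == 53687091200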
-    def create_tenant_quota(self, tenant, quota_key, quota_value):
-        """ Creates the tenant quota in manageiq.
-
-        Returns:
-            result
-        """
-        url = '%s/quotas' % (tenant['href'])
-        resource = {'name': quota_key, 'value': quota_value}
-        try:
-            self.client.post(url, action='create', resource=resource)
-        except Exception as e:
-            self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
-
-        return dict(
-            changed=True,
-            msg="successfully created tenant quota %s" % quota_key)
-
-    def delete_tenant_quota(self, tenant, quota):
-        """ Deletes the tenant quota in manageiq.
-
-        Returns:
-            result
-        """
-        try:
-            result = self.client.post(quota['href'], action='delete')
-        except Exception as e:
-            self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
-
-        return dict(changed=True, msg=result['message'])
-
-    def create_tenant_response(self, tenant, parent_tenant):
-        """ Creates the ansible result object from a manageiq tenant entity
-
-        Returns:
-            a dict with the tenant id, name, description, parent id,
-            quotas
-        """
-        tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
-
-        try:
-            ancestry = tenant['ancestry']
-            tenant_parent_id = ancestry.split("/")[-1]
-        except AttributeError:
-            # The root tenant does not return the ancestry attribute
-            tenant_parent_id = None
-
-        return dict(
-            id=tenant['id'],
-            name=tenant['name'],
-            description=tenant['description'],
-            parent_id=tenant_parent_id,
-            quotas=tenant_quotas
-        )
-
-    @staticmethod
-    def create_tenant_quotas_response(tenant_quotas):
-        """ Creates the ansible result object from a manageiq tenant_quotas entity
-
-        Returns:
-            a dict with the applied quotas, name and value
-        """
-
-        if not tenant_quotas:
-            return {}
-
-        result = {}
-        for quota in tenant_quotas:
-            if quota['unit'] == 'bytes':
-                value = float(quota['value']) / (1024 * 1024 * 1024)
-            else:
-                value = quota['value']
-            result[quota['name']] = value
-        return result
-
-
-def main():
-    argument_spec = dict(
-        name=dict(required=True, type='str'),
-        description=dict(required=True, type='str'),
-        parent_id=dict(required=False, type='int'),
-        parent=dict(required=False, type='str'),
-        state=dict(choices=['absent', 'present'], default='present'),
-        quotas=dict(type='dict', default={})
-    )
-    # add the manageiq connection arguments to the arguments
-    argument_spec.update(manageiq_argument_spec())
-
-    module = AnsibleModule(
-        argument_spec=argument_spec
-    )
-
-    name = module.params['name']
-    description = module.params['description']
-    parent_id = module.params['parent_id']
-    parent = module.params['parent']
-    state = module.params['state']
-    quotas = module.params['quotas']
-
-    manageiq = ManageIQ(module)
-    manageiq_tenant = ManageIQTenant(manageiq)
-
-    parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
-
-    # tenant should not exist
-    if state == "absent":
-        # if we have a tenant, delete it
-        if tenant:
-            res_args = manageiq_tenant.delete_tenant(tenant)
-        # if we do not have a tenant, nothing to do
-        else:
-            if parent_id:
-                msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
-            else:
-                msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
-
-            res_args = dict(
-                changed=False,
-                msg=msg)
-
-    # tenant should exist
-    if state == "present":
-        # if we have a tenant, edit it
-        if tenant:
-            res_args = manageiq_tenant.edit_tenant(tenant, name, description)
-
-        # if we do not have a tenant, create it
-        else:
-            res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
-            tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
res_args['tenant_id']) - - # quotas as supplied and we have a tenant - if quotas: - tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas) - if tenant_quotas_res['changed']: - res_args['changed'] = True - res_args['tenant_quotas_msg'] = tenant_quotas_res['msg'] - - tenant.reload(expand='resources', attributes=['tenant_quotas']) - res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_user.py b/plugins/modules/remote_management/manageiq/manageiq_user.py deleted file mode 100644 index f3dc8103f7..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_user.py +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2017, Daniel Korn -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_user - -short_description: Management of users in ManageIQ. -extends_documentation_fragment: -- community.general.manageiq - -author: Daniel Korn (@dkorn) -description: - - The manageiq_user module supports adding, updating and deleting users in ManageIQ. - -options: - state: - type: str - description: - - absent - user should not exist, present - user should be. - choices: ['absent', 'present'] - default: 'present' - userid: - type: str - description: - - The unique userid in manageiq, often mentioned as username. - required: true - name: - type: str - description: - - The users' full name. - password: - type: str - description: - - The users' password. - group: - type: str - description: - - The name of the group to which the user belongs. - email: - type: str - description: - - The users' E-mail address. - update_password: - type: str - default: always - choices: ['always', 'on_create'] - description: - - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user. 
-''' - -EXAMPLES = ''' -- name: Create a new user in ManageIQ - community.general.manageiq_user: - userid: 'jdoe' - name: 'Jane Doe' - password: 'VerySecret' - group: 'EvmGroup-user' - email: 'jdoe@example.com' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Create a new user in ManageIQ using a token - community.general.manageiq_user: - userid: 'jdoe' - name: 'Jane Doe' - password: 'VerySecret' - group: 'EvmGroup-user' - email: 'jdoe@example.com' - manageiq_connection: - url: 'http://127.0.0.1:3000' - token: 'sometoken' - validate_certs: False - -- name: Delete a user in ManageIQ - community.general.manageiq_user: - state: 'absent' - userid: 'jdoe' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Delete a user in ManageIQ using a token - community.general.manageiq_user: - state: 'absent' - userid: 'jdoe' - manageiq_connection: - url: 'http://127.0.0.1:3000' - token: 'sometoken' - validate_certs: False - -- name: Update email of user in ManageIQ - community.general.manageiq_user: - userid: 'jdoe' - email: 'jaustine@example.com' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Update email of user in ManageIQ using a token - community.general.manageiq_user: - userid: 'jdoe' - email: 'jaustine@example.com' - manageiq_connection: - url: 'http://127.0.0.1:3000' - token: 'sometoken' - validate_certs: False -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec - - -class ManageIQUser(object): - """ - Object to execute user management operations in manageiq. - """ - - def __init__(self, manageiq): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - def group_id(self, description): - """ Search for group id by group description. - - Returns: - the group id, or send a module Fail signal if group not found. - """ - group = self.manageiq.find_collection_resource_by('groups', description=description) - if not group: # group doesn't exist - self.module.fail_json( - msg="group %s does not exist in manageiq" % (description)) - - return group['id'] - - def user(self, userid): - """ Search for user object by userid. - - Returns: - the user, or None if user not found. - """ - return self.manageiq.find_collection_resource_by('users', userid=userid) - - def compare_user(self, user, name, group_id, password, email): - """ Compare user fields with new field values. - - Returns: - false if user fields have some difference from new fields, true o/w. - """ - found_difference = ( - (name and user['name'] != name) or - (password is not None) or - (email and user['email'] != email) or - (group_id and user['current_group_id'] != group_id) - ) - - return not found_difference - - def delete_user(self, user): - """ Deletes a user from manageiq. - - Returns: - a short message describing the operation executed. 
- """ - try: - url = '%s/users/%s' % (self.api_url, user['id']) - result = self.client.post(url, action='delete') - except Exception as e: - self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e))) - - return dict(changed=True, msg=result['message']) - - def edit_user(self, user, name, group, password, email): - """ Edit a user from manageiq. - - Returns: - a short message describing the operation executed. - """ - group_id = None - url = '%s/users/%s' % (self.api_url, user['id']) - - resource = dict(userid=user['userid']) - if group is not None: - group_id = self.group_id(group) - resource['group'] = dict(id=group_id) - if name is not None: - resource['name'] = name - if email is not None: - resource['email'] = email - - # if there is a password param, but 'update_password' is 'on_create' - # then discard the password (since we're editing an existing user) - if self.module.params['update_password'] == 'on_create': - password = None - if password is not None: - resource['password'] = password - - # check if we need to update ( compare_user is true is no difference found ) - if self.compare_user(user, name, group_id, password, email): - return dict( - changed=False, - msg="user %s is not changed." % (user['userid'])) - - # try to update user - try: - result = self.client.post(url, action='edit', resource=resource) - except Exception as e: - self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e))) - - return dict( - changed=True, - msg="successfully updated the user %s: %s" % (user['userid'], result)) - - def create_user(self, userid, name, group, password, email): - """ Creates the user in manageiq. - - Returns: - the created user id, name, created_on timestamp, - updated_on timestamp, userid and current_group_id. 
- """ - # check for required arguments - for key, value in dict(name=name, group=group, password=password).items(): - if value in (None, ''): - self.module.fail_json(msg="missing required argument: %s" % (key)) - - group_id = self.group_id(group) - url = '%s/users' % (self.api_url) - - resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}} - if email is not None: - resource['email'] = email - - # try to create a new user - try: - result = self.client.post(url, action='create', resource=resource) - except Exception as e: - self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e))) - - return dict( - changed=True, - msg="successfully created the user %s: %s" % (userid, result['results'])) - - -def main(): - argument_spec = dict( - userid=dict(required=True, type='str'), - name=dict(), - password=dict(no_log=True), - group=dict(), - email=dict(), - state=dict(choices=['absent', 'present'], default='present'), - update_password=dict(choices=['always', 'on_create'], - default='always'), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - userid = module.params['userid'] - name = module.params['name'] - password = module.params['password'] - group = module.params['group'] - email = module.params['email'] - state = module.params['state'] - - manageiq = ManageIQ(module) - manageiq_user = ManageIQUser(manageiq) - - user = manageiq_user.user(userid) - - # user should not exist - if state == "absent": - # if we have a user, delete it - if user: - res_args = manageiq_user.delete_user(user) - # if we do not have a user, nothing to do - else: - res_args = dict( - changed=False, - msg="user %s: does not exist in manageiq" % (userid)) - - # user should exist - if state == "present": - # if we have a user, edit it - if user: - res_args = manageiq_user.edit_user(user, name, group, password, email) - # if we do not have a user, create it - else: - res_args = manageiq_user.create_user(userid, name, group, password, email) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py deleted file mode 100644 index bf3e9a8772..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_datacenter_info -short_description: Retrieve information about the OneView Data Centers -description: - - Retrieve information about the OneView Data Centers. - - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)! -requirements: - - "hpOneView >= 2.0.1" -author: - - Alex Monteiro (@aalexmonteiro) - - Madhav Bharadwaj (@madhav-bharadwaj) - - Priyanka Sood (@soodpr) - - Ricardo Galeno (@ricardogpsf) -options: - name: - description: - - Data Center name. - type: str - options: - description: - - "Retrieve additional information. 
Options available: 'visualContent'." - type: list - elements: str - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all Data Centers - community.general.oneview_datacenter_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - delegate_to: localhost - register: result - -- name: Print fetched information about Data Centers - ansible.builtin.debug: - msg: "{{ result.datacenters }}" - -- name: Gather paginated, filtered and sorted information about Data Centers - community.general.oneview_datacenter_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - params: - start: 0 - count: 3 - sort: 'name:descending' - filter: 'state=Unmanaged' - register: result - -- name: Print fetched information about paginated, filtered and sorted list of Data Centers - ansible.builtin.debug: - msg: "{{ result.datacenters }}" - -- name: Gather information about a Data Center by name - community.general.oneview_datacenter_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - name: "My Data Center" - delegate_to: localhost - register: result - -- name: Print fetched information about Data Center found by name - ansible.builtin.debug: - msg: "{{ result.datacenters }}" - -- name: Gather information about the Data Center Visual Content - community.general.oneview_datacenter_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - name: "My Data Center" - options: - - visualContent - delegate_to: localhost - register: result - -- name: Print fetched information about Data Center found by name - ansible.builtin.debug: - msg: "{{ result.datacenters }}" - -- name: Print fetched information about Data Center Visual Content - ansible.builtin.debug: - msg: "{{ result.datacenter_visual_content }}" -''' - -RETURN = ''' -datacenters: - description: Has all the OneView information about the Data Centers. - returned: Always, but can be null. - type: dict - -datacenter_visual_content: - description: Has information about the Data Center Visual Content. - returned: When requested, but can be null. 
- type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class DatacenterInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='str'), - params=dict(type='dict') - ) - - def __init__(self): - super(DatacenterInfoModule, self).__init__( - additional_arg_spec=self.argument_spec, - supports_check_mode=True, - ) - - def execute_module(self): - - client = self.oneview_client.datacenters - info = {} - - if self.module.params.get('name'): - datacenters = client.get_by('name', self.module.params['name']) - - if self.options and 'visualContent' in self.options: - if datacenters: - info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri']) - else: - info['datacenter_visual_content'] = None - - info['datacenters'] = datacenters - else: - info['datacenters'] = client.get_all(**self.facts_params) - - return dict(changed=False, **info) - - -def main(): - DatacenterInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py deleted file mode 100644 index 18e245d617..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_enclosure_info -short_description: Retrieve information about one or more Enclosures -description: - - Retrieve information about one or more of the Enclosures from OneView. - - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - name: - description: - - Enclosure name. - type: str - options: - description: - - "List with options to gather additional information about an Enclosure and related resources. - Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization), - you can provide specific parameters." 
- type: list - elements: raw - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all Enclosures - community.general.oneview_enclosure_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Enclosures - ansible.builtin.debug: - msg: "{{ result.enclosures }}" - -- name: Gather paginated, filtered and sorted information about Enclosures - community.general.oneview_enclosure_info: - params: - start: 0 - count: 3 - sort: name:descending - filter: status=OK - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about paginated, filtered ans sorted list of Enclosures - ansible.builtin.debug: - msg: "{{ result.enclosures }}" - -- name: Gather information about an Enclosure by name - community.general.oneview_enclosure_info: - name: Enclosure-Name - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Enclosure found by name - ansible.builtin.debug: - msg: "{{ result.enclosures }}" - -- name: Gather information about an Enclosure by name with options - community.general.oneview_enclosure_info: - name: Test-Enclosure - options: - - script # optional - - environmentalConfiguration # optional - - utilization # optional - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Enclosure found by name - ansible.builtin.debug: - msg: "{{ result.enclosures }}" - -- name: Print fetched information about Enclosure Script - ansible.builtin.debug: - msg: "{{ result.enclosure_script }}" - -- name: Print fetched information about Enclosure Environmental Configuration - ansible.builtin.debug: - msg: "{{ result.enclosure_environmental_configuration }}" - -- name: Print fetched information about Enclosure Utilization - ansible.builtin.debug: - msg: "{{ result.enclosure_utilization }}" - -- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two - specified dates" - community.general.oneview_enclosure_info: - name: Test-Enclosure - options: - - utilization: # optional - fields: AmbientTemperature - filter: - - startDate=2016-07-01T14:29:42.000Z - - endDate=2017-07-01T03:29:42.000Z - view: day - refresh: false - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Enclosure found by name - ansible.builtin.debug: - msg: "{{ result.enclosures }}" - -- name: Print fetched information about Enclosure Utilization - ansible.builtin.debug: - msg: "{{ result.enclosure_utilization }}" -''' - -RETURN = ''' -enclosures: - description: Has all the OneView information about the Enclosures. - returned: Always, but can be null. - type: dict - -enclosure_script: - description: Has all the OneView information about the script of an Enclosure. - returned: When requested, but can be null. 
- type: str - -enclosure_environmental_configuration: - description: Has all the OneView information about the environmental configuration of an Enclosure. - returned: When requested, but can be null. - type: dict - -enclosure_utilization: - description: Has all the OneView information about the utilization of an Enclosure. - returned: When requested, but can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class EnclosureInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='raw'), - params=dict(type='dict') - ) - - def __init__(self): - super(EnclosureInfoModule, self).__init__( - additional_arg_spec=self.argument_spec, - supports_check_mode=True, - ) - - def execute_module(self): - - info = {} - - if self.module.params['name']: - enclosures = self._get_by_name(self.module.params['name']) - - if self.options and enclosures: - info = self._gather_optional_info(self.options, enclosures[0]) - else: - enclosures = self.oneview_client.enclosures.get_all(**self.facts_params) - - info['enclosures'] = enclosures - - return dict(changed=False, **info) - - def _gather_optional_info(self, options, enclosure): - - enclosure_client = self.oneview_client.enclosures - info = {} - - if options.get('script'): - info['enclosure_script'] = enclosure_client.get_script(enclosure['uri']) - if options.get('environmentalConfiguration'): - env_config = enclosure_client.get_environmental_configuration(enclosure['uri']) - info['enclosure_environmental_configuration'] = env_config - if options.get('utilization'): - info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization']) - - return info - - def _get_utilization(self, enclosure, params): - fields = view = refresh = filter = '' - - if isinstance(params, dict): - fields = params.get('fields') - view = params.get('view') - refresh = params.get('refresh') - filter = params.get('filter') - - return self.oneview_client.enclosures.get_utilization(enclosure['uri'], - fields=fields, - filter=filter, - refresh=refresh, - view=view) - - def _get_by_name(self, name): - return self.oneview_client.enclosures.get_by('name', name) - - -def main(): - EnclosureInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py deleted file mode 100644 index 99b5d0fed9..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_ethernet_network -short_description: Manage OneView Ethernet Network resources -description: - - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. -requirements: - - hpOneView >= 3.1.0 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - state: - description: - - Indicates the desired state for the Ethernet Network resource. - - C(present) will ensure data properties are compliant with OneView. 
- - C(absent) will remove the resource from OneView, if it exists. - - C(default_bandwidth_reset) will reset the network connection template to the default. - type: str - default: present - choices: [present, absent, default_bandwidth_reset] - data: - description: - - List with Ethernet Network properties. - type: dict - required: true -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag - -''' - -EXAMPLES = ''' -- name: Ensure that the Ethernet Network is present using the default configuration - community.general.oneview_ethernet_network: - config: '/etc/oneview/oneview_config.json' - state: present - data: - name: 'Test Ethernet Network' - vlanId: '201' - delegate_to: localhost - -- name: Update the Ethernet Network changing bandwidth and purpose - community.general.oneview_ethernet_network: - config: '/etc/oneview/oneview_config.json' - state: present - data: - name: 'Test Ethernet Network' - purpose: Management - bandwidth: - maximumBandwidth: 3000 - typicalBandwidth: 2000 - delegate_to: localhost - -- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network' - community.general.oneview_ethernet_network: - config: '/etc/oneview/oneview_config.json' - state: present - data: - name: 'Test Ethernet Network' - newName: 'Renamed Ethernet Network' - delegate_to: localhost - -- name: Ensure that the Ethernet Network is absent - community.general.oneview_ethernet_network: - config: '/etc/oneview/oneview_config.json' - state: absent - data: - name: 'New Ethernet Network' - delegate_to: localhost - -- name: Create Ethernet networks in bulk - community.general.oneview_ethernet_network: - config: '/etc/oneview/oneview_config.json' - state: present - data: - vlanIdRange: '1-10,15,17' - purpose: General - namePrefix: TestNetwork - smartLink: false - privateNetwork: false - bandwidth: - maximumBandwidth: 10000 - typicalBandwidth: 2000 - delegate_to: localhost - -- name: Reset to the default network connection template - community.general.oneview_ethernet_network: - config: '/etc/oneview/oneview_config.json' - state: default_bandwidth_reset - data: - name: 'Test Ethernet Network' - delegate_to: localhost -''' - -RETURN = ''' -ethernet_network: - description: Has the facts about the Ethernet Networks. - returned: On state 'present'. Can be null. - type: dict - -ethernet_network_bulk: - description: Has the facts about the Ethernet Networks affected by the bulk insert. - returned: When 'vlanIdRange' attribute is in data argument. Can be null. - type: dict - -ethernet_network_connection_template: - description: Has the facts about the Ethernet Network Connection Template. - returned: On state 'default_bandwidth_reset'. Can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound - - -class EthernetNetworkModule(OneViewModuleBase): - MSG_CREATED = 'Ethernet Network created successfully.' - MSG_UPDATED = 'Ethernet Network updated successfully.' - MSG_DELETED = 'Ethernet Network deleted successfully.' - MSG_ALREADY_PRESENT = 'Ethernet Network is already present.' - MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.' - - MSG_BULK_CREATED = 'Ethernet Networks created successfully.' - MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.' - MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.' 
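The `_bulk_present` path below expands the `vlanIdRange` expression shown in the EXAMPLES above (for example `'1-10,15,17'`) into individual VLAN IDs through the hpOneView SDK's `dissociate_values_or_ranges`, then creates only the networks that are still missing. A minimal standalone sketch of that expansion step, assuming the usual `start-end` and comma-separated syntax; `expand_vlan_id_range` is an illustrative name, not the SDK call:

```python
def expand_vlan_id_range(expression):
    """Expand a VLAN range expression such as '1-10,15,17' into a list of IDs.

    Illustration only: the module delegates this to the hpOneView SDK's
    dissociate_values_or_ranges(), whose exact semantics may differ.
    """
    vlan_ids = []
    for part in expression.split(','):
        part = part.strip()
        if '-' in part:
            # a range like '1-10' expands to every ID from start to end
            start, end = part.split('-', 1)
            vlan_ids.extend(range(int(start), int(end) + 1))
        else:
            # a single value like '15'
            vlan_ids.append(int(part))
    return vlan_ids


print(expand_vlan_id_range('1-10,15,17'))  # [1, 2, ..., 9, 10, 15, 17]
```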
- MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.' - MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.' - - RESOURCE_FACT_NAME = 'ethernet_network' - - def __init__(self): - - argument_spec = dict( - state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']), - data=dict(type='dict', required=True), - ) - - super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True) - - self.resource_client = self.oneview_client.ethernet_networks - - def execute_module(self): - - changed, msg, ansible_facts, resource = False, '', {}, None - - if self.data.get('name'): - resource = self.get_by_name(self.data['name']) - - if self.state == 'present': - if self.data.get('vlanIdRange'): - return self._bulk_present() - else: - return self._present(resource) - elif self.state == 'absent': - return self.resource_absent(resource) - elif self.state == 'default_bandwidth_reset': - changed, msg, ansible_facts = self._default_bandwidth_reset(resource) - return dict(changed=changed, msg=msg, ansible_facts=ansible_facts) - - def _present(self, resource): - - bandwidth = self.data.pop('bandwidth', None) - scope_uris = self.data.pop('scopeUris', None) - result = self.resource_present(resource, self.RESOURCE_FACT_NAME) - - if bandwidth: - if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]: - result['changed'] = True - result['msg'] = self.MSG_UPDATED - - if scope_uris is not None: - result = self.resource_scopes_set(result, 'ethernet_network', scope_uris) - - return result - - def _bulk_present(self): - vlan_id_range = self.data['vlanIdRange'] - result = dict(ansible_facts={}) - ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) - - if not ethernet_networks: - self.resource_client.create_bulk(self.data) - result['changed'] = True - result['msg'] = self.MSG_BULK_CREATED - - else: - vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range) - for net in ethernet_networks[:]: - vlan_ids.remove(net['vlanId']) - - if len(vlan_ids) == 0: - result['msg'] = self.MSG_BULK_ALREADY_EXIST - result['changed'] = False - else: - if len(vlan_ids) == 1: - self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0]) - else: - self.data['vlanIdRange'] = ','.join(map(str, vlan_ids)) - - self.resource_client.create_bulk(self.data) - result['changed'] = True - result['msg'] = self.MSG_MISSING_BULK_CREATED - result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) - - return result - - def _update_connection_template(self, ethernet_network, bandwidth): - - if 'connectionTemplateUri' not in ethernet_network: - return False, None - - connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri']) - - merged_data = connection_template.copy() - merged_data.update({'bandwidth': bandwidth}) - - if not self.compare(connection_template, merged_data): - connection_template = self.oneview_client.connection_templates.update(merged_data) - return True, connection_template - else: - return False, None - - def _default_bandwidth_reset(self, resource): - - if not resource: - raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND) - - default_connection_template = self.oneview_client.connection_templates.get_default() - - changed, connection_template = 
self._update_connection_template(resource, default_connection_template['bandwidth']) - - return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict( - ethernet_network_connection_template=connection_template) - - -def main(): - EthernetNetworkModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py deleted file mode 100644 index f1b55165b1..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_ethernet_network_info -short_description: Retrieve the information about one or more of the OneView Ethernet Networks -description: - - Retrieve the information about one or more of the Ethernet Networks from OneView. - - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - name: - description: - - Ethernet Network name. - type: str - options: - description: - - "List with options to gather additional information about an Ethernet Network and related resources. - Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)." 
- type: list - elements: str -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all Ethernet Networks - community.general.oneview_ethernet_network_info: - config: /etc/oneview/oneview_config.json - delegate_to: localhost - register: result - -- name: Print fetched information about Ethernet Networks - ansible.builtin.debug: - msg: "{{ result.ethernet_networks }}" - -- name: Gather paginated and filtered information about Ethernet Networks - community.general.oneview_ethernet_network_info: - config: /etc/oneview/oneview_config.json - params: - start: 1 - count: 3 - sort: 'name:descending' - filter: 'purpose=General' - delegate_to: localhost - register: result - -- name: Print fetched information about paginated and filtered list of Ethernet Networks - ansible.builtin.debug: - msg: "{{ result.ethernet_networks }}" - -- name: Gather information about an Ethernet Network by name - community.general.oneview_ethernet_network_info: - config: /etc/oneview/oneview_config.json - name: Ethernet network name - delegate_to: localhost - register: result - -- name: Print fetched information about Ethernet Network found by name - ansible.builtin.debug: - msg: "{{ result.ethernet_networks }}" - -- name: Gather information about an Ethernet Network by name with options - community.general.oneview_ethernet_network_info: - config: /etc/oneview/oneview_config.json - name: eth1 - options: - - associatedProfiles - - associatedUplinkGroups - delegate_to: localhost - register: result - -- name: Print fetched information about Ethernet Network Associated Profiles - ansible.builtin.debug: - msg: "{{ result.enet_associated_profiles }}" - -- name: Print fetched information about Ethernet Network Associated Uplink Groups - ansible.builtin.debug: - msg: "{{ result.enet_associated_uplink_groups }}" -''' - -RETURN = ''' -ethernet_networks: - description: Has all the OneView information about the Ethernet Networks. - returned: Always, but can be null. - type: dict - -enet_associated_profiles: - description: Has all the OneView information about the profiles which are using the Ethernet network. - returned: When requested, but can be null. - type: dict - -enet_associated_uplink_groups: - description: Has all the OneView information about the uplink sets which are using the Ethernet network. - returned: When requested, but can be null. 
- type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class EthernetNetworkInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='str'), - params=dict(type='dict') - ) - - def __init__(self): - super(EthernetNetworkInfoModule, self).__init__( - additional_arg_spec=self.argument_spec, - supports_check_mode=True, - ) - - self.resource_client = self.oneview_client.ethernet_networks - - def execute_module(self): - info = {} - if self.module.params['name']: - ethernet_networks = self.resource_client.get_by('name', self.module.params['name']) - - if self.module.params.get('options') and ethernet_networks: - info = self.__gather_optional_info(ethernet_networks[0]) - else: - ethernet_networks = self.resource_client.get_all(**self.facts_params) - - info['ethernet_networks'] = ethernet_networks - - return dict(changed=False, **info) - - def __gather_optional_info(self, ethernet_network): - - info = {} - - if self.options.get('associatedProfiles'): - info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network) - if self.options.get('associatedUplinkGroups'): - info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network) - - return info - - def __get_associated_profiles(self, ethernet_network): - associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri']) - return [self.oneview_client.server_profiles.get(x) for x in associated_profiles] - - def __get_associated_uplink_groups(self, ethernet_network): - uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri']) - return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups] - - -def main(): - EthernetNetworkInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network.py b/plugins/modules/remote_management/oneview/oneview_fc_network.py deleted file mode 100644 index 59984ee8b6..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_fc_network.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_fc_network -short_description: Manage OneView Fibre Channel Network resources. -description: - - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. -requirements: - - "hpOneView >= 4.0.0" -author: "Felipe Bulsoni (@fgbulsoni)" -options: - state: - description: - - Indicates the desired state for the Fibre Channel Network resource. - C(present) will ensure data properties are compliant with OneView. - C(absent) will remove the resource from OneView, if it exists. - type: str - choices: ['present', 'absent'] - required: true - data: - description: - - List with the Fibre Channel Network properties. 
- type: dict - required: true - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag - -''' - -EXAMPLES = ''' -- name: Ensure that the Fibre Channel Network is present using the default configuration - community.general.oneview_fc_network: - config: "{{ config_file_path }}" - state: present - data: - name: 'New FC Network' - -- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach' - community.general.oneview_fc_network: - config: "{{ config_file_path }}" - state: present - data: - name: 'New FC Network' - fabricType: 'DirectAttach' - -- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes - community.general.oneview_fc_network: - config: "{{ config_file_path }}" - state: present - data: - name: 'New FC Network' - scopeUris: - - '/rest/scopes/00SC123456' - - '/rest/scopes/01SC123456' - -- name: Ensure that the Fibre Channel Network is absent - community.general.oneview_fc_network: - config: "{{ config_file_path }}" - state: absent - data: - name: 'New FC Network' -''' - -RETURN = ''' -fc_network: - description: Has the facts about the managed OneView FC Network. - returned: On state 'present'. Can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class FcNetworkModule(OneViewModuleBase): - MSG_CREATED = 'FC Network created successfully.' - MSG_UPDATED = 'FC Network updated successfully.' - MSG_DELETED = 'FC Network deleted successfully.' - MSG_ALREADY_PRESENT = 'FC Network is already present.' - MSG_ALREADY_ABSENT = 'FC Network is already absent.' - RESOURCE_FACT_NAME = 'fc_network' - - def __init__(self): - - additional_arg_spec = dict(data=dict(required=True, type='dict'), - state=dict( - required=True, - choices=['present', 'absent'])) - - super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, - validate_etag_support=True) - - self.resource_client = self.oneview_client.fc_networks - - def execute_module(self): - resource = self.get_by_name(self.data['name']) - - if self.state == 'present': - return self._present(resource) - else: - return self.resource_absent(resource) - - def _present(self, resource): - scope_uris = self.data.pop('scopeUris', None) - result = self.resource_present(resource, self.RESOURCE_FACT_NAME) - if scope_uris is not None: - result = self.resource_scopes_set(result, 'fc_network', scope_uris) - return result - - -def main(): - FcNetworkModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py deleted file mode 100644 index 40fed8d017..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_fc_network_info -short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks -description: - - Retrieve the information about one or more of the Fibre Channel Networks from OneView. 
- - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - name: - description: - - Fibre Channel Network name. - type: str - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all Fibre Channel Networks - community.general.oneview_fc_network_info: - config: /etc/oneview/oneview_config.json - delegate_to: localhost - register: result - -- name: Print fetched information about Fibre Channel Networks - ansible.builtin.debug: - msg: "{{ result.fc_networks }}" - -- name: Gather paginated, filtered and sorted information about Fibre Channel Networks - community.general.oneview_fc_network_info: - config: /etc/oneview/oneview_config.json - params: - start: 1 - count: 3 - sort: 'name:descending' - filter: 'fabricType=FabricAttach' - delegate_to: localhost - register: result - -- name: Print fetched information about paginated, filtered and sorted list of Fibre Channel Networks - ansible.builtin.debug: - msg: "{{ result.fc_networks }}" - -- name: Gather information about a Fibre Channel Network by name - community.general.oneview_fc_network_info: - config: /etc/oneview/oneview_config.json - name: network name - delegate_to: localhost - register: result - -- name: Print fetched information about Fibre Channel Network found by name - ansible.builtin.debug: - msg: "{{ result.fc_networks }}" -''' - -RETURN = ''' -fc_networks: - description: Has all the OneView information about the Fibre Channel Networks. - returned: Always, but can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class FcNetworkInfoModule(OneViewModuleBase): - def __init__(self): - - argument_spec = dict( - name=dict(required=False, type='str'), - params=dict(required=False, type='dict') - ) - - super(FcNetworkInfoModule, self).__init__( - additional_arg_spec=argument_spec, - supports_check_mode=True, - ) - - def execute_module(self): - - if self.module.params['name']: - fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name']) - else: - fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params) - - return dict(changed=False, fc_networks=fc_networks) - - -def main(): - FcNetworkInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py deleted file mode 100644 index ef24f8fc8e..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_fcoe_network -short_description: Manage OneView FCoE Network resources -description: - - Provides an interface to manage FCoE Network resources. Can create, update, or delete. 
-requirements: - - "python >= 2.7.9" - - "hpOneView >= 4.0.0" -author: "Felipe Bulsoni (@fgbulsoni)" -options: - state: - description: - - Indicates the desired state for the FCoE Network resource. - C(present) will ensure data properties are compliant with OneView. - C(absent) will remove the resource from OneView, if it exists. - type: str - default: present - choices: ['present', 'absent'] - data: - description: - - List with FCoE Network properties. - type: dict - required: true - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag - -''' - -EXAMPLES = ''' -- name: Ensure that FCoE Network is present using the default configuration - community.general.oneview_fcoe_network: - config: '/etc/oneview/oneview_config.json' - state: present - data: - name: Test FCoE Network - vlanId: 201 - delegate_to: localhost - -- name: Update the FCOE network scopes - community.general.oneview_fcoe_network: - config: '/etc/oneview/oneview_config.json' - state: present - data: - name: New FCoE Network - scopeUris: - - '/rest/scopes/00SC123456' - - '/rest/scopes/01SC123456' - delegate_to: localhost - -- name: Ensure that FCoE Network is absent - community.general.oneview_fcoe_network: - config: '/etc/oneview/oneview_config.json' - state: absent - data: - name: New FCoE Network - delegate_to: localhost -''' - -RETURN = ''' -fcoe_network: - description: Has the facts about the OneView FCoE Networks. - returned: On state 'present'. Can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class FcoeNetworkModule(OneViewModuleBase): - MSG_CREATED = 'FCoE Network created successfully.' - MSG_UPDATED = 'FCoE Network updated successfully.' - MSG_DELETED = 'FCoE Network deleted successfully.' - MSG_ALREADY_PRESENT = 'FCoE Network is already present.' - MSG_ALREADY_ABSENT = 'FCoE Network is already absent.' 
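Like `FcNetworkModule` above, the `FcoeNetworkModule` below pops `scopeUris` out of `data` and funnels everything else through the base-class helper `resource_present`. A minimal sketch of the merge-and-compare idempotency that helper appears to implement, inferred from how these modules use it rather than from the actual `OneViewModuleBase` source:

```python
def resource_present_sketch(client, existing, desired):
    """Create the resource if missing; otherwise update only when the merged
    data actually differs. Inferred behaviour, not the real
    OneViewModuleBase.resource_present implementation.
    """
    if not existing:
        return True, client.create(desired)   # changed: resource was created
    merged = dict(existing)
    merged.update(desired)
    if merged == existing:
        return False, existing                # no-op: already compliant
    return True, client.update(merged)        # changed: resource was updated
```

Popping `scopeUris` first fits this model: scope assignment is a separate API operation (`resource_scopes_set`), so leaving it in `data` would plausibly make every run look like a change.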
- RESOURCE_FACT_NAME = 'fcoe_network' - - def __init__(self): - - additional_arg_spec = dict(data=dict(required=True, type='dict'), - state=dict(default='present', - choices=['present', 'absent'])) - - super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, - validate_etag_support=True) - - self.resource_client = self.oneview_client.fcoe_networks - - def execute_module(self): - resource = self.get_by_name(self.data.get('name')) - - if self.state == 'present': - return self.__present(resource) - elif self.state == 'absent': - return self.resource_absent(resource) - - def __present(self, resource): - scope_uris = self.data.pop('scopeUris', None) - result = self.resource_present(resource, self.RESOURCE_FACT_NAME) - if scope_uris is not None: - result = self.resource_scopes_set(result, 'fcoe_network', scope_uris) - return result - - -def main(): - FcoeNetworkModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py deleted file mode 100644 index e581bff862..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_fcoe_network_info -short_description: Retrieve the information about one or more of the OneView FCoE Networks -description: - - Retrieve the information about one or more of the FCoE Networks from OneView. - - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - name: - description: - - FCoE Network name. 
- type: str -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all FCoE Networks - community.general.oneview_fcoe_network_info: - config: /etc/oneview/oneview_config.json - delegate_to: localhost - register: result - -- name: Print fetched information about FCoE Networks - ansible.builtin.debug: - msg: "{{ result.fcoe_networks }}" - -- name: Gather paginated, filtered and sorted information about FCoE Networks - community.general.oneview_fcoe_network_info: - config: /etc/oneview/oneview_config.json - params: - start: 0 - count: 3 - sort: 'name:descending' - filter: 'vlanId=2' - delegate_to: localhost - register: result - -- name: Print fetched information about paginated, filtered and sorted list of FCoE Networks - ansible.builtin.debug: - msg: "{{ result.fcoe_networks }}" - -- name: Gather information about a FCoE Network by name - community.general.oneview_fcoe_network_info: - config: /etc/oneview/oneview_config.json - name: Test FCoE Network Information - delegate_to: localhost - register: result - -- name: Print fetched information about FCoE Network found by name - ansible.builtin.debug: - msg: "{{ result.fcoe_networks }}" -''' - -RETURN = ''' -fcoe_networks: - description: Has all the OneView information about the FCoE Networks. - returned: Always, but can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class FcoeNetworkInfoModule(OneViewModuleBase): - def __init__(self): - argument_spec = dict( - name=dict(type='str'), - params=dict(type='dict'), - ) - - super(FcoeNetworkInfoModule, self).__init__( - additional_arg_spec=argument_spec, - supports_check_mode=True, - ) - - def execute_module(self): - - if self.module.params['name']: - fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name']) - else: - fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params) - - return dict(changed=False, fcoe_networks=fcoe_networks) - - -def main(): - FcoeNetworkInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py deleted file mode 100644 index e833f9e092..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_logical_interconnect_group -short_description: Manage OneView Logical Interconnect Group resources -description: - - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. -requirements: - - hpOneView >= 4.0.0 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - state: - description: - - Indicates the desired state for the Logical Interconnect Group resource. - C(absent) will remove the resource from OneView, if it exists. - C(present) will ensure data properties are compliant with OneView. 
- type: str - choices: [absent, present] - default: present - data: - description: - - List with the Logical Interconnect Group properties. - type: dict - required: true -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag - -''' - -EXAMPLES = ''' -- name: Ensure that the Logical Interconnect Group is present - community.general.oneview_logical_interconnect_group: - config: /etc/oneview/oneview_config.json - state: present - data: - name: Test Logical Interconnect Group - uplinkSets: [] - enclosureType: C7000 - interconnectMapTemplate: - interconnectMapEntryTemplates: - - logicalDownlinkUri: ~ - logicalLocation: - locationEntries: - - relativeValue: 1 - type: Bay - - relativeValue: 1 - type: Enclosure - permittedInterconnectTypeName: HP VC Flex-10/10D Module - # Alternatively you can inform permittedInterconnectTypeUri - delegate_to: localhost - -- name: Ensure that the Logical Interconnect Group has the specified scopes - community.general.oneview_logical_interconnect_group: - config: /etc/oneview/oneview_config.json - state: present - data: - name: Test Logical Interconnect Group - scopeUris: - - /rest/scopes/00SC123456 - - /rest/scopes/01SC123456 - delegate_to: localhost - -- name: Ensure that the Logical Interconnect Group is present with name 'Test' - community.general.oneview_logical_interconnect_group: - config: /etc/oneview/oneview_config.json - state: present - data: - name: New Logical Interconnect Group - newName: Test - delegate_to: localhost - -- name: Ensure that the Logical Interconnect Group is absent - community.general.oneview_logical_interconnect_group: - config: /etc/oneview/oneview_config.json - state: absent - data: - name: New Logical Interconnect Group - delegate_to: localhost -''' - -RETURN = ''' -logical_interconnect_group: - description: Has the facts about the OneView Logical Interconnect Group. - returned: On state 'present'. Can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound - - -class LogicalInterconnectGroupModule(OneViewModuleBase): - MSG_CREATED = 'Logical Interconnect Group created successfully.' - MSG_UPDATED = 'Logical Interconnect Group updated successfully.' - MSG_DELETED = 'Logical Interconnect Group deleted successfully.' - MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.' - MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.' - MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.' 
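The `__replace_name_by_uris` helper below rewrites each `interconnectMapEntryTemplates` entry from the playbook-friendly `permittedInterconnectTypeName` (as in the EXAMPLES above) to the `permittedInterconnectTypeUri` the API expects. A rough standalone equivalent, with one assumption: the name-to-URI mapping is passed in as a plain dict here, whereas the module resolves each name through `oneview_client.interconnect_types.get_by()`:

```python
def replace_names_with_uris(data, interconnect_type_uris):
    """Swap permittedInterconnectTypeName for permittedInterconnectTypeUri
    in every interconnect map entry template.

    interconnect_type_uris: assumed name -> URI dict, standing in for the
    lookups the real module performs against the OneView API.
    """
    template = data.get('interconnectMapTemplate') or {}
    for entry in template.get('interconnectMapEntryTemplates') or []:
        name = entry.pop('permittedInterconnectTypeName', None)
        if name is None:
            continue  # entry already carries a URI (or specifies neither)
        if name not in interconnect_type_uris:
            raise LookupError('Interconnect Type was not found.')
        entry['permittedInterconnectTypeUri'] = interconnect_type_uris[name]
```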
- - RESOURCE_FACT_NAME = 'logical_interconnect_group' - - def __init__(self): - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - data=dict(required=True, type='dict') - ) - - super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec, - validate_etag_support=True) - self.resource_client = self.oneview_client.logical_interconnect_groups - - def execute_module(self): - resource = self.get_by_name(self.data['name']) - - if self.state == 'present': - return self.__present(resource) - elif self.state == 'absent': - return self.resource_absent(resource) - - def __present(self, resource): - scope_uris = self.data.pop('scopeUris', None) - - self.__replace_name_by_uris(self.data) - result = self.resource_present(resource, self.RESOURCE_FACT_NAME) - - if scope_uris is not None: - result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris) - - return result - - def __replace_name_by_uris(self, data): - map_template = data.get('interconnectMapTemplate') - - if map_template: - map_entry_templates = map_template.get('interconnectMapEntryTemplates') - if map_entry_templates: - for value in map_entry_templates: - permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None) - if permitted_interconnect_type_name: - value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name( - permitted_interconnect_type_name).get('uri') - - def __get_interconnect_type_by_name(self, name): - i_type = self.oneview_client.interconnect_types.get_by('name', name) - if i_type: - return i_type[0] - else: - raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND) - - -def main(): - LogicalInterconnectGroupModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py deleted file mode 100644 index 436dd5d62b..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_logical_interconnect_group_info -short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups -description: - - Retrieve information about one or more of the Logical Interconnect Groups from OneView - - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - name: - description: - - Logical Interconnect Group name. 
- type: str -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all Logical Interconnect Groups - community.general.oneview_logical_interconnect_group_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Logical Interconnect Groups - ansible.builtin.debug: - msg: "{{ result.logical_interconnect_groups }}" - -- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups - community.general.oneview_logical_interconnect_group_info: - params: - start: 0 - count: 3 - sort: name:descending - filter: name=LIGName - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about paginated, filtered and sorted list of Logical Interconnect Groups - ansible.builtin.debug: - msg: "{{ result.logical_interconnect_groups }}" - -- name: Gather information about a Logical Interconnect Group by name - community.general.oneview_logical_interconnect_group_info: - name: logical interconnect group name - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Logical Interconnect Group found by name - ansible.builtin.debug: - msg: "{{ result.logical_interconnect_groups }}" -''' - -RETURN = ''' -logical_interconnect_groups: - description: Has all the OneView information about the Logical Interconnect Groups. - returned: Always, but can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class LogicalInterconnectGroupInfoModule(OneViewModuleBase): - def __init__(self): - - argument_spec = dict( - name=dict(type='str'), - params=dict(type='dict'), - ) - - super(LogicalInterconnectGroupInfoModule, self).__init__( - additional_arg_spec=argument_spec, - supports_check_mode=True, - ) - - def execute_module(self): - if self.module.params.get('name'): - ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name']) - else: - ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params) - - return dict(changed=False, logical_interconnect_groups=ligs) - - -def main(): - LogicalInterconnectGroupInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_network_set.py b/plugins/modules/remote_management/oneview/oneview_network_set.py deleted file mode 100644 index 3a2632b765..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_network_set.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_network_set -short_description: Manage HPE OneView Network Set resources -description: - - Provides an interface to manage Network Set resources. Can create, update, or delete. 
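For reference while reviewing the removal: the `_info` module above follows the lookup split shared across these OneView modules — `get_by('name', ...)` when a name is supplied, `get_all(**facts_params)` otherwise. A minimal, self-contained sketch of that dispatch; `FakeLigClient` is a stub standing in for the real hpOneView SDK client, which is not part of this diff:

```python
# Sketch of the name-vs-get_all dispatch used by the OneView *_info modules.
# FakeLigClient stands in for the hpOneView SDK client; the real modules
# obtain it from OneViewModuleBase.
class FakeLigClient:
    def __init__(self, resources):
        self._resources = resources

    def get_by(self, field, value):
        # hpOneView's get_by returns a list of matches, possibly empty
        return [r for r in self._resources if r.get(field) == value]

    def get_all(self, **params):
        # params would carry start/count/sort/filter in the real SDK
        return list(self._resources)


def gather(client, name=None, facts_params=None):
    """Mirror of execute_module: look up by name when given, else get_all."""
    if name:
        ligs = client.get_by('name', name)
    else:
        ligs = client.get_all(**(facts_params or {}))
    return dict(changed=False, logical_interconnect_groups=ligs)


if __name__ == '__main__':
    client = FakeLigClient([{'name': 'LIG-A'}, {'name': 'LIG-B'}])
    print(gather(client, name='LIG-A'))   # -> one match
    print(gather(client))                 # -> all resources
```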
-requirements: - - hpOneView >= 4.0.0 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - state: - description: - - Indicates the desired state for the Network Set resource. - - C(present) will ensure data properties are compliant with OneView. - - C(absent) will remove the resource from OneView, if it exists. - type: str - default: present - choices: ['present', 'absent'] - data: - description: - - List with the Network Set properties. - type: dict - required: true - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag - -''' - -EXAMPLES = ''' -- name: Create a Network Set - community.general.oneview_network_set: - config: /etc/oneview/oneview_config.json - state: present - data: - name: OneViewSDK Test Network Set - networkUris: - - Test Ethernet Network_1 # can be a name - - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI - delegate_to: localhost - -- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks - community.general.oneview_network_set: - config: /etc/oneview/oneview_config.json - state: present - data: - name: OneViewSDK Test Network Set - newName: OneViewSDK Test Network Set - Renamed - networkUris: - - Test Ethernet Network_1 - delegate_to: localhost - -- name: Delete the Network Set - community.general.oneview_network_set: - config: /etc/oneview/oneview_config.json - state: absent - data: - name: OneViewSDK Test Network Set - Renamed - delegate_to: localhost - -- name: Update the Network set with two scopes - community.general.oneview_network_set: - config: /etc/oneview/oneview_config.json - state: present - data: - name: OneViewSDK Test Network Set - scopeUris: - - /rest/scopes/01SC123456 - - /rest/scopes/02SC123456 - delegate_to: localhost -''' - -RETURN = ''' -network_set: - description: Has the facts about the Network Set. - returned: On state 'present', but can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound - - -class NetworkSetModule(OneViewModuleBase): - MSG_CREATED = 'Network Set created successfully.' - MSG_UPDATED = 'Network Set updated successfully.' - MSG_DELETED = 'Network Set deleted successfully.' - MSG_ALREADY_PRESENT = 'Network Set is already present.' - MSG_ALREADY_ABSENT = 'Network Set is already absent.' 
- MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: ' - RESOURCE_FACT_NAME = 'network_set' - - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - data=dict(required=True, type='dict')) - - def __init__(self): - super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec, - validate_etag_support=True) - self.resource_client = self.oneview_client.network_sets - - def execute_module(self): - resource = self.get_by_name(self.data.get('name')) - - if self.state == 'present': - return self._present(resource) - elif self.state == 'absent': - return self.resource_absent(resource) - - def _present(self, resource): - scope_uris = self.data.pop('scopeUris', None) - self._replace_network_name_by_uri(self.data) - result = self.resource_present(resource, self.RESOURCE_FACT_NAME) - if scope_uris is not None: - result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris) - return result - - def _get_ethernet_network_by_name(self, name): - result = self.oneview_client.ethernet_networks.get_by('name', name) - return result[0] if result else None - - def _get_network_uri(self, network_name_or_uri): - if network_name_or_uri.startswith('/rest/ethernet-networks'): - return network_name_or_uri - else: - enet_network = self._get_ethernet_network_by_name(network_name_or_uri) - if enet_network: - return enet_network['uri'] - else: - raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri) - - def _replace_network_name_by_uri(self, data): - if 'networkUris' in data: - data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']] - - -def main(): - NetworkSetModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py deleted file mode 100644 index 2d610f2b57..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_network_set_info -short_description: Retrieve information about the OneView Network Sets -description: - - Retrieve information about the Network Sets from OneView. - - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - name: - description: - - Network Set name. - type: str - - options: - description: - - "List with options to gather information about Network Set. - Option allowed: C(withoutEthernet). - The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks." 
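The `networkUris` handling just removed accepts either a network name or a full REST URI per entry. A minimal sketch of that normalization, with an inlined network table instead of the `ethernet_networks` client and a plain `ValueError` in place of `OneViewModuleResourceNotFound`:

```python
# Sketch of the name-or-URI normalization from NetworkSetModule: entries
# already shaped like a REST URI pass through, names are resolved against
# a (stubbed) network table.
ETHERNET_URI_PREFIX = '/rest/ethernet-networks'

FAKE_NETWORKS = [
    {'name': 'Test Ethernet Network_1',
     'uri': '/rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8'},
]


def get_by_name(name):
    matches = [n for n in FAKE_NETWORKS if n['name'] == name]
    return matches[0] if matches else None


def get_network_uri(name_or_uri):
    if name_or_uri.startswith(ETHERNET_URI_PREFIX):
        return name_or_uri
    network = get_by_name(name_or_uri)
    if network:
        return network['uri']
    raise ValueError('Ethernet Network not found: ' + name_or_uri)


data = {'networkUris': ['Test Ethernet Network_1',
                        '/rest/ethernet-networks/1234']}
data['networkUris'] = [get_network_uri(x) for x in data['networkUris']]
print(data['networkUris'])
```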
- type: list - elements: str - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams - -''' - -EXAMPLES = ''' -- name: Gather information about all Network Sets - community.general.oneview_network_set_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Network Sets - ansible.builtin.debug: - msg: "{{ result.network_sets }}" - -- name: Gather paginated, filtered and sorted information about Network Sets - community.general.oneview_network_set_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - params: - start: 0 - count: 3 - sort: 'name:descending' - filter: name='netset001' - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about paginated, filtered and sorted list of Network Sets - ansible.builtin.debug: - msg: "{{ result.network_sets }}" - -- name: Gather information about all Network Sets, excluding Ethernet networks - community.general.oneview_network_set_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - options: - - withoutEthernet - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Network Sets, excluding Ethernet networks - ansible.builtin.debug: - msg: "{{ result.network_sets }}" - -- name: Gather information about a Network Set by name - community.general.oneview_network_set_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - name: Name of the Network Set - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Network Set found by name - ansible.builtin.debug: - msg: "{{ result.network_sets }}" - -- name: Gather information about a Network Set by name, excluding Ethernet networks - community.general.oneview_network_set_info: - hostname: 172.16.101.48 - username: administrator - password: my_password - api_version: 500 - name: Name of the Network Set - options: - - withoutEthernet - no_log: true - delegate_to: localhost - register: result - -- name: Print fetched information about Network Set found by name, excluding Ethernet networks - ansible.builtin.debug: - msg: "{{ result.network_sets }}" -''' - -RETURN = ''' -network_sets: - description: Has all the OneView information about the Network Sets. - returned: Always, but can be empty. 
- type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class NetworkSetInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='str'), - params=dict(type='dict'), - ) - - def __init__(self): - super(NetworkSetInfoModule, self).__init__( - additional_arg_spec=self.argument_spec, - supports_check_mode=True, - ) - - def execute_module(self): - - name = self.module.params.get('name') - - if 'withoutEthernet' in self.options: - filter_by_name = ("\"'name'='%s'\"" % name) if name else '' - network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name) - elif name: - network_sets = self.oneview_client.network_sets.get_by('name', name) - else: - network_sets = self.oneview_client.network_sets.get_all(**self.facts_params) - - return dict(changed=False, network_sets=network_sets) - - -def main(): - NetworkSetInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager.py b/plugins/modules/remote_management/oneview/oneview_san_manager.py deleted file mode 100644 index 20870a31d5..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_san_manager.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_san_manager -short_description: Manage OneView SAN Manager resources -description: - - Provides an interface to manage SAN Manager resources. Can create, update, or delete. -requirements: - - hpOneView >= 3.1.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - state: - description: - - Indicates the desired state for the Uplink Set resource. - - C(present) ensures data properties are compliant with OneView. - - C(absent) removes the resource from OneView, if it exists. - - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. - type: str - default: present - choices: [present, absent, connection_information_set] - data: - description: - - List with SAN Manager properties. 
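`NetworkSetInfoModule.execute_module()` above builds its OneView filter string by hand when `withoutEthernet` is requested. A sketch of that branch under stubbed client calls (`get_all_without_ethernet` and friends are only invoked here, not implemented):

```python
# Sketch of the filter-string handling in NetworkSetInfoModule. When
# 'withoutEthernet' is among the options, the module builds a OneView
# filter expression from the optional name; otherwise it falls back to
# get_by / get_all.
def build_filter(name):
    # Matches the module: "'name'='<value>'" wrapped in double quotes,
    # or an empty string when no name was supplied.
    return ("\"'name'='%s'\"" % name) if name else ''


def gather_network_sets(client, name=None, options=(), facts_params=None):
    if 'withoutEthernet' in options:
        return client.get_all_without_ethernet(filter=build_filter(name))
    if name:
        return client.get_by('name', name)
    return client.get_all(**(facts_params or {}))


print(build_filter('netset001'))  # "'name'='netset001'"
print(build_filter(None))         # (empty string)
```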
- type: dict - required: true - -extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag - -''' - -EXAMPLES = ''' -- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials - community.general.oneview_san_manager: - config: /etc/oneview/oneview_config.json - state: present - data: - providerDisplayName: Brocade Network Advisor - connectionInfo: - - name: Host - value: 172.18.15.1 - - name: Port - value: 5989 - - name: Username - value: username - - name: Password - value: password - - name: UseSsl - value: true - delegate_to: localhost - -- name: Ensure a Device Manager for the Cisco SAN Provider is present - community.general.oneview_san_manager: - config: /etc/oneview/oneview_config.json - state: present - data: - name: 172.18.20.1 - providerDisplayName: Cisco - connectionInfo: - - name: Host - value: 172.18.20.1 - - name: SnmpPort - value: 161 - - name: SnmpUserName - value: admin - - name: SnmpAuthLevel - value: authnopriv - - name: SnmpAuthProtocol - value: sha - - name: SnmpAuthString - value: password - delegate_to: localhost - -- name: Sets the SAN Manager connection information - community.general.oneview_san_manager: - config: /etc/oneview/oneview_config.json - state: connection_information_set - data: - connectionInfo: - - name: Host - value: '172.18.15.1' - - name: Port - value: '5989' - - name: Username - value: 'username' - - name: Password - value: 'password' - - name: UseSsl - value: true - delegate_to: localhost - -- name: Refreshes the SAN Manager - community.general.oneview_san_manager: - config: /etc/oneview/oneview_config.json - state: present - data: - name: 172.18.15.1 - refreshState: RefreshPending - delegate_to: localhost - -- name: Delete the SAN Manager recently created - community.general.oneview_san_manager: - config: /etc/oneview/oneview_config.json - state: absent - data: - name: '172.18.15.1' - delegate_to: localhost -''' - -RETURN = ''' -san_manager: - description: Has the OneView facts about the SAN Manager. - returned: On state 'present'. Can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError - - -class SanManagerModule(OneViewModuleBase): - MSG_CREATED = 'SAN Manager created successfully.' - MSG_UPDATED = 'SAN Manager updated successfully.' - MSG_DELETED = 'SAN Manager deleted successfully.' - MSG_ALREADY_PRESENT = 'SAN Manager is already present.' - MSG_ALREADY_ABSENT = 'SAN Manager is already absent.' - MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found." - - argument_spec = dict( - state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']), - data=dict(type='dict', required=True) - ) - - def __init__(self): - super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True) - self.resource_client = self.oneview_client.san_managers - - def execute_module(self): - if self.data.get('connectionInfo'): - for connection_hash in self.data.get('connectionInfo'): - if connection_hash.get('name') == 'Host': - resource_name = connection_hash.get('value') - elif self.data.get('name'): - resource_name = self.data.get('name') - else: - msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. ' - msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.' 
- raise OneViewModuleValueError(msg.format()) - - resource = self.resource_client.get_by_name(resource_name) - - if self.state == 'present': - changed, msg, san_manager = self._present(resource) - return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager)) - - elif self.state == 'absent': - return self.resource_absent(resource, method='remove') - - elif self.state == 'connection_information_set': - changed, msg, san_manager = self._connection_information_set(resource) - return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager)) - - def _present(self, resource): - if not resource: - provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data)) - return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri) - else: - merged_data = resource.copy() - merged_data.update(self.data) - - # Remove 'connectionInfo' from comparison, since it is not possible to validate it. - resource.pop('connectionInfo', None) - merged_data.pop('connectionInfo', None) - - if self.compare(resource, merged_data): - return False, self.MSG_ALREADY_PRESENT, resource - else: - updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri']) - return True, self.MSG_UPDATED, updated_san_manager - - def _connection_information_set(self, resource): - if not resource: - return self._present(resource) - else: - merged_data = resource.copy() - merged_data.update(self.data) - merged_data.pop('refreshState', None) - if not self.data.get('connectionInfo', None): - raise OneViewModuleValueError('A connectionInfo field is required for this operation.') - updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri']) - return True, self.MSG_UPDATED, updated_san_manager - - def _get_provider_uri_by_display_name(self, data): - display_name = data.get('providerDisplayName') - provider_uri = self.resource_client.get_provider_uri(display_name) - - if not provider_uri: - raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name)) - - return provider_uri - - -def main(): - SanManagerModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py deleted file mode 100644 index 284371cafc..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneview_san_manager_info -short_description: Retrieve information about one or more of the OneView SAN Managers -description: - - Retrieve information about one or more of the SAN Managers from OneView - - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)! -requirements: - - hpOneView >= 2.0.1 -author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) -options: - provider_display_name: - description: - - Provider Display Name. 
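`SanManagerModule._present()` above decides between "already present" and "update" by overlaying the requested data on the current resource and comparing, after dropping the uncomparable `connectionInfo`. A standalone approximation, with plain `!=` standing in for `OneViewModuleBase.compare`:

```python
# Sketch of the merge-and-compare idempotency check from _present: overlay
# the requested data on the current resource, drop fields that cannot be
# validated (connectionInfo holds write-only credentials), and only report
# a change when the merged view differs.
def needs_update(resource, requested):
    merged = resource.copy()
    merged.update(requested)

    current = resource.copy()
    current.pop('connectionInfo', None)
    merged.pop('connectionInfo', None)

    return current != merged


existing = {'name': '172.18.15.1', 'refreshState': 'Stable',
            'connectionInfo': [{'name': 'Host', 'value': '172.18.15.1'}]}
print(needs_update(existing, {'refreshState': 'Stable'}))          # False
print(needs_update(existing, {'refreshState': 'RefreshPending'}))  # True
```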
- type: str - params: - description: - - List of params to delimit, filter and sort the list of resources. - - "params allowed: - - C(start): The first item to return, using 0-based indexing. - - C(count): The number of resources to return. - - C(query): A general query string to narrow the list of resources returned. - - C(sort): The sort order of the returned data set." - type: dict -extends_documentation_fragment: -- community.general.oneview - -''' - -EXAMPLES = ''' -- name: Gather information about all SAN Managers - community.general.oneview_san_manager_info: - config: /etc/oneview/oneview_config.json - delegate_to: localhost - register: result - -- name: Print fetched information about SAN Managers - ansible.builtin.debug: - msg: "{{ result.san_managers }}" - -- name: Gather paginated, filtered and sorted information about SAN Managers - community.general.oneview_san_manager_info: - config: /etc/oneview/oneview_config.json - params: - start: 0 - count: 3 - sort: name:ascending - query: isInternal eq false - delegate_to: localhost - register: result - -- name: Print fetched information about paginated, filtered and sorted list of SAN Managers - ansible.builtin.debug: - msg: "{{ result.san_managers }}" - -- name: Gather information about a SAN Manager by provider display name - community.general.oneview_san_manager_info: - config: /etc/oneview/oneview_config.json - provider_display_name: Brocade Network Advisor - delegate_to: localhost - register: result - -- name: Print fetched information about SAN Manager found by provider display name - ansible.builtin.debug: - msg: "{{ result.san_managers }}" -''' - -RETURN = ''' -san_managers: - description: Has all the OneView information about the SAN Managers. - returned: Always, but can be null. - type: dict -''' - -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase - - -class SanManagerInfoModule(OneViewModuleBase): - argument_spec = dict( - provider_display_name=dict(type='str'), - params=dict(type='dict') - ) - - def __init__(self): - super(SanManagerInfoModule, self).__init__( - additional_arg_spec=self.argument_spec, - supports_check_mode=True, - ) - self.resource_client = self.oneview_client.san_managers - - def execute_module(self): - if self.module.params.get('provider_display_name'): - provider_display_name = self.module.params['provider_display_name'] - san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name) - if san_manager: - resources = [san_manager] - else: - resources = [] - else: - resources = self.oneview_client.san_managers.get_all(**self.facts_params) - - return dict(changed=False, san_managers=resources) - - -def main(): - SanManagerInfoModule().run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py deleted file mode 100644 index 5e02154ed8..0000000000 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2018 Dell EMC Inc. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: idrac_redfish_command -short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. - - For use with Dell iDRAC operations that require Redfish OEM extensions -options: - category: - required: true - description: - - Category to execute on OOB controller - type: str - command: - required: true - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - User for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Create BIOS configuration job (schedule BIOS setting update) - community.general.idrac_redfish_command: - category: Systems - command: CreateBiosConfigJob - resource_id: System.Embedded.1 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -import re -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -class IdracRedfishUtils(RedfishUtils): - - def create_bios_config_job(self): - result = {} - key = "Bios" - jobs = "Jobs" - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + self.systems_uris[0]) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI - response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][ - "@odata.id"] - - payload = {"TargetSettingsURI": set_bios_attr_uri} - response = self.post_request( - self.root_uri + self.manager_uri + "/" + jobs, payload) - if response['ret'] is False: - return response - - response_output = response['resp'].__dict__ - job_id = response_output["headers"]["Location"] - job_id = re.search("JID_.+", job_id).group() - # Currently not passing job_id back to user but patch is coming - return {'ret': True, 'msg': "Config job %s created" % job_id} - - -CATEGORY_COMMANDS_ALL = { - "Systems": ["CreateBiosConfigJob"], - "Accounts": [], - "Manager": [] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - 
command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict() - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - - if category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "CreateBiosConfigJob": - # execute only if we find a Managers resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - result = rf_utils.create_bios_config_job() - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - module.exit_json(changed=True, msg='Action was successful') - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py deleted file mode 100644 index adea4b11a9..0000000000 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: idrac_redfish_config -short_description: Manages servers through iDRAC using Dell Redfish APIs -description: - - For use with Dell iDRAC operations that require Redfish OEM extensions - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - set or update a configuration attribute. 
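The validation block at the top of `main()` above is shared, with different command tables, by all of the Redfish modules in this removal: the category must be a known key, and every requested command must belong to that category. A standalone sketch using the same `CATEGORY_COMMANDS_ALL` shape, with `fail()` standing in for `module.fail_json`:

```python
# Sketch of the category/command validation pattern from the Redfish modules.
CATEGORY_COMMANDS_ALL = {
    "Systems": ["CreateBiosConfigJob"],
    "Accounts": [],
    "Manager": [],
}


def fail(msg):
    raise SystemExit(msg)


def validate(category, command_list):
    if category not in CATEGORY_COMMANDS_ALL:
        fail("Invalid Category '%s'. Valid Categories = %s"
             % (category, list(CATEGORY_COMMANDS_ALL.keys())))
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            fail("Invalid Command '%s'. Valid Commands = %s"
                 % (cmd, CATEGORY_COMMANDS_ALL[category]))


validate("Systems", ["CreateBiosConfigJob"])  # passes silently
# validate("Systems", ["PowerOn"])            # would exit with an error
```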
-options: - category: - required: true - type: str - description: - - Category to execute on iDRAC - command: - required: true - description: - - List of commands to execute on iDRAC - - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and - I(SetSystemAttributes) are mutually exclusive commands when C(category) - is I(Manager) - type: list - elements: str - baseuri: - required: true - description: - - Base URI of iDRAC - type: str - username: - description: - - User for authentication with iDRAC - type: str - password: - description: - - Password for authentication with iDRAC - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - manager_attributes: - required: false - description: - - dictionary of iDRAC attribute name and value pairs to update - default: {} - type: 'dict' - version_added: '0.2.0' - timeout: - description: - - Timeout in seconds for URL requests to iDRAC controller - default: 10 - type: int - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Enable NTP and set NTP server and Time zone attributes in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - NTPConfigGroup.1.NTPEnable: "Enabled" - NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" - Time.1.Timezone: "{{ timezone }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Enable Syslog and set Syslog servers in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SysLog.1.SysLogEnable: "Enabled" - SysLog.1.Server1: "{{ syslog_server1 }}" - SysLog.1.Server2: "{{ syslog_server2 }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Configure SNMP community string, port, protocol and trap format - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SNMP.1.AgentEnable: "Enabled" - SNMP.1.AgentCommunity: "public_community_string" - SNMP.1.TrapFormat: "SNMPv1" - SNMP.1.SNMPProtocol: "All" - SNMP.1.DiscoveryPort: 161 - SNMP.1.AlertPort: 162 - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Enable CSIOR - community.general.idrac_redfish_config: - category: Manager - command: SetLifecycleControllerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Set Power Supply Redundancy Policy to A/B Grid Redundant - community.general.idrac_redfish_config: - category: Manager - command: SetSystemAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.validation import ( - check_mutually_exclusive, - 
check_required_arguments -) -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -class IdracRedfishUtils(RedfishUtils): - - def set_manager_attributes(self, command): - - result = {} - required_arg_spec = {'manager_attributes': {'required': True}} - - try: - check_required_arguments(required_arg_spec, self.module.params) - - except TypeError as e: - msg = to_native(e) - self.module.fail_json(msg=msg) - - key = "Attributes" - command_manager_attributes_uri_map = { - "SetManagerAttributes": self.manager_uri, - "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1", - "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1" - } - manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri) - - attributes = self.module.params['manager_attributes'] - - attrs_to_patch = {} - attrs_skipped = {} - attrs_bad = {} # Store attrs which were not found in the system - - # Search for key entry and extract URI from it - response = self.get_request(self.root_uri + manager_uri + "/" + key) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, - 'msg': "%s: Key %s not found" % (command, key), - 'warning': ""} - - for attr_name, attr_value in attributes.items(): - # Check if attribute exists - if attr_name not in data[u'Attributes']: - # Skip and proceed to next attribute if this isn't valid - attrs_bad.update({attr_name: attr_value}) - continue - - # Find out if value is already set to what we want. If yes, exclude - # those attributes - if data[u'Attributes'][attr_name] == attr_value: - attrs_skipped.update({attr_name: attr_value}) - else: - attrs_to_patch.update({attr_name: attr_value}) - - warning = "" - if attrs_bad: - warning = "Incorrect attributes %s" % (attrs_bad) - - if not attrs_to_patch: - return {'ret': True, 'changed': False, - 'msg': "No changes made. 
Manager attributes already set.", - 'warning': warning} - - payload = {"Attributes": attrs_to_patch} - response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) - if response['ret'] is False: - return response - - return {'ret': True, 'changed': True, - 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch), - 'warning': warning} - - -CATEGORY_COMMANDS_ALL = { - "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", - "SetSystemAttributes"] -} - - -# list of mutually exclusive commands for a category -CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { - "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", - "SetSystemAttributes"]] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - manager_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - resource_id=dict() - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # check for mutually exclusive commands - try: - # check_mutually_exclusive accepts a single list or list of lists that - # are groups of terms that should be mutually exclusive with one another - # and checks that against a dictionary - check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], - dict.fromkeys(command_list, True)) - - except TypeError as e: - module.fail_json(msg=to_native(e)) - - # Organize by Categories / Commands - - if category == "Manager": - # execute only if we find a Manager resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: - result = rf_utils.set_manager_attributes(command) - - # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) - - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py deleted file mode 100644 index fb137acca3..0000000000 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: idrac_redfish_info -short_description: Gather PowerEdge server information through iDRAC using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - get information back. - - For use with Dell EMC iDRAC operations that require Redfish OEM extensions - - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)! 
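The attribute triage inside `set_manager_attributes()` above is what keeps the module idempotent: only attributes that exist on the system and differ from the requested value end up in the PATCH payload. A self-contained sketch with inlined data in place of the iDRAC response:

```python
# Sketch of the attribute triage from set_manager_attributes: requested
# attributes are split into unknown names, values already in place, and
# the minimal set that actually needs a PATCH.
def triage(current, requested):
    to_patch, skipped, bad = {}, {}, {}
    for name, value in requested.items():
        if name not in current:
            bad[name] = value          # not a valid attribute on this system
        elif current[name] == value:
            skipped[name] = value      # already set, nothing to do
        else:
            to_patch[name] = value     # include in the PATCH payload
    return to_patch, skipped, bad


current = {'SNMP.1.AgentEnable': 'Disabled', 'SNMP.1.DiscoveryPort': 161}
requested = {'SNMP.1.AgentEnable': 'Enabled',
             'SNMP.1.DiscoveryPort': 161,
             'NoSuch.1.Attribute': 'x'}
to_patch, skipped, bad = triage(current, requested)
print(to_patch)  # {'SNMP.1.AgentEnable': 'Enabled'} -> {"Attributes": to_patch}
print(skipped)   # {'SNMP.1.DiscoveryPort': 161}
print(bad)       # {'NoSuch.1.Attribute': 'x'}
```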
-options: - category: - required: true - description: - - Category to execute on iDRAC controller - type: str - command: - required: true - description: - - List of commands to execute on iDRAC controller - - C(GetManagerAttributes) returns the list of dicts containing iDRAC, - LifecycleController and System attributes - type: list - elements: str - baseuri: - required: true - description: - - Base URI of iDRAC controller - type: str - username: - description: - - User for authentication with iDRAC controller - type: str - password: - description: - - Password for authentication with iDRAC controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Get Manager attributes with a default of 20 seconds - community.general.idrac_redfish_info: - category: Manager - command: GetManagerAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result - - # Examples to display the value of all or a single iDRAC attribute - - name: Store iDRAC attributes as a fact variable - ansible.builtin.set_fact: - idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" - - - name: Display all iDRAC attributes - ansible.builtin.debug: - var: idrac_attributes - - - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute - ansible.builtin.debug: - var: idrac_attributes['Syslog.1.SysLogEnable'] - - # Examples to display the value of all or a single LifecycleController attribute - - name: Store LifecycleController attributes as a fact variable - ansible.builtin.set_fact: - lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" - - - name: Display LifecycleController attributes - ansible.builtin.debug: - var: lc_attributes - - - name: Display the value of 'CollectSystemInventoryOnRestart' attribute - ansible.builtin.debug: - var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] - - # Examples to display the value of all or a single System attribute - - name: Store System attributes as a fact variable - ansible.builtin.set_fact: - system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" - - - name: Display System attributes - ansible.builtin.debug: - var: system_attributes - - - name: Display the value of 'PSRedPolicy' - ansible.builtin.debug: - var: system_attributes['ServerPwr.1.PSRedPolicy'] - -''' - -RETURN = ''' -msg: - description: different results depending on task - returned: always - type: dict - sample: List of Manager attributes -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -class IdracRedfishUtils(RedfishUtils): - - def get_manager_attributes(self): - result = {} - manager_attributes = [] - properties = ['Attributes', 'Id'] - - response = self.get_request(self.root_uri + self.manager_uri) - - if response['ret'] is False: - return response - data = response['data'] - - # Manager attributes are 
supported as part of iDRAC OEM extension - # Attributes are supported only on iDRAC9 - try: - for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']: - attributes_uri = members[u'@odata.id'] - - response = self.get_request(self.root_uri + attributes_uri) - if response['ret'] is False: - return response - data = response['data'] - - attributes = {} - for prop in properties: - if prop in data: - attributes[prop] = data.get(prop) - - if attributes: - manager_attributes.append(attributes) - - result['ret'] = True - - except (AttributeError, KeyError) as e: - result['ret'] = False - result['msg'] = "Failed to find attribute/key: " + str(e) - - result["entries"] = manager_attributes - return result - - -CATEGORY_COMMANDS_ALL = { - "Manager": ["GetManagerAttributes"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=True, - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - - if category == "Manager": - # execute only if we find a Manager resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "GetManagerAttributes": - result = rf_utils.get_manager_attributes() - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - module.exit_json(redfish_facts=result) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py deleted file mode 100644 index 8702e468ca..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ /dev/null @@ -1,831 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. 
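Before moving on to `redfish_command`, a short sketch of the OEM walk in `get_manager_attributes()` above: each `@odata.id` under `Links.Oem.Dell.DellAttributes` is fetched and reduced to its `Attributes` and `Id` properties. `fetch()` and the inlined payloads are stand-ins for `get_request` and a live iDRAC9:

```python
# Sketch of the OEM attribute walk from get_manager_attributes, with
# inlined sample payloads instead of HTTP round trips.
SAMPLE = {
    '/redfish/v1/Managers/iDRAC.Embedded.1': {
        'Links': {'Oem': {'Dell': {'DellAttributes': [
            {'@odata.id': '/redfish/v1/Managers/iDRAC.Embedded.1/Attributes'},
        ]}}},
    },
    '/redfish/v1/Managers/iDRAC.Embedded.1/Attributes': {
        'Id': 'iDRACAttributes',
        'Attributes': {'Syslog.1.SysLogEnable': 'Enabled'},
        'Name': 'ignored by the module',
    },
}


def fetch(uri):
    return SAMPLE[uri]


def get_manager_attributes(manager_uri):
    entries = []
    data = fetch(manager_uri)
    for member in data['Links']['Oem']['Dell']['DellAttributes']:
        attr_data = fetch(member['@odata.id'])
        # keep only the properties the module reports
        picked = {k: attr_data[k] for k in ('Attributes', 'Id') if k in attr_data}
        if picked:
            entries.append(picked)
    return entries


print(get_manager_attributes('/redfish/v1/Managers/iDRAC.Embedded.1'))
```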
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_command -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. - - Manages OOB controller ex. reboot, log management. - - Manages OOB controller users ex. add, remove, update. - - Manages system power ex. on, off, graceful and forced reboot. -options: - category: - required: true - description: - - Category to execute on OOB controller - type: str - command: - required: true - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - Username for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - session_uri: - description: - - URI of the session resource - type: str - version_added: 2.3.0 - id: - required: false - aliases: [ account_id ] - description: - - ID of account to delete/modify. - - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. - type: str - new_username: - required: false - aliases: [ account_username ] - description: - - Username of account to add/delete/modify - type: str - new_password: - required: false - aliases: [ account_password ] - description: - - New password of account to add/modify - type: str - roleid: - required: false - aliases: [ account_roleid ] - description: - - Role of account to add/modify - type: str - bootdevice: - required: false - description: - - bootdevice when setting boot configuration - type: str - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - boot_override_mode: - description: - - Boot mode when using an override. 
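The `auth_token` / `session_uri` options described above map onto the standard Redfish session workflow. A sketch using plain `requests` rather than the collection's `RedfishUtils`; the host, credentials, and `verify=False` are illustrative assumptions, and the `Location` header is assumed to carry a path relative to the host:

```python
# Sketch of the Redfish session workflow behind auth_token / session_uri.
import requests

BASE = 'https://192.0.2.10'  # hypothetical OOB controller

# CreateSession: POST credentials; token and session URI come back in headers
resp = requests.post(BASE + '/redfish/v1/SessionService/Sessions',
                     json={'UserName': 'admin', 'Password': 'secret'},
                     verify=False)
token = resp.headers['X-Auth-Token']
session_uri = resp.headers['Location']

# Subsequent calls authenticate with the token instead of user/password
systems = requests.get(BASE + '/redfish/v1/Systems',
                       headers={'X-Auth-Token': token}, verify=False)

# DeleteSession: remove the session when finished
requests.delete(BASE + session_uri,
                headers={'X-Auth-Token': token}, verify=False)
```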
- type: str - choices: [ Legacy, UEFI ] - version_added: 3.5.0 - uefi_target: - required: false - description: - UEFI target when bootdevice is "UefiTarget" - type: str - boot_next: - required: false - description: - BootNext target when bootdevice is "UefiBootNext" - type: str - update_username: - required: false - aliases: [ account_updatename ] - description: - new update user name for account_username - type: str - version_added: '0.2.0' - account_properties: - required: false - description: - properties of account service to update - type: dict - version_added: '0.2.0' - resource_id: - required: false - description: - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - update_image_uri: - required: false - description: - The URI of the image for the update - type: str - version_added: '0.2.0' - update_protocol: - required: false - description: - The protocol for the update - type: str - version_added: '0.2.0' - update_targets: - required: false - description: - The list of target resource URIs to apply the update to - type: list - elements: str - version_added: '0.2.0' - update_creds: - required: false - description: - The credentials for retrieving the update image - type: dict - version_added: '0.2.0' - suboptions: - username: - required: false - description: - The username for retrieving the update image - type: str - password: - required: false - description: - The password for retrieving the update image - type: str - virtual_media: - required: false - description: - The options for VirtualMedia commands - type: dict - version_added: '0.2.0' - suboptions: - media_types: - required: false - description: - The list of media types appropriate for the image - type: list - elements: str - image_url: - required: false - description: - The URL of the image to insert or eject - type: str - inserted: - required: false - description: - Indicates if the image is treated as inserted on command completion - type: bool - default: True - write_protected: - required: false - description: - Indicates if the media is treated as write-protected - type: bool - default: True - username: - required: false - description: - The username for accessing the image URL - type: str - password: - required: false - description: - The password for accessing the image URL - type: str - transfer_protocol_type: - required: false - description: - The network protocol to use with the image - type: str - transfer_method: - required: false - description: - The transfer method to use with the image - type: str - strip_etag_quotes: - description: - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string.
- type: bool - default: false - version_added: 3.7.0 - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Restart system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulRestart - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Turn system power off - community.general.redfish_command: - category: Systems - command: PowerForceOff - resource_id: 437XR1138R2 - - - name: Restart system power forcefully - community.general.redfish_command: - category: Systems - command: PowerForceRestart - resource_id: 437XR1138R2 - - - name: Shutdown system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulShutdown - resource_id: 437XR1138R2 - - - name: Turn system power on - community.general.redfish_command: - category: Systems - command: PowerOn - resource_id: 437XR1138R2 - - - name: Reboot system power - community.general.redfish_command: - category: Systems - command: PowerReboot - resource_id: 437XR1138R2 - - - name: Set one-time boot device to {{ bootdevice }} - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiTarget" - uefi_target: "/0x31/0x33/0x01/0x01" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot device to BootNext target of "Boot0001" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiBootNext" - boot_next: "Boot0001" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set persistent boot device override - community.general.redfish_command: - category: Systems - command: EnableContinuousBootOverride - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot to BiosSetup - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - bootnext: BiosSetup - boot_override_mode: Legacy - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Disable persistent boot device override - community.general.redfish_command: - category: Systems - command: DisableBootOverride - - - name: Add user - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - - - name: Add user using new option aliases - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - account_roleid: "{{ account_roleid }}" - - - name: Delete user - community.general.redfish_command: - category: Accounts - command: DeleteUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - 
account_username: "{{ account_username }}" - - - name: Disable user - community.general.redfish_command: - category: Accounts - command: DisableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Enable user - community.general.redfish_command: - category: Accounts - command: EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Add and enable user - community.general.redfish_command: - category: Accounts - command: AddUser,EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - - - name: Update user password - community.general.redfish_command: - category: Accounts - command: UpdateUserPassword - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - - - name: Update user role - community.general.redfish_command: - category: Accounts - command: UpdateUserRole - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - roleid: "{{ roleid }}" - - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_updatename: "{{ account_updatename }}" - - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - update_username: "{{ update_username }}" - - - name: Update AccountService properties - community.general.redfish_command: - category: Accounts - command: UpdateAccountServiceProperties - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_properties: - AccountLockoutThreshold: 5 - AccountLockoutDuration: 600 - - - name: Clear Manager Logs with a timeout of 20 seconds - community.general.redfish_command: - category: Manager - command: ClearLogs - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Set chassis indicator LED to blink using security token for auth - community.general.redfish_command: - category: Chassis - command: IndicatorLedBlink - resource_id: 1U - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" - - - name: Clear Sessions - community.general.redfish_command: - category: Sessions - command: ClearSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Simple update - community.general.redfish_command: - category: Update - command: 
SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: https://example.com/myupdate.img - - - name: Simple update with additional options - community.general.redfish_command: - category: Update - command: SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: //example.com/myupdate.img - update_protocol: FTP - update_targets: - - /redfish/v1/UpdateService/FirmwareInventory/BMC - update_creds: - username: operator - password: supersecretpwd - - - name: Insert Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - media_types: - - CD - - DVD - resource_id: BMC - - - name: Eject Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - resource_id: BMC - - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: GracefulRestart - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulRestart - resource_id: BMC - - - name: Turn manager power off - community.general.redfish_command: - category: Manager - command: PowerForceOff - resource_id: BMC - - - name: Restart manager power forcefully - community.general.redfish_command: - category: Manager - command: PowerForceRestart - resource_id: BMC - - - name: Shutdown manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulShutdown - resource_id: BMC - - - name: Turn manager power on - community.general.redfish_command: - category: Manager - command: PowerOn - resource_id: BMC - - - name: Reboot manager power - community.general.redfish_command: - category: Manager - command: PowerReboot - resource_id: BMC -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", - "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride"], - "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], - "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", - "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", - "UpdateAccountServiceProperties"], - "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], - "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", - "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", - "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], - "Update": ["SimpleUpdate"] -} - - -def main(): - result = {} - module = 
AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - session_uri=dict(), - id=dict(aliases=["account_id"]), - new_username=dict(aliases=["account_username"]), - new_password=dict(aliases=["account_password"], no_log=True), - roleid=dict(aliases=["account_roleid"]), - update_username=dict(type='str', aliases=["account_updatename"]), - account_properties=dict(type='dict', default={}), - bootdevice=dict(), - timeout=dict(type='int', default=10), - uefi_target=dict(), - boot_next=dict(), - boot_override_mode=dict(choices=['Legacy', 'UEFI']), - resource_id=dict(), - update_image_uri=dict(), - update_protocol=dict(), - update_targets=dict(type='list', elements='str', default=[]), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) - ), - virtual_media=dict( - type='dict', - options=dict( - media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - strip_etag_quotes=dict(type='bool', default=False), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # user to add/modify/delete - user = {'account_id': module.params['id'], - 'account_username': module.params['new_username'], - 'account_password': module.params['new_password'], - 'account_roleid': module.params['roleid'], - 'account_updatename': module.params['update_username'], - 'account_properties': module.params['account_properties']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # update options - update_opts = { - 'update_image_uri': module.params['update_image_uri'], - 'update_protocol': module.params['update_protocol'], - 'update_targets': module.params['update_targets'], - 'update_creds': module.params['update_creds'] - } - - # Boot override options - boot_opts = { - 'bootdevice': module.params['bootdevice'], - 'uefi_target': module.params['uefi_target'], - 'boot_next': module.params['boot_next'], - 'boot_override_mode': module.params['boot_override_mode'], - } - - # VirtualMedia options - virtual_media = module.params['virtual_media'] - - # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. 
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Accounts": - ACCOUNTS_COMMANDS = { - "AddUser": rf_utils.add_user, - "EnableUser": rf_utils.enable_user, - "DeleteUser": rf_utils.delete_user, - "DisableUser": rf_utils.disable_user, - "UpdateUserRole": rf_utils.update_user_role, - "UpdateUserPassword": rf_utils.update_user_password, - "UpdateUserName": rf_utils.update_user_name, - "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties - } - - # execute only if we find an Account service resource - result = rf_utils._find_accountservice_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - result = ACCOUNTS_COMMANDS[command](user) - - elif category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command.startswith('Power'): - result = rf_utils.manage_system_power(command) - elif command == "SetOneTimeBoot": - boot_opts['override_enabled'] = 'Once' - result = rf_utils.set_boot_override(boot_opts) - elif command == "EnableContinuousBootOverride": - boot_opts['override_enabled'] = 'Continuous' - result = rf_utils.set_boot_override(boot_opts) - elif command == "DisableBootOverride": - boot_opts['override_enabled'] = 'Disabled' - result = rf_utils.set_boot_override(boot_opts) - - elif category == "Chassis": - result = rf_utils._find_chassis_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] - - # Check if more than one led_command is present - num_led_commands = sum([command in led_commands for command in command_list]) - if num_led_commands > 1: - result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} - else: - for command in command_list: - if command in led_commands: - result = rf_utils.manage_indicator_led(command) - - elif category == "Sessions": - # execute only if we find SessionService resources - resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "ClearSessions": - result = rf_utils.clear_sessions() - elif command == "CreateSession": - result = rf_utils.create_session() - elif command == "DeleteSession": - result = rf_utils.delete_session(module.params['session_uri']) - - elif category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - # standardize on the Power* commands, but allow the the legacy - # GracefulRestart command - if command == 'GracefulRestart': - command = 'PowerGracefulRestart' - - if command.startswith('Power'): - result = rf_utils.manage_manager_power(command) - elif command == 'ClearLogs': - result = rf_utils.clear_logs() - elif command == 'VirtualMediaInsert': - result = 
rf_utils.virtual_media_insert(virtual_media) - elif command == 'VirtualMediaEject': - result = rf_utils.virtual_media_eject(virtual_media) - - elif category == "Update": - # execute only if we find UpdateService resources - resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "SimpleUpdate": - result = rf_utils.simple_update(update_opts) - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - changed = result.get('changed', True) - session = result.get('session', dict()) - module.exit_json(changed=changed, session=session, - msg='Action was successful') - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py deleted file mode 100644 index ff4b15487e..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_config -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - set or update a configuration attribute. - - Manages BIOS configuration settings. - - Manages OOB controller configuration settings. -options: - category: - required: true - description: - - Category to execute on OOB controller - type: str - command: - required: true - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - User for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - bios_attributes: - required: false - description: - - dictionary of BIOS attributes to update - default: {} - type: dict - version_added: '0.2.0' - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - boot_order: - required: false - description: - - list of BootOptionReference strings specifying the BootOrder - default: [] - type: list - elements: str - version_added: '0.2.0' - network_protocols: - required: false - description: - - setting dict of manager services to update - type: dict - version_added: '0.2.0' - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - nic_addr: - required: false - description: - - EthernetInterface Address string on OOB controller - default: 'null' - type: str - version_added: '0.2.0' - nic_config: - required: false - description: - - setting dict of EthernetInterface on OOB controller - type: dict - version_added: '0.2.0' - strip_etag_quotes: - description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. 
- - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. - type: bool - default: false - version_added: 3.7.0 - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Set BootMode to UEFI - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Uefi" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set multiple BootMode attributes - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Bios" - OneTimeBootMode: "Enabled" - BootSeqRetry: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Enable PXE Boot for NIC1 - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - PxeDev1EnDis: Enabled - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set BIOS default settings with a timeout of 20 seconds - community.general.redfish_config: - category: Systems - command: SetBiosDefaultSettings - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - - - name: Set boot order - community.general.redfish_config: - category: Systems - command: SetBootOrder - boot_order: - - Boot0002 - - Boot0001 - - Boot0000 - - Boot0003 - - Boot0004 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set boot order to the default - community.general.redfish_config: - category: Systems - command: SetDefaultBootOrder - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set Manager Network Protocols - community.general.redfish_config: - category: Manager - command: SetNetworkProtocols - network_protocols: - SNMP: - ProtocolEnabled: True - Port: 161 - HTTP: - ProtocolEnabled: False - Port: 8080 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set Manager NIC - community.general.redfish_config: - category: Manager - command: SetManagerNic - nic_config: - DHCPv4: - DHCPEnabled: False - IPv4StaticAddresses: - Address: 192.168.1.3 - Gateway: 192.168.1.1 - SubnetMask: 255.255.255.0 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", - "SetDefaultBootOrder"], - "Manager": ["SetNetworkProtocols", "SetManagerNic"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - bios_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - 
boot_order=dict(type='list', elements='str', default=[]), - network_protocols=dict( - type='dict', - default={} - ), - resource_id=dict(), - nic_addr=dict(default='null'), - nic_config=dict( - type='dict', - default={} - ), - strip_etag_quotes=dict(type='bool', default=False), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # BIOS attributes to update - bios_attributes = module.params['bios_attributes'] - - # boot order - boot_order = module.params['boot_order'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # manager nic - nic_addr = module.params['nic_addr'] - nic_config = module.params['nic_config'] - - # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "SetBiosDefaultSettings": - result = rf_utils.set_bios_default_settings() - elif command == "SetBiosAttributes": - result = rf_utils.set_bios_attributes(bios_attributes) - elif command == "SetBootOrder": - result = rf_utils.set_boot_order(boot_order) - elif command == "SetDefaultBootOrder": - result = rf_utils.set_default_boot_order() - - elif category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "SetNetworkProtocols": - result = rf_utils.set_network_protocols(module.params['network_protocols']) - elif command == "SetManagerNic": - result = rf_utils.set_manager_nic(nic_addr, nic_config) - - # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) - - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py deleted file mode 100644 index 36d4eff546..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ /dev/null @@ -1,484 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_info -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - get information back. - - Information retrieved is placed in a location specified by the user. - - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)! 
-options: - category: - required: false - description: - - List of categories to execute on OOB controller - default: ['Systems'] - type: list - elements: str - command: - required: false - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - User for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Get CPU inventory - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" - - - name: Get CPU model - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" - - - name: Get memory inventory - community.general.redfish_info: - category: Systems - command: GetMemoryInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Get fan inventory with a timeout of 20 seconds - community.general.redfish_info: - category: Chassis - command: GetFanInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result - - - name: Get Virtual Media information - community.general.redfish_info: - category: Manager - command: GetVirtualMedia - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" - - - name: Get Volume Inventory - community.general.redfish_info: - category: Systems - command: GetVolumeInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" - - - name: Get Session information - community.general.redfish_info: - category: Sessions - command: GetSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" - - - name: Get default inventory information - community.general.redfish_info: - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts | to_nice_json }}" - - - name: Get several inventories - community.general.redfish_info: - category: Systems - command: GetNicInventory,GetBiosAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ 
password }}" - - - name: Get default system inventory and user information - community.general.redfish_info: - category: Systems,Accounts - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get default system, user and firmware information - community.general.redfish_info: - category: ["Systems", "Accounts", "Update"] - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get Manager NIC inventory information - community.general.redfish_info: - category: Manager - command: GetManagerNicInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get boot override information - community.general.redfish_info: - category: Systems - command: GetBootOverride - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get chassis inventory - community.general.redfish_info: - category: Chassis - command: GetChassisInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get all information available in the Manager category - community.general.redfish_info: - category: Manager - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get firmware update capability information - community.general.redfish_info: - category: Update - command: GetFirmwareUpdateCapabilities - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get firmware inventory - community.general.redfish_info: - category: Update - command: GetFirmwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get software inventory - community.general.redfish_info: - category: Update - command: GetSoftwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get Manager Services - community.general.redfish_info: - category: Manager - command: GetNetworkProtocols - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get all information available in all categories - community.general.redfish_info: - category: all - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get system health report - community.general.redfish_info: - category: Systems - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get chassis health report - community.general.redfish_info: - category: Chassis - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get manager health report - community.general.redfish_info: - category: Manager - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -result: - description: different results depending on task - returned: always - type: dict - sample: List of CPUs on system -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils - -CATEGORY_COMMANDS_ALL = { - "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", - "GetMemoryInventory", "GetNicInventory", "GetHealthReport", - "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", - "GetBiosAttributes", 
"GetBootOrder", "GetBootOverride"], - "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", - "GetChassisThermals", "GetChassisInventory", "GetHealthReport"], - "Accounts": ["ListUsers"], - "Sessions": ["GetSessions"], - "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"], - "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", - "GetHealthReport"], -} - -CATEGORY_COMMANDS_DEFAULT = { - "Systems": "GetSystemInventory", - "Chassis": "GetFanInventory", - "Accounts": "ListUsers", - "Update": "GetFirmwareInventory", - "Sessions": "GetSessions", - "Manager": "GetManagerNicInventory" -} - - -def main(): - result = {} - category_list = [] - module = AnsibleModule( - argument_spec=dict( - category=dict(type='list', elements='str', default=['Systems']), - command=dict(type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=True, - ) - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module) - - # Build Category list - if "all" in module.params['category']: - for entry in CATEGORY_COMMANDS_ALL: - category_list.append(entry) - else: - # one or more categories specified - category_list = module.params['category'] - - for category in category_list: - command_list = [] - # Build Command list for each Category - if category in CATEGORY_COMMANDS_ALL: - if not module.params['command']: - # True if we don't specify a command --> use default - command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) - elif "all" in module.params['command']: - for entry in range(len(CATEGORY_COMMANDS_ALL[category])): - command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) - # one or more commands - else: - command_list = module.params['command'] - # Verify that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg="Invalid Command: %s" % cmd) - else: - # Fail if even one category given is invalid - module.fail_json(msg="Invalid Category: %s" % category) - - # Organize by Categories / Commands - if category == "Systems": - # execute only if we find a Systems resource - resource = rf_utils._find_systems_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetSystemInventory": - result["system"] = rf_utils.get_multi_system_inventory() - elif command == "GetCpuInventory": - result["cpu"] = rf_utils.get_multi_cpu_inventory() - elif command == "GetMemoryInventory": - result["memory"] = rf_utils.get_multi_memory_inventory() - elif command == "GetNicInventory": - result["nic"] = rf_utils.get_multi_nic_inventory(category) - elif command == "GetStorageControllerInventory": - result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() - elif command == "GetDiskInventory": - result["disk"] = rf_utils.get_multi_disk_inventory() - 
elif command == "GetVolumeInventory": - result["volume"] = rf_utils.get_multi_volume_inventory() - elif command == "GetBiosAttributes": - result["bios_attribute"] = rf_utils.get_multi_bios_attributes() - elif command == "GetBootOrder": - result["boot_order"] = rf_utils.get_multi_boot_order() - elif command == "GetBootOverride": - result["boot_override"] = rf_utils.get_multi_boot_override() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_system_health_report() - - elif category == "Chassis": - # execute only if we find Chassis resource - resource = rf_utils._find_chassis_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetFanInventory": - result["fan"] = rf_utils.get_fan_inventory() - elif command == "GetPsuInventory": - result["psu"] = rf_utils.get_psu_inventory() - elif command == "GetChassisThermals": - result["thermals"] = rf_utils.get_chassis_thermals() - elif command == "GetChassisPower": - result["chassis_power"] = rf_utils.get_chassis_power() - elif command == "GetChassisInventory": - result["chassis"] = rf_utils.get_chassis_inventory() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_chassis_health_report() - - elif category == "Accounts": - # execute only if we find an Account service resource - resource = rf_utils._find_accountservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "ListUsers": - result["user"] = rf_utils.list_users() - - elif category == "Update": - # execute only if we find UpdateService resources - resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetFirmwareInventory": - result["firmware"] = rf_utils.get_firmware_inventory() - elif command == "GetSoftwareInventory": - result["software"] = rf_utils.get_software_inventory() - elif command == "GetFirmwareUpdateCapabilities": - result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() - - elif category == "Sessions": - # execute only if we find SessionService resources - resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetSessions": - result["session"] = rf_utils.get_sessions() - - elif category == "Manager": - # execute only if we find a Manager service resource - resource = rf_utils._find_managers_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetManagerNicInventory": - result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) - elif command == "GetVirtualMedia": - result["virtual_media"] = rf_utils.get_multi_virtualmedia() - elif command == "GetLogs": - result["log"] = rf_utils.get_logs() - elif command == "GetNetworkProtocols": - result["network_protocols"] = rf_utils.get_network_protocols() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_manager_health_report() - - # Return data back - module.exit_json(redfish_facts=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/stacki/stacki_host.py b/plugins/modules/remote_management/stacki/stacki_host.py deleted file mode 100644 index fda0c5d318..0000000000 --- 
a/plugins/modules/remote_management/stacki/stacki_host.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Hugh Ma -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: stacki_host -short_description: Add or remove a host on a Stacki front-end -description: - - Use this module to add hosts to or remove hosts from a Stacki front-end via its API. - - Information on Stacki can be found at U(https://github.com/StackIQ/stacki). -options: - name: - description: - - Name of the host to be added to Stacki. - required: True - type: str - stacki_user: - description: - - Username for authenticating with the Stacki API. If not specified, the environment variable C(stacki_user) is used instead. - required: True - type: str - stacki_password: - description: - - Password for authenticating with the Stacki API. If not - specified, the environment variable C(stacki_password) is used instead. - required: True - type: str - stacki_endpoint: - description: - - URL for the Stacki API Endpoint. - required: True - type: str - prim_intf_mac: - description: - - MAC Address for the primary PXE boot network interface. - - Currently not used by the module. - type: str - prim_intf_ip: - description: - - IP Address for the primary network interface. - - Currently not used by the module. - type: str - prim_intf: - description: - - Name of the primary network interface. - - Currently not used by the module. - type: str - force_install: - description: - - Set value to C(true) to force the node into the install state if it already exists in Stacki. - type: bool - default: no - state: - description: - - Set value to the desired state for the specified host. - type: str - choices: [ absent, present ] - default: present - appliance: - description: - - Appliance to be used in host creation. - - Required if I(state) is C(present) and host does not yet exist. - type: str - default: backend - rack: - description: - - Rack to be used in host creation. - - Required if I(state) is C(present) and host does not yet exist. - type: int - rank: - description: - - Rank to be used in host creation. - - In Stacki terminology, the rank is the position of the machine in a rack. - - Required if I(state) is C(present) and host does not yet exist. - type: int - network: - description: - - Network to be configured in the host. - - Currently not used by the module.
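- # As with the prim_intf* options above, this value is accepted but currently has no effect.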
- type: str - default: private -author: -- Hugh Ma (@bbyhuy) -''' - -EXAMPLES = ''' -- name: Add a host named test-1 - community.general.stacki_host: - name: test-1 - stacki_user: usr - stacki_password: pwd - stacki_endpoint: url - prim_intf_mac: mac_addr - prim_intf_ip: x.x.x.x - prim_intf: eth0 - -- name: Remove a host named test-1 - community.general.stacki_host: - name: test-1 - stacki_user: usr - stacki_password: pwd - stacki_endpoint: url - state: absent -''' - -RETURN = ''' -changed: - description: whether or not the API call completed successfully - returned: always - type: bool - sample: true - -stdout: - description: the set of responses from the commands - returned: always - type: list - sample: ['...', '...'] - -stdout_lines: - description: the value of stdout split into a list - returned: always - type: list - sample: [['...', '...'], ['...'], ['...']] -''' - -import json - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -class StackiHost(object): - - def __init__(self, module): - self.module = module - self.hostname = module.params['name'] - self.rack = module.params['rack'] - self.rank = module.params['rank'] - self.appliance = module.params['appliance'] - self.prim_intf = module.params['prim_intf'] - self.prim_intf_ip = module.params['prim_intf_ip'] - self.network = module.params['network'] - self.prim_intf_mac = module.params['prim_intf_mac'] - self.endpoint = module.params['stacki_endpoint'] - - auth_creds = {'USERNAME': module.params['stacki_user'], - 'PASSWORD': module.params['stacki_password']} - - # Get Initial CSRF - cred_a = self.do_request(self.endpoint, method="GET") - cookie_a = cred_a.headers.get('Set-Cookie').split(';') - init_csrftoken = None - for c in cookie_a: - if "csrftoken" in c: - init_csrftoken = c.replace("csrftoken=", "") - init_csrftoken = init_csrftoken.rstrip("\r\n") - break - - # Make Header Dictionary with initial CSRF - header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken, - 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')} - - # Endpoint to get final authentication header - login_endpoint = self.endpoint + "/login" - - # Get Final CSRF and Session ID - login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST') - - cookie_f = login_req.headers.get('Set-Cookie').split(';') - csrftoken = None - for f in cookie_f: - if "csrftoken" in f: - csrftoken = f.replace("csrftoken=", "") - if "sessionid" in f: - sessionid = f.split("sessionid=", 1)[-1] - sessionid = sessionid.rstrip("\r\n") - - self.header = {'csrftoken': csrftoken, - 'X-CSRFToken': csrftoken, - 'sessionid': sessionid, - 'Content-type': 'application/json', - 'Cookie': login_req.headers.get('Set-Cookie')} - - def do_request(self, url, payload=None, headers=None, method=None): - res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method) - - if info['status'] != 200: - self.module.fail_json(changed=False, msg=info['msg']) - - return res - - def stack_check_host(self): - res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST") - return self.hostname in res.read() - - def stack_sync(self): - self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST") - self.do_request(self.endpoint,
payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST") - - def stack_force_install(self, result): - data = {'cmd': "set host boot {0} action=install".format(self.hostname)} - self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") - changed = True - - self.stack_sync() - - result['changed'] = changed - result['stdout'] = "api call successful".rstrip("\r\n") - - def stack_add(self, result): - data = dict() - changed = False - - data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\ - .format(self.hostname, self.rack, self.rank, self.appliance) - self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") - - self.stack_sync() - - result['changed'] = changed - result['stdout'] = "api call successful".rstrip("\r\n") - - def stack_remove(self, result): - data = dict() - - data['cmd'] = "remove host {0}"\ - .format(self.hostname) - self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") - - self.stack_sync() - - result['changed'] = True - result['stdout'] = "api call successful".rstrip("\r\n") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - name=dict(type='str', required=True), - rack=dict(type='int', default=0), - rank=dict(type='int', default=0), - appliance=dict(type='str', default='backend'), - prim_intf=dict(type='str'), - prim_intf_ip=dict(type='str'), - network=dict(type='str', default='private'), - prim_intf_mac=dict(type='str'), - stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])), - stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True), - stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])), - force_install=dict(type='bool', default=False), - ), - supports_check_mode=False, - ) - - result = {'changed': False} - missing_params = list() - - stacki = StackiHost(module) - host_exists = stacki.stack_check_host() - - # If state is present, but host exists, need force_install flag to put host back into install state - if module.params['state'] == 'present' and host_exists and module.params['force_install']: - stacki.stack_force_install(result) - # If state is present, but host exists, and force_install and false, do nothing - elif module.params['state'] == 'present' and host_exists and not module.params['force_install']: - result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\ - .format(module.params['name']) - # Otherwise, state is present, but host doesn't exists, require more params to add host - elif module.params['state'] == 'present' and not host_exists: - for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']: - if not module.params[param]: - missing_params.append(param) - if len(missing_params) > 0: # @FIXME replace with required_if - module.fail_json(msg="missing required arguments: {0}".format(missing_params)) - - stacki.stack_add(result) - # If state is absent, and host exists, lets remove it. 
- elif module.params['state'] == 'absent' and host_exists: - stacki.stack_remove(result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/wakeonlan.py b/plugins/modules/remote_management/wakeonlan.py deleted file mode 100644 index 725e070cd8..0000000000 --- a/plugins/modules/remote_management/wakeonlan.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: wakeonlan -short_description: Send a magic Wake-on-LAN (WoL) broadcast packet -description: -- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets. -options: - mac: - description: - - MAC address to send Wake-on-LAN broadcast packet for. - required: true - type: str - broadcast: - description: - - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. - default: 255.255.255.255 - type: str - port: - description: - - UDP port to use for magic Wake-on-LAN packet. - default: 7 - type: int -todo: - - Add arping support to check whether the system is up (before and after) - - Enable check-mode support (when we have arping support) - - Add SecureOn password support -notes: - - This module sends a magic packet, without knowing whether it worked. - - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS). - - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first). -seealso: -- module: community.windows.win_wakeonlan -author: -- Dag Wieers (@dagwieers) -''' - -EXAMPLES = r''' -- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 - community.general.wakeonlan: - mac: '00:00:5E:00:53:66' - broadcast: 192.0.2.23 - delegate_to: localhost - -- community.general.wakeonlan: - mac: 00:00:5E:00:53:66 - port: 9 - delegate_to: localhost -''' - -RETURN = r''' -# Default return values -''' -import socket -import struct -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def wakeonlan(module, mac, broadcast, port): - """ Send a magic Wake-on-LAN packet.
""" - - mac_orig = mac - - # Remove possible separator from MAC address - if len(mac) == 12 + 5: - mac = mac.replace(mac[2], '') - - # If we don't end up with 12 hexadecimal characters, fail - if len(mac) != 12: - module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig) - - # Test if it converts to an integer, otherwise fail - try: - int(mac, 16) - except ValueError: - module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig) - - # Create payload for magic packet - data = b'' - padding = ''.join(['FFFFFFFFFFFF', mac * 20]) - for i in range(0, len(padding), 2): - data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) - - # Broadcast payload to network - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - - if not module.check_mode: - - try: - sock.sendto(data, (broadcast, port)) - except socket.error as e: - sock.close() - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - sock.close() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - mac=dict(type='str', required=True), - broadcast=dict(type='str', default='255.255.255.255'), - port=dict(type='int', default=7), - ), - supports_check_mode=True, - ) - - mac = module.params['mac'] - broadcast = module.params['broadcast'] - port = module.params['port'] - - wakeonlan(module, mac, broadcast, port) - - module.exit_json(changed=True) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rhevm.py b/plugins/modules/rhevm.py deleted file mode 120000 index f6c3ba558e..0000000000 --- a/plugins/modules/rhevm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/rhevm.py \ No newline at end of file diff --git a/plugins/modules/rhevm.py b/plugins/modules/rhevm.py new file mode 100644 index 0000000000..422d2739d2 --- /dev/null +++ b/plugins/modules/rhevm.py @@ -0,0 +1,1504 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Timothy Vandenbrande +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: rhevm +short_description: RHEV/oVirt automation +description: + - This module only supports oVirt/RHEV version 3. + - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. + - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. +requirements: + - ovirtsdk +author: + - Timothy Vandenbrande (@TimothyVandenbrande) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + user: + description: + - The user to authenticate with. + type: str + default: admin@internal + password: + description: + - The password for user authentication. + type: str + required: true + server: + description: + - The name/IP of your RHEV-m/oVirt instance. + type: str + default: 127.0.0.1 + port: + description: + - The port on which the API is reachable. + type: int + default: 443 + insecure_api: + description: + - A boolean switch to make a secure or insecure connection to the server. + type: bool + default: false + name: + description: + - The name of the VM. + type: str + cluster: + description: + - The RHEV/oVirt cluster in which you want you VM to start. + type: str + default: '' + datacenter: + description: + - The RHEV/oVirt datacenter in which you want you VM to start. 
+ type: str + default: Default + state: + description: + - This serves to create/remove/update or powermanage your VM. + type: str + choices: [absent, cd, down, info, ping, present, restarted, up] + default: present + image: + description: + - The template to use for the VM. + type: str + type: + description: + - To define if the VM is a server, desktop, or host. + type: str + choices: [desktop, host, server] + default: server + vmhost: + description: + - The host you wish your VM to run on. + type: str + vmcpu: + description: + - The number of CPUs you want in your VM. + type: int + default: 2 + cpu_share: + description: + - This parameter is used to configure the CPU share. + type: int + default: 0 + vmmem: + description: + - The amount of memory you want your VM to use (in GB). + type: int + default: 1 + osver: + description: + - The operating system option in RHEV/oVirt. + type: str + default: rhel_6x64 + mempol: + description: + - The minimum amount of memory you wish to reserve for this system. + type: int + default: 1 + vm_ha: + description: + - To make your VM highly available. + type: bool + default: true + disks: + description: + - This option uses complex arguments and is a list of disks with the options V(name), V(size), and V(domain). + type: list + elements: str + ifaces: + description: + - This option uses complex arguments and is a list of interfaces with the options V(name) and V(vlan). + type: list + elements: str + aliases: [interfaces, nics] + boot_order: + description: + - This option uses complex arguments and is a list of items that specify the boot order. + type: list + elements: str + default: [hd, network] + del_prot: + description: + - This option sets the delete protection checkbox. + type: bool + default: true + cd_drive: + description: + - The CD you wish to have mounted on the VM when O(state=cd). + type: str + timeout: + description: + - The timeout you wish to define for power actions. + - When O(state=up). + - When O(state=down). + - When O(state=restarted). + type: int +""" + +RETURN = r""" +vm: + description: Returns all of the VM's variables and execution.
+ returned: always + type: dict + sample: + { + "boot_order": [ + "hd", + "network" + ], + "changed": true, + "changes": [ + "Delete Protection" + ], + "cluster": "C1", + "cpu_share": "0", + "created": false, + "datacenter": "Default", + "del_prot": true, + "disks": [ + { + "domain": "ssd-san", + "name": "OS", + "size": 40 + } + ], + "eth0": "00:00:5E:00:53:00", + "eth1": "00:00:5E:00:53:01", + "eth2": "00:00:5E:00:53:02", + "exists": true, + "failed": false, + "ifaces": [ + { + "name": "eth0", + "vlan": "Management" + }, + { + "name": "eth1", + "vlan": "Internal" + }, + { + "name": "eth2", + "vlan": "External" + } + ], + "image": false, + "mempol": "0", + "msg": [ + "VM exists", + "cpu_share was already set to 0", + "VM high availability was already set to True", + "The boot order has already been set", + "VM delete protection has been set to True", + "Disk web2_Disk0_OS already exists", + "The VM starting host was already set to host416" + ], + "name": "web2", + "type": "server", + "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", + "vm_ha": true, + "vmcpu": "4", + "vmhost": "host416", + "vmmem": "16" + } +""" + +EXAMPLES = r""" +- name: Basic get info from VM + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: demo + state: info + +- name: Basic create example from image + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: demo + cluster: centos + image: centos7_x64 + state: present + +- name: Power management + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + cluster: RH + name: uptime_server + image: centos7_x64 + state: down + +- name: Multi disk, multi nic create example + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + cluster: RH + name: server007 + type: server + vmcpu: 4 + vmmem: 2 + ifaces: + - name: eth0 + vlan: vlan2202 + - name: eth1 + vlan: vlan36 + - name: eth2 + vlan: vlan38 + - name: eth3 + vlan: vlan2202 + disks: + - name: root + size: 10 + domain: ssd-san + - name: swap + size: 10 + domain: 15kiscsi-san + - name: opt + size: 10 + domain: 15kiscsi-san + - name: var + size: 10 + domain: 10kiscsi-san + - name: home + size: 10 + domain: sata-san + boot_order: + - network + - hd + state: present + +- name: Add a CD to the disk cd_drive + community.general.rhevm: + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: server007 + cd_drive: rhev-tools-setup.iso + state: cd + +- name: New host deployment + host network configuration + community.general.rhevm: + password: '{{ rhevm.admin.pass }}' + name: ovirt_node007 + type: host + cluster: rhevm01 + ifaces: + - name: em1 + - name: em2 + - name: p3p1 + ip: 172.31.224.200 + netmask: 255.255.254.0 + - name: p3p2 + ip: 172.31.225.200 + netmask: 255.255.254.0 + - name: bond0 + bond: + - em1 + - em2 + network: rhevm + ip: 172.31.222.200 + netmask: 255.255.255.0 + management: true + - name: bond0.36 + network: vlan36 + ip: 10.2.36.200 + netmask: 255.255.254.0 + gateway: 10.2.36.254 + - name: bond0.2202 + network: vlan2202 + - name: bond0.38 + network: vlan38 + state: present +""" + +import time + +try: + from ovirtsdk.api import API + from ovirtsdk.xml import params + HAS_SDK = True +except ImportError: + HAS_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +RHEV_FAILED = 1 +RHEV_SUCCESS = 0 +RHEV_UNAVAILABLE = 2 + +RHEV_TYPE_OPTS 
= ['desktop', 'host', 'server'] +STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restart', 'up'] + +msg = [] +changed = False +failed = False + + +class RHEVConn(object): + 'Connection to RHEV-M' + + def __init__(self, module): + self.module = module + + user = module.params.get('user') + password = module.params.get('password') + server = module.params.get('server') + port = module.params.get('port') + insecure_api = module.params.get('insecure_api') + + url = "https://%s:%s" % (server, port) + + try: + api = API(url=url, username=user, password=password, insecure=str(insecure_api)) + api.test() + self.conn = api + except Exception: + raise Exception("Failed to connect to RHEV-M.") + + def __del__(self): + self.conn.disconnect() + + def createVMimage(self, name, cluster, template): + try: + vmparams = params.VM( + name=name, + cluster=self.conn.clusters.get(name=cluster), + template=self.conn.templates.get(name=template), + disks=params.Disks(clone=True) + ) + self.conn.vms.add(vmparams) + setMsg("VM is created") + setChanged() + return True + except Exception as e: + setMsg("Failed to create VM") + setMsg(str(e)) + setFailed() + return False + + def createVM(self, name, cluster, os, actiontype): + try: + vmparams = params.VM( + name=name, + cluster=self.conn.clusters.get(name=cluster), + os=params.OperatingSystem(type_=os), + template=self.conn.templates.get(name="Blank"), + type_=actiontype + ) + self.conn.vms.add(vmparams) + setMsg("VM is created") + setChanged() + return True + except Exception as e: + setMsg("Failed to create VM") + setMsg(str(e)) + setFailed() + return False + + def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot): + VM = self.get_VM(vmname) + + newdisk = params.Disk( + name=diskname, + size=1024 * 1024 * 1024 * int(disksize), + wipe_after_delete=True, + sparse=diskallocationtype, + interface=diskinterface, + format=diskformat, + bootable=diskboot, + storage_domains=params.StorageDomains( + storage_domain=[self.get_domain(diskdomain)] + ) + ) + + try: + VM.disks.add(newdisk) + VM.update() + setMsg("Successfully added disk " + diskname) + setChanged() + except Exception as e: + setFailed() + setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.") + setMsg(str(e)) + return False + + try: + currentdisk = VM.disks.get(name=diskname) + attempt = 1 + while currentdisk.status.state != 'ok': + currentdisk = VM.disks.get(name=diskname) + if attempt == 100: + setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state))) + raise Exception() + else: + attempt += 1 + time.sleep(2) + setMsg("The disk " + diskname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + diskname + ".") + setMsg(str(e)) + return False + return True + + def createNIC(self, vmname, nicname, vlan, interface): + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + newnic = params.NIC( + name=nicname, + network=DC.networks.get(name=vlan), + interface=interface + ) + + try: + VM.nics.add(newnic) + VM.update() + setMsg("Successfully added iface " + nicname) + setChanged() + except Exception as e: + setFailed() + setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.") + setMsg(str(e)) + return False + + try: + currentnic = VM.nics.get(name=nicname) + attempt = 1 + while currentnic.active is not True: + 
currentnic = VM.nics.get(name=nicname) + if attempt == 100: + setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active))) + raise Exception() + else: + attempt += 1 + time.sleep(2) + setMsg("The iface " + nicname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + nicname + ".") + setMsg(str(e)) + return False + return True + + def get_DC(self, dc_name): + return self.conn.datacenters.get(name=dc_name) + + def get_DC_byid(self, dc_id): + return self.conn.datacenters.get(id=dc_id) + + def get_VM(self, vm_name): + return self.conn.vms.get(name=vm_name) + + def get_cluster_byid(self, cluster_id): + return self.conn.clusters.get(id=cluster_id) + + def get_cluster(self, cluster_name): + return self.conn.clusters.get(name=cluster_name) + + def get_domain_byid(self, dom_id): + return self.conn.storagedomains.get(id=dom_id) + + def get_domain(self, domain_name): + return self.conn.storagedomains.get(name=domain_name) + + def get_disk(self, disk): + return self.conn.disks.get(disk) + + def get_network(self, dc_name, network_name): + return self.get_DC(dc_name).networks.get(network_name) + + def get_network_byid(self, network_id): + return self.conn.networks.get(id=network_id) + + def get_NIC(self, vm_name, nic_name): + return self.get_VM(vm_name).nics.get(nic_name) + + def get_Host(self, host_name): + return self.conn.hosts.get(name=host_name) + + def get_Host_byid(self, host_id): + return self.conn.hosts.get(id=host_id) + + def set_Memory(self, name, memory): + VM = self.get_VM(name) + VM.memory = int(int(memory) * 1024 * 1024 * 1024) + try: + VM.update() + setMsg("The Memory has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory.") + setMsg(str(e)) + setFailed() + return False + + def set_Memory_Policy(self, name, memory_policy): + VM = self.get_VM(name) + VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024 + try: + VM.update() + setMsg("The memory policy has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory policy.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU(self, name, cpu): + VM = self.get_VM(name) + VM.cpu.topology.cores = int(cpu) + try: + VM.update() + setMsg("The number of CPUs has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the number of CPUs.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU_share(self, name, cpu_share): + VM = self.get_VM(name) + VM.cpu_shares = int(cpu_share) + try: + VM.update() + setMsg("The CPU share has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the CPU share.") + setMsg(str(e)) + setFailed() + return False + + def set_Disk(self, diskname, disksize, diskinterface, diskboot): + DISK = self.get_disk(diskname) + setMsg("Checking disk " + diskname) + if DISK.get_bootable() != diskboot: + try: + DISK.set_bootable(diskboot) + setMsg("Updated the boot option on the disk.") + setChanged() + except Exception as e: + setMsg("Failed to set the boot option on the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The boot option of the disk is correct") + if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): + try: + DISK.size = (1024 * 1024 * 1024 * int(disksize)) + setMsg("Updated the size of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the size of the disk.") + setMsg(str(e)) + setFailed() + return 
False + elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)): + setMsg("Shrinking disks is not supported") + setFailed() + return False + else: + setMsg("The size of the disk is correct") + if str(DISK.interface) != str(diskinterface): + try: + DISK.interface = diskinterface + setMsg("Updated the interface of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the interface of the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The interface of the disk is correct") + return True + + def set_NIC(self, vmname, nicname, newname, vlan, interface): + NIC = self.get_NIC(vmname, nicname) + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + NETWORK = self.get_network(str(DC.name), vlan) + checkFail() + if NIC.name != newname: + NIC.name = newname + setMsg('Updating iface name to ' + newname) + setChanged() + if str(NIC.network.id) != str(NETWORK.id): + NIC.set_network(NETWORK) + setMsg('Updating iface network to ' + vlan) + setChanged() + if NIC.interface != interface: + NIC.interface = interface + setMsg('Updating iface interface to ' + interface) + setChanged() + try: + NIC.update() + setMsg('iface has successfully been updated.') + except Exception as e: + setMsg("Failed to update the iface.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_DeleteProtection(self, vmname, del_prot): + VM = self.get_VM(vmname) + VM.delete_protected = del_prot + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update delete protection.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_BootOrder(self, vmname, boot_order): + VM = self.get_VM(vmname) + bootorder = [] + for device in boot_order: + bootorder.append(params.Boot(dev=device)) + VM.os.boot = bootorder + + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update the boot order.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_Host(self, host_name, cluster, ifaces): + HOST = self.get_Host(host_name) + CLUSTER = self.get_cluster(cluster) + + if HOST is None: + setMsg("Host does not exist.") + ifacelist = dict() + networklist = [] + manageip = '' + + try: + for iface in ifaces: + try: + setMsg('creating host interface ' + iface['name']) + if 'management' in iface: + manageip = iface['ip'] + if 'boot_protocol' not in iface: + if 'ip' in iface: + iface['boot_protocol'] = 'static' + else: + iface['boot_protocol'] = 'none' + if 'ip' not in iface: + iface['ip'] = '' + if 'netmask' not in iface: + iface['netmask'] = '' + if 'gateway' not in iface: + iface['gateway'] = '' + + if 'network' in iface: + if 'bond' in iface: + bond = [] + for slave in iface['bond']: + bond.append(ifacelist[slave]) + try: + tmpiface = params.Bonding( + slaves=params.Slaves(host_nic=bond), + options=params.Options( + option=[ + params.Option(name='miimon', value='100'), + params.Option(name='mode', value='4') + ] + ) + ) + except Exception as e: + setMsg('Failed to create the bond for ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + try: + tmpnetwork = params.HostNIC( + network=params.Network(name=iface['network']), + name=iface['name'], + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + ), + override_configuration=True, + bonding=tmpiface) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + except 
Exception as e: + setMsg('Failed to set' + iface['name'] + ' as network interface') + setFailed() + setMsg(str(e)) + return False + else: + tmpnetwork = params.HostNIC( + network=params.Network(name=iface['network']), + name=iface['name'], + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + else: + tmpiface = params.HostNIC( + name=iface['name'], + network=params.Network(), + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + ifacelist[iface['name']] = tmpiface + except Exception as e: + setMsg('Failed to set ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + except Exception as e: + setMsg('Failed to set networks') + setMsg(str(e)) + setFailed() + return False + + if manageip == '': + setMsg('No management network is defined') + setFailed() + return False + + try: + HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) + if self.conn.hosts.add(HOST): + setChanged() + HOST = self.get_Host(host_name) + state = HOST.status.state + while state != 'non_operational' and state != 'up': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to add host to RHEVM') + setFailed() + return False + + setMsg('status host: up') + time.sleep(5) + + HOST = self.get_Host(host_name) + state = HOST.status.state + setMsg('State before setting to maintenance: ' + str(state)) + HOST.deactivate() + while state != 'maintenance': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + setMsg('status host: maintenance') + + try: + HOST.nics.setupnetworks(params.Action( + force=True, + check_connectivity=False, + host_nics=params.HostNics(host_nic=networklist) + )) + setMsg('nics are set') + except Exception as e: + setMsg('Failed to apply networkconfig') + setFailed() + setMsg(str(e)) + return False + + try: + HOST.commitnetconfig() + setMsg('Network config is saved') + except Exception as e: + setMsg('Failed to save networkconfig') + setFailed() + setMsg(str(e)) + return False + except Exception as e: + if 'The Host name is already in use' in str(e): + setMsg("Host already exists") + else: + setMsg("Failed to add host") + setFailed() + setMsg(str(e)) + return False + + HOST.activate() + while state != 'up': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to apply networkconfig.') + setFailed() + return False + setMsg('status host: up') + else: + setMsg("Host exists.") + + return True + + def del_NIC(self, vmname, nicname): + return self.get_NIC(vmname, nicname).delete() + + def remove_VM(self, vmname): + VM = self.get_VM(vmname) + try: + VM.delete() + except Exception as e: + setMsg("Failed to remove VM.") + setMsg(str(e)) + setFailed() + return False + return True + + def start_VM(self, vmname, timeout): + VM = self.get_VM(vmname) + try: + VM.start() + except Exception as e: + setMsg("Failed to start VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "up", timeout) + + def wait_VM(self, vmname, state, timeout): + VM = self.get_VM(vmname) + while VM.status.state != state: + VM = self.get_VM(vmname) + time.sleep(10) + if timeout is not False: + timeout -= 10 + if timeout <= 0: + 
setMsg("Timeout expired") + setFailed() + return False + return True + + def stop_VM(self, vmname, timeout): + VM = self.get_VM(vmname) + try: + VM.stop() + except Exception as e: + setMsg("Failed to stop VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "down", timeout) + + def set_CD(self, vmname, cd_drive): + VM = self.get_VM(vmname) + try: + if str(VM.status.state) == 'down': + cdrom = params.CdRom(file=cd_drive) + VM.cdroms.add(cdrom) + setMsg("Attached the image.") + setChanged() + else: + cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000") + cdrom.set_file(cd_drive) + cdrom.update(current=True) + setMsg("Attached the image.") + setChanged() + except Exception as e: + setMsg("Failed to attach image.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_VM_Host(self, vmname, vmhost): + VM = self.get_VM(vmname) + HOST = self.get_Host(vmhost) + try: + VM.placement_policy.host = HOST + VM.update() + setMsg("Set startup host to " + vmhost) + setChanged() + except Exception as e: + setMsg("Failed to set startup host.") + setMsg(str(e)) + setFailed() + return False + return True + + def migrate_VM(self, vmname, vmhost): + VM = self.get_VM(vmname) + + HOST = self.get_Host_byid(VM.host.id) + if str(HOST.name) != vmhost: + try: + VM.migrate( + action=params.Action( + host=params.Host( + name=vmhost, + ) + ), + ) + setChanged() + setMsg("VM migrated to " + vmhost) + except Exception as e: + setMsg("Failed to set startup host.") + setMsg(str(e)) + setFailed() + return False + return True + + def remove_CD(self, vmname): + VM = self.get_VM(vmname) + try: + VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete() + setMsg("Removed the image.") + setChanged() + except Exception as e: + setMsg("Failed to remove the image.") + setMsg(str(e)) + setFailed() + return False + return True + + +class RHEV(object): + def __init__(self, module): + self.module = module + + def __get_conn(self): + self.conn = RHEVConn(self.module) + return self.conn + + def test(self): + self.__get_conn() + return "OK" + + def getVM(self, name): + self.__get_conn() + VM = self.conn.get_VM(name) + if VM: + vminfo = dict() + vminfo['uuid'] = VM.id + vminfo['name'] = VM.name + vminfo['status'] = VM.status.state + vminfo['cpu_cores'] = VM.cpu.topology.cores + vminfo['cpu_sockets'] = VM.cpu.topology.sockets + vminfo['cpu_shares'] = VM.cpu_shares + vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024) + vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024) + vminfo['os'] = VM.get_os().type_ + vminfo['del_prot'] = VM.delete_protected + try: + vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name) + except Exception: + vminfo['host'] = None + vminfo['boot_order'] = [] + for boot_dev in VM.os.get_boot(): + vminfo['boot_order'].append(str(boot_dev.dev)) + vminfo['disks'] = [] + for DISK in VM.disks.list(): + disk = dict() + disk['name'] = DISK.name + disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024) + disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name) + disk['interface'] = DISK.interface + vminfo['disks'].append(disk) + vminfo['ifaces'] = [] + for NIC in VM.nics.list(): + iface = dict() + iface['name'] = str(NIC.name) + iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name) + iface['interface'] = NIC.interface + iface['mac'] = NIC.mac.address + vminfo['ifaces'].append(iface) + vminfo[str(NIC.name)] = NIC.mac.address + CLUSTER = 
self.conn.get_cluster_byid(VM.cluster.id) + if CLUSTER: + vminfo['cluster'] = CLUSTER.name + else: + vminfo = False + return vminfo + + def createVMimage(self, name, cluster, template, disks): + self.__get_conn() + return self.conn.createVMimage(name, cluster, template, disks) + + def createVM(self, name, cluster, os, actiontype): + self.__get_conn() + return self.conn.createVM(name, cluster, os, actiontype) + + def setMemory(self, name, memory): + self.__get_conn() + return self.conn.set_Memory(name, memory) + + def setMemoryPolicy(self, name, memory_policy): + self.__get_conn() + return self.conn.set_Memory_Policy(name, memory_policy) + + def setCPU(self, name, cpu): + self.__get_conn() + return self.conn.set_CPU(name, cpu) + + def setCPUShare(self, name, cpu_share): + self.__get_conn() + return self.conn.set_CPU_share(name, cpu_share) + + def setDisks(self, name, disks): + self.__get_conn() + counter = 0 + bootselect = False + for disk in disks: + if 'bootable' in disk: + if disk['bootable'] is True: + bootselect = True + + for disk in disks: + diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_') + disksize = disk.get('size', 1) + diskdomain = disk.get('domain', None) + if diskdomain is None: + setMsg("`domain` is a required disk key.") + setFailed() + return False + diskinterface = disk.get('interface', 'virtio') + diskformat = disk.get('format', 'raw') + diskallocationtype = disk.get('thin', False) + diskboot = disk.get('bootable', False) + + if bootselect is False and counter == 0: + diskboot = True + + DISK = self.conn.get_disk(diskname) + + if DISK is None: + self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) + else: + self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) + checkFail() + counter += 1 + + return True + + def setNetworks(self, vmname, ifaces): + self.__get_conn() + VM = self.conn.get_VM(vmname) + + counter = 0 + length = len(ifaces) + + for NIC in VM.nics.list(): + if counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + elif str(name) != str(NIC.name): + setMsg("ifaces are in the wrong order, rebuilding everything.") + for NIC in VM.nics.list(): + self.conn.del_NIC(vmname, NIC.name) + self.setNetworks(vmname, ifaces) + checkFail() + return True + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + checkFail() + interface = iface.get('interface', 'virtio') + self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) + else: + self.conn.del_NIC(vmname, NIC.name) + counter += 1 + checkFail() + + while counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + if failed is True: + return False + interface = iface.get('interface', 'virtio') + self.conn.createNIC(vmname, name, vlan, interface) + + counter += 1 + checkFail() + return True + + def setDeleteProtection(self, vmname, del_prot): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if bool(VM.delete_protected) != bool(del_prot): + self.conn.set_DeleteProtection(vmname, del_prot) + checkFail() + setMsg("`delete protection` has been updated.") + else: + setMsg("`delete protection` already has the right value.") + return True + + def 
setBootOrder(self, vmname, boot_order): + self.__get_conn() + VM = self.conn.get_VM(vmname) + bootorder = [] + for boot_dev in VM.os.get_boot(): + bootorder.append(str(boot_dev.dev)) + + if boot_order != bootorder: + self.conn.set_BootOrder(vmname, boot_order) + setMsg('The boot order has been set') + else: + setMsg('The boot order has already been set') + return True + + def removeVM(self, vmname): + self.__get_conn() + self.setPower(vmname, "down", 300) + return self.conn.remove_VM(vmname) + + def setPower(self, vmname, state, timeout): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if VM is None: + setMsg("VM does not exist.") + setFailed() + return False + + if state == VM.status.state: + setMsg("VM state was already " + state) + else: + if state == "up": + setMsg("VM is going to start") + self.conn.start_VM(vmname, timeout) + setChanged() + elif state == "down": + setMsg("VM is going to stop") + self.conn.stop_VM(vmname, timeout) + setChanged() + elif state == "restarted": + self.setPower(vmname, "down", timeout) + checkFail() + self.setPower(vmname, "up", timeout) + checkFail() + setMsg("the vm state is set to " + state) + return True + + def setCD(self, vmname, cd_drive): + self.__get_conn() + if cd_drive: + return self.conn.set_CD(vmname, cd_drive) + else: + return self.conn.remove_CD(vmname) + + def setVMHost(self, vmname, vmhost): + self.__get_conn() + return self.conn.set_VM_Host(vmname, vmhost) + + def setHost(self, hostname, cluster, ifaces): + self.__get_conn() + return self.conn.set_Host(hostname, cluster, ifaces) + + +def checkFail(): + if failed: + module.fail_json(msg=msg) + else: + return True + + +def setFailed(): + global failed + failed = True + + +def setChanged(): + global changed + changed = True + + +def setMsg(message): + msg.append(message) + + +def core(module): + + r = RHEV(module) + + state = module.params.get('state') + + if state == 'ping': + r.test() + return RHEV_SUCCESS, {"ping": "pong"} + elif state == 'info': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + vminfo = r.getVM(name) + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + elif state == 'present': + created = False + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + actiontype = module.params.get('type') + if actiontype == 'server' or actiontype == 'desktop': + vminfo = r.getVM(name) + if vminfo: + setMsg('VM exists') + else: + # Create VM + cluster = module.params.get('cluster') + if cluster is None: + setMsg("cluster is a required argument.") + setFailed() + template = module.params.get('image') + if template: + disks = module.params.get('disks') + if disks is None: + setMsg("disks is a required argument.") + setFailed() + checkFail() + if r.createVMimage(name, cluster, template, disks) is False: + return RHEV_FAILED, vminfo + else: + os = module.params.get('osver') + if os is None: + setMsg("osver is a required argument.") + setFailed() + checkFail() + if r.createVM(name, cluster, os, actiontype) is False: + return RHEV_FAILED, vminfo + created = True + + # Set MEMORY and MEMORY POLICY + vminfo = r.getVM(name) + memory = module.params.get('vmmem') + if memory is not None: + memory_policy = module.params.get('mempol') + if memory_policy == 0: + memory_policy = memory + mem_pol_nok = True + if int(vminfo['mem_pol']) == memory_policy: + setMsg("Memory is correct") + mem_pol_nok = False + + mem_nok = True + if 
int(vminfo['memory']) == memory: + setMsg("Memory is correct") + mem_nok = False + + if memory_policy > memory: + setMsg('memory_policy cannot have a higher value than memory.') + return RHEV_FAILED, msg + + if mem_nok and mem_pol_nok: + if memory_policy > int(vminfo['memory']): + r.setMemory(vminfo['name'], memory) + r.setMemoryPolicy(vminfo['name'], memory_policy) + else: + r.setMemoryPolicy(vminfo['name'], memory_policy) + r.setMemory(vminfo['name'], memory) + elif mem_nok: + r.setMemory(vminfo['name'], memory) + elif mem_pol_nok: + r.setMemoryPolicy(vminfo['name'], memory_policy) + checkFail() + + # Set CPU + cpu = module.params.get('vmcpu') + if int(vminfo['cpu_cores']) == cpu: + setMsg("Number of CPUs is correct") + else: + if r.setCPU(vminfo['name'], cpu) is False: + return RHEV_FAILED, msg + + # Set CPU SHARE + cpu_share = module.params.get('cpu_share') + if cpu_share is not None: + if int(vminfo['cpu_shares']) == cpu_share: + setMsg("CPU share is correct.") + else: + if r.setCPUShare(vminfo['name'], cpu_share) is False: + return RHEV_FAILED, msg + + # Set DISKS + disks = module.params.get('disks') + if disks is not None: + if r.setDisks(vminfo['name'], disks) is False: + return RHEV_FAILED, msg + + # Set NETWORKS + ifaces = module.params.get('ifaces', None) + if ifaces is not None: + if r.setNetworks(vminfo['name'], ifaces) is False: + return RHEV_FAILED, msg + + # Set Delete Protection + del_prot = module.params.get('del_prot') + if r.setDeleteProtection(vminfo['name'], del_prot) is False: + return RHEV_FAILED, msg + + # Set Boot Order + boot_order = module.params.get('boot_order') + if r.setBootOrder(vminfo['name'], boot_order) is False: + return RHEV_FAILED, msg + + # Set VM Host + vmhost = module.params.get('vmhost') + if vmhost: + if r.setVMHost(vminfo['name'], vmhost) is False: + return RHEV_FAILED, msg + + vminfo = r.getVM(name) + vminfo['created'] = created + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + if actiontype == 'host': + cluster = module.params.get('cluster') + if cluster is None: + setMsg("cluster is a required argument.") + setFailed() + ifaces = module.params.get('ifaces') + if ifaces is None: + setMsg("ifaces is a required argument.") + setFailed() + if r.setHost(name, cluster, ifaces) is False: + return RHEV_FAILED, msg + return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + + elif state == 'absent': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + actiontype = module.params.get('type') + if actiontype == 'server' or actiontype == 'desktop': + vminfo = r.getVM(name) + if vminfo: + setMsg('VM exists') + + # Set Delete Protection + del_prot = module.params.get('del_prot') + if r.setDeleteProtection(vminfo['name'], del_prot) is False: + return RHEV_FAILED, msg + + # Remove VM + if r.removeVM(vminfo['name']) is False: + return RHEV_FAILED, msg + setMsg('VM has been removed.') + vminfo['state'] = 'DELETED' + else: + setMsg('VM was already removed.') + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + elif state == 'up' or state == 'down' or state == 'restarted': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + timeout = module.params.get('timeout') + if r.setPower(name, state, timeout) is False: + return RHEV_FAILED, msg + vminfo = r.getVM(name) + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + elif state == 'cd': + name = module.params.get('name') 
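+        # state=cd delegates to RHEV.setCD() above: a non-empty cd_drive attaches
+        # that ISO (live, via cdrom.update(current=True), when the VM is up), while
+        # an empty cd_drive detaches the current image through remove_CD().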
+ cd_drive = module.params.get('cd_drive') + if r.setCD(name, cd_drive) is False: + return RHEV_FAILED, msg + return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']), + user=dict(type='str', default='admin@internal'), + password=dict(type='str', required=True, no_log=True), + server=dict(type='str', default='127.0.0.1'), + port=dict(type='int', default=443), + insecure_api=dict(type='bool', default=False), + name=dict(type='str'), + image=dict(type='str'), + datacenter=dict(type='str', default="Default"), + type=dict(type='str', default='server', choices=['desktop', 'host', 'server']), + cluster=dict(type='str', default=''), + vmhost=dict(type='str'), + vmcpu=dict(type='int', default=2), + vmmem=dict(type='int', default=1), + disks=dict(type='list', elements='str'), + osver=dict(type='str', default="rhel_6x64"), + ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']), + timeout=dict(type='int'), + mempol=dict(type='int', default=1), + vm_ha=dict(type='bool', default=True), + cpu_share=dict(type='int', default=0), + boot_order=dict(type='list', elements='str', default=['hd', 'network']), + del_prot=dict(type='bool', default=True), + cd_drive=dict(type='str'), + ), + ) + + if not HAS_SDK: + module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.") + + rc = RHEV_SUCCESS + try: + rc, result = core(module) + except Exception as e: + module.fail_json(msg=str(e)) + + if rc != 0: # something went wrong emit the msg + module.fail_json(rc=rc, msg=result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rhn_channel.py b/plugins/modules/rhn_channel.py deleted file mode 120000 index 517fbf349f..0000000000 --- a/plugins/modules/rhn_channel.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/rhn_channel.py \ No newline at end of file diff --git a/plugins/modules/rhn_register.py b/plugins/modules/rhn_register.py deleted file mode 120000 index 5a724b9468..0000000000 --- a/plugins/modules/rhn_register.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/rhn_register.py \ No newline at end of file diff --git a/plugins/modules/rhsm_release.py b/plugins/modules/rhsm_release.py deleted file mode 120000 index 7dcb8f7738..0000000000 --- a/plugins/modules/rhsm_release.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/rhsm_release.py \ No newline at end of file diff --git a/plugins/modules/rhsm_release.py b/plugins/modules/rhsm_release.py new file mode 100644 index 0000000000..7034713c04 --- /dev/null +++ b/plugins/modules/rhsm_release.py @@ -0,0 +1,137 @@ +#!/usr/bin/python + +# Copyright (c) 2018, Sean Myers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: rhsm_release +short_description: Set or Unset RHSM Release version +description: + - Sets or unsets the release version used by RHSM repositories. +notes: + - This module fails on an unregistered system. Use the M(community.general.redhat_subscription) module to register a system + prior to setting the RHSM release. + - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully + run this module. 
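+# For reference, the module shells out to the CLI via _sm_release() below; the
+# operations are roughly equivalent to:
+#   subscription-manager release --show            # query the current value
+#   subscription-manager release --set <version>   # set a release
+#   subscription-manager release --unset           # clear it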
+requirements: + - Red Hat Enterprise Linux 6+ with subscription-manager installed +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + release: + description: + - RHSM release version to use. + - To unset either pass V(null) for this option, or omit this option. + type: str +author: + - Sean Myers (@seandst) +""" + +EXAMPLES = r""" +# Set release version to 7.1 +- name: Set RHSM release version + community.general.rhsm_release: + release: "7.1" + +# Set release version to 6Server +- name: Set RHSM release version + community.general.rhsm_release: + release: "6Server" + +# Unset release version +- name: Unset RHSM release release + community.general.rhsm_release: + release: +""" + +RETURN = r""" +current_release: + description: The current RHSM release version value. + returned: success + type: str +""" + +from ansible.module_utils.basic import AnsibleModule + +import os +import re + +# Matches release-like values such as 7.2, 5.10, 6Server, 8 +# but rejects unlikely values, like 100Server, 1.100, 7server etc. +release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b') + + +def _sm_release(module, *args): + # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes + # "subscription-manager release --set 0.1" + sm_bin = module.get_bin_path('subscription-manager', required=True) + cmd = [sm_bin, 'release'] + list(args) + # delegate nonzero rc handling to run_command + return module.run_command(cmd, check_rc=True, expand_user_and_vars=False) + + +def get_release(module): + # Get the current release version, or None if release unset + rc, out, err = _sm_release(module, '--show') + try: + match = release_matcher.findall(out)[0] + except IndexError: + # 0'th index did not exist; no matches + match = None + + return match + + +def set_release(module, release): + # Set current release version, or unset if release is None + if release is None: + args = ('--unset',) + else: + args = ('--set', release) + + return _sm_release(module, *args) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + release=dict(type='str'), + ), + supports_check_mode=True + ) + + if os.getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + target_release = module.params['release'] + + # sanity check: the target release at least looks like a valid release + if target_release and not release_matcher.findall(target_release): + module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release)) + + # Will fail with useful error from s-m if system not subscribed + current_release = get_release(module) + + changed = (target_release != current_release) + if not module.check_mode and changed: + set_release(module, target_release) + # If setting the release fails, then a fail_json would have exited with + # the s-m error, e.g. "No releases match '7.20'...". 
If not, then the
+        # current release is now set to the target release (job's done)
+        current_release = target_release
+
+    module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py
deleted file mode 120000
index d4fe8f1a3a..0000000000
--- a/plugins/modules/rhsm_repository.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/rhsm_repository.py
\ No newline at end of file
diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py
new file mode 100644
index 0000000000..b5b4eab4dc
--- /dev/null
+++ b/plugins/modules/rhsm_repository.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+  - Manage (enable or disable) RHSM repositories on the Red Hat Subscription Management entitlement platform using the
+    C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+  - In order to manage RHSM repositories the system must already be registered to RHSM, either manually or using the Ansible
+    M(community.general.redhat_subscription) module.
+  - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully
+    run this module.
+requirements:
+  - subscription-manager
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+options:
+  state:
+    description:
+      - If state is equal to V(enabled) or V(disabled), indicates the desired repository state.
+      - In community.general 10.0.0 the states V(present) and V(absent) have been removed. Please use V(enabled) and V(disabled)
+        instead.
+    choices: [enabled, disabled]
+    default: "enabled"
+    type: str
+  name:
+    description:
+      - The ID of repositories to enable.
+      - To operate on several repositories this can accept a comma separated list or a YAML list.
+    required: true
+    type: list
+    elements: str
+  purge:
+    description:
+      - Disable all currently enabled repositories that are not specified in O(name). Only set this to V(true) if passing
+        in a list of repositories to the O(name) field. Using this with C(loop) is unlikely to produce the desired result.
+    type: bool
+    default: false
+"""
+
+EXAMPLES = r"""
+- name: Enable a RHSM repository
+  community.general.rhsm_repository:
+    name: rhel-7-server-rpms
+
+- name: Disable all RHSM repositories
+  community.general.rhsm_repository:
+    name: '*'
+    state: disabled
+
+- name: Enable all repositories starting with rhel-6-server
+  community.general.rhsm_repository:
+    name: rhel-6-server*
+    state: enabled
+
+- name: Disable all repositories except rhel-7-server-rpms
+  community.general.rhsm_repository:
+    name: rhel-7-server-rpms
+    purge: true
+"""
+
+RETURN = r"""
+repositories:
+  description:
+    - The list of RHSM repositories with their states.
+    - When this module is used to change the repository states, this list contains the updated states after the changes.
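+  # Each element mirrors what list_repositories() parses out of
+  # `subscription-manager repos --list`, for example (illustrative values):
+  #   {"id": "rhel-7-server-rpms", "name": "...", "url": "...", "enabled": true}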
+ returned: success + type: list +""" + +import os +from fnmatch import fnmatch +from copy import deepcopy +from ansible.module_utils.basic import AnsibleModule + + +class Rhsm(object): + def __init__(self, module): + self.module = module + self.rhsm_bin = self.module.get_bin_path('subscription-manager', required=True) + self.rhsm_kwargs = { + 'environ_update': dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'), + 'expand_user_and_vars': False, + 'use_unsafe_shell': False, + } + + def run_repos(self, arguments): + """ + Execute `subscription-manager repos` with arguments and manage common errors + """ + rc, out, err = self.module.run_command( + [self.rhsm_bin, 'repos'] + arguments, + **self.rhsm_kwargs + ) + + if rc == 0 and out == 'This system has no repositories available through subscriptions.\n': + self.module.fail_json(msg='This system has no repositories available through subscriptions') + elif rc == 1: + self.module.fail_json(msg='subscription-manager failed with the following error: %s' % err) + else: + return rc, out, err + + def list_repositories(self): + """ + Generate RHSM repository list and return a list of dict + """ + rc, out, err = self.run_repos(['--list']) + + repo_id = '' + repo_name = '' + repo_url = '' + repo_enabled = '' + + repo_result = [] + for line in out.splitlines(): + # ignore lines that are: + # - empty + # - "+---------[...]" -- i.e. header + # - " Available Repositories [...]" -- i.e. header + if line == '' or line[0] == '+' or line[0] == ' ': + continue + + if line.startswith('Repo ID: '): + repo_id = line[9:].lstrip() + continue + + if line.startswith('Repo Name: '): + repo_name = line[11:].lstrip() + continue + + if line.startswith('Repo URL: '): + repo_url = line[10:].lstrip() + continue + + if line.startswith('Enabled: '): + repo_enabled = line[9:].lstrip() + + repo = { + "id": repo_id, + "name": repo_name, + "url": repo_url, + "enabled": True if repo_enabled == '1' else False + } + + repo_result.append(repo) + + return repo_result + + +def repository_modify(module, rhsm, state, name, purge=False): + name = set(name) + current_repo_list = rhsm.list_repositories() + updated_repo_list = deepcopy(current_repo_list) + matched_existing_repo = {} + for repoid in name: + matched_existing_repo[repoid] = [] + for idx, repo in enumerate(current_repo_list): + if fnmatch(repo['id'], repoid): + matched_existing_repo[repoid].append(repo) + # Update current_repo_list to return it as result variable + updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False + + changed = False + results = [] + diff_before = "" + diff_after = "" + rhsm_arguments = [] + + for repoid in matched_existing_repo: + if len(matched_existing_repo[repoid]) == 0: + results.append("%s is not a valid repository ID" % repoid) + module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid) + for repo in matched_existing_repo[repoid]: + if state in ['disabled', 'absent']: + if repo['enabled']: + changed = True + diff_before += "Repository '%s' is enabled for this system\n" % repo['id'] + diff_after += "Repository '%s' is disabled for this system\n" % repo['id'] + results.append("Repository '%s' is disabled for this system" % repo['id']) + rhsm_arguments += ['--disable', repo['id']] + elif state in ['enabled', 'present']: + if not repo['enabled']: + changed = True + diff_before += "Repository '%s' is disabled for this system\n" % repo['id'] + diff_after += "Repository '%s' is enabled for this system\n" % repo['id'] + results.append("Repository '%s' is enabled for this 
system" % repo['id']) + rhsm_arguments += ['--enable', repo['id']] + + # Disable all enabled repos on the system that are not in the task and not + # marked as disabled by the task + if purge: + enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled']) + matched_repoids_set = set(matched_existing_repo.keys()) + difference = enabled_repo_ids.difference(matched_repoids_set) + if len(difference) > 0: + for repoid in difference: + changed = True + diff_before.join("Repository '{repoid}'' is enabled for this system\n".format(repoid=repoid)) + diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)) + results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid)) + rhsm_arguments.extend(['--disable', repoid]) + for updated_repo in updated_repo_list: + if updated_repo['id'] in difference: + updated_repo['enabled'] = False + + diff = {'before': diff_before, + 'after': diff_after, + 'before_header': "RHSM repositories", + 'after_header': "RHSM repositories"} + + if not module.check_mode and changed: + rc, out, err = rhsm.run_repos(rhsm_arguments) + results = out.splitlines() + module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True), + state=dict(choices=['enabled', 'disabled'], default='enabled'), + purge=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + if os.getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + rhsm = Rhsm(module) + + name = module.params['name'] + state = module.params['state'] + purge = module.params['purge'] + + repository_modify(module, rhsm, state, name, purge) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py deleted file mode 120000 index 8e520af904..0000000000 --- a/plugins/modules/riak.py +++ /dev/null @@ -1 +0,0 @@ -./database/misc/riak.py \ No newline at end of file diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py new file mode 100644 index 0000000000..4f3ac14e13 --- /dev/null +++ b/plugins/modules/riak.py @@ -0,0 +1,233 @@ +#!/usr/bin/python + +# Copyright (c) 2013, James Martin , Drew Kerrigan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: riak +short_description: This module handles some common Riak operations +description: + - This module can be used to join nodes to a cluster, check the status of the cluster. +author: + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - The command you would like to perform against the cluster. + choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] + type: str + config_dir: + description: + - The path to the riak configuration directory. + default: /etc/riak + type: path + http_conn: + description: + - The IP address and port that is listening for Riak HTTP queries. + default: 127.0.0.1:8098 + type: str + target_node: + description: + - The target node for certain operations (join, ping). 
+    default: riak@127.0.0.1
+    type: str
+  wait_for_handoffs:
+    description:
+      - Number of seconds to wait for handoffs to complete.
+    type: int
+    default: 0
+  wait_for_ring:
+    description:
+      - Number of seconds to wait for all nodes to agree on the ring.
+    type: int
+    default: 0
+  wait_for_service:
+    description:
+      - Waits for a riak service to come online before continuing.
+    choices: ['kv']
+    type: str
+  validate_certs:
+    description:
+      - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+        certificates.
+    type: bool
+    default: true
+"""

+EXAMPLES = r"""
+- name: Join a Riak node to another node
+  community.general.riak:
+    command: join
+    target_node: riak@10.1.1.1
+
+- name: Wait for handoffs to finish. Use with async and poll.
+  community.general.riak:
+    wait_for_handoffs: 600
+
+- name: Wait for riak_kv service to start up
+  community.general.riak:
+    wait_for_service: kv
+"""
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
+    cmd = riak_admin_bin + ['ringready']
+    rc, out, err = module.run_command(cmd)
+    if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+        return True
+    else:
+        return False
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            command=dict(choices=['ping', 'kv_test', 'join', 'plan', 'commit']),
+            config_dir=dict(default='/etc/riak', type='path'),
+            http_conn=dict(default='127.0.0.1:8098'),
+            target_node=dict(default='riak@127.0.0.1'),
+            wait_for_handoffs=dict(default=0, type='int'),
+            wait_for_ring=dict(default=0, type='int'),
+            wait_for_service=dict(choices=['kv']),
+            validate_certs=dict(default=True, type='bool'))
+    )
+
+    command = module.params.get('command')
+    http_conn = module.params.get('http_conn')
+    target_node = module.params.get('target_node')
+    wait_for_handoffs = module.params.get('wait_for_handoffs')
+    wait_for_ring = module.params.get('wait_for_ring')
+    wait_for_service = module.params.get('wait_for_service')
+
+    # make sure riak commands are on the path
+    riak_bin = module.get_bin_path('riak')
+    riak_admin_bin = module.get_bin_path('riak-admin')
+    riak_admin_bin = [riak_admin_bin] if riak_admin_bin is not None else [riak_bin, 'admin']
+
+    timeout = time.time() + 120
+    while True:
+        if time.time() > timeout:
+            module.fail_json(msg='Timeout, could not fetch Riak stats.')
+        (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+        if info['status'] == 200:
+            stats_raw = response.read()
+            break
+        time.sleep(5)
+
+    # attempt to parse the stats response as JSON
+    try:
+        stats = json.loads(stats_raw)
+    except Exception:
+        module.fail_json(msg='Could not parse Riak stats.')
+
+    node_name = stats['nodename']
+    nodes = stats['ring_members']
+    ring_size = stats['ring_creation_size']
+    rc, out, err = module.run_command([riak_bin, 'version'])
+    version = out.strip()
+
+    result = dict(node_name=node_name,
+                  nodes=nodes,
+                  ring_size=ring_size,
+                  version=version)
+
+    if command == 'ping':
+        cmd = [riak_bin, 'ping', target_node]
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            result['ping'] = out
+        else:
+            module.fail_json(msg=out)
+
+    elif command == 'kv_test':
+        cmd = riak_admin_bin + ['test']
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            result['kv_test'] = out
+        else:
+            module.fail_json(msg=out)
+
+    elif command == 'join':
+        if nodes.count(node_name) == 1 and len(nodes) > 1:
+            result['join'] = 
'Node is already in cluster or staged to be in cluster.' + else: + cmd = riak_admin_bin + ['cluster', 'join', target_node] + rc, out, err = module.run_command(cmd) + if rc == 0: + result['join'] = out + result['changed'] = True + else: + module.fail_json(msg=out) + + elif command == 'plan': + cmd = riak_admin_bin + ['cluster', 'plan'] + rc, out, err = module.run_command(cmd) + if rc == 0: + result['plan'] = out + if 'Staged Changes' in out: + result['changed'] = True + else: + module.fail_json(msg=out) + + elif command == 'commit': + cmd = riak_admin_bin + ['cluster', 'commit'] + rc, out, err = module.run_command(cmd) + if rc == 0: + result['commit'] = out + result['changed'] = True + else: + module.fail_json(msg=out) + +# this could take a while, recommend to run in async mode + if wait_for_handoffs: + timeout = time.time() + wait_for_handoffs + while True: + cmd = riak_admin_bin + ['transfers'] + rc, out, err = module.run_command(cmd) + if 'No transfers active' in out: + result['handoffs'] = 'No transfers active.' + break + time.sleep(10) + if time.time() > timeout: + module.fail_json(msg='Timeout waiting for handoffs.') + + if wait_for_service: + cmd = riak_admin_bin + ['wait_for_service', 'riak_%s' % wait_for_service, node_name] + rc, out, err = module.run_command(cmd) + result['service'] = out + + if wait_for_ring: + timeout = time.time() + wait_for_ring + while True: + if ring_check(module, riak_admin_bin): + break + time.sleep(10) + if time.time() > timeout: + module.fail_json(msg='Timeout waiting for nodes to agree on ring.') + + result['ring_ready'] = ring_check(module, riak_admin_bin) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py deleted file mode 120000 index 6e3460b5cb..0000000000 --- a/plugins/modules/rocketchat.py +++ /dev/null @@ -1 +0,0 @@ -./notification/rocketchat.py \ No newline at end of file diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py new file mode 100644 index 0000000000..8bbc1e153b --- /dev/null +++ b/plugins/modules/rocketchat.py @@ -0,0 +1,266 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Deepak Kothandan +# Copyright (c) 2015, Stefan Berggren +# Copyright (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rocketchat +short_description: Send notifications to Rocket Chat +description: + - This module sends notifications to Rocket Chat through the Incoming WebHook integration. +author: "Ramon de la Fuente (@ramondelafuente)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + domain: + type: str + description: + - The domain for your environment without protocol. (For example V(example.com) or V(chat.example.com)). + required: true + token: + type: str + description: + - Rocket Chat Incoming Webhook integration token. This provides authentication to Rocket Chat's Incoming webhook for + posting messages. + required: true + protocol: + type: str + description: + - Specify the protocol used to send notification messages before the webhook URL (that is, V(http) or V(https)). + default: https + choices: + - 'http' + - 'https' + msg: + type: str + description: + - Message to be sent. 
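+  # The rendered payload is POSTed to <protocol>://<domain>/hooks/<token>
+  # (ROCKETCHAT_INCOMING_WEBHOOK below). With is_pre740=true it is form-encoded
+  # as `payload=<json>`; otherwise it is sent as JSON with a Content-type header.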
+ channel: + type: str + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the O(token) specified during + the creation of webhook. + username: + type: str + description: + - This is the sender of the message. + default: "Ansible" + icon_url: + type: str + description: + - URL for the message sender's icon. + default: "https://docs.ansible.com/favicon.ico" + icon_emoji: + type: str + description: + - Emoji for the message sender. The representation for the available emojis can be got from Rocket Chat. + - For example V(:thumbsup:). + - If O(icon_emoji) is set, O(icon_url) is not used. + link_names: + type: int + description: + - Automatically create links for channels and usernames in O(msg). + default: 1 + choices: + - 1 + - 0 + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true + color: + type: str + description: + - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the + message. + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' + attachments: + type: list + elements: dict + description: + - Define a list of attachments. + is_pre740: + description: + - If V(true), the payload matches Rocket.Chat prior to 7.4.0 format. This format has been used by the module since its + inception, but is no longer supported by Rocket.Chat 7.4.0. + - The default value of the option, V(true), is B(deprecated) since community.general 11.2.0 and will change to V(false) in community.general 13.0.0. + - This parameter is going to be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version. + type: bool + version_added: 10.5.0 +""" + +EXAMPLES = r""" +- name: Send notification message through Rocket Chat + community.general.rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: '{{ inventory_hostname }} completed' + delegate_to: localhost + +- name: Send notification message through Rocket Chat all options + community.general.rocketchat: + domain: chat.example.com + token: thetoken/generatedby/rocketchat + msg: '{{ inventory_hostname }} completed' + channel: "#ansible" + username: 'Ansible on {{ inventory_hostname }}' + icon_url: http://www.example.com/some-image-file.png + link_names: 0 + delegate_to: localhost + +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured + in rocketchat + community.general.rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: '{{ inventory_hostname }} is alive!' 
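+    # A non-default color is applied by wrapping the text in an attachment with
+    # that color; see build_payload_for_rocketchat() below.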
+ color: good + username: '' + icon_url: '' + delegate_to: localhost + +- name: Use the attachments API + community.general.rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + attachments: + - text: Display my system load on host A and B + color: "#ff00dd" + title: System load + fields: + - title: System A + value: 'load average: 0,74, 0,66, 0,63' + short: true + - title: System B + value: 'load average: 5,16, 4,64, 2,43' + short: true + delegate_to: localhost +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' + + +def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=text) + elif text is not None: + payload = dict(attachments=[dict(text=text, color=color)]) + if channel is not None: + if channel[0] == '#' or channel[0] == '@': + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + for attachment in attachments: + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + payload['attachments'].append(attachment) + + payload = module.jsonify(payload) + if is_pre740: + payload = "payload=" + payload + return payload + + +def do_notify_rocketchat(module, domain, token, protocol, payload, is_pre740): + + if token.count('/') < 1: + module.fail_json(msg="Invalid Token specified, provide a valid token") + + rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) + + headers = None + if not is_pre740: + headers = {'Content-type': 'application/json'} + response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload, headers=headers) + if info['status'] != 200: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=True), + token=dict(type='str', required=True, no_log=True), + protocol=dict(type='str', default='https', choices=['http', 'https']), + msg=dict(type='str'), + channel=dict(type='str'), + username=dict(type='str', default='Ansible'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), + icon_emoji=dict(type='str'), + link_names=dict(type='int', default=1, choices=[0, 1]), + validate_certs=dict(default=True, type='bool'), + color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), + attachments=dict(type='list', elements='dict'), + is_pre740=dict(type='bool') + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + protocol = module.params['protocol'] + text = module.params['msg'] + channel = module.params['channel'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + color = module.params['color'] + attachments = module.params['attachments'] + is_pre740 = module.params['is_pre740'] + + if is_pre740 is None: + 
module.deprecate( + "The default value 'true' for 'is_pre740' is deprecated and will change to 'false' in community.general 13.0.0." + " You can explicitly set 'is_pre740' in your task to avoid this deprecation warning", + version="13.0.0", + collection_name="community.general", + ) + is_pre740 = True + + payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740) + do_notify_rocketchat(module, domain, token, protocol, payload, is_pre740) + + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py deleted file mode 120000 index 2f60733587..0000000000 --- a/plugins/modules/rollbar_deployment.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/rollbar_deployment.py \ No newline at end of file diff --git a/plugins/modules/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py new file mode 100644 index 0000000000..383573d8c7 --- /dev/null +++ b/plugins/modules/rollbar_deployment.py @@ -0,0 +1,144 @@ +#!/usr/bin/python + +# Copyright 2014, Max Riveiro, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rollbar_deployment +author: "Max Riveiro (@kavu)" +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments (see U(https://rollbar.com/docs/deploys_other/)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - Your project access token. + required: true + environment: + type: str + description: + - Name of the environment being deployed, for example V(production). + required: true + revision: + type: str + description: + - Revision number/sha being deployed. + required: true + user: + type: str + description: + - User who deployed. + required: false + rollbar_user: + type: str + description: + - Rollbar username of the user who deployed. + required: false + comment: + type: str + description: + - Deploy comment (for example what is being deployed). + required: false + url: + type: str + description: + - Optional URL to submit the notification to. + required: false + default: 'https://api.rollbar.com/api/1/deploy/' + validate_certs: + description: + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. 
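For reference, the notification this module sends is a plain form-encoded C(POST) against the configured URL, and any non-200 status is treated as a failure. A rough standard-library-only sketch of that request; the token and parameter values below are placeholders, and the module itself goes through C(fetch_url), so it also honors O(validate_certs):

```python
# Minimal sketch of the request rollbar_deployment builds; the access token
# and values are placeholders, not real credentials.
from urllib.parse import urlencode
from urllib.request import urlopen

params = {
    "access_token": "AAAAAA",      # placeholder token
    "environment": "staging",
    "revision": "4.2",
    "local_username": "ansible",   # maps from the module's 'user' option
}

# Form-encode the parameters and POST them to the deploy endpoint; the
# module fails on any status other than 200.
data = urlencode(params).encode("utf-8")
with urlopen("https://api.rollbar.com/api/1/deploy/", data=data) as resp:
    print(resp.status)
```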
+ required: false + default: true + type: bool +""" + +EXAMPLES = r""" +- name: Rollbar deployment notification + community.general.rollbar_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' + rollbar_user: admin + comment: Test Deploy + +- name: Notify rollbar about current git revision deployment by current user + community.general.rollbar_deployment: + token: "{{ rollbar_access_token }}" + environment: production + revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" + user: "{{ lookup('env', 'USER') }}" +""" + +import traceback +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + environment=dict(required=True), + revision=dict(required=True), + user=dict(), + rollbar_user=dict(), + comment=dict(), + url=dict(default='https://api.rollbar.com/api/1/deploy/'), + validate_certs=dict(default=True, type='bool'), + ), + supports_check_mode=True + ) + + if module.check_mode: + module.exit_json(changed=True) + + params = dict( + access_token=module.params['token'], + environment=module.params['environment'], + revision=module.params['revision'] + ) + + if module.params['user']: + params['local_username'] = module.params['user'] + + if module.params['rollbar_user']: + params['rollbar_username'] = module.params['rollbar_user'] + + if module.params['comment']: + params['comment'] = module.params['comment'] + + url = module.params.get('url') + + try: + data = urlencode(params) + response, info = fetch_url(module, url, data=data, method='POST') + except Exception as e: + module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc()) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py deleted file mode 120000 index f23edec531..0000000000 --- a/plugins/modules/rpm_ostree_pkg.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/rpm_ostree_pkg.py \ No newline at end of file diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py new file mode 100644 index 0000000000..a543986706 --- /dev/null +++ b/plugins/modules/rpm_ostree_pkg.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# Copyright (c) 2018, Dusty Mabe +# Copyright (c) 2018, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rpm_ostree_pkg +short_description: Install or uninstall overlay additional packages +version_added: "2.0.0" +description: + - Install or uninstall overlay additional packages using C(rpm-ostree) command. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of overlay package to install or remove. + required: true + type: list + elements: str + aliases: [pkg] + state: + description: + - State of the overlay package. + - V(present) simply ensures that a desired package is installed. 
+      - V(absent) removes the specified package.
+    choices: ['absent', 'present']
+    default: 'present'
+    type: str
+  apply_live:
+    description:
+      - Adds the option C(--apply-live) when O(state=present).
+      - The option is ignored when O(state=absent).
+      - For more information, please see U(https://coreos.github.io/rpm-ostree/apply-live/).
+    type: bool
+    default: false
+    version_added: 10.1.0
+author:
+  - Dusty Mabe (@dustymabe)
+  - Abhijeet Kasurde (@Akasurde)
+"""
+
+EXAMPLES = r"""
+- name: Install overlay package
+  community.general.rpm_ostree_pkg:
+    name: nfs-utils
+    state: present
+
+- name: Remove overlay package
+  community.general.rpm_ostree_pkg:
+    name: nfs-utils
+    state: absent
+
+- name: Apply the overlay package live
+  community.general.rpm_ostree_pkg:
+    name: nfs-utils
+    state: present
+    apply_live: true
+
+# If a different transaction is currently running, the module fails.
+# Adding a delay can help mitigate this problem:
+- name: Install overlay package
+  community.general.rpm_ostree_pkg:
+    name: nfs-utils
+    state: present
+  register: rpm_ostree_pkg
+  until: rpm_ostree_pkg is not failed
+  retries: 10
+  delay: 30
+"""
+
+RETURN = r"""
+action:
+  description: Action performed.
+  returned: always
+  type: str
+  sample: 'install'
+packages:
+  description: A list of packages specified.
+  returned: always
+  type: list
+  sample: ["nfs-utils"]
+cmd:
+  description: Full command used to perform the action.
+  returned: always
+  type: str
+  sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils'
+needs_reboot:
+  description: Whether the machine needs a reboot to apply the current changes.
+  returned: success
+  type: bool
+  sample: true
+  version_added: 10.1.0
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class RpmOstreePkg:
+    def __init__(self, module):
+        self.module = module
+        self.params = module.params
+        self.state = module.params['state']
+
+    def ensure(self):
+        results = dict(
+            rc=0,
+            changed=False,
+            action='',
+            packages=[],
+            stdout='',
+            stderr='',
+            cmd='',
+            needs_reboot=False,
+        )
+
+        # Ensure rpm-ostree command exists
+        cmd = [self.module.get_bin_path('rpm-ostree', required=True)]
+
+        # Decide action to perform
+        if self.state == 'present':
+            results['action'] = 'install'
+            cmd.append('install')
+        elif self.state == 'absent':
+            results['action'] = 'uninstall'
+            cmd.append('uninstall')
+
+        # Add the options to the command line
+        if self.params['apply_live'] and self.state == 'present':
+            cmd.extend(['--apply-live', '--assumeyes'])
+
+        # Additional parameters
+        cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77'])
+        for pkg in self.params['name']:
+            cmd.append(pkg)
+            results['packages'].append(pkg)
+
+        rc, out, err = self.module.run_command(cmd)
+
+        # Determine if system needs a reboot to apply change
+        if 'Changes queued for next boot. Run "systemctl reboot" to start a reboot' in out:
+            results['needs_reboot'] = True
+
+        results.update(dict(
+            rc=rc,
+            cmd=' '.join(cmd),
+            stdout=out,
+            stderr=err,
+        ))
+
+        # A few possible return codes:
+        # - rc=0  - succeeded in making a change
+        # - rc=77 - no change was needed
+        # - rc=?  - error
+        if rc == 0:
+            results['changed'] = True
+        elif rc == 77:
+            results['changed'] = False
+            results['rc'] = 0
+        else:
+            self.module.fail_json(msg='non-zero return code', **results)
+
+        self.module.exit_json(**results)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(
+                default="present",
+                choices=['absent', 'present']
+            ),
+            name=dict(
+                aliases=["pkg"],
+                required=True,
+                type='list',
+                elements='str',
+            ),
+            apply_live=dict(
+                type='bool',
+                default=False,
+            ),
+        ),
+    )
+
+    rpm_ostree_pkg = RpmOstreePkg(module)
+    rpm_ostree_pkg.ensure()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py
deleted file mode 120000
index b2a37e47dc..0000000000
--- a/plugins/modules/rundeck_acl_policy.py
+++ /dev/null
@@ -1 +0,0 @@
-./web_infrastructure/rundeck_acl_policy.py
\ No newline at end of file
diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py
new file mode 100644
index 0000000000..e93363cea2
--- /dev/null
+++ b/plugins/modules/rundeck_acl_policy.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Loic Blot
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies
+description:
+  - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    type: str
+    description:
+      - Create or remove the Rundeck ACL policy.
+    choices: ['present', 'absent']
+    default: 'present'
+  name:
+    type: str
+    description:
+      - Sets the ACL policy name.
+    required: true
+  api_token:
+    description:
+      - Sets the token to authenticate against the Rundeck API.
+    aliases: ["token"]
+  project:
+    type: str
+    description:
+      - Sets the project which receives the ACL policy.
+      - If unset, it is a system ACL policy.
+  policy:
+    type: str
+    description:
+      - Sets the ACL policy content.
+      - ACL policy content is a YAML object as described in U(http://rundeck.org/docs/man5/aclpolicy.html).
+      - It can be a YAML string or a pure Ansible inventory YAML object.
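The endpoint the module targets depends on whether O(project) is set. A minimal sketch of that routing, mirroring the constructor logic further down (the helper name here is illustrative):

```python
# Sketch of the ACL endpoint routing: with a project the policy is
# project-scoped, otherwise it is a system ACL policy.
def acl_endpoint(name, project=None):
    if project:
        return "project/%s/acl/%s.aclpolicy" % (project, name)
    return "system/acl/%s.aclpolicy" % name


print(acl_endpoint("Project_01"))             # system/acl/Project_01.aclpolicy
print(acl_endpoint("Project_01", "Ansible"))  # project/Ansible/acl/Project_01.aclpolicy
```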
+ client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes + - community.general.rundeck +""" + +EXAMPLES = r""" +- name: Create or update a rundeck ACL policy in project Ansible + community.general.rundeck_acl_policy: + name: "Project_01" + api_version: 18 + url: "https://rundeck.example.org" + token: "mytoken" + state: present + project: "Ansible" + policy: + description: "my policy" + context: + application: rundeck + for: + project: + - allow: read + by: + group: "build" + +- name: Remove a rundeck system policy + community.general.rundeck_acl_policy: + name: "Project_01" + url: "https://rundeck.example.org" + token: "mytoken" + state: absent +""" + +RETURN = r""" +rundeck_response: + description: Rundeck response when a failure occurs. + returned: failed + type: str +before: + description: Dictionary containing ACL policy information before modification. + returned: success + type: dict +after: + description: Dictionary containing ACL policy information after modification. + returned: success + type: dict +""" + +# import module snippets +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request, +) + + +class RundeckACLManager: + def __init__(self, module): + self.module = module + if module.params.get("project"): + self.endpoint = "project/%s/acl/%s.aclpolicy" % ( + self.module.params["project"], + self.module.params["name"], + ) + else: + self.endpoint = "system/acl/%s.aclpolicy" % self.module.params["name"] + + def get_acl(self): + resp, info = api_request( + module=self.module, + endpoint=self.endpoint, + ) + + return resp + + def create_or_update_acl(self): + facts = self.get_acl() + if facts is None: + # If in check mode don't create project, simulate a fake project creation + if self.module.check_mode: + self.module.exit_json(changed=True, before={}, after=self.module.params["policy"]) + + resp, info = api_request( + module=self.module, + endpoint=self.endpoint, + method="POST", + data={"contents": self.module.params["policy"]}, + ) + + if info["status"] == 201: + self.module.exit_json(changed=True, before={}, after=self.get_acl()) + elif info["status"] == 400: + self.module.fail_json(msg="Unable to validate acl %s. 
Please ensure it is a valid ACL" %
+                                  self.module.params["name"])
+        elif info["status"] == 409:
+            self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+        else:
+            self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+                                  before={}, after=self.get_acl())
+        else:
+            if facts["contents"] == self.module.params["policy"]:
+                self.module.exit_json(changed=False, before=facts, after=facts)
+
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, before=facts, after=facts)
+
+            resp, info = api_request(
+                module=self.module,
+                endpoint=self.endpoint,
+                method="PUT",
+                data={"contents": self.module.params["policy"]},
+            )
+
+            if info["status"] == 200:
+                self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+            elif info["status"] == 400:
+                self.module.fail_json(msg="Unable to validate acl %s. Please ensure it is a valid ACL" %
+                                      self.module.params["name"])
+            elif info["status"] == 404:
+                self.module.fail_json(msg="ACL %s does not exist. Cannot update." % self.module.params["name"])
+
+    def remove_acl(self):
+        facts = self.get_acl()
+
+        if facts is None:
+            self.module.exit_json(changed=False, before={}, after={})
+        else:
+            # If not in check mode, remove the ACL policy
+            if not self.module.check_mode:
+                api_request(
+                    module=self.module,
+                    endpoint=self.endpoint,
+                    method="DELETE",
+                )
+
+            self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+    # Also allow the user to set values for fetch_url
+    argument_spec = api_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', choices=['present', 'absent'], default='present'),
+        name=dict(required=True, type='str'),
+        policy=dict(type='str'),
+        project=dict(type='str'),
+    ))
+
+    argument_spec['api_token']['aliases'] = ['token']
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ['state', 'present', ['policy']],
+        ],
+        supports_check_mode=True,
+    )
+
+    if not re.fullmatch("[a-zA-Z0-9,.+_-]+", module.params["name"]):
+        module.fail_json(msg="Name contains forbidden characters. The policy name can only contain the characters: a-zA-Z0-9,.+_-")
+
+    if module.params["api_version"] < 14:
+        module.fail_json(msg="API version should be at least 14")
+
+    rundeck = RundeckACLManager(module)
+    if module.params['state'] == 'present':
+        rundeck.create_or_update_acl()
+    elif module.params['state'] == 'absent':
+        rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py
deleted file mode 120000
index 9c5c2138ba..0000000000
--- a/plugins/modules/rundeck_job_executions_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./web_infrastructure/rundeck_job_executions_info.py
\ No newline at end of file
diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py
new file mode 100644
index 0000000000..4c4bd85d09
--- /dev/null
+++ b/plugins/modules/rundeck_job_executions_info.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Phillipe Smith
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: rundeck_job_executions_info
+short_description: Query executions for a Rundeck job
+description:
+  - This module gets the list of executions for a specified Rundeck job.
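Behind this module is a single paginated C(GET) call. A rough standard-library-only equivalent of the request it issues; the server URL, token, and job ID below are placeholders, and the real module authenticates through the shared Rundeck module utils:

```python
# Rough equivalent of the API call this module wraps; all identifiers are
# placeholders taken from the documentation examples.
import json
from urllib.parse import quote, urlencode
from urllib.request import Request, urlopen

base = "https://rundeck.example.org/api/39"
job_id = "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
query = urlencode({"offset": 0, "max": 20, "status": "succeeded"})

req = Request(
    "%s/job/%s/executions?%s" % (base, quote(job_id), query),
    headers={"X-Rundeck-Auth-Token": "mytoken", "Accept": "application/json"},
)
with urlopen(req) as resp:
    print(json.load(resp)["paging"])  # count/total/offset/max, as documented below
```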
+author: "Phillipe Smith (@phsmith)" +version_added: 3.8.0 +options: + job_id: + type: str + description: + - The job unique ID. + required: true + status: + type: str + description: + - The job status to filter. + choices: [succeeded, failed, aborted, running] + max: + type: int + description: + - Max results to return. + default: 20 + offset: + type: int + description: + - The start point to return the results. + default: 0 +extends_documentation_fragment: + - community.general.rundeck + - url + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Get Rundeck job executions info + community.general.rundeck_job_executions_info: + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + job_id: "xxxxxxxxxxxxxxxxx" + register: rundeck_job_executions_info + +- name: Show Rundeck job executions info + ansible.builtin.debug: + var: rundeck_job_executions_info.executions +""" + +RETURN = r""" +paging: + description: Results pagination info. + returned: success + type: dict + contains: + count: + description: Number of results in the response. + type: int + returned: success + total: + description: Total number of results. + type: int + returned: success + offset: + description: Offset from first of all results. + type: int + returned: success + max: + description: Maximum number of results per page. + type: int + returned: success + sample: + { + "count": 20, + "total": 100, + "offset": 0, + "max": 20 + } +executions: + description: Job executions list. + returned: always + type: list + elements: dict + sample: + [ + { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633525515026, + "date": "2021-10-06T13:05:15Z" + }, + "date-ended": { + "unixtime": 1633525518386, + "date": "2021-10-06T13:05:18Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 6381, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068" + } + ] +""" + +from urllib.parse import quote +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request +) + + +class RundeckJobExecutionsInfo(object): + def __init__(self, module): + self.module = module + self.url = self.module.params["url"] + self.api_version = self.module.params["api_version"] + self.job_id = self.module.params["job_id"] + self.offset = self.module.params["offset"] + self.max = self.module.params["max"] + self.status = self.module.params["status"] or "" + + def job_executions(self): + response, info = api_request( + module=self.module, + endpoint="job/%s/executions?offset=%s&max=%s&status=%s" + % (quote(self.job_id), self.offset, self.max, self.status), + method="GET" + ) + + if info["status"] != 200: + self.module.fail_json( + msg=info["msg"], + executions=response + ) + 
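            # On success, the raw API response (the "paging" metadata plus the
            # "executions" list) is flattened into the module result below.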
+        self.module.exit_json(msg="Executions info result", **response)
+
+
+def main():
+    argument_spec = api_argument_spec()
+    argument_spec.update(dict(
+        job_id=dict(required=True, type="str"),
+        offset=dict(type="int", default=0),
+        max=dict(type="int", default=20),
+        status=dict(
+            type="str",
+            choices=["succeeded", "failed", "aborted", "running"]
+        )
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    if module.params["api_version"] < 14:
+        module.fail_json(msg="API version should be at least 14")
+
+    rundeck = RundeckJobExecutionsInfo(module)
+    rundeck.job_executions()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py
deleted file mode 120000
index 0ac9838a56..0000000000
--- a/plugins/modules/rundeck_job_run.py
+++ /dev/null
@@ -1 +0,0 @@
-./web_infrastructure/rundeck_job_run.py
\ No newline at end of file
diff --git a/plugins/modules/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py
new file mode 100644
index 0000000000..768e67967a
--- /dev/null
+++ b/plugins/modules/rundeck_job_run.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Phillipe Smith
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: rundeck_job_run
+short_description: Run a Rundeck job
+description:
+  - This module runs a Rundeck job specified by ID.
+author: "Phillipe Smith (@phsmith)"
+version_added: 3.8.0
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  job_id:
+    type: str
+    description:
+      - The job unique ID.
+    required: true
+  job_options:
+    type: dict
+    description:
+      - The job options for the steps.
+      - Numeric values must be quoted.
+  filter_nodes:
+    type: str
+    description:
+      - Filter the nodes where the job must run.
+      - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax).
+  run_at_time:
+    type: str
+    description:
+      - Schedule the job execution to run at a specific date and time.
+      - ISO-8601 date and time format like V(2021-10-05T15:45:00-03:00).
+  loglevel:
+    type: str
+    description:
+      - Log level configuration.
+    choices: [debug, verbose, info, warn, error]
+    default: info
+  wait_execution:
+    type: bool
+    description:
+      - Wait until the job finishes its execution.
+    default: true
+  wait_execution_delay:
+    type: int
+    description:
+      - Delay, in seconds, between job execution status check requests.
+    default: 5
+  wait_execution_timeout:
+    type: int
+    description:
+      - Job execution wait timeout in seconds.
+      - If the timeout is reached, the module fails; the job is aborted only if O(abort_on_timeout=true).
+      - Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check.
+    default: 120
+  abort_on_timeout:
+    type: bool
+    description:
+      - Send a job abort request if the specified O(wait_execution_timeout) is exceeded.
+    default: false
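The wait options above describe a simple poll-and-deadline loop. A minimal sketch of that contract, assuming a caller-supplied fetch_status() callable (a hypothetical stand-in for the execution status request):

```python
# Sketch of the polling contract: check the execution status every `delay`
# seconds until a terminal status is seen or `timeout` seconds have passed.
from datetime import datetime, timedelta
from time import sleep


def wait_for(fetch_status, delay=5, timeout=120):
    due = datetime.now() + timedelta(seconds=timeout)
    while True:
        status = fetch_status()
        if status in ("succeeded", "failed", "aborted"):
            return status
        if datetime.now() >= due:
            # The module then fails, sending an abort request first
            # when abort_on_timeout is enabled.
            return "timed_out"
        sleep(delay)  # worst case adds one extra delay past the timeout
```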
+extends_documentation_fragment:
+  - community.general.rundeck
+  - ansible.builtin.url
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Run a Rundeck job
+  community.general.rundeck_job_run:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+  register: rundeck_job_run
+
+- name: Show execution info
+  ansible.builtin.debug:
+    var: rundeck_job_run.execution_info
+
+- name: Run a Rundeck job with options
+  community.general.rundeck_job_run:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+    job_options:
+      option_1: "value_1"
+      option_2: "value_2"
+      option_3: "value_3"
+  register: rundeck_job_run
+
+- name: Run a Rundeck job with timeout, delay between status checks, and abort on timeout
+  community.general.rundeck_job_run:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+    wait_execution_timeout: 30
+    wait_execution_delay: 10
+    abort_on_timeout: true
+  register: rundeck_job_run
+
+- name: Schedule a Rundeck job
+  community.general.rundeck_job_run:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+    run_at_time: "2021-10-05T15:45:00-03:00"
+  register: rundeck_job_schedule
+
+- name: Fire-and-forget a Rundeck job
+  community.general.rundeck_job_run:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+    wait_execution: false
+  register: rundeck_job_run
+"""
+
+RETURN = r"""
+execution_info:
+  description: Rundeck job execution metadata.
+  returned: always
+  type: dict
+  sample:
+    {
+      "msg": "Job execution succeeded!",
+      "execution_info": {
+        "id": 1,
+        "href": "https://rundeck.example.org/api/39/execution/1",
+        "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
+        "status": "succeeded",
+        "project": "myproject",
+        "executionType": "user",
+        "user": "admin",
+        "date-started": {
+          "unixtime": 1633449020784,
+          "date": "2021-10-05T15:50:20Z"
+        },
+        "date-ended": {
+          "unixtime": 1633449026358,
+          "date": "2021-10-05T15:50:26Z"
+        },
+        "job": {
+          "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+          "averageDuration": 4917,
+          "name": "Test",
+          "group": "",
+          "project": "myproject",
+          "description": "",
+          "options": {
+            "exit_code": "0"
+          },
+          "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+          "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
+        },
+        "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
+        "argstring": "-exit_code 0",
+        "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
+        "successfulNodes": [
+          "localhost"
+        ],
+        "output": "Test!"
+      }
+    }
+"""
+
+# Module imports
+from datetime import datetime, timedelta
+from time import sleep
+from urllib.parse import quote
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+    api_argument_spec,
+    api_request
+)
+
+
+class RundeckJobRun(object):
+    def __init__(self, module):
+        self.module = module
+        self.url = self.module.params["url"]
+        self.api_version = self.module.params["api_version"]
+        self.job_id = self.module.params["job_id"]
+        self.job_options = self.module.params["job_options"] or {}
+        self.filter_nodes = self.module.params["filter_nodes"] or ""
+        self.run_at_time = self.module.params["run_at_time"] or ""
+        self.loglevel = self.module.params["loglevel"].upper()
+        self.wait_execution = self.module.params['wait_execution']
+        self.wait_execution_delay = self.module.params['wait_execution_delay']
+        self.wait_execution_timeout = self.module.params['wait_execution_timeout']
+        self.abort_on_timeout = self.module.params['abort_on_timeout']
+
+        for k, v in self.job_options.items():
+            if not isinstance(v, str):
+                self.module.fail_json(
+                    msg="Job option '%s' value must be a string" % k,
+                    execution_info={}
+                )
+
+    def job_status_check(self, execution_id):
+        response = dict()
+        timeout = False
+        due = datetime.now() + timedelta(seconds=self.wait_execution_timeout)
+
+        while not timeout:
+            endpoint = "execution/%d" % execution_id
+            response = api_request(module=self.module, endpoint=endpoint)[0]
+            output = api_request(module=self.module,
+                                 endpoint="execution/%d/output" % execution_id)
+            log_output = "\n".join([x["log"] for x in output[0]["entries"]])
+            response.update({"output": log_output})
+
+            if response["status"] == "aborted":
+                break
+            elif response["status"] == "scheduled":
+                self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time,
+                                      execution_info=response,
+                                      changed=True)
+            elif response["status"] == "failed":
+                self.module.fail_json(msg="Job execution failed",
+                                      execution_info=response)
+            elif response["status"] == "succeeded":
+                self.module.exit_json(msg="Job execution succeeded!",
+                                      execution_info=response)
+
+            if datetime.now() >= due:
+                timeout = True
+                break
+
+            # Wait for the configured delay before the next status check
+            sleep(self.wait_execution_delay)
+
+        response.update({"timed_out": timeout})
+        return response
+
+    def job_run(self):
+        response, info = api_request(
+            module=self.module,
+            endpoint="job/%s/run" % quote(self.job_id),
+            method="POST",
+            data={
+                "loglevel": self.loglevel,
+                "options": self.job_options,
+                "runAtTime": self.run_at_time,
+                "filter": self.filter_nodes
+            }
+        )
+
+        if info["status"] != 200:
+            self.module.fail_json(msg=info["msg"])
+
+        if not self.wait_execution:
+            self.module.exit_json(msg="Job run sent successfully!",
+                                  execution_info=response)
+
+        job_status = self.job_status_check(response["id"])
+
+        if job_status["timed_out"]:
+            if self.abort_on_timeout:
+                api_request(
+                    module=self.module,
+                    endpoint="execution/%s/abort" % response['id'],
+                    method="GET"
+                )
+
+                abort_status = self.job_status_check(response["id"])
+
+                self.module.fail_json(msg="Job execution aborted due to the specified timeout",
+                                      execution_info=abort_status)
+
+            self.module.fail_json(msg="Job execution timed out",
+                                  execution_info=job_status)
+
+
+def main():
+    argument_spec = api_argument_spec()
+    argument_spec.update(dict(
+        job_id=dict(required=True, type="str"),
+        job_options=dict(type="dict"),
+        filter_nodes=dict(type="str"),
+        run_at_time=dict(type="str"),
+        wait_execution=dict(type="bool",
default=True), + wait_execution_delay=dict(type="int", default=5), + wait_execution_timeout=dict(type="int", default=120), + abort_on_timeout=dict(type="bool", default=False), + loglevel=dict( + type="str", + choices=["debug", "verbose", "info", "warn", "error"], + default="info" + ) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckJobRun(module) + rundeck.job_run() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/rundeck_project.py b/plugins/modules/rundeck_project.py deleted file mode 120000 index 32aeaef8dc..0000000000 --- a/plugins/modules/rundeck_project.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/rundeck_project.py \ No newline at end of file diff --git a/plugins/modules/rundeck_project.py b/plugins/modules/rundeck_project.py new file mode 100644 index 0000000000..47db41a744 --- /dev/null +++ b/plugins/modules/rundeck_project.py @@ -0,0 +1,194 @@ +#!/usr/bin/python + +# Ansible module to manage rundeck projects +# Copyright (c) 2017, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rundeck_project + +short_description: Manage Rundeck projects +description: + - Create and remove Rundeck projects through HTTP API. +author: "Loic Blot (@nerzhul)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + type: str + description: + - Sets the project name. + required: true + api_token: + description: + - Sets the token to authenticate against Rundeck API. + aliases: ["token"] + client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes + - community.general.rundeck +""" + +EXAMPLES = r""" +- name: Create a rundeck project + community.general.rundeck_project: + name: "Project_01" + label: "Project 01" + description: "My Project 01" + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + state: present + +- name: Remove a rundeck project + community.general.rundeck_project: + name: "Project_01" + url: "https://rundeck.example.org" + api_token: "mytoken" + state: absent +""" + +RETURN = r""" +rundeck_response: + description: Rundeck response when a failure occurs. + returned: failed + type: str +before: + description: Dictionary containing project information before modification. + returned: success + type: dict +after: + description: Dictionary containing project information after modification. 
+ returned: success + type: dict +""" + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request, +) + + +class RundeckProjectManager(object): + def __init__(self, module): + self.module = module + + def get_project_facts(self): + resp, info = api_request( + module=self.module, + endpoint="project/%s" % self.module.params["name"], + ) + + return resp + + def create_or_update_project(self): + facts = self.get_project_facts() + + if facts is None: + # If in check mode don't create project, simulate a fake project creation + if self.module.check_mode: + self.module.exit_json( + changed=True, + before={}, + after={ + "name": self.module.params["name"] + }, + ) + + resp, info = api_request( + module=self.module, + endpoint="projects", + method="POST", + data={ + "name": self.module.params["name"], + "config": {}, + } + ) + + if info["status"] == 201: + self.module.exit_json(changed=True, before={}, after=self.get_project_facts()) + else: + self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"], + before={}, after=self.get_project_facts()) + else: + self.module.exit_json(changed=False, before=facts, after=facts) + + def remove_project(self): + facts = self.get_project_facts() + if facts is None: + self.module.exit_json(changed=False, before={}, after={}) + else: + # If not in check mode, remove the project + if not self.module.check_mode: + api_request( + module=self.module, + endpoint="project/%s" % self.module.params["name"], + method="DELETE", + ) + + self.module.exit_json(changed=True, before=facts, after={}) + + +def main(): + # Also allow the user to set values for fetch_url + argument_spec = api_argument_spec() + argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + )) + + argument_spec['api_token']['aliases'] = ['token'] + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckProjectManager(module) + if module.params['state'] == 'present': + rundeck.create_or_update_project() + elif module.params['state'] == 'absent': + rundeck.remove_project() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/runit.py b/plugins/modules/runit.py deleted file mode 120000 index a65361a111..0000000000 --- a/plugins/modules/runit.py +++ /dev/null @@ -1 +0,0 @@ -./system/runit.py \ No newline at end of file diff --git a/plugins/modules/runit.py b/plugins/modules/runit.py new file mode 100644 index 0000000000..d5acba36d3 --- /dev/null +++ b/plugins/modules/runit.py @@ -0,0 +1,257 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: runit +author: + - James Sumners (@jsumners) +short_description: Manage runit services +description: + - Controls runit services on remote hosts using the sv utility. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the service to manage. 
+    type: str
+    required: true
+  state:
+    description:
+      - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+      - V(restarted) always bounces the service (C(sv restart)) and V(killed) always bounces the service (C(sv force-stop)).
+      - V(reloaded) always sends a HUP (C(sv reload)).
+      - V(once) runs a normally downed service once (C(sv once)); this is not an idempotent operation.
+    type: str
+    choices: [killed, once, reloaded, restarted, started, stopped]
+  enabled:
+    description:
+      - Whether the service is enabled. If disabled, this also implies stopped.
+    type: bool
+  service_dir:
+    description:
+      - Directory C(runsv) watches for services.
+    type: str
+    default: /var/service
+  service_src:
+    description:
+      - Directory where services are defined, the source of symlinks to O(service_dir).
+    type: str
+    default: /etc/sv
+"""
+
+EXAMPLES = r"""
+- name: Start sv dnscache, if not running
+  community.general.runit:
+    name: dnscache
+    state: started
+
+- name: Stop sv dnscache, if running
+  community.general.runit:
+    name: dnscache
+    state: stopped
+
+- name: Kill sv dnscache, in all cases
+  community.general.runit:
+    name: dnscache
+    state: killed
+
+- name: Restart sv dnscache, in all cases
+  community.general.runit:
+    name: dnscache
+    state: restarted
+
+- name: Reload sv dnscache, in all cases
+  community.general.runit:
+    name: dnscache
+    state: reloaded
+
+- name: Use alternative sv directory location
+  community.general.runit:
+    name: dnscache
+    state: reloaded
+    service_dir: /run/service
+"""
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Sv(object):
+    """
+    Main class that handles runit; it can be subclassed and overridden in case
+    we want to use a 'derivative' like encore, s6, etc
+    """
+
+    def __init__(self, module):
+        self.extra_paths = []
+        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+        self.module = module
+
+        self.name = module.params['name']
+        self.service_dir = module.params['service_dir']
+        self.service_src = module.params['service_src']
+        self.enabled = None
+        self.full_state = None
+        self.state = None
+        self.pid = None
+        self.duration = None
+
+        self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
+        self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
+        self.svc_full = '/'.join([self.service_dir, self.name])
+        self.src_full = '/'.join([self.service_src, self.name])
+
+        self.enabled = os.path.lexists(self.svc_full)
+        if self.enabled:
+            self.get_status()
+        else:
+            self.state = 'stopped'
+
+    def enable(self):
+        if os.path.exists(self.src_full):
+            try:
+                os.symlink(self.src_full, self.svc_full)
+            except OSError as e:
+                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+        else:
+            self.module.fail_json(msg="Could not find source for service to enable (%s)."
% self.src_full) + + def disable(self): + self.execute_command([self.svc_cmd, 'force-stop', self.src_full]) + try: + os.unlink(self.svc_full) + except OSError as e: + self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e)) + + def get_status(self): + (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full]) + + if err is not None and err: + self.full_state = self.state = err + else: + self.full_state = out + # full_state *may* contain information about the logger: + # "down: /etc/service/service-without-logger: 1s, normally up\n" + # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n" + full_state_no_logger = self.full_state.split("; ")[0] + + m = re.search(r'\(pid (\d+)\)', full_state_no_logger) + if m: + self.pid = m.group(1) + + m = re.search(r' (\d+)s', full_state_no_logger) + if m: + self.duration = m.group(1) + + if re.search(r'^run:', full_state_no_logger): + self.state = 'started' + elif re.search(r'^down:', full_state_no_logger): + self.state = 'stopped' + else: + self.state = 'unknown' + return + + def started(self): + return self.start() + + def start(self): + return self.execute_command([self.svc_cmd, 'start', self.svc_full]) + + def stopped(self): + return self.stop() + + def stop(self): + return self.execute_command([self.svc_cmd, 'stop', self.svc_full]) + + def once(self): + return self.execute_command([self.svc_cmd, 'once', self.svc_full]) + + def reloaded(self): + return self.reload() + + def reload(self): + return self.execute_command([self.svc_cmd, 'reload', self.svc_full]) + + def restarted(self): + return self.restart() + + def restart(self): + return self.execute_command([self.svc_cmd, 'restart', self.svc_full]) + + def killed(self): + return self.kill() + + def kill(self): + return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full]) + + def execute_command(self, cmd): + try: + (rc, out, err) = self.module.run_command(cmd) + except Exception as e: + self.module.fail_json(msg="failed to execute: %s" % to_native(e)) + return rc, out, err + + def report(self): + self.get_status() + states = {} + for k in self.report_vars: + states[k] = self.__dict__[k] + return states + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), + enabled=dict(type='bool'), + service_dir=dict(type='str', default='/var/service'), + service_src=dict(type='str', default='/etc/sv'), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + state = module.params['state'] + enabled = module.params['enabled'] + + sv = Sv(module) + changed = False + + if enabled is not None and enabled != sv.enabled: + changed = True + if not module.check_mode: + try: + if enabled: + sv.enable() + else: + sv.disable() + except (OSError, IOError) as e: + module.fail_json(msg="Could not change service link: %s" % to_native(e)) + + if state is not None and state != sv.state: + changed = True + if not module.check_mode: + getattr(sv, state)() + + module.exit_json(changed=changed, sv=sv.report()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sap_task_list_execute.py b/plugins/modules/sap_task_list_execute.py deleted file mode 120000 index c27ac0a6ca..0000000000 --- a/plugins/modules/sap_task_list_execute.py +++ /dev/null @@ -1 +0,0 @@ -system/sap_task_list_execute.py \ No 
newline at end of file
diff --git a/plugins/modules/sapcar_extract.py b/plugins/modules/sapcar_extract.py
deleted file mode 120000
index 7bb47b10c1..0000000000
--- a/plugins/modules/sapcar_extract.py
+++ /dev/null
@@ -1 +0,0 @@
-./files/sapcar_extract.py
\ No newline at end of file
diff --git a/plugins/modules/say.py b/plugins/modules/say.py
deleted file mode 120000
index 8ee07ee726..0000000000
--- a/plugins/modules/say.py
+++ /dev/null
@@ -1 +0,0 @@
-./notification/say.py
\ No newline at end of file
diff --git a/plugins/modules/say.py b/plugins/modules/say.py
new file mode 100644
index 0000000000..84dc65a840
--- /dev/null
+++ b/plugins/modules/say.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013, Michael DeHaan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: say
+short_description: Makes a computer speak
+description:
+  - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+  - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+  - If you like this module, you may also be interested in the osx_say callback plugin.
+  - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on
+    a Linux host.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  msg:
+    type: str
+    description:
+      - What to say.
+    required: true
+  voice:
+    type: str
+    description:
+      - What voice to use.
+    required: false
+requirements: [say or espeak or espeak-ng]
+author:
+  - "Ansible Core Team"
+  - "Michael DeHaan (@mpdehaan)"
+"""
+
+EXAMPLES = r"""
+- name: Make a computer speak
+  community.general.say:
+    msg: '{{ inventory_hostname }} is all done'
+    voice: Zarvox
+  delegate_to: localhost
+"""
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+    cmd = [executable, msg]
+    if voice:
+        cmd.extend(('-v', voice))
+    module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            msg=dict(required=True),
+            voice=dict(),
+        ),
+        supports_check_mode=True
+    )
+
+    msg = module.params['msg']
+    voice = module.params['voice']
+    possibles = ('say', 'espeak', 'espeak-ng')
+
+    if platform.system() != 'Darwin':
+        # A 'say' binary on a non-Darwin system might be the GNUstep tool,
+        # which does not support the 'voice' parameter
+        voice = None
+
+    for possible in possibles:
+        executable = module.get_bin_path(possible)
+        if executable:
+            break
+    else:
+        module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
+
+    if module.check_mode:
+        module.exit_json(msg=msg, changed=False)
+
+    say(module, executable, msg, voice)
+
+    module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py
deleted file mode 120000
index 9278c1dcdc..0000000000
--- a/plugins/modules/scaleway_compute.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/scaleway/scaleway_compute.py
\ No newline at end of file
diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py
new file mode 100644
index 0000000000..4cc23f9571
--- /dev/null
+++ b/plugins/modules/scaleway_compute.py
@@ -0,0 +1,705 @@
+#!/usr/bin/python
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@remyleone)
+description:
+  - This module manages compute instances on Scaleway.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.scaleway.actiongroup_scaleway
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+  action_group:
+    version_added: 11.3.0
+
+options:
+
+  public_ip:
+    type: str
+    description:
+      - Manage public IP on a Scaleway server.
+      - Can be a Scaleway IP address UUID.
+      - V(dynamic) means that the IP is destroyed at the same time the host is destroyed.
+      - V(absent) means no public IP at all.
+    default: absent
+
+  enable_ipv6:
+    description:
+      - Enable public IPv6 connectivity on the instance.
+    default: false
+    type: bool
+
+  image:
+    type: str
+    description:
+      - Identifier of the image used to start the instance.
+    required: true
+
+  name:
+    type: str
+    description:
+      - Name of the instance.
+  organization:
+    type: str
+    description:
+      - Organization identifier.
+      - Exactly one of O(project) and O(organization) must be specified.
+  project:
+    type: str
+    description:
+      - Project identifier.
+      - Exactly one of O(project) and O(organization) must be specified.
+    version_added: 4.3.0
+
+  state:
+    type: str
+    description:
+      - Indicate desired state of the instance.
+    default: present
+    choices:
+      - present
+      - absent
+      - running
+      - restarted
+      - stopped
+
+  tags:
+    type: list
+    elements: str
+    description:
+      - List of tags to apply to the instance (maximum of 5).
+    required: false
+    default: []
+
+  region:
+    type: str
+    description:
+      - Scaleway compute zone.
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - ams2
+      - ams3
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - par3
+      - waw1
+      - EMEA-PL-WAW1
+      - waw2
+      - waw3
+
+  commercial_type:
+    type: str
+    description:
+      - Commercial name of the compute node.
+    required: true
+
+  wait:
+    description:
+      - Wait for the instance to reach its desired state before returning.
+    type: bool
+    default: false
+
+  wait_timeout:
+    type: int
+    description:
+      - Time to wait for the server to reach the expected state.
+    required: false
+    default: 300
+
+  wait_sleep_time:
+    type: int
+    description:
+      - Time to wait before every attempt to check the state of the server.
+    required: false
+    default: 3
+
+  security_group:
+    type: str
+    description:
+      - Security group unique identifier.
+      - If no value is provided, the default security group or the current security group is used.
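The O(public_ip) values above translate directly into the instance API payload. A distilled sketch of that mapping; the module's own public_ip_payload() further down additionally verifies that an explicit IP UUID exists before using it:

```python
# Distilled version of the documented O(public_ip) mapping.
def public_ip_payload(public_ip):
    if public_ip == "absent":
        return {"dynamic_ip_required": False}
    if public_ip == "dynamic":
        return {"dynamic_ip_required": True}
    return {"public_ip": public_ip}  # assumed to be an existing IP UUID


print(public_ip_payload("absent"))   # {'dynamic_ip_required': False}
print(public_ip_payload("dynamic"))  # {'dynamic_ip_required': True}
```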
+ required: false +""" + +EXAMPLES = r""" +- name: Create a server + community.general.scaleway_compute: + name: foobar + state: present + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + project: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S + tags: + - test + - www + +- name: Create a server attached to a security group + community.general.scaleway_compute: + name: foobar + state: present + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + project: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S + security_group: 4a31b633-118e-4900-bd52-facf1085fc8d + tags: + - test + - www + +- name: Destroy it right after + community.general.scaleway_compute: + name: foobar + state: absent + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + project: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S +""" + +RETURN = r""" +""" + +import datetime +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import now +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway + +SCALEWAY_SERVER_STATES = ( + 'stopped', + 'stopping', + 'starting', + 'running', + 'locked' +) + +SCALEWAY_TRANSITIONS_STATES = ( + "stopping", + "starting", + "pending" +) + + +def check_image_id(compute_api, image_id): + response = compute_api.get(path="images/%s" % image_id) + + if not response.ok: + msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json) + compute_api.module.fail_json(msg=msg) + + +def fetch_state(compute_api, server): + compute_api.module.debug("fetch_state of server: %s" % server["id"]) + response = compute_api.get(path="servers/%s" % server["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"])) + return response.json["server"]["state"] + except KeyError: + compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json) + + +def wait_to_complete_state_transition(compute_api, server, wait=None): + if wait is None: + wait = compute_api.module.params["wait"] + if not wait: + return + + wait_timeout = compute_api.module.params["wait_timeout"] + wait_sleep_time = compute_api.module.params["wait_sleep_time"] + + start = now() + end = start + datetime.timedelta(seconds=wait_timeout) + while now() < end: + compute_api.module.debug("We are going to wait for the server to finish its transition") + if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: + compute_api.module.debug("It seems that the server is not in transition anymore.") + compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server)) + break + time.sleep(wait_sleep_time) + else: + compute_api.module.fail_json(msg="Server takes too long to finish its transition") + + +def public_ip_payload(compute_api, public_ip): + # We don't want a public ip + if public_ip in ("absent",): + return {"dynamic_ip_required": False} + + # IP is only attached to the instance and is released as soon as the instance terminates + if public_ip in ("dynamic", "allocated"): + return {"dynamic_ip_required": True} + + # We check that the IP we want to attach exists, if so its ID is 
returned + response = compute_api.get("ips") + if not response.ok: + msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + ip_list = [] + try: + ip_list = response.json["ips"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json) + + lookup = [ip["id"] for ip in ip_list] + if public_ip in lookup: + return {"public_ip": public_ip} + + +def create_server(compute_api, server): + compute_api.module.debug("Starting a create_server") + target_server = None + data = {"enable_ipv6": server["enable_ipv6"], + "tags": server["tags"], + "commercial_type": server["commercial_type"], + "image": server["image"], + "dynamic_ip_required": server["dynamic_ip_required"], + "name": server["name"] + } + + if server["project"]: + data["project"] = server["project"] + + if server["organization"]: + data["organization"] = server["organization"] + + if server["security_group"]: + data["security_group"] = server["security_group"] + + response = compute_api.post(path="servers", data=data) + + if not response.ok: + msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + target_server = response.json["server"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + return target_server + + +def restart_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="reboot") + + +def stop_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="poweroff") + + +def start_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="poweron") + + +def perform_action(compute_api, server, action): + response = compute_api.post(path="servers/%s/action" % server["id"], + data={"action": action}) + if not response.ok: + msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=server) + + return response + + +def remove_server(compute_api, server): + compute_api.module.debug("Starting remove server strategy") + response = compute_api.delete(path="servers/%s" % server["id"]) + if not response.ok: + msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=server) + + return response + + +def present_strategy(compute_api, wished_server): + compute_api.module.debug("Starting present strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed." 
% target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + return changed, target_server + + +def absent_strategy(compute_api, wished_server): + compute_api.module.debug("Starting absent strategy") + changed = False + target_server = None + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + return changed, {"status": "Server already absent."} + else: + target_server = query_results[0] + + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be made absent." % target_server["id"]} + + # A server MUST be stopped to be deleted. + while fetch_state(compute_api=compute_api, server=target_server) != "stopped": + wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) + response = stop_server(compute_api=compute_api, server=target_server) + + if not response.ok: + err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=err_msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) + + response = remove_server(compute_api=compute_api, server=target_server) + + if not response.ok: + err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=err_msg) + + return changed, {"status": "Server %s deleted" % target_server["id"]} + + +def running_strategy(compute_api, wished_server): + compute_api.module.debug("Starting running strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being run."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + current_state = fetch_state(compute_api=compute_api, server=target_server) + if current_state not in ("running", "starting"): + compute_api.module.debug("running_strategy: Server in state: %s" % current_state) + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed." 
% target_server["id"]} + + response = start_server(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +def stop_strategy(compute_api, wished_server): + compute_api.module.debug("Starting stop strategy") + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + changed = False + + if not query_results: + + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being stopped."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + changed = True + else: + target_server = query_results[0] + + compute_api.module.debug("stop_strategy: Servers are found.") + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, { + "status": "Server %s attributes would be changed before stopping it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + current_state = fetch_state(compute_api=compute_api, server=target_server) + if current_state not in ("stopped",): + compute_api.module.debug("stop_strategy: Server in state: %s" % current_state) + + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be stopped." % target_server["id"]} + + response = stop_server(compute_api=compute_api, server=target_server) + compute_api.module.debug(response.json) + compute_api.module.debug(response.ok) + + if not response.ok: + msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +def restart_strategy(compute_api, wished_server): + compute_api.module.debug("Starting restart strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being rebooted."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, + target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, { + "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + changed = True + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be rebooted." 
% target_server["id"]}
+
+    wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+    if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
+        response = restart_server(compute_api=compute_api, server=target_server)
+        wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+        if not response.ok:
+            msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
+                                                                                      response.json)
+            compute_api.module.fail_json(msg=msg)
+
+    if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
+        response = restart_server(compute_api=compute_api, server=target_server)
+        wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+        if not response.ok:
+            msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
+                                                                                      response.json)
+            compute_api.module.fail_json(msg=msg)
+
+    return changed, target_server
+
+
+state_strategy = {
+    "present": present_strategy,
+    "restarted": restart_strategy,
+    "stopped": stop_strategy,
+    "running": running_strategy,
+    "absent": absent_strategy
+}
+
+
+def find(compute_api, wished_server, per_page=1):
+    compute_api.module.debug("Getting inside find")
+    # Only the name attribute is accepted in the Compute query API
+    response = compute_api.get("servers", params={"name": wished_server["name"],
+                                                  "per_page": per_page})
+
+    if not response.ok:
+        msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
+        compute_api.module.fail_json(msg=msg)
+
+    search_results = response.json["servers"]
+
+    return search_results
+
+
+PATCH_MUTABLE_SERVER_ATTRIBUTES = (
+    "ipv6",
+    "tags",
+    "name",
+    "dynamic_ip_required",
+    "security_group",
+)
+
+
+def server_attributes_should_be_changed(compute_api, target_server, wished_server):
+    compute_api.module.debug("Checking if server attributes should be changed")
+    compute_api.module.debug("Current Server: %s" % target_server)
+    compute_api.module.debug("Wished Server: %s" % wished_server)
+    debug_dict = {
+        x: (target_server[x], wished_server[x])
+        for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
+        if x in target_server and x in wished_server
+    }
+    compute_api.module.debug("Debug dict %s" % debug_dict)
+    try:
+        for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+            if key in target_server and key in wished_server:
+                # For dict attributes, only the ID matters, as we ask the user to put only the resource ID in the playbook
+                if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key] \
+                        and target_server[key]["id"] != wished_server[key]:
+                    return True
+                # For any other structure, simply compare the two values directly
+                elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
+                    return True
+        return False
+    except AttributeError:
+        compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
+
+
+def server_change_attributes(compute_api, target_server, wished_server):
+    compute_api.module.debug("Starting patching server attributes")
+    patch_payload = dict()
+
+    for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+        if key in target_server and key in wished_server:
+            # For dict attributes, only the ID matters, as we ask the user to put only the resource ID in the playbook
+            if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
+                # Keep every key at its current value, except the ID
+                key_dict = {x: target_server[key][x]
+                            for x in target_server[key] if x != "id"}
+                # Set the ID to the user-specified ID
+                key_dict["id"] = wished_server[key]
+                patch_payload[key] = key_dict
+            elif not isinstance(target_server[key], dict):
+                patch_payload[key] = wished_server[key]
+
+    response = compute_api.patch(path="servers/%s" % target_server["id"],
+                                 data=patch_payload)
+    if not response.ok:
+        msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
+        compute_api.module.fail_json(msg=msg)
+
+    try:
+        target_server = response.json["server"]
+    except KeyError:
+        compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+    wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+    return target_server
+
+
+def core(module):
+    region = module.params["region"]
+    wished_server = {
+        "state": module.params["state"],
+        "image": module.params["image"],
+        "name": module.params["name"],
+        "commercial_type": module.params["commercial_type"],
+        "enable_ipv6": module.params["enable_ipv6"],
+        "tags": module.params["tags"],
+        "organization": module.params["organization"],
+        "project": module.params["project"],
+        "security_group": module.params["security_group"]
+    }
+    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+    compute_api = Scaleway(module=module)
+
+    check_image_id(compute_api, wished_server["image"])
+
+    # The IP parameters of the wished server depend on the configuration
+    ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
+    wished_server.update(ip_payload)
+
+    changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
+    module.exit_json(changed=changed, msg=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(dict(
+        image=dict(required=True),
+        name=dict(),
+        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+        commercial_type=dict(required=True),
+        enable_ipv6=dict(default=False, type="bool"),
+        public_ip=dict(default="absent"),
+        state=dict(choices=list(state_strategy.keys()), default='present'),
+        tags=dict(type="list", elements="str", default=[]),
+        organization=dict(),
+        project=dict(),
+        wait=dict(type="bool", default=False),
+        wait_timeout=dict(type="int", default=300),
+        wait_sleep_time=dict(type="int", default=3),
+        security_group=dict(),
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ('organization', 'project'),
+        ],
+        required_one_of=[
+            ('organization', 'project'),
+        ],
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py
new file mode 100644
index 0000000000..33be950f22
--- /dev/null
+++ b/plugins/modules/scaleway_compute_private_network.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+#
+# Scaleway VPC management module
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: scaleway_compute_private_network
+short_description: Scaleway compute - private network management
+version_added: 5.2.0
+author: Pascal MANGIN (@pastral)
+description:
+  - This module adds a private network to, or removes it from, a compute instance (U(https://developer.scaleway.com)).
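
Reviewer note: the scaleway_compute module above and the serverless modules added below all share the same control shape. A state_strategy dict maps the requested state to a handler, each handler returns a (changed, summary) pair, and check mode short-circuits inside the handler. Below is a minimal, runnable sketch of that pattern only; FakeAPI and the "demo" resource are illustrative stand-ins, not part of the collection.

# Sketch of the state-dispatch pattern used by these modules (illustrative only).
class FakeAPI:
    """Stands in for the Scaleway API wrapper; stores resources by name."""
    def __init__(self, check_mode=False):
        self.resources = {}
        self.check_mode = check_mode

    def find(self, name):
        return self.resources.get(name)


def present_strategy(api, wished):
    existing = api.find(wished["name"])
    if existing is not None:
        return False, existing  # already there, nothing to do
    if api.check_mode:
        return True, {"status": "Resource would be created."}
    api.resources[wished["name"]] = wished
    return True, wished


def absent_strategy(api, wished):
    if api.find(wished["name"]) is None:
        return False, {}  # already absent, nothing to do
    if api.check_mode:
        return True, {"status": "Resource would be deleted."}
    return True, api.resources.pop(wished["name"])


state_strategy = {"present": present_strategy, "absent": absent_strategy}

if __name__ == "__main__":
    api = FakeAPI()
    print(state_strategy["present"](api, {"name": "demo"}))  # (True, ...)
    print(state_strategy["present"](api, {"name": "demo"}))  # (False, ...) - idempotent
    print(state_strategy["absent"](api, {"name": "demo"}))   # (True, ...)

Keeping the check-mode exit inside each handler is what lets core() stay a single dictionary lookup.
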
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.scaleway.actiongroup_scaleway
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+  action_group:
+    version_added: 11.3.0
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the VPC.
+    default: present
+    choices:
+      - present
+      - absent
+
+  project:
+    type: str
+    description:
+      - Project identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example V(par1)).
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - ams2
+      - ams3
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - par3
+      - waw1
+      - EMEA-PL-WAW1
+      - waw2
+      - waw3
+
+  compute_id:
+    type: str
+    description:
+      - ID of the compute instance (see M(community.general.scaleway_compute)).
+    required: true
+
+  private_network_id:
+    type: str
+    description:
+      - ID of the private network (see M(community.general.scaleway_private_network)).
+    required: true
+"""
+
+EXAMPLES = r"""
+- name: Plug a VM into a private network
+  community.general.scaleway_compute_private_network:
+    project: '{{ scw_project }}'
+    state: present
+    region: par1
+    compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89"
+    private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89"
+  register: nicsvpc_creation_task
+
+- name: Unplug a VM from a private network
+  community.general.scaleway_compute_private_network:
+    project: '{{ scw_project }}'
+    state: absent
+    region: par1
+    compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89"
+    private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89"
+"""
+
+RETURN = r"""
+scaleway_compute_private_network:
+  description: Information on the VPC.
+  returned: success when O(state=present)
+  type: dict
+  sample:
+    {
+      "created_at": "2022-01-15T11:11:12.676445Z",
+      "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+      "name": "network",
+      "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+      "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+      "tags": [
+        "tag1",
+        "tag2",
+        "tag3",
+        "tag4",
+        "tag5"
+      ],
+      "updated_at": "2022-01-15T11:12:04.624837Z",
+      "zone": "fr-par-2"
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_nics_info(api, compute_id, private_network_id):
+
+    response = api.get('servers/' + compute_id + '/private_nics')
+    if not response.ok:
+        msg = "Error while getting server information: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json)
+        api.module.fail_json(msg=msg)
+
+    for nic in response.json['private_nics']:
+        if nic['private_network_id'] == private_network_id:
+            return nic
+
+    return None
+
+
+def present_strategy(api, compute_id, private_network_id):
+
+    changed = False
+    nic = get_nics_info(api, compute_id, private_network_id)
+    if nic is not None:
+        return changed, nic
+
+    data = {"private_network_id": private_network_id}
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "a private network would be added to the server"}
+
+    response = api.post(path='servers/' + compute_id + '/private_nics', data=data)
+
+    if not response.ok:
+        api.module.fail_json(msg='Error when adding a private network to a server [{0}: {1}]'.format(response.status_code, response.json))
+
+    return changed, response.json
+
+
+def absent_strategy(api, compute_id,
private_network_id): + + changed = False + nic = get_nics_info(api, compute_id, private_network_id) + if nic is None: + return changed, {} + + changed = True + if api.module.check_mode: + return changed, {"status": "private network would be destroyed"} + + response = api.delete('servers/' + compute_id + '/private_nics/' + nic['id']) + + if not response.ok: + api.module.fail_json(msg='Error deleting private network from server [{0}: {1}]'.format( + response.status_code, response.json)) + + return changed, response.json + + +def core(module): + + compute_id = module.params['compute_id'] + pn_id = module.params['private_network_id'] + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, compute_id=compute_id, private_network_id=pn_id) + else: + changed, summary = present_strategy(api=api, compute_id=compute_id, private_network_id=pn_id) + module.exit_json(changed=changed, scaleway_compute_private_network=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + project=dict(required=True), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + compute_id=dict(required=True), + private_network_id=dict(required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py new file mode 100644 index 0000000000..2281c555c1 --- /dev/null +++ b/plugins/modules/scaleway_container.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# +# Scaleway Serverless container management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container +short_description: Scaleway Container management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the container. + default: present + choices: + - present + - absent + + namespace_id: + type: str + description: + - Container namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true + + description: + description: + - Description of the container namespace. + type: str + default: '' + + min_scale: + description: + - Minimum number of replicas for the container. + type: int + + max_scale: + description: + - Maximum number of replicas for the container. 
+ type: int + + environment_variables: + description: + - Environment variables of the container namespace. + - Injected in container at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the container namespace. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in container at runtime. + type: dict + default: {} + + cpu_limit: + description: + - Resources define performance characteristics of your container. + - They are allocated to your container at runtime. + - Unit is 1/1000 of a VCPU. + type: int + version_added: 11.3.0 + + memory_limit: + description: + - Resources define performance characteristics of your container. + - They are allocated to your container at runtime. + - Unit is MB of memory. + type: int + + container_timeout: + description: + - The length of time your handler can spend processing a request before being stopped. + type: str + + privacy: + description: + - Privacy policies define whether a container can be executed anonymously. + - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism + provided by the Scaleway API. + type: str + default: public + choices: + - public + - private + + registry_image: + description: + - The name of image used for the container. + type: str + required: true + + max_concurrency: + description: + - Maximum number of connections per container. + - This parameter is used to trigger autoscaling. + type: int + + protocol: + description: + - Communication protocol of the container. + type: str + default: http1 + choices: + - http1 + - h2c + + port: + description: + - Listen port used to expose the container. + type: int + + redeploy: + description: + - Redeploy the container if update is required. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Create a container + community.general.scaleway_container: + namespace_id: '{{ scw_container_namespace }}' + state: present + region: fr-par + name: my-awesome-container + registry_image: rg.fr-par.scw.cloud/funcscwtestrgy2f9zw/nginx:latest + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: container_creation_task + +- name: Make sure container is deleted + community.general.scaleway_container: + namespace_id: '{{ scw_container_namespace }}' + state: absent + region: fr-par + name: my-awesome-container +""" + +RETURN = r""" +container: + description: The container information. 
+ returned: when O(state=present) + type: dict + sample: + cpu_limit: 140 + description: Container used for testing scaleway_container ansible module + domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + http_option: "" + id: c9070eb0-d7a4-48dd-9af3-4fb139890721 + max_concurrency: 50 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: cn-ansible-test + namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69 + port: 80 + privacy: public + protocol: http1 + region: fr-par + registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "created", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "min_scale", + "max_scale", + "environment_variables", + "cpu_limit", + "memory_limit", + "timeout", + "privacy", + "registry_image", + "max_concurrency", + "protocol", + "port", + "secret_environment_variables" +) + + +def payload_from_wished_cn(wished_cn): + payload = { + "namespace_id": wished_cn["namespace_id"], + "name": wished_cn["name"], + "description": wished_cn["description"], + "min_scale": wished_cn["min_scale"], + "max_scale": wished_cn["max_scale"], + "environment_variables": wished_cn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]), + "cpu_limit": wished_cn["cpu_limit"], + "memory_limit": wished_cn["memory_limit"], + "timeout": wished_cn["timeout"], + "privacy": wished_cn["privacy"], + "registry_image": wished_cn["registry_image"], + "max_concurrency": wished_cn["max_concurrency"], + "protocol": wished_cn["protocol"], + "port": wished_cn["port"], + "redeploy": wished_cn["redeploy"] + } + + return payload + + +def absent_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("containers") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + return changed, {} + + target_cn = cn_lookup[wished_cn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("containers") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + payload_cn = payload_from_wished_cn(wished_cn) + + if wished_cn["name"] not in cn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container would be created."} + + # Creation doesn't support `redeploy` parameter + del payload_cn["redeploy"] + + # 
Create container + api.warn(payload_cn) + creation_response = api.post(path=api.api_path, + data=payload_cn) + + if not creation_response.ok: + msg = "Error during container creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cn = cn_lookup[wished_cn["name"]] + decoded_target_cn = deepcopy(target_cn) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], + payload_cn["secret_environment_variables"]) + patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, + wished=payload_cn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cn + + changed = True + if api.module.check_mode: + return changed, {"status": "Container attributes would be changed."} + + cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"], + data=patch_payload) + + if not cn_patch_response.ok: + api.module.fail_json(msg='Error during container attributes update: [{0}: {1}]'.format( + cn_patch_response.status_code, cn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_container = { + "state": module.params["state"], + "namespace_id": module.params["namespace_id"], + "name": module.params["name"], + "description": module.params['description'], + "min_scale": module.params["min_scale"], + "max_scale": module.params["max_scale"], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'], + "cpu_limit": module.params["cpu_limit"], + "memory_limit": module.params["memory_limit"], + "timeout": module.params["container_timeout"], + "privacy": module.params["privacy"], + "registry_image": module.params["registry_image"], + "max_concurrency": module.params["max_concurrency"], + "protocol": module.params["protocol"], + "port": module.params["port"], + "redeploy": module.params["redeploy"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/containers" % region + + changed, summary = state_strategy[wished_container["state"]](api=api, wished_cn=wished_container) + + module.exit_json(changed=changed, container=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + min_scale=dict(type='int'), + max_scale=dict(type='int'), + cpu_limit=dict(type='int'), + memory_limit=dict(type='int'), + container_timeout=dict(type='str'), + 
privacy=dict(type='str', default='public', choices=['public', 'private']), + registry_image=dict(type='str', required=True), + max_concurrency=dict(type='int'), + protocol=dict(type='str', default='http1', choices=['http1', 'h2c']), + port=dict(type='int'), + redeploy=dict(type='bool', default=False), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py new file mode 100644 index 0000000000..fd729bf336 --- /dev/null +++ b/plugins/modules/scaleway_container_info.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# +# Scaleway Serverless container info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_info +short_description: Retrieve information on Scaleway Container +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module return information about a container on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + namespace_id: + type: str + description: + - Container namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container. + required: true +""" + +EXAMPLES = r""" +- name: Get a container info + community.general.scaleway_container_info: + namespace_id: '{{ scw_container_namespace }}' + region: fr-par + name: my-awesome-container + register: container_info_task +""" + +RETURN = r""" +container: + description: The container information. 
+ returned: always + type: dict + sample: + cpu_limit: 140 + description: Container used for testing scaleway_container ansible module + domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + http_option: "" + id: c9070eb0-d7a4-48dd-9af3-4fb139890721 + max_concurrency: 50 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: cn-ansible-test + namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69 + port: 80 + privacy: public + protocol: http1 + region: fr-par + registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("containers") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container lookup: Unable to find container named '%s' in namespace '%s'" % (wished_cn["name"], + wished_cn["namespace_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container = { + "namespace_id": module.params["namespace_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/containers" % region + + summary = info_strategy(api=api, wished_cn=wished_container) + + module.exit_json(changed=False, container=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py new file mode 100644 index 0000000000..2d76f75315 --- /dev/null +++ b/plugins/modules/scaleway_container_namespace.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# +# Scaleway Serverless container namespace management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_namespace +short_description: Scaleway Container namespace management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container namespaces on Scaleway account. 
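
Aside: present_strategy in the namespace and container modules looks the resource up by name, creates it when missing, and otherwise derives a PATCH payload restricted to MUTABLE_ATTRIBUTES through the resource_attributes_should_be_changed helper (see present_strategy later in this file). The sketch below shows the general idea of that diffing step under the assumption that it reduces to a per-key comparison; build_patch_payload is a hypothetical stand-in, not the collection's helper.

# Illustrative attribute diff: emit only the keys whose wished value differs.
MUTABLE = ("description", "environment_variables")


def build_patch_payload(target, wished, mutable=MUTABLE):
    return {
        key: wished[key]
        for key in mutable
        if key in target and key in wished and target[key] != wished[key]
    }


if __name__ == "__main__":
    current = {"description": "", "environment_variables": {"A": "1"}}
    wanted = {"description": "demo", "environment_variables": {"A": "1"}}
    print(build_patch_payload(current, wanted))  # {'description': 'demo'}

An empty payload means the resource is already in the wished state, so the module can return changed=False without issuing a PATCH.
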
+extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the container namespace. + default: present + choices: + - present + - absent + + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true + + description: + description: + - Description of the container namespace. + type: str + default: '' + + environment_variables: + description: + - Environment variables of the container namespace. + - Injected in containers at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the container namespace. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in containers at runtime. + type: dict + default: {} +""" + +EXAMPLES = r""" +- name: Create a container namespace + community.general.scaleway_container_namespace: + project_id: '{{ scw_project }}' + state: present + region: fr-par + name: my-awesome-container-namespace + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: container_namespace_creation_task + +- name: Make sure container namespace is deleted + community.general.scaleway_container_namespace: + project_id: '{{ scw_project }}' + state: absent + region: fr-par + name: my-awesome-container-namespace +""" + +RETURN = r""" +container_namespace: + description: The container namespace information. 
+ returned: when O(state=present) + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: null + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-container-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, + resource_attributes_should_be_changed, SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "environment_variables", + "secret_environment_variables" +) + + +def payload_from_wished_cn(wished_cn): + payload = { + "project_id": wished_cn["project_id"], + "name": wished_cn["name"], + "description": wished_cn["description"], + "environment_variables": wished_cn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + return changed, {} + + target_cn = cn_lookup[wished_cn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container namespace would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container namespace [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + payload_cn = payload_from_wished_cn(wished_cn) + + if wished_cn["name"] not in cn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container namespace would be created."} + + # Create container namespace + api.warn(payload_cn) + creation_response = api.post(path=api.api_path, + data=payload_cn) + + if not creation_response.ok: + msg = "Error during container namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cn = cn_lookup[wished_cn["name"]] + decoded_target_cn = deepcopy(target_cn) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], + payload_cn["secret_environment_variables"]) + patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, + 
wished=payload_cn,
+                                                          verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+                                                          mutable_attributes=MUTABLE_ATTRIBUTES)
+
+    if not patch_payload:
+        return changed, target_cn
+
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "Container namespace attributes would be changed."}
+
+    cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"],
+                                  data=patch_payload)
+
+    if not cn_patch_response.ok:
+        api.module.fail_json(msg='Error during container namespace attributes update: [{0}: {1}]'.format(
+            cn_patch_response.status_code, cn_patch_response.json['message']))
+
+    api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES)
+    response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+    return changed, response.json
+
+
+state_strategy = {
+    "present": present_strategy,
+    "absent": absent_strategy
+}
+
+
+def core(module):
+    SecretVariables.ensure_scaleway_secret_package(module)
+
+    region = module.params["region"]
+    wished_container_namespace = {
+        "state": module.params["state"],
+        "project_id": module.params["project_id"],
+        "name": module.params["name"],
+        "description": module.params['description'],
+        "environment_variables": module.params['environment_variables'],
+        "secret_environment_variables": module.params['secret_environment_variables']
+    }
+
+    api = Scaleway(module=module)
+    api.api_path = "containers/v1beta1/regions/%s/namespaces" % region
+
+    changed, summary = state_strategy[wished_container_namespace["state"]](api=api, wished_cn=wished_container_namespace)
+
+    module.exit_json(changed=changed, container_namespace=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(scaleway_waitable_resource_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        project_id=dict(type='str', required=True),
+        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+        name=dict(type='str', required=True),
+        description=dict(type='str', default=''),
+        environment_variables=dict(type='dict', default={}),
+        secret_environment_variables=dict(type='dict', default={}, no_log=True)
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/scaleway_container_namespace_info.py b/plugins/modules/scaleway_container_namespace_info.py
new file mode 100644
index 0000000000..efd1b3b816
--- /dev/null
+++ b/plugins/modules/scaleway_container_namespace_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+#
+# Scaleway Serverless container namespace info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: scaleway_container_namespace_info
+short_description: Retrieve information on Scaleway Container namespace
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a container namespace on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.scaleway.actiongroup_scaleway
+  - community.general.attributes.info_module
+
+attributes:
+  action_group:
+    version_added: 11.3.0
+
+options:
+  project_id:
+    type: str
+    description:
+      - Project identifier.
+ required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true +""" + +EXAMPLES = r""" +- name: Get a container namespace info + community.general.scaleway_container_namespace_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-container-namespace + register: container_namespace_info_task +""" + +RETURN = r""" +container_namespace: + description: The container namespace information. + returned: always + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-container-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container namespace lookup: Unable to find container namespace named '%s' in project '%s'" % (wished_cn["name"], + wished_cn["project_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container namespace lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=False, container_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py new file mode 100644 index 0000000000..179b9b5ff7 --- /dev/null +++ b/plugins/modules/scaleway_container_registry.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# +# Scaleway Container registry management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_registry +short_description: Scaleway Container registry 
management module +version_added: 5.8.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container registries on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the container registry. + default: present + choices: + - present + - absent + + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container registry. + required: true + + description: + description: + - Description of the container registry. + type: str + default: '' + + privacy_policy: + type: str + description: + - Default visibility policy. + - Everyone can pull images from a V(public) registry. + choices: + - public + - private + default: private +""" + +EXAMPLES = r""" +- name: Create a container registry + community.general.scaleway_container_registry: + project_id: '{{ scw_project }}' + state: present + region: fr-par + name: my-awesome-container-registry + register: container_registry_creation_task + +- name: Make sure container registry is deleted + community.general.scaleway_container_registry: + project_id: '{{ scw_project }}' + state: absent + region: fr-par + name: my-awesome-container-registry +""" + +RETURN = r""" +container_registry: + description: The container registry information. 
+ returned: when O(state=present) + type: dict + sample: + created_at: "2022-10-14T09:51:07.949716Z" + description: Managed by Ansible + endpoint: rg.fr-par.scw.cloud/my-awesome-registry + id: 0d7d5270-7864-49c2-920b-9fd6731f3589 + image_count: 0 + is_public: false + name: my-awesome-registry + organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58 + project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be + region: fr-par + size: 0 + status: ready + status_message: "" + updated_at: "2022-10-14T09:51:07.949716Z" +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "is_public" +) + + +def payload_from_wished_cr(wished_cr): + payload = { + "project_id": wished_cr["project_id"], + "name": wished_cr["name"], + "description": wished_cr["description"], + "is_public": wished_cr["privacy_policy"] == "public" + } + + return payload + + +def absent_strategy(api, wished_cr): + changed = False + + cr_list = api.fetch_all_resources("namespaces") + cr_lookup = {cr["name"]: cr for cr in cr_list} + + if wished_cr["name"] not in cr_lookup: + return changed, {} + + target_cr = cr_lookup[wished_cr["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container registry would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cr["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container registry [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cr): + changed = False + + cr_list = api.fetch_all_resources("namespaces") + cr_lookup = {cr["name"]: cr for cr in cr_list} + + payload_cr = payload_from_wished_cr(wished_cr) + + if wished_cr["name"] not in cr_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container registry would be created."} + + # Create container registry + api.warn(payload_cr) + creation_response = api.post(path=api.api_path, + data=payload_cr) + + if not creation_response.ok: + msg = "Error during container registry creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cr = cr_lookup[wished_cr["name"]] + patch_payload = resource_attributes_should_be_changed(target=target_cr, + wished=payload_cr, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cr + + changed = True + if api.module.check_mode: + return changed, {"status": "Container registry attributes would be changed."} + + cr_patch_response = api.patch(path=api.api_path + "/%s" % target_cr["id"], + data=patch_payload) + + if not cr_patch_response.ok: + api.module.fail_json(msg='Error during container registry attributes update: [{0}: 
{1}]'.format( + cr_patch_response.status_code, cr_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_cr["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + region = module.params["region"] + wished_container_registry = { + "state": module.params["state"], + "project_id": module.params["project_id"], + "name": module.params["name"], + "description": module.params['description'], + "privacy_policy": module.params['privacy_policy'] + } + + api = Scaleway(module=module) + api.api_path = "registry/v1/regions/%s/namespaces" % region + + changed, summary = state_strategy[wished_container_registry["state"]](api=api, wished_cr=wished_container_registry) + + module.exit_json(changed=changed, container_registry=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + privacy_policy=dict(type='str', default='private', choices=['public', 'private']) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_registry_info.py b/plugins/modules/scaleway_container_registry_info.py new file mode 100644 index 0000000000..6dac97234b --- /dev/null +++ b/plugins/modules/scaleway_container_registry_info.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# +# Scaleway Serverless container registry info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_registry_info +short_description: Scaleway Container registry info module +version_added: 5.8.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module return information about a container registry on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container registry. + required: true +""" + +EXAMPLES = r""" +- name: Get a container registry info + community.general.scaleway_container_registry_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-container-registry + register: container_registry_info_task +""" + +RETURN = r""" +container_registry: + description: The container registry information. 
+ returned: always + type: dict + sample: + created_at: "2022-10-14T09:51:07.949716Z" + description: Managed by Ansible + endpoint: rg.fr-par.scw.cloud/my-awesome-registry + id: 0d7d5270-7864-49c2-920b-9fd6731f3589 + image_count: 0 + is_public: false + name: my-awesome-registry + organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58 + project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be + region: fr-par + size: 0 + status: ready + status_message: "" + updated_at: "2022-10-14T09:51:07.949716Z" +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container registries lookup: Unable to find container registry named '%s' in project '%s'" % (wished_cn["name"], + wished_cn["project_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container registry lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "registry/v1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=False, container_registry=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py deleted file mode 120000 index 6681f78c1a..0000000000 --- a/plugins/modules/scaleway_database_backup.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_database_backup.py \ No newline at end of file diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py new file mode 100644 index 0000000000..33497c41a9 --- /dev/null +++ b/plugins/modules/scaleway_database_backup.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# +# Scaleway database backups management module +# +# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_database_backup +short_description: Scaleway database backups management module +version_added: 1.2.0 +author: Guillaume Rodriguez (@guillaume_ro_fr) +description: + - This module manages database backups on Scaleway account U(https://developer.scaleway.com). 
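
Aside: like most modules in this diff, this one polls the resource until it leaves its transition states, computing a deadline from now() plus a timedelta and sleeping between attempts (see wait_to_complete_state_transition further down). A compact, runnable sketch of that poll-until-stable loop; wait_for_stable and the simulated status feed are illustrative, not the module's code.

import datetime
import time


def wait_for_stable(fetch_status, stable_states, timeout=300, sleep=3):
    # Poll fetch_status() until it reports a stable state or the deadline passes.
    deadline = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=timeout)
    while datetime.datetime.now(datetime.timezone.utc) < deadline:
        status = fetch_status()
        if status in stable_states:
            return status
        time.sleep(sleep)
    raise TimeoutError("resource did not reach a stable state within %ss" % timeout)


if __name__ == "__main__":
    states = iter(["creating", "creating", "ready"])
    print(wait_for_stable(lambda: next(states), ("ready", "error"), timeout=10, sleep=0))  # ready
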
+extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + description: + - Indicate desired state of the database backup. + - V(present) creates a backup. + - V(absent) deletes the backup. + - V(exported) creates a download link for the backup. + - V(restored) restores the backup to a new database. + type: str + default: present + choices: + - present + - absent + - exported + - restored + + region: + description: + - Scaleway region to use (for example V(fr-par)). + type: str + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + id: + description: + - UUID used to identify the database backup. + - Required for V(absent), V(exported) and V(restored) states. + type: str + + name: + description: + - Name used to identify the database backup. + - Required for V(present) state. + - Ignored when O(state=absent), O(state=exported) or O(state=restored). + type: str + required: false + + database_name: + description: + - Name used to identify the database. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). + type: str + required: false + + instance_id: + description: + - UUID of the instance associated to the database backup. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). + type: str + required: false + + expires_at: + description: + - Expiration datetime of the database backup (ISO 8601 format). + - Ignored when O(state=absent), O(state=exported) or O(state=restored). + type: str + required: false + + wait: + description: + - Wait for the backup to reach its desired state before returning. + type: bool + default: false + + wait_timeout: + description: + - Time to wait for the backup to reach the expected state. + type: int + required: false + default: 300 + + wait_sleep_time: + description: + - Time to wait before every attempt to check the state of the backup. + type: int + required: false + default: 3 +""" + +EXAMPLES = r""" +- name: Create a backup + community.general.scaleway_database_backup: + name: 'my_backup' + state: present + region: 'fr-par' + database_name: 'my-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' + +- name: Export a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: exported + region: 'fr-par' + +- name: Restore a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: restored + region: 'fr-par' + database_name: 'my-new-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' + +- name: Remove a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: absent + region: 'fr-par' +""" + +RETURN = r""" +metadata: + description: Backup metadata.
+ returned: when O(state=present), O(state=exported), or O(state=restored) + type: dict + sample: + { + "metadata": { + "created_at": "2020-08-06T12:42:05.631049Z", + "database_name": "my-database", + "download_url": null, + "download_url_expires_at": null, + "expires_at": null, + "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", + "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", + "instance_name": "my-instance", + "name": "backup_name", + "region": "fr-par", + "size": 600000, + "status": "ready", + "updated_at": "2020-08-06T12:42:10.581649Z" + } + } +""" + +import datetime +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + scaleway_argument_spec, + SCALEWAY_REGIONS, +) + +stable_states = ( + 'ready', + 'deleting', +) + + +def wait_to_complete_state_transition(module, account_api, backup=None): + wait_timeout = module.params['wait_timeout'] + wait_sleep_time = module.params['wait_sleep_time'] + + if backup is None or backup['status'] in stable_states: + return backup + + start = now() + end = start + datetime.timedelta(seconds=wait_timeout) + while now() < end: + module.debug('We are going to wait for the backup to finish its transition') + + response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) + if not response.ok: + module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json)) + break + response_json = response.json + + if response_json['status'] in stable_states: + module.debug('It seems that the backup is not in transition anymore.') + module.debug('Backup in state: %s' % response_json['status']) + return response_json + time.sleep(wait_sleep_time) + else: + module.fail_json(msg='Backup takes too long to finish its transition') + + +def present_strategy(module, account_api, backup): + name = module.params['name'] + database_name = module.params['database_name'] + instance_id = module.params['instance_id'] + expiration_date = module.params['expires_at'] + + if backup is not None: + if (backup['name'] == name or name is None) and ( + backup['expires_at'] == expiration_date or expiration_date is None): + wait_to_complete_state_transition(module, account_api, backup) + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + payload = {} + if name is not None: + payload['name'] = name + if expiration_date is not None: + payload['expires_at'] = expiration_date + + response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']), + payload) + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json)) + + if module.check_mode: + module.exit_json(changed=True) + + payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id} + if expiration_date is not None: + payload['expires_at'] = expiration_date + + response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload) + + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error creating backup [{0}: 
{1}]'.format(response.status_code, response.json)) + + +def absent_strategy(module, account_api, backup): + if backup is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json)) + + +def exported_strategy(module, account_api, backup): + if backup is None: + module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) + + if backup['download_url'] is not None: + module.exit_json(changed=False, metadata=backup) + + if module.check_mode: + module.exit_json(changed=True) + + backup = wait_to_complete_state_transition(module, account_api, backup) + response = account_api.post( + '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {}) + + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json)) + + +def restored_strategy(module, account_api, backup): + if backup is None: + module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) + + database_name = module.params['database_name'] + instance_id = module.params['instance_id'] + + if module.check_mode: + module.exit_json(changed=True) + + backup = wait_to_complete_state_transition(module, account_api, backup) + + payload = {'database_name': database_name, 'instance_id': instance_id} + response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']), + payload) + + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json)) + + +state_strategy = { + 'present': present_strategy, + 'absent': absent_strategy, + 'exported': exported_strategy, + 'restored': restored_strategy, +} + + +def core(module): + state = module.params['state'] + backup_id = module.params['id'] + + account_api = Scaleway(module) + + if backup_id is None: + backup_by_id = None + else: + response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id)) + status_code = response.status_code + backup_json = response.json + backup_by_id = None + if status_code == 404: + backup_by_id = None + elif response.ok: + backup_by_id = backup_json + else: + module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message'])) + + state_strategy[state](module, account_api, backup_by_id) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']), + region=dict(required=True, choices=SCALEWAY_REGIONS), + id=dict(), + name=dict(type='str'), + database_name=dict(), + instance_id=dict(), + expires_at=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + wait_sleep_time=dict(type='int', default=3), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + 
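# Editorial note (reviewer sketch, not upstream text): the validation + # lists below let AnsibleModule reject incomplete parameter sets before + # core() runs, for example state=restored without id, database_name + # and instance_id. +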
required_together=[ + ['database_name', 'instance_id'], + ], + required_if=[ + ['state', 'present', ['name', 'database_name', 'instance_id']], + ['state', 'absent', ['id']], + ['state', 'exported', ['id']], + ['state', 'restored', ['id', 'database_name', 'instance_id']], + ], + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py new file mode 100644 index 0000000000..a8d0680c71 --- /dev/null +++ b/plugins/modules/scaleway_function.py @@ -0,0 +1,394 @@ +#!/usr/bin/python +# +# Scaleway Serverless function management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function +short_description: Scaleway Function management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages functions on a Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the function. + default: present + choices: + - present + - absent + + namespace_id: + type: str + description: + - Function namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function. + required: true + + description: + description: + - Description of the function. + type: str + default: '' + + min_scale: + description: + - Minimum number of replicas for the function. + type: int + + max_scale: + description: + - Maximum number of replicas for the function. + type: int + + environment_variables: + description: + - Environment variables of the function. + - Injected in the function at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the function. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in the function at runtime. + type: dict + default: {} + + runtime: + description: + - Runtime of the function. + - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available + runtimes. + type: str + required: true + + memory_limit: + description: + - Resources define performance characteristics of your function. + - They are allocated to your function at runtime. + type: int + + function_timeout: + description: + - The length of time your handler can spend processing a request before being stopped. + type: str + + handler: + description: + - The C(module-name.export) value in your function. + type: str + + privacy: + description: + - Privacy policies define whether a function can be executed anonymously. + - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism + provided by the Scaleway API.
+ type: str + default: public + choices: + - public + - private + + redeploy: + description: + - Redeploy the function if update is required. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Create a function + community.general.scaleway_function: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + state: present + name: my-awesome-function + runtime: python3 + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: function_creation_task + +- name: Make sure function is deleted + community.general.scaleway_function: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + state: absent + name: my-awesome-function +""" + +RETURN = r""" +function: + description: The function information. + returned: when O(state=present) + type: dict + sample: + cpu_limit: 140 + description: Function used for testing scaleway_function ansible module + domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + handler: handler.handle + http_option: "" + id: ceb64dc4-4464-4196-8e20-ecef705475d3 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: fn-ansible-test + namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d + privacy: public + region: fr-par + runtime: python310 + runtime_message: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "created", + "absent" +) + +VERIFIABLE_MUTABLE_ATTRIBUTES = ( + "description", + "min_scale", + "max_scale", + "environment_variables", + "runtime", + "memory_limit", + "timeout", + "handler", + "privacy", + "secret_environment_variables" +) + +MUTABLE_ATTRIBUTES = VERIFIABLE_MUTABLE_ATTRIBUTES + ( + "redeploy", +) + + +def payload_from_wished_fn(wished_fn): + payload = { + "namespace_id": wished_fn["namespace_id"], + "name": wished_fn["name"], + "description": wished_fn["description"], + "min_scale": wished_fn["min_scale"], + "max_scale": wished_fn["max_scale"], + "runtime": wished_fn["runtime"], + "memory_limit": wished_fn["memory_limit"], + "timeout": wished_fn["timeout"], + "handler": wished_fn["handler"], + "privacy": wished_fn["privacy"], + "redeploy": wished_fn["redeploy"], + "environment_variables": wished_fn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("functions") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + return changed, {} + + target_fn = fn_lookup[wished_fn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Function would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting 
function [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("functions") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + payload_fn = payload_from_wished_fn(wished_fn) + + if wished_fn["name"] not in fn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A function would be created."} + + # Creation doesn't support `redeploy` parameter + del payload_fn["redeploy"] + + # Create function + api.warn(payload_fn) + creation_response = api.post(path=api.api_path, + data=payload_fn) + + if not creation_response.ok: + msg = "Error during function creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_fn = fn_lookup[wished_fn["name"]] + decoded_target_fn = deepcopy(target_fn) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], + payload_fn["secret_environment_variables"]) + + patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn, + wished=payload_fn, + verifiable_mutable_attributes=VERIFIABLE_MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_fn + + changed = True + if api.module.check_mode: + return changed, {"status": "Function attributes would be changed."} + + fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"], + data=patch_payload) + + if not fn_patch_response.ok: + api.module.fail_json(msg='Error during function attributes update: [{0}: {1}]'.format( + fn_patch_response.status_code, fn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_function = { + "state": module.params["state"], + "namespace_id": module.params["namespace_id"], + "name": module.params["name"], + "description": module.params['description'], + "min_scale": module.params['min_scale'], + "max_scale": module.params['max_scale'], + "runtime": module.params["runtime"], + "memory_limit": module.params["memory_limit"], + "timeout": module.params["function_timeout"], + "handler": module.params["handler"], + "privacy": module.params["privacy"], + "redeploy": module.params["redeploy"], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/functions" % region + + changed, summary = state_strategy[wished_function["state"]](api=api, wished_fn=wished_function) + + module.exit_json(changed=changed, function=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + 
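# Editorial note (reviewer sketch, not upstream text): the base Scaleway + # argument spec (API token, endpoint, and so on) is extended first with + # the shared wait/wait_timeout/wait_sleep_time spec, then with the + # module-specific options below. +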
argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + min_scale=dict(type='int'), + max_scale=dict(type='int'), + runtime=dict(type='str', required=True), + memory_limit=dict(type='int'), + function_timeout=dict(type='str'), + handler=dict(type='str'), + privacy=dict(type='str', default='public', choices=['public', 'private']), + redeploy=dict(type='bool', default=False), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function_info.py b/plugins/modules/scaleway_function_info.py new file mode 100644 index 0000000000..d41e45fba2 --- /dev/null +++ b/plugins/modules/scaleway_function_info.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# +# Scaleway Serverless function info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function_info +short_description: Retrieve information on Scaleway Function +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module returns information about a function on a Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + namespace_id: + type: str + description: + - Function namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function. + required: true +""" + +EXAMPLES = r""" +- name: Get a function info + community.general.scaleway_function_info: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + name: my-awesome-function + register: function_info_task +""" + +RETURN = r""" +function: + description: The function information.
+ returned: always + type: dict + sample: + cpu_limit: 140 + description: Function used for testing scaleway_function ansible module + domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: + handler: handler.handle + http_option: "" + id: ceb64dc4-4464-4196-8e20-ecef705475d3 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: fn-ansible-test + namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d + privacy: public + region: fr-par + runtime: python310 + runtime_message: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_fn): + fn_list = api.fetch_all_resources("functions") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + msg = "Error during function lookup: Unable to find function named '%s' in namespace '%s'" % (wished_fn["name"], + wished_fn["namespace_id"]) + + api.module.fail_json(msg=msg) + + target_fn = fn_lookup[wished_fn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + msg = "Error during function lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_function = { + "namespace_id": module.params["namespace_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/functions" % region + + summary = info_strategy(api=api, wished_fn=wished_function) + + module.exit_json(changed=False, function=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py new file mode 100644 index 0000000000..b5600831b5 --- /dev/null +++ b/plugins/modules/scaleway_function_namespace.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# +# Scaleway Serverless function namespace management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function_namespace +short_description: Scaleway Function namespace management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages function namespaces on Scaleway account. 
+extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the function namespace. + default: present + choices: + - present + - absent + + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function namespace. + required: true + + description: + description: + - Description of the function namespace. + type: str + default: '' + + environment_variables: + description: + - Environment variables of the function namespace. + - Injected in functions at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the function namespace. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in functions at runtime. + type: dict + default: {} +""" + +EXAMPLES = r""" +- name: Create a function namespace + community.general.scaleway_function_namespace: + project_id: '{{ scw_project }}' + state: present + region: fr-par + name: my-awesome-function-namespace + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: function_namespace_creation_task + +- name: Make sure function namespace is deleted + community.general.scaleway_function_namespace: + project_id: '{{ scw_project }}' + state: absent + region: fr-par + name: my-awesome-function-namespace +""" + +RETURN = r""" +function_namespace: + description: The function namespace information. 
+ returned: when O(state=present) + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-function-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "environment_variables", + "secret_environment_variables", +) + + +def payload_from_wished_fn(wished_fn): + payload = { + "project_id": wished_fn["project_id"], + "name": wished_fn["name"], + "description": wished_fn["description"], + "environment_variables": wished_fn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + return changed, {} + + target_fn = fn_lookup[wished_fn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Function namespace would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting function namespace [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + payload_fn = payload_from_wished_fn(wished_fn) + + if wished_fn["name"] not in fn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A function namespace would be created."} + + # Create function namespace + api.warn(payload_fn) + creation_response = api.post(path=api.api_path, + data=payload_fn) + + if not creation_response.ok: + msg = "Error during function namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_fn = fn_lookup[wished_fn["name"]] + decoded_target_fn = deepcopy(target_fn) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], + payload_fn["secret_environment_variables"]) + + patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn, + 
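# Editorial note (reviewer sketch, not upstream text): unlike + # scaleway_function, every mutable attribute here is also verifiable, + # so the same tuple serves both parameters below. +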
wished=payload_fn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_fn + + changed = True + if api.module.check_mode: + return changed, {"status": "Function namespace attributes would be changed."} + + fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"], + data=patch_payload) + + if not fn_patch_response.ok: + api.module.fail_json(msg='Error during function namespace attributes update: [{0}: {1}]'.format( + fn_patch_response.status_code, fn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_function_namespace = { + "state": module.params["state"], + "project_id": module.params["project_id"], + "name": module.params["name"], + "description": module.params['description'], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/namespaces" % region + + changed, summary = state_strategy[wished_function_namespace["state"]](api=api, wished_fn=wished_function_namespace) + + module.exit_json(changed=changed, function_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function_namespace_info.py b/plugins/modules/scaleway_function_namespace_info.py new file mode 100644 index 0000000000..89c0fdfa61 --- /dev/null +++ b/plugins/modules/scaleway_function_namespace_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# +# Scaleway Serverless function namespace info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function_namespace_info +short_description: Retrieve information on Scaleway Function namespace +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module returns information about a function namespace on a Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + project_id: + type: str + description: + - Project identifier.
+ required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function namespace. + required: true +""" + +EXAMPLES = r""" +- name: Get a function namespace info + community.general.scaleway_function_namespace_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-function-namespace + register: function_namespace_info_task +""" + +RETURN = r""" +function_namespace: + description: The function namespace information. + returned: always + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-function-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_fn): + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + msg = "Error during function namespace lookup: Unable to find function namespace named '%s' in project '%s'" % (wished_fn["name"], + wished_fn["project_id"]) + + api.module.fail_json(msg=msg) + + target_fn = fn_lookup[wished_fn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + msg = "Error during function namespace lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_function_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_fn=wished_function_namespace) + + module.exit_json(changed=False, function_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py deleted file mode 120000 index 6f90f0cd72..0000000000 --- a/plugins/modules/scaleway_image_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_image_info.py \ No newline at end of file diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py new file mode 100644 index 0000000000..9cffb1aca0 --- /dev/null +++ b/plugins/modules/scaleway_image_info.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_image_info +short_description: Gather information about the Scaleway images available +description: + - Gather information about the Scaleway images available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway compute zone. + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway images information + community.general.scaleway_image_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_image_info }}" +""" + +RETURN = r""" +scaleway_image_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' + returned: success + type: list + elements: dict + sample: + [ + { + "arch": "x86_64", + "creation_date": "2018-07-17T16:18:49.276456+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": false, + "dtb": "", + "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.9.93 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", + "modification_date": "2018-07-17T16:42:06.319315+00:00", + "name": "Debian Stretch", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", + "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) + + +class ScalewayImageInfo(Scaleway): + + def __init__(self, module): + super(ScalewayImageInfo, self).__init__(module) + self.name = 'images' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_image_info=ScalewayImageInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py deleted file mode 120000 index 4677c01b1c..0000000000 --- a/plugins/modules/scaleway_ip.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_ip.py \ No 
newline at end of file diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py new file mode 100644 index 0000000000..0edf8f3d31 --- /dev/null +++ b/plugins/modules/scaleway_ip.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# +# Scaleway IP management module +# +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_ip +short_description: Scaleway IP management module +author: Remy Leone (@remyleone) +description: + - This module manages IPs on a Scaleway account U(https://developer.scaleway.com). +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the IP. + default: present + choices: + - present + - absent + + organization: + type: str + description: + - Scaleway organization identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 + + id: + type: str + description: + - ID of the Scaleway IP (UUID). + server: + type: str + description: + - ID of the server you want to attach an IP to. + - To detach the IP from any server, do not specify this option. + reverse: + type: str + description: + - Reverse DNS to assign to the IP. +""" + +EXAMPLES = r""" +- name: Create an IP + community.general.scaleway_ip: + organization: '{{ scw_org }}' + state: present + region: par1 + register: ip_creation_task + +- name: Make sure IP is deleted + community.general.scaleway_ip: + id: '{{ ip_creation_task.scaleway_ip.id }}' + state: absent + region: par1 +""" + +RETURN = r""" +data: + description: This is only present when O(state=present).
+ returned: when O(state=present) + type: dict + sample: + { + "ips": [ + { + "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", + "reverse": null, + "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", + "server": { + "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", + "name": "ansible_tuto-1" + }, + "address": "212.47.232.136" + } + ] + } +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def ip_attributes_should_be_changed(api, target_ip, wished_ip): + patch_payload = {} + + if target_ip["reverse"] != wished_ip["reverse"]: + patch_payload["reverse"] = wished_ip["reverse"] + + # IP is attached to a server + if target_ip["server"] is None and wished_ip["server"]: + patch_payload["server"] = wished_ip["server"] + + # IP is detached from a server + try: + if target_ip["server"]["id"] and wished_ip["server"] is None: + patch_payload["server"] = wished_ip["server"] + except (TypeError, KeyError): + pass + + # IP is migrated between two different servers + try: + if target_ip["server"]["id"] != wished_ip["server"]: + patch_payload["server"] = wished_ip["server"] + except (TypeError, KeyError): + pass + + return patch_payload + + +def payload_from_wished_ip(wished_ip): + return { + k: v + for k, v in wished_ip.items() + if k != 'id' and v is not None + } + + +def present_strategy(api, wished_ip): + changed = False + + response = api.get('ips') + if not response.ok: + api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + ips_list = response.json["ips"] + ip_lookup = {ip["id"]: ip for ip in ips_list} + + if wished_ip["id"] not in ip_lookup.keys(): + changed = True + if api.module.check_mode: + return changed, {"status": "An IP would be created."} + + # Create IP + creation_response = api.post('/ips', + data=payload_from_wished_ip(wished_ip)) + + if not creation_response.ok: + msg = "Error during IP creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + return changed, creation_response.json["ip"] + + target_ip = ip_lookup[wished_ip["id"]] + patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip) + + if not patch_payload: + return changed, target_ip + + changed = True + if api.module.check_mode: + return changed, {"status": "IP attributes would be changed."} + + ip_patch_response = api.patch(path="ips/%s" % target_ip["id"], + data=patch_payload) + + if not ip_patch_response.ok: + api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format( + ip_patch_response.status_code, ip_patch_response.json['message'])) + + return changed, ip_patch_response.json["ip"] + + +def absent_strategy(api, wished_ip): + changed = False + + response = api.get('ips') + if not response.ok: + api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + ips_list = response.json["ips"] + ip_lookup = {ip["id"]: ip for ip in ips_list} + if wished_ip["id"] not in ip_lookup.keys(): + return changed, {} + + changed = True + if api.module.check_mode: + return changed, {"status": "IP would be destroyed"} + + response = api.delete('/ips/' + wished_ip["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format( +
response.status_code, response.json)) + + return changed, response.json + + +def core(module): + wished_ip = { + "organization": module.params['organization'], + "reverse": module.params["reverse"], + "id": module.params["id"], + "server": module.params["server"] + } + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, wished_ip=wished_ip) + else: + changed, summary = present_strategy(api=api, wished_ip=wished_ip) + module.exit_json(changed=changed, scaleway_ip=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + organization=dict(required=True), + server=dict(), + reverse=dict(), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + id=dict() + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py deleted file mode 120000 index 6edffebf1e..0000000000 --- a/plugins/modules/scaleway_ip_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_ip_info.py \ No newline at end of file diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py new file mode 100644 index 0000000000..36196583cf --- /dev/null +++ b/plugins/modules/scaleway_ip_info.py @@ -0,0 +1,120 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_ip_info +short_description: Gather information about the Scaleway IPs available +description: + - Gather information about the Scaleway IPs available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway IPs information + community.general.scaleway_ip_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_ip_info }}" +""" + +RETURN = r""" +scaleway_ip_info: + description: + - Response from Scaleway API. + - For more details please refer to U(https://developers.scaleway.com/en/products/instance/api/). 
+ returned: success + type: list + elements: dict + sample: + [ + { + "address": "163.172.170.243", + "id": "ea081794-a581-8899-8451-386ddaf0a451", + "organization": "3f709602-5e6c-4619-b80c-e324324324af", + "reverse": null, + "server": { + "id": "12f19bc7-109c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayIpInfo(Scaleway): + + def __init__(self, module): + super(ScalewayIpInfo, self).__init__(module) + self.name = 'ips' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_ip_info=ScalewayIpInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py deleted file mode 120000 index 410ff68474..0000000000 --- a/plugins/modules/scaleway_lb.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_lb.py \ No newline at end of file diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py new file mode 100644 index 0000000000..6e0aaa91f1 --- /dev/null +++ b/plugins/modules/scaleway_lb.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# +# Scaleway Load-balancer management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_lb +short_description: Scaleway load-balancer management module +author: Remy Leone (@remyleone) +description: + - This module manages load-balancers on Scaleway. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + + name: + type: str + description: + - Name of the load-balancer. + required: true + + description: + type: str + description: + - Description of the load-balancer. + required: true + + organization_id: + type: str + description: + - Organization identifier. + required: true + + state: + type: str + description: + - Indicate desired state of the instance. + default: present + choices: + - present + - absent + + region: + type: str + description: + - Scaleway zone. + required: true + choices: + - nl-ams + - fr-par + - pl-waw + + tags: + type: list + elements: str + default: [] + description: + - List of tags to apply to the load-balancer. + wait: + description: + - Wait for the load-balancer to reach its desired state before returning. + type: bool + default: false + + wait_timeout: + type: int + description: + - Time to wait for the load-balancer to reach the expected state. 
+ required: false + default: 300 + + wait_sleep_time: + type: int + description: + - Time to wait before every attempt to check the state of the load-balancer. + required: false + default: 3 +""" + +EXAMPLES = r""" +- name: Create a load-balancer + community.general.scaleway_lb: + name: foobar + state: present + organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: fr-par + tags: + - hello + +- name: Delete a load-balancer + community.general.scaleway_lb: + name: foobar + state: absent + organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: fr-par +""" + +RETURN = r""" +scaleway_lb: + description: The load-balancer object. + returned: success + type: dict + sample: + { + "backend_count": 0, + "frontend_count": 0, + "description": "Description of my load-balancer", + "id": "00000000-0000-0000-0000-000000000000", + "instances": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.1", + "region": "fr-par", + "status": "ready" + }, + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.2", + "region": "fr-par", + "status": "ready" + } + ], + "ip": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "192.168.0.1", + "lb_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "organization_id": "00000000-0000-0000-0000-000000000000", + "reverse": "" + } + ], + "name": "lb_ansible_test", + "organization_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "status": "ready", + "tags": [ + "first_tag", + "second_tag" + ] + } +""" + +import datetime +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import now +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "name", + "description" +) + + +def payload_from_wished_lb(wished_lb): + return { + "organization_id": wished_lb["organization_id"], + "name": wished_lb["name"], + "tags": wished_lb["tags"], + "description": wished_lb["description"] + } + + +def fetch_state(api, lb): + api.module.debug("fetch_state of load-balancer: %s" % lb["id"]) + response = api.get(path=api.api_path + "/%s" % lb["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + api.module.fail_json(msg=msg) + + try: + api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"])) + return response.json["status"] + except KeyError: + api.module.fail_json(msg="Could not fetch state in %s" % response.json) + + +def wait_to_complete_state_transition(api, lb, force_wait=False): + wait = api.module.params["wait"] + if not (wait or force_wait): + return + wait_timeout = api.module.params["wait_timeout"] + wait_sleep_time = api.module.params["wait_sleep_time"] + + start = now() + end = start + datetime.timedelta(seconds=wait_timeout) + while now() < end: + api.module.debug("We are going to wait for the load-balancer to finish its transition") + state = fetch_state(api, lb) + if state in STABLE_STATES: + api.module.debug("It seems that the load-balancer is not in transition anymore.") + api.module.debug("load-balancer in state: %s" % state) + break + time.sleep(wait_sleep_time) + else: + api.module.fail_json(msg="Load-balancer takes too long to finish its transition") + + 
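+# Editorial illustration (reviewer sketch, not upstream text): the helper +# below returns every mutable attribute as soon as any one of them differs, +# so the PUT payload always carries the complete mutable set. For example: +# +# lb_attributes_should_be_changed( +# target_lb={"name": "lb-a", "description": "old"}, +# wished_lb={"name": "lb-a", "description": "new"}, +# ) +# returns {"name": "lb-a", "description": "new"}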
+def lb_attributes_should_be_changed(target_lb, wished_lb): + diff = {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]} + + if diff: + return {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES} + + return {} + + +def present_strategy(api, wished_lb): + changed = False + + response = api.get(path=api.api_path) + if not response.ok: + api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + lbs_list = response.json["lbs"] + lb_lookup = {lb["name"]: lb for lb in lbs_list} + + if wished_lb["name"] not in lb_lookup.keys(): + changed = True + if api.module.check_mode: + return changed, {"status": "A load-balancer would be created."} + + # Create Load-balancer + api.warn(payload_from_wished_lb(wished_lb)) + creation_response = api.post(path=api.api_path, + data=payload_from_wished_lb(wished_lb)) + + if not creation_response.ok: + msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(api=api, lb=creation_response.json) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_lb = lb_lookup[wished_lb["name"]] + patch_payload = lb_attributes_should_be_changed(target_lb=target_lb, + wished_lb=wished_lb) + + if not patch_payload: + return changed, target_lb + + changed = True + if api.module.check_mode: + return changed, {"status": "Load-balancer attributes would be changed."} + + lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"], + data=patch_payload) + + if not lb_patch_response.ok: + api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format( + lb_patch_response.status_code, lb_patch_response.json['message'])) + + wait_to_complete_state_transition(api=api, lb=target_lb) + return changed, lb_patch_response.json + + +def absent_strategy(api, wished_lb): + response = api.get(path=api.api_path) + changed = False + + status_code = response.status_code + lbs_json = response.json + lbs_list = lbs_json["lbs"] + + if not response.ok: + api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( + status_code, response.json['message'])) + + lb_lookup = {lb["name"]: lb for lb in lbs_list} + if wished_lb["name"] not in lb_lookup.keys(): + return changed, {} + + target_lb = lb_lookup[wished_lb["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Load-balancer would be destroyed"} + + wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_lb["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format( + response.status_code, response.json)) + + wait_to_complete_state_transition(api=api, lb=target_lb) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + region = module.params["region"] + wished_load_balancer = { + "state": module.params["state"], + "name": module.params["name"], + "description": module.params["description"], + "tags": module.params["tags"], + "organization_id": module.params["organization_id"] + } + module.params['api_url'] = SCALEWAY_ENDPOINT + api = Scaleway(module=module) + api.api_path = "lb/v1/regions/%s/lbs" % region + + changed, summary = 
state_strategy[wished_load_balancer["state"]](api=api, + wished_lb=wished_load_balancer) + module.exit_json(changed=changed, scaleway_lb=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + description=dict(required=True), + region=dict(required=True, choices=SCALEWAY_REGIONS), + state=dict(choices=list(state_strategy.keys()), default='present'), + tags=dict(type="list", elements="str", default=[]), + organization_id=dict(required=True), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py deleted file mode 120000 index 00be99e20d..0000000000 --- a/plugins/modules/scaleway_organization_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_organization_info.py \ No newline at end of file diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py new file mode 100644 index 0000000000..873d15b794 --- /dev/null +++ b/plugins/modules/scaleway_organization_info.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_organization_info +short_description: Gather information about the Scaleway organizations available +description: + - Gather information about the Scaleway organizations available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" + +attributes: + action_group: + version_added: 11.3.0 + +options: + api_url: + description: + - Scaleway API URL. + default: 'https://account.scaleway.com' + aliases: ['base_url'] +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather Scaleway organizations information + community.general.scaleway_organization_info: + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_organization_info }}" +""" + +RETURN = r""" +scaleway_organization_info: + description: Response from Scaleway API. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "address_city_name": "Paris", + "address_country_code": "FR", + "address_line1": "42 Rue de l'univers", + "address_line2": null, + "address_postal_code": "75042", + "address_subdivision_code": "FR-75", + "creation_date": "2018-08-06T13:43:28.508575+00:00", + "currency": "EUR", + "customer_class": "individual", + "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", + "locale": "fr_FR", + "modification_date": "2018-08-06T14:56:41.401685+00:00", + "name": "James Bond", + "support_id": "694324", + "support_level": "basic", + "support_pin": "9324", + "users": [], + "vat_number": null, + "warnings": [] + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec +) + + +class ScalewayOrganizationInfo(Scaleway): + + def __init__(self, module): + super(ScalewayOrganizationInfo, self).__init__(module) + self.name = 'organizations' + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py new file mode 100644 index 0000000000..040477a246 --- /dev/null +++ b/plugins/modules/scaleway_private_network.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# +# Scaleway VPC management module +# +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_private_network +short_description: Scaleway private network management +version_added: 4.5.0 +author: Pascal MANGIN (@pastral) +description: + - This module manages private network on Scaleway account (U(https://developer.scaleway.com)). +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the VPC. + default: present + choices: + - present + - absent + + project: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 + + name: + type: str + description: + - Name of the VPC. + tags: + type: list + elements: str + description: + - List of tags to apply to the instance. 
+  default: []
+"""
+
+EXAMPLES = r"""
+- name: Create a private network
+  community.general.scaleway_private_network:
+    project: '{{ scw_project }}'
+    name: 'vpc_one'
+    state: present
+    region: par1
+  register: vpc_creation_task
+
+- name: Make sure private network with name 'foo' is deleted in region par1
+  community.general.scaleway_private_network:
+    name: 'foo'
+    state: absent
+    region: par1
+"""
+
+RETURN = r"""
+scaleway_private_network:
+  description: Information on the VPC.
+  returned: success when O(state=present)
+  type: dict
+  sample:
+    {
+      "created_at": "2022-01-15T11:11:12.676445Z",
+      "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+      "name": "network",
+      "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+      "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+      "tags": [
+        "tag1",
+        "tag2",
+        "tag3",
+        "tag4",
+        "tag5"
+      ],
+      "updated_at": "2022-01-15T11:12:04.624837Z",
+      "zone": "fr-par-2"
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_private_network(api, name, page=1):
+    page_size = 10
+    response = api.get('private-networks', params={'name': name, 'order_by': 'name_asc', 'page': page, 'page_size': page_size})
+    if not response.ok:
+        msg = "Error getting private network: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json)
+        api.module.fail_json(msg=msg)
+
+    if response.json['total_count'] == 0:
+        return None
+
+    for private_network in response.json['private_networks']:
+        if private_network['name'] == name:
+            return private_network
+
+    # search on the next page if needed
+    if (page * page_size) < response.json['total_count']:
+        return get_private_network(api, name, page + 1)
+
+    return None
+
+
+def present_strategy(api, wished_private_network):
+    changed = False
+    private_network = get_private_network(api, wished_private_network['name'])
+    if private_network is not None:
+        if set(wished_private_network['tags']) == set(private_network['tags']):
+            return changed, private_network
+        else:
+            # the private network needs to be updated
+            data = {'name': wished_private_network['name'],
+                    'tags': wished_private_network['tags']
+                    }
+            changed = True
+            if api.module.check_mode:
+                return changed, {"status": "private network would be updated"}
+
+            response = api.patch(path='private-networks/' + private_network['id'], data=data)
+            if not response.ok:
+                api.module.fail_json(msg='Error updating private network [{0}: {1}]'.format(response.status_code, response.json))
+
+            return changed, response.json
+
+    # the private network needs to be created
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "private network would be created"}
+
+    data = {'name': wished_private_network['name'],
+            'project_id': wished_private_network['project'],
+            'tags': wished_private_network['tags']
+            }
+
+    response = api.post(path='private-networks/', data=data)
+
+    if not response.ok:
+        api.module.fail_json(msg='Error creating private network [{0}: {1}]'.format(response.status_code, response.json))
+
+    return changed, response.json
+
+
+def absent_strategy(api, wished_private_network):
+    changed = False
+    private_network = get_private_network(api, wished_private_network['name'])
+    if private_network is None:
+        return changed, {}
+
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "private network would be destroyed"}
+
+    response = 
api.delete('private-networks/' + private_network['id']) + + if not response.ok: + api.module.fail_json(msg='Error deleting private network [{0}: {1}]'.format( + response.status_code, response.json)) + + return changed, response.json + + +def core(module): + + wished_private_network = { + "project": module.params['project'], + "tags": module.params['tags'], + "name": module.params['name'] + } + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint_vpc"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, wished_private_network=wished_private_network) + else: + changed, summary = present_strategy(api=api, wished_private_network=wished_private_network) + module.exit_json(changed=changed, scaleway_private_network=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + project=dict(required=True), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + tags=dict(type="list", elements="str", default=[]), + name=dict() + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py deleted file mode 120000 index 041b092f93..0000000000 --- a/plugins/modules/scaleway_security_group.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_security_group.py \ No newline at end of file diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py new file mode 100644 index 0000000000..ea25234588 --- /dev/null +++ b/plugins/modules/scaleway_security_group.py @@ -0,0 +1,249 @@ +#!/usr/bin/python +# +# Scaleway Security Group management module +# +# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_security_group +short_description: Scaleway Security Group management module +author: Antoine Barbare (@abarbare) +description: + - This module manages Security Group on Scaleway account U(https://developer.scaleway.com). +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + description: + - Indicate desired state of the Security Group. + type: str + choices: [absent, present] + default: present + + organization: + description: + - Organization identifier. + type: str + required: true + + region: + description: + - Scaleway region to use (for example V(par1)). + type: str + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 + + name: + description: + - Name of the Security Group. + type: str + required: true + + description: + description: + - Description of the Security Group. + type: str + + stateful: + description: + - Create a stateful security group which allows established connections in and out. 
+    type: bool
+    required: true
+
+  inbound_default_policy:
+    description:
+      - Default policy for incoming traffic.
+    type: str
+    choices: [accept, drop]
+
+  outbound_default_policy:
+    description:
+      - Default policy for outgoing traffic.
+    type: str
+    choices: [accept, drop]
+
+  organization_default:
+    description:
+      - Create the security group as the organization default one.
+    type: bool
+"""
+
+EXAMPLES = r"""
+- name: Create a Security Group
+  community.general.scaleway_security_group:
+    state: present
+    region: par1
+    name: security_group
+    description: "my security group description"
+    organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+    stateful: false
+    inbound_default_policy: accept
+    outbound_default_policy: accept
+    organization_default: false
+  register: security_group_creation_task
+"""
+
+RETURN = r"""
+data:
+  description: This is only present when O(state=present).
+  returned: when O(state=present)
+  type: dict
+  sample:
+    {
+      "scaleway_security_group": {
+        "description": "my security group description",
+        "enable_default_security": true,
+        "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+        "inbound_default_policy": "accept",
+        "name": "security_group",
+        "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+        "organization_default": false,
+        "outbound_default_policy": "accept",
+        "servers": [],
+        "stateful": false
+      }
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+from uuid import uuid4
+
+
+def payload_from_security_group(security_group):
+    return {
+        k: v
+        for k, v in security_group.items()
+        if k != 'id' and v is not None
+    }
+
+
+def present_strategy(api, security_group):
+    ret = {'changed': False}
+
+    response = api.get('security_groups')
+    if not response.ok:
+        api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+    security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']}
+
+    if security_group['name'] not in security_group_lookup.keys():
+        ret['changed'] = True
+        if api.module.check_mode:
+            # Help the user in check mode by returning a placeholder id key
+            ret['scaleway_security_group'] = {'id': str(uuid4())}
+            return ret
+
+        # Create the Security Group
+        response = api.post('/security_groups',
+                            data=payload_from_security_group(security_group))
+
+        if not response.ok:
+            msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
+            api.module.fail_json(msg=msg)
+        ret['scaleway_security_group'] = response.json['security_group']
+
+    else:
+        ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+
+    return ret
+
+
+def absent_strategy(api, security_group):
+    response = api.get('security_groups')
+    ret = {'changed': False}
+
+    if not response.ok:
+        api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+    security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']}
+    if security_group['name'] not in security_group_lookup.keys():
+        return ret
+
+    ret['changed'] = True
+    if api.module.check_mode:
+        return ret
+
+    response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
+    if not response.ok:
+        api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'],
response.json['message'], response.json)) + + return ret + + +def core(module): + security_group = { + 'organization': module.params['organization'], + 'name': module.params['name'], + 'description': module.params['description'], + 'stateful': module.params['stateful'], + 'inbound_default_policy': module.params['inbound_default_policy'], + 'outbound_default_policy': module.params['outbound_default_policy'], + 'organization_default': module.params['organization_default'], + } + + region = module.params['region'] + module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] + + api = Scaleway(module=module) + if module.params['state'] == 'present': + summary = present_strategy(api=api, security_group=security_group) + else: + summary = absent_strategy(api=api, security_group=security_group) + module.exit_json(**summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + organization=dict(type='str', required=True), + name=dict(type='str', required=True), + description=dict(type='str'), + region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), + stateful=dict(type='bool', required=True), + inbound_default_policy=dict(type='str', choices=['accept', 'drop']), + outbound_default_policy=dict(type='str', choices=['accept', 'drop']), + organization_default=dict(type='bool'), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]] + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py deleted file mode 120000 index e74504cb17..0000000000 --- a/plugins/modules/scaleway_security_group_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_security_group_info.py \ No newline at end of file diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py new file mode 100644 index 0000000000..7ec1fe5b3f --- /dev/null +++ b/plugins/modules/scaleway_security_group_info.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_security_group_info +short_description: Gather information about the Scaleway security groups available +description: + - Gather information about the Scaleway security groups available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). 
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather Scaleway security groups information + community.general.scaleway_security_group_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_security_group_info }}" +""" + +RETURN = r""" +scaleway_security_group_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' + returned: success + type: list + elements: dict + sample: + [ + { + "description": "test-ams", + "enable_default_security": true, + "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", + "name": "test-ams", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "organization_default": false, + "servers": [ + { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + ] + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewaySecurityGroupInfo(Scaleway): + + def __init__(self, module): + super(ScalewaySecurityGroupInfo, self).__init__(module) + self.name = 'security_groups' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py deleted file mode 120000 index f9d05e3a99..0000000000 --- a/plugins/modules/scaleway_security_group_rule.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_security_group_rule.py \ No newline at end of file diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py new file mode 100644 index 0000000000..edfb6a3565 --- /dev/null +++ b/plugins/modules/scaleway_security_group_rule.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# Scaleway Security Group Rule management module +# +# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_security_group_rule +short_description: Scaleway Security Group Rule management module +author: Antoine Barbare (@abarbare) +description: + - This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com). 
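One thing worth calling out before the option list below: the rule module exposes no rule IDs in its interface, so idempotency comes from `get_sgr_from_api` (further down), which treats a rule as already present only when every identifying attribute matches. A standalone sketch of that matching logic, with hypothetical sample data:

```python
# Stand-in for the module's get_sgr_from_api: a rule "exists" only if every
# identifying attribute matches, which is what makes present/absent idempotent.
RULE_KEYS = ('ip_range', 'dest_port_from', 'direction', 'action', 'protocol')


def find_matching_rule(existing_rules, wanted):
    """Return the first rule matching all identifying attributes, else None."""
    for rule in existing_rules:
        if all(rule[k] == wanted[k] for k in RULE_KEYS):
            return rule
    return None


rules = [{'ip_range': '0.0.0.0/0', 'dest_port_from': 80, 'direction': 'inbound',
          'action': 'accept', 'protocol': 'TCP', 'id': 'abc'}]
wanted = {'ip_range': '0.0.0.0/0', 'dest_port_from': 80, 'direction': 'inbound',
          'action': 'accept', 'protocol': 'TCP'}
print(find_matching_rule(rules, wanted)['id'])  # abc
```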
+extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the Security Group Rule. + default: present + choices: + - present + - absent + + region: + type: str + description: + - Scaleway region to use (for example V(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 + + protocol: + type: str + description: + - Network protocol to use. + choices: + - TCP + - UDP + - ICMP + required: true + + port: + description: + - Port related to the rule, null value for all the ports. + required: true + type: int + + ip_range: + type: str + description: + - IPV4 CIDR notation to apply to the rule. + default: 0.0.0.0/0 + + direction: + type: str + description: + - Rule direction. + choices: + - inbound + - outbound + required: true + + action: + type: str + description: + - Rule action. + choices: + - accept + - drop + required: true + + security_group: + type: str + description: + - Security Group unique identifier. + required: true +""" + +EXAMPLES = r""" +- name: Create a Security Group Rule + community.general.scaleway_security_group_rule: + state: present + region: par1 + protocol: TCP + port: 80 + ip_range: 0.0.0.0/0 + direction: inbound + action: accept + security_group: b57210ee-1281-4820-a6db-329f78596ecb + register: security_group_rule_creation_task +""" + +RETURN = r""" +data: + description: This is only present when O(state=present). + returned: when O(state=present) + type: dict + sample: + { + "scaleway_security_group_rule": { + "direction": "inbound", + "protocol": "TCP", + "ip_range": "0.0.0.0/0", + "dest_port_from": 80, + "action": "accept", + "position": 2, + "dest_port_to": null, + "editable": null, + "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object +from ansible.module_utils.basic import AnsibleModule + + +def get_sgr_from_api(security_group_rules, security_group_rule): + """ Check if a security_group_rule specs are present in security_group_rules + Return None if no rules match the specs + Return the rule if found + """ + for sgr in security_group_rules: + if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and + sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and + sgr['protocol'] == security_group_rule['protocol']): + return sgr + + return None + + +def present_strategy(api, security_group_id, security_group_rule): + ret = {'changed': False} + + response = api.get('security_groups/%s/rules' % security_group_id) + if not response.ok: + api.module.fail_json( + msg='Error getting security group rules "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + existing_rule = get_sgr_from_api( + response.json['rules'], security_group_rule) + + if not existing_rule: + ret['changed'] = True + if api.module.check_mode: + return ret + + # Create Security Group Rule + response = api.post('/security_groups/%s/rules' % security_group_id, + 
data=payload_from_object(security_group_rule)) + + if not response.ok: + api.module.fail_json( + msg='Error during security group rule creation: "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + ret['scaleway_security_group_rule'] = response.json['rule'] + + else: + ret['scaleway_security_group_rule'] = existing_rule + + return ret + + +def absent_strategy(api, security_group_id, security_group_rule): + ret = {'changed': False} + + response = api.get('security_groups/%s/rules' % security_group_id) + if not response.ok: + api.module.fail_json( + msg='Error getting security group rules "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + existing_rule = get_sgr_from_api( + response.json['rules'], security_group_rule) + + if not existing_rule: + return ret + + ret['changed'] = True + if api.module.check_mode: + return ret + + response = api.delete( + '/security_groups/%s/rules/%s' % + (security_group_id, existing_rule['id'])) + if not response.ok: + api.module.fail_json( + msg='Error deleting security group rule "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + return ret + + +def core(module): + api = Scaleway(module=module) + + security_group_rule = { + 'protocol': module.params['protocol'], + 'dest_port_from': module.params['port'], + 'ip_range': module.params['ip_range'], + 'direction': module.params['direction'], + 'action': module.params['action'], + } + + region = module.params['region'] + module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] + + if module.params['state'] == 'present': + summary = present_strategy( + api=api, + security_group_id=module.params['security_group'], + security_group_rule=security_group_rule) + else: + summary = absent_strategy( + api=api, + security_group_id=module.params['security_group'], + security_group_rule=security_group_rule) + module.exit_json(**summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update( + state=dict(type='str', default='present', choices=['absent', 'present']), + region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), + protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']), + port=dict(type='int', required=True), + ip_range=dict(type='str', default='0.0.0.0/0'), + direction=dict(type='str', required=True, choices=['inbound', 'outbound']), + action=dict(type='str', required=True, choices=['accept', 'drop']), + security_group=dict(type='str', required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py deleted file mode 120000 index 1cc4ec4532..0000000000 --- a/plugins/modules/scaleway_server_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_server_info.py \ No newline at end of file diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py new file mode 100644 index 0000000000..16a0fc17e3 --- /dev/null +++ b/plugins/modules/scaleway_server_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_server_info 
+short_description: Gather information about the Scaleway servers available +description: + - Gather information about the Scaleway servers available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway servers information + community.general.scaleway_server_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_server_info }}" +""" + +RETURN = r""" +scaleway_server_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' + returned: success + type: list + elements: dict + sample: + [ + { + "arch": "x86_64", + "boot_type": "local", + "bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "commercial_type": "START1-XS", + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "dynamic_ip_required": false, + "enable_ipv6": false, + "extra_networks": [], + "hostname": "scw-e0d256", + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "image": { + "arch": "x86_64", + "creation_date": "2018-04-26T12:42:21.619844+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", + "modification_date": "2018-04-26T12:49:07.573004+00:00", + "name": "Ubuntu Xenial", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + }, + "ipv6": null, + "location": { + "cluster_id": "5", + "hypervisor_id": "412", + "node_id": "2", + "platform_id": "13", + "zone_id": "par1" + }, + "maintenances": [], + "modification_date": "2018-08-14T21:37:28.630882+00:00", + "name": "scw-e0d256", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "private_ip": "10.14.222.131", + "protected": false, + "public_ip": { + "address": "163.172.170.197", + "dynamic": false, + "id": "ea081794-a581-4495-8451-386ddaf0a451" + }, + 
"security_group": { + "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", + "name": "Default security group" + }, + "state": "running", + "state_detail": "booted", + "tags": [], + "volumes": { + "0": { + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "export_uri": "device://dev/vda", + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "modification_date": "2018-08-14T21:36:56.271545+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d256" + }, + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + } + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayServerInfo(Scaleway): + + def __init__(self, module): + super(ScalewayServerInfo, self).__init__(module) + self.name = 'servers' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_server_info=ScalewayServerInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py deleted file mode 120000 index 15ec0d47ab..0000000000 --- a/plugins/modules/scaleway_snapshot_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_snapshot_info.py \ No newline at end of file diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py new file mode 100644 index 0000000000..e59f9e3262 --- /dev/null +++ b/plugins/modules/scaleway_snapshot_info.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_snapshot_info +short_description: Gather information about the Scaleway snapshots available +description: + - Gather information about the Scaleway snapshot available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). 
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway snapshots information + community.general.scaleway_snapshot_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_snapshot_info }}" +""" + +RETURN = r""" +scaleway_snapshot_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' + returned: success + type: list + elements: dict + sample: + [ + { + "base_volume": { + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" + }, + "creation_date": "2018-08-14T22:34:35.299461+00:00", + "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", + "modification_date": "2018-08-14T22:34:54.520560+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION +) + + +class ScalewaySnapshotInfo(Scaleway): + + def __init__(self, module): + super(ScalewaySnapshotInfo, self).__init__(module) + self.name = 'snapshots' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py deleted file mode 120000 index 9fc8d97d5f..0000000000 --- a/plugins/modules/scaleway_sshkey.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_sshkey.py \ No newline at end of file diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py new file mode 100644 index 0000000000..213e6a2010 --- /dev/null +++ b/plugins/modules/scaleway_sshkey.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# +# Scaleway SSH keys management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_sshkey +short_description: Scaleway SSH keys management module +author: Remy Leone (@remyleone) +description: + - This module manages SSH keys on Scaleway account (U(https://developer.scaleway.com)). 
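With `scaleway_snapshot_info` above, the patch has now reintroduced several `*_info` modules that differ only in the resource collection they query: subclass `Scaleway`, set `self.name` to the API collection, resolve the region to an endpoint through `SCALEWAY_LOCATION`, and return `get_resources()`. A condensed sketch of that shared skeleton (`ScalewayThingInfo` and `'things'` are placeholders; `get_resources` comes from the shared `Scaleway` base class):

```python
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.scaleway import (
    Scaleway,
    ScalewayException,
    scaleway_argument_spec,
    SCALEWAY_LOCATION,
)


class ScalewayThingInfo(Scaleway):
    """Generic shape shared by the *_info modules: only 'name' changes."""

    def __init__(self, module):
        super(ScalewayThingInfo, self).__init__(module)
        self.name = 'things'  # the API collection to list, e.g. 'servers'

        region = module.params["region"]
        self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]


def main():
    argument_spec = scaleway_argument_spec()
    argument_spec.update(region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    try:
        module.exit_json(scaleway_thing_info=ScalewayThingInfo(module).get_resources())
    except ScalewayException as exc:
        module.fail_json(msg=exc.message)
```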
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.scaleway.actiongroup_scaleway
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+  action_group:
+    version_added: 11.3.0
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the SSH key.
+    default: present
+    choices:
+      - present
+      - absent
+  ssh_pub_key:
+    type: str
+    description:
+      - The public SSH key as a string to add.
+    required: true
+  api_url:
+    type: str
+    description:
+      - Scaleway API URL.
+    default: 'https://account.scaleway.com'
+    aliases: ['base_url']
+"""
+
+EXAMPLES = r"""
+- name: "Add SSH key"
+  community.general.scaleway_sshkey:
+    ssh_pub_key: "ssh-rsa AAAA..."
+    state: "present"
+
+- name: "Delete SSH key"
+  community.general.scaleway_sshkey:
+    ssh_pub_key: "ssh-rsa AAAA..."
+    state: "absent"
+
+- name: "Add SSH key with explicit token"
+  community.general.scaleway_sshkey:
+    ssh_pub_key: "ssh-rsa AAAA..."
+    state: "present"
+    oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
+"""
+
+RETURN = r"""
+data:
+  description: This is only present when O(state=present).
+  returned: when O(state=present)
+  type: dict
+  sample:
+    {
+      "ssh_public_keys": [
+        {
+          "key": "ssh-rsa AAAA...."
+        }
+      ]
+    }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
+
+
+def extract_present_sshkeys(raw_organization_dict):
+    ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
+    ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
+    return ssh_key_lookup
+
+
+def extract_user_id(raw_organization_dict):
+    return raw_organization_dict["organizations"][0]["users"][0]["id"]
+
+
+def sshkey_user_patch(ssh_lookup):
+    ssh_list = {"ssh_public_keys": [{"key": key}
+                                    for key in ssh_lookup]}
+    return ssh_list
+
+
+def core(module):
+    ssh_pub_key = module.params['ssh_pub_key']
+    state = module.params["state"]
+    account_api = Scaleway(module)
+    response = account_api.get('organizations')
+
+    status_code = response.status_code
+    organization_json = response.json
+
+    if not response.ok:
+        module.fail_json(msg='Error getting SSH keys [{0}: {1}]'.format(
+            status_code, response.json['message']))
+
+    user_id = extract_user_id(organization_json)
+    present_sshkeys = []
+    try:
+        present_sshkeys = extract_present_sshkeys(organization_json)
+    except (KeyError, IndexError):
+        module.fail_json(msg="Error while extracting present SSH keys from API")
+
+    if state in ('present',):
+        if ssh_pub_key in present_sshkeys:
+            module.exit_json(changed=False)
+
+        # If the key was not found, create it.
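+        # The account API has no per-key endpoint: it exposes the whole
+        # ssh_public_keys list, so the module appends the new key to the list
+        # it just fetched and PATCHes the complete list back through
+        # sshkey_user_patch(); check mode exits below before any write.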
+ if module.check_mode: + module.exit_json(changed=True) + + present_sshkeys.append(ssh_pub_key) + payload = sshkey_user_patch(present_sshkeys) + + response = account_api.patch('/users/%s' % user_id, data=payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format( + response.status_code, response.json)) + + elif state in ('absent',): + if ssh_pub_key not in present_sshkeys: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + present_sshkeys.remove(ssh_pub_key) + payload = sshkey_user_patch(present_sshkeys) + + response = account_api.patch('/users/%s' % user_id, data=payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format( + response.status_code, response.json)) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + ssh_pub_key=dict(required=True), + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py deleted file mode 120000 index a65edd42b3..0000000000 --- a/plugins/modules/scaleway_user_data.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_user_data.py \ No newline at end of file diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py new file mode 100644 index 0000000000..2dadf4439a --- /dev/null +++ b/plugins/modules/scaleway_user_data.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# +# Scaleway user data management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_user_data +short_description: Scaleway user_data management module +author: Remy Leone (@remyleone) +description: + - This module manages user_data on compute instances on Scaleway. + - It can be used to configure cloud-init for instance. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + + server_id: + type: str + description: + - Scaleway Compute instance ID of the server. + required: true + + user_data: + type: dict + description: + - User defined data. Typically used with C(cloud-init). + - Pass your C(cloud-init) script here as a string. + required: false + + region: + type: str + description: + - Scaleway compute zone. 
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - ams2
+      - ams3
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - par3
+      - waw1
+      - EMEA-PL-WAW1
+      - waw2
+      - waw3
+"""
+
+EXAMPLES = r"""
+- name: Update the cloud-init
+  community.general.scaleway_user_data:
+    server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+    region: ams1
+    user_data:
+      cloud-init: 'final_message: "Hello World!"'
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+    compute_api.module.debug("Starting patching user_data attributes")
+
+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"})
+    if not response.ok:
+        msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+    return response
+
+
+def delete_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
+
+    response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+    if not response.ok:
+        msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+    return response
+
+
+def get_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting fetching user_data attributes")
+
+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.get(path=path)
+    if not response.ok:
+        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+    return response.json
+
+
+def core(module):
+    region = module.params["region"]
+    server_id = module.params["server_id"]
+    user_data = module.params["user_data"]
+    changed = False
+
+    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+    compute_api = Scaleway(module=module)
+
+    user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+    if not user_data_list.ok:
+        msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+        compute_api.module.fail_json(msg=msg)
+
+    present_user_data_keys = user_data_list.json["user_data"]
+    present_user_data = {
+        key: get_user_data(compute_api=compute_api, server_id=server_id, key=key)
+        for key in present_user_data_keys
+    }
+
+    if present_user_data == user_data:
+        module.exit_json(changed=changed, msg=user_data_list.json)
+
+    # First, remove keys that are not defined in the wished user_data
+    for key in present_user_data:
+        if key not in user_data:
+            changed = True
+            if compute_api.module.check_mode:
+                module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+            delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+    # Then, patch keys whose values differ
+    for key, value in user_data.items():
+        if key not in present_user_data or value != present_user_data[key]:
+            changed = True
+            if compute_api.module.check_mode:
+                module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched."
% server_id}) + + patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value) + + module.exit_json(changed=changed, msg=user_data) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + user_data=dict(type="dict"), + server_id=dict(required=True), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py deleted file mode 120000 index e1e6171502..0000000000 --- a/plugins/modules/scaleway_volume.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_volume.py \ No newline at end of file diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py new file mode 100644 index 0000000000..f30856538b --- /dev/null +++ b/plugins/modules/scaleway_volume.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Scaleway volumes management module +# +# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_volume +short_description: Scaleway volumes management module +author: Henryk Konsek (@hekonsek) +description: + - This module manages volumes on Scaleway account U(https://developer.scaleway.com). +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the volume. + default: present + choices: + - present + - absent + region: + type: str + description: + - Scaleway region to use (for example par1). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 + name: + type: str + description: + - Name used to identify the volume. + required: true + project: + type: str + description: + - Scaleway project ID to which volume belongs. + version_added: 4.3.0 + organization: + type: str + description: + - ScaleWay organization ID to which volume belongs. + size: + type: int + description: + - Size of the volume in bytes. + volume_type: + type: str + description: + - Type of the volume (for example 'l_ssd'). +""" + +EXAMPLES = r""" +- name: Create 10GB volume + community.general.scaleway_volume: + name: my-volume + state: present + region: par1 + project: "{{ scw_org }}" + "size": 10000000000 + volume_type: l_ssd + register: server_creation_check_task + +- name: Make sure volume deleted + community.general.scaleway_volume: + name: my-volume + state: absent + region: par1 +""" + +RETURN = r""" +data: + description: This is only present when O(state=present). 
+  returned: when O(state=present)
+  type: dict
+  sample:
+    {
+      "volume": {
+        "export_uri": null,
+        "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
+        "name": "volume-0-3",
+        "project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
+        "server": null,
+        "size": 10000000000,
+        "volume_type": "l_ssd"
+      }
+    }
+"""
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def core(module):
+    region = module.params["region"]
+    state = module.params['state']
+    name = module.params['name']
+    organization = module.params['organization']
+    project = module.params['project']
+    size = module.params['size']
+    volume_type = module.params['volume_type']
+    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+    account_api = Scaleway(module)
+    response = account_api.get('volumes')
+    status_code = response.status_code
+    volumes_json = response.json
+
+    if project is None:
+        project = organization
+
+    if not response.ok:
+        module.fail_json(msg='Error getting volumes [{0}: {1}]'.format(
+            status_code, response.json['message']))
+
+    volume_by_name = None
+    for volume in volumes_json['volumes']:
+        if volume['project'] == project and volume['name'] == name:
+            volume_by_name = volume
+
+    if state in ('present',):
+        if volume_by_name is not None:
+            module.exit_json(changed=False)
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+
+        payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type}
+
+        response = account_api.post('/volumes', payload)
+
+        if response.ok:
+            module.exit_json(changed=True, data=response.json)
+
+        module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
+            response.status_code, response.json))
+
+    elif state in ('absent',):
+        if volume_by_name is None:
+            module.exit_json(changed=False)
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+
+        response = account_api.delete('/volumes/' + volume_by_name['id'])
+        if response.status_code == 204:
+            module.exit_json(changed=True, data=response.json)
+
+        module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
+            response.status_code, response.json))
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(dict(
+        state=dict(default='present', choices=['absent', 'present']),
+        name=dict(required=True),
+        size=dict(type='int'),
+        project=dict(),
+        organization=dict(),
+        volume_type=dict(),
+        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ('organization', 'project'),
+        ],
+        required_one_of=[
+            ('organization', 'project'),
+        ],
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py
deleted file mode 120000
index 4db27fcd9a..0000000000
--- a/plugins/modules/scaleway_volume_info.py
+++ /dev/null
@@ -1 +0,0 @@
-./cloud/scaleway/scaleway_volume_info.py
\ No newline at end of file
diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py
new file mode 100644
index 0000000000..b5b26360c3
--- /dev/null
+++ b/plugins/modules/scaleway_volume_info.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Yanis Guenane
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: scaleway_volume_info +short_description: Gather information about the Scaleway volumes available +description: + - Gather information about the Scaleway volumes available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway volumes information + community.general.scaleway_volume_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_volume_info }}" +""" + +RETURN = r""" +scaleway_volume_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' + returned: success + type: list + elements: dict + sample: + [ + { + "creation_date": "2018-08-14T20:56:24.949660+00:00", + "export_uri": null, + "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", + "modification_date": "2018-08-14T20:56:24.949660+00:00", + "name": "test-volume", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": null, + "size": 50000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, + SCALEWAY_LOCATION) + + +class ScalewayVolumeInfo(Scaleway): + + def __init__(self, module): + super(ScalewayVolumeInfo, self).__init__(module) + self.name = 'volumes' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_volume_info=ScalewayVolumeInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py deleted file mode 120000 index 4c9e69a421..0000000000 --- a/plugins/modules/sefcontext.py +++ /dev/null @@ -1 +0,0 @@ -./system/sefcontext.py \ No newline at end of file diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py new file mode 100644 index 0000000000..f08370ff70 --- /dev/null +++ b/plugins/modules/sefcontext.py @@ -0,0 +1,382 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: + - Manages SELinux file context mapping definitions. + - Similar to the C(semanage fcontext) command. 
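+# Rough CLI equivalence (illustrative only): O(state=present) with O(setype) behaves like +# 'semanage fcontext -a -t <setype> <target>', O(substitute) like 'semanage fcontext -a -e <substitute> <target>', +# and O(state=absent) like 'semanage fcontext -d <target>'.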
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.platform +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: linux +options: + target: + description: + - Target path (expression). + type: str + required: true + aliases: [path] + ftype: + description: + - The file type that should have SELinux contexts applied. + - 'The following file type options are available:' + - V(a) for all files, + - V(b) for block devices, + - V(c) for character devices, + - V(d) for directories, + - V(f) for regular files, + - V(l) for symbolic links, + - V(p) for named pipes, + - V(s) for socket files. + type: str + choices: [a, b, c, d, f, l, p, s] + default: a + setype: + description: + - SELinux type for the specified O(target). + type: str + substitute: + description: + - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree + is made equivalent to this path. + - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux + management tools. + version_added: 6.4.0 + type: str + aliases: [equal] + seuser: + description: + - SELinux user for the specified O(target). + - Defaults to V(system_u) for new file contexts and to the existing value when modifying file contexts. + type: str + selevel: + description: + - SELinux range for the specified O(target). + - Defaults to V(s0) for new file contexts and to the existing value when modifying file contexts. + type: str + aliases: [serange] + state: + description: + - Whether the SELinux file context must be V(absent) or V(present). + - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type and path substitution mappings + that match O(target). + type: str + choices: [absent, present] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: true + ignore_selinux_state: + description: + - Useful for scenarios (such as chrooted environments) in which you cannot get the real SELinux state. + type: bool + default: false +notes: + - The changes are persistent across reboots. + - O(setype) and O(substitute) are mutually exclusive. + - If O(state=present) then one of O(setype) or O(substitute) is mandatory. + - The M(community.general.sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable + to first create the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files + that require the new SELinux file contexts. + - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what the reported changes + would entail, and there is no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. +requirements: + - libselinux-python + - policycoreutils-python +author: + - Dag Wieers (@dagwieers) +""" + +EXAMPLES = r""" +- name: Allow apache to modify files in /srv/git_repos + community.general.sefcontext: + target: '/srv/git_repos(/.*)?'
+ setype: httpd_sys_rw_content_t + state: present + +- name: Substitute file contexts for path /srv/containers with /var/lib/containers + community.general.sefcontext: + target: /srv/containers + substitute: /var/lib/containers + state: present + +- name: Delete file context path substitution for /srv/containers + community.general.sefcontext: + target: /srv/containers + substitute: /var/lib/containers + state: absent + +- name: Delete any file context mappings for path /srv/git + community.general.sefcontext: + target: /srv/git + state: absent + +- name: Apply new SELinux file context to filesystem + ansible.builtin.command: restorecon -irv /srv/git_repos +""" + +RETURN = r""" +# Default return values +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records comprise of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_substitute_exists(sefcontext, target): + ''' Get the SELinux file context path substitution definition from policy. Return None if it does not exist. ''' + + return sefcontext.equiv_dist.get(target, sefcontext.equiv.get(target)) + + +def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. 
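+ + Parameter sketch (descriptive only, inferred from the call sites in main() below): + target -- path or path expression the mapping applies to + ftype -- one-letter file type key from option_to_file_type_str + setype -- SELinux type to set, or None when substituting + substitute -- equivalence path, mutually exclusive with setype + do_reload -- whether to reload policy after the commit + serange -- MLS/MCS range; falls back to the existing value or 's0' + seuser -- SELinux user; falls back to the existing value or 'system_u'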
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + if substitute is None: + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + else: + exists = semanage_fcontext_substitute_exists(sefcontext, target) + if exists: + # Modify existing path substitution entry + orig_substitute = exists + + if substitute != orig_substitute: + if not module.check_mode: + sefcontext.modify_equal(target, substitute) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context path substitutions\n' + prepared_diff += '-%s = %s\n' % (target, orig_substitute) + prepared_diff += '+%s = %s\n' % (target, substitute) + else: + # Add missing path substitution entry + if not module.check_mode: + sefcontext.add_equal(target, substitute) + changed = True + if module._diff: + prepared_diff += '# Addition to semanage file context path substitutions\n' + prepared_diff += '+%s = %s\n' % (target, substitute) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. 
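+ + Parameter sketch (descriptive only): target and ftype identify the mapping, setype/substitute narrow which + kind of entry may be removed, and do_reload controls the policy reload after the commit.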
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + substitute_exists = semanage_fcontext_substitute_exists(sefcontext, target) + if exists and substitute is None: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + if substitute_exists and setype is None and ((substitute is not None and substitute_exists == substitute) or substitute is None): + # Remove existing path substitution entry + orig_substitute = substitute_exists + + if not module.check_mode: + sefcontext.delete(target, orig_substitute) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context path substitutions\n' + prepared_diff += '-%s = %s\n' % (target, orig_substitute) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())), + setype=dict(type='str'), + substitute=dict(type='str', aliases=['equal']), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ('setype', 'substitute'), + ('substitute', 'ftype'), + ('substitute', 'seuser'), + ('substitute', 'selevel'), + ], + required_if=[ + ('state', 'present', ('setype', 'substitute'), True), + ], + + supports_check_mode=True, + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + setype = module.params['setype'] + substitute = module.params['substitute'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, substitute=substitute, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py deleted file mode 120000 index 5c1b5ded36..0000000000 --- a/plugins/modules/selinux_permissive.py +++ 
/dev/null @@ -1 +0,0 @@ -./system/selinux_permissive.py \ No newline at end of file diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py new file mode 100644 index 0000000000..64d77e33cf --- /dev/null +++ b/plugins/modules/selinux_permissive.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Michael Scherer +# inspired by code of github.com/dandiker/ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: selinux_permissive +short_description: Change permissive domain in SELinux policy +description: + - Add and remove a domain from the list of permissive domains. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + domain: + description: + - The domain that is added or removed from the list of permissive domains. + type: str + required: true + aliases: [name] + permissive: + description: + - Indicate if the domain should or should not be set as permissive. + type: bool + required: true + no_reload: + description: + - Disable reloading of the SELinux policy after making a change to a domain's permissive setting. + - The default is V(false), which causes the policy to be reloaded when a domain changes state. + - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6. + type: bool + default: false + store: + description: + - Name of the SELinux policy store to use. + type: str + default: '' +notes: + - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
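+# The work is done through the policycoreutils seobject API; a minimal sketch of the underlying calls (assuming the +# default policy store) is: +# import seobject +# records = seobject.permissiveRecords('') +# records.get_all() # current permissive domains +# records.add('httpd_t') # what permissive=true performs +# records.delete('httpd_t') # what permissive=false performs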
+requirements: [policycoreutils-python] +author: + - Michael Scherer (@mscherer) +""" + +EXAMPLES = r""" +- name: Change the httpd_t domain to permissive + community.general.selinux_permissive: + name: httpd_t + permissive: true +""" + +import traceback + +HAVE_SEOBJECT = False +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=True, aliases=['name']), + store=dict(type='str', default=''), + permissive=dict(type='bool', required=True), + no_reload=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + # global vars + changed = False + store = module.params['store'] + permissive = module.params['permissive'] + domain = module.params['domain'] + no_reload = module.params['no_reload'] + + if not HAVE_SEOBJECT: + module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"), + exception=SEOBJECT_IMP_ERR) + + try: + permissive_domains = seobject.permissiveRecords(store) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + + # not supported on EL 6 + if 'set_reload' in dir(permissive_domains): + permissive_domains.set_reload(not no_reload) + + try: + all_domains = permissive_domains.get_all() + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + + if permissive: + if domain not in all_domains: + if not module.check_mode: + try: + permissive_domains.add(domain) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + changed = True + else: + if domain in all_domains: + if not module.check_mode: + try: + permissive_domains.delete(domain) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + changed = True + + module.exit_json(changed=changed, store=store, + permissive=permissive, domain=domain) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py deleted file mode 120000 index 609c0394a4..0000000000 --- a/plugins/modules/selogin.py +++ /dev/null @@ -1 +0,0 @@ -./system/selogin.py \ No newline at end of file diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py new file mode 100644 index 0000000000..d1e0faf085 --- /dev/null +++ b/plugins/modules/selogin.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# Copyright (c) 2017, Petr Lautrbach +# Based on seport.py module (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: selogin +short_description: Manages linux user to SELinux user mapping +description: + - Manages linux user to SELinux user mapping. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + login: + type: str + description: + - A Linux user. + required: true + seuser: + type: str + description: + - SELinux user name. 
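+ # Typical values (illustrative, not exhaustive): __default__, unconfined_u, staff_u, guest_u; + # 'semanage login -l' lists what the loaded policy actually knows.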
+ selevel: + type: str + aliases: [serange] + description: + - MLS/MCS security range (MLS/MCS systems only). SELinux range for the SELinux login mapping; defaults to the SELinux + user record range. + default: s0 + state: + type: str + description: + - Desired mapping value. + default: present + choices: ['present', 'absent'] + reload: + description: + - Reload SELinux policy after commit. + type: bool + default: true + ignore_selinux_state: + description: + - Run independent of selinux runtime state. + type: bool + default: false +notes: + - The changes are persistent across reboots. + - Not tested on any Debian-based system. +requirements: ['libselinux', 'policycoreutils'] +author: + - Dan Keder (@dankeder) + - Petr Lautrbach (@bachradsusi) + - James Cassell (@jamescassell) +""" + +EXAMPLES = r""" +- name: Modify the default user on the system to the guest_u user + community.general.selogin: + login: __default__ + seuser: guest_u + state: present + +- name: Assign the gijoe user a range and the staff_u SELinux user on an MLS machine + community.general.selogin: + login: gijoe + seuser: staff_u + serange: SystemLow-Secret + state: present + +- name: Assign all users in the engineering group to the staff_u user + community.general.selogin: + login: '%engineering' + seuser: staff_u + state: present +""" + +RETURN = r""" +# Default return values +""" + + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): + """ Add linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux User or a Linux group if it begins with % + + :type seuser: str + :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + # for local_login in all_logins: + if login not in all_logins.keys(): + change = True + if not module.check_mode: + selogin.add(login, seuser, serange) + else: + if all_logins[login][0] != seuser or all_logins[login][1] != serange: + change = True + if not module.check_mode: + selogin.modify(login, seuser, serange) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_login_del(module, login, seuser, do_reload, sestore=''): + """ Delete linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux User or a Linux group if it begins with % + + :type seuser: str + :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + if login in all_logins.keys(): + change = True + if not module.check_mode: + selogin.delete(login) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + login=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange'], default='s0'), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + required_if=[ + ["state", "present", ["seuser"]] + ], + supports_check_mode=True + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + login = module.params['login'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = { + 'login': login, + 'seuser': seuser, + 'serange': serange, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) + elif state == 'absent': + result['changed'] = semanage_login_del(module, login, seuser, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py deleted file mode 120000 index 28a9330ec3..0000000000 --- a/plugins/modules/sendgrid.py +++ /dev/null @@ -1 +0,0 @@ -./notification/sendgrid.py \ No newline at end of file diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py new file mode 100644 index 0000000000..26021c35c9 --- /dev/null +++ b/plugins/modules/sendgrid.py @@ -0,0 +1,274 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Matt Makai +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: sendgrid +short_description: Sends an email with the SendGrid API +description: + - Sends an email with a SendGrid account through their API, not through the SMTP service. +notes: + - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that + the module fails.
+ - Like the other notification modules, this one requires an external dependency to work. In this case, you need an active + SendGrid account. + - In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install + sendgrid). +requirements: + - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + username: + type: str + description: + - Username for logging into the SendGrid account. + - It is only required if O(api_key) is not supplied. + password: + type: str + description: + - Password that corresponds to the username. + - It is only required if O(api_key) is not supplied. + from_address: + type: str + description: + - The address in the "from" field for the email. + required: true + to_addresses: + type: list + elements: str + description: + - A list with one or more recipient email addresses. + required: true + subject: + type: str + description: + - The desired subject for the email. + required: true + api_key: + type: str + description: + - Sendgrid API key to use instead of username/password. + cc: + type: list + elements: str + description: + - A list of email addresses to cc. + bcc: + type: list + elements: str + description: + - A list of email addresses to bcc. + attachments: + type: list + elements: path + description: + - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs). + from_name: + type: str + description: + - The name you want to appear in the from field, for example V(John Doe). + html_body: + description: + - Whether the body is HTML content that should be rendered. + type: bool + default: false + headers: + type: dict + description: + - A dict to pass on as headers. + body: + type: str + description: + - The e-mail body content. + required: true +author: "Matt Makai (@makaimc)" +""" + +EXAMPLES = r""" +- name: Send an email to a single recipient that the deployment was successful + community.general.sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." + delegate_to: localhost + +- name: Send an email to more than one recipient that the build failed + community.general.sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." 
+ delegate_to: localhost +""" + +# ======================================= +# sendgrid module support methods +# +import os +import traceback +from urllib.parse import urlencode + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +SENDGRID_IMP_ERR = None +try: + import sendgrid + HAS_SENDGRID = True +except ImportError: + SENDGRID_IMP_ERR = traceback.format_exc() + HAS_SENDGRID = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes +from ansible.module_utils.urls import fetch_url + + +def post_sendgrid_api(module, username, password, from_address, to_addresses, + subject, body, api_key=None, cc=None, bcc=None, attachments=None, + html_body=False, from_name=None, headers=None): + + if not HAS_SENDGRID: + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible" + data = {'api_user': username, 'api_key': password, + 'from': from_address, 'subject': subject, 'text': body} + encoded_data = urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + recipient = to_bytes(recipient, errors='surrogate_or_strict') + to_addresses_api += '&to[]=%s' % recipient + encoded_data += to_addresses_api + + headers = {'User-Agent': AGENT, + 'Content-type': 'application/x-www-form-urlencoded', + 'Accept': 'application/json'} + return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') + else: + # Remove this check when adding Sendgrid API v3 support + if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"): + module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.") + + if api_key: + sg = sendgrid.SendGridClient(api_key) + else: + sg = sendgrid.SendGridClient(username, password) + + message = sendgrid.Mail() + message.set_subject(subject) + + for recip in to_addresses: + message.add_to(recip) + + if cc: + for recip in cc: + message.add_cc(recip) + if bcc: + for recip in bcc: + message.add_bcc(recip) + + if headers: + message.set_headers(headers) + + if attachments: + for f in attachments: + name = os.path.basename(f) + message.add_attachment(name, f) + + if from_name: + message.set_from('%s <%s>' % (from_name, from_address)) + else: + message.set_from(from_address) + + if html_body: + message.set_html(body) + else: + message.set_text(body) + + return sg.send(message) +# ======================================= +# Main +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(), + password=dict(no_log=True), + api_key=dict(no_log=True), + bcc=dict(type='list', elements='str'), + cc=dict(type='list', elements='str'), + headers=dict(type='dict'), + from_address=dict(required=True), + from_name=dict(), + to_addresses=dict(required=True, type='list', elements='str'), + subject=dict(required=True), + body=dict(required=True), + html_body=dict(default=False, type='bool'), + attachments=dict(type='list', elements='path') + ), + supports_check_mode=True, + mutually_exclusive=[ + ['api_key', 'password'], + ['api_key', 'username'] + ], + required_together=[['username', 'password']], + ) + + username = module.params['username'] + password = module.params['password'] + api_key = module.params['api_key'] + bcc = module.params['bcc'] + cc = module.params['cc'] + headers = module.params['headers'] + from_name = module.params['from_name'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + html_body = module.params['html_body'] + attachments = module.params['attachments'] + + sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] + + if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID: + reason = 'when using any of the following arguments: ' \ 'api_key, bcc, cc, headers, from_name, html_body, attachments' + module.fail_json(msg=missing_required_lib('sendgrid', reason=reason), + exception=SENDGRID_IMP_ERR) + + response, info = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body, attachments=attachments, + bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) + + if not HAS_SENDGRID: + if info['status'] != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) + else: + if response != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message']) + + module.exit_json(msg=subject, changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py deleted file mode 120000 index 6f91c997ad..0000000000 --- a/plugins/modules/sensu_check.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/sensu/sensu_check.py \ No newline at end of file diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py new file mode 100644 index 0000000000..10763992b0 --- /dev/null +++ b/plugins/modules/sensu_check.py @@ -0,0 +1,377 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: sensu_check +short_description: Manage Sensu checks +description: + - Manage the checks that should be run on a machine by I(Sensu). + - Most options do not have a default and are not added to the check definition unless specified. + - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module; they are simply specified + for your convenience.
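+# Implementation note: the module edits the JSON file named by O(path) in place, merging or removing only the check +# named by O(name), and returns a human-readable list of 'reasons' for any change it makes (see the sensu_check() +# helper below).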
+deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of the check. + - This is the key that is used to determine whether a check exists. + required: true + state: + type: str + description: + - Whether the check should be present or not. + choices: ['present', 'absent'] + default: present + path: + type: str + description: + - Path to the JSON file of the check to be added/removed. + - It is created if it does not exist (unless O(state=absent)). + - The parent folders need to exist when O(state=present), otherwise an error is thrown. + default: /etc/sensu/conf.d/checks.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow + clobbered it incorrectly. + type: bool + default: false + command: + type: str + description: + - Path to the sensu check to run (not required when O(state=absent)). + handlers: + type: list + elements: str + description: + - List of handlers to notify when the check fails. + subscribers: + type: list + elements: str + description: + - List of subscribers/channels this check should run for. + - See sensu_subscribers to subscribe a machine to a channel. + interval: + type: int + description: + - Check interval in seconds. + timeout: + type: int + description: + - Timeout for the check. + - If not specified, it defaults to 10. + ttl: + type: int + description: + - Time to live in seconds until the check is considered stale. + handle: + description: + - Whether the check should be handled or not. + - Default is V(false). + type: bool + subdue_begin: + type: str + description: + - When to disable handling of check failures. + subdue_end: + type: str + description: + - When to enable handling of check failures. + dependencies: + type: list + elements: str + description: + - Other checks this one depends on. + - If dependencies fail handling of this check is disabled. + metric: + description: + - Whether the check is a metric. + type: bool + default: false + standalone: + description: + - Whether the check should be scheduled by the sensu client or server. + - This option obviates the need for specifying the O(subscribers) option. + - Default is V(false). + type: bool + publish: + description: + - Whether the check should be scheduled at all. + - You can still issue it using the sensu API. + - Default is V(false). + type: bool + occurrences: + type: int + description: + - Number of event occurrences before the handler should take action. + - If not specified, defaults to 1. + refresh: + type: int + description: + - Number of seconds handlers should wait before taking second action. + aggregate: + description: + - Classifies the check as an aggregate check, making it available using the aggregate API. + - Default is V(false). + type: bool + low_flap_threshold: + type: int + description: + - The low threshold for flap detection. + high_flap_threshold: + type: int + description: + - The high threshold for flap detection. + custom: + type: dict + description: + - A hash/dictionary of custom parameters for mixing to the configuration. + - You cannot rewrite other module parameters using this. 
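+ # For orientation, a minimal on-disk result (illustrative, hand-written rather than captured module output) for the + # first example below would be: + # {"checks": {"cpu_load": {"command": "/etc/sensu/plugins/system/cpu-mpstat-metrics.rb", + # "interval": 60, "subscribers": ["common"], "handlers": ["relay"], "type": "metric"}}}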
+ source: + type: str + description: + - The check source, used to create a JIT Sensu client for an external resource (for example a network switch). +author: "Anders Ingemann (@andsens)" +""" + +EXAMPLES = r""" +# Fetch metrics about the CPU load every 60 seconds; +# the Sensu server has a handler called 'relay' which forwards stats to Graphite +- name: Get cpu metrics + community.general.sensu_check: + name: cpu_load + command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb + metric: true + handlers: relay + subscribers: common + interval: 60 + +# Check whether nginx is running +- name: Check nginx process + community.general.sensu_check: + name: nginx_running + command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid + handlers: default + subscribers: nginx + interval: 60 + +# Stop monitoring the disk capacity. +# Note that the check still shows up in the Sensu dashboard; +# to remove it completely you need to issue a DELETE request to the Sensu API. +- name: Check disk + community.general.sensu_check: + name: check_disk_capacity + state: absent +""" + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def sensu_check(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + stream = None + try: + try: + stream = open(path, 'r') + config = json.load(stream) + except IOError as e: + if e.errno == 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + finally: + if stream: + stream.close() + + if 'checks' not in config: + if state == 'absent': + reasons.append('`checks\' section did not exist and state is `absent\'') + return changed, reasons + config['checks'] = {} + changed = True + reasons.append('`checks\' section did not exist') + + if state == 'absent': + if name in config['checks']: + del config['checks'][name] + changed = True + reasons.append('check was present and state is `absent\'') + + if state == 'present': + if name not in config['checks']: + check = {} + config['checks'][name] = check + changed = True + reasons.append('check was absent and state is `present\'') + else: + check = config['checks'][name] + simple_opts = ['command', + 'handlers', + 'subscribers', + 'interval', + 'timeout', + 'ttl', + 'handle', + 'dependencies', + 'standalone', + 'publish', + 'occurrences', + 'refresh', + 'aggregate', + 'low_flap_threshold', + 'high_flap_threshold', + 'source', + ] + for opt in simple_opts: + if module.params[opt] is not None: + if opt not in check or check[opt] != module.params[opt]: + check[opt] = module.params[opt] + changed = True + reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) + else: + if opt in check: + del check[opt] + changed = True + reasons.append('`{opt}\' was removed'.format(opt=opt)) + + if module.params['custom']: + # Merge user-supplied custom parameters into the check, refusing to shadow standard options + custom_params = module.params['custom'] + overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']) + if overwritten_fields: + msg = 'You cannot overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields)) + module.fail_json(msg=msg) + + for k, v in custom_params.items(): + if k in config['checks'][name]: + if config['checks'][name][k] != v: + changed = True + reasons.append('`custom param {opt}\' was changed'.format(opt=k)) + else: + changed = True + reasons.append('`custom param {opt}\' was added'.format(opt=k)) + check[k] = v + simple_opts += custom_params.keys() + + # Remove obsolete custom params + for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']): + changed = True + reasons.append('`custom param {opt}\' was deleted'.format(opt=opt)) + del check[opt] + + if module.params['metric']: + if 'type' not in check or check['type'] != 'metric': + check['type'] = 'metric' + changed = True + reasons.append('`type\' was not defined or not `metric\'') + if not module.params['metric'] and 'type' in check: + del check['type'] + changed = True + reasons.append('`type\' was defined') + + if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: + subdue = {'begin': module.params['subdue_begin'], + 'end': module.params['subdue_end'], + } + if 'subdue' not in check or check['subdue'] != subdue: + check['subdue'] = subdue + changed = True + reasons.append('`subdue\' did not exist or was different') + else: + if 'subdue' in check: + del check['subdue'] + changed = True + reasons.append('`subdue\' was removed') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + try: + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') + except IOError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + finally: + if stream: + stream.close() + + return changed, reasons + + +def main(): + + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': False}, + 'command': {'type': 'str'}, + 'handlers': {'type': 'list', 'elements': 'str'}, + 'subscribers': {'type': 'list', 'elements': 'str'}, + 'interval': {'type': 'int'}, + 'timeout': {'type': 'int'}, + 'ttl': {'type': 'int'}, + 'handle': {'type': 'bool'}, + 'subdue_begin': {'type': 'str'}, + 'subdue_end': {'type': 'str'}, + 'dependencies': {'type': 'list', 'elements': 'str'}, + 'metric': {'type': 'bool', 'default': False}, + 'standalone': {'type': 'bool'}, + 'publish': {'type': 'bool'}, + 'occurrences': {'type': 'int'}, + 'refresh': {'type': 'int'}, + 'aggregate': {'type': 'bool'}, + 'low_flap_threshold': {'type': 'int'}, + 'high_flap_threshold': {'type': 'int'}, + 'custom': {'type': 'dict'}, + 'source': {'type': 'str'}, + } + + required_together = [['subdue_begin', 'subdue_end']] + + module = AnsibleModule(argument_spec=arg_spec, + required_together=required_together, + supports_check_mode=True) + if module.params['state'] != 'absent' and module.params['command'] is None: + module.fail_json(msg="missing required arguments: command") + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_check(module, path, name, state, backup) + + module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sensu_client.py
b/plugins/modules/sensu_client.py deleted file mode 120000 index 41722d7a03..0000000000 --- a/plugins/modules/sensu_client.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/sensu/sensu_client.py \ No newline at end of file diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py new file mode 100644 index 0000000000..a41b5db9fe --- /dev/null +++ b/plugins/modules/sensu_client.py @@ -0,0 +1,278 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: sensu_client +author: "David Moreau Simard (@dmsimard)" +short_description: Manages Sensu client configuration +description: + - Manages Sensu client configuration. + - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html). +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Whether the client should be present or not. + choices: ['present', 'absent'] + default: present + name: + type: str + description: + - A unique name for the client. The name cannot contain special characters or spaces. + - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu). + address: + type: str + description: + - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. + - If not specified, it defaults to a non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided + by Sensu). + subscriptions: + type: list + elements: str + description: + - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (for example V(webserver)). + - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. + - The subscriptions array items must be strings. + safe_mode: + description: + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request + and execute the check. + type: bool + default: false + redact: + type: list + elements: str + description: + - Client definition attributes to redact (values) when logging and sending client keepalives. + socket: + type: dict + description: + - The socket definition scope, used to configure the Sensu client socket. + keepalives: + description: + - If Sensu should monitor keepalives for this client. + type: bool + default: true + keepalive: + type: dict + description: + - The keepalive definition scope, used to configure Sensu client keepalive behavior (for example keepalive thresholds + and so on). + registration: + type: dict + description: + - The registration definition scope, used to configure Sensu registration event handlers. + deregister: + description: + - If a deregistration event should be created when the Sensu client process stops. + - Default is V(false).
+ type: bool + deregistration: + type: dict + description: + - The deregistration definition scope, used to configure automated Sensu client de-registration. + ec2: + type: dict + description: + - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only). + chef: + type: dict + description: + - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only). + puppet: + type: dict + description: + - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only). + servicenow: + type: dict + description: + - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users + only). +""" + +EXAMPLES = r""" +# Minimum possible configuration +- name: Configure Sensu client + community.general.sensu_client: + subscriptions: + - default + +# With customization +- name: Configure Sensu client + community.general.sensu_client: + name: "{{ ansible_fqdn }}" + address: "{{ ansible_default_ipv4['address'] }}" + subscriptions: + - default + - webserver + redact: + - password + socket: + bind: 127.0.0.1 + port: 3030 + keepalive: + thresholds: + warning: 180 + critical: 300 + handlers: + - email + custom: + - broadcast: irc + occurrences: 3 + register: client + notify: + - Restart sensu-client + +- name: Secure Sensu client configuration file + ansible.builtin.file: + path: "{{ client['file'] }}" + owner: "sensu" + group: "sensu" + mode: "0600" + +- name: Delete the Sensu client configuration + community.general.sensu_client: + state: "absent" +""" + +RETURN = r""" +config: + description: Effective client configuration, when state is present. + returned: success + type: dict + sample: + { + "name": "client", + "subscriptions": [ + "default" + ] + } +file: + description: Path to the client configuration file. 
+ returned: success + type: str + sample: "/etc/sensu/conf.d/client.json" +""" + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(type='str', ), + address=dict(type='str', ), + subscriptions=dict(type='list', elements="str"), + safe_mode=dict(type='bool', default=False), + redact=dict(type='list', elements="str"), + socket=dict(type='dict'), + keepalives=dict(type='bool', default=True), + keepalive=dict(type='dict'), + registration=dict(type='dict'), + deregister=dict(type='bool'), + deregistration=dict(type='dict'), + ec2=dict(type='dict'), + chef=dict(type='dict'), + puppet=dict(type='dict'), + servicenow=dict(type='dict') + ), + required_if=[ + ['state', 'present', ['subscriptions']] + ] + ) + + state = module.params['state'] + path = "/etc/sensu/conf.d/client.json" + + if state == 'absent': + if os.path.exists(path): + if module.check_mode: + msg = '{path} would have been deleted'.format(path=path) + module.exit_json(msg=msg, changed=True) + else: + try: + os.remove(path) + msg = '{path} deleted successfully'.format(path=path) + module.exit_json(msg=msg, changed=True) + except OSError as e: + msg = 'Exception when trying to delete {path}: {exception}' + module.fail_json( + msg=msg.format(path=path, exception=str(e))) + else: + # Idempotency: it is okay if the file doesn't exist + msg = '{path} already does not exist'.format(path=path) + module.exit_json(msg=msg) + + # Build client configuration from module arguments + config = {'client': {}} + args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact', + 'socket', 'keepalives', 'keepalive', 'registration', 'deregister', + 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow'] + + for arg in args: + if arg in module.params and module.params[arg] is not None: + config['client'][arg] = module.params[arg] + + # Load the current config, if there is one, so we can compare + current_config = None + try: + current_config = json.load(open(path, 'r')) + except (IOError, ValueError): + # File either doesn't exist or it is invalid JSON + pass + + if current_config is not None and current_config == config: + # Config is the same, let's not change anything + module.exit_json(msg='Client configuration is already up to date', + config=config['client'], + file=path) + + # Validate that directory exists before trying to write to it + if not module.check_mode and not os.path.exists(os.path.dirname(path)): + try: + os.makedirs(os.path.dirname(path)) + except OSError as e: + module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), + str(e))) + + if module.check_mode: + module.exit_json(msg='Client configuration would have been updated', + changed=True, + config=config['client'], + file=path) + + try: + with open(path, 'w') as client: + client.write(json.dumps(config, indent=4)) + module.exit_json(msg='Client configuration updated', + changed=True, + config=config['client'], + file=path) + except (OSError, IOError) as e: + module.fail_json(msg='Unable to write file {0}: {1}'.format(path, + str(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py deleted file mode 120000 index fa51d42ea6..0000000000 --- a/plugins/modules/sensu_handler.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/sensu/sensu_handler.py \ No newline at end of file diff --git 
a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py new file mode 100644 index 0000000000..26ce01d313 --- /dev/null +++ b/plugins/modules/sensu_handler.py @@ -0,0 +1,286 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: sensu_handler +author: "David Moreau Simard (@dmsimard)" +short_description: Manages Sensu handler configuration +description: + - Manages Sensu handler configuration. + - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html). +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Whether the handler should be present or not. + choices: ['present', 'absent'] + default: present + name: + type: str + description: + - A unique name for the handler. The name cannot contain special characters or spaces. + required: true + type: + type: str + description: + - The handler type. + choices: ['pipe', 'tcp', 'udp', 'transport', 'set'] + filter: + type: str + description: + - The Sensu event filter (name) to use when filtering events for the handler. + filters: + type: list + elements: str + description: + - An array of Sensu event filters (names) to use when filtering events for the handler. + - Each array item must be a string. + severities: + type: list + elements: str + description: + - An array of check result severities the handler handles. + - 'NOTE: event resolution bypasses this filtering.' + - "Example: [ 'warning', 'critical', 'unknown' ]." + mutator: + type: str + description: + - The Sensu event mutator (name) to use to mutate event data for the handler. + timeout: + type: int + description: + - The handler execution duration timeout in seconds (hard stop). + - Only used by pipe and tcp handler types. + default: 10 + handle_silenced: + description: + - If events matching one or more silence entries should be handled. + type: bool + default: false + handle_flapping: + description: + - If events in the flapping state should be handled. + type: bool + default: false + command: + type: str + description: + - The handler command to be executed. + - The event data is passed to the process using STDIN. + - 'NOTE: the O(command) attribute is only required for Pipe handlers (that is, handlers configured with O(type=pipe)).' + socket: + type: dict + description: + - The socket definition scope, used to configure the TCP/UDP handler socket. + - 'NOTE: the O(socket) attribute is only required for TCP/UDP handlers (that is, handlers configured with O(type=tcp) + or O(type=udp)).' + pipe: + type: dict + description: + - The pipe definition scope, used to configure the Sensu transport pipe. + - 'NOTE: the O(pipe) attribute is only required for Transport handlers (that is, handlers configured with O(type=transport)).' + handlers: + type: list + elements: str + description: + - An array of Sensu event handlers (names) to use for events using the handler set. 
+ - 'NOTE: the O(handlers) attribute is only required for handler sets (that is, handlers configured with O(type=set)).' +""" + +EXAMPLES = r""" +# Configure a handler that sends event data as STDIN (pipe) +- name: Configure IRC Sensu handler + community.general.sensu_handler: + name: "irc_handler" + type: "pipe" + command: "/usr/local/bin/notify-irc.sh" + severities: + - "ok" + - "critical" + - "warning" + - "unknown" + timeout: 15 + notify: + - Restart sensu-client + - Restart sensu-server + +# Delete a handler +- name: Delete IRC Sensu handler + community.general.sensu_handler: + name: "irc_handler" + state: "absent" + +# Example of a TCP handler +- name: Configure TCP Sensu handler + community.general.sensu_handler: + name: "tcp_handler" + type: "tcp" + timeout: 30 + socket: + host: "10.0.1.99" + port: 4444 + register: handler + notify: + - Restart sensu-client + - Restart sensu-server + +- name: Secure Sensu handler configuration file + ansible.builtin.file: + path: "{{ handler['file'] }}" + owner: "sensu" + group: "sensu" + mode: "0600" +""" + +RETURN = r""" +config: + description: Effective handler configuration, when state is present. + returned: success + type: dict + sample: + { + "name": "irc", + "type": "pipe", + "command": "/usr/local/bin/notify-irc.sh" + } +file: + description: Path to the handler configuration file. + returned: success + type: str + sample: "/etc/sensu/conf.d/handlers/irc.json" +name: + description: Name of the handler. + returned: success + type: str + sample: "irc" +""" + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(type='str', required=True), + type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']), + filter=dict(type='str'), + filters=dict(type='list', elements='str'), + severities=dict(type='list', elements='str'), + mutator=dict(type='str'), + timeout=dict(type='int', default=10), + handle_silenced=dict(type='bool', default=False), + handle_flapping=dict(type='bool', default=False), + command=dict(type='str'), + socket=dict(type='dict'), + pipe=dict(type='dict'), + handlers=dict(type='list', elements='str'), + ), + required_if=[ + ['state', 'present', ['type']], + ['type', 'pipe', ['command']], + ['type', 'tcp', ['socket']], + ['type', 'udp', ['socket']], + ['type', 'transport', ['pipe']], + ['type', 'set', ['handlers']] + ] + ) + + state = module.params['state'] + name = module.params['name'] + path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name) + + if state == 'absent': + if os.path.exists(path): + if module.check_mode: + msg = '{path} would have been deleted'.format(path=path) + module.exit_json(msg=msg, changed=True) + else: + try: + os.remove(path) + msg = '{path} deleted successfully'.format(path=path) + module.exit_json(msg=msg, changed=True) + except OSError as e: + msg = 'Exception when trying to delete {path}: {exception}' + module.fail_json( + msg=msg.format(path=path, exception=str(e))) + else: + # Idempotency: it is okay if the file doesn't exist + msg = '{path} already does not exist'.format(path=path) + module.exit_json(msg=msg) + + # Build handler configuration from module arguments + config = {'handlers': {name: {}}} + args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout', + 'handle_silenced', 'handle_flapping', 'command', 'socket', + 'pipe', 'handlers'] + + for arg in args: + if 
arg in module.params and module.params[arg] is not None: + config['handlers'][name][arg] = module.params[arg] + + # Load the current config, if there is one, so we can compare + current_config = None + try: + current_config = json.load(open(path, 'r')) + except (IOError, ValueError): + # File either doesn't exist or it is invalid JSON + pass + + if current_config is not None and current_config == config: + # Config is the same, let's not change anything + module.exit_json(msg='Handler configuration is already up to date', + config=config['handlers'][name], + file=path, + name=name) + + # Validate that directory exists before trying to write to it + if not module.check_mode and not os.path.exists(os.path.dirname(path)): + try: + os.makedirs(os.path.dirname(path)) + except OSError as e: + module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), + str(e))) + + if module.check_mode: + module.exit_json(msg='Handler configuration would have been updated', + changed=True, + config=config['handlers'][name], + file=path, + name=name) + + try: + with open(path, 'w') as handler: + handler.write(json.dumps(config, indent=4)) + module.exit_json(msg='Handler configuration updated', + changed=True, + config=config['handlers'][name], + file=path, + name=name) + except (OSError, IOError) as e: + module.fail_json(msg='Unable to write file {0}: {1}'.format(path, + str(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py deleted file mode 120000 index 48b0793e56..0000000000 --- a/plugins/modules/sensu_silence.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/sensu/sensu_silence.py \ No newline at end of file diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py new file mode 100644 index 0000000000..f3270ab506 --- /dev/null +++ b/plugins/modules/sensu_silence.py @@ -0,0 +1,302 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Steven Bambling +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: sensu_silence +author: Steven Bambling (@smbambling) +short_description: Manage Sensu silence entries +description: + - Create and clear (delete) silence entries using the Sensu API for subscriptions and checks. +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + check: + type: str + description: + - Specifies the check which the silence entry applies to. + creator: + type: str + description: + - Specifies the entity responsible for this entry. + expire: + type: int + description: + - If specified, the silence entry is automatically cleared after this number of seconds. + expire_on_resolve: + description: + - If specified as true, the silence entry is automatically cleared once the condition it is silencing is resolved. + type: bool + reason: + type: str + description: + - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was + created. + state: + type: str + description: + - Specifies to create or clear (delete) a silence entry using the Sensu API.
+ default: present + choices: ['present', 'absent'] + subscription: + type: str + description: + - Specifies the subscription which the silence entry applies to. + - To create a silence entry for a client prepend C(client:) to the client name. Example - C(client:server1.example.dev). + required: true + url: + type: str + description: + - Specifies the URL of the Sensu monitoring host server. + required: false + default: http://127.0.0.1:4567 +""" + +EXAMPLES = r""" +# Silence ALL checks for a given client +- name: Silence server1.example.dev + community.general.sensu_silence: + subscription: client:server1.example.dev + creator: "{{ ansible_user_id }}" + reason: Performing maintenance + +# Silence specific check for a client +- name: Silence CPU_Usage check for server1.example.dev + community.general.sensu_silence: + subscription: client:server1.example.dev + check: CPU_Usage + creator: "{{ ansible_user_id }}" + reason: Investigation alert issue + +# Silence multiple clients from a dict + silence: + server1.example.dev: + reason: 'Deployment in progress' + server2.example.dev: + reason: 'Deployment in progress' + +- name: Silence several clients from a dict + community.general.sensu_silence: + subscription: "client:{{ item.key }}" + reason: "{{ item.value.reason }}" + creator: "{{ ansible_user_id }}" + with_dict: "{{ silence }}" +""" + +RETURN = r""" +""" + +import json + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def query(module, url, check, subscription): + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced' + + request_data = { + 'check': check, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='GET', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] == 500: + module.fail_json( + msg="Failed to query silence %s. Reason: %s" % (subscription, info) + ) + + try: + json_out = json.loads(to_native(response.read())) + except Exception: + json_out = "" + + return False, json_out, False + + +def clear(module, url, check, subscription): + # Test if silence exists before clearing + (rc, out, changed) = query(module, url, check, subscription) + + d = {i['subscription']: i['check'] for i in out} + subscription_exists = subscription in d + if check and subscription_exists: + exists = (check == d[subscription]) + else: + exists = subscription_exists + + # If check/subscription doesn't exist + # exit with changed state of False + if not exists: + return False, out, changed + + # module.check_mode is inherited from the AnsibleModule class + if not module.check_mode: + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced/clear' + + request_data = { + 'check': check, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='POST', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] != 204: + module.fail_json( + msg="Failed to silence %s. 
Reason: %s" % (subscription, info) + ) + + try: + json_out = json.loads(to_native(response.read())) + except Exception: + json_out = "" + + return False, json_out, True + return False, out, True + + +def create( + module, url, check, creator, expire, + expire_on_resolve, reason, subscription): + (rc, out, changed) = query(module, url, check, subscription) + for i in out: + if i['subscription'] == subscription: + if ( + (check is None or check == i['check']) and + ( + creator == '' or + creator == i['creator']) and + ( + reason == '' or + reason == i['reason']) and + ( + expire is None or expire == i['expire']) and + ( + expire_on_resolve is None or + expire_on_resolve == i['expire_on_resolve'] + ) + ): + return False, out, False + + # module.check_mode is inherited from the AnsibleModule class + if not module.check_mode: + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced' + + request_data = { + 'check': check, + 'creator': creator, + 'expire': expire, + 'expire_on_resolve': expire_on_resolve, + 'reason': reason, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='POST', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] != 201: + module.fail_json( + msg="Failed to silence %s. Reason: %s" % + (subscription, info['msg']) + ) + + try: + json_out = json.loads(to_native(response.read())) + except Exception: + json_out = "" + + return False, json_out, True + return False, out, True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + check=dict(), + creator=dict(), + expire=dict(type='int'), + expire_on_resolve=dict(type='bool'), + reason=dict(), + state=dict(default='present', choices=['present', 'absent']), + subscription=dict(required=True), + url=dict(default='http://127.0.0.1:4567'), + ), + supports_check_mode=True + ) + + url = module.params['url'] + check = module.params['check'] + creator = module.params['creator'] + expire = module.params['expire'] + expire_on_resolve = module.params['expire_on_resolve'] + reason = module.params['reason'] + subscription = module.params['subscription'] + state = module.params['state'] + + if state == 'present': + (rc, out, changed) = create( + module, url, check, creator, + expire, expire_on_resolve, reason, subscription + ) + + if state == 'absent': + (rc, out, changed) = clear(module, url, check, subscription) + + if rc != 0: + module.fail_json(msg="failed", result=out) + module.exit_json(msg="success", result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py deleted file mode 120000 index c433ec5670..0000000000 --- a/plugins/modules/sensu_subscription.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/sensu/sensu_subscription.py \ No newline at end of file diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py new file mode 100644 index 0000000000..7cd7668e98 --- /dev/null +++ b/plugins/modules/sensu_subscription.py @@ -0,0 +1,161 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: sensu_subscription +short_description: Manage Sensu subscriptions +description: +
- Manage which I(sensu channels) a machine should subscribe to. +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of the channel. + required: true + state: + type: str + description: + - Whether the machine should subscribe or unsubscribe from the channel. + choices: ['present', 'absent'] + required: false + default: present + path: + type: str + description: + - Path to the subscriptions JSON file. + required: false + default: /etc/sensu/conf.d/subscriptions.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow + clobbered it incorrectly. + type: bool + required: false + default: false +requirements: [] +author: Anders Ingemann (@andsens) +""" + +RETURN = r""" +reasons: + description: The reasons why the module changed or did not change something. + returned: success + type: list + sample: ["channel subscription was absent and state is 'present'"] +""" + +EXAMPLES = r""" +# Subscribe to the nginx channel +- name: Subscribe to nginx checks + community.general.sensu_subscription: name=nginx + +# Unsubscribe from the common checks channel +- name: Unsubscribe from common checks + community.general.sensu_subscription: name=common state=absent +""" + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def sensu_subscription(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + try: + config = json.load(open(path)) + except IOError as e: + if e.errno == 2: # File not found, non-fatal + if state == 'absent': + reasons.append("file did not exist and state is 'absent'") + return changed, reasons + config = {} + else: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + + if 'client' not in config: + if state == 'absent': + reasons.append("'client' did not exist and state is 'absent'") + return changed, reasons + config['client'] = {} + changed = True + reasons.append("'client' did not exist") + + if 'subscriptions' not in config['client']: + if state == 'absent': + reasons.append("'client.subscriptions' did not exist and state is 'absent'") + return changed, reasons + config['client']['subscriptions'] = [] + changed = True + reasons.append("'client.subscriptions' did not exist") + + if name not in config['client']['subscriptions']: + if state == 'absent': + reasons.append("channel subscription was absent") + return changed, reasons + config['client']['subscriptions'].append(name) + changed = True + reasons.append("channel subscription was absent and state is 'present'") + else: + if state == 'absent': + config['client']['subscriptions'].remove(name) + changed = True + reasons.append("channel subscription was present and state is 'absent'") + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + open(path, 'w').write(json.dumps(config, indent=2) + '\n') + except IOError as e: + module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)), + 
exception=traceback.format_exc()) + + return changed, reasons + + +def main(): + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': False}, + } + + module = AnsibleModule(argument_spec=arg_spec, + supports_check_mode=True) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_subscription(module, path, name, state, backup) + + module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/seport.py b/plugins/modules/seport.py deleted file mode 120000 index 120d2c387f..0000000000 --- a/plugins/modules/seport.py +++ /dev/null @@ -1 +0,0 @@ -./system/seport.py \ No newline at end of file diff --git a/plugins/modules/seport.py b/plugins/modules/seport.py new file mode 100644 index 0000000000..7e3a2690d2 --- /dev/null +++ b/plugins/modules/seport.py @@ -0,0 +1,328 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: seport +short_description: Manages SELinux network port type definitions +description: + - Manages SELinux network port type definitions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + ports: + description: + - Ports or port ranges. + - Can be a list (since 2.6) or comma separated string. + type: list + elements: str + required: true + proto: + description: + - Protocol for the specified port. + type: str + required: true + choices: [tcp, udp] + setype: + description: + - SELinux type for the specified port. + type: str + required: true + state: + description: + - Whether the SELinux port mapping should be present or absent. + type: str + choices: [absent, present] + default: present + reload: + description: + - Reload SELinux policy after commit. + type: bool + default: true + ignore_selinux_state: + description: + - Run independent of selinux runtime state. + type: bool + default: false + local: + description: + - Work with local modifications only. + type: bool + default: false + version_added: 5.6.0 +notes: + - The changes are persistent across reboots. + - Not tested on any Debian based system.
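The sensu_subscription change above boils down to a guarded read-modify-write of one JSON file. A minimal standalone sketch of that cycle, with an illustrative path and channel name rather than the module's real defaults:

```python
# Minimal sketch of the read-modify-write cycle sensu_subscription performs.
# The path and channel name here are illustrative stand-ins, not module defaults.
import json
import os

path = '/tmp/subscriptions.json'
name = 'nginx'

# Load the existing config, tolerating a missing file (the errno == 2 case above)
config = {}
if os.path.exists(path):
    with open(path) as f:
        config = json.load(f)

# Create the nested client/subscriptions structure on demand
subs = config.setdefault('client', {}).setdefault('subscriptions', [])

# Only write when the subscription list actually changes, which is what
# keeps the operation idempotent
if name not in subs:
    subs.append(name)
    with open(path, 'w') as f:
        f.write(json.dumps(config, indent=2) + '\n')
```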
+requirements: + - libselinux-python + - policycoreutils-python +author: + - Dan Keder (@dankeder) +""" + +EXAMPLES = r""" +- name: Allow Apache to listen on tcp port 8888 + community.general.seport: + ports: 8888 + proto: tcp + setype: http_port_t + state: present + +- name: Allow sshd to listen on tcp port 8991 + community.general.seport: + ports: 8991 + proto: tcp + setype: ssh_port_t + state: present + +- name: Allow memcached to listen on tcp ports 10000-10100 and 10112 + community.general.seport: + ports: 10000-10100,10112 + proto: tcp + setype: memcache_port_t + state: present + +- name: Allow memcached to listen on tcp ports 10000-10100 and 10112 + community.general.seport: + ports: + - 10000-10100 + - 10112 + proto: tcp + setype: memcache_port_t + state: present + +- name: Remove tcp port 22 local modification if exists + community.general.seport: + ports: 22 + proto: tcp + setype: ssh_port_t + state: absent + local: true +""" + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def get_runtime_status(ignore_selinux_state=False): + return ignore_selinux_state or selinux.is_selinux_enabled() + + +def semanage_port_get_ports(seport, setype, proto, local): + """ Get the list of ports that have the specified type definition. + + :param seport: Instance of seobject.portRecords + + :type setype: str + :param setype: SELinux type. + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: list + :return: List of ports that have the specified SELinux type. + """ + records = seport.get_all_by_type(locallist=local) + if (setype, proto) in records: + return records[(setype, proto)] + else: + return [] + + +def semanage_port_get_type(seport, port, proto): + """ Get the SELinux type of the specified port. + + :param seport: Instance of seobject.portRecords + + :type port: str + :param port: Port or port range (example: "8080", "8080-9090") + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: tuple + :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found. + """ + if isinstance(port, str): + ports = port.split('-', 1) + if len(ports) == 1: + ports.extend(ports) + else: + ports = (port, port) + + key = (int(ports[0]), int(ports[1]), proto) + + records = seport.get_all() + return records.get(key) + + +def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore='', local=False): + """ Add SELinux port type definition to the policy. + + :type module: AnsibleModule + :param module: Ansible module + + :type ports: list + :param ports: List of ports and port ranges to add (e.g.
["8080", "8080-9090"]) + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :type setype: str + :param setype: SELinux type + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + change = False + try: + seport = seobject.portRecords(sestore) + seport.set_reload(do_reload) + ports_by_type = semanage_port_get_ports(seport, setype, proto, local) + for port in ports: + if port in ports_by_type: + continue + + change = True + if module.check_mode: + continue + port_type = semanage_port_get_type(seport, port, proto) + if port_type is None: + seport.add(port, proto, serange, setype) + else: + seport.modify(port, proto, serange, setype) + + except (ValueError, IOError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_port_del(module, ports, proto, setype, do_reload, sestore='', local=False): + """ Delete SELinux port type definition from the policy. + + :type module: AnsibleModule + :param module: Ansible module + + :type ports: list + :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"]) + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :type setype: str + :param setype: SELinux type. + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + change = False + try: + seport = seobject.portRecords(sestore) + seport.set_reload(do_reload) + ports_by_type = semanage_port_get_ports(seport, setype, proto, local) + for port in ports: + if port in ports_by_type: + change = True + if not module.check_mode: + seport.delete(port, proto) + + except (ValueError, IOError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + ports=dict(type='list', elements='str', required=True), + proto=dict(type='str', required=True, choices=['tcp', 'udp']), + setype=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + local=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + ports = module.params['ports'] + proto = module.params['proto'] + setype = module.params['setype'] + state = module.params['state'] + do_reload = module.params['reload'] + local = module.params['local'] + + result = { + 'ports': ports, + 'proto': proto, + 'setype': setype, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_port_add(module, ports, 
proto, setype, do_reload, local=local) + elif state == 'absent': + result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload, local=local) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py deleted file mode 120000 index ca7d04ba82..0000000000 --- a/plugins/modules/serverless.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/serverless.py \ No newline at end of file diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py new file mode 100644 index 0000000000..0ea2eb3e1f --- /dev/null +++ b/plugins/modules/serverless.py @@ -0,0 +1,220 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Ryan Scott Brown +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: serverless +short_description: Manages a Serverless Framework project +description: + - Provides support for managing Serverless Framework (U(https://serverless.com/)) project deployments and stacks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Goal state of given stage/project. + type: str + choices: [absent, present] + default: present + serverless_bin_path: + description: + - The path of a serverless framework binary relative to the O(service_path), for example V(node_module/.bin/serverless). + type: path + service_path: + description: + - The path to the root of the Serverless Service to be operated on. + type: path + required: true + stage: + description: + - The name of the serverless framework project stage to deploy to. + - This uses the serverless framework default "dev". + type: str + default: '' + region: + description: + - AWS region to deploy the service to. + - This parameter defaults to V(us-east-1). + type: str + default: '' + deploy: + description: + - Whether or not to deploy artifacts after building them. + - When this option is V(false) all the functions are built, but no stack update is run to send them out. + - This is mostly useful for generating artifacts to be stored/deployed elsewhere. + type: bool + default: true + force: + description: + - Whether or not to force full deployment, equivalent to serverless C(--force) option. + type: bool + default: false + verbose: + description: + - Shows all stack events during deployment, and display any Stack Output. + type: bool + default: false +notes: + - Currently, the C(serverless) command must be in the path of the node executing the task. In the future this may be a flag. 
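Stepping back to the seport module above: its add-versus-modify decision hinges on whether semanage already has a record for the port. A rough sketch of that lookup, assuming an SELinux-enabled host with the seobject bindings installed (the key layouts mirror semanage_port_get_ports and semanage_port_get_type above):

```python
# Rough sketch: inspect SELinux port records the way seport does.
# Assumes an SELinux-enabled host with the 'seobject' Python bindings.
import seobject

records = seobject.portRecords('')  # '' selects the default store, as in the module

# get_all() is keyed by (low, high, proto); a hit means seport calls modify()
print(records.get_all().get((8888, 8888, 'tcp')))

# get_all_by_type() is keyed by (setype, proto); seport uses it to skip ports
# that already carry the requested type
print(records.get_all_by_type(locallist=False).get(('http_port_t', 'tcp'), []))
```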
+requirements: + - serverless + - PyYAML +author: + - Ryan Scott Brown (@ryansb) +""" + +EXAMPLES = r""" +- name: Basic deploy of a service + community.general.serverless: + service_path: '{{ project_dir }}' + state: present + +- name: Deploy a project, then pull its resource list back into Ansible + community.general.serverless: + stage: dev + region: us-east-1 + service_path: '{{ project_dir }}' + register: sls + +# The cloudformation stack is always named the same as the full service, so the +# cloudformation_info module can get a full list of the stack resources, as +# well as stack events and outputs +- cloudformation_info: + region: us-east-1 + stack_name: '{{ sls.service_name }}' + stack_resources: true + +- name: Deploy a project using a locally installed serverless binary + community.general.serverless: + stage: dev + region: us-east-1 + service_path: '{{ project_dir }}' + serverless_bin_path: node_modules/.bin/serverless +""" + +RETURN = r""" +service_name: + type: str + description: The service name specified in the serverless.yml that was just deployed. + returned: always + sample: my-fancy-service-dev +state: + type: str + description: Whether the stack for the serverless project is present/absent. + returned: always +command: + type: str + description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module. + returned: always + sample: serverless deploy --stage production +""" + +import os + +try: + import yaml + HAS_YAML = True +except ImportError: + HAS_YAML = False + +from ansible.module_utils.basic import AnsibleModule + + +def read_serverless_config(module): + path = module.params.get('service_path') + full_path = os.path.join(path, 'serverless.yml') + + try: + with open(full_path) as sls_config: + config = yaml.safe_load(sls_config.read()) + return config + except IOError as e: + module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(full_path, str(e))) + + +def get_service_name(module, stage): + config = read_serverless_config(module) + if config.get('service') is None: + module.fail_json(msg="Could not read `service` key from serverless.yml file") + + if stage: + return "{0}-{1}".format(config['service'], stage) + + return "{0}-{1}".format(config['service'], config.get('stage', 'dev')) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + service_path=dict(type='path', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + region=dict(type='str', default=''), + stage=dict(type='str', default=''), + deploy=dict(type='bool', default=True), + serverless_bin_path=dict(type='path'), + force=dict(type='bool', default=False), + verbose=dict(type='bool', default=False), + ), + ) + + if not HAS_YAML: + module.fail_json(msg='yaml is required for this module') + + service_path = module.params.get('service_path') + state = module.params.get('state') + region = module.params.get('region') + stage = module.params.get('stage') + deploy = module.params.get('deploy', True) + force = module.params.get('force', False) + verbose = module.params.get('verbose', False) + serverless_bin_path = module.params.get('serverless_bin_path') + + if serverless_bin_path is not None: + command = serverless_bin_path + " " + else: + command = module.get_bin_path("serverless") + " " + + if state == 'present': + command += 'deploy ' + elif state == 'absent': + command += 'remove ' + else: + module.fail_json(msg="State must either be 'present' or 'absent'. 
Received: {0}".format(state)) + + if state == 'present': + if not deploy: + command += '--noDeploy ' + elif force: + command += '--force ' + + if region: + command += '--region {0} '.format(region) + if stage: + command += '--stage {0} '.format(stage) + if verbose: + command += '--verbose ' + + rc, out, err = module.run_command(command, cwd=service_path) + if rc != 0: + if state == 'absent' and "-{0}' does not exist".format(stage) in out: + module.exit_json(changed=False, state='absent', command=command, + out=out, service_name=get_service_name(module, stage)) + + module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err)) + + # gather some facts about the deployment + module.exit_json(changed=True, state='present', out=out, command=command, + service_name=get_service_name(module, stage)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/shutdown.py b/plugins/modules/shutdown.py deleted file mode 120000 index 260ca8ece4..0000000000 --- a/plugins/modules/shutdown.py +++ /dev/null @@ -1 +0,0 @@ -./system/shutdown.py \ No newline at end of file diff --git a/plugins/modules/shutdown.py b/plugins/modules/shutdown.py new file mode 100644 index 0000000000..497706d25e --- /dev/null +++ b/plugins/modules/shutdown.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: shutdown +short_description: Shut down a machine +notes: + - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) to specify locations + to search if the default paths do not work. + - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths); instead, the + module attempts to shut down the system by calling C(systemctl shutdown). +description: + - Shuts down a machine. +version_added: "1.1.0" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.flow +attributes: + check_mode: + support: full + diff_mode: + support: none + action: + support: full + async: + support: full +options: + delay: + description: + - Seconds to wait before shutdown. Passed as a parameter to the shutdown command. + - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it is set to 0. + - On Solaris and FreeBSD, this represents seconds. + type: int + default: 0 + msg: + description: + - Message to display to users before shutdown. + type: str + default: Shut down initiated by Ansible + search_paths: + description: + - Paths to search on the remote machine for the C(shutdown) command. + - I(Only) these paths are searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching + for the C(shutdown) command.
+ type: list + elements: path + default: ['/sbin', '/usr/sbin', '/usr/local/sbin'] + +seealso: + - module: ansible.builtin.reboot +author: + - Matt Davis (@nitzmahone) + - Sam Doran (@samdoran) + - Amin Vakil (@aminvakil) +""" + +EXAMPLES = r""" +- name: Unconditionally shut down the machine with all defaults + community.general.shutdown: + +- name: Delay shutting down the remote node + community.general.shutdown: + delay: 60 + +- name: Shut down a machine with shutdown command in unusual place + community.general.shutdown: + search_paths: + - '/lib/molly-guard' +""" + +RETURN = r""" +shutdown: + description: V(true) if the machine has been shut down. + returned: always + type: bool + sample: true +""" diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py new file mode 100644 index 0000000000..90e7caa308 --- /dev/null +++ b/plugins/modules/simpleinit_msb.py @@ -0,0 +1,312 @@ +#!/usr/bin/python + +# Copyright (c) 2016-2023, Vlad Glagolev +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: simpleinit_msb +short_description: Manage services on Source Mage GNU/Linux +version_added: 7.5.0 +description: + - Controls services on remote hosts using C(simpleinit-msb). +author: "Vlad Glagolev (@vaygr)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the service. + required: true + aliases: ['service'] + state: + type: str + required: false + choices: [running, started, stopped, restarted, reloaded] + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. V(restarted) always bounces + the service. V(reloaded) always reloads. + - At least one of O(state) and O(enabled) are required. + - Note that V(reloaded) starts the service if it is not already started, even if your chosen init system would not normally. + enabled: + type: bool + required: false + description: + - Whether the service should start on boot. + - At least one of O(state) and O(enabled) are required. 
+""" + +EXAMPLES = r""" +- name: Example action to start service httpd, if not running + community.general.simpleinit_msb: + name: httpd + state: started + +- name: Example action to stop service httpd, if running + community.general.simpleinit_msb: + name: httpd + state: stopped + +- name: Example action to restart service httpd, in all cases + community.general.simpleinit_msb: + name: httpd + state: restarted + +- name: Example action to reload service httpd, in all cases + community.general.simpleinit_msb: + name: httpd + state: reloaded + +- name: Example action to enable service httpd, and not touch the running state + community.general.simpleinit_msb: + name: httpd + enabled: true +""" + +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.service import daemonize + + +class SimpleinitMSB(object): + """ + Main simpleinit-msb service manipulation class + """ + + def __init__(self, module): + self.module = module + self.name = module.params['name'] + self.state = module.params['state'] + self.enable = module.params['enabled'] + self.changed = False + self.running = None + self.action = None + self.telinit_cmd = None + self.svc_change = False + + def execute_command(self, cmd, daemon=False): + if not daemon: + return self.module.run_command(cmd) + else: + return daemonize(self.module, cmd) + + def check_service_changed(self): + if self.state and self.running is None: + self.module.fail_json(msg="failed determining service state, possible typo of service name?") + # Find out if state has changed + if not self.running and self.state in ["started", "running", "reloaded"]: + self.svc_change = True + elif self.running and self.state in ["stopped", "reloaded"]: + self.svc_change = True + elif self.state == "restarted": + self.svc_change = True + if self.module.check_mode and self.svc_change: + self.module.exit_json(changed=True, msg='service state changed') + + def modify_service_state(self): + # Only do something if state will change + if self.svc_change: + # Control service + if self.state in ['started', 'running']: + self.action = "start" + elif not self.running and self.state == 'reloaded': + self.action = "start" + elif self.state == 'stopped': + self.action = "stop" + elif self.state == 'reloaded': + self.action = "reload" + elif self.state == 'restarted': + self.action = "restart" + + if self.module.check_mode: + self.module.exit_json(changed=True, msg='changing service state') + + return self.service_control() + else: + # If nothing needs to change just say all is well + rc = 0 + err = '' + out = '' + return rc, out, err + + def get_service_tools(self): + paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin'] + binaries = ['telinit'] + location = dict() + + for binary in binaries: + location[binary] = self.module.get_bin_path(binary, opt_dirs=paths) + + if location.get('telinit', False) and os.path.exists("/etc/init.d/smgl_init"): + self.telinit_cmd = location['telinit'] + + if self.telinit_cmd is None: + self.module.fail_json(msg='cannot find telinit script for simpleinit-msb, aborting...') + + def get_service_status(self): + self.action = "status" + rc, status_stdout, status_stderr = self.service_control() + + if self.running is None and status_stdout.count('\n') <= 1: + cleanout = status_stdout.lower().replace(self.name.lower(), '') + + if "is not running" in cleanout: + self.running = False + elif "is running" in cleanout: + self.running = True + + return self.running + + def service_enable(self): + # Check if the service is already 
enabled/disabled + if not self.enable ^ self.service_enabled(): + return + + action = "boot" + ("enable" if self.enable else "disable") + + (rc, out, err) = self.execute_command("%s %s %s" % (self.telinit_cmd, action, self.name)) + + self.changed = True + + for line in err.splitlines(): + if self.enable and line.find('already enabled') != -1: + self.changed = False + break + if not self.enable and line.find('already disabled') != -1: + self.changed = False + break + + if not self.changed: + return + + return (rc, out, err) + + def service_enabled(self): + self.service_exists() + + (rc, out, err) = self.execute_command("%s %sd" % (self.telinit_cmd, "enable" if self.enable else "disable")) + + service_enabled = False if self.enable else True + + rex = re.compile(r'^%s$' % self.name) + + for line in out.splitlines(): + if rex.match(line): + service_enabled = True if self.enable else False + break + + return service_enabled + + def service_exists(self): + (rc, out, err) = self.execute_command("%s list" % self.telinit_cmd) + + service_exists = False + + rex = re.compile(r'^\w+\s+%s$' % self.name) + + for line in out.splitlines(): + if rex.match(line): + service_exists = True + break + + if not service_exists: + self.module.fail_json(msg='telinit could not find the requested service: %s' % self.name) + + def service_control(self): + self.service_exists() + + svc_cmd = "%s run %s" % (self.telinit_cmd, self.name) + + rc_state, stdout, stderr = self.execute_command("%s %s" % (svc_cmd, self.action), daemon=True) + + return (rc_state, stdout, stderr) + + +def build_module(): + return AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['service']), + state=dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), + enabled=dict(type='bool'), + ), + supports_check_mode=True, + required_one_of=[['state', 'enabled']], + ) + + +def main(): + module = build_module() + + service = SimpleinitMSB(module) + + rc = 0 + out = '' + err = '' + result = {} + result['name'] = service.name + + # Find service management tools + service.get_service_tools() + + # Enable/disable service startup at boot if requested + if service.module.params['enabled'] is not None: + service.service_enable() + result['enabled'] = service.enable + + if module.params['state'] is None: + # Not changing the running state, so bail out now.
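+ # Only the boot-time enable/disable was requested; report whether + # service_enable() changed anything and skip the run-state logic below.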
+ result['changed'] = service.changed + module.exit_json(**result) + + result['state'] = service.state + + service.get_service_status() + + # Calculate if request will change service state + service.check_service_changed() + + # Modify service state if necessary + (rc, out, err) = service.modify_service_state() + + if rc != 0: + if err: + module.fail_json(msg=err) + else: + module.fail_json(msg=out) + + result['changed'] = service.changed | service.svc_change + if service.module.params['enabled'] is not None: + result['enabled'] = service.module.params['enabled'] + + if not service.module.params['state']: + status = service.get_service_status() + if status is None: + result['state'] = 'absent' + elif status is False: + result['state'] = 'started' + else: + result['state'] = 'stopped' + else: + # as we may have just bounced the service the service command may not + # report accurate state at this moment so just show what we ran + if service.module.params['state'] in ['started', 'restarted', 'running', 'reloaded']: + result['state'] = 'started' + else: + result['state'] = 'stopped' + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py deleted file mode 120000 index 76b7e3a63e..0000000000 --- a/plugins/modules/sl_vm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/softlayer/sl_vm.py \ No newline at end of file diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py new file mode 100644 index 0000000000..9ae0def5c4 --- /dev/null +++ b/plugins/modules/sl_vm.py @@ -0,0 +1,440 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: sl_vm +short_description: Create or cancel a virtual instance in SoftLayer +description: + - Creates or cancels SoftLayer instances. + - When created, optionally waits for it to be 'running'. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + instance_id: + description: + - Instance ID of the virtual instance on which to perform the action. + type: str + hostname: + description: + - Hostname to be provided to a virtual instance. + type: str + domain: + description: + - Domain name to be provided to a virtual instance. + type: str + datacenter: + description: + - Datacenter for the virtual instance to be deployed. + type: str + choices: + - ams01 + - ams03 + - che01 + - dal01 + - dal05 + - dal06 + - dal09 + - dal10 + - dal12 + - dal13 + - fra02 + - fra04 + - fra05 + - hkg02 + - hou02 + - lon02 + - lon04 + - lon06 + - mel01 + - mex01 + - mil01 + - mon01 + - osl01 + - par01 + - sao01 + - sea01 + - seo01 + - sjc01 + - sjc03 + - sjc04 + - sng01 + - syd01 + - syd04 + - tok02 + - tor01 + - wdc01 + - wdc04 + - wdc06 + - wdc07 + tags: + description: + - Tag or list of tags to be provided to a virtual instance. + type: str + hourly: + description: + - Flag to determine if the instance should be hourly billed. + type: bool + default: true + private: + description: + - Flag to determine if the instance should be private only. + type: bool + default: false + dedicated: + description: + - Flag to determine if the instance should be deployed in dedicated space.
+ type: bool + default: false + local_disk: + description: + - Flag to determine if local disk should be used for the new instance. + type: bool + default: true + cpus: + description: + - Count of cpus to be assigned to new virtual instance. + type: int + choices: [1, 2, 4, 8, 16, 32, 56] + memory: + description: + - Amount of memory to be assigned to new virtual instance. + type: int + choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] + flavor: + description: + - Specify which SoftLayer flavor template to use instead of cpus and memory. + version_added: '0.2.0' + type: str + disks: + description: + - List of disk sizes to be assigned to new virtual instance. + default: [25] + type: list + elements: int + os_code: + description: + - OS Code to be used for new virtual instance. + type: str + image_id: + description: + - Image Template to be used for new virtual instance. + type: str + nic_speed: + description: + - NIC Speed to be assigned to new virtual instance. + choices: [10, 100, 1000] + type: int + public_vlan: + description: + - VLAN by its ID to be assigned to the public NIC. + type: str + private_vlan: + description: + - VLAN by its ID to be assigned to the private NIC. + type: str + ssh_keys: + description: + - List of ssh keys by their ID to be assigned to a virtual instance. + type: list + elements: str + default: [] + post_uri: + description: + - URL of a post provisioning script to be loaded and executed on virtual instance. + type: str + state: + description: + - Create, or cancel a virtual instance. + - Specify V(present) for create, V(absent) to cancel. + choices: [absent, present] + default: present + type: str + wait: + description: + - Flag used to wait for active status before returning. + type: bool + default: true + wait_time: + description: + - Time in seconds before wait returns. + default: 600 + type: int +requirements: + - softlayer >= 4.1.1 +notes: + - The C(softlayer-python) library, at version 6.2.6 (from Jan 2025), only supports Python version 3.8, 3.9 and 3.10. +author: + - Matt Colton (@mcltn) +seealso: + - name: SoftLayer API Python Client + description: The SoftLayer API Python Client is required for this module. 
+ link: https://github.com/SoftLayer/softlayer-python +""" + +EXAMPLES = r""" +- name: Build instance + hosts: localhost + gather_facts: false + tasks: + - name: Build instance request + community.general.sl_vm: + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: false + +- name: Build additional instances + hosts: localhost + gather_facts: false + tasks: + - name: Build instances request + community.general.sl_vm: + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - hostname: instance-2 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true + - hostname: instance-3 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true + +- name: Cancel instances + hosts: localhost + gather_facts: false + tasks: + - name: Cancel by tag + community.general.sl_vm: + state: absent + tags: ansible-module-test +""" + +# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
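+# The lone "#" body below keeps a RETURN attribute present for the docs build
+# while the actual return documentation stays disabled.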
+RETURN = """#""" + +import json +import time + +try: + import SoftLayer + from SoftLayer import VSManager + + HAS_SL = True + vsManager = VSManager(SoftLayer.create_client_from_env()) +except ImportError: + HAS_SL = False + +from ansible.module_utils.basic import AnsibleModule + + +# TODO: get this info from API +STATES = ['present', 'absent'] +DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02', + 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01', + 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04', + 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07'] +CPU_SIZES = [1, 2, 4, 8, 16, 32, 56] +MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] +INITIALDISK_SIZES = [25, 100] +LOCALDISK_SIZES = [25, 100, 150, 200, 300] +SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000] +NIC_SPEEDS = [10, 100, 1000] + + +def create_virtual_instance(module): + + instances = vsManager.list_instances( + hostname=module.params.get('hostname'), + domain=module.params.get('domain'), + datacenter=module.params.get('datacenter') + ) + + if instances: + return False, None + + # Check if OS or Image Template is provided (Can't be both, defaults to OS) + if module.params.get('os_code') is not None and module.params.get('os_code') != '': + module.params['image_id'] = '' + elif module.params.get('image_id') is not None and module.params.get('image_id') != '': + module.params['os_code'] = '' + module.params['disks'] = [] # Blank out disks since it will use the template + else: + return False, None + + tags = module.params.get('tags') + if isinstance(tags, list): + tags = ','.join(map(str, module.params.get('tags'))) + + instance = vsManager.create_instance( + hostname=module.params.get('hostname'), + domain=module.params.get('domain'), + cpus=module.params.get('cpus'), + memory=module.params.get('memory'), + flavor=module.params.get('flavor'), + hourly=module.params.get('hourly'), + datacenter=module.params.get('datacenter'), + os_code=module.params.get('os_code'), + image_id=module.params.get('image_id'), + local_disk=module.params.get('local_disk'), + disks=module.params.get('disks'), + ssh_keys=module.params.get('ssh_keys'), + nic_speed=module.params.get('nic_speed'), + private=module.params.get('private'), + public_vlan=module.params.get('public_vlan'), + private_vlan=module.params.get('private_vlan'), + dedicated=module.params.get('dedicated'), + post_uri=module.params.get('post_uri'), + tags=tags, + ) + + if instance is not None and instance['id'] > 0: + return True, instance + else: + return False, None + + +def wait_for_instance(module, id): + instance = None + completed = False + wait_timeout = time.time() + module.params.get('wait_time') + while not completed and wait_timeout > time.time(): + try: + completed = vsManager.wait_for_ready(id, 10, 2) + if completed: + instance = vsManager.get_instance(id) + except Exception: + completed = False + + return completed, instance + + +def cancel_instance(module): + canceled = True + if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): + tags = module.params.get('tags') + if isinstance(tags, str): + tags = [module.params.get('tags')] + instances = vsManager.list_instances(tags=tags, 
hostname=module.params.get('hostname'), domain=module.params.get('domain')) + for instance in instances: + try: + vsManager.cancel_instance(instance['id']) + except Exception: + canceled = False + elif module.params.get('instance_id') and module.params.get('instance_id') != 0: + try: + vsManager.cancel_instance(module.params.get('instance_id')) + except Exception: + canceled = False + else: + return False, None + + return canceled, None + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + instance_id=dict(type='str'), + hostname=dict(type='str'), + domain=dict(type='str'), + datacenter=dict(type='str', choices=DATACENTERS), + tags=dict(type='str'), + hourly=dict(type='bool', default=True), + private=dict(type='bool', default=False), + dedicated=dict(type='bool', default=False), + local_disk=dict(type='bool', default=True), + cpus=dict(type='int', choices=CPU_SIZES), + memory=dict(type='int', choices=MEMORY_SIZES), + flavor=dict(type='str'), + disks=dict(type='list', elements='int', default=[25]), + os_code=dict(type='str'), + image_id=dict(type='str'), + nic_speed=dict(type='int', choices=NIC_SPEEDS), + public_vlan=dict(type='str'), + private_vlan=dict(type='str'), + ssh_keys=dict(type='list', elements='str', default=[], no_log=False), + post_uri=dict(type='str'), + state=dict(type='str', default='present', choices=STATES), + wait=dict(type='bool', default=True), + wait_time=dict(type='int', default=600), + ) + ) + + if not HAS_SL: + module.fail_json(msg='softlayer python library required for this module') + + if module.params.get('state') == 'absent': + (changed, instance) = cancel_instance(module) + + elif module.params.get('state') == 'present': + (changed, instance) = create_virtual_instance(module) + if module.params.get('wait') is True and instance: + (changed, instance) = wait_for_instance(module, instance['id']) + + module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py deleted file mode 120000 index d00aa29bee..0000000000 --- a/plugins/modules/slack.py +++ /dev/null @@ -1 +0,0 @@ -./notification/slack.py \ No newline at end of file diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py new file mode 100644 index 0000000000..07cd1885f6 --- /dev/null +++ b/plugins/modules/slack.py @@ -0,0 +1,530 @@ +#!/usr/bin/python + +# Copyright (c) 2020, Lee Goolsbee +# Copyright (c) 2020, Michal Middleton +# Copyright (c) 2017, Steve Pletcher +# Copyright (c) 2016, René Moser +# Copyright (c) 2015, Stefan Berggren +# Copyright (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: slack +short_description: Send Slack notifications +description: + - The M(community.general.slack) module sends notifications to U(http://slack.com) using the Incoming WebHook integration. +author: "Ramon de la Fuente (@ramondelafuente)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + domain: + type: str + description: + - "When using new format 'Webhook token' and WebAPI tokens: this can be V(slack.com) or V(slack-gov.com) and is ignored + otherwise."
+ - "When using old format 'Webhook token': Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) + in Ansible 1.8 and beyond, this is deprecated and may be ignored. See token documentation for information." + token: + type: str + description: + - Slack integration token. This authenticates you to the Slack service. Make sure to use the correct type of token, + depending on what method you use. + - 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above, + Ansible adapts to the new Slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens + are in the new format then Slack ignores any value of domain except V(slack.com) or V(slack-gov.com). If the token + is in the old format the domain is required. Ansible has no control of when Slack is going to remove the old API. + When Slack does that the old format is going to cease working. B(Please keep in mind the tokens are not the API tokens + but are the webhook tokens.) In Slack these are found in the webhook URL which are obtained under the apps and integrations. + The incoming webhooks can be added in that area. In some cases this may be locked by your Slack admin and you must + request access. It is there that the incoming webhooks can be added. The key is on the end of the URL given to you + in that section.' + - "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), + V(xoxb-) or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive + thread_id. See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." + required: true + msg: + type: str + description: + - Message to send. Note that the module does not handle escaping characters. Plain-text angle brackets and ampersands + should be converted to HTML entities (for example C(&) to C(&)) before sending. See Slack's documentation + (U(https://api.slack.com/docs/message-formatting)) + for more. + channel: + type: str + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the O(token). + thread_id: + description: + - Optional. Timestamp of parent message to thread this message, see U(https://api.slack.com/docs/message-threading). + type: str + message_id: + description: + - Optional. Message ID to edit, instead of posting a new message. + - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel }}) to get RV(ignore:channel) + from previous task run. + - The token needs history scope to get information on the message to edit (C(channels:history,groups:history,mpim:history,im:history)). + - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). + type: str + version_added: 1.2.0 + username: + type: str + description: + - This is the sender of the message. + default: "Ansible" + icon_url: + type: str + description: + - URL for the message sender's icon. + default: https://docs.ansible.com/favicon.ico + icon_emoji: + type: str + description: + - Emoji for the message sender. See Slack documentation for options. + - If O(icon_emoji) is set, O(icon_url) is not used. + link_names: + type: int + description: + - Automatically create links for channels and usernames in O(msg). + default: 1 + choices: + - 1 + - 0 + parse: + type: str + description: + - Setting for the message parser at Slack. 
+ choices: + - 'full' + - 'none' + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true + color: + type: str + description: + - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the + message. + - Allowed values for color can be one of V(normal), V(good), V(warning), V(danger), any valid 3 digit or 6 digit hex + color value. + default: 'normal' + attachments: + type: list + elements: dict + description: + - Define a list of attachments. This list mirrors the Slack JSON API. + - For more information, see U(https://api.slack.com/docs/attachments). + blocks: + description: + - Define a list of blocks. This list mirrors the Slack JSON API. + - For more information, see U(https://api.slack.com/block-kit). + type: list + elements: dict + version_added: 1.0.0 + prepend_hash: + type: str + description: + - Setting for automatically prepending a V(#) symbol on the passed in O(channel). + - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These + prefixes only cover a small set of the prefixes that should not have a V(#) prepended. Since an exact condition which + O(channel) values must not have the V(#) prefix is not known, the value V(auto) for this option is deprecated in the + future. It is best to explicitly set O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior. + - Before community.general 12.0.0, the default was V(auto). It has been deprecated since community.general 10.2.0. + - Note that V(auto) will be deprecated in a future version. + # TODO: Deprecate 'auto' in community.general 13.0.0 + default: never + choices: + - 'always' + - 'never' + - 'auto' + version_added: 6.1.0 +""" + +EXAMPLES = r""" +- name: Send notification message via Slack + community.general.slack: + token: thetoken/generatedby/slack + msg: '{{ inventory_hostname }} completed' + delegate_to: localhost + +- name: Send notification message via Slack all options + community.general.slack: + token: thetoken/generatedby/slack + msg: '{{ inventory_hostname }} completed' + channel: '#ansible' + thread_id: '1539917263.000100' + username: 'Ansible on {{ inventory_hostname }}' + icon_url: http://www.example.com/some-image-file.png + link_names: 0 + parse: 'none' + delegate_to: localhost + +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured + in Slack + community.general.slack: + token: thetoken/generatedby/slack + msg: '{{ inventory_hostname }} is alive!' 
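+ # 'good', 'warning' and 'danger' are Slack's preset sidebar colors; any 3 or 6 digit hex value also works (see the next example)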
+ color: good + username: '' + icon_url: '' + +- name: Insert a color bar in front of the message with valid hex color value + community.general.slack: + token: thetoken/generatedby/slack + msg: 'This message uses color in hex value' + color: '#00aacc' + username: '' + icon_url: '' + +- name: Use the attachments API + community.general.slack: + token: thetoken/generatedby/slack + attachments: + - text: Display my system load on host A and B + color: '#ff00dd' + title: System load + fields: + - title: System A + value: "load average: 0,74, 0,66, 0,63" + short: true + - title: System B + value: 'load average: 5,16, 4,64, 2,43' + short: true + +- name: Use the blocks API + community.general.slack: + token: thetoken/generatedby/slack + blocks: + - type: section + text: + type: mrkdwn + text: |- + *System load* + Display my system load on host A and B + - type: context + elements: + - type: mrkdwn + text: |- + *System A* + load average: 0,74, 0,66, 0,63 + - type: mrkdwn + text: |- + *System B* + load average: 5,16, 4,64, 2,43 + +- name: Send a message with a link using Slack markup + community.general.slack: + token: thetoken/generatedby/slack + msg: We sent this message using <https://www.ansible.com|Ansible>! + +- name: Send a message with angle brackets and ampersands + community.general.slack: + token: thetoken/generatedby/slack + msg: This message has &lt;brackets&gt; &amp; ampersands in plain text. + +- name: Initial Threaded Slack message + community.general.slack: + channel: '#ansible' + token: xoxb-1234-56789abcdefghijklmnop + msg: 'Starting a thread with my initial post.' + register: slack_response +- name: Add more info to thread + community.general.slack: + channel: '#ansible' + token: xoxb-1234-56789abcdefghijklmnop + thread_id: "{{ slack_response['ts'] }}" + color: good + msg: 'And this is my threaded response!' + +- name: Send a message to be edited later on + community.general.slack: + token: thetoken/generatedby/slack + channel: '#ansible' + msg: Deploying something... + register: slack_response +- name: Edit message + community.general.slack: + token: thetoken/generatedby/slack + # The 'channel' option does not accept the channel name. It must use the 'channel_id', + # which can be retrieved for example from 'slack_response' from the previous task. + channel: "{{ slack_response.channel }}" + msg: Deployment complete! + message_id: "{{ slack_response.ts }}" +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from urllib.parse import urlencode + +OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' +SLACK_INCOMING_WEBHOOK = 'https://hooks.%s/services/%s' +SLACK_POSTMESSAGE_WEBAPI = 'https://%s/api/chat.postMessage' +SLACK_UPDATEMESSAGE_WEBAPI = 'https://%s/api/chat.update' +SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://%s/api/conversations.history' + +# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. +# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
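+# For illustration, the endpoint templates above expand like this (token value reused from the documentation example): +# SLACK_INCOMING_WEBHOOK % ('slack.com', 'G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK') +# == 'https://hooks.slack.com/services/G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK'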
+escape_table = { + '"': "\"", + "'": "\'", +} + + +def is_valid_hex_color(color_choice): + if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice): + return True + return False + + +def escape_quotes(text): + """Backslash any quotes within text.""" + return "".join(escape_table.get(c, c) for c in text) + + +def recursive_escape_quotes(obj, keys): + """Recursively escape quotes inside supplied keys inside block kit objects""" + if isinstance(obj, dict): + escaped = {} + for k, v in obj.items(): + if isinstance(v, str) and k in keys: + escaped[k] = escape_quotes(v) + else: + escaped[k] = recursive_escape_quotes(v, keys) + elif isinstance(obj, list): + escaped = [recursive_escape_quotes(v, keys) for v in obj] + else: + escaped = obj + return escaped + + +def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, + parse, color, attachments, blocks, message_id, prepend_hash): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=escape_quotes(text)) + elif text is not None: + # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it. + payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) + if channel is not None: + if prepend_hash == 'auto': + if channel.startswith(('#', '@', 'C0', 'GF', 'G0', 'CP')): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + elif prepend_hash == 'always': + payload['channel'] = '#' + channel + elif prepend_hash == 'never': + payload['channel'] = channel + if thread_id is not None: + payload['thread_ts'] = thread_id + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + if parse is not None: + payload['parse'] = parse + if message_id is not None: + payload['ts'] = message_id + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + attachment_keys_to_escape = [ + 'title', + 'text', + 'author_name', + 'pretext', + 'fallback', + ] + for attachment in attachments: + for key in attachment_keys_to_escape: + if key in attachment: + attachment[key] = escape_quotes(attachment[key]) + + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + + payload['attachments'].append(attachment) + + if blocks is not None: + block_keys_to_escape = [ + 'text', + 'alt_text' + ] + payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape) + + return payload + + +def validate_slack_domain(domain): + return (domain if domain in ('slack.com', 'slack-gov.com') else 'slack.com') + + +def get_slack_message(module, domain, token, channel, ts): + headers = { + 'Content-Type': 'application/json; charset=UTF-8', + 'Accept': 'application/json', + 'Authorization': 'Bearer ' + token + } + qs = urlencode({ + 'channel': channel, + 'ts': ts, + 'limit': 1, + 'inclusive': 'true', + }) + domain = validate_slack_domain(domain) + url = (SLACK_CONVERSATIONS_HISTORY_WEBAPI % domain) + '?' 
+ qs + response, info = fetch_url(module=module, url=url, headers=headers, method='GET') + if info['status'] != 200: + module.fail_json(msg="failed to get slack message") + data = module.from_json(response.read()) + if data.get('ok') is False: + module.fail_json(msg="failed to get slack message: %s" % data) + if len(data['messages']) < 1: + module.fail_json(msg="no messages matching ts: %s" % ts) + if len(data['messages']) > 1: + module.fail_json(msg="more than 1 message matching ts: %s" % ts) + return data['messages'][0] + + +def do_notify_slack(module, domain, token, payload): + use_webapi = False + if token.count('/') >= 2: + # New style webhook token + domain = validate_slack_domain(domain) + slack_uri = SLACK_INCOMING_WEBHOOK % (domain, token) + elif re.match(r'^xox[abp]-\S+$', token): + domain = validate_slack_domain(domain) + slack_uri = (SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI) % domain + use_webapi = True + else: + if not domain: + module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form " + "XXXX/YYYY/ZZZZ in your playbook") + slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) + + headers = { + 'Content-Type': 'application/json; charset=UTF-8', + 'Accept': 'application/json', + } + if use_webapi: + headers['Authorization'] = 'Bearer ' + token + + data = module.jsonify(payload) + response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data) + + if info['status'] != 200: + if use_webapi: + obscured_incoming_webhook = slack_uri + else: + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') + module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) + + # each API requires different handling + if use_webapi: + return module.from_json(response.read()) + else: + return {'webhook': 'ok'} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str'), + token=dict(type='str', required=True, no_log=True), + msg=dict(type='str'), + channel=dict(type='str'), + thread_id=dict(type='str'), + username=dict(type='str', default='Ansible'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), + icon_emoji=dict(type='str'), + link_names=dict(type='int', default=1, choices=[0, 1]), + parse=dict(type='str', choices=['none', 'full']), + validate_certs=dict(default=True, type='bool'), + color=dict(type='str', default='normal'), + attachments=dict(type='list', elements='dict'), + blocks=dict(type='list', elements='dict'), + message_id=dict(type='str'), + prepend_hash=dict(type='str', choices=['always', 'never', 'auto'], default='never'), + ), + supports_check_mode=True, + ) + + domain = module.params['domain'] + token = module.params['token'] + text = module.params['msg'] + channel = module.params['channel'] + thread_id = module.params['thread_id'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + parse = module.params['parse'] + color = module.params['color'] + attachments = module.params['attachments'] + blocks = module.params['blocks'] + message_id = module.params['message_id'] + prepend_hash = module.params['prepend_hash'] + + color_choices = ['normal', 'good', 'warning', 'danger'] + if color not in color_choices and not is_valid_hex_color(color): + module.fail_json(msg="Color value specified should be either one of %r " + "or any valid hex value 
with length 3 or 6." % color_choices) + + changed = True + + # if updating an existing message, we can check if there's anything to update + if message_id is not None: + changed = False + msg = get_slack_message(module, domain, token, channel, message_id) + for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): + if msg.get(key) != module.params.get(key): + changed = True + break + # if check mode is active, we shouldn't do anything regardless. + # if changed=False, we don't need to do anything, so don't do it. + if module.check_mode or not changed: + module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel']) + elif module.check_mode: + module.exit_json(changed=changed) + + payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, + parse, color, attachments, blocks, message_id, prepend_hash) + slack_response = do_notify_slack(module, domain, token, payload) + + if 'ok' in slack_response: + # Evaluate WebAPI response + if slack_response['ok']: + # return payload as a string for backwards compatibility + payload_json = module.jsonify(payload) + module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'], + api=slack_response, payload=payload_json) + else: + module.fail_json(msg="Slack API error", error=slack_response['error']) + else: + # Exit with plain OK from WebHook, since we don't have more information + # If we get 200 from webhook, the only answer is OK + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/slackpkg.py b/plugins/modules/slackpkg.py deleted file mode 120000 index 054f93248f..0000000000 --- a/plugins/modules/slackpkg.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/slackpkg.py \ No newline at end of file diff --git a/plugins/modules/slackpkg.py b/plugins/modules/slackpkg.py new file mode 100644 index 0000000000..a32c0048f7 --- /dev/null +++ b/plugins/modules/slackpkg.py @@ -0,0 +1,205 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Kim Nørgaard +# Written by Kim Nørgaard +# Based on pkgng module written by bleader +# that was based on pkgin module written by Shaun Zinck +# that was based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: slackpkg +short_description: Package manager for Slackware >= 12.2 +description: + - Manage binary packages for Slackware using C(slackpkg) which is available in versions after 12.2. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of package to install/remove. + required: true + type: list + elements: str + aliases: [pkg] + + state: + description: + - State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent). + choices: ['present', 'absent', 'latest', 'installed', 'removed'] + required: false + default: present + type: str + + update_cache: + description: + - Update the package database first. 
+ required: false + default: false + type: bool + +author: Kim Nørgaard (@KimNorgaard) +requirements: ["Slackware >= 12.2"] +""" + +EXAMPLES = r""" +- name: Install package foo + community.general.slackpkg: + name: foo + state: present + +- name: Remove packages foo and bar + community.general.slackpkg: + name: foo,bar + state: absent + +- name: Make sure that it is the most updated package + community.general.slackpkg: + name: foo + state: latest +""" + +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, slackpkg_path, name): + + import platform + import os + import re + + machine = platform.machine() + # Exception for kernel-headers package on x86_64 + if name == 'kernel-headers' and machine == 'x86_64': + machine = 'x86' + pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine))) + packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)] + + if len(packages) > 0: + return True + + return False + + +def remove_packages(module, slackpkg_path, packages): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, slackpkg_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "remove", package]) + + if not module.check_mode and query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, slackpkg_path, packages): + + install_c = 0 + + for package in packages: + if query_package(module, slackpkg_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "install", package]) + + if not module.check_mode and not query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to install %s: %s" % (package, out), + stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" + % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def upgrade_packages(module, slackpkg_path, packages): + install_c = 0 + + for package in packages: + if not module.check_mode: + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "upgrade", package]) + + if not module.check_mode and not query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to install %s: %s" % (package, out), + stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" + % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def update_cache(module, slackpkg_path): + rc, out, err = module.run_command( + [slackpkg_path, "-batch=on", "update"]) + if rc != 0: + module.fail_json(msg="Could not update package cache") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']), + name=dict(aliases=["pkg"], required=True, type='list', elements='str'), + update_cache=dict(default=False, type='bool'), + ), + 
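+ # check mode is honoured below: install/remove/upgrade skip the actual + # slackpkg invocation and only report whether a change would occur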
supports_check_mode=True) + + slackpkg_path = module.get_bin_path('slackpkg', True) + + p = module.params + + pkgs = p['name'] + + if p["update_cache"]: + update_cache(module, slackpkg_path) + + if p['state'] == 'latest': + upgrade_packages(module, slackpkg_path, pkgs) + + elif p['state'] in ['present', 'installed']: + install_packages(module, slackpkg_path, pkgs) + + elif p["state"] in ['removed', 'absent']: + remove_packages(module, slackpkg_path, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py deleted file mode 120000 index 1ed89a791e..0000000000 --- a/plugins/modules/smartos_image_info.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/smartos/smartos_image_info.py \ No newline at end of file diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py new file mode 100644 index 0000000000..0c68a4c52f --- /dev/null +++ b/plugins/modules/smartos_image_info.py @@ -0,0 +1,119 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Adam Števko +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: smartos_image_info +short_description: Get SmartOS image details +description: + - Retrieve information about all installed images on SmartOS. +author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + filters: + description: + - Criteria for selecting image. Can be any value from image manifest and V(published_date), V(published), V(source), + V(clones), and V(size). + - More information can be found at U(https://smartos.org/man/1m/imgadm) under C(imgadm list). 
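+ # for example, O(filters="os=linux state=active public=false") selects all private active Linux images (see EXAMPLES below)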
+ type: str +""" + +EXAMPLES = r""" +- name: Return information about all installed images + community.general.smartos_image_info: + register: result + +- name: Return all private active Linux images + community.general.smartos_image_info: + filters: "os=linux state=active public=false" + register: result + +- name: Show how many clones every image has + community.general.smartos_image_info: + register: result + +- name: Print information + ansible.builtin.debug: + msg: >- + {{ + result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} + has {{ result.smartos_images[item]['clones'] + }} VM(s) + with_items: "{{ result.smartos_images.keys() | list }}" + +- name: Print information + ansible.builtin.debug: + msg: >- + {{ + smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} + has {{ smartos_images[item]['clones'] + }} VM(s) + with_items: "{{ smartos_images.keys() | list }}" +""" + +RETURN = r""" +""" + +import json +from ansible.module_utils.basic import AnsibleModule + + +class ImageFacts(object): + + def __init__(self, module): + self.module = module + + self.filters = module.params['filters'] + + def return_all_installed_images(self): + cmd = [self.module.get_bin_path('imgadm'), 'list', '-j'] + + if self.filters: + cmd.append(self.filters) + + (rc, out, err) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json( + msg='Failed to get all installed images', stderr=err) + + images = json.loads(out) + + result = {} + for image in images: + result[image['manifest']['uuid']] = image['manifest'] + # Merge additional attributes with the image manifest. + for attrib in ['clones', 'source', 'zpool']: + result[image['manifest']['uuid']][attrib] = image[attrib] + + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + filters=dict(), + ), + supports_check_mode=True, + ) + + image_facts = ImageFacts(module) + + data = dict(smartos_images=image_facts.return_all_installed_images()) + + module.exit_json(**data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py deleted file mode 120000 index 89975635f7..0000000000 --- a/plugins/modules/snap.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/snap.py \ No newline at end of file diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py new file mode 100644 index 0000000000..01599b1b3e --- /dev/null +++ b/plugins/modules/snap.py @@ -0,0 +1,513 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Lincoln Wallace (locnnil) +# Copyright (c) 2021, Alexei Znamensky (russoz) +# Copyright (c) 2021, Marcus Rickert +# Copyright (c) 2018, Stanislas Lange (angristan) +# Copyright (c) 2018, Victor Carceler + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: snap +short_description: Manages snaps +description: + - Manages snap packages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the snaps to be installed. + - Any named snap accepted by the C(snap) command is valid. + - O(dangerous=true) may be necessary when installing C(.snap) files. See O(dangerous) for more details. + required: true + type: list + elements: str + state: + description: + - Desired state of the package.
+ - When O(state=present) the module uses C(snap install) if the snap is not installed, and C(snap refresh) if it is installed + but from a different channel. + default: present + choices: [absent, present, enabled, disabled] + type: str + classic: + description: + - Install a snap that has classic confinement. + - This option corresponds to the C(--classic) argument of the C(snap install) command. + - This level of confinement is permissive, granting full system access, similar to that of traditionally packaged applications + that do not use sandboxing mechanisms. This option can only be specified when the task involves a single snap. + - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels. + type: bool + required: false + default: false + channel: + description: + - Define which release of a snap is installed and tracked for updates. This option can only be specified if there is + a single snap in the task. + - If not passed, the C(snap) command defaults to V(stable). + - If the value passed does not contain the C(track), it defaults to C(latest). For example, if V(edge) is passed, the + module assumes the channel to be V(latest/edge). + - See U(https://snapcraft.io/docs/channels) for more details about snap channels. + type: str + required: false + options: + description: + - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option is applied to that + snap only. If the snap name is omitted, the options are applied to all snaps listed in O(name). Options are only applied + to active snaps. + - Options are only applied when C(state) is set to V(present). This is done after the necessary installation or refresh + (upgrade/downgrade) of all the snaps listed in O(name). + - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options. + required: false + type: list + elements: str + version_added: 4.4.0 + dangerous: + description: + - Install the snap in dangerous mode, without validating its assertions and signatures. + - This is useful when installing local snaps that are either unsigned or have signatures that have not been acknowledged. + - See U(https://snapcraft.io/docs/install-modes) for more details about installation modes. + type: bool + required: false + default: false + version_added: 7.2.0 +notes: + - Privileged operations, such as installing and configuring snaps, require root privileges. This is only the case if the + user has not logged in to the Snap Store.
+author: + - Victor Carceler (@vcarceler) + - Stanislas Lange (@angristan) + +seealso: + - module: community.general.snap_alias +""" + +EXAMPLES = r""" +# Install "foo" and "bar" snap +- name: Install foo + community.general.snap: + name: + - foo + - bar + +# Install "foo" snap with options par1=A and par2=B +- name: Install "foo" with options + community.general.snap: + name: + - foo + options: + - par1=A + - par2=B + +# Install "foo" and "bar" snaps with common option com=A and specific options fooPar=X and barPar=Y +- name: Install "foo" and "bar" with options + community.general.snap: + name: + - foo + - bar + options: + - com=A + - foo:fooPar=X + - bar:barPar=Y + +# Remove "foo" snap +- name: Remove foo + community.general.snap: + name: foo + state: absent + +# Install a snap with classic confinement +- name: Install "foo" with option --classic + community.general.snap: + name: foo + classic: true + +# Install a snap from a specific channel +- name: Install "foo" with option --channel=latest/edge + community.general.snap: + name: foo + channel: latest/edge +""" + +RETURN = r""" +classic: + description: Whether or not the snaps were installed with the classic confinement. + type: bool + returned: When snaps are installed +channel: + description: The channel the snaps were installed from. + type: str + returned: When snaps are installed +cmd: + description: The command that was executed on the host. + type: str + returned: When changed is true +snaps_installed: + description: The list of actually installed snaps. + type: list + returned: When any snaps have been installed +snaps_removed: + description: The list of actually removed snaps. + type: list + returned: When any snaps have been removed +options_changed: + description: The list of options set/changed in format C(snap:key=value). + type: list + returned: When any options have been changed/set + version_added: 4.4.0 +version: + description: Versions of snap components as reported by C(snap version). + type: dict + returned: always + version_added: 10.3.0 +""" + +import re +import json +import numbers + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.snap import snap_runner, get_version + + +class Snap(StateModuleHelper): + NOT_INSTALLED = 0 + CHANNEL_MISMATCH = 1 + INSTALLED = 2 + + __disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)') + __set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)') + __list_re = re.compile(r'^(?P<name>\S+)\s+\S+\s+\S+\s+(?P<channel>\S+)') + module = dict( + argument_spec={ + 'name': dict(type='list', elements='str', required=True), + 'state': dict(type='str', default='present', choices=['absent', 'present', 'enabled', 'disabled']), + 'classic': dict(type='bool', default=False), + 'channel': dict(type='str'), + 'options': dict(type='list', elements='str'), + 'dangerous': dict(type='bool', default=False), + }, + supports_check_mode=True, + ) + + @staticmethod + def _first_non_zero(a): + for elem in a: + if elem != 0: + return elem + + return 0 + + def __init_module__(self): + self.runner = snap_runner(self.module) + self.vars.version = get_version(self.runner) + # if state=present there might be file names passed in 'name', in + # which case they must be converted to their actual snap names, which + # is done using the names_from_snaps() method calling 'snap info'.
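+ # For example (illustrative file name): name=['./hello-world_29.snap'] is resolved + # through 'snap info', whose output contains a line 'name: hello-world', so + # snap_names becomes ['hello-world'].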
+ self.vars.set("snapinfo_run_info", [], output=(self.verbosity >= 4)) + self.vars.set("status_run_info", [], output=(self.verbosity >= 4)) + self.vars.set("status_out", None, output=(self.verbosity >= 4)) + self.vars.set("run_info", [], output=(self.verbosity >= 4)) + + if self.vars.state == "present": + self.vars.set("snap_names", self.names_from_snaps(self.vars.name)) + status_var = "snap_names" + else: + status_var = "name" + self.vars.set("status_var", status_var, output=False) + self.vars.set("snap_status", self.snap_status(self.vars[self.vars.status_var], self.vars.channel), output=False, change=True) + self.vars.set("snap_status_map", dict(zip(self.vars.name, self.vars.snap_status)), output=False, change=True) + + def __quit_module__(self): + self.vars.snap_status = self.snap_status(self.vars[self.vars.status_var], self.vars.channel) + if self.vars.channel is None: + self.vars.channel = "stable" + + def _run_multiple_commands(self, commands, actionable_names, bundle=True, refresh=False): + results_cmd = [] + results_rc = [] + results_out = [] + results_err = [] + results_run_info = [] + + state = "refresh" if refresh else self.vars.state + + with self.runner(commands + ["name"]) as ctx: + if bundle: + rc, out, err = ctx.run(state=state, name=actionable_names) + results_cmd.append(commands + actionable_names) + results_rc.append(rc) + results_out.append(out.strip()) + results_err.append(err.strip()) + results_run_info.append(ctx.run_info) + else: + for name in actionable_names: + rc, out, err = ctx.run(state=state, name=name) + results_cmd.append(commands + [name]) + results_rc.append(rc) + results_out.append(out.strip()) + results_err.append(err.strip()) + results_run_info.append(ctx.run_info) + + return ( + '; '.join([to_native(x) for x in results_cmd]), + self._first_non_zero(results_rc), + '\n'.join(results_out), + '\n'.join(results_err), + results_run_info, + ) + + def convert_json_subtree_to_map(self, json_subtree, prefix=None): + option_map = {} + + if not isinstance(json_subtree, dict): + self.do_raise("Non-dict non-leaf element encountered while parsing option map. " + "The output format of 'snap set' may have changed. Aborting!") + + for key, value in json_subtree.items(): + full_key = key if prefix is None else prefix + "." 
+ key + + if isinstance(value, (str, float, bool, numbers.Integral)): + option_map[full_key] = str(value) + else: + option_map.update(self.convert_json_subtree_to_map(json_subtree=value, prefix=full_key)) + + return option_map + + def convert_json_to_map(self, json_string): + json_object = json.loads(json_string) + return self.convert_json_subtree_to_map(json_object) + + def retrieve_option_map(self, snap_name): + with self.runner("get name") as ctx: + rc, out, err = ctx.run(name=snap_name) + + if rc != 0: + return {} + + result = out.splitlines() + + if "has no configuration" in result[0]: + return {} + + try: + option_map = self.convert_json_to_map(out) + return option_map + except Exception as e: + self.do_raise( + msg="Parsing option map returned by 'snap get {0}' triggers exception '{1}', output:\n'{2}'".format(snap_name, str(e), out)) + + def names_from_snaps(self, snaps): + def process_one(rc, out, err): + res = [line for line in out.split("\n") if line.startswith("name:")] + name = res[0].split()[1] + return [name] + + def process_many(rc, out, err): + # This needs to be "\n---" instead of just "---" because otherwise + # if a snap uses "---" in its description then that will incorrectly + # be interpreted as a separator between snaps in the output. + outputs = out.split("\n---") + res = [] + for sout in outputs: + res.extend(process_one(rc, sout, "")) + return res + + def process(rc, out, err): + if len(snaps) == 1: + check_error = err + process_ = process_one + else: + check_error = out + process_ = process_many + + if "warning: no snap found" in check_error: + self.do_raise("Snaps not found: {0}.".format([x.split()[-1] + for x in out.split('\n') + if x.startswith("warning: no snap found")])) + return process_(rc, out, err) + + names = [] + if snaps: + with self.runner("info name", output_process=process) as ctx: + try: + names = ctx.run(name=snaps) + finally: + self.vars.snapinfo_run_info.append(ctx.run_info) + return names + + def snap_status(self, snap_name, channel): + def _status_check(name, channel, installed): + match = [c for n, c in installed if n == name] + if not match: + return Snap.NOT_INSTALLED + if channel and match[0] not in (channel, "latest/{0}".format(channel)): + return Snap.CHANNEL_MISMATCH + else: + return Snap.INSTALLED + + with self.runner("_list") as ctx: + rc, out, err = ctx.run(check_rc=True) + list_out = out.split('\n')[1:] + list_out = [self.__list_re.match(x) for x in list_out] + list_out = [(m.group('name'), m.group('channel')) for m in list_out if m] + self.vars.status_out = list_out + self.vars.status_run_info = ctx.run_info + + return [_status_check(n, channel, list_out) for n in snap_name] + + def is_snap_enabled(self, snap_name): + with self.runner("_list name") as ctx: + rc, out, err = ctx.run(name=snap_name) + if rc != 0: + return None + result = out.splitlines()[1] + match = self.__disable_re.match(result) + if not match: + self.do_raise(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out)) + notes = match.group('notes') + return "disabled" not in notes.split(',') + + def _present(self, actionable_snaps, refresh=False): + self.changed = True + self.vars.snaps_installed = actionable_snaps + + if self.check_mode: + return + + params = ['state', 'classic', 'channel', 'dangerous'] # get base cmd parts + has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' + has_multiple_snaps = len(actionable_snaps) > 1 + + if has_one_pkg_params and has_multiple_snaps: + self.vars.cmd, rc, out, err, run_info = 
self._run_multiple_commands(params, actionable_snaps, bundle=False, refresh=refresh) + else: + self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, refresh=refresh) + self.vars.run_info = run_info + + if rc == 0: + return + + classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P<package_name>\w+)"' + r' was published using classic confinement') + match = classic_snap_pattern.match(err) + if match: + err_pkg = match.group('package_name') + msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) + else: + msg = "Oops! Snap installation failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + self.do_raise(msg=msg) + + def state_present(self): + + self.vars.set_meta('classic', output=True) + self.vars.set_meta('channel', output=True) + + actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH] + if actionable_refresh: + self._present(actionable_refresh, refresh=True) + actionable_install = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.NOT_INSTALLED] + if actionable_install: + self._present(actionable_install) + + self.set_options() + + def set_options(self): + if self.vars.options is None: + return + + actionable_snaps = [s for s in self.vars.name if self.vars.snap_status_map[s] != Snap.NOT_INSTALLED] + overall_options_changed = [] + + for snap_name in actionable_snaps: + option_map = self.retrieve_option_map(snap_name=snap_name) + + options_changed = [] + + for option_string in self.vars.options: + match = self.__set_param_re.match(option_string) + + if not match: + msg = "Cannot parse set option '{option_string}'".format(option_string=option_string) + self.do_raise(msg) + + snap_prefix = match.group("snap_prefix") + selected_snap_name = snap_prefix[:-1] if snap_prefix else None + + if selected_snap_name is not None and selected_snap_name not in self.vars.name: + msg = "Snap option '{option_string}' refers to snap which is not in the list of snap names".format(option_string=option_string) + self.do_raise(msg) + + if selected_snap_name is None or (snap_name is not None and snap_name == selected_snap_name): + key = match.group("key") + value = match.group("value").strip() + + if key not in option_map or key in option_map and option_map[key] != value: + option_without_prefix = key + "=" + value + option_with_prefix = option_string if selected_snap_name is not None else snap_name + ":" + option_string + options_changed.append(option_without_prefix) + overall_options_changed.append(option_with_prefix) + + if options_changed: + self.changed = True + + if not self.check_mode: + with self.runner("_set name options") as ctx: + rc, out, err = ctx.run(name=snap_name, options=options_changed) + if rc != 0: + if 'has no "configure" hook' in err: + msg = "Snap '{snap}' does not have any configurable options".format(snap=snap_name) + self.do_raise(msg) + + msg = "Cannot set options '{options}' for snap '{snap}': error={error}".format( + options=" ".join(options_changed), snap=snap_name, error=err) + self.do_raise(msg) + + if overall_options_changed: + self.vars.options_changed = overall_options_changed + + def _generic_state_action(self, actionable_func, actionable_var, params): + actionable_snaps = [s for s in self.vars.name if actionable_func(s)] + if not actionable_snaps: + return + self.changed = True + self.vars[actionable_var] = actionable_snaps + if self.check_mode:
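+ # in check mode, stop here: the snaps that would change have already been recorded above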
return + self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps) + self.vars.run_info = run_info + if rc == 0: + return + msg = "Oops! Snap operation failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + self.do_raise(msg=msg) + + def state_absent(self): + self._generic_state_action(lambda s: self.vars.snap_status_map[s] != Snap.NOT_INSTALLED, "snaps_removed", ['classic', 'channel', 'state']) + + def state_enabled(self): + self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) + + def state_disabled(self): + self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) + + +def main(): + Snap.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py deleted file mode 120000 index 6f8c2d5fab..0000000000 --- a/plugins/modules/snap_alias.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/snap_alias.py \ No newline at end of file diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py new file mode 100644 index 0000000000..4a68671a06 --- /dev/null +++ b/plugins/modules/snap_alias.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# +# Copyright (c) 2021, Alexei Znamensky (russoz) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: snap_alias +short_description: Manages snap aliases +version_added: 4.0.0 +description: + - Manages snap aliases. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + description: + - Desired state of the alias. + type: str + choices: [absent, present] + default: present + name: + description: + - Name of the snap. + type: str + alias: + description: + - Aliases to be created or removed. + type: list + elements: str + aliases: [aliases] + +author: + - Alexei Znamensky (@russoz) + +seealso: + - module: community.general.snap +""" + +EXAMPLES = r""" +- name: Create snap alias + community.general.snap_alias: + name: hello-world + alias: hw + +- name: Create multiple aliases + community.general.snap_alias: + name: hello-world + aliases: + - hw + - hw2 + - hw3 + state: present # optional + +- name: Remove one specific alias + community.general.snap_alias: + alias: hw + state: absent + +- name: Remove all aliases for snap + community.general.snap_alias: + name: hello-world + state: absent +""" + +RETURN = r""" +snap_aliases: + description: The snap aliases after execution. If called in check mode, then the list represents the state before execution. + type: list + elements: str + returned: always +version: + description: Versions of snap components as reported by C(snap version).
+ type: dict + returned: always + version_added: 10.3.0 +""" + + +import re + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.snap import snap_runner, get_version + + +class SnapAlias(StateModuleHelper): + _RE_ALIAS_LIST = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$") + + module = dict( + argument_spec={ + 'state': dict(type='str', choices=['absent', 'present'], default='present'), + 'name': dict(type='str'), + 'alias': dict(type='list', elements='str', aliases=['aliases']), + }, + required_if=[ + ('state', 'present', ['name', 'alias']), + ('state', 'absent', ['name', 'alias'], True), + ], + supports_check_mode=True, + ) + + def _aliases(self): + n = self.vars.name + return {n: self._get_aliases_for(n)} if n else self._get_aliases() + + def __init_module__(self): + self.runner = snap_runner(self.module) + self.vars.version = get_version(self.runner) + self.vars.set("snap_aliases", self._aliases(), change=True, diff=True) + + def __quit_module__(self): + self.vars.snap_aliases = self._aliases() + + def _get_aliases(self): + def process(rc, out, err): + if err: + return {} + aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]] + snap_alias_list = [(entry.group("snap"), entry.group("alias")) for entry in aliases] + results = {} + for snap, alias in snap_alias_list: + results[snap] = results.get(snap, []) + [alias] + return results + + with self.runner("state_alias name", check_rc=True, output_process=process) as ctx: + aliases = ctx.run(state_alias="info") + if self.verbosity >= 4: + self.vars.get_aliases_run_info = ctx.run_info + return aliases + + def _get_aliases_for(self, name): + return self._get_aliases().get(name, []) + + def _has_alias(self, name=None, alias=None): + if name: + if name not in self.vars.snap_aliases: + return False + if alias is None: + return bool(self.vars.snap_aliases[name]) + return alias in self.vars.snap_aliases[name] + + return any(alias in aliases for aliases in self.vars.snap_aliases.values()) + + def state_present(self): + for _alias in self.vars.alias: + if not self._has_alias(self.vars.name, _alias): + self.changed = True + with self.runner("state_alias name alias", check_mode_skip=True) as ctx: + ctx.run(state_alias=self.vars.state, alias=_alias) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + def state_absent(self): + if not self.vars.alias: + if self._has_alias(self.vars.name): + self.changed = True + with self.runner("state_alias name", check_mode_skip=True) as ctx: + ctx.run(state_alias=self.vars.state) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + else: + for _alias in self.vars.alias: + if self._has_alias(self.vars.name, _alias): + self.changed = True + with self.runner("state_alias alias", check_mode_skip=True) as ctx: + ctx.run(state_alias=self.vars.state, alias=_alias) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + +def main(): + SnapAlias.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py deleted file mode 120000 index 2e4c69cf1b..0000000000 --- a/plugins/modules/snmp_facts.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/snmp_facts.py \ No newline at end of file diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py new file mode 100644 index 0000000000..a0577a8be9 --- /dev/null +++ b/plugins/modules/snmp_facts.py @@ -0,0 +1,479 @@ +#!/usr/bin/python + +# This
file is part of Networklore's snmp library for Ansible +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: snmp_facts +author: + - Patrick Ogenstad (@ogenstad) +short_description: Retrieve facts for a device using SNMP +description: + - Retrieve facts for a device using SNMP, the facts are inserted to the C(ansible_facts) key. +requirements: + - pysnmp +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + host: + description: + - Set to target SNMP server (normally C({{ inventory_hostname }})). + type: str + required: true + version: + description: + - SNMP Version to use, V(v2), V(v2c) or V(v3). + type: str + required: true + choices: [v2, v2c, v3] + community: + description: + - The SNMP community string, required if O(version) is V(v2) or V(v2c). + type: str + level: + description: + - Authentication level. + - Required if O(version=v3). + type: str + choices: [authNoPriv, authPriv] + username: + description: + - Username for SNMPv3. + - Required if O(version=v3). + type: str + integrity: + description: + - Hashing algorithm. + - Required if O(version=v3). + type: str + choices: [md5, sha] + authkey: + description: + - Authentication key. + - Required O(version=v3). + type: str + privacy: + description: + - Encryption algorithm. + - Required if O(level=authPriv). + type: str + choices: [aes, des] + privkey: + description: + - Encryption key. + - Required if O(level=authPriv). + type: str + timeout: + description: + - Response timeout in seconds. + type: int + version_added: 2.3.0 + retries: + description: + - Maximum number of request retries, 0 retries means just a single request. + type: int + version_added: 2.3.0 +""" + +EXAMPLES = r""" +- name: Gather facts with SNMP version 2 + community.general.snmp_facts: + host: '{{ inventory_hostname }}' + version: v2c + community: public + delegate_to: local + +- name: Gather facts using SNMP version 3 + community.general.snmp_facts: + host: '{{ inventory_hostname }}' + version: v3 + level: authPriv + integrity: sha + privacy: aes + username: snmp-user + authkey: abc12345 + privkey: def6789 + delegate_to: localhost +""" + +RETURN = r""" +ansible_sysdescr: + description: A textual description of the entity. + returned: success + type: str + sample: "Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64" +ansible_sysobjectid: + description: The vendor's authoritative identification of the network management subsystem contained in the entity. + returned: success + type: str + sample: 1.3.6.1.4.1.8072.3.2.10 +ansible_sysuptime: + description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized. + returned: success + type: int + sample: 42388 +ansible_syscontact: + description: The textual identification of the contact person for this managed node, together with information on how to + contact this person. + returned: success + type: str + sample: Me +ansible_sysname: + description: An administratively-assigned name for this managed node. 
+ returned: success + type: str + sample: ubuntu-user +ansible_syslocation: + description: The physical location of this node (for example, V(telephone closet, 3rd floor)). + returned: success + type: str + sample: Sitting on the Dock of the Bay +ansible_all_ipv4_addresses: + description: List of all IPv4 addresses. + returned: success + type: list + sample: ["127.0.0.1", "172.17.0.1"] +ansible_interfaces: + description: Dictionary of each network interface and its metadata. + returned: success + type: dict + sample: + { + "1": { + "adminstatus": "up", + "description": "", + "ifindex": "1", + "ipv4": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } + ], + "mac": "", + "mtu": "65536", + "name": "lo", + "operstatus": "up", + "speed": "65536" + }, + "2": { + "adminstatus": "up", + "description": "", + "ifindex": "2", + "ipv4": [ + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } + ], + "mac": "000a305a52a1", + "mtu": "1500", + "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", + "operstatus": "up", + "speed": "1500" + } + } +""" + +import binascii +from collections import defaultdict +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + +with deps.declare("pysnmp"): + from pysnmp.entity.rfc3413.oneliner import cmdgen + from pysnmp.proto.rfc1905 import EndOfMibView + + +class DefineOid(object): + + def __init__(self, dotprefix=False): + if dotprefix: + dp = "." + else: + dp = "" + + # From SNMPv2-MIB + self.sysDescr = dp + "1.3.6.1.2.1.1.1.0" + self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0" + self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0" + self.sysContact = dp + "1.3.6.1.2.1.1.4.0" + self.sysName = dp + "1.3.6.1.2.1.1.5.0" + self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" + + # From IF-MIB + self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" + self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" + self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4" + self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5" + self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6" + self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7" + self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8" + self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" + + # From IP-MIB + self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" + self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" + self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" + + +def decode_hex(hexstring): + + if len(hexstring) < 3: + return hexstring + if hexstring[:2] == "0x": + return to_text(binascii.unhexlify(hexstring[2:])) + return hexstring + + +def decode_mac(hexstring): + + if len(hexstring) != 14: + return hexstring + if hexstring[:2] == "0x": + return hexstring[2:] + return hexstring + + +def lookup_adminstatus(int_adminstatus): + adminstatus_options = { + 1: 'up', + 2: 'down', + 3: 'testing' + } + if int_adminstatus in adminstatus_options: + return adminstatus_options[int_adminstatus] + return "" + + +def lookup_operstatus(int_operstatus): + operstatus_options = { + 1: 'up', + 2: 'down', + 3: 'testing', + 4: 'unknown', + 5: 'dormant', + 6: 'notPresent', + 7: 'lowerLayerDown' + } + if int_operstatus in operstatus_options: + return operstatus_options[int_operstatus] + return "" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']), + community=dict(type='str'), + username=dict(type='str'), + level=dict(type='str', 
choices=['authNoPriv', 'authPriv']), + integrity=dict(type='str', choices=['md5', 'sha']), + privacy=dict(type='str', choices=['aes', 'des']), + authkey=dict(type='str', no_log=True), + privkey=dict(type='str', no_log=True), + timeout=dict(type='int'), + retries=dict(type='int'), + ), + required_together=( + ['username', 'level', 'integrity', 'authkey'], + ['privacy', 'privkey'], + ), + supports_check_mode=True, + ) + + m_args = module.params + + deps.validate(module) + + cmdGen = cmdgen.CommandGenerator() + transport_opts = { + k: m_args[k] + for k in ('timeout', 'retries') + if m_args[k] is not None + } + + # Verify that we receive a community when using snmp v2 + if m_args['version'] in ("v2", "v2c"): + if m_args['community'] is None: + module.fail_json(msg='Community not set when using snmp version 2') + + integrity_proto = None + privacy_proto = None + if m_args['version'] == "v3": + if m_args['username'] is None: + module.fail_json(msg='Username not set when using snmp version 3') + + if m_args['level'] == "authPriv" and m_args['privacy'] is None: + module.fail_json(msg='Privacy algorithm not set when using authPriv') + + if m_args['integrity'] == "sha": + integrity_proto = cmdgen.usmHMACSHAAuthProtocol + elif m_args['integrity'] == "md5": + integrity_proto = cmdgen.usmHMACMD5AuthProtocol + + if m_args['privacy'] == "aes": + privacy_proto = cmdgen.usmAesCfb128Protocol + elif m_args['privacy'] == "des": + privacy_proto = cmdgen.usmDESPrivProtocol + + # Use SNMP Version 2 + if m_args['version'] in ("v2", "v2c"): + snmp_auth = cmdgen.CommunityData(m_args['community']) + + # Use SNMP Version 3 with authNoPriv + elif m_args['level'] == "authNoPriv": + snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto) + + # Use SNMP Version 3 with authPriv + else: + snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, + privProtocol=privacy_proto) + + # Use p to prefix OIDs with a dot for polling + p = DefineOid(dotprefix=True) + # Use v without a prefix to use with return values + v = DefineOid(dotprefix=False) + + def Tree(): + return defaultdict(Tree) + + results = Tree() + + errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), + cmdgen.MibVariable(p.sysDescr,), + cmdgen.MibVariable(p.sysObjectId,), + cmdgen.MibVariable(p.sysUpTime,), + cmdgen.MibVariable(p.sysContact,), + cmdgen.MibVariable(p.sysName,), + cmdgen.MibVariable(p.sysLocation,), + lookupMib=False + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication)) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if current_oid == v.sysDescr: + results['ansible_sysdescr'] = decode_hex(current_val) + elif current_oid == v.sysObjectId: + results['ansible_sysobjectid'] = current_val + elif current_oid == v.sysUpTime: + results['ansible_sysuptime'] = current_val + elif current_oid == v.sysContact: + results['ansible_syscontact'] = current_val + elif current_oid == v.sysName: + results['ansible_sysname'] = current_val + elif current_oid == v.sysLocation: + results['ansible_syslocation'] = current_val + + errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), + cmdgen.MibVariable(p.ifIndex,), + cmdgen.MibVariable(p.ifDescr,), + cmdgen.MibVariable(p.ifMtu,), + 
cmdgen.MibVariable(p.ifSpeed,),
+        cmdgen.MibVariable(p.ifPhysAddress,),
+        cmdgen.MibVariable(p.ifAdminStatus,),
+        cmdgen.MibVariable(p.ifOperStatus,),
+        cmdgen.MibVariable(p.ipAdEntAddr,),
+        cmdgen.MibVariable(p.ipAdEntIfIndex,),
+        cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+        cmdgen.MibVariable(p.ifAlias,),
+        lookupMib=False
+    )
+
+    if errorIndication:
+        module.fail_json(msg=str(errorIndication))
+
+    interface_indexes = []
+
+    all_ipv4_addresses = []
+    ipv4_networks = Tree()
+
+    for varBinds in varTable:
+        for oid, val in varBinds:
+            if isinstance(val, EndOfMibView):
+                continue
+            current_oid = oid.prettyPrint()
+            current_val = val.prettyPrint()
+            if v.ifIndex in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+                interface_indexes.append(ifIndex)
+            if v.ifDescr in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['name'] = current_val
+            if v.ifMtu in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+            if v.ifSpeed in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['speed'] = current_val
+            if v.ifPhysAddress in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+            if v.ifAdminStatus in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+            if v.ifOperStatus in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
+            if v.ipAdEntAddr in current_oid:
+                curIPList = current_oid.rsplit('.', 4)[-4:]
+                curIP = ".".join(curIPList)
+                ipv4_networks[curIP]['address'] = current_val
+                all_ipv4_addresses.append(current_val)
+            if v.ipAdEntIfIndex in current_oid:
+                curIPList = current_oid.rsplit('.', 4)[-4:]
+                curIP = ".".join(curIPList)
+                ipv4_networks[curIP]['interface'] = current_val
+            if v.ipAdEntNetMask in current_oid:
+                curIPList = current_oid.rsplit('.', 4)[-4:]
+                curIP = ".".join(curIPList)
+                ipv4_networks[curIP]['netmask'] = current_val
+
+            if v.ifAlias in current_oid:
+                ifIndex = int(current_oid.rsplit('.', 1)[-1])
+                results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+    interface_to_ipv4 = {}
+    for ipv4_network in ipv4_networks:
+        current_interface = ipv4_networks[ipv4_network]['interface']
+        current_network = {
+            'address': ipv4_networks[ipv4_network]['address'],
+            'netmask': ipv4_networks[ipv4_network]['netmask']
+        }
+        if current_interface not in interface_to_ipv4:
+            interface_to_ipv4[current_interface] = []
+        interface_to_ipv4[current_interface].append(current_network)
+
+    for interface in interface_to_ipv4:
+        results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+    results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+    module.exit_json(ansible_facts=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py
deleted file mode 120000
index c51dbd44d5..0000000000
--- a/plugins/modules/solaris_zone.py
+++ /dev/null
@@ -1 +0,0 @@
-./system/solaris_zone.py
\ No newline at end of file
diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py
new file mode 100644
index 0000000000..8999b21393
--- /dev/null
+++ b/plugins/modules/solaris_zone.py
@@ -0,0 +1,482 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015, Paul Markham
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+  - Create, start, stop and delete Solaris zones.
+  - This module does not currently allow changing of options for a zone that has already been created.
+author:
+  - Paul Markham (@pmarkham)
+requirements:
+  - Solaris 10 or 11
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - V(present), configure and install the zone.
+      - V(installed), synonym for V(present).
+      - V(running), if the zone already exists, boot it; otherwise, configure and install the zone first, then boot it.
+      - V(started), synonym for V(running).
+      - V(stopped), shut down a zone.
+      - V(absent), destroy the zone.
+      - V(configured), configure the zone so that it is ready to be attached.
+      - V(attached), attach a zone, but do not boot it.
+      - V(detached), shut down and detach a zone.
+    type: str
+    choices: [absent, attached, configured, detached, installed, present, running, started, stopped]
+    default: present
+  name:
+    description:
+      - Zone name.
+      - A zone name must be a unique name.
+      - A zone name must begin with an alphanumeric character.
+      - The name can contain alphanumeric characters, underscores V(_), hyphens V(-), and periods V(.).
+      - The name cannot be longer than 64 characters.
+    type: str
+    required: true
+  path:
+    description:
+      - The path where the zone is created. This is required when the zone is created, but not used otherwise.
+    type: str
+  sparse:
+    description:
+      - Whether to create a sparse (V(true)) or whole root (V(false)) zone.
+    type: bool
+    default: false
+  root_password:
+    description:
+      - The password hash for the root account. If not specified, the zone's root account does not have a password.
+    type: str
+  config:
+    description:
+      - The C(zonecfg) configuration commands for this zone. See zonecfg(1M) for the valid options and syntax. Typically this
+        is a list of options separated by semi-colons or new lines, for example V(set auto-boot=true;add net;set physical=bge0;set
+        address=10.1.1.1;end).
+    type: str
+    default: ''
+  create_options:
+    description:
+      - Extra options to the zonecfg(1M) create command.
+    type: str
+    default: ''
+  install_options:
+    description:
+      - Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, use this to specify the profile
+        XML file, for example O(install_options=-c sc_profile.xml).
+    type: str
+    default: ''
+  attach_options:
+    description:
+      - Extra options to the zoneadm attach command. For example, this can be used to specify whether a minimum or full update
+        of packages is required and if any packages need to be deleted. For valid values, see zoneadm(1M).
+    type: str
+    default: ''
+  timeout:
+    description:
+      - Timeout, in seconds, for zone to boot.
+    type: int
+    default: 600
+"""
+
+EXAMPLES = r"""
+- name: Create and install a zone, but don't boot it
+  community.general.solaris_zone:
+    name: zone1
+    state: present
+    path: /zones/zone1
+    sparse: true
+    root_password: Be9oX7OSwWoU.
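+    # The config value below uses zonecfg(1M) syntax, as described for the
+    # config option above: zonecfg commands separated by semi-colons.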
+    config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Create and install a zone and boot it
+  community.general.solaris_zone:
+    name: zone1
+    state: running
+    path: /zones/zone1
+    root_password: Be9oX7OSwWoU.
+    config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Boot an already installed zone
+  community.general.solaris_zone:
+    name: zone1
+    state: running
+
+- name: Stop a zone
+  community.general.solaris_zone:
+    name: zone1
+    state: stopped
+
+- name: Destroy a zone
+  community.general.solaris_zone:
+    name: zone1
+    state: absent
+
+- name: Detach a zone
+  community.general.solaris_zone:
+    name: zone1
+    state: detached
+
+- name: Configure a zone, ready to be attached
+  community.general.solaris_zone:
+    name: zone1
+    state: configured
+    path: /zones/zone1
+    root_password: Be9oX7OSwWoU.
+    config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Attach zone1
+  community.general.solaris_zone:
+    name: zone1
+    state: attached
+    attach_options: -u
+"""
+
+import os
+import platform
+import re
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zone(object):
+    def __init__(self, module):
+        self.changed = False
+        self.msg = []
+
+        self.module = module
+        self.path = self.module.params['path']
+        self.name = self.module.params['name']
+        self.sparse = self.module.params['sparse']
+        self.root_password = self.module.params['root_password']
+        self.timeout = self.module.params['timeout']
+        self.config = self.module.params['config']
+        self.create_options = self.module.params['create_options']
+        self.install_options = self.module.params['install_options']
+        self.attach_options = self.module.params['attach_options']
+
+        self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+        self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+        self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+        if self.module.check_mode:
+            self.msg.append('Running in check mode')
+
+        if platform.system() != 'SunOS':
+            self.module.fail_json(msg='This module requires Solaris')
+
+        (self.os_major, self.os_minor) = platform.release().split('.')
+        if int(self.os_minor) < 10:
+            self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+        match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
+        if not match:
+            self.module.fail_json(msg="Provided zone name is not a valid zone name. "
+                                      "Please refer to the documentation for correct zone name specifications.")
+
+    def configure(self):
+        if not self.path:
+            self.module.fail_json(msg='Missing required argument: path')
+
+        if not self.module.check_mode:
+            t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
+
+            if self.sparse:
+                t.write('create %s\n' % self.create_options)
+                self.msg.append('creating sparse-root zone')
+            else:
+                t.write('create -b %s\n' % self.create_options)
+                self.msg.append('creating whole-root zone')
+
+            t.write('set zonepath=%s\n' % self.path)
+            t.write('%s\n' % self.config)
+            t.close()
+
+            cmd = [self.zonecfg_cmd, '-z', self.name, '-f', t.name]
+            (rc, out, err) = self.module.run_command(cmd)
+            if rc != 0:
+                self.module.fail_json(msg='Failed to create zone. 
%s' % (out + err)) + os.unlink(t.name) + + self.changed = True + self.msg.append('zone configured') + + def install(self): + if not self.module.check_mode: + cmd = [self.zoneadm_cmd, '-z', self.name, 'install', self.install_options] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to install zone. %s' % (out + err)) + if int(self.os_minor) == 10: + self.configure_sysid() + self.configure_password() + self.configure_ssh_keys() + self.changed = True + self.msg.append('zone installed') + + def uninstall(self): + if self.is_installed(): + if not self.module.check_mode: + cmd = [self.zoneadm_cmd, '-z', self.name, 'uninstall', '-F'] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone uninstalled') + + def configure_sysid(self): + if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path): + os.unlink('%s/root/etc/.UNCONFIGURED' % self.path) + + open('%s/root/noautoshutdown' % self.path, 'w').close() + + with open('%s/root/etc/nodename' % self.path, 'w') as node: + node.write(self.name) + + with open('%s/root/etc/.sysIDtool.state' % self.path, 'w') as id: + id.write('1 # System previously configured?\n') + id.write('1 # Bootparams succeeded?\n') + id.write('1 # System is on a network?\n') + id.write('1 # Extended network information gathered?\n') + id.write('0 # Autobinder succeeded?\n') + id.write('1 # Network has subnets?\n') + id.write('1 # root password prompted for?\n') + id.write('1 # locale and term prompted for?\n') + id.write('1 # security policy in place\n') + id.write('1 # NFSv4 domain configured\n') + id.write('0 # Auto Registration Configured\n') + id.write('vt100') + + def configure_ssh_keys(self): + rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path + dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path + + if not os.path.isfile(rsa_key_file): + cmd = [self.ssh_keygen_cmd, '-f', rsa_key_file, '-t', 'rsa', '-N', ''] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err)) + + if not os.path.isfile(dsa_key_file): + cmd = [self.ssh_keygen_cmd, '-f', dsa_key_file, '-t', 'dsa', '-N', ''] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err)) + + def configure_password(self): + shadow = '%s/root/etc/shadow' % self.path + if self.root_password: + with open(shadow, 'r') as f: + lines = f.readlines() + + for i in range(0, len(lines)): + fields = lines[i].split(':') + if fields[0] == 'root': + fields[1] = self.root_password + lines[i] = ':'.join(fields) + + with open(shadow, 'w') as f: + for line in lines: + f.write(line) + + def boot(self): + if not self.module.check_mode: + cmd = [self.zoneadm_cmd, '-z', self.name, 'boot'] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to boot zone. %s' % (out + err)) + + """ + The boot command can return before the zone has fully booted. This is especially + true on the first boot when the zone initializes the SMF services. Unless the zone + has fully booted, subsequent tasks in the playbook may fail as services aren't running yet. + Wait until the zone's console login is running; once that's running, consider the zone booted. 
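+        We detect this by polling for a ttymon process on the zone's console every
+        10 seconds, up to the configured timeout.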
+ """ + + elapsed = 0 + while True: + if elapsed > self.timeout: + self.module.fail_json(msg='timed out waiting for zone to boot') + rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name) + if rc == 0: + break + time.sleep(10) + elapsed += 10 + self.changed = True + self.msg.append('zone booted') + + def destroy(self): + if self.is_running(): + self.stop() + if self.is_installed(): + self.uninstall() + if not self.module.check_mode: + cmd = [self.zonecfg_cmd, '-z', self.name, 'delete', '-F'] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to delete zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone deleted') + + def stop(self): + if not self.module.check_mode: + cmd = [self.zoneadm_cmd, '-z', self.name, 'halt'] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to stop zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone stopped') + + def detach(self): + if not self.module.check_mode: + cmd = [self.zoneadm_cmd, '-z', self.name, 'detach'] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to detach zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone detached') + + def attach(self): + if not self.module.check_mode: + cmd = [self.zoneadm_cmd, '-z', self.name, 'attach', self.attach_options] + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to attach zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone attached') + + def exists(self): + cmd = [self.zoneadm_cmd, '-z', self.name, 'list'] + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + return True + else: + return False + + def is_running(self): + return self.status() == 'running' + + def is_installed(self): + return self.status() == 'installed' + + def is_configured(self): + return self.status() == 'configured' + + def status(self): + cmd = [self.zoneadm_cmd, '-z', self.name, 'list', '-p'] + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + return out.split(':')[2] + else: + return 'undefined' + + def state_present(self): + if self.exists(): + self.msg.append('zone already exists') + else: + self.configure() + self.install() + + def state_running(self): + self.state_present() + if self.is_running(): + self.msg.append('zone already running') + else: + self.boot() + + def state_stopped(self): + if self.exists(): + self.stop() + else: + self.module.fail_json(msg='zone does not exist') + + def state_absent(self): + if self.exists(): + if self.is_running(): + self.stop() + self.destroy() + else: + self.msg.append('zone does not exist') + + def state_configured(self): + if self.exists(): + self.msg.append('zone already exists') + else: + self.configure() + + def state_detached(self): + if not self.exists(): + self.module.fail_json(msg='zone does not exist') + if self.is_configured(): + self.msg.append('zone already detached') + else: + self.stop() + self.detach() + + def state_attached(self): + if not self.exists(): + self.msg.append('zone does not exist') + if self.is_configured(): + self.attach() + else: + self.msg.append('zone already attached') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', + choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']), + 
path=dict(type='str'),
+            sparse=dict(type='bool', default=False),
+            root_password=dict(type='str', no_log=True),
+            timeout=dict(type='int', default=600),
+            config=dict(type='str', default=''),
+            create_options=dict(type='str', default=''),
+            install_options=dict(type='str', default=''),
+            attach_options=dict(type='str', default=''),
+        ),
+        supports_check_mode=True,
+    )
+
+    zone = Zone(module)
+
+    state = module.params['state']
+
+    if state == 'running' or state == 'started':
+        zone.state_running()
+    elif state == 'present' or state == 'installed':
+        zone.state_present()
+    elif state == 'stopped':
+        zone.state_stopped()
+    elif state == 'absent':
+        zone.state_absent()
+    elif state == 'configured':
+        zone.state_configured()
+    elif state == 'detached':
+        zone.state_detached()
+    elif state == 'attached':
+        zone.state_attached()
+    else:
+        module.fail_json(msg='Invalid state: %s' % state)
+
+    module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py
deleted file mode 120000
index e582e652b5..0000000000
--- a/plugins/modules/sorcery.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/sorcery.py
\ No newline at end of file
diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py
new file mode 100644
index 0000000000..eabd459be7
--- /dev/null
+++ b/plugins/modules/sorcery.py
@@ -0,0 +1,758 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015-2023, Vlad Glagolev
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+  - Manages "spells" on Source Mage GNU/Linux using the I(sorcery) toolchain.
+author: "Vlad Glagolev (@vaygr)"
+notes:
+  - When all three components are selected, the update proceeds in the order Sorcery -> Grimoire(s) -> Spell(s); you cannot
    override it.
+  - Grimoire handling is supported since community.general 7.3.0.
+requirements:
+  - bash
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the spell or grimoire.
+      - Multiple names can be given, separated by commas.
+      - Special value V(*) in conjunction with states V(latest) or V(rebuild) updates or rebuilds the whole system respectively.
+      - The alias O(grimoire) was added in community.general 7.3.0.
+    aliases: ["spell", "grimoire"]
+    type: list
+    elements: str
+
+  repository:
+    description:
+      - Repository location.
+      - If specified, O(name) represents grimoire(s) instead of spell(s).
+      - Special value V(*) pulls the grimoire from the official location.
+      - Only a single item in O(name) can be used in conjunction with V(*).
+      - O(state=absent) must be used with the special value V(*).
+    type: str
+    version_added: 7.3.0
+
+  state:
+    description:
+      - Whether to cast, dispel or rebuild a package.
+      - State V(cast) is an equivalent of V(present), not V(latest).
+      - State V(rebuild) implies casting all specified spells, not only those that existed before.
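+      - State V(dispelled) is an equivalent of V(absent).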
+ choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] + default: "present" + type: str + + depends: + description: + - Comma-separated list of _optional_ dependencies to build a spell (or make sure it is built) with; use V(+)/V(-) in + front of dependency to turn it on/off (V(+) is optional though). + - This option is ignored if O(name) parameter is equal to V(*) or contains more than one spell. + - Providers must be supplied in the form recognized by Sorcery, for example 'V(openssl(SSL\))'. + type: str + + update: + description: + - Whether or not to update sorcery scripts at the very first stage. + type: bool + default: false + + update_cache: + description: + - Whether or not to update grimoire collection before casting spells. + type: bool + default: false + aliases: ["update_codex"] + + cache_valid_time: + description: + - Time in seconds to invalidate grimoire collection on update. + - Especially useful for SCM and rsync grimoires. + - Makes sense only in pair with O(update_cache). + type: int + default: 0 +""" + + +EXAMPLES = r""" +- name: Make sure spell foo is installed + community.general.sorcery: + spell: foo + state: present + +- name: Make sure spells foo, bar and baz are removed + community.general.sorcery: + spell: foo,bar,baz + state: absent + +- name: Make sure spell foo with dependencies bar and baz is installed + community.general.sorcery: + spell: foo + depends: bar,baz + state: present + +- name: Make sure spell foo with bar and without baz dependencies is installed + community.general.sorcery: + spell: foo + depends: +bar,-baz + state: present + +- name: Make sure spell foo with libressl (providing SSL) dependency is installed + community.general.sorcery: + spell: foo + depends: libressl(SSL) + state: present + +- name: Make sure spells with/without required dependencies (if any) are installed + community.general.sorcery: + name: "{{ item.spell }}" + depends: "{{ item.depends | default(None) }}" + state: present + loop: + - {spell: 'vifm', depends: '+file,-gtk+2'} + - {spell: 'fwknop', depends: 'gpgme'} + - {spell: 'pv,tnftp,tor'} + +- name: Install the latest version of spell foo using regular glossary + community.general.sorcery: + name: foo + state: latest + +- name: Rebuild spell foo + community.general.sorcery: + spell: foo + state: rebuild + +- name: Rebuild the whole system, but update Sorcery and Codex first + community.general.sorcery: + spell: '*' + state: rebuild + update: true + update_cache: true + +- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias + community.general.sorcery: + update_codex: true + cache_valid_time: 86400 + +- name: Make sure stable grimoire is present + community.general.sorcery: + name: stable + repository: '*' + state: present + +- name: Make sure binary and stable-rc grimoires are removed + community.general.sorcery: + grimoire: binary,stable-rc + repository: '*' + state: absent + +- name: Make sure games grimoire is pulled from rsync + community.general.sorcery: + grimoire: games + repository: "rsync://download.sourcemage.org::codex/games" + state: present + +- name: Make sure a specific branch of stable grimoire is pulled from git + community.general.sorcery: + grimoire: stable.git + repository: "git://download.sourcemage.org/smgl/grimoire.git:stable.git:stable-0.62" + state: present + +- name: Update only Sorcery itself + community.general.sorcery: + update: true +""" + + +RETURN = r""" +""" + + +import datetime +import fileinput +import os +import re +import shutil 
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+# auto-filled at module init
+SORCERY = {
+    'sorcery': None,
+    'scribe': None,
+    'cast': None,
+    'dispel': None,
+    'gaze': None
+}
+
+SORCERY_LOG_DIR = "/var/log/sorcery"
+SORCERY_STATE_DIR = "/var/state/sorcery"
+
+NA = "N/A"
+
+
+def get_sorcery_ver(module):
+    """ Get Sorcery version. """
+
+    cmd_sorcery = "%s --version" % SORCERY['sorcery']
+
+    rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+    if rc != 0 or not stdout:
+        module.fail_json(msg="unable to get Sorcery version")
+
+    return stdout.strip()
+
+
+def codex_fresh(codex, module):
+    """ Check if grimoire collection is fresh enough. """
+
+    if not module.params['cache_valid_time']:
+        return False
+
+    timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
+
+    for grimoire in codex:
+        lastupdate_path = os.path.join(SORCERY_STATE_DIR,
+                                       grimoire + ".lastupdate")
+
+        try:
+            mtime = os.stat(lastupdate_path).st_mtime
+        except Exception:
+            return False
+
+        lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
+
+        # if any grimoire is not fresh, we invalidate the Codex
+        if lastupdate_ts + timedelta < datetime.datetime.now():
+            return False
+
+    return True
+
+
+def codex_list(module, skip_new=False):
+    """ List valid grimoire collection. """
+
+    params = module.params
+
+    codex = {}
+
+    cmd_scribe = "%s index" % SORCERY['scribe']
+
+    rc, stdout, stderr = module.run_command(cmd_scribe)
+
+    if rc != 0:
+        module.fail_json(msg="unable to list grimoire collection, fix your Codex")
+
+    rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
+
+    # drop 4-line header and empty trailing line
+    for line in stdout.splitlines()[4:-1]:
+        match = rex.match(line)
+
+        if match:
+            codex[match.group('grim')] = match.group('ver')
+
+    # return only specified grimoires unless requested to skip new
+    if params['repository'] and not skip_new:
+        codex = {x: codex.get(x, NA) for x in params['name']}
+
+    if not codex:
+        module.fail_json(msg="no grimoires to operate on; add at least one")
+
+    return codex
+
+
+def update_sorcery(module):
+    """ Update sorcery scripts.
+
+    This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+    positive change value.
+
+    """
+
+    changed = False
+
+    if module.check_mode:
+        return (True, "would have updated Sorcery")
+    else:
+        sorcery_ver = get_sorcery_ver(module)
+
+        cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+        rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+        if rc != 0:
+            module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+        if sorcery_ver != get_sorcery_ver(module):
+            changed = True
+
+        return (changed, "successfully updated Sorcery")
+
+
+def update_codex(module):
+    """ Update grimoire collections.
+
+    This runs 'scribe update'. Check mode always returns a positive change
+    value when 'cache_valid_time' is used.
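+    If the collection is still fresh (see codex_fresh()), no update is run and
+    no change is reported.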
+
+    """
+
+    params = module.params
+
+    changed = False
+
+    codex = codex_list(module)
+    fresh = codex_fresh(codex, module)
+
+    if module.check_mode:
+        if not fresh:
+            changed = True
+
+        return (changed, "would have updated Codex")
+    else:
+        if not fresh:
+            # SILENT is required as a workaround for query() in libgpg
+            module.run_command_environ_update.update(dict(SILENT='1'))
+
+            cmd_scribe = "%s update" % SORCERY['scribe']
+
+            if params['repository']:
+                cmd_scribe += ' %s' % ' '.join(codex.keys())
+
+            rc, stdout, stderr = module.run_command(cmd_scribe)
+
+            if rc != 0:
+                module.fail_json(msg="unable to update Codex: " + stdout)
+
+            if codex != codex_list(module):
+                changed = True
+
+        return (changed, "successfully updated Codex")
+
+
+def match_depends(module):
+    """ Check for matching dependencies.
+
+    This inspects spell's dependencies with the desired states and returns
+    'False' if a recast is needed to match them. It also adds required lines
+    to the system-wide depends file for proper recast procedure.
+
+    """
+
+    params = module.params
+    spells = params['name']
+
+    depends = {}
+
+    depends_ok = True
+
+    if len(spells) > 1 or not params['depends']:
+        return depends_ok
+
+    spell = spells[0]
+
+    if module.check_mode:
+        sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+        try:
+            shutil.copy2(sorcery_depends_orig, sorcery_depends)
+        except IOError:
+            module.fail_json(msg="failed to copy depends.check file")
+    else:
+        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
+    rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+    for d in params['depends'].split(','):
+        match = rex.match(d)
+
+        if not match:
+            module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+        # normalize status
+        if not match.group('status') or match.group('status') == '+':
+            status = 'on'
+        else:
+            status = 'off'
+
+        depends[match.group('depend')] = status
+
+    # drop providers spec
+    depends_list = [s.split('(')[0] for s in depends]
+
+    cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+    rc, stdout, stderr = module.run_command(cmd_gaze)
+
+    if rc != 0:
+        module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+    fi = fileinput.input(sorcery_depends, inplace=True)
+
+    try:
+        try:
+            for line in fi:
+                if line.startswith(spell + ':'):
+                    match = None
+
+                    for d in depends:
+                        # when local status is 'off' and dependency is provider,
+                        # use only provider value
+                        d_offset = d.find('(')
+
+                        if d_offset == -1:
+                            d_p = ''
+                        else:
+                            d_p = re.escape(d[d_offset:])
+
+                        # .escape() is needed mostly for the spells like 'libsigc++'
+                        rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+                                         (re.escape(spell), re.escape(d), d_p))
+
+                        match = rex.match(line)
+
+                        # we matched the line "spell:dependency:on|off:optional:"
+                        if match:
+                            # if we also matched the local status, mark dependency
+                            # as empty and put it back into depends file
+                            if match.group('lstatus') == depends[d]:
+                                depends[d] = None
+
+                                sys.stdout.write(line)
+
+                            # status is not that we need, so keep this dependency
+                            # in the list for further reverse switching;
+                            # stop and process the next line in both cases
+                            break
+
+                    if not match:
+                        sys.stdout.write(line)
+                else:
+                    sys.stdout.write(line)
+        except IOError:
+            module.fail_json(msg="I/O error on the depends file")
+    finally:
+        fi.close()
+
+    depends_new = [v for v in depends if depends[v]]
+
+    if depends_new:
+        try:
+            with open(sorcery_depends, 'a') as fl:
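+                # re-add the dependencies that still need switching, now with
+                # the desired status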
+                for k in depends_new:
+                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+        except IOError:
+            module.fail_json(msg="I/O error on the depends file")
+
+        depends_ok = False
+
+    if module.check_mode:
+        try:
+            os.remove(sorcery_depends)
+        except IOError:
+            module.fail_json(msg="failed to clean up depends.backup file")
+
+    return depends_ok
+
+
+def manage_grimoires(module):
+    """ Add or remove grimoires. """
+
+    params = module.params
+    grimoires = params['name']
+    url = params['repository']
+
+    codex = codex_list(module, True)
+
+    if url == '*':
+        if params['state'] in ('present', 'latest', 'absent'):
+            if params['state'] == 'absent':
+                action = "remove"
+                todo = set(grimoires) & set(codex)
+            else:
+                action = "add"
+                todo = set(grimoires) - set(codex)
+
+            if not todo:
+                return (False, "all grimoire(s) are already %sed" % action[:5])
+
+            if module.check_mode:
+                return (True, "would have %sed grimoire(s)" % action[:5])
+
+            cmd_scribe = "%s %s %s" % (SORCERY['scribe'], action, ' '.join(todo))
+
+            rc, stdout, stderr = module.run_command(cmd_scribe)
+
+            if rc != 0:
+                module.fail_json(msg="failed to %s one or more grimoire(s): %s" % (action, stdout))
+
+            return (True, "successfully %sed one or more grimoire(s)" % action[:5])
+        else:
+            module.fail_json(msg="unsupported operation on '*' repository value")
+    else:
+        if params['state'] in ('present', 'latest'):
+            if len(grimoires) > 1:
+                module.fail_json(msg="using multiple items with repository is invalid")
+
+            grimoire = grimoires[0]
+
+            if grimoire in codex:
+                return (False, "grimoire %s already exists" % grimoire)
+
+            if module.check_mode:
+                return (True, "would have added grimoire %s from %s" % (grimoire, url))
+
+            cmd_scribe = "%s add %s from %s" % (SORCERY['scribe'], grimoire, url)
+
+            rc, stdout, stderr = module.run_command(cmd_scribe)
+
+            if rc != 0:
+                module.fail_json(msg="failed to add grimoire %s from %s: %s" % (grimoire, url, stdout))
+
+            return (True, "successfully added grimoire %s from %s" % (grimoire, url))
+        else:
+            module.fail_json(msg="unsupported operation on repository value")
+
+
+def manage_spells(module):
+    """ Cast or dispel spells.
+
+    This manages the whole system ('*'), a list of spells, or a single spell.
+    The 'cast' command is used to install or rebuild spells, while 'dispel'
+    takes care of their removal from the system.
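+    The whole-system form ('*') supports only the 'latest' and 'rebuild' states.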
+
+    """
+
+    params = module.params
+    spells = params['name']
+
+    sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+    if spells == '*':
+        if params['state'] == 'latest':
+            # back up original queue
+            try:
+                os.rename(sorcery_queue, sorcery_queue + ".backup")
+            except IOError:
+                module.fail_json(msg="failed to backup the update queue")
+
+            # see update_codex()
+            module.run_command_environ_update.update(dict(SILENT='1'))
+
+            cmd_sorcery = "%s queue" % SORCERY['sorcery']
+
+            rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+            if rc != 0:
+                module.fail_json(msg="failed to generate the update queue")
+
+            try:
+                queue_size = os.stat(sorcery_queue).st_size
+            except Exception:
+                module.fail_json(msg="failed to read the update queue")
+
+            if queue_size != 0:
+                if module.check_mode:
+                    try:
+                        os.rename(sorcery_queue + ".backup", sorcery_queue)
+                    except IOError:
+                        module.fail_json(msg="failed to restore the update queue")
+
+                    return (True, "would have updated the system")
+
+                cmd_cast = "%s --queue" % SORCERY['cast']
+
+                rc, stdout, stderr = module.run_command(cmd_cast)
+
+                if rc != 0:
+                    module.fail_json(msg="failed to update the system")
+
+                return (True, "successfully updated the system")
+            else:
+                return (False, "the system is already up to date")
+        elif params['state'] == 'rebuild':
+            if module.check_mode:
+                return (True, "would have rebuilt the system")
+
+            cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+            rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+            if rc != 0:
+                module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+            return (True, "successfully rebuilt the system")
+        else:
+            module.fail_json(msg="unsupported operation on '*' name value")
+    else:
+        if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+            # extract versions from the 'gaze' command
+            cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+            rc, stdout, stderr = module.run_command(cmd_gaze)
+
+            # fail if any of spells cannot be found
+            if rc != 0:
+                module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+                                 ', '.join(spells))
+
+            cast_queue = []
+            dispel_queue = []
+
+            rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+            # drop 2-line header and empty trailing line
+            for line in stdout.splitlines()[2:-1]:
+                match = rex.match(line)
+
+                cast = False
+
+                if params['state'] == 'present':
+                    # spell is not installed..
+                    if match.group('inst_ver') == '-':
+                        # ..so set up depends reqs for it
+                        match_depends(module)
+
+                        cast = True
+                    # spell is installed..
+                    else:
+                        # ..but does not conform depends reqs
+                        if not match_depends(module):
+                            cast = True
+                elif params['state'] == 'latest':
+                    # grimoire and installed versions do not match..
+                    if match.group('grim_ver') != match.group('inst_ver'):
+                        # ..so check for depends reqs first and set them up
+                        match_depends(module)
+
+                        cast = True
+                    # grimoire and installed versions match..
+ else: + # ..but the spell does not conform depends reqs + if not match_depends(module): + cast = True + elif params['state'] == 'rebuild': + cast = True + # 'absent' + else: + if match.group('inst_ver') != '-': + dispel_queue.append(match.group('spell')) + + if cast: + cast_queue.append(match.group('spell')) + + if cast_queue: + if module.check_mode: + return (True, "would have cast spell(s)") + + cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue)) + + rc, stdout, stderr = module.run_command(cmd_cast) + + if rc != 0: + module.fail_json(msg="failed to cast spell(s): " + stdout) + + return (True, "successfully cast spell(s)") + elif params['state'] != 'absent': + return (False, "spell(s) are already cast") + + if dispel_queue: + if module.check_mode: + return (True, "would have dispelled spell(s)") + + cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue)) + + rc, stdout, stderr = module.run_command(cmd_dispel) + + if rc != 0: + module.fail_json(msg="failed to dispel spell(s): " + stdout) + + return (True, "successfully dispelled spell(s)") + else: + return (False, "spell(s) are already dispelled") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['spell', 'grimoire'], type='list', elements='str'), + repository=dict(type='str'), + state=dict(default='present', choices=['present', 'latest', + 'absent', 'cast', 'dispelled', 'rebuild']), + depends=dict(), + update=dict(default=False, type='bool'), + update_cache=dict(default=False, aliases=['update_codex'], type='bool'), + cache_valid_time=dict(default=0, type='int') + ), + required_one_of=[['name', 'update', 'update_cache']], + supports_check_mode=True + ) + + if os.geteuid() != 0: + module.fail_json(msg="root privileges are required for this operation") + + for c in SORCERY: + SORCERY[c] = module.get_bin_path(c, True) + + # prepare environment: run sorcery commands without asking questions + module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0') + + params = module.params + + # normalize 'state' parameter + if params['state'] in ('present', 'cast'): + params['state'] = 'present' + elif params['state'] in ('absent', 'dispelled'): + params['state'] = 'absent' + + changed = { + 'sorcery': (False, NA), + 'grimoires': (False, NA), + 'codex': (False, NA), + 'spells': (False, NA) + } + + if params['update']: + changed['sorcery'] = update_sorcery(module) + + if params['name'] and params['repository']: + changed['grimoires'] = manage_grimoires(module) + + if params['update_cache']: + changed['codex'] = update_codex(module) + + if params['name'] and not params['repository']: + changed['spells'] = manage_spells(module) + + if any(x[0] for x in changed.values()): + state_msg = "state changed" + state_changed = True + else: + state_msg = "no change in state" + state_changed = False + + module.exit_json(changed=state_changed, msg=state_msg + ": " + '; '.join(x[1] for x in changed.values())) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/plugins/modules/source_control/bitbucket/bitbucket_access_key.py deleted file mode 100644 index 6451d72909..0000000000 --- a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_access_key -short_description: Manages Bitbucket repository access keys -description: - - Manages Bitbucket repository access keys (also called deploy keys). -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - key: - description: - - The SSH public key. - type: str - label: - description: - - The key label. - type: str - required: true - state: - description: - - Indicates desired state of the access key. - type: str - required: true - choices: [ absent, present ] -notes: - - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories. - - Check mode is supported. -''' - -EXAMPLES = r''' -- name: Create access key - community.general.bitbucket_access_key: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - key: '{{lookup("file", "bitbucket.pub") }}' - label: 'Bitbucket' - state: present - -- name: Delete access key - community.general.bitbucket_access_key: - repository: bitbucket-repo - workspace: bitbucket_workspace - label: Bitbucket - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'required_key': '`key` is required when the `state` is `present`', - 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', - 'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`', - 'invalid_key': 'Invalid SSH key or key is already in use', -} - -BITBUCKET_API_ENDPOINTS = { - 'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, - 'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_deploy_key(module, bitbucket): - """ - Search for an existing deploy key on Bitbucket - with the label specified in module param `label` - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing deploy key or None if not found - :rtype: dict or None - - Return example:: - - { - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" - }, - "html": { - "href": "https://bitbucket.org/mleu/test" - }, - "avatar": { - "href": "..." 
- } - }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" - }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } - }, - } - """ - content = { - 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - } - - # Look through the all response pages in search of deploy key we need - while 'next' in content: - info, content = bitbucket.request( - api_url=content['next'], - method='GET', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) - - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) - - if info['status'] != 200: - module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) - - res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) - - if res is not None: - return res - - return None - - -def create_deploy_key(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='POST', - data={ - 'key': module.params['key'], - 'label': module.params['label'], - }, - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) - - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) - - if info['status'] == 400: - module.fail_json(msg=error_messages['invalid_key']) - - if info['status'] != 200: - module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( - label=module.params['label'], - info=info, - )) - - -def delete_deploy_key(module, bitbucket, key_id): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - key_id=key_id, - ), - method='DELETE', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) - - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( - label=module.params['label'], - info=info, - )) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - key=dict(type='str', no_log=False), - label=dict(type='str', required=True), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - bitbucket = BitbucketHelper(module) - - key = module.params['key'] - state = module.params['state'] - - # Check parameters - if (key is None) and (state == 'present'): - module.fail_json(msg=error_messages['required_key']) - - # Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing deploy key (if any) - existing_deploy_key = 
get_existing_deploy_key(module, bitbucket) - changed = False - - # Create new deploy key in case it doesn't exists - if not existing_deploy_key and (state == 'present'): - if not module.check_mode: - create_deploy_key(module, bitbucket) - changed = True - - # Update deploy key if the old value does not match the new one - elif existing_deploy_key and (state == 'present'): - if not key.startswith(existing_deploy_key.get('key')): - if not module.check_mode: - # Bitbucket doesn't support update key for the same label, - # so we need to delete the old one first - delete_deploy_key(module, bitbucket, existing_deploy_key['id']) - create_deploy_key(module, bitbucket) - changed = True - - # Delete deploy key - elif existing_deploy_key and (state == 'absent'): - if not module.check_mode: - delete_deploy_key(module, bitbucket, existing_deploy_key['id']) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py deleted file mode 100644 index 5d42419dfa..0000000000 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_pipeline_key_pair -short_description: Manages Bitbucket pipeline SSH key pair -description: - - Manages Bitbucket pipeline SSH key pair. -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - public_key: - description: - - The public key. - type: str - private_key: - description: - - The private key. - type: str - state: - description: - - Indicates desired state of the key pair. - type: str - required: true - choices: [ absent, present ] -notes: - - Check mode is supported. 
-''' - -EXAMPLES = r''' -- name: Create or update SSH key pair - community.general.bitbucket_pipeline_key_pair: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - public_key: '{{lookup("file", "bitbucket.pub") }}' - private_key: '{{lookup("file", "bitbucket") }}' - state: present - -- name: Remove SSH key pair - community.general.bitbucket_pipeline_key_pair: - repository: bitbucket-repo - workspace: bitbucket_workspace - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'invalid_params': 'Account, repository or SSH key pair was not found', - 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`', -} - -BITBUCKET_API_ENDPOINTS = { - 'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_ssh_key_pair(module, bitbucket): - """ - Retrieves an existing ssh key pair from repository - specified in module param `repository` - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing key pair or None if not found - :rtype: dict or None - - Return example:: - - { - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT", - "type": "pipeline_ssh_key_pair" - } - """ - api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - - info, content = bitbucket.request( - api_url=api_url, - method='GET', - ) - - if info['status'] == 404: - # Account, repository or SSH key pair was not found. 
- return None - - return content - - -def update_ssh_key_pair(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='PUT', - data={ - 'private_key': module.params['private_key'], - 'public_key': module.params['public_key'], - }, - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 200: - module.fail_json(msg='Failed to create or update pipeline ssh key pair : {0}'.format(info)) - - -def delete_ssh_key_pair(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='DELETE', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info)) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - public_key=dict(type='str'), - private_key=dict(type='str', no_log=True), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - bitbucket = BitbucketHelper(module) - - state = module.params['state'] - public_key = module.params['public_key'] - private_key = module.params['private_key'] - - # Check parameters - if ((public_key is None) or (private_key is None)) and (state == 'present'): - module.fail_json(msg=error_messages['required_keys']) - - # Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing ssh key - key_pair = get_existing_ssh_key_pair(module, bitbucket) - changed = False - - # Create or update key pair - if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'): - if not module.check_mode: - update_ssh_key_pair(module, bitbucket) - changed = True - - # Delete key pair - elif key_pair and (state == 'absent'): - if not module.check_mode: - delete_ssh_key_pair(module, bitbucket) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py deleted file mode 100644 index 9f4f2b9498..0000000000 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_pipeline_known_host -short_description: Manages Bitbucket pipeline known hosts -description: - - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. 
- - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually. -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -requirements: - - paramiko -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - name: - description: - - The FQDN of the known host. - type: str - required: true - key: - description: - - The public key. - type: str - state: - description: - - Indicates desired state of the record. - type: str - required: true - choices: [ absent, present ] -notes: - - Check mode is supported. -''' - -EXAMPLES = r''' -- name: Create known hosts from the list - community.general.bitbucket_pipeline_known_host: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - name: '{{ item }}' - state: present - with_items: - - bitbucket.org - - example.com - -- name: Remove known host - community.general.bitbucket_pipeline_known_host: - repository: bitbucket-repo - workspace: bitbucket_workspace - name: bitbucket.org - state: absent - -- name: Specify public key file - community.general.bitbucket_pipeline_known_host: - repository: bitbucket-repo - workspace: bitbucket_workspace - name: bitbucket.org - key: '{{lookup("file", "bitbucket.pub") }}' - state: absent -''' - -RETURN = r''' # ''' - -import socket - -try: - import paramiko - HAS_PARAMIKO = True -except ImportError: - HAS_PARAMIKO = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'invalid_params': 'Account or repository was not found', - 'unknown_key_type': 'Public key type is unknown', -} - -BITBUCKET_API_ENDPOINTS = { - 'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL, - 'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_known_host(module, bitbucket): - """ - Search for a host in Bitbucket pipelines known hosts - with the name specified in module param `name` - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing host or None if not found - :rtype: dict or None - - Return example:: - - { - 'type': 'pipeline_known_host', - 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}' - 'hostname': 'bitbucket.org', - 'public_key': { - 'type': 'pipeline_ssh_public_key', - 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', - 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', - 'key_type': 'ssh-rsa', - 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' - }, - } - """ - content = { - 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - } - - # Look through all response pages in search of hostname we need - while 'next' in content: - info, content = bitbucket.request( - api_url=content['next'], - method='GET', - ) - - if info['status'] == 404: - 
module.fail_json(msg='Invalid `repository` or `workspace`.') - - if info['status'] != 200: - module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info)) - - host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None) - - if host is not None: - return host - - return None - - -def get_host_key(module, hostname): - """ - Fetches public key for specified host - - :param module: instance of the :class:`AnsibleModule` - :param hostname: host name - :return: key type and key content - :rtype: tuple - - Return example:: - - ( - 'ssh-rsa', - 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==', - ) - """ - try: - sock = socket.socket() - sock.connect((hostname, 22)) - except socket.error: - module.fail_json(msg='Error opening socket to {0}'.format(hostname)) - - try: - trans = paramiko.transport.Transport(sock) - trans.start_client() - host_key = trans.get_remote_server_key() - except paramiko.SSHException: - module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname)) - - trans.close() - sock.close() - - key_type = host_key.get_name() - key = host_key.get_base64() - - return key_type, key - - -def create_known_host(module, bitbucket): - hostname = module.params['name'] - key_param = module.params['key'] - - if key_param is None: - key_type, key = get_host_key(module, hostname) - elif ' ' in key_param: - key_type, key = key_param.split(' ', 1) - else: - module.fail_json(msg=error_messages['unknown_key_type']) - - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='POST', - data={ - 'hostname': hostname, - 'public_key': { - 'key_type': key_type, - 'key': key, - } - }, - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 201: - module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format( - hostname=module.params['hostname'], - info=info, - )) - - -def delete_known_host(module, bitbucket, known_host_uuid): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - known_host_uuid=known_host_uuid, - ), - method='DELETE', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format( - hostname=module.params['name'], - info=info, - )) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - name=dict(type='str', required=True), - key=dict(type='str', no_log=False), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - if (module.params['key'] is None) and (not HAS_PARAMIKO): - module.fail_json(msg='`paramiko` package not found, please install it.') - - bitbucket = BitbucketHelper(module) - - # Retrieve access token for authorized API requests - 
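The fingerprint retrieval used when I(key) is omitted, condensed into a standalone sketch. It mirrors `get_host_key()` above; `fetch_host_key`, the port and timeout defaults are illustrative, and paramiko must be installed.

```python
# Sketch only: grab a server's SSH host key the way get_host_key() does.
import socket

import paramiko


def fetch_host_key(hostname, port=22, timeout=10):
    sock = socket.create_connection((hostname, port), timeout=timeout)
    trans = None
    try:
        trans = paramiko.transport.Transport(sock)
        trans.start_client()
        key = trans.get_remote_server_key()
    finally:
        if trans is not None:
            trans.close()
        sock.close()
    # e.g. ('ssh-rsa', 'AAAAB3NzaC1yc2E...')
    return key.get_name(), key.get_base64()
```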
bitbucket.fetch_access_token() - - # Retrieve existing known host - existing_host = get_existing_known_host(module, bitbucket) - state = module.params['state'] - changed = False - - # Create new host in case it doesn't exists - if not existing_host and (state == 'present'): - if not module.check_mode: - create_known_host(module, bitbucket) - changed = True - - # Delete host - elif existing_host and (state == 'absent'): - if not module.check_mode: - delete_known_host(module, bitbucket, existing_host['uuid']) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py deleted file mode 100644 index e5701184c3..0000000000 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_pipeline_variable -short_description: Manages Bitbucket pipeline variables -description: - - Manages Bitbucket pipeline variables. -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - name: - description: - - The pipeline variable name. - type: str - required: true - value: - description: - - The pipeline variable value. - type: str - secured: - description: - - Whether to encrypt the variable value. - type: bool - default: no - state: - description: - - Indicates desired state of the variable. - type: str - required: true - choices: [ absent, present ] -notes: - - Check mode is supported. - - For secured values return parameter C(changed) is always C(True). 
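The note above follows directly from how the idempotence check works; a sketch of that decision, with `needs_update` as an illustrative name. The API payload for a secured variable simply carries no C(value) key (as the helper's docstring below also points out), so the comparison can never confirm equality.

```python
# Sketch only: why secured variables always report changed=True.
def needs_update(existing, wanted_value, wanted_secured):
    # 'existing' mimics the API payload; secured entries lack 'value',
    # so .get('value') is None and never equals the wanted value.
    return (existing['secured'] != wanted_secured
            or existing.get('value') != wanted_value)


print(needs_update({'secured': True}, 's3cret', True))             # True on every run
print(needs_update({'secured': False, 'value': 'x'}, 'x', False))  # False -> idempotent
```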
-''' - -EXAMPLES = r''' -- name: Create or update pipeline variables from the list - community.general.bitbucket_pipeline_variable: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - name: '{{ item.name }}' - value: '{{ item.value }}' - secured: '{{ item.secured }}' - state: present - with_items: - - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False } - - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True } - -- name: Remove pipeline variable - community.general.bitbucket_pipeline_variable: - repository: bitbucket-repo - workspace: bitbucket_workspace - name: AWS_ACCESS_KEY - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule, _load_params -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'required_value': '`value` is required when the `state` is `present`', -} - -BITBUCKET_API_ENDPOINTS = { - 'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL, - 'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_pipeline_variable(module, bitbucket): - """ - Search for a pipeline variable - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing variable or None if not found - :rtype: dict or None - - Return example:: - - { - 'name': 'AWS_ACCESS_OBKEY_ID', - 'value': 'x7HU80-a2', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}' - } - - The `value` key in dict is absent in case of secured variable. 
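A standalone sketch of the page walk implemented below. The host, the bearer-token auth, the generator shape, and stopping on an empty C(values) list are assumptions for illustration; the module itself stops when C(pagelen) is 0.

```python
# Sketch only: iterate a Bitbucket paged list endpoint page by page.
import json
from urllib import request


def iter_pipeline_variables(base_url, token):
    page = 1
    while True:
        req = request.Request('%s?page=%s' % (base_url, page),
                              headers={'Authorization': 'Bearer %s' % token})
        with request.urlopen(req) as resp:
            content = json.load(resp)
        if not content.get('values'):  # empty page: end of the list
            return
        for item in content['values']:
            yield item
        page += 1
```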
- """ - variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - # Look through the all response pages in search of variable we need - page = 1 - while True: - next_url = "%s?page=%s" % (variables_base_url, page) - info, content = bitbucket.request( - api_url=next_url, - method='GET', - ) - - if info['status'] == 404: - module.fail_json(msg='Invalid `repository` or `workspace`.') - - if info['status'] != 200: - module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info)) - - # We are at the end of list - if 'pagelen' in content and content['pagelen'] == 0: - return None - - page += 1 - var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None) - - if var is not None: - var['name'] = var.pop('key') - return var - - -def create_pipeline_variable(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='POST', - data={ - 'key': module.params['name'], - 'value': module.params['value'], - 'secured': module.params['secured'], - }, - ) - - if info['status'] != 201: - module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format( - name=module.params['name'], - info=info, - )) - - -def update_pipeline_variable(module, bitbucket, variable_uuid): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - variable_uuid=variable_uuid, - ), - method='PUT', - data={ - 'value': module.params['value'], - 'secured': module.params['secured'], - }, - ) - - if info['status'] != 200: - module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format( - name=module.params['name'], - info=info, - )) - - -def delete_pipeline_variable(module, bitbucket, variable_uuid): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - variable_uuid=variable_uuid, - ), - method='DELETE', - ) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format( - name=module.params['name'], - info=info, - )) - - -class BitBucketPipelineVariable(AnsibleModule): - def __init__(self, *args, **kwargs): - params = _load_params() or {} - if params.get('secured'): - kwargs['argument_spec']['value'].update({'no_log': True}) - super(BitBucketPipelineVariable, self).__init__(*args, **kwargs) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - name=dict(type='str', required=True), - value=dict(type='str'), - secured=dict(type='bool', default=False), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = BitBucketPipelineVariable( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - bitbucket = BitbucketHelper(module) - - value = 
module.params['value'] - state = module.params['state'] - secured = module.params['secured'] - - # Check parameters - if (value is None) and (state == 'present'): - module.fail_json(msg=error_messages['required_value']) - - # Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing pipeline variable (if any) - existing_variable = get_existing_pipeline_variable(module, bitbucket) - changed = False - - # Create new variable in case it doesn't exists - if not existing_variable and (state == 'present'): - if not module.check_mode: - create_pipeline_variable(module, bitbucket) - changed = True - - # Update variable if it is secured or the old value does not match the new one - elif existing_variable and (state == 'present'): - if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value): - if not module.check_mode: - update_pipeline_variable(module, bitbucket, existing_variable['uuid']) - changed = True - - # Delete variable - elif existing_variable and (state == 'absent'): - if not module.check_mode: - delete_pipeline_variable(module, bitbucket, existing_variable['uuid']) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/bzr.py b/plugins/modules/source_control/bzr.py deleted file mode 100644 index a4ce4bc075..0000000000 --- a/plugins/modules/source_control/bzr.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, André Paramés -# Based on the Git module by Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: bzr -author: -- André Paramés (@andreparames) -short_description: Deploy software (or files) from bzr branches -description: - - Manage I(bzr) branches to deploy files or software. -options: - name: - description: - - SSH or HTTP protocol address of the parent branch. - aliases: [ parent ] - required: yes - type: str - dest: - description: - - Absolute path of where the branch should be cloned to. - required: yes - type: path - version: - description: - - What version of the branch to clone. This can be the - bzr revno or revid. - default: head - type: str - force: - description: - - If C(yes), any modified files in the working - tree will be discarded. Before 1.9 the default - value was C(yes). - type: bool - default: 'no' - executable: - description: - - Path to bzr executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. 
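Outside Ansible, the fallback described above can be expressed with the stdlib; `resolve_bzr` is an illustrative name, and `shutil.which` stands in for `module.get_bin_path`.

```python
# Sketch only: prefer an explicit 'executable' path, else search PATH.
import shutil


def resolve_bzr(executable=None):
    path = executable or shutil.which('bzr')
    if path is None:
        raise RuntimeError('bzr executable not found on PATH')
    return path
```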
- type: str -''' - -EXAMPLES = ''' -- name: Checkout - community.general.bzr: - name: bzr+ssh://foosball.example.org/path/to/branch - dest: /srv/checkout - version: 22 -''' - -import os -import re - -from ansible.module_utils.basic import AnsibleModule - - -class Bzr(object): - def __init__(self, module, parent, dest, version, bzr_path): - self.module = module - self.parent = parent - self.dest = dest - self.version = version - self.bzr_path = bzr_path - - def _command(self, args_list, cwd=None, **kwargs): - (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) - return (rc, out, err) - - def get_version(self): - '''samples the version of the bzr branch''' - - cmd = "%s revno" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - revno = stdout.strip() - return revno - - def clone(self): - '''makes a new bzr branch if it does not already exist''' - dest_dirname = os.path.dirname(self.dest) - try: - os.makedirs(dest_dirname) - except Exception: - pass - if self.version.lower() != 'head': - args_list = ["branch", "-r", self.version, self.parent, self.dest] - else: - args_list = ["branch", self.parent, self.dest] - return self._command(args_list, check_rc=True, cwd=dest_dirname) - - def has_local_mods(self): - - cmd = "%s status -S" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - lines = stdout.splitlines() - - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) - return len(lines) > 0 - - def reset(self, force): - ''' - Resets the index and working tree to head. - Discards any changes to tracked files in the working - tree since that commit. - ''' - if not force and self.has_local_mods(): - self.module.fail_json(msg="Local modifications exist in branch (force=no).") - return self._command(["revert"], check_rc=True, cwd=self.dest) - - def fetch(self): - '''updates branch from remote sources''' - if self.version.lower() != 'head': - (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) - else: - (rc, out, err) = self._command(["pull"], cwd=self.dest) - if rc != 0: - self.module.fail_json(msg="Failed to pull") - return (rc, out, err) - - def switch_version(self): - '''once pulled, switch to a particular revno or revid''' - if self.version.lower() != 'head': - args_list = ["revert", "-r", self.version] - else: - args_list = ["revert"] - return self._command(args_list, check_rc=True, cwd=self.dest) - - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(type='path', required=True), - name=dict(type='str', required=True, aliases=['parent']), - version=dict(type='str', default='head'), - force=dict(type='bool', default=False), - executable=dict(type='str'), - ) - ) - - dest = module.params['dest'] - parent = module.params['name'] - version = module.params['version'] - force = module.params['force'] - bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) - - bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') - - rc, out, err = (0, None, None) - - bzr = Bzr(module, parent, dest, version, bzr_path) - - # if there is no bzr configuration, do a branch operation - # else pull and switch the version - before = None - local_mods = False - if not os.path.exists(bzrconfig): - (rc, out, err) = bzr.clone() - - else: - # else do a pull - local_mods = bzr.has_local_mods() - before = bzr.get_version() - (rc, out, err) = bzr.reset(force) - if rc != 0: - 
module.fail_json(msg=err) - (rc, out, err) = bzr.fetch() - if rc != 0: - module.fail_json(msg=err) - - # switch to version specified regardless of whether - # we cloned or pulled - (rc, out, err) = bzr.switch_version() - - # determine if we changed anything - after = bzr.get_version() - changed = False - - if before != after or local_mods: - changed = True - - module.exit_json(changed=changed, before=before, after=after) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/git_config.py b/plugins/modules/source_control/git_config.py deleted file mode 100644 index ab71370115..0000000000 --- a/plugins/modules/source_control/git_config.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Marius Gedminas -# (c) 2016, Matthew Gamble -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: git_config -author: - - Matthew Gamble (@djmattyg007) - - Marius Gedminas (@mgedmin) -requirements: ['git'] -short_description: Read and write git configuration -description: - - The C(git_config) module changes git configuration by invoking 'git config'. - This is needed if you don't want to use M(ansible.builtin.template) for the entire git - config file (e.g. because you need to change just C(user.email) in - /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or - don't work correctly in check mode. -options: - list_all: - description: - - List all settings (optionally limited to a given I(scope)). - type: bool - default: 'no' - name: - description: - - The name of the setting. If no value is supplied, the value will - be read from the config if it has been set. - type: str - repo: - description: - - Path to a git repository for reading and writing values from a - specific repo. - type: path - file: - description: - - Path to an adhoc git configuration file to be managed using the C(file) scope. - type: path - version_added: 2.0.0 - scope: - description: - - Specify which scope to read/set values from. - - This is required when setting config values. - - If this is set to C(local), you must also specify the C(repo) parameter. - - If this is set to C(file), you must also specify the C(file) parameter. - - It defaults to system only when not using I(list_all)=C(yes). - choices: [ "file", "local", "global", "system" ] - type: str - state: - description: - - "Indicates the setting should be set/unset. - This parameter has higher precedence than I(value) parameter: - when I(state)=absent and I(value) is defined, I(value) is discarded." - choices: [ 'present', 'absent' ] - default: 'present' - type: str - value: - description: - - When specifying the name of a single setting, supply a value to - set that setting to the given value. 
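What the module effectively runs for a single read, as a subprocess sketch with an assumed global scope; `read_git_config` is an illustrative name. The exit-code convention, 1 for "not set" and 2 or higher for real errors, is the same one the module's own code relies on further below.

```python
# Sketch only: read one setting the way 'git config' reports it.
import subprocess


def read_git_config(name, scope='global'):
    proc = subprocess.run(
        ['git', 'config', '--includes', '--%s' % scope, name],
        capture_output=True, text=True,
    )
    if proc.returncode == 1:   # option simply not set yet
        return None
    if proc.returncode >= 2:   # genuine failure
        raise RuntimeError(proc.stderr.strip())
    return proc.stdout.rstrip()

# read_git_config('alias.ci') -> 'commit' after the alias.ci example below
```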
- type: str -''' - -EXAMPLES = ''' -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: alias.ci - scope: global - value: commit - -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: alias.st - scope: global - value: status - -- name: Remove a setting from ~/.gitconfig - community.general.git_config: - name: alias.ci - scope: global - state: absent - -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: core.editor - scope: global - value: vim - -- name: Add a setting system-wide - community.general.git_config: - name: alias.remotev - scope: system - value: remote -v - -- name: Add a setting to a system scope (default) - community.general.git_config: - name: alias.diffc - value: diff --cached - -- name: Add a setting to a system scope (default) - community.general.git_config: - name: color.ui - value: auto - -- name: Make etckeeper not complaining when it is invoked by cron - community.general.git_config: - name: user.email - repo: /etc - scope: local - value: 'root@{{ ansible_fqdn }}' - -- name: Read individual values from git config - community.general.git_config: - name: alias.ci - scope: global - -- name: Scope system is also assumed when reading values, unless list_all=yes - community.general.git_config: - name: alias.diffc - -- name: Read all values from git config - community.general.git_config: - list_all: yes - scope: global - -- name: When list_all is yes and no scope is specified, you get configuration from all scopes - community.general.git_config: - list_all: yes - -- name: Specify a repository to include local settings - community.general.git_config: - list_all: yes - repo: /path/to/repo.git -''' - -RETURN = ''' ---- -config_value: - description: When list_all=no and value is not set, a string containing the value of the setting in name - returned: success - type: str - sample: "vim" - -config_values: - description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings - returned: success - type: dict - sample: - core.editor: "vim" - color.ui: "auto" - alias.diffc: "diff --cached" - alias.remotev: "remote -v" -''' -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - list_all=dict(required=False, type='bool', default=False), - name=dict(type='str'), - repo=dict(type='path'), - file=dict(type='path'), - scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']), - state=dict(required=False, type='str', default='present', choices=['present', 'absent']), - value=dict(required=False), - ), - mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']], - required_if=[ - ('scope', 'local', ['repo']), - ('scope', 'file', ['file']) - ], - required_one_of=[['list_all', 'name']], - supports_check_mode=True, - ) - git_path = module.get_bin_path('git', True) - - params = module.params - # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. - # Set the locale to C to ensure consistent messages. 
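A sketch of the locale pinning that the next line applies through `run_command_environ_update`; the sample command is illustrative, and the quoted error text is the substring the module matches on.

```python
# Sketch only: pin the C locale so git's messages are match-able.
import os
import subprocess

env = dict(os.environ, LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
proc = subprocess.run(
    ['git', 'config', '-f', '/nonexistent/gitconfig', '-l'],
    capture_output=True, text=True, env=env,
)
# stderr is now locale-independent, so a substring check such as
# 'unable to read config file' in proc.stderr behaves the same everywhere.
```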
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - if params['name']: - name = params['name'] - else: - name = None - - if params['scope']: - scope = params['scope'] - elif params['list_all']: - scope = None - else: - scope = 'system' - - if params['state'] == 'absent': - unset = 'unset' - params['value'] = None - else: - unset = None - - if params['value']: - new_value = params['value'] - else: - new_value = None - - args = [git_path, "config", "--includes"] - if params['list_all']: - args.append('-l') - if scope == 'file': - args.append('-f') - args.append(params['file']) - elif scope: - args.append("--" + scope) - if name: - args.append(name) - - if scope == 'local': - dir = params['repo'] - elif params['list_all'] and params['repo']: - # Include local settings from a specific repo when listing all available settings - dir = params['repo'] - else: - # Run from root directory to avoid accidentally picking up any local config settings - dir = "/" - - (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False) - if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: - # This just means nothing has been set at the given scope - module.exit_json(changed=False, msg='', config_values={}) - elif rc >= 2: - # If the return code is 1, it just means the option hasn't been set yet, which is fine. - module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) - - if params['list_all']: - values = out.rstrip().splitlines() - config_values = {} - for value in values: - k, v = value.split('=', 1) - config_values[k] = v - module.exit_json(changed=False, msg='', config_values=config_values) - elif not new_value and not unset: - module.exit_json(changed=False, msg='', config_value=out.rstrip()) - elif unset and not out: - module.exit_json(changed=False, msg='no setting to unset') - else: - old_value = out.rstrip() - if old_value == new_value: - module.exit_json(changed=False, msg="") - - if not module.check_mode: - if unset: - args.insert(len(args) - 1, "--" + unset) - cmd = args - else: - cmd = args + [new_value] - try: # try using extra parameter from ansible-base 2.10.4 onwards - (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False) - except TypeError: - # @TODO remove try/except when community.general drop support for 2.10.x - if not os.path.isdir(dir): - module.fail_json(msg="Cannot find directory '{0}'".format(dir)) - (rc, out, err) = module.run_command(cmd, cwd=dir, expand_user_and_vars=False) - if err: - module.fail_json(rc=rc, msg=err, cmd=cmd) - - module.exit_json( - msg='setting changed', - diff=dict( - before_header=' '.join(args), - before=old_value + "\n", - after_header=' '.join(args), - after=(new_value or '') + "\n" - ), - changed=True - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_deploy_key.py b/plugins/modules/source_control/github/github_deploy_key.py deleted file mode 100644 index 7a67a12334..0000000000 --- a/plugins/modules/source_control/github/github_deploy_key.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: github_deploy_key -author: "Ali (@bincyber)" -short_description: Manages deploy keys for GitHub repositories. 
-description: - - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, - username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin - rights on the repository are required." -options: - github_url: - description: - - The base URL of the GitHub API - required: false - type: str - version_added: '0.2.0' - default: https://api.github.com - owner: - description: - - The name of the individual account or organization that owns the GitHub repository. - required: true - aliases: [ 'account', 'organization' ] - type: str - repo: - description: - - The name of the GitHub repository. - required: true - aliases: [ 'repository' ] - type: str - name: - description: - - The name for the deploy key. - required: true - aliases: [ 'title', 'label' ] - type: str - key: - description: - - The SSH public key to add to the repository as a deploy key. - required: true - type: str - read_only: - description: - - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. - type: bool - default: 'yes' - state: - description: - - The state of the deploy key. - default: "present" - choices: [ "present", "absent" ] - type: str - force: - description: - - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. - type: bool - default: 'no' - username: - description: - - The username to authenticate with. Should not be set when using personal access token - type: str - password: - description: - - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. - type: str - token: - description: - - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). - type: str - otp: - description: - - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). - - Alias C(2fa_token) has been deprecated and will be removed in community.general 5.0.0. - aliases: ['2fa_token'] - type: int -notes: - - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." -''' - -EXAMPLES = ''' -- name: Add a new read-only deploy key to a GitHub repository using basic authentication - community.general.github_deploy_key: - owner: "johndoe" - repo: "example" - name: "new-deploy-key" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - read_only: yes - username: "johndoe" - password: "supersecretpassword" - -- name: Remove an existing deploy key from a GitHub repository - community.general.github_deploy_key: - owner: "johndoe" - repository: "example" - name: "new-deploy-key" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - force: yes - username: "johndoe" - password: "supersecretpassword" - state: absent - -- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate - community.general.github_deploy_key: - owner: "johndoe" - repository: "example" - name: "new-deploy-key" - key: "{{ lookup('file', '~/.ssh/github.pub') }}" - force: yes - token: "ABAQDAwXxn7kIMNWzcDfo..." 
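The authentication matrix from the option descriptions above, condensed into a sketch; `build_headers` is an illustrative name. With username/password, the basic-auth credentials themselves travel via fetch_url's `url_username`/`url_password` rather than a header, as the module's `headers` property further below shows.

```python
# Sketch only: which header each credential combination contributes.
def build_headers(username=None, password=None, token=None, otp=None):
    if username is not None and password is not None:
        # basic auth itself is handled by fetch_url; only the optional
        # one-time password needs an extra header
        return {'X-GitHub-OTP': str(otp)} if otp is not None else None
    if token is not None:
        return {'Authorization': 'token %s' % token}
    return None
```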
- -- name: Re-add a deploy key to a GitHub repository but with a different name - community.general.github_deploy_key: - owner: "johndoe" - repository: "example" - name: "replace-deploy-key" - key: "{{ lookup('file', '~/.ssh/github.pub') }}" - username: "johndoe" - password: "supersecretpassword" - -- name: Add a new deploy key to a GitHub repository using 2FA - community.general.github_deploy_key: - owner: "johndoe" - repo: "example" - name: "new-deploy-key-2" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - username: "johndoe" - password: "supersecretpassword" - otp: 123456 - -- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise - community.general.github_deploy_key: - github_url: "https://api.example.com" - owner: "janedoe" - repo: "example" - name: "new-deploy-key" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - read_only: yes - username: "janedoe" - password: "supersecretpassword" -''' - -RETURN = ''' -msg: - description: the status message describing what occurred - returned: always - type: str - sample: "Deploy key added successfully" - -http_status_code: - description: the HTTP status code returned by the GitHub API - returned: failed - type: int - sample: 400 - -error: - description: the error message returned by the GitHub API - returned: failed - type: str - sample: "key is already in use" - -id: - description: the key identifier assigned by GitHub for the deploy key - returned: changed - type: int - sample: 24381901 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from re import findall - - -class GithubDeployKey(object): - def __init__(self, module): - self.module = module - - self.github_url = self.module.params['github_url'] - self.name = module.params['name'] - self.key = module.params['key'] - self.state = module.params['state'] - self.read_only = module.params.get('read_only', True) - self.force = module.params.get('force', False) - self.username = module.params.get('username', None) - self.password = module.params.get('password', None) - self.token = module.params.get('token', None) - self.otp = module.params.get('otp', None) - - @property - def url(self): - owner = self.module.params['owner'] - repo = self.module.params['repo'] - return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo) - - @property - def headers(self): - if self.username is not None and self.password is not None: - self.module.params['url_username'] = self.username - self.module.params['url_password'] = self.password - self.module.params['force_basic_auth'] = True - if self.otp is not None: - return {"X-GitHub-OTP": self.otp} - elif self.token is not None: - return {"Authorization": "token {0}".format(self.token)} - else: - return None - - def paginate(self, url): - while url: - resp, info = fetch_url(self.module, url, headers=self.headers, method="GET") - - if info["status"] == 200: - yield self.module.from_json(resp.read()) - - links = {} - for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]): - links[y] = x - - url = links.get('next') - else: - self.handle_error(method="GET", info=info) - - def get_existing_key(self): - for keys in self.paginate(self.url): - if keys: - for i in keys: - existing_key_id = str(i["id"]) - if i["key"].split() == self.key.split()[:2]: - return existing_key_id - elif i['title'] == self.name and self.force: - return existing_key_id - else: - return None - - def add_new_key(self): - request_body = {"title": self.name, 
"key": self.key, "read_only": self.read_only} - - resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30) - - status_code = info["status"] - - if status_code == 201: - response_body = self.module.from_json(resp.read()) - key_id = response_body["id"] - self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) - elif status_code == 422: - self.module.exit_json(changed=False, msg="Deploy key already exists") - else: - self.handle_error(method="POST", info=info) - - def remove_existing_key(self, key_id): - resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE") - - status_code = info["status"] - - if status_code == 204: - if self.state == 'absent': - self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id) - else: - self.handle_error(method="DELETE", info=info, key_id=key_id) - - def handle_error(self, method, info, key_id=None): - status_code = info['status'] - body = info.get('body') - if body: - err = self.module.from_json(body)['message'] - - if status_code == 401: - self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err) - elif status_code == 404: - self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err) - else: - if method == "GET": - self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err) - elif method == "POST": - self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err) - elif method == "DELETE": - self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - github_url=dict(required=False, type='str', default="https://api.github.com"), - owner=dict(required=True, type='str', aliases=['account', 'organization']), - repo=dict(required=True, type='str', aliases=['repository']), - name=dict(required=True, type='str', aliases=['title', 'label']), - key=dict(required=True, type='str', no_log=False), - read_only=dict(required=False, type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - force=dict(required=False, type='bool', default=False), - username=dict(required=False, type='str'), - password=dict(required=False, type='str', no_log=True), - otp=dict( - required=False, type='int', aliases=['2fa_token'], no_log=True, - deprecated_aliases=[dict(name='2fa_token', version='5.0.0', collection_name='community.general')]), - token=dict(required=False, type='str', no_log=True) - ), - mutually_exclusive=[ - ['password', 'token'] - ], - required_together=[ - ['username', 'password'], - ['otp', 'username', 'password'] - ], - required_one_of=[ - ['username', 'token'] - ], - supports_check_mode=True, - ) - - deploy_key = GithubDeployKey(module) - - if module.check_mode: - key_id = deploy_key.get_existing_key() - if deploy_key.state == "present" and key_id is None: - module.exit_json(changed=True) - elif deploy_key.state == "present" and key_id is not None: - module.exit_json(changed=False) - - # to forcefully modify an existing key, the existing key must be deleted first - if deploy_key.state == 'absent' or deploy_key.force: - key_id = deploy_key.get_existing_key() - - if key_id is not None: - deploy_key.remove_existing_key(key_id) 
- elif deploy_key.state == 'absent': - module.exit_json(changed=False, msg="Deploy key does not exist") - - if deploy_key.state == "present": - deploy_key.add_new_key() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py deleted file mode 100644 index 4add29f341..0000000000 --- a/plugins/modules/source_control/github/github_issue.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017-18, Abhijeet Kasurde -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: github_issue -short_description: View GitHub issue. -description: - - View GitHub issue for a given repository and organization. -options: - repo: - description: - - Name of repository from which issue needs to be retrieved. - required: true - type: str - organization: - description: - - Name of the GitHub organization in which the repository is hosted. - required: true - type: str - issue: - description: - - Issue number for which information is required. - required: true - type: int - action: - description: - - Get various details about issue depending upon action specified. - default: 'get_status' - choices: - - 'get_status' - type: str -author: - - Abhijeet Kasurde (@Akasurde) -''' - -RETURN = ''' -issue_status: - description: State of the GitHub issue - type: str - returned: success - sample: open, closed -''' - -EXAMPLES = ''' -- name: Check if GitHub issue is closed or not - community.general.github_issue: - organization: ansible - repo: ansible - issue: 23642 - action: get_status - register: r - -- name: Take action depending upon issue status - ansible.builtin.debug: - msg: Do something when issue 23642 is open - when: r.issue_status == 'open' -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def main(): - module = AnsibleModule( - argument_spec=dict( - organization=dict(required=True), - repo=dict(required=True), - issue=dict(type='int', required=True), - action=dict(choices=['get_status'], default='get_status'), - ), - supports_check_mode=True, - ) - - organization = module.params['organization'] - repo = module.params['repo'] - issue = module.params['issue'] - action = module.params['action'] - - result = dict() - - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/vnd.github.v3+json', - } - - url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue) - - response, info = fetch_url(module, url, headers=headers) - if not (200 <= info['status'] < 400): - if info['status'] == 404: - module.fail_json(msg="Failed to find issue %s" % issue) - module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg'])) - - gh_obj = json.loads(response.read()) - - if action == 'get_status' or action is None: - if module.check_mode: - result.update(changed=True) - else: - result.update(changed=True, issue_status=gh_obj['state']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_key.py b/plugins/modules/source_control/github/github_key.py deleted file mode 100644 index 2afbe29aa1..0000000000 --- a/plugins/modules/source_control/github/github_key.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python -# -*- 
coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: github_key -short_description: Manage GitHub access keys. -description: - - Creates, removes, or updates GitHub access keys. -options: - token: - description: - - GitHub Access Token with permission to list and create public keys. - required: true - type: str - name: - description: - - SSH key name - required: true - type: str - pubkey: - description: - - SSH public key value. Required when C(state=present). - type: str - state: - description: - - Whether to remove a key, ensure that it exists, or update its value. - choices: ['present', 'absent'] - default: 'present' - type: str - force: - description: - - The default is C(yes), which will replace the existing remote key - if it's different than C(pubkey). If C(no), the key will only be - set if no key with the given C(name) exists. - type: bool - default: 'yes' - -author: Robert Estelle (@erydo) -''' - -RETURN = ''' -deleted_keys: - description: An array of key objects that were deleted. Only present on state=absent - type: list - returned: When state=absent - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] -matching_keys: - description: An array of keys matching the specified name. Only present on state=present - type: list - returned: When state=present - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] -key: - description: Metadata about the key just created. 
Only present on state=present - type: dict - returned: success - sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False} -''' - -EXAMPLES = ''' -- name: Read SSH public key to authorize - ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub - register: ssh_pub_key - -- name: Authorize key with GitHub - local_action: - module: github_key - name: Access Key for Some Machine - token: '{{ github_access_token }}' - pubkey: '{{ ssh_pub_key.stdout }}' -''' - - -import json -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -API_BASE = 'https://api.github.com' - - -class GitHubResponse(object): - def __init__(self, response, info): - self.content = response.read() - self.info = info - - def json(self): - return json.loads(self.content) - - def links(self): - links = {} - if 'link' in self.info: - link_header = self.info['link'] - matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header) - for url, rel in matches: - links[rel] = url - return links - - -class GitHubSession(object): - def __init__(self, module, token): - self.module = module - self.token = token - - def request(self, method, url, data=None): - headers = { - 'Authorization': 'token %s' % self.token, - 'Content-Type': 'application/json', - 'Accept': 'application/vnd.github.v3+json', - } - response, info = fetch_url( - self.module, url, method=method, data=data, headers=headers) - if not (200 <= info['status'] < 400): - self.module.fail_json( - msg=(" failed to send request %s to %s: %s" - % (method, url, info['msg']))) - return GitHubResponse(response, info) - - -def get_all_keys(session): - url = API_BASE + '/user/keys' - result = [] - while url: - r = session.request('GET', url) - result.extend(r.json()) - url = r.links().get('next') - return result - - -def create_key(session, name, pubkey, check_mode): - if check_mode: - from datetime import datetime - now = datetime.utcnow() - return { - 'id': 0, - 'key': pubkey, - 'title': name, - 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), - 'read_only': False, - 'verified': False - } - else: - return session.request( - 'POST', - API_BASE + '/user/keys', - data=json.dumps({'title': name, 'key': pubkey})).json() - - -def delete_keys(session, to_delete, check_mode): - if check_mode: - return - - for key in to_delete: - session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) - - -def ensure_key_absent(session, name, check_mode): - to_delete = [key for key in get_all_keys(session) if key['title'] == name] - delete_keys(session, to_delete, check_mode=check_mode) - - return {'changed': bool(to_delete), - 'deleted_keys': to_delete} - - -def ensure_key_present(module, session, name, pubkey, force, check_mode): - all_keys = get_all_keys(session) - matching_keys = [k for k in all_keys if k['title'] == name] - deleted_keys = [] - - new_signature = pubkey.split(' ')[1] - for key in all_keys: - existing_signature = key['key'].split(' ')[1] - if new_signature == existing_signature and key['title'] != name: - module.fail_json(msg=( - "another key with the same content is already registered " - "under the name |{0}|").format(key['title'])) - - if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature: - delete_keys(session, matching_keys, check_mode=check_mode) - (deleted_keys, matching_keys) = (matching_keys, []) - - if not matching_keys: - key = 
create_key(session, name, pubkey, check_mode=check_mode) - else: - key = matching_keys[0] - - return { - 'changed': bool(deleted_keys or not matching_keys), - 'deleted_keys': deleted_keys, - 'matching_keys': matching_keys, - 'key': key - } - - -def main(): - argument_spec = { - 'token': {'required': True, 'no_log': True}, - 'name': {'required': True}, - 'pubkey': {}, - 'state': {'choices': ['present', 'absent'], 'default': 'present'}, - 'force': {'default': True, 'type': 'bool'}, - } - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - token = module.params['token'] - name = module.params['name'] - state = module.params['state'] - force = module.params['force'] - pubkey = module.params.get('pubkey') - - if pubkey: - pubkey_parts = pubkey.split(' ') - # Keys consist of a protocol, the key data, and an optional comment. - if len(pubkey_parts) < 2: - module.fail_json(msg='"pubkey" parameter has an invalid format') - elif state == 'present': - module.fail_json(msg='"pubkey" is required when state=present') - - session = GitHubSession(module, token) - if state == 'present': - result = ensure_key_present(module, session, name, pubkey, force=force, - check_mode=module.check_mode) - elif state == 'absent': - result = ensure_key_absent(session, name, check_mode=module.check_mode) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_release.py b/plugins/modules/source_control/github/github_release.py deleted file mode 100644 index 654dce5f98..0000000000 --- a/plugins/modules/source_control/github/github_release.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Team -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: github_release -short_description: Interact with GitHub Releases -description: - - Fetch metadata about GitHub Releases -options: - token: - description: - - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password). - type: str - user: - description: - - The GitHub account that owns the repository - type: str - required: true - password: - description: - - The GitHub account password for the user. Mutually exclusive with C(token). - type: str - repo: - description: - - Repository name - type: str - required: true - action: - description: - - Action to perform - type: str - required: true - choices: [ 'latest_release', 'create_release' ] - tag: - description: - - Tag name when creating a release. Required when using action is set to C(create_release). - type: str - target: - description: - - Target of release when creating a release - type: str - name: - description: - - Name of release when creating a release - type: str - body: - description: - - Description of the release when creating a release - type: str - draft: - description: - - Sets if the release is a draft or not. (boolean) - type: 'bool' - default: 'no' - prerelease: - description: - - Sets if the release is a prerelease or not. 
(boolean) - type: bool - default: 'no' - -author: - - "Adrian Moisey (@adrianmoisey)" -requirements: - - "github3.py >= 1.0.0a3" -''' - -EXAMPLES = ''' -- name: Get latest release of a public repository - community.general.github_release: - user: ansible - repo: ansible - action: latest_release - -- name: Get latest release of testuseer/testrepo - community.general.github_release: - token: tokenabc1234567890 - user: testuser - repo: testrepo - action: latest_release - -- name: Get latest release of test repo using username and password. Ansible 2.4. - community.general.github_release: - user: testuser - password: secret123 - repo: testrepo - action: latest_release - -- name: Create a new release - community.general.github_release: - token: tokenabc1234567890 - user: testuser - repo: testrepo - action: create_release - tag: test - target: master - name: My Release - body: Some description - -''' - -RETURN = ''' -create_release: - description: - - Version of the created release - - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged" - - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped" - type: str - returned: success - sample: 1.1.0 - -latest_release: - description: Version of the latest release - type: str - returned: success - sample: 1.1.0 -''' - -import traceback - -GITHUB_IMP_ERR = None -try: - import github3 - - HAS_GITHUB_API = True -except ImportError: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB_API = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repo=dict(required=True), - user=dict(required=True), - password=dict(no_log=True), - token=dict(no_log=True), - action=dict( - required=True, choices=['latest_release', 'create_release']), - tag=dict(type='str'), - target=dict(type='str'), - name=dict(type='str'), - body=dict(type='str'), - draft=dict(type='bool', default=False), - prerelease=dict(type='bool', default=False), - ), - supports_check_mode=True, - mutually_exclusive=(('password', 'token'),), - required_if=[('action', 'create_release', ['tag']), - ('action', 'create_release', ['password', 'token'], True)], - ) - - if not HAS_GITHUB_API: - module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'), - exception=GITHUB_IMP_ERR) - - repo = module.params['repo'] - user = module.params['user'] - password = module.params['password'] - login_token = module.params['token'] - action = module.params['action'] - tag = module.params.get('tag') - target = module.params.get('target') - name = module.params.get('name') - body = module.params.get('body') - draft = module.params.get('draft') - prerelease = module.params.get('prerelease') - - # login to github - try: - if password: - gh_obj = github3.login(user, password=password) - elif login_token: - gh_obj = github3.login(token=login_token) - else: - gh_obj = github3.GitHub() - - # test if we're actually logged in - if password or login_token: - gh_obj.me() - except github3.exceptions.AuthenticationFailed as e: - module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e), - details="Please check username and password or token " - "for repository %s" % repo) - - repository = gh_obj.repository(user, repo) - - if not repository: - module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo)) - - if action == 'latest_release': - release 
= repository.latest_release() - if release: - module.exit_json(tag=release.tag_name) - else: - module.exit_json(tag=None) - - if action == 'create_release': - release_exists = repository.release_from_tag(tag) - if release_exists: - module.exit_json(changed=False, msg="Release for tag %s already exists." % tag) - - release = repository.create_release( - tag, target, name, body, draft, prerelease) - if release: - module.exit_json(changed=True, tag=release.tag_name) - else: - module.exit_json(changed=False, tag=None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py deleted file mode 100644 index b5403c6a8d..0000000000 --- a/plugins/modules/source_control/github/github_repo.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Álvaro Torres Cogollo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: github_repo -short_description: Manage your repositories on Github -version_added: 2.2.0 -description: -- Manages Github repositories using PyGithub library. -- Authentication can be done with I(access_token) or with I(username) and I(password). -options: - username: - description: - - Username used for authentication. - - This is only needed when not using I(access_token). - type: str - required: false - password: - description: - - Password used for authentication. - - This is only needed when not using I(access_token). - type: str - required: false - access_token: - description: - - Token parameter for authentication. - - This is only needed when not using I(username) and I(password). - type: str - required: false - name: - description: - - Repository name. - type: str - required: true - description: - description: - - Description for the repository. - - This is only used when I(state) is C(present). - type: str - default: '' - required: false - private: - description: - - Whether the new repository should be private or not. - - This is only used when I(state) is C(present). - type: bool - default: no - required: false - state: - description: - - Whether the repository should exist or not. - type: str - default: present - choices: [ absent, present ] - required: false - organization: - description: - - Organization for the repository. - - When I(state) is C(present), the repository will be created in the current user profile. - type: str - required: false - api_url: - description: - - URL to the GitHub API if not using github.com but you own instance. - type: str - default: 'https://api.github.com' - version_added: "3.5.0" -requirements: -- PyGithub>=1.54 -notes: -- For Python 3, PyGithub>=1.54 should be used. -- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)." -- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)." -- Supports C(check_mode). 
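The create-or-get flow that `create_repo()` below implements, as a bare PyGithub sketch; the token, organization, and repository names are placeholders, and PyGithub must be installed.

```python
# Sketch only: get the repo if it exists, create it on UnknownObjectException.
from github import Github
from github.GithubException import UnknownObjectException

gh = Github(base_url='https://api.github.com', login_or_token='mytoken')
target = gh.get_organization('MyOrganization')  # or gh.get_user()
try:
    repo = target.get_repo(name='myrepo')       # already present
except UnknownObjectException:
    repo = target.create_repo(name='myrepo', private=True,
                              description='Just for fun')
print(repo.full_name)
```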
-author: -- Álvaro Torres Cogollo (@atorrescogollo) -''' - -EXAMPLES = ''' -- name: Create a Github repository - community.general.github_repo: - access_token: mytoken - organization: MyOrganization - name: myrepo - description: "Just for fun" - private: yes - state: present - register: result - -- name: Delete the repository - community.general.github_repo: - username: octocat - password: password - organization: MyOrganization - name: myrepo - state: absent - register: result -''' - -RETURN = ''' -repo: - description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository). - returned: success and I(state) is C(present) - type: dict -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import sys - -GITHUB_IMP_ERR = None -try: - from github import Github, GithubException - from github.GithubException import UnknownObjectException - HAS_GITHUB_PACKAGE = True -except Exception: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB_PACKAGE = False - - -def authenticate(username=None, password=None, access_token=None, api_url=None): - if not api_url: - return None - - if access_token: - return Github(base_url=api_url, login_or_token=access_token) - else: - return Github(base_url=api_url, login_or_token=username, password=password) - - -def create_repo(gh, name, organization=None, private=False, description='', check_mode=False): - result = dict( - changed=False, - repo=dict()) - if organization: - target = gh.get_organization(organization) - else: - target = gh.get_user() - - repo = None - try: - repo = target.get_repo(name=name) - result['repo'] = repo.raw_data - except UnknownObjectException: - if not check_mode: - repo = target.create_repo( - name=name, private=private, description=description) - result['repo'] = repo.raw_data - - result['changed'] = True - - changes = {} - if repo is None or repo.raw_data['private'] != private: - changes['private'] = private - if repo is None or repo.raw_data['description'] != description: - changes['description'] = description - - if changes: - if not check_mode: - repo.edit(**changes) - - result['repo'].update({ - 'private': repo._private.value if not check_mode else private, - 'description': repo._description.value if not check_mode else description, - }) - result['changed'] = True - - return result - - -def delete_repo(gh, name, organization=None, check_mode=False): - result = dict(changed=False) - if organization: - target = gh.get_organization(organization) - else: - target = gh.get_user() - try: - repo = target.get_repo(name=name) - if not check_mode: - repo.delete() - result['changed'] = True - except UnknownObjectException: - pass - - return result - - -def run_module(params, check_mode=False): - gh = authenticate( - username=params['username'], password=params['password'], access_token=params['access_token'], - api_url=params['api_url']) - if params['state'] == "absent": - return delete_repo( - gh=gh, - name=params['name'], - organization=params['organization'], - check_mode=check_mode - ) - else: - return create_repo( - gh=gh, - name=params['name'], - organization=params['organization'], - private=params['private'], - description=params['description'], - check_mode=check_mode - ) - - -def main(): - module_args = dict( - username=dict(type='str', required=False, default=None), - password=dict(type='str', required=False, default=None, no_log=True), - access_token=dict(type='str', required=False, - default=None, no_log=True), - name=dict(type='str', 
required=True), - state=dict(type='str', required=False, default="present", - choices=["present", "absent"]), - organization=dict(type='str', required=False, default=None), - private=dict(type='bool', required=False, default=False), - description=dict(type='str', required=False, default=''), - api_url=dict(type='str', required=False, default='https://api.github.com'), - ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_together=[('username', 'password')], - required_one_of=[('username', 'access_token')], - mutually_exclusive=[('username', 'access_token')] - ) - - if not HAS_GITHUB_PACKAGE: - module.fail_json(msg=missing_required_lib( - "PyGithub"), exception=GITHUB_IMP_ERR) - - try: - result = run_module(module.params, module.check_mode) - module.exit_json(**result) - except GithubException as e: - module.fail_json(msg="Github error. {0}".format(repr(e))) - except Exception as e: - module.fail_json(msg="Unexpected error. {0}".format(repr(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py deleted file mode 100644 index fcb6f8d06f..0000000000 --- a/plugins/modules/source_control/github/github_webhook.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: github_webhook -short_description: Manage GitHub webhooks -description: - - "Create and delete GitHub webhooks" -requirements: - - "PyGithub >= 1.3.5" -options: - repository: - description: - - Full name of the repository to configure a hook for - type: str - required: true - aliases: - - repo - url: - description: - - URL to which payloads will be delivered - type: str - required: true - content_type: - description: - - The media type used to serialize the payloads - type: str - required: false - choices: [ form, json ] - default: form - secret: - description: - - The shared secret between GitHub and the payload URL. - type: str - required: false - insecure_ssl: - description: - - > - Flag to indicate that GitHub should skip SSL verification when calling - the hook. - required: false - type: bool - default: false - events: - description: - - > - A list of GitHub events the hook is triggered for. Events are listed at - U(https://developer.github.com/v3/activity/events/types/). Required - unless C(state) is C(absent) - required: false - type: list - elements: str - active: - description: - - Whether or not the hook is active - required: false - type: bool - default: true - state: - description: - - Whether the hook should be present or absent - type: str - required: false - choices: [ absent, present ] - default: present - user: - description: - - User to authenticate to GitHub as - type: str - required: true - password: - description: - - Password to authenticate to GitHub with - type: str - required: false - token: - description: - - Token to authenticate to GitHub with - type: str - required: false - github_url: - description: - - Base URL of the GitHub API - type: str - required: false - default: https://api.github.com - -author: - - "Chris St. 
Pierre (@stpierre)" -''' - -EXAMPLES = ''' -- name: Create a new webhook that triggers on push (password auth) - community.general.github_webhook: - repository: ansible/ansible - url: https://www.example.com/hooks/ - events: - - push - user: "{{ github_user }}" - password: "{{ github_password }}" - -- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth) - community.general.github_webhook: - repository: myorg/myrepo - url: https://jenkins.example.com/ghprbhook/ - content_type: json - secret: "{{ github_shared_secret }}" - insecure_ssl: True - events: - - issue_comment - - pull_request - user: "{{ github_user }}" - token: "{{ github_user_api_token }}" - github_url: https://github.example.com - -- name: Delete a webhook (password auth) - community.general.github_webhook: - repository: ansible/ansible - url: https://www.example.com/hooks/ - state: absent - user: "{{ github_user }}" - password: "{{ github_password }}" -''' - -RETURN = ''' ---- -hook_id: - description: The GitHub ID of the hook created/updated - returned: when state is 'present' - type: int - sample: 6206 -''' - -import traceback - -GITHUB_IMP_ERR = None -try: - import github - HAS_GITHUB = True -except ImportError: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def _create_hook_config(module): - return { - "url": module.params["url"], - "content_type": module.params["content_type"], - "secret": module.params.get("secret"), - "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" - } - - -def create_hook(repo, module): - config = _create_hook_config(module) - try: - hook = repo.create_hook( - name="web", - config=config, - events=module.params["events"], - active=module.params["active"]) - except github.GithubException as err: - module.fail_json(msg="Unable to create hook for repository %s: %s" % ( - repo.full_name, to_native(err))) - - data = {"hook_id": hook.id} - return True, data - - -def update_hook(repo, hook, module): - config = _create_hook_config(module) - try: - hook.update() - hook.edit( - name="web", - config=config, - events=module.params["events"], - active=module.params["active"]) - - changed = hook.update() - except github.GithubException as err: - module.fail_json(msg="Unable to modify hook for repository %s: %s" % ( - repo.full_name, to_native(err))) - - data = {"hook_id": hook.id} - return changed, data - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repository=dict(type='str', required=True, aliases=['repo']), - url=dict(type='str', required=True), - content_type=dict( - type='str', - choices=('json', 'form'), - required=False, - default='form'), - secret=dict(type='str', required=False, no_log=True), - insecure_ssl=dict(type='bool', required=False, default=False), - events=dict(type='list', elements='str', required=False), - active=dict(type='bool', required=False, default=True), - state=dict( - type='str', - required=False, - choices=('absent', 'present'), - default='present'), - user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), - github_url=dict( - type='str', required=False, default="https://api.github.com")), - mutually_exclusive=(('password', 'token'),), - required_one_of=(("password", "token"),), - required_if=(("state", "present", ("events",)),), - ) - - if not 
HAS_GITHUB: - module.fail_json(msg=missing_required_lib('PyGithub'), - exception=GITHUB_IMP_ERR) - - try: - github_conn = github.Github( - module.params["user"], - module.params.get("password") or module.params.get("token"), - base_url=module.params["github_url"]) - except github.GithubException as err: - module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - - try: - repo = github_conn.get_repo(module.params["repository"]) - except github.BadCredentialsException as err: - module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - except github.UnknownObjectException as err: - module.fail_json( - msg="Could not find repository %s in GitHub at %s: %s" % ( - module.params["repository"], module.params["github_url"], - to_native(err))) - except Exception as err: - module.fail_json( - msg="Could not fetch repository %s from GitHub at %s: %s" % - (module.params["repository"], module.params["github_url"], - to_native(err)), - exception=traceback.format_exc()) - - hook = None - try: - for hook in repo.get_hooks(): - if hook.config.get("url") == module.params["url"]: - break - else: - hook = None - except github.GithubException as err: - module.fail_json(msg="Unable to get hooks from repository %s: %s" % ( - module.params["repository"], to_native(err))) - - changed = False - data = {} - if hook is None and module.params["state"] == "present": - changed, data = create_hook(repo, module) - elif hook is not None and module.params["state"] == "absent": - try: - hook.delete() - except github.GithubException as err: - module.fail_json( - msg="Unable to delete hook from repository %s: %s" % ( - repo.full_name, to_native(err))) - else: - changed = True - elif hook is not None and module.params["state"] == "present": - changed, data = update_hook(repo, hook, module) - # else, there is no hook and we want there to be no hook - - module.exit_json(changed=changed, **data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py deleted file mode 100644 index 98a7516e75..0000000000 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: github_webhook_info -short_description: Query information about GitHub webhooks -description: - - "Query information about GitHub webhooks" - - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change. -requirements: - - "PyGithub >= 1.3.5" -options: - repository: - description: - - Full name of the repository to configure a hook for - type: str - required: true - aliases: - - repo - user: - description: - - User to authenticate to GitHub as - type: str - required: true - password: - description: - - Password to authenticate to GitHub with - type: str - required: false - token: - description: - - Token to authenticate to GitHub with - type: str - required: false - github_url: - description: - - Base URL of the github api - type: str - required: false - default: https://api.github.com - -author: - - "Chris St. 
Pierre (@stpierre)" -''' - -EXAMPLES = ''' -- name: List hooks for a repository (password auth) - community.general.github_webhook_info: - repository: ansible/ansible - user: "{{ github_user }}" - password: "{{ github_password }}" - register: ansible_webhooks - -- name: List hooks for a repository on GitHub Enterprise (token auth) - community.general.github_webhook_info: - repository: myorg/myrepo - user: "{{ github_user }}" - token: "{{ github_user_api_token }}" - github_url: https://github.example.com/api/v3/ - register: myrepo_webhooks -''' - -RETURN = ''' ---- -hooks: - description: A list of hooks that exist for the repo - returned: always - type: list - sample: > - [{"has_shared_secret": true, - "url": "https://jenkins.example.com/ghprbhook/", - "events": ["issue_comment", "pull_request"], - "insecure_ssl": "1", - "content_type": "json", - "active": true, - "id": 6206, - "last_response": {"status": "active", "message": "OK", "code": 200}}] -''' - -import traceback - -GITHUB_IMP_ERR = None -try: - import github - HAS_GITHUB = True -except ImportError: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def _munge_hook(hook_obj): - retval = { - "active": hook_obj.active, - "events": hook_obj.events, - "id": hook_obj.id, - "url": hook_obj.url, - } - retval.update(hook_obj.config) - retval["has_shared_secret"] = "secret" in retval - if "secret" in retval: - del retval["secret"] - - retval["last_response"] = hook_obj.last_response.raw_data - return retval - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repository=dict(type='str', required=True, aliases=["repo"]), - user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), - github_url=dict( - type='str', required=False, default="https://api.github.com")), - mutually_exclusive=(('password', 'token'), ), - required_one_of=(("password", "token"), ), - supports_check_mode=True) - - if not HAS_GITHUB: - module.fail_json(msg=missing_required_lib('PyGithub'), - exception=GITHUB_IMP_ERR) - - try: - github_conn = github.Github( - module.params["user"], - module.params.get("password") or module.params.get("token"), - base_url=module.params["github_url"]) - except github.GithubException as err: - module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - - try: - repo = github_conn.get_repo(module.params["repository"]) - except github.BadCredentialsException as err: - module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - except github.UnknownObjectException as err: - module.fail_json( - msg="Could not find repository %s in GitHub at %s: %s" % ( - module.params["repository"], module.params["github_url"], - to_native(err))) - except Exception as err: - module.fail_json( - msg="Could not fetch repository %s from GitHub at %s: %s" % - (module.params["repository"], module.params["github_url"], - to_native(err)), - exception=traceback.format_exc()) - - try: - hooks = [_munge_hook(h) for h in repo.get_hooks()] - except github.GithubException as err: - module.fail_json( - msg="Unable to get hooks from repository %s: %s" % - (module.params["repository"], to_native(err)), - exception=traceback.format_exc()) - - module.exit_json(changed=False, hooks=hooks) - - -if __name__ 
== '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py deleted file mode 100644 index c0c97d8c99..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins -# Based on code: -# Copyright: (c) 2013, Phillip Gentry -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gitlab_deploy_key -short_description: Manages GitLab project deploy keys. -description: - - Adds, updates and removes project deploy keys -author: - - Marcus Watkins (@marwatk) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: -- community.general.auth_basic - -options: - api_token: - description: - - GitLab token for logging in. - type: str - project: - description: - - Id or Full path of project in the form of group/name. - required: true - type: str - title: - description: - - Deploy key's title. - required: true - type: str - key: - description: - - Deploy key - required: true - type: str - can_push: - description: - - Whether this key can push to the project. - type: bool - default: no - state: - description: - - When C(present) the deploy key added to the project if it doesn't exist. - - When C(absent) it will be removed from the project if it exists. - default: present - type: str - choices: [ "present", "absent" ] -''' - -EXAMPLES = ''' -- name: "Adding a project deploy key" - community.general.gitlab_deploy_key: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - project: "my_group/my_project" - title: "Jenkins CI" - state: present - key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." - -- name: "Update the above deploy key to add push access" - community.general.gitlab_deploy_key: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - project: "my_group/my_project" - title: "Jenkins CI" - state: present - can_push: yes - -- name: "Remove the previous deploy key from the project" - community.general.gitlab_deploy_key: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - project: "my_group/my_project" - state: absent - key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." 
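-
-# Illustrative extra example (not part of the original module documentation),
-# assuming the same public key may also be added to a second project; the
-# project name below is an assumption:
-- name: "Add the same deploy key to another project"
-  community.general.gitlab_deploy_key:
-    api_url: https://gitlab.example.com/
-    api_token: "{{ api_token }}"
-    project: "my_group/my_other_project"
-    title: "Jenkins CI"
-    state: present
-    key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."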
-
-'''
-
-RETURN = '''
-msg:
-  description: Success or failure message
-  returned: always
-  type: str
-  sample: "Success"
-
-result:
-  description: JSON-parsed response from the server
-  returned: always
-  type: dict
-
-error:
-  description: the error message returned by the GitLab API
-  returned: failed
-  type: str
-  sample: "400: key is already in use"
-
-deploy_key:
-  description: API object
-  returned: always
-  type: dict
-'''
-
-import re
-import traceback
-
-GITLAB_IMP_ERR = None
-try:
-    import gitlab
-    HAS_GITLAB_PACKAGE = True
-except Exception:
-    GITLAB_IMP_ERR = traceback.format_exc()
-    HAS_GITLAB_PACKAGE = False
-
-from ansible.module_utils.api import basic_auth_argument_spec
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
-
-
-class GitLabDeployKey(object):
-    def __init__(self, module, gitlab_instance):
-        self._module = module
-        self._gitlab = gitlab_instance
-        self.deployKeyObject = None
-
-    '''
-    @param project Project object
-    @param key_title Title of the key
-    @param key_key String of the key
-    @param key_can_push Option of the deployKey
-    @param options Deploy key options
-    '''
-    def createOrUpdateDeployKey(self, project, key_title, key_key, options):
-        changed = False
-
-        # note: unfortunately a public key cannot be updated directly by
-        # the GitLab REST API, so for that case we need to delete and
-        # then recreate the key
-        if self.deployKeyObject and self.deployKeyObject.key != key_key:
-            if not self._module.check_mode:
-                self.deployKeyObject.delete()
-            self.deployKeyObject = None
-
-        # Because we have already called existsDeployKey in main()
-        if self.deployKeyObject is None:
-            deployKey = self.createDeployKey(project, {
-                'title': key_title,
-                'key': key_key,
-                'can_push': options['can_push']})
-            changed = True
-        else:
-            changed, deployKey = self.updateDeployKey(self.deployKeyObject, {
-                'can_push': options['can_push']})
-
-        self.deployKeyObject = deployKey
-        if changed:
-            if self._module.check_mode:
-                self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
-
-            try:
-                deployKey.save()
-            except Exception as e:
-                self._module.fail_json(msg="Failed to update deploy key: %s " % e)
-            return True
-        else:
-            return False
-
-    '''
-    @param project Project Object
-    @param arguments Attributes of the deployKey
-    '''
-    def createDeployKey(self, project, arguments):
-        if self._module.check_mode:
-            return True
-
-        try:
-            deployKey = project.keys.create(arguments)
-        except (gitlab.exceptions.GitlabCreateError) as e:
-            self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
-
-        return deployKey
-
-    '''
-    @param deployKey Deploy Key Object
-    @param arguments Attributes of the deployKey
-    '''
-    def updateDeployKey(self, deployKey, arguments):
-        changed = False
-
-        for arg_key, arg_value in arguments.items():
-            if arguments[arg_key] is not None:
-                if getattr(deployKey, arg_key) != arguments[arg_key]:
-                    setattr(deployKey, arg_key, arguments[arg_key])
-                    changed = True
-
-        return (changed, deployKey)
-
-    '''
-    @param project Project object
-    @param key_title Title of the key
-    '''
-    def findDeployKey(self, project, key_title):
-        deployKeys = project.keys.list(all=True)
-        for deployKey in deployKeys:
-            if (deployKey.title == key_title):
-                return deployKey
-
-    '''
-    @param project Project object
-    @param key_title Title of the key
-    '''
-    def existsDeployKey(self, project, key_title):
-        # When the deploy key exists, the object is stored in self.deployKeyObject.
-        deployKey = self.findDeployKey(project, key_title)
-        if deployKey:
-            self.deployKeyObject = deployKey
-            return True
-        return False
-
-    def deleteDeployKey(self):
-        if self._module.check_mode:
-            return True
-
-        return self.deployKeyObject.delete()
-
-
-def main():
-    argument_spec = basic_auth_argument_spec()
-    argument_spec.update(dict(
-        api_token=dict(type='str', no_log=True),
-        state=dict(type='str', default="present", choices=["absent", "present"]),
-        project=dict(type='str', required=True),
-        key=dict(type='str', required=True, no_log=False),
-        can_push=dict(type='bool', default=False),
-        title=dict(type='str', required=True)
-    ))
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[
-            ['api_username', 'api_token'],
-            ['api_password', 'api_token']
-        ],
-        required_together=[
-            ['api_username', 'api_password']
-        ],
-        required_one_of=[
-            ['api_username', 'api_token']
-        ],
-        supports_check_mode=True,
-    )
-
-    state = module.params['state']
-    project_identifier = module.params['project']
-    key_title = module.params['title']
-    key_keyfile = module.params['key']
-    key_can_push = module.params['can_push']
-
-    if not HAS_GITLAB_PACKAGE:
-        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    gitlab_instance = gitlabAuthentication(module)
-
-    gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
-
-    project = findProject(gitlab_instance, project_identifier)
-
-    if project is None:
-        module.fail_json(msg="Failed to create deploy key: project %s doesn't exist" % project_identifier)
-
-    deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title)
-
-    if state == 'absent':
-        if deployKey_exists:
-            gitlab_deploy_key.deleteDeployKey()
-            module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
-        else:
-            module.exit_json(changed=False, msg="Deploy key already deleted or does not exist")
-
-    if state == 'present':
-        if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}):
-
-            module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
-                             deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
-        else:
-            module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
-                             deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/source_control/gitlab/gitlab_group.py
deleted file mode 100644
index a82756d81e..0000000000
--- a/plugins/modules/source_control/gitlab/gitlab_group.py
+++ /dev/null
@@ -1,383 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
-# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: gitlab_group
-short_description: Creates/updates/deletes GitLab Groups
-description:
-  - When the group does not exist in GitLab, it will be created.
-  - When the group does exist and state=absent, the group will be deleted.
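-# Editorial note, not part of the original documentation: deletion is refused
-# while the group still contains projects; see the check in deleteGroup()
-# further below in this module.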
-author:
-  - Werner Dijkerman (@dj-wasabi)
-  - Guillaume Martinez (@Lunik)
-requirements:
-  - python >= 2.7
-  - python-gitlab python module
-extends_documentation_fragment:
-- community.general.auth_basic
-
-options:
-  api_token:
-    description:
-      - GitLab token for logging in.
-    type: str
-  name:
-    description:
-      - Name of the group you want to create.
-    required: true
-    type: str
-  path:
-    description:
-      - The path of the group you want to create; this will be api_url/group_path.
-      - If not supplied, the group_name will be used.
-    type: str
-  description:
-    description:
-      - A description for the group.
-    type: str
-  state:
-    description:
-      - Create or delete group.
-      - Possible values are present and absent.
-    default: present
-    type: str
-    choices: ["present", "absent"]
-  parent:
-    description:
-      - Allows creating subgroups.
-      - ID or full path of the parent group in the form of group/name.
-    type: str
-  visibility:
-    description:
-      - Default visibility of the group
-    choices: ["private", "internal", "public"]
-    default: private
-    type: str
-  project_creation_level:
-    description:
-      - Determine if developers can create projects in the group.
-    choices: ["developer", "maintainer", "noone"]
-    type: str
-    version_added: 3.7.0
-  auto_devops_enabled:
-    description:
-      - Default to Auto DevOps pipeline for all projects within this group.
-    type: bool
-    version_added: 3.7.0
-  subgroup_creation_level:
-    description:
-      - Allowed to create subgroups.
-    choices: ["maintainer", "owner"]
-    type: str
-    version_added: 3.7.0
-  require_two_factor_authentication:
-    description:
-      - Require all users in this group to set up two-factor authentication.
-    type: bool
-    version_added: 3.7.0
-'''
-
-EXAMPLES = '''
-- name: "Delete GitLab Group"
-  community.general.gitlab_group:
-    api_url: https://gitlab.example.com/
-    api_token: "{{ access_token }}"
-    validate_certs: False
-    name: my_first_group
-    state: absent
-
-- name: "Create GitLab Group"
-  community.general.gitlab_group:
-    api_url: https://gitlab.example.com/
-    validate_certs: True
-    api_username: dj-wasabi
-    api_password: "MySecretPassword"
-    name: my_first_group
-    path: my_first_group
-    state: present
-
-# The group will be created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group
-- name: "Create GitLab SubGroup"
-  community.general.gitlab_group:
-    api_url: https://gitlab.example.com/
-    validate_certs: True
-    api_username: dj-wasabi
-    api_password: "MySecretPassword"
-    name: my_first_group
-    path: my_first_group
-    state: present
-    parent: "super_parent/parent"
-
-# Other group which only allows sub-groups - no projects
-- name: "Create GitLab Group for SubGroups only"
-  community.general.gitlab_group:
-    api_url: https://gitlab.example.com/
-    validate_certs: True
-    api_username: dj-wasabi
-    api_password: "MySecretPassword"
-    name: my_main_group
-    path: my_main_group
-    state: present
-    project_creation_level: noone
-    auto_devops_enabled: false
-    subgroup_creation_level: maintainer
-'''
-
-RETURN = '''
-msg:
-  description: Success or failure message
-  returned: always
-  type: str
-  sample: "Success"
-
-result:
-  description: JSON-parsed response from the server
-  returned: always
-  type: dict
-
-error:
-  description: the error message returned by the GitLab API
-  returned: failed
-  type: str
-  sample: "400: path is already in use"
-
-group:
-  description: API object
-  returned: always
-  type: dict
-'''
-
-import traceback
-
-GITLAB_IMP_ERR = None
-try:
-    import gitlab
-    HAS_GITLAB_PACKAGE = True
-except Exception:
-    GITLAB_IMP_ERR = traceback.format_exc()
-    HAS_GITLAB_PACKAGE = False
-
-from ansible.module_utils.api import basic_auth_argument_spec
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
-
-
-class GitLabGroup(object):
-    def __init__(self, module, gitlab_instance):
-        self._module = module
-        self._gitlab = gitlab_instance
-        self.groupObject = None
-
-    '''
-    @param group Group object
-    '''
-    def getGroupId(self, group):
-        if group is not None:
-            return group.id
-        return None
-
-    '''
-    @param name Name of the group
-    @param parent Parent group full path
-    @param options Group options
-    '''
-    def createOrUpdateGroup(self, name, parent, options):
-        changed = False
-
-        # Because we have already called existsGroup in main()
-        if self.groupObject is None:
-            parent_id = self.getGroupId(parent)
-
-            payload = {
-                'name': name,
-                'path': options['path'],
-                'parent_id': parent_id,
-                'visibility': options['visibility'],
-                'project_creation_level': options['project_creation_level'],
-                'auto_devops_enabled': options['auto_devops_enabled'],
-                'subgroup_creation_level': options['subgroup_creation_level'],
-            }
-            if options.get('description'):
-                payload['description'] = options['description']
-            if options.get('require_two_factor_authentication'):
-                payload['require_two_factor_authentication'] = options['require_two_factor_authentication']
-            group = self.createGroup(payload)
-            changed = True
-        else:
-            changed, group = self.updateGroup(self.groupObject, {
-                'name': name,
-                'description': options['description'],
-                'visibility': options['visibility'],
-                'project_creation_level': options['project_creation_level'],
-                'auto_devops_enabled': options['auto_devops_enabled'],
-                'subgroup_creation_level': options['subgroup_creation_level'],
-                'require_two_factor_authentication': options['require_two_factor_authentication'],
-            })
-
-        self.groupObject = group
-        if changed:
-            if self._module.check_mode:
-                self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
-
-            try:
-                group.save()
-            except Exception as e:
-                self._module.fail_json(msg="Failed to update group: %s " % e)
-            return True
-        else:
-            return False
-
-    '''
-    @param arguments Attributes of the group
-    '''
-    def createGroup(self, arguments):
-        if self._module.check_mode:
-            return True
-
-        try:
-            group = self._gitlab.groups.create(arguments)
-        except (gitlab.exceptions.GitlabCreateError) as e:
-            self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
-
-        return group
-
-    '''
-    @param group Group Object
-    @param arguments Attributes of the group
-    '''
-    def updateGroup(self, group, arguments):
-        changed = False
-
-        for arg_key, arg_value in arguments.items():
-            if arguments[arg_key] is not None:
-                if getattr(group, arg_key) != arguments[arg_key]:
-                    setattr(group, arg_key, arguments[arg_key])
-                    changed = True
-
-        return (changed, group)
-
-    def deleteGroup(self):
-        group = self.groupObject
-
-        if len(group.projects.list()) >= 1:
-            self._module.fail_json(
-                msg="There are still projects in this group. These need to be moved or deleted before this group can be removed.")
-        else:
-            if self._module.check_mode:
-                return True
-
-            try:
-                group.delete()
-            except Exception as e:
-                self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
-
-    '''
-    @param name Name of the group
-    @param full_path Complete path of the group, including the parent group path.
-    '''
-    def existsGroup(self, project_identifier):
-        # When group/user exists, object will be stored in self.groupObject.
-        group = findGroup(self._gitlab, project_identifier)
-        if group:
-            self.groupObject = group
-            return True
-        return False
-
-
-def main():
-    argument_spec = basic_auth_argument_spec()
-    argument_spec.update(dict(
-        api_token=dict(type='str', no_log=True),
-        name=dict(type='str', required=True),
-        path=dict(type='str'),
-        description=dict(type='str'),
-        state=dict(type='str', default="present", choices=["absent", "present"]),
-        parent=dict(type='str'),
-        visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
-        project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']),
-        auto_devops_enabled=dict(type='bool'),
-        subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
-        require_two_factor_authentication=dict(type='bool'),
-    ))
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[
-            ['api_username', 'api_token'],
-            ['api_password', 'api_token'],
-        ],
-        required_together=[
-            ['api_username', 'api_password'],
-        ],
-        required_one_of=[
-            ['api_username', 'api_token']
-        ],
-        supports_check_mode=True,
-    )
-
-    group_name = module.params['name']
-    group_path = module.params['path']
-    description = module.params['description']
-    state = module.params['state']
-    parent_identifier = module.params['parent']
-    group_visibility = module.params['visibility']
-    project_creation_level = module.params['project_creation_level']
-    auto_devops_enabled = module.params['auto_devops_enabled']
-    subgroup_creation_level = module.params['subgroup_creation_level']
-    require_two_factor_authentication = module.params['require_two_factor_authentication']
-
-    if not HAS_GITLAB_PACKAGE:
-        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    gitlab_instance = gitlabAuthentication(module)
-
-    # Define default group_path based on group_name
-    if group_path is None:
-        group_path = group_name.replace(" ", "_")
-
-    gitlab_group = GitLabGroup(module, gitlab_instance)
-
-    parent_group = None
-    if parent_identifier:
-        parent_group = findGroup(gitlab_instance, parent_identifier)
-        if not parent_group:
-            module.fail_json(msg="Failed to create GitLab group: parent group doesn't exist")
-
-        group_exists = gitlab_group.existsGroup(parent_group.full_path + '/' + group_path)
-    else:
-        group_exists = gitlab_group.existsGroup(group_path)
-
-    if state == 'absent':
-        if group_exists:
-            gitlab_group.deleteGroup()
-            module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
-        else:
-            module.exit_json(changed=False, msg="Group already deleted or does not exist")
-
-    if state == 'present':
-        if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
-            "path": group_path,
-            "description": description,
-            "visibility": group_visibility,
-            "project_creation_level": project_creation_level,
-            "auto_devops_enabled": auto_devops_enabled,
-            "subgroup_creation_level": subgroup_creation_level,
-            "require_two_factor_authentication": require_two_factor_authentication,
-        }):
-            module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
-        else:
-            module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py
deleted file mode 100644
index 8c351aaceb..0000000000
--- a/plugins/modules/source_control/gitlab/gitlab_group_members.py
+++ /dev/null
@@ -1,449 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Zainab Alsaffar
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: gitlab_group_members
-short_description: Manage group members on GitLab Server
-description:
-    - This module allows adding members to and removing members from a group, and changing a member's access level in a group on GitLab.
-version_added: '1.2.0'
-author: Zainab Alsaffar (@zanssa)
-requirements:
-    - python-gitlab python module <= 1.15.0
-    - administrator rights on the GitLab server
-extends_documentation_fragment: community.general.auth_basic
-options:
-    api_token:
-        description:
-            - A personal access token to authenticate with the GitLab API.
-        required: true
-        type: str
-    gitlab_group:
-        description:
-            - The C(full_path) of the GitLab group the member is added to/removed from.
-            - Setting this to C(name) or C(path) is deprecated and will be removed in community.general 6.0.0. Use C(full_path) instead.
-        required: true
-        type: str
-    gitlab_user:
-        description:
-            - A username or a list of usernames to add to/remove from the GitLab group.
-            - Mutually exclusive with I(gitlab_users_access).
-        type: list
-        elements: str
-    access_level:
-        description:
-            - The access level for the user.
-            - Required if I(state=present), when the user state is set to present.
-            - Mutually exclusive with I(gitlab_users_access).
-        type: str
-        choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
-    gitlab_users_access:
-        description:
-            - Provide a list of user to access level mappings.
-            - Every dictionary in this list specifies a user (by username) and the access level the user should have.
-            - Mutually exclusive with I(gitlab_user) and I(access_level).
-            - Use together with I(purge_users) to remove all users not specified here from the group.
-        type: list
-        elements: dict
-        suboptions:
-            name:
-                description: A username to add to/remove from the GitLab group.
-                type: str
-                required: true
-            access_level:
-                description:
-                    - The access level for the user.
-                    - Required if I(state=present), when the user state is set to present.
-                type: str
-                choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
-                required: true
-        version_added: 3.6.0
-    state:
-        description:
-            - State of the member in the group.
-            - On C(present), it adds a user to a GitLab group.
-            - On C(absent), it removes a user from a GitLab group.
-        choices: ['present', 'absent']
-        default: 'present'
-        type: str
-    purge_users:
-        description:
-            - Adds/removes users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
-              If omitted, do not purge orphaned members.
-            - Is only used when I(state=present).
-        type: list
-        elements: str
-        choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
-        version_added: 3.6.0
-notes:
-    - Supports C(check_mode).
-'''
-
-EXAMPLES = r'''
-- name: Add a user to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user: username
-    access_level: developer
-    state: present
-
-- name: Remove a user from a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user: username
-    state: absent
-
-- name: Add a list of users to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user:
-      - user1
-      - user2
-    access_level: developer
-    state: present
-
-- name: Add a list of users with dedicated access levels to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_users_access:
-      - name: user1
-        access_level: developer
-      - name: user2
-        access_level: maintainer
-    state: present
-
-- name: Add a user, remove all others which might be on this access level
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user: username
-    access_level: developer
-    purge_users: developer
-    state: present
-
-- name: Remove a list of users with dedicated access levels from a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_users_access:
-      - name: user1
        access_level: developer
-      - name: user2
-        access_level: maintainer
-    state: absent
-'''
-
-RETURN = r''' # '''
-
-from ansible.module_utils.api import basic_auth_argument_spec
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
-
-import traceback
-
-try:
-    import gitlab
-    HAS_PY_GITLAB = True
-except ImportError:
-    GITLAB_IMP_ERR = traceback.format_exc()
-    HAS_PY_GITLAB = False
-
-
-class GitLabGroup(object):
-    def __init__(self, module, gl):
-        self._module = module
-        self._gitlab = gl
-
-    # get user id if the user exists
-    def get_user_id(self, gitlab_user):
-        user_exists = self._gitlab.users.list(username=gitlab_user)
-        if user_exists:
-            return user_exists[0].id
-
-    # get group id if group exists
-    def get_group_id(self, gitlab_group):
-        groups = self._gitlab.groups.list(search=gitlab_group)
-        for group in groups:
-            if group.full_path == gitlab_group:
-                return group.id
-        for group in groups:
-            if group.path == gitlab_group or group.name == gitlab_group:
-                self._module.deprecate(
-                    msg="Setting 'gitlab_group' to 'name' or 'path' is deprecated. Use 'full_path' instead",
-                    version="6.0.0", collection_name="community.general")
-                return group.id
-
-    # get all members in a group
-    def get_members_in_a_group(self, gitlab_group_id):
-        group = self._gitlab.groups.get(gitlab_group_id)
-        return group.members.list(all=True)
-
-    # get single member in a group by user name
-    def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id):
-        member = None
-        group = self._gitlab.groups.get(gitlab_group_id)
-        try:
-            member = group.members.get(gitlab_user_id)
-            if member:
-                return member
-        except gitlab.exceptions.GitlabGetError as e:
-            return None
-
-    # check if the user is a member of the group
-    def is_user_a_member(self, members, gitlab_user_id):
-        for member in members:
-            if member.id == gitlab_user_id:
-                return True
-        return False
-
-    # add user to a group
-    def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
-        group = self._gitlab.groups.get(gitlab_group_id)
-        add_member = group.members.create(
-            {'user_id': gitlab_user_id, 'access_level': access_level})
-
-    # remove user from a group
-    def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
-        group = self._gitlab.groups.get(gitlab_group_id)
-        group.members.delete(gitlab_user_id)
-
-    # get user's access level
-    def get_user_access_level(self, members, gitlab_user_id):
-        for member in members:
-            if member.id == gitlab_user_id:
-                return member.access_level
-
-    # update user's access level in a group
-    def update_user_access_level(self, members, gitlab_user_id, access_level):
-        for member in members:
-            if member.id == gitlab_user_id:
-                member.access_level = access_level
-                member.save()
-
-
-def main():
-    argument_spec = basic_auth_argument_spec()
-    argument_spec.update(dict(
-        api_token=dict(type='str', required=True, no_log=True),
-        gitlab_group=dict(type='str', required=True),
-        gitlab_user=dict(type='list', elements='str'),
-        state=dict(type='str', default='present', choices=['present', 'absent']),
-        access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
-        purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
-        gitlab_users_access=dict(
-            type='list',
-            elements='dict',
-            options=dict(
-                name=dict(type='str', required=True),
-                access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True),
-            )
-        ),
-    ))
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[
-            ['api_username', 'api_token'],
-            ['api_password', 'api_token'],
-            ['gitlab_user', 'gitlab_users_access'],
-            ['access_level', 'gitlab_users_access'],
-        ],
-        required_together=[
-            ['api_username', 'api_password'],
-            ['gitlab_user', 'access_level'],
-        ],
-        required_one_of=[
-            ['api_username', 'api_token'],
-            ['gitlab_user', 'gitlab_users_access'],
-        ],
-        required_if=[
-            ['state', 'present', ['access_level', 'gitlab_users_access'], True],
-        ],
-        supports_check_mode=True,
-    )
-
-    if not HAS_PY_GITLAB:
-        module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)
-
-    access_level_int = {
-        'guest': gitlab.GUEST_ACCESS,
-        'reporter': gitlab.REPORTER_ACCESS,
-        'developer': gitlab.DEVELOPER_ACCESS,
-        'maintainer': gitlab.MAINTAINER_ACCESS,
-        'owner': gitlab.OWNER_ACCESS
-    }
-
-    gitlab_group = module.params['gitlab_group']
-    state = module.params['state']
-    access_level = module.params['access_level']
-    purge_users = module.params['purge_users']
-
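-    # purge_users is given as textual level names; map them to python-gitlab's
-    # numeric access-level constants so they can be compared against each
-    # member's access_level below.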
-    if purge_users:
-        purge_users = [access_level_int[level] for level in purge_users]
-
-    # connect to gitlab server
-    gl = gitlabAuthentication(module)
-
-    group = GitLabGroup(module, gl)
-
-    gitlab_group_id = group.get_group_id(gitlab_group)
-
-    # group doesn't exist
-    if not gitlab_group_id:
-        module.fail_json(msg="group '%s' not found." % gitlab_group)
-
-    members = []
-    if module.params['gitlab_user'] is not None:
-        gitlab_users_access = []
-        gitlab_users = module.params['gitlab_user']
-        for gl_user in gitlab_users:
-            gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
-    elif module.params['gitlab_users_access'] is not None:
-        gitlab_users_access = module.params['gitlab_users_access']
-        for user_level in gitlab_users_access:
-            user_level['access_level'] = access_level_int[user_level['access_level']]
-
-    if len(gitlab_users_access) == 1 and not purge_users:
-        # only single user given
-        members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))]
-        if members[0] is None:
-            members = []
-    elif len(gitlab_users_access) > 1 or purge_users:
-        # list of users given
-        members = group.get_members_in_a_group(gitlab_group_id)
-    else:
-        module.exit_json(changed=False, result="Nothing to do, please give at least one user or set purge_users true.",
-                         result_data=[])
-
-    changed = False
-    error = False
-    changed_users = []
-    changed_data = []
-
-    for gitlab_user in gitlab_users_access:
-        gitlab_user_id = group.get_user_id(gitlab_user['name'])
-
-        # user doesn't exist
-        if not gitlab_user_id:
-            if state == 'absent':
-                changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name'])
-                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
-                                     'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']})
-            else:
-                error = True
-                changed_users.append("user '%s' not found." % gitlab_user['name'])
-                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
-                                     'msg': "user '%s' not found." % gitlab_user['name']})
-            continue
-
-        is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
-
-        # check if the user is a member in the group
-        if not is_user_a_member:
-            if state == 'present':
-                # add user to the group
-                try:
-                    if not module.check_mode:
-                        group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level'])
-                    changed = True
-                    changed_users.append("Successfully added user '%s' to group" % gitlab_user['name'])
-                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
-                                         'msg': "Successfully added user '%s' to group" % gitlab_user['name']})
-                except (gitlab.exceptions.GitlabCreateError) as e:
-                    error = True
-                    changed_users.append("Failed to add user '%s' to the group" % gitlab_user['name'])
-                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
-                                         'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
-            # state as absent
-            else:
-                changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name'])
-                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
-                                     'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']})
-        # in case that a user is a member
-        else:
-            if state == 'present':
-                # compare the access level
-                user_access_level = group.get_user_access_level(members, gitlab_user_id)
-                if user_access_level == gitlab_user['access_level']:
-                    changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name'])
-                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
-                                         'msg': "User, '%s', is already a member in the group. No change to report" % gitlab_user['name']})
-                else:
-                    # update the access level for the user
-                    try:
-                        if not module.check_mode:
-                            group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
-                        changed = True
-                        changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
-                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
-                                             'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
-                    except (gitlab.exceptions.GitlabUpdateError) as e:
-                        error = True
-                        changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
-                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
-                                             'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
-            else:
-                # remove the user from the group
-                try:
-                    if not module.check_mode:
-                        group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
-                    changed = True
-                    changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name'])
-                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
-                                         'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']})
-                except (gitlab.exceptions.GitlabDeleteError) as e:
-                    error = True
-                    changed_users.append("Failed to remove user, '%s', from the group" % gitlab_user['name'])
-                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
-                                         'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
-
-    # if state is present and purge_users is set, delete group members that have one of the
-    # given access levels but do not appear in gitlab_users_access
-    if state == 'present' and purge_users:
-        uppercase_names_in_gitlab_users_access = []
-        for name in gitlab_users_access:
-            uppercase_names_in_gitlab_users_access.append(name['name'].upper())
-
-        for member in members:
-            if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
-                try:
-                    if not module.check_mode:
-                        group.remove_user_from_group(member.id, gitlab_group_id)
-                    changed = True
-                    changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username)
-                    changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
-                                         'msg': "Successfully removed user '%s', from group. Was not in given list" % member.username})
-                except (gitlab.exceptions.GitlabDeleteError) as e:
-                    error = True
-                    changed_users.append("Failed to remove user, '%s', from the group" % member.username)
-                    changed_data.append({'gitlab_user': member.username, 'result': 'FAILED',
-                                         'msg': "Failed to remove user, '%s' from the group: %s" % (member.username, e)})
-
-    if len(gitlab_users_access) == 1 and error:
-        # if a single user was given and an error occurred, return the error; for lists, errors are reported per user
-        module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data)
-    elif error:
-        module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
-
-    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/source_control/gitlab/gitlab_group_variable.py b/plugins/modules/source_control/gitlab/gitlab_group_variable.py
deleted file mode 100644
index dd20a0b8da..0000000000
--- a/plugins/modules/source_control/gitlab/gitlab_group_variable.py
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io)
-# Based on code:
-# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
-module: gitlab_group_variable
-short_description: Creates, updates, or deletes GitLab group variables
-version_added: 1.2.0
-description:
-  - Creates a group variable if it does not exist.
-  - When a group variable does exist, its value will be updated when the values are different.
-  - Variables which exist in the GitLab group but are untouched in the playbook stay untouched
-    (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
-author:
-  - Florent Madiot (@scodeman)
-requirements:
-  - python >= 2.7
-  - python-gitlab python module
-extends_documentation_fragment:
-  - community.general.auth_basic
-
-options:
-  state:
-    description:
-      - Create or delete group variable.
-    default: present
-    type: str
-    choices: ["present", "absent"]
-  api_token:
-    description:
-      - GitLab access token with API permissions.
-    required: true
-    type: str
-  group:
-    description:
-      - The path and name of the group.
-    required: true
-    type: str
-  purge:
-    description:
-      - When set to C(true), delete all variables which are not mentioned in the task.
-    default: false
-    type: bool
-  vars:
-    description:
-      - When the list element is a simple key-value pair, set masked and protected to false.
-      - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
-        have full control over whether a value should be masked, protected or both.
-      - Support for protected values requires GitLab >= 9.3.
-      - Support for masked values requires GitLab >= 11.10.
-      - A I(value) must be a string or a number.
-      - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
-      - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
-        See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
-    default: {}
-    type: dict
-notes:
-- Supports I(check_mode).
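-# Editorial example, not part of the original documentation: a masked value such
-# as "c2VjcmV0MTIzNDU2" satisfies the constraints above, since it uses only
-# Base64-alphabet characters and is longer than 8 characters.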
-''' - - -EXAMPLES = r''' -- name: Set or update some CI/CD variables - community.general.gitlab_group_variable: - api_url: https://gitlab.com - api_token: secret_access_token - group: scodeman/testgroup/ - purge: false - vars: - ACCESS_KEY_ID: abc123 - SECRET_ACCESS_KEY: 321cba - -- name: Set or update some CI/CD variables - community.general.gitlab_group_variable: - api_url: https://gitlab.com - api_token: secret_access_token - group: scodeman/testgroup/ - purge: false - vars: - ACCESS_KEY_ID: abc123 - SECRET_ACCESS_KEY: - value: 3214cbad - masked: true - protected: true - variable_type: env_var - -- name: Delete one variable - community.general.gitlab_group_variable: - api_url: https://gitlab.com - api_token: secret_access_token - group: scodeman/testgroup/ - state: absent - vars: - ACCESS_KEY_ID: abc123 -''' - -RETURN = r''' -group_variable: - description: Four lists of the variablenames which were added, updated, removed or exist. - returned: always - type: dict - contains: - added: - description: A list of variables which were created. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - untouched: - description: A list of variables which exist. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - removed: - description: A list of variables which were deleted. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - updated: - description: A list of variables whose values were changed. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication - - -class GitlabGroupVariables(object): - - def __init__(self, module, gitlab_instance): - self.repo = gitlab_instance - self.group = self.get_group(module.params['group']) - self._module = module - - def get_group(self, group_name): - return self.repo.groups.get(group_name) - - def list_all_group_variables(self): - page_nb = 1 - variables = [] - vars_page = self.group.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.group.variables.list(page=page_nb) - return variables - - def create_variable(self, key, value, masked, protected, variable_type): - if self._module.check_mode: - return - return self.group.variables.create({"key": key, "value": value, - "masked": masked, "protected": protected, - "variable_type": variable_type}) - - def update_variable(self, key, var, value, masked, protected, variable_type): - if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type: - return False - - if self._module.check_mode: - return True - - if var.protected == protected and var.masked == masked and var.variable_type == variable_type: - var.value = value - var.save() - return True - - self.delete_variable(key) - self.create_variable(key, value, masked, protected, variable_type) - return True - - def delete_variable(self, key): - if self._module.check_mode: - return - return 
self.group.variables.delete(key) - - -def native_python_main(this_gitlab, purge, var_list, state, module): - - change = False - return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) - - gitlab_keys = this_gitlab.list_all_group_variables() - existing_variables = [x.get_id() for x in gitlab_keys] - - for key in var_list: - if not isinstance(var_list[key], (string_types, integer_types, float, dict)): - module.fail_json(msg="Value of %s variable must be of type string, integer, float or dict, passed %s" % (key, var_list[key].__class__.__name__)) - - for key in var_list: - - if isinstance(var_list[key], (string_types, integer_types, float)): - value = var_list[key] - masked = False - protected = False - variable_type = 'env_var' - elif isinstance(var_list[key], dict): - value = var_list[key].get('value') - masked = var_list[key].get('masked', False) - protected = var_list[key].get('protected', False) - variable_type = var_list[key].get('variable_type', 'env_var') - - if key in existing_variables: - index = existing_variables.index(key) - existing_variables[index] = None - - if state == 'present': - single_change = this_gitlab.update_variable(key, - gitlab_keys[index], - value, masked, - protected, - variable_type) - change = single_change or change - if single_change: - return_value['updated'].append(key) - else: - return_value['untouched'].append(key) - - elif state == 'absent': - this_gitlab.delete_variable(key) - change = True - return_value['removed'].append(key) - - elif key not in existing_variables and state == 'present': - this_gitlab.create_variable(key, value, masked, protected, variable_type) - change = True - return_value['added'].append(key) - - existing_variables = list(filter(None, existing_variables)) - if purge: - for item in existing_variables: - this_gitlab.delete_variable(item) - change = True - return_value['removed'].append(item) - else: - return_value['untouched'].extend(existing_variables) - - return change, return_value - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update( - api_token=dict(type='str', required=True, no_log=True), - group=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - state=dict(type='str', default="present", choices=["absent", "present"]) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token'] - ], - supports_check_mode=True - ) - - purge = module.params['purge'] - var_list = module.params['vars'] - state = module.params['state'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlabAuthentication(module) - - this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) - - changed, return_value = native_python_main(this_gitlab, purge, var_list, state, module) - - module.exit_json(changed=changed, group_variable=return_value) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_hook.py b/plugins/modules/source_control/gitlab/gitlab_hook.py deleted file mode 100644 index 5128fba9e1..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_hook.py +++ /dev/null @@ -1,387 +0,0 @@ -#!/usr/bin/python -# -*- 
coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins -# Based on code: -# Copyright: (c) 2013, Phillip Gentry -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gitlab_hook -short_description: Manages GitLab project hooks -description: - - Adds, updates, and removes project hooks. -author: - - Marcus Watkins (@marwatk) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: -- community.general.auth_basic - -options: - api_token: - description: - - GitLab token for logging in. - type: str - project: - description: - - ID or full path of the project in the form of group/name. - required: true - type: str - hook_url: - description: - - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion. - required: true - type: str - state: - description: - - When C(present), the hook will be updated to match the input or created if it doesn't exist. - - When C(absent), the hook will be deleted if it exists. - default: present - type: str - choices: [ "present", "absent" ] - push_events: - description: - - Trigger hook on push events. - type: bool - default: yes - push_events_branch_filter: - description: - - Branch name or wildcard to trigger hook on push events. - type: str - version_added: '0.2.0' - issues_events: - description: - - Trigger hook on issues events. - type: bool - default: no - merge_requests_events: - description: - - Trigger hook on merge requests events. - type: bool - default: no - tag_push_events: - description: - - Trigger hook on tag push events. - type: bool - default: no - note_events: - description: - - Trigger hook on note events or when someone adds a comment. - type: bool - default: no - job_events: - description: - - Trigger hook on job events. - type: bool - default: no - pipeline_events: - description: - - Trigger hook on pipeline events. - type: bool - default: no - wiki_page_events: - description: - - Trigger hook on wiki events. - type: bool - default: no - hook_validate_certs: - description: - - Whether GitLab will do SSL verification when triggering the hook. - type: bool - default: no - aliases: [ enable_ssl_verification ] - token: - description: - - Secret token to validate hook messages at the receiver. - - If this is present it will always result in a change as it cannot be retrieved from GitLab. - - Will show up in the X-GitLab-Token HTTP request header. - required: false - type: str -'''
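For orientation, the hook options above translate almost one-to-one into a python-gitlab payload; in particular, the module's I(hook_validate_certs) option is sent as the API field C(enable_ssl_verification). A minimal sketch, assuming a reachable instance and placeholder credentials:

```python
# Hedged sketch of the python-gitlab call gitlab_hook wraps; the URL,
# token, and project path are placeholders.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="access_token")
project = gl.projects.get("my_group/my_project")

# The secret token is write-only in the GitLab API, which is why the
# module always reports a change when 'token' is set.
hook = project.hooks.create({
    "url": "https://my-ci-server.example.com/gitlab-hook",
    "push_events": True,
    "tag_push_events": True,
    "enable_ssl_verification": False,
    "token": "my-super-secret-token-that-my-ci-server-will-check",
})
```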
- -EXAMPLES = ''' -- name: "Adding a project hook" - community.general.gitlab_hook: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - project: "my_group/my_project" - hook_url: "https://my-ci-server.example.com/gitlab-hook" - state: present - push_events: yes - tag_push_events: yes - hook_validate_certs: no - token: "my-super-secret-token-that-my-ci-server-will-check" - -- name: "Delete the previous hook" - community.general.gitlab_hook: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - project: "my_group/my_project" - hook_url: "https://my-ci-server.example.com/gitlab-hook" - state: absent - -- name: "Delete a hook by numeric project id" - community.general.gitlab_hook: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - project: 10 - hook_url: "https://my-ci-server.example.com/gitlab-hook" - state: absent -''' - -RETURN = ''' -msg: - description: Success or failure message. - returned: always - type: str - sample: "Success" - -result: - description: JSON-parsed response from the server. - returned: always - type: dict - -error: - description: The error message returned by the GitLab API. - returned: failed - type: str - sample: "400: path is already in use" - -hook: - description: API object. - returned: always - type: dict -''' - -import re -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication - - -class GitLabHook(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.hookObject = None - - ''' - @param project Project object - @param hook_url URL to call on event - @param options Attributes of the hook - ''' - def createOrUpdateHook(self, project, hook_url, options): - changed = False - - # existsHook() has already been called in main(), so hookObject is set when the hook exists - if self.hookObject is None: - hook = self.createHook(project, { - 'url': hook_url, - 'push_events': options['push_events'], - 'push_events_branch_filter': options['push_events_branch_filter'], - 'issues_events': options['issues_events'], - 'merge_requests_events': options['merge_requests_events'], - 'tag_push_events': options['tag_push_events'], - 'note_events': options['note_events'], - 'job_events': options['job_events'], - 'pipeline_events': options['pipeline_events'], - 'wiki_page_events': options['wiki_page_events'], - 'enable_ssl_verification': options['enable_ssl_verification'], - 'token': options['token']}) - changed = True - else: - changed, hook = self.updateHook(self.hookObject, { - 'push_events': options['push_events'], - 'push_events_branch_filter': options['push_events_branch_filter'], - 'issues_events': options['issues_events'], - 'merge_requests_events': options['merge_requests_events'], - 'tag_push_events': options['tag_push_events'], - 'note_events': options['note_events'], - 'job_events': options['job_events'], - 'pipeline_events': options['pipeline_events'], - 'wiki_page_events': options['wiki_page_events'], - 'enable_ssl_verification': options['enable_ssl_verification'], -
'token': options['token']}) - - self.hookObject = hook - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url) - - try: - hook.save() - except Exception as e: - self._module.fail_json(msg="Failed to update hook: %s " % e) - return True - else: - return False - - ''' - @param project Project Object - @param arguments Attributes of the hook - ''' - def createHook(self, project, arguments): - if self._module.check_mode: - return True - - hook = project.hooks.create(arguments) - - return hook - - ''' - @param hook Hook Object - @param arguments Attributes of the hook - ''' - def updateHook(self, hook, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(hook, arg_key) != arguments[arg_key]: - setattr(hook, arg_key, arguments[arg_key]) - changed = True - - return (changed, hook) - - ''' - @param project Project object - @param hook_url Url to call on event - ''' - def findHook(self, project, hook_url): - hooks = project.hooks.list() - for hook in hooks: - if (hook.url == hook_url): - return hook - - ''' - @param project Project object - @param hook_url Url to call on event - ''' - def existsHook(self, project, hook_url): - # When project exists, object will be stored in self.projectObject. - hook = self.findHook(project, hook_url) - if hook: - self.hookObject = hook - return True - return False - - def deleteHook(self): - if self._module.check_mode: - return True - - return self.hookObject.delete() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_token=dict(type='str', no_log=True), - state=dict(type='str', default="present", choices=["absent", "present"]), - project=dict(type='str', required=True), - hook_url=dict(type='str', required=True), - push_events=dict(type='bool', default=True), - push_events_branch_filter=dict(type='str', default=''), - issues_events=dict(type='bool', default=False), - merge_requests_events=dict(type='bool', default=False), - tag_push_events=dict(type='bool', default=False), - note_events=dict(type='bool', default=False), - job_events=dict(type='bool', default=False), - pipeline_events=dict(type='bool', default=False), - wiki_page_events=dict(type='bool', default=False), - hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), - token=dict(type='str', no_log=True), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'] - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token'] - ], - supports_check_mode=True, - ) - - state = module.params['state'] - project_identifier = module.params['project'] - hook_url = module.params['hook_url'] - push_events = module.params['push_events'] - push_events_branch_filter = module.params['push_events_branch_filter'] - issues_events = module.params['issues_events'] - merge_requests_events = module.params['merge_requests_events'] - tag_push_events = module.params['tag_push_events'] - note_events = module.params['note_events'] - job_events = module.params['job_events'] - pipeline_events = module.params['pipeline_events'] - wiki_page_events = module.params['wiki_page_events'] - enable_ssl_verification = module.params['hook_validate_certs'] - hook_token = module.params['token'] - - if not HAS_GITLAB_PACKAGE: - 
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlabAuthentication(module) - - gitlab_hook = GitLabHook(module, gitlab_instance) - - project = findProject(gitlab_instance, project_identifier) - - if project is None: - module.fail_json(msg="Failed to create hook: project %s doesn't exist" % project_identifier) - - hook_exists = gitlab_hook.existsHook(project, hook_url) - - if state == 'absent': - if hook_exists: - gitlab_hook.deleteHook() - module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url) - else: - module.exit_json(changed=False, msg="Hook already deleted or does not exist") - - if state == 'present': - if gitlab_hook.createOrUpdateHook(project, hook_url, { - "push_events": push_events, - "push_events_branch_filter": push_events_branch_filter, - "issues_events": issues_events, - "merge_requests_events": merge_requests_events, - "tag_push_events": tag_push_events, - "note_events": note_events, - "job_events": job_events, - "pipeline_events": pipeline_events, - "wiki_page_events": wiki_page_events, - "enable_ssl_verification": enable_ssl_verification, - "token": hook_token}): - - module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs) - else: - module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py deleted file mode 100644 index ad5e0a2166..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ /dev/null @@ -1,506 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gitlab_project -short_description: Creates/updates/deletes GitLab Projects -description: - - When the project does not exist in GitLab, it will be created. - - When the project does exist and I(state=absent), the project will be deleted. - - When changes are made to the project, the project will be updated. -author: - - Werner Dijkerman (@dj-wasabi) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: -- community.general.auth_basic - -options: - api_token: - description: - - GitLab token for logging in. - type: str - group: - description: - - ID or the full path of the group to which this project belongs. - type: str - name: - description: - - The name of the project. - required: true - type: str - path: - description: - - The path of the project you want to create, this will be server_url//path. - - If not supplied, name will be used. - type: str - description: - description: - - A description for the project. - type: str - initialize_with_readme: - description: - - Will initialize the project with a default C(README.md). - - Is only used when the project is created, and ignored otherwise. - type: bool - default: false - version_added: "4.0.0" - issues_enabled: - description: - - Whether you want to create issues or not. - type: bool - default: yes
- merge_requests_enabled: - description: - - Whether merge requests can be made or not. - type: bool - default: yes - wiki_enabled: - description: - - If a wiki for this project should be available or not. - type: bool - default: yes - snippets_enabled: - description: - - If creating snippets should be available or not. - type: bool - default: yes - visibility: - description: - - C(private) means project access must be granted explicitly for each user. - - C(internal) means the project can be cloned by any logged in user. - - C(public) means the project can be cloned without any authentication. - default: private - type: str - choices: ["private", "internal", "public"] - aliases: - - visibility_level - import_url: - description: - - Git repository which will be imported into GitLab. - - The GitLab server needs read access to this Git repository. - required: false - type: str - state: - description: - - Create or delete project. - default: present - type: str - choices: ["present", "absent"] - merge_method: - description: - - What requirements are placed upon merges. - - Possible values are C(merge), C(rebase_merge) (merge commit with semi-linear history), and C(ff) (fast-forward merges only). - type: str - choices: ["ff", "merge", "rebase_merge"] - default: merge - version_added: "1.0.0" - lfs_enabled: - description: - - Enable Git Large File Storage (LFS) to manage large files such - as audio, video, and graphics files. - type: bool - required: false - default: false - version_added: "2.0.0" - username: - description: - - Used to create a personal project under a user's name. - type: str - version_added: "3.3.0" - allow_merge_on_skipped_pipeline: - description: - - Allow merge when skipped pipelines exist. - type: bool - version_added: "3.4.0" - only_allow_merge_if_all_discussions_are_resolved: - description: - - All discussions on a merge request (MR) have to be resolved. - type: bool - version_added: "3.4.0" - only_allow_merge_if_pipeline_succeeds: - description: - - Only allow merges if the pipeline succeeded. - type: bool - version_added: "3.4.0" - packages_enabled: - description: - - Enable the GitLab package repository. - type: bool - version_added: "3.4.0" - remove_source_branch_after_merge: - description: - - Remove the source branch after merge. - type: bool - version_added: "3.4.0" - squash_option: - description: - - Squash commits when merging. - type: str - choices: ["never", "always", "default_off", "default_on"] - version_added: "3.4.0" - ci_config_path: - description: - - Custom path to the CI configuration file for this project. - type: str - version_added: "3.7.0" - shared_runners_enabled: - description: - - Enable shared runners for this project. - type: bool - version_added: "3.7.0" -'''
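Before creating a project, main() (shown further below) resolves I(group) or I(username) into a namespace ID and passes it to the create call. A minimal python-gitlab sketch of that resolution, with placeholder values:

```python
# Hedged sketch of the namespace resolution gitlab_project performs
# before creating a project; URL, token, group, and names are placeholders.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="api_token")

# Group-owned project: the group's id serves as the namespace id.
namespace_id = gl.groups.get("ansible").id

# Personal project instead: look the namespace up by username.
# namespace_id = gl.namespaces.list(search="dj-wasabi")[0].id

gl.projects.create({
    "name": "my_first_project",
    "namespace_id": namespace_id,
    "visibility": "private",
})
```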
- -EXAMPLES = r''' -- name: Create GitLab Project - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - name: my_first_project - group: "10481470" - -- name: Delete GitLab Project - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - name: my_first_project - state: absent - delegate_to: localhost - -- name: Create GitLab Project in group Ansible - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: my_first_project - group: ansible - issues_enabled: False - merge_method: rebase_merge - wiki_enabled: True - snippets_enabled: True - import_url: http://git.example.com/example/lab.git - initialize_with_readme: true - state: present - delegate_to: localhost -''' - -RETURN = r''' -msg: - description: Success or failure message. - returned: always - type: str - sample: "Success" - -result: - description: JSON-parsed response from the server. - returned: always - type: dict - -error: - description: The error message returned by the GitLab API. - returned: failed - type: str - sample: "400: path is already in use" - -project: - description: API object. - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication - - -class GitLabProject(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.projectObject = None - - ''' - @param project_name Name of the project - @param namespace Namespace Object (User or Group) - @param options Options of the project - ''' - def createOrUpdateProject(self, project_name, namespace, options): - changed = False - project_options = { - 'name': project_name, - 'description': options['description'], - 'issues_enabled': options['issues_enabled'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'merge_method': options['merge_method'], - 'wiki_enabled': options['wiki_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'visibility': options['visibility'], - 'lfs_enabled': options['lfs_enabled'], - 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], - 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], - 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], - 'packages_enabled': options['packages_enabled'], - 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'squash_option': options['squash_option'], - 'ci_config_path': options['ci_config_path'], - 'shared_runners_enabled': options['shared_runners_enabled'], - } - # existsProject() has already been called in main(), so projectObject is set when the project exists - if self.projectObject is None: - project_options.update({ - 'path': options['path'], - 'import_url': options['import_url'], - }) - if options['initialize_with_readme']: -
project_options['initialize_with_readme'] = options['initialize_with_readme'] - project_options = self.getOptionsWithValue(project_options) - project = self.createProject(namespace, project_options) - changed = True - else: - changed, project = self.updateProject(self.projectObject, project_options) - - self.projectObject = project - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name) - - try: - project.save() - except Exception as e: - self._module.fail_json(msg="Failed update project: %s " % e) - return True - return False - - ''' - @param namespace Namespace Object (User or Group) - @param arguments Attributes of the project - ''' - def createProject(self, namespace, arguments): - if self._module.check_mode: - return True - - arguments['namespace_id'] = namespace.id - try: - project = self._gitlab.projects.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create project: %s " % to_native(e)) - - return project - - ''' - @param arguments Attributes of the project - ''' - def getOptionsWithValue(self, arguments): - ret_arguments = dict() - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - ret_arguments[arg_key] = arg_value - - return ret_arguments - - ''' - @param project Project Object - @param arguments Attributes of the project - ''' - def updateProject(self, project, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(project, arg_key) != arguments[arg_key]: - setattr(project, arg_key, arguments[arg_key]) - changed = True - - return (changed, project) - - def deleteProject(self): - if self._module.check_mode: - return True - - project = self.projectObject - - return project.delete() - - ''' - @param namespace User/Group object - @param name Name of the project - ''' - def existsProject(self, namespace, path): - # When project exists, object will be stored in self.projectObject. 
- project = findProject(self._gitlab, namespace.full_path + '/' + path) - if project: - self.projectObject = project - return True - return False - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_token=dict(type='str', no_log=True), - group=dict(type='str'), - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - initialize_with_readme=dict(type='bool', default=False), - issues_enabled=dict(type='bool', default=True), - merge_requests_enabled=dict(type='bool', default=True), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - wiki_enabled=dict(type='bool', default=True), - snippets_enabled=dict(default=True, type='bool'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - import_url=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - lfs_enabled=dict(default=False, type='bool'), - username=dict(type='str'), - allow_merge_on_skipped_pipeline=dict(type='bool'), - only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), - only_allow_merge_if_pipeline_succeeds=dict(type='bool'), - packages_enabled=dict(type='bool'), - remove_source_branch_after_merge=dict(type='bool'), - squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - ci_config_path=dict(type='str'), - shared_runners_enabled=dict(type='bool'), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ['group', 'username'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token'] - ], - supports_check_mode=True, - ) - - group_identifier = module.params['group'] - project_name = module.params['name'] - project_path = module.params['path'] - project_description = module.params['description'] - initialize_with_readme = module.params['initialize_with_readme'] - issues_enabled = module.params['issues_enabled'] - merge_requests_enabled = module.params['merge_requests_enabled'] - merge_method = module.params['merge_method'] - wiki_enabled = module.params['wiki_enabled'] - snippets_enabled = module.params['snippets_enabled'] - visibility = module.params['visibility'] - import_url = module.params['import_url'] - state = module.params['state'] - lfs_enabled = module.params['lfs_enabled'] - username = module.params['username'] - allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] - only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] - only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] - packages_enabled = module.params['packages_enabled'] - remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - squash_option = module.params['squash_option'] - ci_config_path = module.params['ci_config_path'] - shared_runners_enabled = module.params['shared_runners_enabled'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlabAuthentication(module) - - # Set project_path to project_name if it is empty. 
- if project_path is None: - project_path = project_name.replace(" ", "_") - - gitlab_project = GitLabProject(module, gitlab_instance) - - namespace = None - namespace_id = None - if group_identifier: - group = findGroup(gitlab_instance, group_identifier) - if group is None: - module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) - - namespace_id = group.id - else: - if username: - namespace = gitlab_instance.namespaces.list(search=username)[0] - else: - namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] - namespace_id = namespace.id - - if not namespace_id: - module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace") - - try: - namespace = gitlab_instance.namespaces.get(namespace_id) - except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e)) - - if not namespace: - module.fail_json(msg="Failed to find the namespace for the project") - project_exists = gitlab_project.existsProject(namespace, project_path) - - if state == 'absent': - if project_exists: - gitlab_project.deleteProject() - module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) - module.exit_json(changed=False, msg="Project deleted or does not exists") - - if state == 'present': - - if gitlab_project.createOrUpdateProject(project_name, namespace, { - "path": project_path, - "description": project_description, - "initialize_with_readme": initialize_with_readme, - "issues_enabled": issues_enabled, - "merge_requests_enabled": merge_requests_enabled, - "merge_method": merge_method, - "wiki_enabled": wiki_enabled, - "snippets_enabled": snippets_enabled, - "visibility": visibility, - "import_url": import_url, - "lfs_enabled": lfs_enabled, - "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, - "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, - "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, - "packages_enabled": packages_enabled, - "remove_source_branch_after_merge": remove_source_branch_after_merge, - "squash_option": squash_option, - "ci_config_path": ci_config_path, - "shared_runners_enabled": shared_runners_enabled, - }): - - module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs) - module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py deleted file mode 100644 index 89ac96dc12..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ /dev/null @@ -1,469 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Sergey Mikhaltsov -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gitlab_project_members -short_description: Manage project members on GitLab Server -version_added: 2.2.0 -description: - - This module allows to add and remove members to/from a project, or change a member's access level in a 
project on GitLab. -author: - - Sergey Mikhaltsov (@metanovii) - - Zainab Alsaffar (@zanssa) -requirements: - - python-gitlab python module <= 1.15.0 - - owner or maintainer rights to project on the GitLab server -options: - api_token: - description: - - A personal access token to authenticate with the GitLab API. - required: true - type: str - validate_certs: - description: - - Whether or not to validate TLS/SSL certificates when supplying an HTTPS endpoint. - - Should only be set to C(false) if you can guarantee that you are talking to the correct server - and no man-in-the-middle attack can happen. - default: true - type: bool - api_username: - description: - - The username to use for authentication against the API. - type: str - api_password: - description: - - The password to use for authentication against the API. - type: str - api_url: - description: - - The resolvable endpoint for the API. - type: str - project: - description: - - The name (or full path) of the GitLab project the member is added to/removed from. - required: true - type: str - gitlab_user: - description: - - A username or a list of usernames to add to/remove from the GitLab project. - - Mutually exclusive with I(gitlab_users_access). - type: list - elements: str - access_level: - description: - - The access level for the user. - - Required if I(state=present). - type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] - gitlab_users_access: - description: - - Provide a list of user-to-access-level mappings. - - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the project. - type: list - elements: dict - suboptions: - name: - description: A username to add to/remove from the GitLab project. - type: str - required: true - access_level: - description: - - The access level for the user. - - Required if I(state=present). - type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] - required: true - version_added: 3.7.0 - state: - description: - - State of the member in the project. - - On C(present), it adds a user to a GitLab project. - - On C(absent), it removes a user from a GitLab project. - choices: ['present', 'absent'] - default: 'present' - type: str - purge_users: - description: - - Adds/removes users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted, orphaned members are not purged. - - Is only used when I(state=present). - type: list - elements: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] - version_added: 3.7.0 -notes: - - Supports C(check_mode). -'''
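The string access levels above are translated by the module into python-gitlab's integer constants (see the access_level_int mapping further below) before any API call is made. A minimal sketch of the underlying calls, with placeholder URL, token, project, and username:

```python
# Hedged sketch of the python-gitlab calls gitlab_project_members wraps;
# all literal values are placeholders.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="access_token")
project = gl.projects.get("projectname")
user = gl.users.list(username="username")[0]

# 'developer' in the module maps to gitlab.DEVELOPER_ACCESS (30); guest,
# reporter, and maintainer map to the other gitlab.*_ACCESS constants.
project.members.create({
    "user_id": user.id,
    "access_level": gitlab.DEVELOPER_ACCESS,
})
```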
- -EXAMPLES = r''' -- name: Add a user to a GitLab Project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - validate_certs: True - project: projectname - gitlab_user: username - access_level: developer - state: present - -- name: Remove a user from a GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - validate_certs: False - project: projectname - gitlab_user: username - state: absent - -- name: Add a list of users to a GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_user: - - user1 - - user2 - access_level: developer - state: present - -- name: Add a list of users with dedicated access levels to a GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_users_access: - - name: user1 - access_level: developer - - name: user2 - access_level: maintainer - state: present - -- name: Add a user, remove all others which might be on this access level - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_user: username - access_level: developer - purge_users: developer - state: present - -- name: Remove a list of users with dedicated access levels from a GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_users_access: - - name: user1 - access_level: developer - - name: user2 - access_level: maintainer - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication - -import traceback - -try: - import gitlab - HAS_PY_GITLAB = True -except ImportError: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_PY_GITLAB = False - - -class GitLabProjectMembers(object): - def __init__(self, module, gl): - self._module = module - self._gitlab = gl - - def get_project(self, project_name): - try: - project_exists = self._gitlab.projects.get(project_name) - return project_exists.id - except gitlab.exceptions.GitlabGetError as e: - project_exists = self._gitlab.projects.list(search=project_name) - if project_exists: - return project_exists[0].id - - def get_user_id(self, gitlab_user): - user_exists = self._gitlab.users.list(username=gitlab_user) - if user_exists: - return user_exists[0].id - - # get all members in a project - def get_members_in_a_project(self, gitlab_project_id): - project = self._gitlab.projects.get(gitlab_project_id) - return project.members.list(all=True) - - # get single member in a project by user name - def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id): - member = None - project = self._gitlab.projects.get(gitlab_project_id) - try: - member = project.members.get(gitlab_user_id) - if member: - return member - except gitlab.exceptions.GitlabGetError as e: - return None - - # check if the user is a member of the project - def is_user_a_member(self, members, gitlab_user_id): - for member in members: - if member.id == gitlab_user_id: - return True - return False - - # add
user to a project - def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): - project = self._gitlab.projects.get(gitlab_project_id) - add_member = project.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - # remove user from a project - def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): - project = self._gitlab.projects.get(gitlab_project_id) - project.members.delete(gitlab_user_id) - - # get user's access level - def get_user_access_level(self, members, gitlab_user_id): - for member in members: - if member.id == gitlab_user_id: - return member.access_level - - # update user's access level in a project - def update_user_access_level(self, members, gitlab_user_id, access_level): - for member in members: - if member.id == gitlab_user_id: - member.access_level = access_level - member.save() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_token=dict(type='str', required=True, no_log=True), - project=dict(type='str', required=True), - gitlab_user=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), - purge_users=dict(type='list', elements='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer']), - gitlab_users_access=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', required=True), - access_level=dict(type='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer'], required=True), - ) - ), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ['gitlab_user', 'gitlab_users_access'], - ['access_level', 'gitlab_users_access'], - ], - required_together=[ - ['api_username', 'api_password'], - ['gitlab_user', 'access_level'], - ], - required_one_of=[ - ['api_username', 'api_token'], - ['gitlab_user', 'gitlab_users_access'], - ], - required_if=[ - ['state', 'present', ['access_level', 'gitlab_users_access'], True], - ], - supports_check_mode=True, - ) - - if not HAS_PY_GITLAB: - module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) - - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - } - - gitlab_project = module.params['project'] - state = module.params['state'] - access_level = module.params['access_level'] - purge_users = module.params['purge_users'] - - if purge_users: - purge_users = [access_level_int[level] for level in purge_users] - - # connect to gitlab server - gl = gitlabAuthentication(module) - - project = GitLabProjectMembers(module, gl) - - gitlab_project_id = project.get_project(gitlab_project) - - # project doesn't exist - if not gitlab_project_id: - module.fail_json(msg="project '%s' not found." 
% gitlab_project) - - members = [] - if module.params['gitlab_user'] is not None: - gitlab_users_access = [] - gitlab_users = module.params['gitlab_user'] - for gl_user in gitlab_users: - gitlab_users_access.append( - {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) - elif module.params['gitlab_users_access'] is not None: - gitlab_users_access = module.params['gitlab_users_access'] - for user_level in gitlab_users_access: - user_level['access_level'] = access_level_int[user_level['access_level']] - - if len(gitlab_users_access) == 1 and not purge_users: - # only single user given - members = [project.get_member_in_a_project( - gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))] - if members[0] is None: - members = [] - elif len(gitlab_users_access) > 1 or purge_users: - # list of users given - members = project.get_members_in_a_project(gitlab_project_id) - else: - module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", - result_data=[]) - - changed = False - error = False - changed_users = [] - changed_data = [] - - for gitlab_user in gitlab_users_access: - gitlab_user_id = project.get_user_id(gitlab_user['name']) - - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']}) - else: - error = True - changed_users.append("user '%s' not found." % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "user '%s' not found." % gitlab_user['name']}) - continue - - is_user_a_member = project.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the project - if not is_user_a_member: - if state == 'present': - # add user to the project - try: - if not module.check_mode: - project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level']) - changed = True - changed_users.append("Successfully added user '%s' to project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully added user '%s' to project" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabCreateError) as e: - error = True - changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) - # state as absent - else: - changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']}) - # in case that a user is a member - else: - if state == 'present': - # compare the access level - user_access_level = project.get_user_access_level(members, gitlab_user_id) - if user_access_level == gitlab_user['access_level']: - changed_users.append("User, '%s', is already a member in the project. 
No change to report" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']}) - else: - # update the access level for the user - try: - if not module.check_mode: - project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) - changed = True - changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabUpdateError) as e: - error = True - changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) - else: - # remove the user from the project - try: - if not module.check_mode: - project.remove_user_from_project(gitlab_user_id, gitlab_project_id) - changed = True - changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabDeleteError) as e: - error = True - changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) - - # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users - if state == 'present' and purge_users: - uppercase_names_in_gitlab_users_access = [] - for name in gitlab_users_access: - uppercase_names_in_gitlab_users_access.append(name['name'].upper()) - - for member in members: - if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: - try: - if not module.check_mode: - project.remove_user_from_project(member.id, gitlab_project_id) - changed = True - changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username) - changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', - 'msg': "Successfully removed user '%s', from project. 
Was not in given list" % member.username}) - except (gitlab.exceptions.GitlabDeleteError) as e: - error = True - changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) - - if len(gitlab_users_access) == 1 and error: - # if single user given and an error occurred return error for list errors will be per user - module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) - elif error: - module.fail_json( - msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) - - module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_project_variable.py b/plugins/modules/source_control/gitlab/gitlab_project_variable.py deleted file mode 100644 index 21821cd495..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_project_variable.py +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gitlab_project_variable -short_description: Creates/updates/deletes GitLab Projects Variables -description: - - When a project variable does not exist, it will be created. - - When a project variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab project, - they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). -author: - - "Markus Bergholz (@markuman)" -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: -- community.general.auth_basic - -options: - state: - description: - - Create or delete project variable. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - api_token: - description: - - GitLab access token with API permissions. - required: true - type: str - project: - description: - - The path and name of the project. - required: true - type: str - purge: - description: - - When set to true, all variables which are not untouched in the task will be deleted. - default: false - type: bool - vars: - description: - - When the list element is a simple key-value pair, masked and protected will be set to false. - - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can - have full control about whether a value should be masked, protected or both. - - Support for protected values requires GitLab >= 9.3. - - Support for masked values requires GitLab >= 11.10. - - A I(value) must be a string or a number. - - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file). - - Field I(environment_scope) must be a string defined by scope environment. - - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). 
- default: {} - type: dict -''' - - -EXAMPLES = ''' -- name: Set or update some CI/CD variables - community.general.gitlab_project_variable: - api_url: https://gitlab.com - api_token: secret_access_token - project: markuman/dotfiles - purge: false - vars: - ACCESS_KEY_ID: abc123 - SECRET_ACCESS_KEY: 321cba - -- name: Set or update some CI/CD variables - community.general.gitlab_project_variable: - api_url: https://gitlab.com - api_token: secret_access_token - project: markuman/dotfiles - purge: false - vars: - ACCESS_KEY_ID: abc123 - SECRET_ACCESS_KEY: - value: 3214cbad - masked: true - protected: true - variable_type: env_var - environment_scope: '*' - -- name: Delete one variable - community.general.gitlab_project_variable: - api_url: https://gitlab.com - api_token: secret_access_token - project: markuman/dotfiles - state: absent - vars: - ACCESS_KEY_ID: abc123 -''' - -RETURN = ''' -project_variable: - description: Four lists of the variablenames which were added, updated, removed or exist. - returned: always - type: dict - contains: - added: - description: A list of variables which were created. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - untouched: - description: A list of variables which exist. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - removed: - description: A list of variables which were deleted. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - updated: - description: A list of variables whose values were changed. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication - - -class GitlabProjectVariables(object): - - def __init__(self, module, gitlab_instance): - self.repo = gitlab_instance - self.project = self.get_project(module.params['project']) - self._module = module - - def get_project(self, project_name): - return self.repo.projects.get(project_name) - - def list_all_project_variables(self): - page_nb = 1 - variables = [] - vars_page = self.project.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.project.variables.list(page=page_nb) - return variables - - def create_variable(self, key, value, masked, protected, variable_type, environment_scope): - if self._module.check_mode: - return - var = { - "key": key, "value": value, - "masked": masked, "protected": protected, - "variable_type": variable_type - } - if environment_scope is not None: - var["environment_scope"] = environment_scope - return self.project.variables.create(var) - - def update_variable(self, key, var, value, masked, protected, variable_type, environment_scope): - if (var.value == value and var.protected == protected and var.masked == masked - and var.variable_type == variable_type - and (var.environment_scope == environment_scope or environment_scope is None)): - return False - - if self._module.check_mode: - 
return True - - if (var.protected == protected and var.masked == masked - and var.variable_type == variable_type - and (var.environment_scope == environment_scope or environment_scope is None)): - var.value = value - var.save() - return True - - self.delete_variable(key) - self.create_variable(key, value, masked, protected, variable_type, environment_scope) - return True - - def delete_variable(self, key): - if self._module.check_mode: - return - return self.project.variables.delete(key) - - -def native_python_main(this_gitlab, purge, var_list, state, module): - - change = False - return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) - - gitlab_keys = this_gitlab.list_all_project_variables() - existing_variables = [x.get_id() for x in gitlab_keys] - - for key in var_list: - - if isinstance(var_list[key], string_types) or isinstance(var_list[key], (integer_types, float)): - value = var_list[key] - masked = False - protected = False - variable_type = 'env_var' - environment_scope = None - elif isinstance(var_list[key], dict): - value = var_list[key].get('value') - masked = var_list[key].get('masked', False) - protected = var_list[key].get('protected', False) - variable_type = var_list[key].get('variable_type', 'env_var') - environment_scope = var_list[key].get('environment_scope') - else: - module.fail_json(msg="value must be of type string, integer or dict") - - if key in existing_variables: - index = existing_variables.index(key) - existing_variables[index] = None - - if state == 'present': - single_change = this_gitlab.update_variable(key, - gitlab_keys[index], - value, masked, - protected, - variable_type, - environment_scope) - change = single_change or change - if single_change: - return_value['updated'].append(key) - else: - return_value['untouched'].append(key) - - elif state == 'absent': - this_gitlab.delete_variable(key) - change = True - return_value['removed'].append(key) - - elif key not in existing_variables and state == 'present': - this_gitlab.create_variable(key, value, masked, protected, variable_type, environment_scope) - change = True - return_value['added'].append(key) - - existing_variables = list(filter(None, existing_variables)) - if purge: - for item in existing_variables: - this_gitlab.delete_variable(item) - change = True - return_value['removed'].append(item) - else: - return_value['untouched'].extend(existing_variables) - - return change, return_value - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update( - api_token=dict(type='str', required=True, no_log=True), - project=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - state=dict(type='str', default="present", choices=["absent", "present"]) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token'] - ], - supports_check_mode=True - ) - - purge = module.params['purge'] - var_list = module.params['vars'] - state = module.params['state'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlabAuthentication(module) - - this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) - - change, return_value = 
native_python_main(this_gitlab, purge, var_list, state, module)
-
-    module.exit_json(changed=change, project_variable=return_value)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/source_control/gitlab/gitlab_protected_branch.py b/plugins/modules/source_control/gitlab/gitlab_protected_branch.py
deleted file mode 100644
index f61f2b9fa1..0000000000
--- a/plugins/modules/source_control/gitlab/gitlab_protected_branch.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: gitlab_protected_branch
-short_description: Mark or unmark existing branches as protected
-version_added: 3.4.0
-description:
-  - Mark or unmark existing branches as protected.
-author:
-  - "Werner Dijkerman (@dj-wasabi)"
-requirements:
-  - python >= 2.7
-  - python-gitlab >= 2.3.0
-extends_documentation_fragment:
-- community.general.auth_basic
-
-options:
-  state:
-    description:
-      - Create or delete the protected branch.
-    default: present
-    type: str
-    choices: ["present", "absent"]
-  api_token:
-    description:
-      - GitLab access token with API permissions.
-    required: true
-    type: str
-  project:
-    description:
-      - The path and name of the project.
-    required: true
-    type: str
-  name:
-    description:
-      - The name of the branch that needs to be protected.
-      - Can make use of a wildcard character, like C(production/*), or be an exact branch name like C(main) or C(develop).
-    required: true
-    type: str
-  merge_access_levels:
-    description:
-      - Access levels allowed to merge.
-    default: maintainer
-    type: str
-    choices: ["maintainer", "developer", "nobody"]
-  push_access_level:
-    description:
-      - Access levels allowed to push.
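Stepping back to the gitlab_project_variable module that ends above: its core is the reconciliation in `native_python_main`, which sorts every variable name into one of four buckets. A pure-Python sketch of that bookkeeping, with plain dicts standing in for the GitLab API objects:

```python
def reconcile(existing, desired, state="present", purge=False):
    """Classify variable names the way native_python_main above does.

    ``existing`` maps names to current values, ``desired`` maps names
    to wanted values. Returns the four lists the module reports.
    """
    result = {"added": [], "updated": [], "removed": [], "untouched": []}
    leftover = set(existing)

    for key, value in desired.items():
        if key in existing:
            leftover.discard(key)
            if state == "absent":
                result["removed"].append(key)
            elif existing[key] != value:
                result["updated"].append(key)
            else:
                result["untouched"].append(key)
        elif state == "present":
            result["added"].append(key)

    # Names that exist only on the server side are purged or reported.
    target = result["removed"] if purge else result["untouched"]
    target.extend(sorted(leftover))
    return result


current = {"ACCESS_KEY_ID": "abc123", "OLD_VAR": "x"}
wanted = {"ACCESS_KEY_ID": "abc123", "SECRET_ACCESS_KEY": "321cba"}
print(reconcile(current, wanted, purge=True))
# {'added': ['SECRET_ACCESS_KEY'], 'updated': [], 'removed': ['OLD_VAR'],
#  'untouched': ['ACCESS_KEY_ID']}
```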
- default: maintainer - type: str - choices: ["maintainer", "developer", "nobody"] -''' - - -EXAMPLES = ''' -- name: Create protected branch on main - community.general.gitlab_protected_branch: - api_url: https://gitlab.com - api_token: secret_access_token - project: "dj-wasabi/collection.general" - name: main - merge_access_levels: maintainer - push_access_level: nobody - -''' - -RETURN = ''' -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.api import basic_auth_argument_spec -from distutils.version import LooseVersion - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication - - -class GitlabProtectedBranch(object): - - def __init__(self, module, project, gitlab_instance): - self.repo = gitlab_instance - self._module = module - self.project = self.get_project(project) - self.ACCESS_LEVEL = { - 'nobody': gitlab.NO_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS - } - - def get_project(self, project_name): - return self.repo.projects.get(project_name) - - def protected_branch_exist(self, name): - try: - return self.project.protectedbranches.get(name) - except Exception as e: - return False - - def create_protected_branch(self, name, merge_access_levels, push_access_level): - if self._module.check_mode: - return True - merge = self.ACCESS_LEVEL[merge_access_levels] - push = self.ACCESS_LEVEL[push_access_level] - self.project.protectedbranches.create({ - 'name': name, - 'merge_access_level': merge, - 'push_access_level': push - }) - - def compare_protected_branch(self, name, merge_access_levels, push_access_level): - configured_merge = self.ACCESS_LEVEL[merge_access_levels] - configured_push = self.ACCESS_LEVEL[push_access_level] - current = self.protected_branch_exist(name=name) - current_merge = current.merge_access_levels[0]['access_level'] - current_push = current.push_access_levels[0]['access_level'] - if current: - if current.name == name and current_merge == configured_merge and current_push == configured_push: - return True - return False - - def delete_protected_branch(self, name): - if self._module.check_mode: - return True - return self.project.protectedbranches.delete(name) - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update( - api_token=dict(type='str', required=True, no_log=True), - project=dict(type='str', required=True), - name=dict(type='str', required=True), - merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), - push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), - state=dict(type='str', default="present", choices=["absent", "present"]), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token'] - ], - supports_check_mode=True - ) - - project = module.params['project'] - name = module.params['name'] - merge_access_levels = module.params['merge_access_levels'] - push_access_level = module.params['push_access_level'] - state = module.params['state'] - - if not HAS_GITLAB_PACKAGE: - 
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    gitlab_version = gitlab.__version__
-    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
-        module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
-                             " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
-
-    gitlab_instance = gitlabAuthentication(module)
-    this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance)
-
-    p_branch = this_gitlab.protected_branch_exist(name=name)
-    if not p_branch and state == "present":
-        this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
-        module.exit_json(changed=True, msg="Created the protected branch.")
-    elif p_branch and state == "present":
-        if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level):
-            this_gitlab.delete_protected_branch(name=name)
-            this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
-            module.exit_json(changed=True, msg="Recreated the protected branch.")
-    elif p_branch and state == "absent":
-        this_gitlab.delete_protected_branch(name=name)
-        module.exit_json(changed=True, msg="Deleted the protected branch.")
-    module.exit_json(changed=False, msg="No changes are needed.")
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py
deleted file mode 100644
index 127cd388f4..0000000000
--- a/plugins/modules/source_control/gitlab/gitlab_runner.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com)
-# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
-# Copyright: (c) 2018, Samy Coenen
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: gitlab_runner
-short_description: Create, modify and delete GitLab Runners
-description:
-  - Register, update and delete runners with the GitLab API.
-  - All operations are performed using the GitLab API v4.
-  - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
-  - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
    U(https://$GITLAB_URL/profile/personal_access_tokens).
-  - A valid registration token is required for registering a new runner.
    To create shared runners, you need to ask your administrator to give you this token.
    It can be found at U(https://$GITLAB_URL/admin/runners/).
-notes:
-  - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required.
-  - Runners need to have unique descriptions.
-author:
-  - Samy Coenen (@SamyCoenen)
-  - Guillaume Martinez (@Lunik)
-requirements:
-  - python >= 2.7
-  - python-gitlab >= 1.5.0
-extends_documentation_fragment:
-- community.general.auth_basic
-
-options:
-  api_token:
-    description:
-      - Your private token to interact with the GitLab API.
-    type: str
-  project:
-    description:
-      - ID or full path of the project in the form of group/name.
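The gitlab_protected_branch `main()` above gates execution on the installed python-gitlab version using distutils' `LooseVersion`. Since distutils is deprecated, here is a sketch of the same gate using only tuple comparison, assuming plain numeric version strings (pre-release suffixes would still need a real parser):

```python
def version_at_least(installed, required):
    """True if dotted version ``installed`` >= ``required``.

    Comparing integer tuples avoids the classic string-comparison
    trap where "2.10" sorts before "2.3" lexically.
    """
    def as_tuple(version):
        return tuple(int(part) for part in version.split("."))
    return as_tuple(installed) >= as_tuple(required)


assert version_at_least("2.10.1", "2.3.0")      # 10 > 3 numerically
assert not version_at_least("1.15.0", "2.3.0")
```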
- type: str - version_added: '3.7.0' - description: - description: - - The unique name of the runner. - required: True - type: str - aliases: - - name - state: - description: - - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name. - required: False - default: present - choices: ["present", "absent"] - type: str - registration_token: - description: - - The registration token is used to register new runners. - - Required if I(state) is C(present). - type: str - owned: - description: - - Searches only runners available to the user when searching for existing, when false admin token required. - default: no - type: bool - version_added: 2.0.0 - active: - description: - - Define if the runners is immediately active after creation. - required: False - default: yes - type: bool - locked: - description: - - Determines if the runner is locked or not. - required: False - default: False - type: bool - access_level: - description: - - Determines if a runner can pick up jobs only from protected branches. - - If set to C(ref_protected), runner can pick up jobs only from protected branches. - - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches. - required: False - default: ref_protected - choices: ["ref_protected", "not_protected"] - type: str - maximum_timeout: - description: - - The maximum time that a runner has to complete a specific job. - required: False - default: 3600 - type: int - run_untagged: - description: - - Run untagged jobs or not. - required: False - default: yes - type: bool - tag_list: - description: The tags that apply to the runner. - required: False - default: [] - type: list - elements: str -''' - -EXAMPLES = ''' -- name: "Register runner" - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - registration_token: 4gfdsg345 - description: Docker Machine t1 - state: present - active: True - tag_list: ['docker'] - run_untagged: False - locked: False - -- name: "Delete runner" - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - description: Docker Machine t1 - state: absent - -- name: Delete an owned runner as a non-admin - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - description: Docker Machine t1 - owned: yes - state: absent - -- name: Register runner for a specific project - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - registration_token: 4gfdsg345 - description: MyProject runner - state: present - project: mygroup/mysubgroup/myproject -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -runner: - description: API object - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import 
to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication - -try: - cmp -except NameError: - def cmp(a, b): - return (a > b) - (a < b) - - -class GitLabRunner(object): - def __init__(self, module, gitlab_instance, project=None): - self._module = module - self._gitlab = gitlab_instance - # Whether to operate on GitLab-instance-wide or project-wide runners - # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 - # for group runner token access - self._runners_endpoint = project.runners if project else gitlab_instance.runners - self.runnerObject = None - - def createOrUpdateRunner(self, description, options): - changed = False - - # Because we have already call userExists in main() - if self.runnerObject is None: - runner = self.createRunner({ - 'description': description, - 'active': options['active'], - 'token': options['registration_token'], - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'tag_list': options['tag_list']}) - changed = True - else: - changed, runner = self.updateRunner(self.runnerObject, { - 'active': options['active'], - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'access_level': options['access_level'], - 'tag_list': options['tag_list']}) - - self.runnerObject = runner - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description) - - try: - runner.save() - except Exception as e: - self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) - return True - else: - return False - - ''' - @param arguments Attributes of the runner - ''' - def createRunner(self, arguments): - if self._module.check_mode: - return True - - try: - runner = self._runners_endpoint.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) - - return runner - - ''' - @param runner Runner object - @param arguments Attributes of the runner - ''' - def updateRunner(self, runner, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if isinstance(arguments[arg_key], list): - list1 = getattr(runner, arg_key) - list1.sort() - list2 = arguments[arg_key] - list2.sort() - if cmp(list1, list2): - setattr(runner, arg_key, arguments[arg_key]) - changed = True - else: - if getattr(runner, arg_key) != arguments[arg_key]: - setattr(runner, arg_key, arguments[arg_key]) - changed = True - - return (changed, runner) - - ''' - @param description Description of the runner - ''' - def findRunner(self, description, owned=False): - if owned: - runners = self._runners_endpoint.list(as_list=False) - else: - runners = self._runners_endpoint.all(as_list=False) - - for runner in runners: - # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner - # object, so we need to handle both - if hasattr(runner, "description"): - if (runner.description == description): - return self._runners_endpoint.get(runner.id) - else: - if (runner['description'] == description): - return self._runners_endpoint.get(runner['id']) - - ''' - @param description Description of the runner - ''' - def existsRunner(self, description, owned=False): - # When runner exists, object will be stored in self.runnerObject. 
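`updateRunner` above decides whether a list attribute changed by sorting both sides in place and comparing them with a `cmp()` polyfill; note that `list1.sort()` mutates the attribute it just read off the runner object. A minimal sketch of the same order-insensitive check without the mutation:

```python
def lists_differ(current, wanted):
    """Order-insensitive inequality test for runner tag lists.

    sorted() returns copies, so neither input list is modified --
    unlike the in-place list1.sort() call in updateRunner above.
    """
    return sorted(current) != sorted(wanted)


assert not lists_differ(["docker", "linux"], ["linux", "docker"])
assert lists_differ(["docker"], ["docker", "linux"])
```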
- runner = self.findRunner(description, owned) - - if runner: - self.runnerObject = runner - return True - return False - - def deleteRunner(self): - if self._module.check_mode: - return True - - runner = self.runnerObject - - return runner.delete() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_token=dict(type='str', no_log=True), - description=dict(type='str', required=True, aliases=["name"]), - active=dict(type='bool', default=True), - owned=dict(type='bool', default=False), - tag_list=dict(type='list', elements='str', default=[]), - run_untagged=dict(type='bool', default=True), - locked=dict(type='bool', default=False), - access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), - maximum_timeout=dict(type='int', default=3600), - registration_token=dict(type='str', no_log=True), - project=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token'], - ], - required_if=[ - ('state', 'present', ['registration_token']), - ], - supports_check_mode=True, - ) - - state = module.params['state'] - owned = module.params['owned'] - runner_description = module.params['description'] - runner_active = module.params['active'] - tag_list = module.params['tag_list'] - run_untagged = module.params['run_untagged'] - runner_locked = module.params['locked'] - access_level = module.params['access_level'] - maximum_timeout = module.params['maximum_timeout'] - registration_token = module.params['registration_token'] - project = module.params['project'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlabAuthentication(module) - gitlab_project = None - if project: - try: - gitlab_project = gitlab_instance.projects.get(project) - except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg='No such a project %s' % project, exception=to_native(e)) - - gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project) - runner_exists = gitlab_runner.existsRunner(runner_description, owned) - - if state == 'absent': - if runner_exists: - gitlab_runner.deleteRunner() - module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description) - else: - module.exit_json(changed=False, msg="Runner deleted or does not exists") - - if state == 'present': - if gitlab_runner.createOrUpdateRunner(runner_description, { - "active": runner_active, - "tag_list": tag_list, - "run_untagged": run_untagged, - "locked": runner_locked, - "access_level": access_level, - "maximum_timeout": maximum_timeout, - "registration_token": registration_token}): - module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs, - msg="Successfully created or updated the runner %s" % runner_description) - else: - module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs, - msg="No need to update the runner %s" % runner_description) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py deleted file mode 100644 index c586cafd60..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ 
/dev/null @@ -1,691 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Lennert Mertens (lennert@nubera.be) -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gitlab_user -short_description: Creates/updates/deletes/blocks/unblocks GitLab Users -description: - - When the user does not exist in GitLab, it will be created. - - When the user exists and state=absent, the user will be deleted. - - When the user exists and state=blocked, the user will be blocked. - - When changes are made to user, the user will be updated. -notes: - - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user. -author: - - Werner Dijkerman (@dj-wasabi) - - Guillaume Martinez (@Lunik) - - Lennert Mertens (@LennertMertens) - - Stef Graces (@stgrace) -requirements: - - python >= 2.7 - - python-gitlab python module - - administrator rights on the GitLab server -extends_documentation_fragment: -- community.general.auth_basic - -options: - api_token: - description: - - GitLab token for logging in. - type: str - name: - description: - - Name of the user you want to create. - - Required only if C(state) is set to C(present). - type: str - username: - description: - - The username of the user. - required: true - type: str - password: - description: - - The password of the user. - - GitLab server enforces minimum password length to 8, set this value with 8 or more characters. - - Required only if C(state) is set to C(present). - type: str - reset_password: - description: - - Whether the user can change its password or not. - default: false - type: bool - version_added: 3.3.0 - email: - description: - - The email that belongs to the user. - - Required only if C(state) is set to C(present). - type: str - sshkey_name: - description: - - The name of the SSH public key. - type: str - sshkey_file: - description: - - The SSH public key itself. - type: str - sshkey_expires_at: - description: - - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ). - - This is only used when adding new SSH public keys. - type: str - version_added: 3.1.0 - group: - description: - - Id or Full path of parent group in the form of group/name. - - Add user as a member to this group. - type: str - access_level: - description: - - The access level to the group. One of the following can be used. - - guest - - reporter - - developer - - master (alias for maintainer) - - maintainer - - owner - default: guest - type: str - choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] - state: - description: - - Create, delete or block a user. - default: present - type: str - choices: ["present", "absent", "blocked", "unblocked"] - confirm: - description: - - Require confirmation. - type: bool - default: yes - isadmin: - description: - - Grant admin privileges to the user. - type: bool - default: no - external: - description: - - Define external parameter for this user. - type: bool - default: no - identities: - description: - - List of identities to be added/updated for this user. - - To remove all other identities from this user, set I(overwrite_identities=true). 
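The I(identities)/I(overwrite_identities) pair described above boils down to simple set arithmetic. A sketch of the implied plan, treating each identity as a C((provider, extern_uid)) tuple (the helper name `plan_identities` is illustrative, not part of the module):

```python
def plan_identities(current, wanted, overwrite=False):
    """Work out identity changes as the option above describes.

    ``to_add`` is whatever is missing from the user; ``to_remove``
    is only non-empty when overwrite is enabled, in which case every
    identity not listed by the caller is dropped.
    """
    current, wanted = set(current), set(wanted)
    to_add = wanted - current
    to_remove = current - wanted if overwrite else set()
    return to_add, to_remove


have = {("Keycloak", "f278f95c"), ("ldap", "uid=old")}
want = {("Keycloak", "f278f95c")}
print(plan_identities(have, want, overwrite=True))
# (set(), {('ldap', 'uid=old')})
```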
- type: list - elements: dict - suboptions: - provider: - description: - - The name of the external identity provider - type: str - extern_uid: - description: - - User ID for external identity. - type: str - version_added: 3.3.0 - overwrite_identities: - description: - - Overwrite identities with identities added in this module. - - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. - - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. - type: bool - default: false - version_added: 3.3.0 -''' - -EXAMPLES = ''' -- name: "Delete GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - username: myusername - state: absent - -- name: "Create GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: My Name - username: myusername - password: mysecretpassword - email: me@example.com - sshkey_name: MySSH - sshkey_file: ssh-rsa AAAAB3NzaC1yc... - state: present - group: super_group/mon_group - access_level: owner - -- name: "Create GitLab User using external identity provider" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - validate_certs: True - api_token: "{{ access_token }}" - name: My Name - username: myusername - password: mysecretpassword - email: me@example.com - identities: - - provider: Keycloak - extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc - state: present - group: super_group/mon_group - access_level: owner - -- name: "Block GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - username: myusername - state: blocked - -- name: "Unblock GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - username: myusername - state: unblocked -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -user: - description: API object - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication - - -class GitLabUser(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.userObject = None - self.ACCESS_LEVEL = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'master': gitlab.MAINTAINER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS} - - ''' - @param username Username of the user - @param options User options - ''' - def createOrUpdateUser(self, 
username, options): - changed = False - potentionally_changed = False - - # Because we have already call userExists in main() - if self.userObject is None: - user = self.createUser({ - 'name': options['name'], - 'username': username, - 'password': options['password'], - 'reset_password': options['reset_password'], - 'email': options['email'], - 'skip_confirmation': not options['confirm'], - 'admin': options['isadmin'], - 'external': options['external'], - 'identities': options['identities'], - }) - changed = True - else: - changed, user = self.updateUser( - self.userObject, { - # add "normal" parameters here, put uncheckable - # params in the dict below - 'name': {'value': options['name']}, - 'email': {'value': options['email']}, - - # note: for some attributes like this one the key - # from reading back from server is unfortunately - # different to the one needed for pushing/writing, - # in that case use the optional setter key - 'is_admin': { - 'value': options['isadmin'], 'setter': 'admin' - }, - 'external': {'value': options['external']}, - 'identities': {'value': options['identities']}, - }, - { - # put "uncheckable" params here, this means params - # which the gitlab does accept for setting but does - # not return any information about it - 'skip_reconfirmation': {'value': not options['confirm']}, - 'password': {'value': options['password']}, - 'reset_password': {'value': options['reset_password']}, - 'overwrite_identities': {'value': options['overwrite_identities']}, - } - ) - - # note: as we unfortunately have some uncheckable parameters - # where it is not possible to determine if the update - # changed something or not, we must assume here that a - # changed happend and that an user object update is needed - potentionally_changed = True - - # Assign ssh keys - if options['sshkey_name'] and options['sshkey_file']: - key_changed = self.addSshKeyToUser(user, { - 'name': options['sshkey_name'], - 'file': options['sshkey_file'], - 'expires_at': options['sshkey_expires_at']}) - changed = changed or key_changed - - # Assign group - if options['group_path']: - group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level']) - changed = changed or group_changed - - self.userObject = user - if (changed or potentionally_changed) and not self._module.check_mode: - try: - user.save() - except Exception as e: - self._module.fail_json(msg="Failed to update user: %s " % to_native(e)) - - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username) - return True - else: - return False - - ''' - @param group User object - ''' - def getUserId(self, user): - if user is not None: - return user.id - return None - - ''' - @param user User object - @param sshkey_name Name of the ssh key - ''' - def sshKeyExists(self, user, sshkey_name): - keyList = map(lambda k: k.title, user.keys.list()) - - return sshkey_name in keyList - - ''' - @param user User object - @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""} - ''' - def addSshKeyToUser(self, user, sshkey): - if not self.sshKeyExists(user, sshkey['name']): - if self._module.check_mode: - return True - - try: - parameter = { - 'title': sshkey['name'], - 'key': sshkey['file'], - } - if sshkey['expires_at'] is not None: - parameter['expires_at'] = sshkey['expires_at'] - user.keys.create(parameter) - except gitlab.exceptions.GitlabCreateError as e: - self._module.fail_json(msg="Failed to assign sshkey to user: %s" % 
to_native(e)) - return True - return False - - ''' - @param group Group object - @param user_id Id of the user to find - ''' - def findMember(self, group, user_id): - try: - member = group.members.get(user_id) - except gitlab.exceptions.GitlabGetError: - return None - return member - - ''' - @param group Group object - @param user_id Id of the user to check - ''' - def memberExists(self, group, user_id): - member = self.findMember(group, user_id) - - return member is not None - - ''' - @param group Group object - @param user_id Id of the user to check - @param access_level GitLab access_level to check - ''' - def memberAsGoodAccessLevel(self, group, user_id, access_level): - member = self.findMember(group, user_id) - - return member.access_level == access_level - - ''' - @param user User object - @param group_path Complete path of the Group including parent group path. / - @param access_level GitLab access_level to assign - ''' - def assignUserToGroup(self, user, group_identifier, access_level): - group = findGroup(self._gitlab, group_identifier) - - if self._module.check_mode: - return True - - if group is None: - return False - - if self.memberExists(group, self.getUserId(user)): - member = self.findMember(group, self.getUserId(user)) - if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]): - member.access_level = self.ACCESS_LEVEL[access_level] - member.save() - return True - else: - try: - group.members.create({ - 'user_id': self.getUserId(user), - 'access_level': self.ACCESS_LEVEL[access_level]}) - except gitlab.exceptions.GitlabCreateError as e: - self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e)) - return True - return False - - ''' - @param user User object - @param arguments User attributes - ''' - def updateUser(self, user, arguments, uncheckable_args): - changed = False - - for arg_key, arg_value in arguments.items(): - av = arg_value['value'] - - if av is not None: - if arg_key == "identities": - changed = self.addIdentities(user, av, uncheckable_args['overwrite_identities']['value']) - - elif getattr(user, arg_key) != av: - setattr(user, arg_value.get('setter', arg_key), av) - changed = True - - for arg_key, arg_value in uncheckable_args.items(): - av = arg_value['value'] - - if av is not None: - setattr(user, arg_value.get('setter', arg_key), av) - - return (changed, user) - - ''' - @param arguments User attributes - ''' - def createUser(self, arguments): - if self._module.check_mode: - return True - - identities = None - if 'identities' in arguments: - identities = arguments['identities'] - del arguments['identities'] - - try: - user = self._gitlab.users.create(arguments) - if identities: - self.addIdentities(user, identities) - - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) - - return user - - ''' - @param user User object - @param identites List of identities to be added/updated - @param overwrite_identities Overwrite user identities with identities passed to this module - ''' - def addIdentities(self, user, identities, overwrite_identities=False): - changed = False - if overwrite_identities: - changed = self.deleteIdentities(user, identities) - - for identity in identities: - if identity not in user.identities: - setattr(user, 'provider', identity['provider']) - setattr(user, 'extern_uid', identity['extern_uid']) - if not self._module.check_mode: - user.save() - changed = True - return changed - - ''' - @param user User object - 
@param identites List of identities to be added/updated - ''' - def deleteIdentities(self, user, identities): - changed = False - for identity in user.identities: - if identity not in identities: - if not self._module.check_mode: - user.identityproviders.delete(identity['provider']) - changed = True - return changed - - ''' - @param username Username of the user - ''' - def findUser(self, username): - users = self._gitlab.users.list(search=username) - for user in users: - if (user.username == username): - return user - - ''' - @param username Username of the user - ''' - def existsUser(self, username): - # When user exists, object will be stored in self.userObject. - user = self.findUser(username) - if user: - self.userObject = user - return True - return False - - ''' - @param username Username of the user - ''' - def isActive(self, username): - user = self.findUser(username) - return user.attributes['state'] == 'active' - - def deleteUser(self): - if self._module.check_mode: - return True - - user = self.userObject - - return user.delete() - - def blockUser(self): - if self._module.check_mode: - return True - - user = self.userObject - - return user.block() - - def unblockUser(self): - if self._module.check_mode: - return True - - user = self.userObject - - return user.unblock() - - -def sanitize_arguments(arguments): - for key, value in list(arguments.items()): - if value is None: - del arguments[key] - return arguments - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_token=dict(type='str', no_log=True), - name=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), - username=dict(type='str', required=True), - password=dict(type='str', no_log=True), - reset_password=dict(type='bool', default=False, no_log=False), - email=dict(type='str'), - sshkey_name=dict(type='str'), - sshkey_file=dict(type='str', no_log=False), - sshkey_expires_at=dict(type='str', no_log=False), - group=dict(type='str'), - access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), - confirm=dict(type='bool', default=True), - isadmin=dict(type='bool', default=False), - external=dict(type='bool', default=False), - identities=dict(type='list', elements='dict'), - overwrite_identities=dict(type='bool', default=False), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_password', 'api_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token'] - ], - supports_check_mode=True, - required_if=( - ('state', 'present', ['name', 'email']), - ) - ) - - user_name = module.params['name'] - state = module.params['state'] - user_username = module.params['username'].lower() - user_password = module.params['password'] - user_reset_password = module.params['reset_password'] - user_email = module.params['email'] - user_sshkey_name = module.params['sshkey_name'] - user_sshkey_file = module.params['sshkey_file'] - user_sshkey_expires_at = module.params['sshkey_expires_at'] - group_path = module.params['group'] - access_level = module.params['access_level'] - confirm = module.params['confirm'] - user_isadmin = module.params['isadmin'] - user_external = module.params['external'] - user_identities = module.params['identities'] - overwrite_identities = module.params['overwrite_identities'] - - if not HAS_GITLAB_PACKAGE: - 
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlabAuthentication(module) - - gitlab_user = GitLabUser(module, gitlab_instance) - user_exists = gitlab_user.existsUser(user_username) - if user_exists: - user_is_active = gitlab_user.isActive(user_username) - else: - user_is_active = False - - if state == 'absent': - if user_exists: - gitlab_user.deleteUser() - module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username) - else: - module.exit_json(changed=False, msg="User deleted or does not exists") - - if state == 'blocked': - if user_exists and user_is_active: - gitlab_user.blockUser() - module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username) - else: - module.exit_json(changed=False, msg="User already blocked or does not exists") - - if state == 'unblocked': - if user_exists and not user_is_active: - gitlab_user.unblockUser() - module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username) - else: - module.exit_json(changed=False, msg="User is not blocked or does not exists") - - if state == 'present': - if gitlab_user.createOrUpdateUser(user_username, { - "name": user_name, - "password": user_password, - "reset_password": user_reset_password, - "email": user_email, - "sshkey_name": user_sshkey_name, - "sshkey_file": user_sshkey_file, - "sshkey_expires_at": user_sshkey_expires_at, - "group_path": group_path, - "access_level": access_level, - "confirm": confirm, - "isadmin": user_isadmin, - "external": user_external, - "identities": user_identities, - "overwrite_identities": overwrite_identities}): - module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs) - else: - module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/hg.py b/plugins/modules/source_control/hg.py deleted file mode 100644 index 572b036e1f..0000000000 --- a/plugins/modules/source_control/hg.py +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Yeukhon Wong -# Copyright: (c) 2014, Nate Coraor -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: hg -short_description: Manages Mercurial (hg) repositories -description: - - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. -author: "Yeukhon Wong (@yeukhon)" -options: - repo: - description: - - The repository address. - required: yes - aliases: [ name ] - type: str - dest: - description: - - Absolute path of where the repository should be cloned to. - This parameter is required, unless clone and update are set to no - type: path - revision: - description: - - Equivalent C(-r) option in hg command which could be the changeset, revision number, - branch name or even tag. - aliases: [ version ] - type: str - force: - description: - - Discards uncommitted changes. Runs C(hg update -C). Prior to - 1.9, the default was `yes`. - type: bool - default: 'no' - purge: - description: - - Deletes untracked files. Runs C(hg purge). 
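The purge option above maps onto Mercurial's bundled purge extension, which the module enables per invocation via C(--config extensions.purge=) rather than requiring it in the user's hgrc. A sketch of that invocation pattern with `subprocess`, assuming an `hg` binary on PATH:

```python
import subprocess

def hg_purge(repo_dir, dry_run=True, hg_path="hg"):
    """Delete (or, with dry_run, just list) untracked files.

    Enables the purge extension for this one command via --config,
    the same trick the hg module uses; --print only reports what
    would be removed.
    """
    args = [hg_path, "purge", "--config", "extensions.purge=", "-R", repo_dir]
    if dry_run:
        args.append("--print")
    proc = subprocess.run(args, capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(proc.stderr.strip())
    return proc.stdout.splitlines()
```

Running with `dry_run=True` first mirrors the module's own two-step purge: list untracked files, then delete only if any were found.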
- type: bool - default: 'no' - update: - description: - - If C(no), do not retrieve new revisions from the origin repository - type: bool - default: 'yes' - clone: - description: - - If C(no), do not clone the repository if it does not exist locally. - type: bool - default: 'yes' - executable: - description: - - Path to hg executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - type: str -notes: - - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." - - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, - if the underlying system still uses a Python version below 2.7.9, you will have issues checking out - bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). -''' - -EXAMPLES = ''' -- name: Ensure the current working copy is inside the stable branch and deletes untracked files if any. - community.general.hg: - repo: https://bitbucket.org/user/repo1 - dest: /home/user/repo1 - revision: stable - purge: yes - -- name: Get information about the repository whether or not it has already been cloned locally. - community.general.hg: - repo: git://bitbucket.org/user/repo - dest: /srv/checkout - clone: no - update: no -''' - -import os - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -class Hg(object): - def __init__(self, module, dest, repo, revision, hg_path): - self.module = module - self.dest = dest - self.repo = repo - self.revision = revision - self.hg_path = hg_path - - def _command(self, args_list): - (rc, out, err) = self.module.run_command([self.hg_path] + args_list) - return (rc, out, err) - - def _list_untracked(self): - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] - return self._command(args) - - def get_revision(self): - """ - hg id -b -i -t returns a string in the format: - "[+] " - This format lists the state of the current working copy, - and indicates whether there are uncommitted changes by the - plus sign. Otherwise, the sign is omitted. 
- - Read the full description via hg id --help - """ - (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - else: - return to_native(out).strip('\n') - - def get_remote_revision(self): - (rc, out, err) = self._command(['id', self.repo]) - if rc != 0: - self.module.fail_json(msg=err) - else: - return to_native(out).strip('\n') - - def has_local_mods(self): - now = self.get_revision() - if '+' in now: - return True - else: - return False - - def discard(self): - before = self.has_local_mods() - if not before: - return False - - args = ['update', '-C', '-R', self.dest, '-r', '.'] - (rc, out, err) = self._command(args) - if rc != 0: - self.module.fail_json(msg=err) - - after = self.has_local_mods() - if before != after and not after: # no more local modification - return True - - def purge(self): - # before purge, find out if there are any untracked files - (rc1, out1, err1) = self._list_untracked() - if rc1 != 0: - self.module.fail_json(msg=err1) - - # there are some untrackd files - if out1 != '': - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] - (rc2, out2, err2) = self._command(args) - if rc2 != 0: - self.module.fail_json(msg=err2) - return True - else: - return False - - def cleanup(self, force, purge): - discarded = False - purged = False - - if force: - discarded = self.discard() - if purge: - purged = self.purge() - if discarded or purged: - return True - else: - return False - - def pull(self): - return self._command( - ['pull', '-R', self.dest, self.repo]) - - def update(self): - if self.revision is not None: - return self._command(['update', '-r', self.revision, '-R', self.dest]) - return self._command(['update', '-R', self.dest]) - - def clone(self): - if self.revision is not None: - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) - return self._command(['clone', self.repo, self.dest]) - - @property - def at_revision(self): - """ - There is no point in pulling from a potentially down/slow remote site - if the desired changeset is already the current changeset. 
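`get_revision` above shells out to C(hg id -b -i -t), and `has_local_mods` keys on the C(+) marker in that output. A sketch of parsing that one-line format, assuming the usual hash/branch/optional-tag layout:

```python
def parse_hg_id(output):
    """Split ``hg id -b -i -t`` output into its parts.

    The first token is the changeset hash, suffixed with '+' when
    the working copy has uncommitted changes -- the convention
    has_local_mods above relies on.
    """
    fields = output.strip().split()
    changeset = fields[0]
    return {
        "changeset": changeset.rstrip("+"),
        "dirty": changeset.endswith("+"),
        "branch": fields[1] if len(fields) > 1 else None,
        "tags": fields[2:],
    }


info = parse_hg_id("a1b2c3d4e5f6+ default tip")
assert info["dirty"] and info["branch"] == "default"
```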
- """ - if self.revision is None or len(self.revision) < 7: - # Assume it's a rev number, tag, or branch - return False - (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - if out.startswith(self.revision): - return True - return False - - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec=dict( - repo=dict(type='str', required=True, aliases=['name']), - dest=dict(type='path'), - revision=dict(type='str', default=None, aliases=['version']), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - update=dict(type='bool', default=True), - clone=dict(type='bool', default=True), - executable=dict(type='str', default=None), - ), - ) - repo = module.params['repo'] - dest = module.params['dest'] - revision = module.params['revision'] - force = module.params['force'] - purge = module.params['purge'] - update = module.params['update'] - clone = module.params['clone'] - hg_path = module.params['executable'] or module.get_bin_path('hg', True) - if dest is not None: - hgrc = os.path.join(dest, '.hg/hgrc') - - # initial states - before = '' - changed = False - cleaned = False - - if not dest and (clone or update): - module.fail_json(msg="the destination directory must be specified unless clone=no and update=no") - - hg = Hg(module, dest, repo, revision, hg_path) - - # If there is no hgrc file, then assume repo is absent - # and perform clone. Otherwise, perform pull and update. - if not clone and not update: - out = hg.get_remote_revision() - module.exit_json(after=out, changed=False) - if not os.path.exists(hgrc): - if clone: - (rc, out, err) = hg.clone() - if rc != 0: - module.fail_json(msg=err) - else: - module.exit_json(changed=False) - elif not update: - # Just return having found a repo already in the dest path - before = hg.get_revision() - elif hg.at_revision: - # no update needed, don't pull - before = hg.get_revision() - - # but force and purge if desired - cleaned = hg.cleanup(force, purge) - else: - # get the current state before doing pulling - before = hg.get_revision() - - # can perform force and purge - cleaned = hg.cleanup(force, purge) - - (rc, out, err) = hg.pull() - if rc != 0: - module.fail_json(msg=err) - - (rc, out, err) = hg.update() - if rc != 0: - module.fail_json(msg=err) - - after = hg.get_revision() - if before != after or cleaned: - changed = True - - module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py deleted file mode 120000 index 8b1b0ecd67..0000000000 --- a/plugins/modules/spectrum_device.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/spectrum_device.py \ No newline at end of file diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py new file mode 100644 index 0000000000..bbc6fe0ba4 --- /dev/null +++ b/plugins/modules/spectrum_device.py @@ -0,0 +1,338 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Renato Orgito +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: spectrum_device +short_description: Creates/deletes devices in CA Spectrum +description: + - This module allows you to create and delete devices in CA Spectrum 
U(https://www.ca.com/us/products/ca-spectrum.html). + - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1. +author: "Renato Orgito (@orgito)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + device: + type: str + aliases: [host, name] + required: true + description: + - IP address of the device. + - If a hostname is given, it is resolved to the IP address. + community: + type: str + description: + - SNMP community used for device discovery. + - Required when O(state=present). + required: true + landscape: + type: str + required: true + description: + - Landscape handle of the SpectroServer to which add or remove the device. + state: + type: str + description: + - On V(present) creates the device when it does not exist. + - On V(absent) removes the device when it exists. + choices: ['present', 'absent'] + default: 'present' + url: + type: str + aliases: [oneclick_url] + required: true + description: + - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]). + url_username: + type: str + aliases: [oneclick_user] + required: true + description: + - Oneclick user name. + url_password: + type: str + aliases: [oneclick_password] + required: true + description: + - Oneclick user password. + use_proxy: + description: + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + type: bool + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + default: true + type: bool + agentport: + type: int + required: false + description: + - UDP port used for SNMP discovery. + default: 161 +notes: + - The devices are created inside the I(Universe) container of the specified landscape. + - All the operations are performed only on the specified landscape. +""" + +EXAMPLES = r""" +- name: Add device to CA Spectrum + local_action: + module: spectrum_device + device: '{{ ansible_host }}' + community: secret + landscape: '0x100000' + oneclick_url: http://oneclick.example.com:8080 + oneclick_user: username + oneclick_password: password + state: present + + +- name: Remove device from CA Spectrum + local_action: + module: spectrum_device + device: '{{ ansible_host }}' + landscape: '{{ landscape_handle }}' + oneclick_url: http://oneclick.example.com:8080 + oneclick_user: username + oneclick_password: password + use_proxy: false + state: absent +""" + +RETURN = r""" +device: + description: Device data when O(state=present). 
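The RETURN sample above pairs a model handle (V(0x1007ab)) with its owning landscape (V(0x100000)); landscapes own 0x100000-wide blocks of the model-handle space, and the module converts between the two with integer arithmetic. A small worked sketch of that relationship (helper names are illustrative):

```python
LANDSCAPE_SPAN = 0x100000

def landscape_of(model_handle):
    """Derive the landscape handle that owns a model handle.

    Rounding the handle down to its 0x100000-aligned block base
    recovers the landscape, the same arithmetic spectrum_device
    applies to the OneClick response.
    """
    mh = int(model_handle, 16)
    return "0x%x" % (mh // LANDSCAPE_SPAN * LANDSCAPE_SPAN)

def landscape_range(landscape):
    """Inclusive-exclusive model-handle bounds used when searching."""
    base = int(landscape, 16)
    return "0x%x" % base, "0x%x" % (base + LANDSCAPE_SPAN)


assert landscape_of("0x1007ab") == "0x100000"
assert landscape_range("0x100000") == ("0x100000", "0x200000")
```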
+  returned: success
+  type: dict
+  sample:
+    {
+      "model_handle": "0x1007ab",
+      "landscape": "0x100000",
+      "address": "10.10.5.1"
+    }
+"""
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
+    headers = {
+        "Content-Type": "application/xml",
+        "Accept": "application/xml"
+    }
+
+    url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+    response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+    if info['status'] == 401:
+        module.fail_json(msg="failed to authenticate to Oneclick server")
+
+    if info['status'] not in (200, 201, 204):
+        module.fail_json(msg=info['msg'])
+
+    return response.read()
+
+
+def post(resource, xml=None):
+    return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+    return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+    try:
+        device_ip = gethostbyname(module.params.get('device'))
+    except gaierror:
+        module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+    return device_ip
+
+
+def get_device(device_ip):
+    """Query OneClick for the device using the IP Address"""
+    resource = '/models'
+    landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+    landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
+
+    xml = """<rs:model-request throttlesize="5"
+    xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+        <rs:target-models>
+            <rs:models-search>
+                <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+                    <action-models>
+                        <filtered-models>
+                            <and>
+                                <equals>
+                                    <model-type>SearchManager</model-type>
+                                </equals>
+                                <greater-than>
+                                    <attribute id="0x129fa">
+                                        <value>{mh_min}</value>
+                                    </attribute>
+                                </greater-than>
+                                <less-than>
+                                    <attribute id="0x129fa">
+                                        <value>{mh_max}</value>
+                                    </attribute>
+                                </less-than>
+                            </and>
+                        </filtered-models>
+                        <action>FIND_DEV_MODELS_BY_IP</action>
+                        <attribute id="0x12d7f">
+                            <value>{search_ip}</value>
+                        </attribute>
+                    </action-models>
+                </rs:search-criteria>
+            </rs:models-search>
+        </rs:target-models>
+        <rs:requested-attribute id="0x12d7f"/>
+    </rs:model-request>
+    """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+    result = post(resource, xml=xml)
+
+    root = ET.fromstring(result)
+
+    if root.get('total-models') == '0':
+        return None
+
+    namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+    # get the first device
+    model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+    if model.get('error'):
+        module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+    # get the attributes
+    model_handle = model.get('mh')
+
+    model_address = model.find('./*[@id="0x12d7f"]').text
+
+    # derive the landscape handle from the model handle of the device
+    model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+    device = dict(
+        model_handle=model_handle,
+        address=model_address,
+        landscape=model_landscape)
+
+    return device
+
+
+def add_device():
+    device_ip = get_ip()
+    device = get_device(device_ip)
+
+    if device:
+        module.exit_json(changed=False, device=device)
+
+    if module.check_mode:
+        device = dict(
+            model_handle=None,
+            address=device_ip,
+            landscape="0x%x" % int(module.params.get('landscape'), 16))
+        module.exit_json(changed=True, device=device)
+
+    resource = 'model?ipaddress=' + device_ip + '&commstring=' + module.params.get('community')
+    resource += '&landscapeid=' + module.params.get('landscape')
+
+    if module.params.get('agentport', None):
+        resource += '&agentport=' + str(module.params.get('agentport', 161))
+
+    result = post(resource)
+    root = ET.fromstring(result)
+
+    if root.get('error') != 'Success':
+        module.fail_json(msg=root.get('error-message'))
+
+    namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+    model = root.find('ca:model', namespace)
+
+    model_handle = model.get('mh')
+    model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+    device = dict(
+        model_handle=model_handle,
+ address=device_ip, + landscape=model_landscape, + ) + + module.exit_json(changed=True, device=device) + + +def remove_device(): + device_ip = get_ip() + device = get_device(device_ip) + + if device is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + resource = '/model/' + device['model_handle'] + result = delete(resource) + + root = ET.fromstring(result) + + namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') + error = root.find('ca:error', namespace).text + + if error != 'Success': + error_message = root.find('ca:error-message', namespace).text + module.fail_json(msg="%s %s" % (error, error_message)) + + module.exit_json(changed=True) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + device=dict(required=True, aliases=['host', 'name']), + landscape=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ? + agentport=dict(type='int', default=161), + url=dict(required=True, aliases=['oneclick_url']), + url_username=dict(required=True, aliases=['oneclick_user']), + url_password=dict(required=True, no_log=True, aliases=['oneclick_password']), + use_proxy=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + ), + required_if=[('state', 'present', ['community'])], + supports_check_mode=True + ) + + if module.params.get('state') == 'present': + add_device() + else: + remove_device() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py deleted file mode 120000 index 31d8c33060..0000000000 --- a/plugins/modules/spectrum_model_attrs.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/spectrum_model_attrs.py \ No newline at end of file diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py new file mode 100644 index 0000000000..acd07042c2 --- /dev/null +++ b/plugins/modules/spectrum_model_attrs.py @@ -0,0 +1,529 @@ +#!/usr/bin/python +# +# Copyright (c) 2021, Tyler Gates +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: spectrum_model_attrs +short_description: Enforce a model's attributes in CA Spectrum +description: + - This module can be used to enforce a model's attributes in CA Spectrum. +version_added: 2.5.0 +author: + - Tyler Gates (@tgates81) +notes: + - Tested on CA Spectrum version 10.4.2.0.189. + - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + url: + description: + - URL of OneClick server. + type: str + required: true + url_username: + description: + - OneClick username. + type: str + required: true + aliases: [username] + url_password: + description: + - OneClick password. + type: str + required: true + aliases: [password] + use_proxy: + description: + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + required: false + type: bool + name: + description: + - Model name. 
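+ # Note: together with O(type) below, the name must resolve to exactly
+ # one model; the module fails when zero or more than one model matches
+ # (see find_model() later in this file).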
+ type: str
+ required: true
+ type:
+ description:
+ - Model type.
+ type: str
+ required: true
+ validate_certs:
+ description:
+ - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint
+ and there is no man-in-the-middle attack happening.
+ type: bool
+ default: true
+ required: false
+ attributes:
+ description:
+ - A list of attribute names and values to enforce.
+ - All values and parameters are case sensitive and must be provided as strings only.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Attribute name OR hex ID.
+ - 'Currently defined names are:'
+ - C(App_Manufacturer) (C(0x230683));
+ - C(CollectionsModelNameString) (C(0x12adb));
+ - C(Condition) (C(0x1000a));
+ - C(Criticality) (C(0x1290c));
+ - C(DeviceType) (C(0x23000e));
+ - C(isManaged) (C(0x1295d));
+ - C(Model_Class) (C(0x11ee8));
+ - C(Model_Handle) (C(0x129fa));
+ - C(Model_Name) (C(0x1006e));
+ - C(Modeltype_Handle) (C(0x10001));
+ - C(Modeltype_Name) (C(0x10000));
+ - C(Network_Address) (C(0x12d7f));
+ - C(Notes) (C(0x11564));
+ - C(ServiceDesk_Asset_ID) (C(0x12db9));
+ - C(TopologyModelNameString) (C(0x129e7));
+ - C(sysDescr) (C(0x10052));
+ - C(sysName) (C(0x10b5b));
+ - C(Vendor_Name) (C(0x11570));
+ - C(Description) (C(0x230017)).
+ - Hex IDs are the direct identifiers in Spectrum and always work.
+ - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> <model name> -> Attributes tab.'
+ type: str
+ required: true
+ value:
+ description:
+ - Attribute value. Empty strings should be V("") or V(null).
+ type: str
+ required: true
+"""
+
+EXAMPLES = r"""
+- name: Enforce maintenance mode for modelxyz01 with a note about why
+ community.general.spectrum_model_attrs:
+ url: "http://oneclick.url.com"
+ username: "{{ oneclick_username }}"
+ password: "{{ oneclick_password }}"
+ name: "modelxyz01"
+ type: "Host_Device"
+ validate_certs: true
+ attributes:
+ - name: "isManaged"
+ value: "false"
+ - name: "Notes"
+ value: >-
+ MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }}
+ by {{ tower_user_name | default(ansible_user_id) }}
+ delegate_to: localhost
+ register: spectrum_model_attrs_status
+"""
+
+RETURN = r"""
+msg:
+ description: Informational message on the job result.
+ type: str
+ returned: always
+ sample: 'Success'
+changed_attrs:
+ description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values.
+ type: dict
+ returned: always
+ sample: {"Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", "isManaged": "true"}
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+from urllib.parse import quote
+import json
+import re
+import xml.etree.ElementTree as ET
+
+
+class spectrum_model_attrs:
+ def __init__(self, module):
+ self.module = module
+ self.url = module.params['url']
+ # If the user did not define a full path to the restful space in the
+ # url param, add what we believe it to be.
+ if not re.search('\\/.+', self.url.split('://')[1]):
+ self.url = "%s/spectrum/restful" % self.url.rstrip('/')
+ # Align these with what is defined in OneClick's UI under:
+ # Locator -> Devices -> By Model Name -> <model name> ->
+ # Attributes tab.
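+ # Map of friendly attribute names to Spectrum hex attribute IDs. Names
+ # missing from this map can still be used: attr_id() returns None for
+ # them and callers fall back to treating the supplied name as a raw
+ # hex ID (the "self.attr_id(name) or name" pattern below).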
+ self.attr_map = dict(App_Manufacturer=hex(0x230683),
+ CollectionsModelNameString=hex(0x12adb),
+ Condition=hex(0x1000a),
+ Criticality=hex(0x1290c),
+ DeviceType=hex(0x23000e),
+ isManaged=hex(0x1295d),
+ Model_Class=hex(0x11ee8),
+ Model_Handle=hex(0x129fa),
+ Model_Name=hex(0x1006e),
+ Modeltype_Handle=hex(0x10001),
+ Modeltype_Name=hex(0x10000),
+ Network_Address=hex(0x12d7f),
+ Notes=hex(0x11564),
+ ServiceDesk_Asset_ID=hex(0x12db9),
+ TopologyModelNameString=hex(0x129e7),
+ sysDescr=hex(0x10052),
+ sysName=hex(0x10b5b),
+ Vendor_Name=hex(0x11570),
+ Description=hex(0x230017))
+ self.search_qualifiers = [
+ "and", "or", "not", "greater-than", "greater-than-or-equals",
+ "less-than", "less-than-or-equals", "equals", "equals-ignore-case",
+ "does-not-equal", "does-not-equal-ignore-case", "has-prefix",
+ "does-not-have-prefix", "has-prefix-ignore-case",
+ "does-not-have-prefix-ignore-case", "has-substring",
+ "does-not-have-substring", "has-substring-ignore-case",
+ "does-not-have-substring-ignore-case", "has-suffix",
+ "does-not-have-suffix", "has-suffix-ignore-case",
+ "does-not-have-suffix-ignore-case", "has-pcre",
+ "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case",
+ "is-derived-from", "not-is-derived-from"]
+
+ self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response")
+
+ self.result = dict(msg="", changed_attrs=dict())
+ self.success_msg = "Success"
+
+ def build_url(self, path):
+ """
+ Build a sane Spectrum restful API URL
+ :param path: The path to append to the restful base
+ :type path: str
+ :returns: Complete restful API URL
+ :rtype: str
+ """
+
+ return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/'))
+
+ def attr_id(self, name):
+ """
+ Get attribute hex ID
+ :param name: The name of the attribute to retrieve the hex ID for
+ :type name: str
+ :returns: Translated hex ID of name, or None if no translation found
+ :rtype: str or None
+ """
+
+ try:
+ return self.attr_map[name]
+ except KeyError:
+ return None
+
+ def attr_name(self, _id):
+ """
+ Get attribute name from hex ID
+ :param _id: The hex ID to lookup a name for
+ :type _id: str
+ :returns: Translated name of hex ID, or None if no translation found
+ :rtype: str or None
+ """
+
+ for name, m_id in list(self.attr_map.items()):
+ if _id == m_id:
+ return name
+ return None
+
+ def urlencode(self, string):
+ """
+ URL encode a string
+ :param string: The string to URL encode
+ :type string: str
+ :returns: URL-encoded version of the supplied string
+ :rtype: str
+ """
+
+ return quote(string, "<>%-_.!*'():?#/@&+,;=")
+
+ def update_model(self, model_handle, attrs):
+ """
+ Update a model's attributes
+ :param model_handle: The model's handle ID
+ :type model_handle: str
+ :param attrs: Model's attributes to update, as {'<name or hex ID>': '<value>'}
+ :type attrs: dict
+ :returns: Nothing; exits on error or updates self.result
+ :rtype: None
+ """
+
+ # Build the update URL
+ update_url = self.build_url("/model/%s?" % model_handle)
+ for name, val in list(attrs.items()):
+ if val is None:
+ # None values should be converted to empty strings
+ val = ""
+ val = self.urlencode(str(val))
+ if not update_url.endswith('?'):
+ update_url += "&"
+
+ update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val)
+
+ # PUT to /model to update the attributes, or fail.
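+ # A hypothetical example of the assembled request, updating two
+ # attributes in one call via repeated attr/val query pairs:
+ # PUT .../model/0x1010e76?attr=0x1295d&val=false&attr=0x11564&val=MM%20note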
+ resp, info = fetch_url(self.module, update_url, method="PUT", + headers={"Content-Type": "application/json", + "Accept": "application/json"}, + use_proxy=self.module.params['use_proxy']) + status_code = info["status"] + if status_code >= 400: + body = info['body'] + else: + body = "" if resp is None else resp.read() + if status_code != 200: + self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body) + self.module.fail_json(**self.result) + + # Load and parse the JSON response and either fail or set results. + json_resp = json.loads(body) + """ + Example success response: + {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}" + Example failure response: + {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}} + """ # noqa + model_resp = json_resp['model-update-response-list']['model-responses']['model'] + if model_resp['@error'] != "Success": + # I'm not 100% confident on the expected failure structure so just + # dump all of ['attribute']. + self.result['msg'] = str(model_resp['attribute']) + self.module.fail_json(**self.result) + + # Should be OK if we get to here, set results. + self.result['msg'] = self.success_msg + self.result['changed_attrs'].update(attrs) + self.result['changed'] = True + + def find_model(self, search_criteria, ret_attrs=None): + """ + Search for a model in /models + :param search_criteria: The XML + :type search_criteria: str + :param ret_attrs: List of attributes by name or ID to return back + (default is Model_Handle) + :type ret_attrs: list + returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val} + rtype: dict + """ + + # If no return attributes were asked for, return Model_Handle. + if ret_attrs is None: + ret_attrs = ['Model_Handle'] + + # Set the XML > tags. If no hex ID + # is found for the name, assume it is already in hex. {name: hex ID} + rqstd_attrs = "" + for ra in ret_attrs: + _id = self.attr_id(ra) or ra + rqstd_attrs += '' % (self.attr_id(ra) or ra) + + # Build the complete XML search query for HTTP POST. + xml = """ + + + + + {0} + + + + {1} + +""".format(search_criteria, rqstd_attrs) + + # POST to /models and fail on errors. + url = self.build_url("/models") + resp, info = fetch_url(self.module, url, data=xml, method="POST", + use_proxy=self.module.params['use_proxy'], + headers={"Content-Type": "application/xml", + "Accept": "application/xml"}) + status_code = info["status"] + if status_code >= 400: + body = info['body'] + else: + body = "" if resp is None else resp.read() + if status_code != 200: + self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body) + self.module.fail_json(**self.result) + + # Parse through the XML response and fail on any detected errors. 
+ root = ET.fromstring(body)
+ total_models = int(root.attrib['total-models'])
+ error = root.attrib['error']
+ model_responses = root.find('ca:model-responses', self.resp_namespace)
+ if total_models < 1:
+ self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria
+ self.module.fail_json(**self.result)
+ elif total_models > 1:
+ self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses,
+ encoding='unicode'))
+ self.module.fail_json(**self.result)
+ if error != "EndOfResults":
+ self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses,
+ encoding='unicode'))
+ self.module.fail_json(**self.result)
+ model = model_responses.find('ca:model', self.resp_namespace)
+ attrs = model.findall('ca:attribute', self.resp_namespace)
+ if not attrs:
+ self.result['msg'] = "No attributes returned."
+ self.module.fail_json(**self.result)
+
+ # XML response should be successful. Iterate and set each returned
+ # attribute ID/name and value for return.
+ ret = dict()
+ for attr in attrs:
+ attr_id = attr.get('id')
+ attr_name = self.attr_name(attr_id)
+ # Note: all values except empty strings (None) are strings only!
+ attr_val = attr.text
+ key = attr_name if attr_name in ret_attrs else attr_id
+ ret[key] = attr_val
+ ret_attrs.remove(key)
+ return ret
+
+ def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
+ """
+ Find a model by name and type
+ :param mname: Model name
+ :type mname: str
+ :param mtype: Model type
+ :type mtype: str
+ :param ret_attrs: List of attributes by name or ID to return back
+ (default is Model_Handle)
+ :type ret_attrs: list
+ returns: find_model(): Dictionary mapping of ret_attrs to values:
+ {ret_attr: ret_val}
+ rtype: dict
+ """
+
+ # If no return attributes were asked for, return Model_Handle.
+ if ret_attrs is None:
+ ret_attrs = ['Model_Handle']
+
+ """This is basically as follows:
+ <filtered-models>
+ <and>
+ <equals>
+ <attribute id="0x1006e"> <!-- Model_Name -->
+ <value>mname</value>
+ </attribute>
+ </equals>
+ <equals>
+ <attribute id="0x10000"> <!-- Modeltype_Name -->
+ <value>mtype</value>
+ </attribute>
+ </equals>
+ </and>
+ </filtered-models>
+ """
+
+ # Parent filter tag
+ filtered_models = ET.Element('filtered-models')
+ # Logically and
+ _and = ET.SubElement(filtered_models, 'and')
+
+ # Model Name
+ MN_equals = ET.SubElement(_and, 'equals')
+ Model_Name = ET.SubElement(MN_equals, 'attribute',
+ {'id': self.attr_map['Model_Name']})
+ MN_value = ET.SubElement(Model_Name, 'value')
+ MN_value.text = mname
+
+ # Model Type Name
+ MTN_equals = ET.SubElement(_and, 'equals')
+ Modeltype_Name = ET.SubElement(MTN_equals, 'attribute',
+ {'id': self.attr_map['Modeltype_Name']})
+ MTN_value = ET.SubElement(Modeltype_Name, 'value')
+ MTN_value.text = mtype
+
+ return self.find_model(ET.tostring(filtered_models,
+ encoding='unicode'),
+ ret_attrs)
+
+ def ensure_model_attrs(self):
+
+ # Get a list of all requested attribute names/IDs plus Model_Handle and
+ # use them to query the values currently set. Store the findings in a
+ # dictionary.
+ req_attrs = []
+ for attr in self.module.params['attributes']:
+ req_attrs.append(attr['name'])
+ if 'Model_Handle' not in req_attrs:
+ req_attrs.append('Model_Handle')
+
+ # Survey attributes currently set and store in a dict.
+ cur_attrs = self.find_model_by_name_type(self.module.params['name'],
+ self.module.params['type'],
+ req_attrs)
+
+ # Iterate through the requested attribute name/ID and value pairs and
+ # compare with those currently set. If different, attempt to change.
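+ # Hypothetical example: with cur_attrs = {'Model_Handle': '0x1010e76',
+ # 'isManaged': 'true', 'Notes': None}, requesting isManaged='false'
+ # triggers an update, while an empty-string Notes value is normalized
+ # to None first and therefore compares as unchanged.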
+ Model_Handle = cur_attrs.pop("Model_Handle") + for attr in self.module.params['attributes']: + req_name = attr['name'] + req_val = attr['value'] + if req_val == "": + # The API will return None on empty string + req_val = None + if cur_attrs[req_name] != req_val: + if self.module.check_mode: + self.result['changed_attrs'][req_name] = req_val + self.result['msg'] = self.success_msg + self.result['changed'] = True + continue + resp = self.update_model(Model_Handle, {req_name: req_val}) + + self.module.exit_json(**self.result) + + +def run_module(): + argument_spec = dict( + url=dict(type='str', required=True), + url_username=dict(type='str', required=True, aliases=['username']), + url_password=dict(type='str', required=True, aliases=['password'], + no_log=True), + validate_certs=dict(type='bool', default=True), + use_proxy=dict(type='bool', default=True), + name=dict(type='str', required=True), + type=dict(type='str', required=True), + attributes=dict(type='list', + required=True, + elements='dict', + options=dict( + name=dict(type='str', required=True), + value=dict(type='str', required=True) + )), + ) + module = AnsibleModule( + supports_check_mode=True, + argument_spec=argument_spec, + ) + + try: + sm = spectrum_model_attrs(module) + sm.ensure_model_attrs() + except Exception as e: + module.fail_json(msg="Failed to ensure attribute(s) on `%s' with " + "exception: %s" % (module.params['name'], + to_native(e))) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py deleted file mode 120000 index 3322ea03c1..0000000000 --- a/plugins/modules/spotinst_aws_elastigroup.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/spotinst/spotinst_aws_elastigroup.py \ No newline at end of file diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py new file mode 100644 index 0000000000..237ffddcdd --- /dev/null +++ b/plugins/modules/spotinst_aws_elastigroup.py @@ -0,0 +1,1483 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: spotinst_aws_elastigroup +short_description: Create, update or delete Spotinst AWS Elastigroups +author: Spotinst (@talzur) +description: + - Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration, + so no additional modules are necessary for handling the launch configuration. You must have a credentials file in this + location - C($HOME/.spotinst/credentials). The credentials file must contain a row that looks like this C(token = ). + - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-). +requirements: + - spotinst_sdk >= 1.0.38 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + + credentials_path: + description: + - Optional parameter that allows to set a non-default credentials path. + default: ~/.spotinst/credentials + type: path + + account_id: + description: + - Optional parameter that allows to set an account-id inside the module configuration. + - By default this is retrieved from the credentials path. 
+ type: str
+
+ token:
+ description:
+ - A Personal API Access Token issued by Spotinst.
+ - When not specified, the module tries to obtain it, in that order, from environment variable E(SPOTINST_TOKEN), or
+ from the credentials path.
+ type: str
+
+ availability_vs_cost:
+ description:
+ - The strategy orientation.
+ - 'The choices available are: V(availabilityOriented), V(costOriented), V(balanced).'
+ required: true
+ type: str
+
+ availability_zones:
+ description:
+ - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]';
+ keys allowed are name (String), subnet_id (String), placement_group_name (String).
+ required: true
+ type: list
+ elements: dict
+
+ block_device_mappings:
+ description:
+ - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and
+ EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String),
+ no_device (String), ebs (Object, expects the following keys - delete_on_termination(Boolean), encrypted(Boolean), iops
+ (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)).
+ type: list
+ elements: dict
+
+ chef:
+ description:
+ - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user
+ (String), pem_key (String), chef_version (String).
+ type: dict
+
+ draining_timeout:
+ description:
+ - Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+ type: int
+
+ ebs_optimized:
+ description:
+ - Enable EBS optimization for supported instances which are not enabled by default. Note - additional charges are applied.
+ type: bool
+
+ ebs_volume_pool:
+ description:
+ - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]';
+ keys allowed are - volume_ids (List of Strings), device_name (String).
+ type: list
+ elements: dict
+
+ ecs:
+ description:
+ - The ECS integration configuration.; Expects the following key - cluster_name (String).
+ type: dict
+
+ elastic_ips:
+ description:
+ - List of ElasticIps Allocation IDs (example V(eipalloc-9d4e16f8)) to associate to the group instances.
+ type: list
+ elements: str
+
+ fallback_to_od:
+ description:
+ - In case of no spots available, Elastigroup launches an On-demand instance instead.
+ type: bool
+
+ health_check_grace_period:
+ description:
+ - The amount of time, in seconds, after the instance has launched to start and check its health.
+ - If not specified, it defaults to V(300).
+ type: int
+
+ health_check_unhealthy_duration_before_replacement:
+ description:
+ - Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
+ type: int
+
+ health_check_type:
+ description:
+ - The service to use for the health check.
+ - 'The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2).'
+ type: str
+
+ iam_role_name:
+ description:
+ - The instance profile iamRole name.
+ - Only use O(iam_role_arn) or O(iam_role_name).
+ type: str
+
+ iam_role_arn:
+ description:
+ - The instance profile iamRole arn.
+ - Only use O(iam_role_arn) or O(iam_role_name).
+ type: str
+
+ id:
+ description:
+ - The group ID if it already exists and you want to update or delete it. This does not work unless the O(uniqueness_by)
+ field is set to ID. When this is set, the group is either updated or deleted,
+ but not created.
+ type: str
+
+ image_id:
+ description:
+ - The image ID used to launch the instance.; In case of conflict between Instance type and image type, an error is
+ returned.
+ required: true
+ type: str
+
+ key_pair:
+ description:
+ - Specify a Key Pair to attach to the instances.
+ type: str
+
+ kubernetes:
+ description:
+ - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String).
+ type: dict
+
+ lifetime_period:
+ description:
+ - Lifetime period.
+ type: int
+
+ load_balancers:
+ description:
+ - List of classic ELB names.
+ type: list
+ elements: str
+
+ max_size:
+ description:
+ - The upper limit number of instances that you can scale up to.
+ required: true
+ type: int
+
+ mesosphere:
+ description:
+ - The Mesosphere integration configuration. Expects the following key - api_server (String).
+ type: dict
+
+ min_size:
+ description:
+ - The lower limit number of instances that you can scale down to.
+ required: true
+ type: int
+
+ monitoring:
+ description:
+ - Describes whether instance Enhanced Monitoring is enabled.
+ type: str
+
+ name:
+ description:
+ - Unique name for elastigroup to be created, updated or deleted.
+ required: true
+ type: str
+
+ network_interfaces:
+ description:
+ - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys
+ allowed are - description (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address
+ (Boolean), delete_on_termination (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address
+ (String), subnet_id (String), associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress
+ (String, required) and primary (Boolean)).
+ type: list
+ elements: dict
+
+ on_demand_count:
+ description:
+ - Required if risk is not set.
+ - Number of on demand instances to launch. All other instances are spot instances.; Either set this parameter or the
+ O(risk) parameter.
+ type: int
+
+ on_demand_instance_type:
+ description:
+ - On-demand instance type that is provisioned.
+ type: str
+
+ opsworks:
+ description:
+ - The elastigroup OpsWorks integration configuration.; Expects the following key - layer_id (String).
+ type: dict
+
+ persistence:
+ description:
+ - The Stateful elastigroup configuration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices
+ (Boolean), should_persist_private_ip (Boolean).
+ type: dict
+
+ product:
+ description:
+ - Operating system type.
+ - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon
+ VPC)).'
+ required: true
+ type: str
+
+ rancher:
+ description:
+ - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key
+ (String), master_host (String).
+ type: dict
+
+ right_scale:
+ description:
+ - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String).
+ type: dict
+
+ risk:
+ description:
+ - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
+ type: int
+
+ roll_config:
+ description:
+ - Roll configuration.
+ - If you would like the group to roll after updating, please use this feature.
+ - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, + Optional). + type: dict + + scheduled_tasks: + description: + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup, as in V([{"key":"value", "key":"value"}]). + - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity + (Integer), adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), + grace_period (Integer), task_type (String, required), is_enabled (Boolean).' + type: list + elements: dict + + security_group_ids: + description: + - One or more security group IDs. + - In case of update it overrides the existing Security Group with the new given array. + required: true + type: list + elements: str + + shutdown_script: + description: + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. + type: str + + signals: + description: + - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), + timeout (Integer). + type: list + elements: dict + + spin_up_time: + description: + - Spin up time, in seconds, for the instance. + type: int + + spot_instance_types: + description: + - Spot instance type that is provisioned. + required: true + type: list + elements: str + + state: + choices: + - present + - absent + description: + - Create or delete the elastigroup. + default: present + type: str + + tags: + description: + - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value). + type: list + elements: dict + + target: + description: + - The number of instances to launch. + required: true + type: int + + target_group_arns: + description: + - List of target group arns instances should be registered to. + type: list + elements: str + + tenancy: + description: + - Dedicated or shared tenancy. + - 'The available choices are: V(default), V(dedicated).' + type: str + + terminate_at_end_of_billing_hour: + description: + - Terminate at the end of billing hour. + type: bool + + unit: + description: + - The capacity unit to launch instances by. + - 'The available choices are: V(instance), V(weight).' + type: str + + up_scaling_policies: + description: + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions + (List of Objects, Keys allowed are name (String, required) and value (String)), statistic (String, required) evaluation_periods + (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, + required), operator (String, required), action_type (String, required), adjustment (String), min_target_capacity (String), + target (String), maximum (String), minimum (String). 
+ type: list + elements: dict + + down_scaling_policies: + description: + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions + ((List of Objects), Keys allowed are name (String, required) and value (String)), statistic (String, required), evaluation_periods + (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, + required), operator (String, required), action_type (String, required), adjustment (String), max_target_capacity (String), + target (String), maximum (String), minimum (String). + type: list + elements: dict + + target_tracking_policies: + description: + - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are - policy_name (String, required), namespace (String, required), source (String, required), metric_name + (String, required), statistic (String, required), unit (String, required), cooldown (String, required), target (String, + required). + type: list + elements: dict + + uniqueness_by: + choices: + - id + - name + description: + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property + is set, you must set a group_id in order to update or delete a group, otherwise a group is created. + default: name + type: str + + user_data: + description: + - Base64-encoded MIME user data. Encode before setting the value. + type: str + + utilize_reserved_instances: + description: + - In case of any available Reserved Instances, Elastigroup utilizes your reservations before purchasing Spot instances. + type: bool + + wait_for_instances: + description: + - Whether or not the elastigroup creation / update actions should wait for the instances to spin. + type: bool + default: false + + wait_timeout: + description: + - How long the module should wait for instances before failing the action. + - Only works if O(wait_for_instances=true). + type: int + + do_not_update: + description: + - TODO document. + type: list + elements: str + default: [] + + multai_token: + description: + - Token used for Multai configuration. + type: str + + multai_load_balancers: + description: + - Configuration parameters for Multai load balancers. + type: list + elements: dict + + elastic_beanstalk: + description: + - Placeholder parameter for future implementation of Elastic Beanstalk configurations. 
+ type: dict +""" +EXAMPLES = r""" +# Basic configuration YAML example + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - ansible.builtin.debug: var=result + +# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/sda1' + ebs: + volume_size: 100 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 + register: result + + - name: Store private ips to file + ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips + with_items: "{{ result.instances }}" + - ansible.builtin.debug: var=result + +# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id +# In organizations with more than one account, it is required to specify an account_id + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/xvda' + ebs: + volume_size: 60 + volume_type: gp2 + - device_name: '/dev/xvdb' + ebs: + volume_size: 120 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 + register: result + + - name: Store private ips to file + ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips + with_items: "{{ result.instances }}" + - ansible.builtin.debug: var=result + +# In this example we have set up block device mapping with ephemeral devices + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + 
unit: instance + block_device_mappings: + - device_name: '/dev/xvda' + virtual_name: ephemeral0 + - device_name: '/dev/xvdb/' + virtual_name: ephemeral1 + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - ansible.builtin.debug: var=result + +# In this example we create a basic group configuration with a network interface defined. +# Each network interface must have a device index + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + network_interfaces: + - associate_public_ip_address: true + device_index: 0 + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - ansible.builtin.debug: var=result + + +# In this example we create a basic group configuration with a target tracking scaling policy defined + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + account_id: act-92d45673 + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-79da021e + image_id: ami-f173cc91 + fallback_to_od: true + tags: + - Creator: ValueOfCreatorTag + - Environment: ValueOfEnvironmentTag + key_pair: spotinst-labs-oregon + max_size: 10 + min_size: 0 + target: 2 + unit: instance + monitoring: true + name: ansible-group-1 + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-46cdc13d + spot_instance_types: + - c3.large + target_tracking_policies: + - policy_name: target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + do_not_update: + - image_id + register: result + - ansible.builtin.debug: var=result +""" + +RETURN = r""" +instances: + description: List of active elastigroup instances and their details. + returned: success + type: dict + sample: + - "spotInstanceRequestId": "sir-regs25zp" + "instanceId": "i-09640ad8678234c" + "instanceType": "m4.large" + "product": "Linux/UNIX" + "availabilityZone": "us-west-2b" + "privateIp": "180.0.2.244" + "createdAt": "2017-07-17T12:46:18.000Z" + "status": "fulfilled" +group_id: + description: Created / Updated group's ID. 
+ returned: success + type: str + sample: "sig-12345" +""" + +HAS_SPOTINST_SDK = False + +import os +import time +from ansible.module_utils.basic import AnsibleModule + +try: + import spotinst_sdk as spotinst + from spotinst_sdk import SpotinstClientException + + HAS_SPOTINST_SDK = True + +except ImportError: + pass + +eni_fields = ('description', + 'device_index', + 'secondary_private_ip_address_count', + 'associate_public_ip_address', + 'delete_on_termination', + 'groups', + 'network_interface_id', + 'private_ip_address', + 'subnet_id', + 'associate_ipv6_address') + +private_ip_fields = ('private_ip_address', + 'primary') + +capacity_fields = (dict(ansible_field_name='min_size', + spotinst_field_name='minimum'), + dict(ansible_field_name='max_size', + spotinst_field_name='maximum'), + 'target', + 'unit') + +lspec_fields = ('user_data', + 'key_pair', + 'tenancy', + 'shutdown_script', + 'monitoring', + 'ebs_optimized', + 'image_id', + 'health_check_type', + 'health_check_grace_period', + 'health_check_unhealthy_duration_before_replacement', + 'security_group_ids') + +iam_fields = (dict(ansible_field_name='iam_role_name', + spotinst_field_name='name'), + dict(ansible_field_name='iam_role_arn', + spotinst_field_name='arn')) + +scheduled_task_fields = ('adjustment', + 'adjustment_percentage', + 'batch_size_percentage', + 'cron_expression', + 'frequency', + 'grace_period', + 'task_type', + 'is_enabled', + 'scale_target_capacity', + 'scale_min_capacity', + 'scale_max_capacity') + +scaling_policy_fields = ('policy_name', + 'namespace', + 'metric_name', + 'dimensions', + 'statistic', + 'evaluation_periods', + 'period', + 'threshold', + 'cooldown', + 'unit', + 'operator') + +tracking_policy_fields = ('policy_name', + 'namespace', + 'source', + 'metric_name', + 'statistic', + 'unit', + 'cooldown', + 'target', + 'threshold') + +action_fields = (dict(ansible_field_name='action_type', + spotinst_field_name='type'), + 'adjustment', + 'min_target_capacity', + 'max_target_capacity', + 'target', + 'minimum', + 'maximum') + +signal_fields = ('name', + 'timeout') + +multai_lb_fields = ('balancer_id', + 'project_id', + 'target_set_id', + 'az_awareness', + 'auto_weight') + +persistence_fields = ('should_persist_root_device', + 'should_persist_block_devices', + 'should_persist_private_ip') + +strategy_fields = ('risk', + 'utilize_reserved_instances', + 'fallback_to_od', + 'on_demand_count', + 'availability_vs_cost', + 'draining_timeout', + 'spin_up_time', + 'lifetime_period') + +ebs_fields = ('delete_on_termination', + 'encrypted', + 'iops', + 'snapshot_id', + 'volume_type', + 'volume_size') + +bdm_fields = ('device_name', + 'virtual_name', + 'no_device') + +kubernetes_fields = ('api_server', + 'token') + +right_scale_fields = ('account_id', + 'refresh_token') + +rancher_fields = ('access_key', + 'secret_key', + 'master_host', + 'version') + +chef_fields = ('chef_server', + 'organization', + 'user', + 'pem_key', + 'chef_version') + +az_fields = ('name', + 'subnet_id', + 'placement_group_name') + +opsworks_fields = ('layer_id',) + +scaling_strategy_fields = ('terminate_at_end_of_billing_hour',) + +mesosphere_fields = ('api_server',) + +ecs_fields = ('cluster_name',) + +multai_fields = ('multai_token',) + + +def handle_elastigroup(client, module): + has_changed = False + group_id = None + message = 'None' + + name = module.params.get('name') + state = module.params.get('state') + uniqueness_by = module.params.get('uniqueness_by') + external_group_id = module.params.get('id') + + if uniqueness_by == 'id': + if 
external_group_id is None: + should_create = True + else: + should_create = False + group_id = external_group_id + else: + groups = client.get_elastigroups() + should_create, group_id = find_group_with_same_name(groups, name) + + if should_create is True: + if state == 'present': + eg = expand_elastigroup(module, is_update=False) + module.debug(str(" [INFO] " + message + "\n")) + group = client.create_elastigroup(group=eg) + group_id = group['id'] + message = 'Created group Successfully.' + has_changed = True + + elif state == 'absent': + message = 'Cannot delete non-existent group.' + has_changed = False + else: + eg = expand_elastigroup(module, is_update=True) + + if state == 'present': + group = client.update_elastigroup(group_update=eg, group_id=group_id) + message = 'Updated group successfully.' + + try: + roll_config = module.params.get('roll_config') + if roll_config: + eg_roll = spotinst.aws_elastigroup.Roll( + batch_size_percentage=roll_config.get('batch_size_percentage'), + grace_period=roll_config.get('grace_period'), + health_check_type=roll_config.get('health_check_type') + ) + roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id) + message = 'Updated and started rolling the group successfully.' + + except SpotinstClientException as exc: + message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc) + has_changed = True + + elif state == 'absent': + try: + client.delete_elastigroup(group_id=group_id) + except SpotinstClientException as exc: + if "GROUP_DOESNT_EXIST" in exc.message: + pass + else: + module.fail_json(msg="Error while attempting to delete group : " + exc.message) + + message = 'Deleted group successfully.' + has_changed = True + + return group_id, message, has_changed + + +def retrieve_group_instances(client, module, group_id): + wait_timeout = module.params.get('wait_timeout') + wait_for_instances = module.params.get('wait_for_instances') + + health_check_type = module.params.get('health_check_type') + + if wait_timeout is None: + wait_timeout = 300 + + wait_timeout = time.time() + wait_timeout + target = module.params.get('target') + state = module.params.get('state') + instances = list() + + if state == 'present' and group_id is not None and wait_for_instances is True: + + is_amount_fulfilled = False + while is_amount_fulfilled is False and wait_timeout > time.time(): + instances = list() + amount_of_fulfilled_instances = 0 + + if health_check_type is not None: + healthy_instances = client.get_instance_healthiness(group_id=group_id) + + for healthy_instance in healthy_instances: + if healthy_instance.get('healthStatus') == 'HEALTHY': + amount_of_fulfilled_instances += 1 + instances.append(healthy_instance) + + else: + active_instances = client.get_elastigroup_active_instances(group_id=group_id) + + for active_instance in active_instances: + if active_instance.get('private_ip') is not None: + amount_of_fulfilled_instances += 1 + instances.append(active_instance) + + if amount_of_fulfilled_instances >= target: + is_amount_fulfilled = True + + time.sleep(10) + + return instances + + +def find_group_with_same_name(groups, name): + for group in groups: + if group['name'] == name: + return False, group.get('id') + + return True, None + + +def expand_elastigroup(module, is_update): + do_not_update = module.params['do_not_update'] + name = module.params.get('name') + + eg = spotinst.aws_elastigroup.Elastigroup() + description = module.params.get('description') + + if name is not None: + eg.name = name + if description is not 
None: + eg.description = description + + # Capacity + expand_capacity(eg, module, is_update, do_not_update) + # Strategy + expand_strategy(eg, module) + # Scaling + expand_scaling(eg, module) + # Third party integrations + expand_integrations(eg, module) + # Compute + expand_compute(eg, module, is_update, do_not_update) + # Multai + expand_multai(eg, module) + # Scheduling + expand_scheduled_tasks(eg, module) + + return eg + + +def expand_compute(eg, module, is_update, do_not_update): + elastic_ips = module.params['elastic_ips'] + on_demand_instance_type = module.params.get('on_demand_instance_type') + spot_instance_types = module.params['spot_instance_types'] + ebs_volume_pool = module.params['ebs_volume_pool'] + availability_zones_list = module.params['availability_zones'] + product = module.params.get('product') + + eg_compute = spotinst.aws_elastigroup.Compute() + + if product is not None: + # Only put product on group creation + if is_update is not True: + eg_compute.product = product + + if elastic_ips is not None: + eg_compute.elastic_ips = elastic_ips + + if on_demand_instance_type or spot_instance_types is not None: + eg_instance_types = spotinst.aws_elastigroup.InstanceTypes() + + if on_demand_instance_type is not None: + eg_instance_types.spot = spot_instance_types + if spot_instance_types is not None: + eg_instance_types.ondemand = on_demand_instance_type + + if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None: + eg_compute.instance_types = eg_instance_types + + expand_ebs_volume_pool(eg_compute, ebs_volume_pool) + + eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone') + + expand_launch_spec(eg_compute, module, is_update, do_not_update) + + eg.compute = eg_compute + + +def expand_ebs_volume_pool(eg_compute, ebs_volumes_list): + if ebs_volumes_list is not None: + eg_volumes = [] + + for volume in ebs_volumes_list: + eg_volume = spotinst.aws_elastigroup.EbsVolume() + + if volume.get('device_name') is not None: + eg_volume.device_name = volume.get('device_name') + if volume.get('volume_ids') is not None: + eg_volume.volume_ids = volume.get('volume_ids') + + if eg_volume.device_name is not None: + eg_volumes.append(eg_volume) + + if len(eg_volumes) > 0: + eg_compute.ebs_volume_pool = eg_volumes + + +def expand_launch_spec(eg_compute, module, is_update, do_not_update): + eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification') + + if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None: + eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole') + + tags = module.params['tags'] + load_balancers = module.params['load_balancers'] + target_group_arns = module.params['target_group_arns'] + block_device_mappings = module.params['block_device_mappings'] + network_interfaces = module.params['network_interfaces'] + + if is_update is True: + if 'image_id' in do_not_update: + delattr(eg_launch_spec, 'image_id') + + expand_tags(eg_launch_spec, tags) + + expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns) + + expand_block_device_mappings(eg_launch_spec, block_device_mappings) + + expand_network_interfaces(eg_launch_spec, network_interfaces) + + eg_compute.launch_specification = eg_launch_spec + + +def expand_integrations(eg, module): + rancher = module.params.get('rancher') + mesosphere = module.params.get('mesosphere') + ecs = module.params.get('ecs') + kubernetes = module.params.get('kubernetes') + right_scale = 
module.params.get('right_scale') + opsworks = module.params.get('opsworks') + chef = module.params.get('chef') + + integration_exists = False + + eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations() + + if mesosphere is not None: + eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere') + integration_exists = True + + if ecs is not None: + eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration') + integration_exists = True + + if kubernetes is not None: + eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration') + integration_exists = True + + if right_scale is not None: + eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration') + integration_exists = True + + if opsworks is not None: + eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration') + integration_exists = True + + if rancher is not None: + eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher') + integration_exists = True + + if chef is not None: + eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration') + integration_exists = True + + if integration_exists: + eg.third_parties_integration = eg_integrations + + +def expand_capacity(eg, module, is_update, do_not_update): + eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity') + + if is_update is True: + delattr(eg_capacity, 'unit') + + if 'target' in do_not_update: + delattr(eg_capacity, 'target') + + eg.capacity = eg_capacity + + +def expand_strategy(eg, module): + persistence = module.params.get('persistence') + signals = module.params.get('signals') + + eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy') + + terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour') + + if terminate_at_end_of_billing_hour is not None: + eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, + module.params, 'ScalingStrategy') + + if persistence is not None: + eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence') + + if signals is not None: + eg_signals = expand_list(signals, signal_fields, 'Signal') + + if len(eg_signals) > 0: + eg_strategy.signals = eg_signals + + eg.strategy = eg_strategy + + +def expand_multai(eg, module): + multai_load_balancers = module.params.get('multai_load_balancers') + + eg_multai = expand_fields(multai_fields, module.params, 'Multai') + + if multai_load_balancers is not None: + eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer') + + if len(eg_multai_load_balancers) > 0: + eg_multai.balancers = eg_multai_load_balancers + eg.multai = eg_multai + + +def expand_scheduled_tasks(eg, module): + scheduled_tasks = module.params.get('scheduled_tasks') + + if scheduled_tasks is not None: + eg_scheduling = spotinst.aws_elastigroup.Scheduling() + + eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask') + + if len(eg_tasks) > 0: + eg_scheduling.tasks = eg_tasks + eg.scheduling = eg_scheduling + + +def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): + if load_balancers is not None or target_group_arns is not None: + eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() + eg_total_lbs = [] + + if load_balancers is not None: + for elb_name in load_balancers: + eg_elb = spotinst.aws_elastigroup.LoadBalancer() + if elb_name is 
not None: + eg_elb.name = elb_name + eg_elb.type = 'CLASSIC' + eg_total_lbs.append(eg_elb) + + if target_group_arns is not None: + for target_arn in target_group_arns: + eg_elb = spotinst.aws_elastigroup.LoadBalancer() + if target_arn is not None: + eg_elb.arn = target_arn + eg_elb.type = 'TARGET_GROUP' + eg_total_lbs.append(eg_elb) + + if len(eg_total_lbs) > 0: + eg_load_balancers_config.load_balancers = eg_total_lbs + eg_launchspec.load_balancers_config = eg_load_balancers_config + + +def expand_tags(eg_launchspec, tags): + if tags is not None: + eg_tags = [] + + for tag in tags: + eg_tag = spotinst.aws_elastigroup.Tag() + if tag: + eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0] + + eg_tags.append(eg_tag) + + if len(eg_tags) > 0: + eg_launchspec.tags = eg_tags + + +def expand_block_device_mappings(eg_launchspec, bdms): + if bdms is not None: + eg_bdms = [] + + for bdm in bdms: + eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping') + + if bdm.get('ebs') is not None: + eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS') + + eg_bdms.append(eg_bdm) + + if len(eg_bdms) > 0: + eg_launchspec.block_device_mappings = eg_bdms + + +def expand_network_interfaces(eg_launchspec, enis): + if enis is not None: + eg_enis = [] + + for eni in enis: + eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface') + + eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress') + + if eg_pias is not None: + eg_eni.private_ip_addresses = eg_pias + + eg_enis.append(eg_eni) + + if len(eg_enis) > 0: + eg_launchspec.network_interfaces = eg_enis + + +def expand_scaling(eg, module): + up_scaling_policies = module.params['up_scaling_policies'] + down_scaling_policies = module.params['down_scaling_policies'] + target_tracking_policies = module.params['target_tracking_policies'] + + eg_scaling = spotinst.aws_elastigroup.Scaling() + + if up_scaling_policies is not None: + eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies) + if len(eg_up_scaling_policies) > 0: + eg_scaling.up = eg_up_scaling_policies + + if down_scaling_policies is not None: + eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies) + if len(eg_down_scaling_policies) > 0: + eg_scaling.down = eg_down_scaling_policies + + if target_tracking_policies is not None: + eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies) + if len(eg_target_tracking_policies) > 0: + eg_scaling.target = eg_target_tracking_policies + + if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None: + eg.scaling = eg_scaling + + +def expand_list(items, fields, class_name): + if items is not None: + new_objects_list = [] + for item in items: + new_obj = expand_fields(fields, item, class_name) + new_objects_list.append(new_obj) + + return new_objects_list + + +def expand_fields(fields, item, class_name): + class_ = getattr(spotinst.aws_elastigroup, class_name) + new_obj = class_() + + # Handle primitive fields + if item is not None: + for field in fields: + if isinstance(field, dict): + ansible_field_name = field['ansible_field_name'] + spotinst_field_name = field['spotinst_field_name'] + else: + ansible_field_name = field + spotinst_field_name = field + if item.get(ansible_field_name) is not None: + setattr(new_obj, spotinst_field_name, item.get(ansible_field_name)) + + return new_obj + + +def expand_scaling_policies(scaling_policies): + eg_scaling_policies = [] + + for policy in scaling_policies: + eg_policy = 
expand_fields(scaling_policy_fields, policy, 'ScalingPolicy') + eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction') + eg_scaling_policies.append(eg_policy) + + return eg_scaling_policies + + +def expand_target_tracking_policies(tracking_policies): + eg_tracking_policies = [] + + for policy in tracking_policies: + eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy') + eg_tracking_policies.append(eg_policy) + + return eg_tracking_policies + + +def main(): + fields = dict( + account_id=dict(type='str'), + availability_vs_cost=dict(type='str', required=True), + availability_zones=dict(type='list', elements='dict', required=True), + block_device_mappings=dict(type='list', elements='dict'), + chef=dict(type='dict'), + credentials_path=dict(type='path', default="~/.spotinst/credentials"), + do_not_update=dict(default=[], type='list', elements='str'), + down_scaling_policies=dict(type='list', elements='dict'), + draining_timeout=dict(type='int'), + ebs_optimized=dict(type='bool'), + ebs_volume_pool=dict(type='list', elements='dict'), + ecs=dict(type='dict'), + elastic_beanstalk=dict(type='dict'), + elastic_ips=dict(type='list', elements='str'), + fallback_to_od=dict(type='bool'), + id=dict(type='str'), + health_check_grace_period=dict(type='int'), + health_check_type=dict(type='str'), + health_check_unhealthy_duration_before_replacement=dict(type='int'), + iam_role_arn=dict(type='str'), + iam_role_name=dict(type='str'), + image_id=dict(type='str', required=True), + key_pair=dict(type='str', no_log=False), + kubernetes=dict(type='dict'), + lifetime_period=dict(type='int'), + load_balancers=dict(type='list', elements='str'), + max_size=dict(type='int', required=True), + mesosphere=dict(type='dict'), + min_size=dict(type='int', required=True), + monitoring=dict(type='str'), + multai_load_balancers=dict(type='list', elements='dict'), + multai_token=dict(type='str', no_log=True), + name=dict(type='str', required=True), + network_interfaces=dict(type='list', elements='dict'), + on_demand_count=dict(type='int'), + on_demand_instance_type=dict(type='str'), + opsworks=dict(type='dict'), + persistence=dict(type='dict'), + product=dict(type='str', required=True), + rancher=dict(type='dict'), + right_scale=dict(type='dict'), + risk=dict(type='int'), + roll_config=dict(type='dict'), + scheduled_tasks=dict(type='list', elements='dict'), + security_group_ids=dict(type='list', elements='str', required=True), + shutdown_script=dict(type='str'), + signals=dict(type='list', elements='dict'), + spin_up_time=dict(type='int'), + spot_instance_types=dict(type='list', elements='str', required=True), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='list', elements='dict'), + target=dict(type='int', required=True), + target_group_arns=dict(type='list', elements='str'), + tenancy=dict(type='str'), + terminate_at_end_of_billing_hour=dict(type='bool'), + token=dict(type='str', no_log=True), + unit=dict(type='str'), + user_data=dict(type='str'), + utilize_reserved_instances=dict(type='bool'), + uniqueness_by=dict(default='name', choices=['name', 'id']), + up_scaling_policies=dict(type='list', elements='dict'), + target_tracking_policies=dict(type='list', elements='dict'), + wait_for_instances=dict(type='bool', default=False), + wait_timeout=dict(type='int') + ) + + module = AnsibleModule(argument_spec=fields) + + if not HAS_SPOTINST_SDK: + module.fail_json(msg="the Spotinst SDK library is required. 
(pip install spotinst_sdk)") + + # Retrieve creds file variables + creds_file_loaded_vars = dict() + + credentials_path = module.params.get('credentials_path') + + try: + with open(credentials_path, "r") as creds: + for line in creds: + eq_index = line.find('=') + var_name = line[:eq_index].strip() + string_value = line[eq_index + 1:].strip() + creds_file_loaded_vars[var_name] = string_value + except IOError: + pass + # End of creds file retrieval + + token = module.params.get('token') + if not token: + token = os.environ.get('SPOTINST_TOKEN') + if not token: + token = creds_file_loaded_vars.get("token") + + account = module.params.get('account_id') + if not account: + account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT') + if not account: + account = creds_file_loaded_vars.get("account") + + client = spotinst.SpotinstClient(auth_token=token, print_output=False) + + if account is not None: + client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account) + + group_id, message, has_changed = handle_elastigroup(client=client, module=module) + + instances = retrieve_group_instances(client=client, module=module, group_id=group_id) + + module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py deleted file mode 120000 index a5659bca4d..0000000000 --- a/plugins/modules/ss_3par_cpg.py +++ /dev/null @@ -1 +0,0 @@ -./storage/hpe3par/ss_3par_cpg.py \ No newline at end of file diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py new file mode 100644 index 0000000000..6c6219ed64 --- /dev/null +++ b/plugins/modules/ss_3par_cpg.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# Copyright (c) 2018, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + + +from __future__ import annotations + + +DOCUMENTATION = r""" +short_description: Manage HPE StoreServ 3PAR CPG +author: + - Farhan Nomani (@farhan7500) + - Gautham P Hegde (@gautamphegde) +description: + - Create and delete CPG on HPE 3PAR. +module: ss_3par_cpg +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + cpg_name: + description: + - Name of the CPG. + type: str + required: true + disk_type: + choices: + - FC + - NL + - SSD + description: + - Specifies that physical disks must have the specified device type. + type: str + domain: + description: + - Specifies the name of the domain in which the object resides. + type: str + growth_increment: + description: + - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage created on each auto-grow operation. + type: str + growth_limit: + description: + - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit (in MiB, + GiB or TiB). + type: str + growth_warning: + description: + - Specifies that the threshold (in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert. + type: str + high_availability: + choices: + - PORT + - CAGE + - MAG + description: + - Specifies that the layout must support the failure of one port pair, one cage, or one magazine. + type: str + raid_type: + choices: + - R0 + - R1 + - R5 + - R6 + description: + - Specifies the RAID type for the logical disk. 
+ type: str + set_size: + description: + - Specifies the set size in the number of chunklets. + type: int + state: + choices: + - present + - absent + description: + - Whether the specified CPG should exist or not. + required: true + type: str + secure: + description: + - Specifies whether the certificate needs to be validated while communicating. + type: bool + default: false +extends_documentation_fragment: + - community.general.hpe3par + - community.general.attributes +""" + + +EXAMPLES = r""" +- name: Create CPG sample_cpg + community.general.ss_3par_cpg: + storage_system_ip: 10.10.10.1 + storage_system_username: username + storage_system_password: password + state: present + cpg_name: sample_cpg + domain: sample_domain + growth_increment: 32000 MiB + growth_limit: 64000 MiB + growth_warning: 48000 MiB + raid_type: R6 + set_size: 8 + high_availability: MAG + disk_type: FC + secure: false + +- name: Delete CPG sample_cpg + community.general.ss_3par_cpg: + storage_system_ip: 10.10.10.1 + storage_system_username: username + storage_system_password: password + state: absent + cpg_name: sample_cpg + secure: false +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par +try: + from hpe3par_sdk import client + from hpe3parclient import exceptions + HAS_3PARCLIENT = True +except ImportError: + HAS_3PARCLIENT = False + + +def validate_set_size(raid_type, set_size): + if raid_type: + set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes'] + if set_size in set_size_array: + return True + return False + + +def cpg_ldlayout_map(ldlayout_dict): + if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']: + ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[ + ldlayout_dict['RAIDType']]['raid_value'] + if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']: + ldlayout_dict['HA'] = getattr( + client.HPE3ParClient, ldlayout_dict['HA']) + return ldlayout_dict + + +def create_cpg( + client_obj, + cpg_name, + domain, + growth_increment, + growth_limit, + growth_warning, + raid_type, + set_size, + high_availability, + disk_type): + try: + if not validate_set_size(raid_type, set_size): + return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type)) + if not client_obj.cpgExists(cpg_name): + + disk_patterns = [] + if disk_type: + disk_type = getattr(client.HPE3ParClient, disk_type) + disk_patterns = [{'diskType': disk_type}] + ld_layout = { + 'RAIDType': raid_type, + 'setSize': set_size, + 'HA': high_availability, + 'diskPatterns': disk_patterns} + ld_layout = cpg_ldlayout_map(ld_layout) + if growth_increment is not None: + growth_increment = hpe3par.convert_to_binary_multiple( + growth_increment) + if growth_limit is not None: + growth_limit = hpe3par.convert_to_binary_multiple( + growth_limit) + if growth_warning is not None: + growth_warning = hpe3par.convert_to_binary_multiple( + growth_warning) + optional = { + 'domain': domain, + 'growthIncrementMiB': growth_increment, + 'growthLimitMiB': growth_limit, + 'usedLDWarningAlertMiB': growth_warning, + 'LDLayout': ld_layout} + client_obj.createCPG(cpg_name, optional) + else: + return (True, False, "CPG already present") + except exceptions.ClientException as e: + return (False, False, "CPG creation failed | %s" % (e)) + return (True, True, "Created CPG %s successfully." 
% cpg_name) + + +def delete_cpg( + client_obj, + cpg_name): + try: + if client_obj.cpgExists(cpg_name): + client_obj.deleteCPG(cpg_name) + else: + return (True, False, "CPG does not exist") + except exceptions.ClientException as e: + return (False, False, "CPG delete failed | %s" % e) + return (True, True, "Deleted CPG %s successfully." % cpg_name) + + +def main(): + module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(), + required_together=[['raid_type', 'set_size']]) + if not HAS_3PARCLIENT: + module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)') + + if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31: + module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters") + + storage_system_ip = module.params["storage_system_ip"] + storage_system_username = module.params["storage_system_username"] + storage_system_password = module.params["storage_system_password"] + cpg_name = module.params["cpg_name"] + domain = module.params["domain"] + growth_increment = module.params["growth_increment"] + growth_limit = module.params["growth_limit"] + growth_warning = module.params["growth_warning"] + raid_type = module.params["raid_type"] + set_size = module.params["set_size"] + high_availability = module.params["high_availability"] + disk_type = module.params["disk_type"] + secure = module.params["secure"] + + wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip + try: + client_obj = client.HPE3ParClient(wsapi_url, secure) + except exceptions.SSLCertFailed: + module.fail_json(msg="SSL Certificate Failed") + except exceptions.ConnectionError: + module.fail_json(msg="Connection Error") + except exceptions.UnsupportedVersion: + module.fail_json(msg="Unsupported WSAPI version") + except Exception as e: + module.fail_json(msg="Initializing client failed. 
%s" % e) + + if storage_system_username is None or storage_system_password is None: + module.fail_json(msg="Storage system username or password is None") + if cpg_name is None: + module.fail_json(msg="CPG Name is None") + + # States + if module.params["state"] == "present": + try: + client_obj.login(storage_system_username, storage_system_password) + return_status, changed, msg = create_cpg( + client_obj, + cpg_name, + domain, + growth_increment, + growth_limit, + growth_warning, + raid_type, + set_size, + high_availability, + disk_type + ) + except Exception as e: + module.fail_json(msg="CPG create failed | %s" % e) + finally: + client_obj.logout() + + elif module.params["state"] == "absent": + try: + client_obj.login(storage_system_username, storage_system_password) + return_status, changed, msg = delete_cpg( + client_obj, + cpg_name + ) + except Exception as e: + module.fail_json(msg="CPG create failed | %s" % e) + finally: + client_obj.logout() + + if return_status: + module.exit_json(changed=changed, msg=msg) + else: + module.fail_json(msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py deleted file mode 120000 index 4f0c5a2967..0000000000 --- a/plugins/modules/ssh_config.py +++ /dev/null @@ -1 +0,0 @@ -./system/ssh_config.py \ No newline at end of file diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py new file mode 100644 index 0000000000..6844da92a7 --- /dev/null +++ b/plugins/modules/ssh_config.py @@ -0,0 +1,425 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Björn Andersson +# Copyright (c) 2021, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ssh_config +short_description: Manage SSH config for user +version_added: '2.0.0' +description: + - Configures SSH hosts with special C(IdentityFile)s and hostnames. +author: + - Björn Andersson (@gaqzi) + - Abhijeet Kasurde (@Akasurde) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether a host entry should exist or not. + default: present + choices: ['present', 'absent'] + type: str + user: + description: + - Which user account this configuration file belongs to. + - If none given and O(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used. + - If a user is given, C(~/.ssh/config) is used. + - Mutually exclusive with O(ssh_config_file). + type: str + group: + description: + - Which group this configuration file belongs to. + - If none given, O(user) is used. + type: str + host: + description: + - The endpoint this configuration is valid for. + - It can be an actual address on the internet or an alias that connects to the value of O(hostname). + required: true + type: str + hostname: + description: + - The actual host to connect to when connecting to the host defined. + type: str + port: + description: + - The actual port to connect to when connecting to the host defined. + type: str + remote_user: + description: + - Specifies the user to log in as. + type: str + identity_file: + description: + - The path to an identity file (SSH private key) that is used when connecting to this host. + - File need to exist and have mode V(0600) to be valid. 
+    type: path
+  identities_only:
+    description:
+      - Specifies that SSH should only use the configured authentication identity and certificate files (either the default
+        files, or those explicitly configured in the C(ssh_config) files or passed on the ssh command-line), even if C(ssh-agent)
+        or a C(PKCS11Provider) or C(SecurityKeyProvider) offers more identities.
+    type: bool
+    version_added: 8.2.0
+  user_known_hosts_file:
+    description:
+      - Sets the user known hosts file option.
+    type: str
+  strict_host_key_checking:
+    description:
+      - Whether to strictly check the host key when doing connections to the remote host.
+      - The value V(accept-new) is supported since community.general 8.6.0.
+    choices: ['yes', 'no', 'ask', 'accept-new']
+    type: str
+  proxycommand:
+    description:
+      - Sets the C(ProxyCommand) option.
+      - Mutually exclusive with O(proxyjump).
+    type: str
+  proxyjump:
+    description:
+      - Sets the C(ProxyJump) option.
+      - Mutually exclusive with O(proxycommand).
+    type: str
+    version_added: 6.5.0
+  forward_agent:
+    description:
+      - Sets the C(ForwardAgent) option.
+    type: bool
+    version_added: 4.0.0
+  add_keys_to_agent:
+    description:
+      - Sets the C(AddKeysToAgent) option.
+    type: bool
+    version_added: 8.2.0
+  ssh_config_file:
+    description:
+      - SSH config file.
+      - If O(user) and this option are not specified, C(/etc/ssh/ssh_config) is used.
+      - Mutually exclusive with O(user).
+    type: path
+  host_key_algorithms:
+    description:
+      - Sets the C(HostKeyAlgorithms) option.
+    type: str
+    version_added: 6.1.0
+  controlmaster:
+    description:
+      - Sets the C(ControlMaster) option.
+    choices: ['yes', 'no', 'ask', 'auto', 'autoask']
+    type: str
+    version_added: 8.1.0
+  controlpath:
+    description:
+      - Sets the C(ControlPath) option.
+    type: str
+    version_added: 8.1.0
+  controlpersist:
+    description:
+      - Sets the C(ControlPersist) option.
+    type: str
+    version_added: 8.1.0
+  dynamicforward:
+    description:
+      - Sets the C(DynamicForward) option.
+    type: str
+    version_added: 10.1.0
+  other_options:
+    description:
+      - Allows specifying arbitrary SSH config entry options using a dictionary.
+      - The key names must be lower case. Keys containing upper-case characters are rejected.
+      - The values must be strings. Other values are rejected.
+    type: dict
+    version_added: 10.4.0
+requirements:
+  - paramiko
+"""
+
+EXAMPLES = r"""
+- name: Add a host in the configuration
+  community.general.ssh_config:
+    user: akasurde
+    host: "example.com"
+    hostname: "github.com"
+    identity_file: "/home/akasurde/.ssh/id_rsa"
+    port: '2223'
+    state: present
+    other_options:
+      serveraliveinterval: '30'
+
+- name: Add SSH config with key auto-added to agent
+  community.general.ssh_config:
+    user: devops
+    host: "example.com"
+    hostname: "staging.example.com"
+    identity_file: "/home/devops/.ssh/id_rsa"
+    add_keys_to_agent: true
+    state: present
+
+- name: Delete a host from the configuration
+  community.general.ssh_config:
+    ssh_config_file: "{{ ssh_config_test }}"
+    host: "example.com"
+    state: absent
+"""
+
+RETURN = r"""
+hosts_added:
+  description: A list of hosts added.
+  returned: success
+  type: list
+  sample: ["example.com"]
+hosts_removed:
+  description: A list of hosts removed.
+  returned: success
+  type: list
+  sample: ["example.com"]
+hosts_changed:
+  description: A list of hosts changed.
+  returned: success
+  type: list
+  sample: ["example.com"]
+hosts_change_diff:
+  description: A list of per-host change diffs.
+ returned: on change + type: list + sample: + [ + { + "example.com": { + "new": { + "hostname": "github.com", + "identityfile": [ + "/tmp/test_ssh_config/fake_id_rsa" + ], + "port": "2224" + }, + "old": { + "hostname": "github.com", + "identityfile": [ + "/tmp/test_ssh_config/fake_id_rsa" + ], + "port": "2224" + } + } + } + ] +""" + +import os + +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR +from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file + + +def convert_bool(value): + if value is True: + return 'yes' + if value is False: + return 'no' + return None + + +def fix_bool_str(value): + if value == 'True': + return 'yes' + if value == 'False': + return 'no' + return value + + +class SSHConfig(object): + def __init__(self, module): + self.module = module + if not HAS_PARAMIKO: + module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR) + self.params = module.params + self.user = self.params.get('user') + self.group = self.params.get('group') or self.user + self.host = self.params.get('host') + self.config_file = self.params.get('ssh_config_file') + self.identity_file = self.params['identity_file'] + self.check_ssh_config_path() + try: + self.config = ConfigParser(self.config_file) + except FileNotFoundError: + self.module.fail_json(msg="Failed to find %s" % self.config_file) + self.config.load() + + def check_ssh_config_path(self): + self.config_file = determine_config_file(self.user, self.config_file) + + # See if the identity file exists or not, relative to the config file + if os.path.exists(self.config_file) and self.identity_file is not None: + dirname = os.path.dirname(self.config_file) + self.identity_file = os.path.join(dirname, self.identity_file) + + if not os.path.exists(self.identity_file): + self.module.fail_json(msg='IdentityFile %s does not exist' % self.params['identity_file']) + + def ensure_state(self): + hosts_result = self.config.search_host(self.host) + state = self.params['state'] + args = dict( + hostname=self.params.get('hostname'), + port=self.params.get('port'), + identity_file=self.params.get('identity_file'), + identities_only=convert_bool(self.params.get('identities_only')), + user=self.params.get('remote_user'), + strict_host_key_checking=self.params.get('strict_host_key_checking'), + user_known_hosts_file=self.params.get('user_known_hosts_file'), + proxycommand=self.params.get('proxycommand'), + proxyjump=self.params.get('proxyjump'), + host_key_algorithms=self.params.get('host_key_algorithms'), + forward_agent=convert_bool(self.params.get('forward_agent')), + add_keys_to_agent=convert_bool(self.params.get('add_keys_to_agent')), + controlmaster=self.params.get('controlmaster'), + controlpath=self.params.get('controlpath'), + controlpersist=fix_bool_str(self.params.get('controlpersist')), + dynamicforward=self.params.get('dynamicforward'), + ) + if self.params.get('other_options'): + for key, value in self.params.get('other_options').items(): + if key.lower() != key: + self.module.fail_json(msg="The other_options key {key!r} must be lower case".format(key=key)) + if key not in args: + if not isinstance(value, str): + self.module.fail_json(msg="The other_options value provided for key {key!r} must be a string, got 
{type}".format(key=key, + type=type(value))) + args[key] = value + else: + self.module.fail_json(msg="Multiple values provided for key {key!r}".format(key=key)) + + config_changed = False + hosts_changed = [] + hosts_change_diff = [] + hosts_removed = [] + hosts_added = [] + + hosts_result = [host for host in hosts_result if host['host'] == self.host] + + if hosts_result: + for host in hosts_result: + if state == 'absent': + # Delete host from the configuration + config_changed = True + hosts_removed.append(host['host']) + self.config.delete_host(host['host']) + else: + # Update host in the configuration + changed, options = self.change_host(host['options'], **args) + + if changed: + config_changed = True + self.config.update_host(host['host'], options) + hosts_changed.append(host['host']) + hosts_change_diff.append({ + host['host']: { + 'old': host['options'], + 'new': options, + } + }) + elif state == 'present': + changed, options = self.change_host(dict(), **args) + + if changed: + config_changed = True + hosts_added.append(self.host) + self.config.add_host(self.host, options) + + if config_changed and not self.module.check_mode: + try: + self.config.write_to_ssh_config() + except PermissionError as perm_exec: + self.module.fail_json( + msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec))) + # Make sure we set the permission + perm_mode = '0600' + if self.config_file == '/etc/ssh/ssh_config': + perm_mode = '0644' + self.module.set_mode_if_different(self.config_file, perm_mode, False) + # Make sure the file is owned by the right user and group + self.module.set_owner_if_different(self.config_file, self.user, False) + self.module.set_group_if_different(self.config_file, self.group, False) + + self.module.exit_json(changed=config_changed, + hosts_changed=hosts_changed, + hosts_removed=hosts_removed, + hosts_change_diff=hosts_change_diff, + hosts_added=hosts_added) + + @staticmethod + def change_host(options, **kwargs): + options = deepcopy(options) + changed = False + for k, v in kwargs.items(): + if '_' in k: + k = k.replace('_', '') + + if not v: + if options.get(k): + del options[k] + changed = True + elif options.get(k) != v and not (isinstance(options.get(k), list) and v in options.get(k)): + options[k] = v + changed = True + + return changed, options + + +def main(): + module = AnsibleModule( + argument_spec=dict( + group=dict(type='str'), + host=dict(type='str', required=True), + hostname=dict(type='str'), + host_key_algorithms=dict(type='str', no_log=False), + identity_file=dict(type='path'), + identities_only=dict(type='bool'), + other_options=dict(type='dict'), + port=dict(type='str'), + proxycommand=dict(type='str'), + proxyjump=dict(type='str'), + forward_agent=dict(type='bool'), + add_keys_to_agent=dict(type='bool'), + remote_user=dict(type='str'), + ssh_config_file=dict(type='path'), + state=dict(type='str', default='present', choices=['present', 'absent']), + strict_host_key_checking=dict(type='str', choices=['yes', 'no', 'ask', 'accept-new']), + controlmaster=dict(type='str', choices=['yes', 'no', 'ask', 'auto', 'autoask']), + controlpath=dict(type='str'), + controlpersist=dict(type='str'), + dynamicforward=dict(type='str'), + user=dict(type='str'), + user_known_hosts_file=dict(type='str'), + ), + supports_check_mode=True, + mutually_exclusive=[ + ['user', 'ssh_config_file'], + ['proxycommand', 'proxyjump'], + ], + ) + + ssh_config_obj = SSHConfig(module) + ssh_config_obj.ensure_state() + + +if __name__ == '__main__': + main() 
diff --git a/plugins/modules/stackdriver.py b/plugins/modules/stackdriver.py deleted file mode 120000 index 88c1e662ef..0000000000 --- a/plugins/modules/stackdriver.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/stackdriver.py \ No newline at end of file diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py deleted file mode 120000 index abd15be0c0..0000000000 --- a/plugins/modules/stacki_host.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/stacki/stacki_host.py \ No newline at end of file diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py new file mode 100644 index 0000000000..58312e8784 --- /dev/null +++ b/plugins/modules/stacki_host.py @@ -0,0 +1,282 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Hugh Ma +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: stacki_host +short_description: Add or remove host to stacki front-end +description: + - Use this module to add or remove hosts to a stacki front-end using API. + - Information on stacki can be found at U(https://github.com/StackIQ/stacki). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the host to be added to Stacki. + required: true + type: str + stacki_user: + description: + - Username for authenticating with Stacki API, but if not specified, the environment variable E(stacki_user) is used + instead. + required: true + type: str + stacki_password: + description: + - Password for authenticating with Stacki API, but if not specified, the environment variable E(stacki_password) is + used instead. + required: true + type: str + stacki_endpoint: + description: + - URL for the Stacki API Endpoint. + required: true + type: str + prim_intf_mac: + description: + - MAC Address for the primary PXE boot network interface. + - Currently not used by the module. + type: str + prim_intf_ip: + description: + - IP Address for the primary network interface. + - Currently not used by the module. + type: str + prim_intf: + description: + - Name of the primary network interface. + - Currently not used by the module. + type: str + force_install: + description: + - Set value to V(true) to force node into install state if it already exists in stacki. + type: bool + default: false + state: + description: + - Set value to the desired state for the specified host. + type: str + choices: [absent, present] + default: present + appliance: + description: + - Appliance to be used in host creation. + - Required if O(state=present) and host does not yet exist. + type: str + default: backend + rack: + description: + - Rack to be used in host creation. + - Required if O(state=present) and host does not yet exist. + type: int + default: 0 + rank: + description: + - Rank to be used in host creation. + - In Stacki terminology, the rank is the position of the machine in a rack. + - Required if O(state=present) and host does not yet exist. + type: int + default: 0 + network: + description: + - Network to be configured in the host. + - Currently not used by the module. 
+    type: str
+    default: private
+author:
+  - Hugh Ma (@bbyhuy)
+"""
+
+EXAMPLES = r"""
+- name: Add a host named test-1
+  community.general.stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    prim_intf_mac: mac_addr
+    prim_intf_ip: x.x.x.x
+    prim_intf: eth0
+
+- name: Remove a host named test-1
+  community.general.stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    state: absent
+"""
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class StackiHost(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.hostname = module.params['name']
+        self.rack = module.params['rack']
+        self.rank = module.params['rank']
+        self.appliance = module.params['appliance']
+        self.prim_intf = module.params['prim_intf']
+        self.prim_intf_ip = module.params['prim_intf_ip']
+        self.network = module.params['network']
+        self.prim_intf_mac = module.params['prim_intf_mac']
+        self.endpoint = module.params['stacki_endpoint']
+
+        auth_creds = {'USERNAME': module.params['stacki_user'],
+                      'PASSWORD': module.params['stacki_password']}
+
+        # Get Initial CSRF
+        cred_a = self.do_request(self.endpoint, method="GET")
+        cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+        init_csrftoken = None
+        for c in cookie_a:
+            if "csrftoken" in c:
+                init_csrftoken = c.replace("csrftoken=", "")
+                init_csrftoken = init_csrftoken.rstrip("\r\n")
+                break
+
+        # Make Header Dictionary with initial CSRF
+        header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+                  'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+        # Endpoint to get final authentication header
+        login_endpoint = self.endpoint + "/login"
+
+        # Get Final CSRF and Session ID
+        login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST')
+
+        cookie_f = login_req.headers.get('Set-Cookie').split(';')
+        csrftoken = None
+        sessionid = None
+        for f in cookie_f:
+            if "csrftoken" in f:
+                csrftoken = f.replace("csrftoken=", "")
+            if "sessionid" in f:
+                sessionid = f.split("sessionid=", 1)[-1]
+                sessionid = sessionid.rstrip("\r\n")
+
+        self.header = {'csrftoken': csrftoken,
+                       'X-CSRFToken': csrftoken,
+                       'sessionid': sessionid,
+                       'Content-type': 'application/json',
+                       'Cookie': login_req.headers.get('Set-Cookie')}
+
+    def do_request(self, url, payload=None, headers=None, method=None):
+        res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method)
+
+        if info['status'] != 200:
+            self.module.fail_json(changed=False, msg=info['msg'])
+
+        return res
+
+    def stack_check_host(self):
+        res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+        return self.hostname in res.read().decode()
+
+    def stack_sync(self):
+        self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+        self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+    def stack_force_install(self, result):
+        data = {'cmd': "set host boot {0} action=install".format(self.hostname)}
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+        changed = True
+
+        self.stack_sync()
+
+        result['changed'] = changed
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_add(self, result):
+        data = dict()
+
+        data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+            .format(self.hostname, self.rack, self.rank, self.appliance)
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_remove(self, result):
+        data = dict()
+
+        data['cmd'] = "remove host {0}"\
+            .format(self.hostname)
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            name=dict(type='str', required=True),
+            rack=dict(type='int', default=0),
+            rank=dict(type='int', default=0),
+            appliance=dict(type='str', default='backend'),
+            prim_intf=dict(type='str'),
+            prim_intf_ip=dict(type='str'),
+            network=dict(type='str', default='private'),
+            prim_intf_mac=dict(type='str'),
+            stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])),
+            stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True),
+            stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])),
+            force_install=dict(type='bool', default=False),
+        ),
+        supports_check_mode=False,
+    )
+
+    result = {'changed': False}
+    missing_params = list()
+
+    stacki = StackiHost(module)
+    host_exists = stacki.stack_check_host()
+
+    # If state is present and the host exists, the force_install flag is needed to put the host back into the install state
+    if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+        stacki.stack_force_install(result)
+    # If state is present and the host exists but force_install is false, do nothing
+    elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+        result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+            .format(module.params['name'])
+    # Otherwise, state is present but the host does not exist yet; more parameters are required to add it
+    elif module.params['state'] == 'present' and not host_exists:
+        for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']:
+            if module.params[param] is None:
+                missing_params.append(param)
+        if len(missing_params) > 0:
+            module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+        stacki.stack_add(result)
+    # If state is absent and the host exists, remove it.
+    elif module.params['state'] == 'absent' and host_exists:
+        stacki.stack_remove(result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/statsd.py b/plugins/modules/statsd.py
deleted file mode 120000
index a906f4df1a..0000000000
--- a/plugins/modules/statsd.py
+++ /dev/null
@@ -1 +0,0 @@
-monitoring/statsd.py
\ No newline at end of file
diff --git a/plugins/modules/statsd.py b/plugins/modules/statsd.py
new file mode 100644
index 0000000000..c127cd42f1
--- /dev/null
+++ b/plugins/modules/statsd.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: statsd
+short_description: Send metrics to StatsD
+version_added: 2.1.0
+description:
+  - The C(statsd) module sends metrics to StatsD.
+  - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/).
+  - Supported metric types are V(counter) and V(gauge). Currently unsupported metric types are V(timer), V(set), and V(gaugedelta).
+author: "Mark Mercado (@mamercad)"
+requirements:
+  - statsd
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    type: str
+    description:
+      - State of the check, only V(present) makes sense.
+    choices: ["present"]
+    default: present
+  host:
+    type: str
+    default: localhost
+    description:
+      - StatsD host (hostname or IP) to send metrics to.
+  port:
+    type: int
+    default: 8125
+    description:
+      - The port on O(host) that StatsD is listening on.
+  protocol:
+    type: str
+    default: udp
+    choices: ["udp", "tcp"]
+    description:
+      - The transport protocol to send metrics over.
+  timeout:
+    type: float
+    default: 1.0
+    description:
+      - Sender timeout, only applicable if O(protocol) is V(tcp).
+  metric:
+    type: str
+    required: true
+    description:
+      - The name of the metric.
+  metric_type:
+    type: str
+    required: true
+    choices: ["counter", "gauge"]
+    description:
+      - The type of metric.
+  metric_prefix:
+    type: str
+    description:
+      - The prefix to add to the metric.
+    default: ''
+  value:
+    type: int
+    required: true
+    description:
+      - The value of the metric.
+  delta:
+    type: bool
+    default: false
+    description:
+      - If the metric is of type V(gauge), change the value by O(delta).
+""" + +EXAMPLES = r""" +- name: Increment the metric my_counter by 1 + community.general.statsd: + host: localhost + port: 9125 + protocol: tcp + metric: my_counter + metric_type: counter + value: 1 + +- name: Set the gauge my_gauge to 7 + community.general.statsd: + host: localhost + port: 9125 + protocol: tcp + metric: my_gauge + metric_type: gauge + value: 7 +""" + + +from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) + +try: + from statsd import StatsClient, TCPStatsClient + HAS_STATSD = True +except ImportError: + HAS_STATSD = False + + +def udp_statsd_client(**client_params): + return StatsClient(**client_params) + + +def tcp_statsd_client(**client_params): + return TCPStatsClient(**client_params) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present']), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=8125), + protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), + timeout=dict(type='float', default=1.0), + metric=dict(type='str', required=True), + metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), + metric_prefix=dict(type='str', default=''), + value=dict(type='int', required=True), + delta=dict(type='bool', default=False), + ), + supports_check_mode=False + ) + + if not HAS_STATSD: + module.fail_json(msg=missing_required_lib('statsd')) + + host = module.params.get('host') + port = module.params.get('port') + protocol = module.params.get('protocol') + timeout = module.params.get('timeout') + metric = module.params.get('metric') + metric_type = module.params.get('metric_type') + metric_prefix = module.params.get('metric_prefix') + value = module.params.get('value') + delta = module.params.get('delta') + + if protocol == 'udp': + client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) + elif protocol == 'tcp': + client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) + + metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric + metric_display_value = '%s (delta=%s)' % (value, delta) if metric_type == 'gauge' else value + + try: + if metric_type == 'counter': + client.incr(metric, value) + elif metric_type == 'gauge': + client.gauge(metric, value, delta=delta) + + except Exception as exc: + module.fail_json(msg='Failed sending to StatsD %s' % str(exc)) + + finally: + if protocol == 'tcp': + client.close() + + module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py deleted file mode 120000 index 9c69cd1360..0000000000 --- a/plugins/modules/statusio_maintenance.py +++ /dev/null @@ -1 +0,0 @@ -./monitoring/statusio_maintenance.py \ No newline at end of file diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py new file mode 100644 index 0000000000..43921488d9 --- /dev/null +++ b/plugins/modules/statusio_maintenance.py @@ -0,0 +1,463 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Benjamin Copeland (@bhcopeland) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: statusio_maintenance +short_description: 
Create maintenance windows for your status.io dashboard
+description:
+  - Creates or deletes a maintenance window for status.io.
+notes:
+  - You can use the apiary API URL (U(http://docs.statusio.apiary.io/)) to capture API traffic.
+  - Use O(start_date) and O(start_time) with O(minutes) to set a future maintenance window.
+author: Benjamin Copeland (@bhcopeland)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  title:
+    type: str
+    description:
+      - A descriptive title for the maintenance window.
+    default: "A new maintenance window"
+  desc:
+    type: str
+    description:
+      - Message describing the maintenance window.
+    default: "Created by Ansible"
+  state:
+    type: str
+    description:
+      - Desired state of the maintenance window.
+    default: "present"
+    choices: ["present", "absent"]
+  api_id:
+    type: str
+    description:
+      - Your unique API ID from status.io.
+    required: true
+  api_key:
+    type: str
+    description:
+      - Your unique API Key from status.io.
+    required: true
+  statuspage:
+    type: str
+    description:
+      - Your unique StatusPage ID from status.io.
+    required: true
+  url:
+    type: str
+    description:
+      - Status.io API URL. A private apiary can be used instead.
+    default: "https://api.status.io"
+  components:
+    type: list
+    elements: str
+    description:
+      - The given name of your component (server name).
+    aliases: ['component']
+  containers:
+    type: list
+    elements: str
+    description:
+      - The given name of your container (data center).
+    aliases: ['container']
+  all_infrastructure_affected:
+    description:
+      - If it affects all components and containers.
+    type: bool
+    default: false
+  automation:
+    description:
+      - Automatically start and end the maintenance window.
+    type: bool
+    default: false
+  maintenance_notify_now:
+    description:
+      - Notify subscribers now.
+    type: bool
+    default: false
+  maintenance_notify_72_hr:
+    description:
+      - Notify subscribers 72 hours before maintenance start time.
+    type: bool
+    default: false
+  maintenance_notify_24_hr:
+    description:
+      - Notify subscribers 24 hours before maintenance start time.
+    type: bool
+    default: false
+  maintenance_notify_1_hr:
+    description:
+      - Notify subscribers 1 hour before maintenance start time.
+    type: bool
+    default: false
+  maintenance_id:
+    type: str
+    description:
+      - The maintenance ID number when deleting a maintenance window.
+  minutes:
+    type: int
+    description:
+      - The duration of the maintenance window (starting from playbook runtime).
+    default: 10
+  start_date:
+    type: str
+    description:
+      - Date maintenance is expected to start (Month/Day/Year) (UTC).
+      - End Date is worked out from O(start_date) + O(minutes).
+  start_time:
+    type: str
+    description:
+      - Time maintenance is expected to start (Hour:Minutes) (UTC).
+      - End Time is worked out from O(start_time) + O(minutes).
+""" + +EXAMPLES = r""" +- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance + community.general.statusio_maintenance: + title: Router Upgrade from ansible + desc: Performing a Router Upgrade + components: server1.example.com + api_id: api_id + api_key: api_key + statuspage: statuspage_id + maintenance_notify_1_hr: true + automation: true + +- name: Create a maintenance window for 60 minutes on server1 and server2 + community.general.statusio_maintenance: + title: Routine maintenance + desc: Some security updates + components: + - server1.example.com + - server2.example.com + minutes: 60 + api_id: api_id + api_key: api_key + statuspage: statuspage_id + maintenance_notify_1_hr: true + automation: true + delegate_to: localhost + +- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center + community.general.statusio_maintenance: + title: Data center downtime + desc: Performing a Upgrade to our data center + components: Primary Data Center + api_id: api_id + api_key: api_key + statuspage: statuspage_id + start_date: 01/01/2016 + start_time: 12:00 + minutes: 1440 + +- name: Delete a maintenance window + community.general.statusio_maintenance: + title: Remove a maintenance window + maintenance_id: 561f90faf74bc94a4700087b + statuspage: statuspage_id + api_id: api_id + api_key: api_key + state: absent +""" +# TODO: Add RETURN documentation. +RETURN = """ # """ + +import datetime +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import open_url + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + + +def get_api_auth_headers(api_id, api_key, url, statuspage): + + headers = { + "x-api-id": api_id, + "x-api-key": api_key, + "Content-Type": "application/json" + } + + try: + response = open_url( + url + "/v2/component/list/" + statuspage, headers=headers) + data = json.loads(response.read()) + if data['status']['message'] == 'Authentication failed': + return 1, None, None, "Authentication failed: " \ + "Check api_id/api_key and statuspage id." 
+ else: + auth_headers = headers + auth_content = data + except Exception as e: + return 1, None, None, to_native(e) + return 0, auth_headers, auth_content, None + + +def get_component_ids(auth_content, components): + host_ids = [] + lower_components = [x.lower() for x in components] + for result in auth_content["result"]: + if result['name'].lower() in lower_components: + data = { + "component_id": result["_id"], + "container_id": result["containers"][0]["_id"] + } + host_ids.append(data) + lower_components.remove(result['name'].lower()) + if len(lower_components): + # items not found in the api + return 1, None, lower_components + return 0, host_ids, None + + +def get_container_ids(auth_content, containers): + host_ids = [] + lower_containers = [x.lower() for x in containers] + for result in auth_content["result"]: + if result["containers"][0]["name"].lower() in lower_containers: + data = { + "component_id": result["_id"], + "container_id": result["containers"][0]["_id"] + } + host_ids.append(data) + lower_containers.remove(result["containers"][0]["name"].lower()) + + if len(lower_containers): + # items not found in the api + return 1, None, lower_containers + return 0, host_ids, None + + +def get_date_time(start_date, start_time, minutes): + returned_date = [] + if start_date and start_time: + try: + datetime.datetime.strptime(start_date, '%m/%d/%Y') + returned_date.append(start_date) + except (NameError, ValueError): + return 1, None, "Not a valid start_date format." + try: + datetime.datetime.strptime(start_time, '%H:%M') + returned_date.append(start_time) + except (NameError, ValueError): + return 1, None, "Not a valid start_time format." + try: + # Work out end date/time based on minutes + date_time_start = datetime.datetime.strptime( + start_time + start_date, '%H:%M%m/%d/%Y') + delta = date_time_start + datetime.timedelta(minutes=minutes) + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + except (NameError, ValueError): + return 1, None, "Couldn't work out a valid date" + else: + now_t = now() + delta = now_t + datetime.timedelta(minutes=minutes) + # start_date + returned_date.append(now_t.strftime("%m/%d/%Y")) + returned_date.append(now_t.strftime("%H:%M")) + # end_date + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + return 0, returned_date, None + + +def create_maintenance(auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, title, desc, + returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr): + component_id = [] + container_id = [] + for val in host_ids: + component_id.append(val['component_id']) + container_id.append(val['container_id']) + infrastructure_id = [i + '-' + j for i, j in zip(component_id, container_id)] + try: + values = json.dumps({ + "statuspage_id": statuspage, + "all_infrastructure_affected": str(int(all_infrastructure_affected)), + "infrastructure_affected": infrastructure_id, + "automation": str(int(automation)), + "maintenance_name": title, + "maintenance_details": desc, + "date_planned_start": returned_date[0], + "time_planned_start": returned_date[1], + "date_planned_end": returned_date[2], + "time_planned_end": returned_date[3], + "maintenance_notify_now": str(int(maintenance_notify_now)), + "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), + "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), + "maintenance_notify_1_hr": 
str(int(maintenance_notify_1_hr)) + }) + response = open_url( + url + "/v2/maintenance/schedule", data=values, + headers=auth_headers) + data = json.loads(response.read()) + + if data["status"]["error"] == "yes": + return 1, None, data["status"]["message"] + except Exception as e: + return 1, None, to_native(e) + return 0, None, None + + +def delete_maintenance(auth_headers, url, statuspage, maintenance_id): + try: + values = json.dumps({ + "statuspage_id": statuspage, + "maintenance_id": maintenance_id, + }) + response = open_url( + url=url + "/v2/maintenance/delete", + data=values, + headers=auth_headers) + data = json.loads(response.read()) + if data["status"]["error"] == "yes": + return 1, None, "Invalid maintenance_id" + except Exception as e: + return 1, None, to_native(e) + return 0, None, None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_id=dict(required=True), + api_key=dict(required=True, no_log=True), + statuspage=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + url=dict(default='https://api.status.io'), + components=dict(type='list', elements='str', aliases=['component']), + containers=dict(type='list', elements='str', aliases=['container']), + all_infrastructure_affected=dict(type='bool', default=False), + automation=dict(type='bool', default=False), + title=dict(default='A new maintenance window'), + desc=dict(default='Created by Ansible'), + minutes=dict(type='int', default=10), + maintenance_notify_now=dict(type='bool', default=False), + maintenance_notify_72_hr=dict(type='bool', default=False), + maintenance_notify_24_hr=dict(type='bool', default=False), + maintenance_notify_1_hr=dict(type='bool', default=False), + maintenance_id=dict(), + start_date=dict(), + start_time=dict() + ), + supports_check_mode=True, + ) + + api_id = module.params['api_id'] + api_key = module.params['api_key'] + statuspage = module.params['statuspage'] + state = module.params['state'] + url = module.params['url'] + components = module.params['components'] + containers = module.params['containers'] + all_infrastructure_affected = module.params['all_infrastructure_affected'] + automation = module.params['automation'] + title = module.params['title'] + desc = module.params['desc'] + minutes = module.params['minutes'] + maintenance_notify_now = module.params['maintenance_notify_now'] + maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] + maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] + maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] + maintenance_id = module.params['maintenance_id'] + start_date = module.params['start_date'] + start_time = module.params['start_time'] + + if state == "present": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + auth_content = {} + + if minutes or start_time and start_date: + (rc, returned_date, error) = get_date_time( + start_date, start_time, minutes) + if rc != 0: + module.fail_json(msg="Failed to set date/time: %s" % error) + + if not components and not containers: + return module.fail_json(msg="A Component or Container must be " + "defined") + elif components and containers: + return module.fail_json(msg="Components and containers cannot " + "be used together") + else: + if components: + (rc, host_ids, error) = get_component_ids(auth_content, + components) + if 
rc != 0: + module.fail_json(msg="Failed to find component %s" % error) + + if containers: + (rc, host_ids, error) = get_container_ids(auth_content, + containers) + if rc != 0: + module.fail_json(msg="Failed to find container %s" % error) + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, dummy, error) = create_maintenance( + auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, + title, desc, returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr) + if rc == 0: + module.exit_json(changed=True, result="Successfully created " + "maintenance") + else: + module.fail_json(msg="Failed to create maintenance: %s" + % error) + + if state == "absent": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, dummy, error) = delete_maintenance( + auth_headers, url, statuspage, maintenance_id) + if rc == 0: + module.exit_json( + changed=True, + result="Successfully deleted maintenance" + ) + else: + module.fail_json( + msg="Failed to delete maintenance: %s" % error) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py deleted file mode 100644 index 20977687fc..0000000000 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: emc_vnx_sg_member - -short_description: Manage storage group member on EMC VNX - - -description: - - "This module manages the members of an existing storage group." - -extends_documentation_fragment: -- community.general.emc.emc_vnx - - -options: - name: - description: - - Name of the Storage group to manage. - required: true - type: str - lunid: - description: - - Lun id to be added. - required: true - type: int - state: - description: - - Indicates the desired lunid state. - - C(present) ensures specified lunid is present in the Storage Group. - - C(absent) ensures specified lunid is absent from Storage Group. - default: present - choices: [ "present", "absent"] - type: str - - -author: - - Luca 'remix_tj' Lorenzetto (@remixtj) -''' - -EXAMPLES = ''' -- name: Add lun to storage group - community.general.emc_vnx_sg_member: - name: sg01 - sp_address: sp1a.fqdn - sp_user: sysadmin - sp_password: sysadmin - lunid: 100 - state: present - -- name: Remove lun from storage group - community.general.emc_vnx_sg_member: - name: sg01 - sp_address: sp1a.fqdn - sp_user: sysadmin - sp_password: sysadmin - lunid: 100 - state: absent -''' - -RETURN = ''' -hluid: - description: LUNID that hosts attached to the storage group will see. 
- type: int - returned: success -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec - -LIB_IMP_ERR = None -try: - from storops import VNXSystem - from storops.exception import VNXCredentialError, VNXStorageGroupError, \ - VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError - HAS_LIB = True -except Exception: - LIB_IMP_ERR = traceback.format_exc() - HAS_LIB = False - - -def run_module(): - module_args = dict( - name=dict(type='str', required=True), - lunid=dict(type='int', required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - - module_args.update(emc_vnx_argument_spec) - - result = dict( - changed=False, - hluid=None - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - - if not HAS_LIB: - module.fail_json(msg=missing_required_lib('storops >= 0.5.10'), - exception=LIB_IMP_ERR) - - sp_user = module.params['sp_user'] - sp_address = module.params['sp_address'] - sp_password = module.params['sp_password'] - alu = module.params['lunid'] - - # if the user is working with this module in only check mode we do not - # want to make any changes to the environment, just return the current - # state with no modifications - if module.check_mode: - return result - - try: - vnx = VNXSystem(sp_address, sp_user, sp_password) - sg = vnx.get_sg(module.params['name']) - if sg.existed: - if module.params['state'] == 'present': - if not sg.has_alu(alu): - try: - result['hluid'] = sg.attach_alu(alu) - result['changed'] = True - except VNXAluAlreadyAttachedError: - result['hluid'] = sg.get_hlu(alu) - except (VNXAttachAluError, VNXStorageGroupError) as e: - module.fail_json(msg='Error attaching {0}: ' - '{1} '.format(alu, to_native(e)), - **result) - else: - result['hluid'] = sg.get_hlu(alu) - if module.params['state'] == 'absent' and sg.has_alu(alu): - try: - sg.detach_alu(alu) - result['changed'] = True - except VNXDetachAluNotFoundError: - # being not attached when using absent is OK - pass - except VNXStorageGroupError as e: - module.fail_json(msg='Error detaching alu {0}: ' - '{1} '.format(alu, to_native(e)), - **result) - else: - module.fail_json(msg='No such storage group named ' - '{0}'.format(module.params['name']), - **result) - except VNXCredentialError as e: - module.fail_json(msg='{0}'.format(to_native(e)), **result) - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/plugins/modules/storage/hpe3par/ss_3par_cpg.py deleted file mode 100644 index be4a6a02a2..0000000000 --- a/plugins/modules/storage/hpe3par/ss_3par_cpg.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -short_description: Manage HPE StoreServ 3PAR CPG -author: - - Farhan Nomani (@farhan7500) - - Gautham P Hegde (@gautamphegde) -description: - - Create and delete CPG on HPE 3PAR. -module: ss_3par_cpg -options: - cpg_name: - description: - - Name of the CPG. 
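-    # CPG = Common Provisioning Group, the pool that logical disks are
-    # auto-grown from (see the growth_* options below).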
- type: str - required: true - disk_type: - choices: - - FC - - NL - - SSD - description: - - Specifies that physical disks must have the specified device type. - type: str - domain: - description: - - Specifies the name of the domain in which the object will reside. - type: str - growth_increment: - description: - - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage - created on each auto-grow operation. - type: str - growth_limit: - description: - - Specifies that the autogrow operation is limited to the specified - storage amount that sets the growth limit(in MiB, GiB or TiB). - type: str - growth_warning: - description: - - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded - results in a warning alert. - type: str - high_availability: - choices: - - PORT - - CAGE - - MAG - description: - - Specifies that the layout must support the failure of one port pair, - one cage, or one magazine. - type: str - raid_type: - choices: - - R0 - - R1 - - R5 - - R6 - description: - - Specifies the RAID type for the logical disk. - type: str - set_size: - description: - - Specifies the set size in the number of chunklets. - type: int - state: - choices: - - present - - absent - description: - - Whether the specified CPG should exist or not. - required: true - type: str - secure: - description: - - Specifies whether the certificate needs to be validated while communicating. - type: bool - default: no -extends_documentation_fragment: -- community.general.hpe3par - -''' - - -EXAMPLES = r''' -- name: Create CPG sample_cpg - community.general.ss_3par_cpg: - storage_system_ip: 10.10.10.1 - storage_system_username: username - storage_system_password: password - state: present - cpg_name: sample_cpg - domain: sample_domain - growth_increment: 32000 MiB - growth_limit: 64000 MiB - growth_warning: 48000 MiB - raid_type: R6 - set_size: 8 - high_availability: MAG - disk_type: FC - secure: no - -- name: Delete CPG sample_cpg - community.general.ss_3par_cpg: - storage_system_ip: 10.10.10.1 - storage_system_username: username - storage_system_password: password - state: absent - cpg_name: sample_cpg - secure: no -''' - -RETURN = r''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par -try: - from hpe3par_sdk import client - from hpe3parclient import exceptions - HAS_3PARCLIENT = True -except ImportError: - HAS_3PARCLIENT = False - - -def validate_set_size(raid_type, set_size): - if raid_type: - set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes'] - if set_size in set_size_array: - return True - return False - - -def cpg_ldlayout_map(ldlayout_dict): - if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']: - ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[ - ldlayout_dict['RAIDType']]['raid_value'] - if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']: - ldlayout_dict['HA'] = getattr( - client.HPE3ParClient, ldlayout_dict['HA']) - return ldlayout_dict - - -def create_cpg( - client_obj, - cpg_name, - domain, - growth_increment, - growth_limit, - growth_warning, - raid_type, - set_size, - high_availability, - disk_type): - try: - if not validate_set_size(raid_type, set_size): - return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type)) - if not client_obj.cpgExists(cpg_name): - - disk_patterns = [] - if disk_type: - disk_type = getattr(client.HPE3ParClient, disk_type) 
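-                # getattr swaps the validated disk_type string (for example
-                # 'FC') for the matching HPE3ParClient constant before it is
-                # wrapped in the diskPatterns entry of the LDLayout below.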
- disk_patterns = [{'diskType': disk_type}] - ld_layout = { - 'RAIDType': raid_type, - 'setSize': set_size, - 'HA': high_availability, - 'diskPatterns': disk_patterns} - ld_layout = cpg_ldlayout_map(ld_layout) - if growth_increment is not None: - growth_increment = hpe3par.convert_to_binary_multiple( - growth_increment) - if growth_limit is not None: - growth_limit = hpe3par.convert_to_binary_multiple( - growth_limit) - if growth_warning is not None: - growth_warning = hpe3par.convert_to_binary_multiple( - growth_warning) - optional = { - 'domain': domain, - 'growthIncrementMiB': growth_increment, - 'growthLimitMiB': growth_limit, - 'usedLDWarningAlertMiB': growth_warning, - 'LDLayout': ld_layout} - client_obj.createCPG(cpg_name, optional) - else: - return (True, False, "CPG already present") - except exceptions.ClientException as e: - return (False, False, "CPG creation failed | %s" % (e)) - return (True, True, "Created CPG %s successfully." % cpg_name) - - -def delete_cpg( - client_obj, - cpg_name): - try: - if client_obj.cpgExists(cpg_name): - client_obj.deleteCPG(cpg_name) - else: - return (True, False, "CPG does not exist") - except exceptions.ClientException as e: - return (False, False, "CPG delete failed | %s" % e) - return (True, True, "Deleted CPG %s successfully." % cpg_name) - - -def main(): - module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(), - required_together=[['raid_type', 'set_size']]) - if not HAS_3PARCLIENT: - module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)') - - if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31: - module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters") - - storage_system_ip = module.params["storage_system_ip"] - storage_system_username = module.params["storage_system_username"] - storage_system_password = module.params["storage_system_password"] - cpg_name = module.params["cpg_name"] - domain = module.params["domain"] - growth_increment = module.params["growth_increment"] - growth_limit = module.params["growth_limit"] - growth_warning = module.params["growth_warning"] - raid_type = module.params["raid_type"] - set_size = module.params["set_size"] - high_availability = module.params["high_availability"] - disk_type = module.params["disk_type"] - secure = module.params["secure"] - - wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip - try: - client_obj = client.HPE3ParClient(wsapi_url, secure) - except exceptions.SSLCertFailed: - module.fail_json(msg="SSL Certificate Failed") - except exceptions.ConnectionError: - module.fail_json(msg="Connection Error") - except exceptions.UnsupportedVersion: - module.fail_json(msg="Unsupported WSAPI version") - except Exception as e: - module.fail_json(msg="Initializing client failed. 
%s" % e) - - if storage_system_username is None or storage_system_password is None: - module.fail_json(msg="Storage system username or password is None") - if cpg_name is None: - module.fail_json(msg="CPG Name is None") - - # States - if module.params["state"] == "present": - try: - client_obj.login(storage_system_username, storage_system_password) - return_status, changed, msg = create_cpg( - client_obj, - cpg_name, - domain, - growth_increment, - growth_limit, - growth_warning, - raid_type, - set_size, - high_availability, - disk_type - ) - except Exception as e: - module.fail_json(msg="CPG create failed | %s" % e) - finally: - client_obj.logout() - - elif module.params["state"] == "absent": - try: - client_obj.login(storage_system_username, storage_system_password) - return_status, changed, msg = delete_cpg( - client_obj, - cpg_name - ) - except Exception as e: - module.fail_json(msg="CPG create failed | %s" % e) - finally: - client_obj.logout() - - if return_status: - module.exit_json(changed=changed, msg=msg) - else: - module.fail_json(msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/ibm/ibm_sa_domain.py b/plugins/modules/storage/ibm/ibm_sa_domain.py deleted file mode 100644 index 9c5e6c50c8..0000000000 --- a/plugins/modules/storage/ibm/ibm_sa_domain.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_domain -short_description: Manages domains on IBM Spectrum Accelerate Family storage systems - -description: - - "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems." - -options: - domain: - description: - - Name of the domain to be managed. - required: true - type: str - state: - description: - - The desired state of the domain. - default: "present" - choices: [ "present", "absent" ] - type: str - ldap_id: - description: - - ldap id to add to the domain. - required: false - type: str - size: - description: - - Size of the domain. - required: false - type: str - hard_capacity: - description: - - Hard capacity of the domain. - required: false - type: str - soft_capacity: - description: - - Soft capacity of the domain. - required: false - type: str - max_cgs: - description: - - Number of max cgs. - required: false - type: str - max_dms: - description: - - Number of max dms. - required: false - type: str - max_mirrors: - description: - - Number of max_mirrors. - required: false - type: str - max_pools: - description: - - Number of max_pools. - required: false - type: str - max_volumes: - description: - - Number of max_volumes. - required: false - type: str - perf_class: - description: - - Add the domain to a performance class. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Define new domain. - community.general.ibm_sa_domain: - domain: domain_name - size: domain_size - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete domain. 
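-  # Deletion needs only the domain name plus credentials; the capacity and
-  # limit options documented above apply when creating a domain.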
- community.general.ibm_sa_domain: - domain: domain_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -msg: - description: module return status. - returned: as needed - type: str - sample: "domain 'domain_name' created successfully." -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - domain=dict(required=True), - size=dict(), - max_dms=dict(), - max_cgs=dict(), - ldap_id=dict(), - max_mirrors=dict(), - max_pools=dict(), - max_volumes=dict(), - perf_class=dict(), - hard_capacity=dict(), - soft_capacity=dict() - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - domain = xcli_client.cmd.domain_list( - domain=module.params['domain']).as_single_element - state = module.params['state'] - - state_changed = False - msg = 'Domain \'{0}\''.format(module.params['domain']) - if state == 'present' and not domain: - state_changed = execute_pyxcli_command( - module, 'domain_create', xcli_client) - msg += " created successfully." - elif state == 'absent' and domain: - state_changed = execute_pyxcli_command( - module, 'domain_delete', xcli_client) - msg += " deleted successfully." - else: - msg += " state unchanged." - - module.exit_json(changed=state_changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/ibm/ibm_sa_host.py b/plugins/modules/storage/ibm/ibm_sa_host.py deleted file mode 100644 index 27a7287f8a..0000000000 --- a/plugins/modules/storage/ibm/ibm_sa_host.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_host -short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. - -description: - - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems." - -options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host state. - default: "present" - choices: [ "present", "absent" ] - type: str - cluster: - description: - - The name of the cluster to include the host. - required: false - type: str - domain: - description: - - The domains the cluster will be attached to. - To include more than one domain, - separate domain names with commas. - To include all existing domains, use an asterisk ("*"). - required: false - type: str - iscsi_chap_name: - description: - - The host's CHAP name identifier - required: false - type: str - iscsi_chap_secret: - description: - - The password of the initiator used to - authenticate to the system when CHAP is enable - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Define new host. 
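-  # cluster, domain and the iscsi_chap_* options documented above can be
-  # added here when defining the host; the delete task below needs only
-  # the host name and credentials.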
- community.general.ibm_sa_host: - host: host_name - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete host. - community.general.ibm_sa_host: - host: host_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - host=dict(required=True), - cluster=dict(), - domain=dict(), - iscsi_chap_name=dict(), - iscsi_chap_secret=dict(no_log=True), - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - host = xcli_client.cmd.host_list( - host=module.params['host']).as_single_element - state = module.params['state'] - - state_changed = False - if state == 'present' and not host: - state_changed = execute_pyxcli_command( - module, 'host_define', xcli_client) - elif state == 'absent' and host: - state_changed = execute_pyxcli_command( - module, 'host_delete', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/plugins/modules/storage/ibm/ibm_sa_host_ports.py deleted file mode 100644 index 32daa9f3c7..0000000000 --- a/plugins/modules/storage/ibm/ibm_sa_host_ports.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_host_ports -short_description: Add host ports on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module adds ports to or removes them from the hosts - on IBM Spectrum Accelerate Family storage systems." - -options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host ports state. - default: "present" - choices: [ "present", "absent" ] - type: str - iscsi_name: - description: - - iSCSI initiator name. - required: false - type: str - fcaddress: - description: - - Fiber channel address. - required: false - type: str - num_of_visible_targets: - description: - - Number of visible targets. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Add ports for host. - community.general.ibm_sa_host_ports: - host: test_host - iscsi_name: iqn.1994-05.com*** - username: admin - password: secret - endpoints: hostdev-system - state: present - -- name: Remove ports for host. 
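-  # For Fibre Channel ports, fcaddress would replace iscsi_name here (see
-  # the options above); removal matches whichever port identifier is given.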
- community.general.ibm_sa_host_ports: - host: test_host - iscsi_name: iqn.1994-05.com*** - username: admin - password: secret - endpoints: hostdev-system - state: absent - -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, - spectrum_accelerate_spec, is_pyxcli_installed) - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - host=dict(required=True), - iscsi_name=dict(), - fcaddress=dict(), - num_of_visible_targets=dict() - ) - ) - - module = AnsibleModule(argument_spec) - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - # required args - ports = [] - try: - ports = xcli_client.cmd.host_list_ports( - host=module.params.get('host')).as_list - except Exception: - pass - state = module.params['state'] - port_exists = False - ports = [port.get('port_name') for port in ports] - - fc_ports = (module.params.get('fcaddress') - if module.params.get('fcaddress') else []) - iscsi_ports = (module.params.get('iscsi_name') - if module.params.get('iscsi_name') else []) - for port in ports: - if port in iscsi_ports or port in fc_ports: - port_exists = True - break - state_changed = False - if state == 'present' and not port_exists: - state_changed = execute_pyxcli_command( - module, 'host_add_port', xcli_client) - if state == 'absent' and port_exists: - state_changed = execute_pyxcli_command( - module, 'host_remove_port', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/ibm/ibm_sa_pool.py b/plugins/modules/storage/ibm/ibm_sa_pool.py deleted file mode 100644 index 67c963ace1..0000000000 --- a/plugins/modules/storage/ibm/ibm_sa_pool.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_pool -short_description: Handles pools on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems" - -options: - pool: - description: - - Pool name. - required: true - type: str - state: - description: - - Pool state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Pool size in GB - required: false - type: str - snapshot_size: - description: - - Pool snapshot size in GB - required: false - type: str - domain: - description: - - Adds the pool to the specified domain. - required: false - type: str - perf_class: - description: - - Assigns a perf_class to the pool. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Create new pool. - community.general.ibm_sa_pool: - name: pool_name - size: 300 - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete pool. 
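-  # size and snapshot_size (GB) only matter when creating the pool, as in
-  # the task above; deletion is keyed on the pool name alone.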
- community.general.ibm_sa_pool: - name: pool_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - pool=dict(required=True), - size=dict(), - snapshot_size=dict(), - domain=dict(), - perf_class=dict() - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - pool = xcli_client.cmd.pool_list( - pool=module.params['pool']).as_single_element - state = module.params['state'] - - state_changed = False - if state == 'present' and not pool: - state_changed = execute_pyxcli_command( - module, 'pool_create', xcli_client) - if state == 'absent' and pool: - state_changed = execute_pyxcli_command( - module, 'pool_delete', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/ibm/ibm_sa_vol.py b/plugins/modules/storage/ibm/ibm_sa_vol.py deleted file mode 100644 index 7820d26828..0000000000 --- a/plugins/modules/storage/ibm/ibm_sa_vol.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_vol -short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems." - -options: - vol: - description: - - Volume name. - required: true - type: str - pool: - description: - - Volume pool. - required: false - type: str - state: - description: - - Volume state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Volume size. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Create a new volume. - community.general.ibm_sa_vol: - vol: volume_name - pool: pool_name - size: 17 - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete an existing volume. 
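-  # pool and size are create-time options, as in the task above; deleting
-  # a volume is keyed on the volume name alone.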
- community.general.ibm_sa_vol: - vol: volume_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - vol=dict(required=True), - pool=dict(), - size=dict() - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - # required args - volume = xcli_client.cmd.vol_list( - vol=module.params.get('vol')).as_single_element - state = module.params['state'] - - state_changed = False - if state == 'present' and not volume: - state_changed = execute_pyxcli_command( - module, 'vol_create', xcli_client) - elif state == 'absent' and volume: - state_changed = execute_pyxcli_command( - module, 'vol_delete', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/plugins/modules/storage/ibm/ibm_sa_vol_map.py deleted file mode 100644 index b449ba8de4..0000000000 --- a/plugins/modules/storage/ibm/ibm_sa_vol_map.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_vol_map -short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module maps volumes to or unmaps them from the hosts on - IBM Spectrum Accelerate Family storage systems." - -options: - vol: - description: - - Volume name. - required: true - type: str - state: - default: "present" - choices: [ "present", "absent" ] - description: - - When the state is present the volume is mapped. - When the state is absent, the volume is meant to be unmapped. - type: str - - cluster: - description: - - Maps the volume to a cluster. - required: false - type: str - host: - description: - - Maps the volume to a host. - required: false - type: str - lun: - description: - - The LUN identifier. - required: false - type: str - override: - description: - - Overrides the existing volume mapping. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Map volume to host. - community.general.ibm_sa_vol_map: - vol: volume_name - lun: 1 - host: host_name - username: admin - password: secret - endpoints: hostdev-system - state: present - -- name: Map volume to cluster. - community.general.ibm_sa_vol_map: - vol: volume_name - lun: 1 - cluster: cluster_name - username: admin - password: secret - endpoints: hostdev-system - state: present - -- name: Unmap volume. 
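-  # Note: vol is required by the argument spec below, so a working unmap
-  # task must also set vol: volume_name.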
- community.general.ibm_sa_vol_map: - host: host_name - username: admin - password: secret - endpoints: hostdev-system - state: absent -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed) - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - vol=dict(required=True), - lun=dict(), - cluster=dict(), - host=dict(), - override=dict() - ) - ) - - module = AnsibleModule(argument_spec) - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - # required args - mapping = False - try: - mapped_hosts = xcli_client.cmd.vol_mapping_list( - vol=module.params.get('vol')).as_list - for host in mapped_hosts: - if host['host'] == module.params.get("host", ""): - mapping = True - except Exception: - pass - state = module.params['state'] - - state_changed = False - if state == 'present' and not mapping: - state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client) - if state == 'absent' and mapping: - state_changed = execute_pyxcli_command( - module, 'unmap_vol', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/vexata/vexata_eg.py b/plugins/modules/storage/vexata/vexata_eg.py deleted file mode 100644 index 54bb8c29a7..0000000000 --- a/plugins/modules/storage/vexata/vexata_eg.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: vexata_eg -short_description: Manage export groups on Vexata VX100 storage arrays -description: - - Create or delete export groups on a Vexata VX100 array. - - An export group is a tuple of a volume group, initiator group and port - group that allows a set of volumes to be exposed to one or more hosts - through specific array ports. -author: - - Sandeep Kasargod (@vexata) -options: - name: - description: - - Export group name. - required: true - type: str - state: - description: - - Creates export group when present or delete when absent. - default: present - choices: [ present, absent ] - type: str - vg: - description: - - Volume group name. - type: str - ig: - description: - - Initiator group name. - type: str - pg: - description: - - Port group name. - type: str -extends_documentation_fragment: -- community.general.vexata.vx100 - -''' - -EXAMPLES = r''' -- name: Create export group named db_export. 
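-  # An export group is the (vg, ig, pg) tuple described above: volume group
-  # dbvols is exposed to the hosts in dbhosts through port group pg1.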
- community.general.vexata_eg: - name: db_export - vg: dbvols - ig: dbhosts - pg: pg1 - state: present - array: vx100_ultra.test.com - user: admin - password: secret - -- name: Delete export group named db_export - community.general.vexata_eg: - name: db_export - state: absent - array: vx100_ultra.test.com - user: admin - password: secret -''' - -RETURN = r''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.vexata import ( - argument_spec, get_array, required_together) - - -def get_eg(module, array): - """Retrieve a named vg if it exists, None if absent.""" - name = module.params['name'] - try: - egs = array.list_egs() - eg = filter(lambda eg: eg['name'] == name, egs) - if len(eg) == 1: - return eg[0] - else: - return None - except Exception: - module.fail_json(msg='Error while attempting to retrieve export groups.') - - -def get_vg_id(module, array): - """Retrieve a named vg's id if it exists, error if absent.""" - name = module.params['vg'] - try: - vgs = array.list_vgs() - vg = filter(lambda vg: vg['name'] == name, vgs) - if len(vg) == 1: - return vg[0]['id'] - else: - module.fail_json(msg='Volume group {0} was not found.'.format(name)) - except Exception: - module.fail_json(msg='Error while attempting to retrieve volume groups.') - - -def get_ig_id(module, array): - """Retrieve a named ig's id if it exists, error if absent.""" - name = module.params['ig'] - try: - igs = array.list_igs() - ig = filter(lambda ig: ig['name'] == name, igs) - if len(ig) == 1: - return ig[0]['id'] - else: - module.fail_json(msg='Initiator group {0} was not found.'.format(name)) - except Exception: - module.fail_json(msg='Error while attempting to retrieve initiator groups.') - - -def get_pg_id(module, array): - """Retrieve a named pg's id if it exists, error if absent.""" - name = module.params['pg'] - try: - pgs = array.list_pgs() - pg = filter(lambda pg: pg['name'] == name, pgs) - if len(pg) == 1: - return pg[0]['id'] - else: - module.fail_json(msg='Port group {0} was not found.'.format(name)) - except Exception: - module.fail_json(msg='Error while attempting to retrieve port groups.') - - -def create_eg(module, array): - """"Create a new export group.""" - changed = False - eg_name = module.params['name'] - vg_id = get_vg_id(module, array) - ig_id = get_ig_id(module, array) - pg_id = get_pg_id(module, array) - if module.check_mode: - module.exit_json(changed=changed) - - try: - eg = array.create_eg( - eg_name, - 'Ansible export group', - (vg_id, ig_id, pg_id)) - if eg: - module.log(msg='Created export group {0}'.format(eg_name)) - changed = True - else: - raise Exception - except Exception: - module.fail_json(msg='Export group {0} create failed.'.format(eg_name)) - module.exit_json(changed=changed) - - -def delete_eg(module, array, eg): - changed = False - eg_name = eg['name'] - if module.check_mode: - module.exit_json(changed=changed) - - try: - ok = array.delete_eg( - eg['id']) - if ok: - module.log(msg='Export group {0} deleted.'.format(eg_name)) - changed = True - else: - raise Exception - except Exception: - module.fail_json(msg='Export group {0} delete failed.'.format(eg_name)) - module.exit_json(changed=changed) - - -def main(): - arg_spec = argument_spec() - arg_spec.update( - dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - vg=dict(type='str'), - ig=dict(type='str'), - pg=dict(type='str') - ) - ) - - module = AnsibleModule(arg_spec, - 
supports_check_mode=True, - required_together=required_together()) - - state = module.params['state'] - array = get_array(module) - eg = get_eg(module, array) - - if state == 'present' and not eg: - create_eg(module, array) - elif state == 'absent' and eg: - delete_eg(module, array, eg) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/vexata/vexata_volume.py b/plugins/modules/storage/vexata/vexata_volume.py deleted file mode 100644 index 1cf4cd7b5c..0000000000 --- a/plugins/modules/storage/vexata/vexata_volume.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: vexata_volume -short_description: Manage volumes on Vexata VX100 storage arrays -description: - - Create, deletes or extend volumes on a Vexata VX100 array. -author: -- Sandeep Kasargod (@vexata) -options: - name: - description: - - Volume name. - required: true - type: str - state: - description: - - Creates/Modifies volume when present or removes when absent. - default: present - choices: [ present, absent ] - type: str - size: - description: - - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes. - type: str -extends_documentation_fragment: -- community.general.vexata.vx100 - -''' - -EXAMPLES = r''' -- name: Create new 2 TiB volume named foo - community.general.vexata_volume: - name: foo - size: 2T - state: present - array: vx100_ultra.test.com - user: admin - password: secret - -- name: Expand volume named foo to 4 TiB - community.general.vexata_volume: - name: foo - size: 4T - state: present - array: vx100_ultra.test.com - user: admin - password: secret - -- name: Delete volume named foo - community.general.vexata_volume: - name: foo - state: absent - array: vx100_ultra.test.com - user: admin - password: secret -''' - -RETURN = r''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.vexata import ( - argument_spec, get_array, required_together, size_to_MiB) - - -def get_volume(module, array): - """Retrieve a named volume if it exists, None if absent.""" - name = module.params['name'] - try: - vols = array.list_volumes() - vol = filter(lambda v: v['name'] == name, vols) - if len(vol) == 1: - return vol[0] - else: - return None - except Exception: - module.fail_json(msg='Error while attempting to retrieve volumes.') - - -def validate_size(module, err_msg): - size = module.params.get('size', False) - if not size: - module.fail_json(msg=err_msg) - size = size_to_MiB(size) - if size <= 0: - module.fail_json(msg='Invalid volume size, must be [MGT].') - return size - - -def create_volume(module, array): - """"Create a new volume.""" - changed = False - size = validate_size(module, err_msg='Size is required to create volume.') - if module.check_mode: - module.exit_json(changed=changed) - - try: - vol = array.create_volume( - module.params['name'], - 'Ansible volume', - size) - if vol: - module.log(msg='Created volume {0}'.format(vol['id'])) - changed = True - else: - module.fail_json(msg='Volume create failed.') - except Exception: - pass - module.exit_json(changed=changed) - - -def update_volume(module, array, volume): - """Expand the volume size.""" - changed = False - size 
= validate_size(module, err_msg='Size is required to update volume') - prev_size = volume['volSize'] - if size <= prev_size: - module.log(msg='Volume expanded size needs to be larger ' - 'than current size.') - if module.check_mode: - module.exit_json(changed=changed) - - try: - vol = array.grow_volume( - volume['name'], - volume['description'], - volume['id'], - size) - if vol: - changed = True - except Exception: - pass - - module.exit_json(changed=changed) - - -def delete_volume(module, array, volume): - changed = False - vol_name = volume['name'] - if module.check_mode: - module.exit_json(changed=changed) - - try: - ok = array.delete_volume( - volume['id']) - if ok: - module.log(msg='Volume {0} deleted.'.format(vol_name)) - changed = True - else: - raise Exception - except Exception: - pass - module.exit_json(changed=changed) - - -def main(): - arg_spec = argument_spec() - arg_spec.update( - dict( - name=dict(type='str', required=True), - state=dict(default='present', choices=['present', 'absent']), - size=dict(type='str') - ) - ) - - module = AnsibleModule(arg_spec, - supports_check_mode=True, - required_together=required_together()) - - state = module.params['state'] - array = get_array(module) - volume = get_volume(module, array) - - if state == 'present': - if not volume: - create_volume(module, array) - else: - update_volume(module, array, volume) - elif state == 'absent' and volume: - delete_volume(module, array, volume) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py deleted file mode 100644 index a804753a16..0000000000 --- a/plugins/modules/storage/zfs/zfs.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Johan Wiren -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: zfs -short_description: Manage zfs -description: - - Manages ZFS file systems, volumes, clones and snapshots -options: - name: - description: - - File system, snapshot or volume name e.g. C(rpool/myfs). - required: true - type: str - state: - description: - - Whether to create (C(present)), or remove (C(absent)) a - file system, snapshot or volume. All parents/children - will be created/destroyed as needed to reach the desired state. - choices: [ absent, present ] - required: true - type: str - origin: - description: - - Snapshot from which to create a clone. - type: str - extra_zfs_properties: - description: - - A dictionary of zfs properties to be set. - - See the zfs(8) man page for more information. - type: dict -notes: - - C(check_mode) is supported, but in certain situations it may report a task - as changed that will not be reported as changed when C(check_mode) is disabled. - For example, this might occur when the zpool C(altroot) option is set or when - a size is written using human-readable notation, such as C(1M) or C(1024K), - instead of as an unqualified byte count, such as C(1048576). -author: -- Johan Wiren (@johanwiren) -''' - -EXAMPLES = ''' -- name: Create a new file system called myfs in pool rpool with the setuid property turned off - community.general.zfs: - name: rpool/myfs - state: present - extra_zfs_properties: - setuid: off - -- name: Create a new volume called myvol in pool rpool. 
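-  # Setting volsize is what makes this a volume (zvol) rather than a file
-  # system; the module passes it to zfs create as the -V argument.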
- community.general.zfs: - name: rpool/myvol - state: present - extra_zfs_properties: - volsize: 10M - -- name: Create a snapshot of rpool/myfs file system. - community.general.zfs: - name: rpool/myfs@mysnapshot - state: present - -- name: Create a new file system called myfs2 with snapdir enabled - community.general.zfs: - name: rpool/myfs2 - state: present - extra_zfs_properties: - snapdir: enabled - -- name: Create a new file system by cloning a snapshot - community.general.zfs: - name: rpool/cloned_fs - state: present - origin: rpool/myfs@mysnapshot - -- name: Destroy a filesystem - community.general.zfs: - name: rpool/myfs - state: absent -''' - -import os - -from ansible.module_utils.basic import AnsibleModule - - -class Zfs(object): - - def __init__(self, module, name, properties): - self.module = module - self.name = name - self.properties = properties - self.changed = False - self.zfs_cmd = module.get_bin_path('zfs', True) - self.zpool_cmd = module.get_bin_path('zpool', True) - self.pool = name.split('/')[0].split('@')[0] - self.is_solaris = os.uname()[0] == 'SunOS' - self.is_openzfs = self.check_openzfs() - self.enhanced_sharing = self.check_enhanced_sharing() - - def check_openzfs(self): - cmd = [self.zpool_cmd] - cmd.extend(['get', 'version']) - cmd.append(self.pool) - (rc, out, err) = self.module.run_command(cmd, check_rc=True) - version = out.splitlines()[-1].split()[2] - if version == '-': - return True - if int(version) == 5000: - return True - return False - - def check_enhanced_sharing(self): - if self.is_solaris and not self.is_openzfs: - cmd = [self.zpool_cmd] - cmd.extend(['get', 'version']) - cmd.append(self.pool) - (rc, out, err) = self.module.run_command(cmd, check_rc=True) - version = out.splitlines()[-1].split()[2] - if int(version) >= 34: - return True - return False - - def exists(self): - cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name] - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - - def create(self): - if self.module.check_mode: - self.changed = True - return - properties = self.properties - origin = self.module.params.get('origin', None) - cmd = [self.zfs_cmd] - - if "@" in self.name: - action = 'snapshot' - elif origin: - action = 'clone' - else: - action = 'create' - - cmd.append(action) - - if action in ['create', 'clone']: - cmd += ['-p'] - - if properties: - for prop, value in properties.items(): - if prop == 'volsize': - cmd += ['-V', value] - elif prop == 'volblocksize': - cmd += ['-b', value] - else: - cmd += ['-o', '%s="%s"' % (prop, value)] - if origin and action == 'clone': - cmd.append(origin) - cmd.append(self.name) - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=err) - - def destroy(self): - if self.module.check_mode: - self.changed = True - return - cmd = [self.zfs_cmd, 'destroy', '-R', self.name] - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=err) - - def set_property(self, prop, value): - if self.module.check_mode: - self.changed = True - return - cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name] - (rc, out, err) = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg=err) - - def set_properties_if_changed(self): - diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} - current_properties = self.get_current_properties() - for prop, value in 
self.properties.items(): - current_value = current_properties.get(prop, None) - if current_value != value: - self.set_property(prop, value) - diff['before']['extra_zfs_properties'][prop] = current_value - diff['after']['extra_zfs_properties'][prop] = value - if self.module.check_mode: - return diff - updated_properties = self.get_current_properties() - for prop in self.properties: - value = updated_properties.get(prop, None) - if value is None: - self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) - if current_properties.get(prop, None) != value: - self.changed = True - if prop in diff['after']['extra_zfs_properties']: - diff['after']['extra_zfs_properties'][prop] = value - return diff - - def get_current_properties(self): - cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] - if self.enhanced_sharing: - cmd += ['-e'] - cmd += ['all', self.name] - rc, out, err = self.module.run_command(" ".join(cmd)) - properties = dict() - for line in out.splitlines(): - prop, value, source = line.split('\t') - # include source '-' so that creation-only properties are not removed - # to avoids errors when the dataset already exists and the property is not changed - # this scenario is most likely when the same playbook is run more than once - if source == 'local' or source == 'received' or source == '-': - properties[prop] = value - # Add alias for enhanced sharing properties - if self.enhanced_sharing: - properties['sharenfs'] = properties.get('share.nfs', None) - properties['sharesmb'] = properties.get('share.smb', None) - return properties - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['absent', 'present']), - origin=dict(type='str', default=None), - extra_zfs_properties=dict(type='dict', default={}), - ), - supports_check_mode=True, - ) - - state = module.params.get('state') - name = module.params.get('name') - - if module.params.get('origin') and '@' in name: - module.fail_json(msg='cannot specify origin when operating on a snapshot') - - # Reverse the boolification of zfs properties - for prop, value in module.params['extra_zfs_properties'].items(): - if isinstance(value, bool): - if value is True: - module.params['extra_zfs_properties'][prop] = 'on' - else: - module.params['extra_zfs_properties'][prop] = 'off' - else: - module.params['extra_zfs_properties'][prop] = value - - result = dict( - name=name, - state=state, - ) - - zfs = Zfs(module, name, module.params['extra_zfs_properties']) - - if state == 'present': - if zfs.exists(): - result['diff'] = zfs.set_properties_if_changed() - else: - zfs.create() - result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}} - - elif state == 'absent': - if zfs.exists(): - zfs.destroy() - result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}} - else: - result['diff'] = {} - - result['diff']['before_header'] = name - result['diff']['after_header'] = name - - result.update(zfs.properties) - result['changed'] = zfs.changed - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/zfs/zfs_delegate_admin.py b/plugins/modules/storage/zfs/zfs_delegate_admin.py deleted file mode 100644 index ead4041150..0000000000 --- a/plugins/modules/storage/zfs/zfs_delegate_admin.py +++ /dev/null @@ -1,265 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Nate Coraor -# GNU General Public License 
v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: zfs_delegate_admin -short_description: Manage ZFS delegated administration (user admin privileges) -description: - - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS - operations normally restricted to the superuser. - - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options. - - This module attempts to adhere to the behavior of the command line tool as much as possible. -requirements: - - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all - versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0." -options: - name: - description: - - File system or volume name e.g. C(rpool/myfs). - required: true - type: str - state: - description: - - Whether to allow (C(present)), or unallow (C(absent)) a permission. - - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required. - - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. - choices: [ absent, present ] - default: present - type: str - users: - description: - - List of users to whom permission(s) should be granted. - type: list - elements: str - groups: - description: - - List of groups to whom permission(s) should be granted. - type: list - elements: str - everyone: - description: - - Apply permissions to everyone. - type: bool - default: no - permissions: - description: - - The list of permission(s) to delegate (required if C(state) is C(present)). - - Supported permissions depend on the ZFS version in use. See for example - U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS. - type: list - elements: str - local: - description: - - Apply permissions to C(name) locally (C(zfs allow -l)). - type: bool - descendents: - description: - - Apply permissions to C(name)'s descendents (C(zfs allow -d)). - type: bool - recursive: - description: - - Unallow permissions recursively (ignored when C(state) is C(present)). 
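-    # i.e. adds -r to zfs unallow only; with state=present the flag is
-    # ignored.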
- type: bool - default: no -author: -- Nate Coraor (@natefoo) -''' - -EXAMPLES = r''' -- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope - community.general.zfs_delegate_admin: - name: rpool/myfs - users: adm - permissions: allow,unallow - -- name: Grant `zfs send` to everyone, plus the group `backup` - community.general.zfs_delegate_admin: - name: rpool/myvol - groups: backup - everyone: yes - permissions: send - -- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only - community.general.zfs_delegate_admin: - name: rpool/myfs - users: foo,bar - permissions: send,receive - local: yes - -- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain) - community.general.zfs_delegate_admin: - name: rpool/myfs - everyone: yes - state: absent -''' - -# This module does not return anything other than the standard -# changed/state/msg/stdout -RETURN = ''' -''' - -from itertools import product - -from ansible.module_utils.basic import AnsibleModule - - -class ZfsDelegateAdmin(object): - def __init__(self, module): - self.module = module - self.name = module.params.get('name') - self.state = module.params.get('state') - self.users = module.params.get('users') - self.groups = module.params.get('groups') - self.everyone = module.params.get('everyone') - self.perms = module.params.get('permissions') - self.scope = None - self.changed = False - self.initial_perms = None - self.subcommand = 'allow' - self.recursive_opt = [] - self.run_method = self.update - - self.setup(module) - - def setup(self, module): - """ Validate params and set up for run. - """ - if self.state == 'absent': - self.subcommand = 'unallow' - if module.params.get('recursive'): - self.recursive_opt = ['-r'] - - local = module.params.get('local') - descendents = module.params.get('descendents') - if (local and descendents) or (not local and not descendents): - self.scope = 'ld' - elif local: - self.scope = 'l' - elif descendents: - self.scope = 'd' - else: - self.module.fail_json(msg='Impossible value for local and descendents') - - if not (self.users or self.groups or self.everyone): - if self.state == 'present': - self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set') - elif self.state == 'absent': - self.run_method = self.clear - # ansible ensures the else cannot happen here - - self.zfs_path = module.get_bin_path('zfs', True) - - @property - def current_perms(self): - """ Parse the output of `zfs allow ` to retrieve current permissions. - """ - out = self.run_zfs_raw(subcommand='allow') - perms = { - 'l': {'u': {}, 'g': {}, 'e': []}, - 'd': {'u': {}, 'g': {}, 'e': []}, - 'ld': {'u': {}, 'g': {}, 'e': []}, - } - linemap = { - 'Local permissions:': 'l', - 'Descendent permissions:': 'd', - 'Local+Descendent permissions:': 'ld', - } - scope = None - for line in out.splitlines(): - scope = linemap.get(line, scope) - if not scope: - continue - try: - if line.startswith('\tuser ') or line.startswith('\tgroup '): - ent_type, ent, cur_perms = line.split() - perms[scope][ent_type[0]][ent] = cur_perms.split(',') - elif line.startswith('\teveryone '): - perms[scope]['e'] = line.split()[1].split(',') - except ValueError: - self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line) - return perms - - def run_zfs_raw(self, subcommand=None, args=None): - """ Run a raw zfs command, fail on error. 
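-        Returns the command's stdout; any non-zero exit status aborts the
-        module run via fail_json.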
- """ - cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name] - rc, out, err = self.module.run_command(cmd) - if rc: - self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err)) - return out - - def run_zfs(self, args): - """ Run zfs allow/unallow with appropriate options as per module arguments. - """ - args = self.recursive_opt + ['-' + self.scope] + args - if self.perms: - args.append(','.join(self.perms)) - return self.run_zfs_raw(args=args) - - def clear(self): - """ Called by run() to clear all permissions. - """ - changed = False - stdout = '' - for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')): - for ent in self.initial_perms[scope][ent_type].keys(): - stdout += self.run_zfs(['-%s' % ent_type, ent]) - changed = True - for scope in ('ld', 'l', 'd'): - if self.initial_perms[scope]['e']: - stdout += self.run_zfs(['-e']) - changed = True - return (changed, stdout) - - def update(self): - """ Update permissions as per module arguments. - """ - stdout = '' - for ent_type, entities in (('u', self.users), ('g', self.groups)): - if entities: - stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)]) - if self.everyone: - stdout += self.run_zfs(['-e']) - return (self.initial_perms != self.current_perms, stdout) - - def run(self): - """ Run an operation, return results for Ansible. - """ - exit_args = {'state': self.state} - self.initial_perms = self.current_perms - exit_args['changed'], stdout = self.run_method() - if exit_args['changed']: - exit_args['msg'] = 'ZFS delegated admin permissions updated' - exit_args['stdout'] = stdout - self.module.exit_json(**exit_args) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - users=dict(type='list', elements='str'), - groups=dict(type='list', elements='str'), - everyone=dict(type='bool', default=False), - permissions=dict(type='list', elements='str'), - local=dict(type='bool'), - descendents=dict(type='bool'), - recursive=dict(type='bool', default=False), - ), - supports_check_mode=False, - required_if=[('state', 'present', ['permissions'])], - ) - zfs_delegate_admin = ZfsDelegateAdmin(module) - zfs_delegate_admin.run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/zfs/zfs_facts.py b/plugins/modules/storage/zfs/zfs_facts.py deleted file mode 100644 index cb106de111..0000000000 --- a/plugins/modules/storage/zfs/zfs_facts.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zfs_facts -short_description: Gather facts about ZFS datasets. -description: - - Gather facts from ZFS dataset properties. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS dataset name. - required: yes - aliases: [ "ds", "dataset" ] - type: str - recurse: - description: - - Specifies if properties for any children should be recursively - displayed. - type: bool - default: 'no' - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: 'no' - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. 
- For more information about dataset properties, check zfs(1M) man page. - default: all - type: str - type: - description: - - Specifies which datasets types to display. Multiple values have to be - provided in comma-separated form. - choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] - default: all - type: str - depth: - description: - - Specifies recursion depth. - type: int -''' - -EXAMPLES = ''' -- name: Gather facts about ZFS dataset rpool/export/home - community.general.zfs_facts: - dataset: rpool/export/home - -- name: Report space usage on ZFS filesystems under data/home - community.general.zfs_facts: - name: data/home - recurse: yes - type: filesystem - -- ansible.builtin.debug: - msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' - with_items: '{{ ansible_zfs_datasets }}' -''' - -RETURN = ''' -name: - description: ZFS dataset name - returned: always - type: str - sample: rpool/var/spool -parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: True -recurse: - description: if we should recurse over ZFS dataset - returned: if 'recurse' is set to True - type: bool - sample: True -zfs_datasets: - description: ZFS dataset facts - returned: always - type: str - sample: - { - "aclinherit": "restricted", - "aclmode": "discard", - "atime": "on", - "available": "43.8G", - "canmount": "on", - "casesensitivity": "sensitive", - "checksum": "on", - "compression": "off", - "compressratio": "1.00x", - "copies": "1", - "creation": "Thu Jun 16 11:37 2016", - "dedup": "off", - "devices": "on", - "exec": "on", - "filesystem_count": "none", - "filesystem_limit": "none", - "logbias": "latency", - "logicalreferenced": "18.5K", - "logicalused": "3.45G", - "mlslabel": "none", - "mounted": "yes", - "mountpoint": "/rpool", - "name": "rpool", - "nbmand": "off", - "normalization": "none", - "org.openindiana.caiman:install": "ready", - "primarycache": "all", - "quota": "none", - "readonly": "off", - "recordsize": "128K", - "redundant_metadata": "all", - "refcompressratio": "1.00x", - "referenced": "29.5K", - "refquota": "none", - "refreservation": "none", - "reservation": "none", - "secondarycache": "all", - "setuid": "on", - "sharenfs": "off", - "sharesmb": "off", - "snapdir": "hidden", - "snapshot_count": "none", - "snapshot_limit": "none", - "sync": "standard", - "type": "filesystem", - "used": "4.41G", - "usedbychildren": "4.41G", - "usedbydataset": "29.5K", - "usedbyrefreservation": "0", - "usedbysnapshots": "0", - "utf8only": "off", - "version": "5", - "vscan": "off", - "written": "29.5K", - "xattr": "on", - "zoned": "off" - } -''' - -from collections import defaultdict - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems - - -SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] - - -class ZFSFacts(object): - def __init__(self, module): - - self.module = module - - self.name = module.params['name'] - self.recurse = module.params['recurse'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self.type = module.params['type'] - self.depth = module.params['depth'] - - self._datasets = defaultdict(dict) - self.facts = [] - - def dataset_exists(self): - cmd = [self.module.get_bin_path('zfs'), 'list', self.name] - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - return True - else: - return False - - def get_facts(self): - cmd = 
[self.module.get_bin_path('zfs'), 'get', '-H'] - if self.parsable: - cmd.append('-p') - if self.recurse: - cmd.append('-r') - if int(self.depth) != 0: - cmd.append('-d') - cmd.append('%s' % self.depth) - if self.type: - cmd.append('-t') - cmd.append(self.type) - cmd.extend(['-o', 'name,property,value', self.properties, self.name]) - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - for line in out.splitlines(): - dataset, property, value = line.split('\t') - - self._datasets[dataset].update({property: value}) - - for k, v in iteritems(self._datasets): - v.update({'name': k}) - self.facts.append(v) - - return {'ansible_zfs_datasets': self.facts} - else: - self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name, - stderr=err, - rc=rc) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['ds', 'dataset'], type='str'), - recurse=dict(required=False, default=False, type='bool'), - parsable=dict(required=False, default=False, type='bool'), - properties=dict(required=False, default='all', type='str'), - type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES), - depth=dict(required=False, default=0, type='int') - ), - supports_check_mode=True - ) - - zfs_facts = ZFSFacts(module) - - result = {} - result['changed'] = False - result['name'] = zfs_facts.name - - if zfs_facts.parsable: - result['parsable'] = zfs_facts.parsable - - if zfs_facts.recurse: - result['recurse'] = zfs_facts.recurse - - if zfs_facts.dataset_exists(): - result['ansible_facts'] = zfs_facts.get_facts() - else: - module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py deleted file mode 100644 index b7a66255c6..0000000000 --- a/plugins/modules/storage/zfs/zpool_facts.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zpool_facts -short_description: Gather facts about ZFS pools. -description: - - Gather facts from ZFS pool properties. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS pool name. - type: str - aliases: [ "pool", "zpool" ] - required: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: False - required: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zpool(1M) man page. - type: str - default: all - required: false -''' - -EXAMPLES = ''' -- name: Gather facts about ZFS pool rpool - community.general.zpool_facts: pool=rpool - -- name: Gather space usage about all imported ZFS pools - community.general.zpool_facts: properties='free,size' - -- name: Print gathered information - ansible.builtin.debug: - msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' 
- with_items: '{{ ansible_zfs_pools }}' -''' - -RETURN = ''' -ansible_facts: - description: Dictionary containing all the detailed information about the ZFS pool facts - returned: always - type: complex - contains: - ansible_zfs_pools: - description: ZFS pool facts - returned: always - type: str - sample: - { - "allocated": "3.46G", - "altroot": "-", - "autoexpand": "off", - "autoreplace": "off", - "bootfs": "rpool/ROOT/openindiana", - "cachefile": "-", - "capacity": "6%", - "comment": "-", - "dedupditto": "0", - "dedupratio": "1.00x", - "delegation": "on", - "expandsize": "-", - "failmode": "wait", - "feature@async_destroy": "enabled", - "feature@bookmarks": "enabled", - "feature@edonr": "enabled", - "feature@embedded_data": "active", - "feature@empty_bpobj": "active", - "feature@enabled_txg": "active", - "feature@extensible_dataset": "enabled", - "feature@filesystem_limits": "enabled", - "feature@hole_birth": "active", - "feature@large_blocks": "enabled", - "feature@lz4_compress": "active", - "feature@multi_vdev_crash_dump": "enabled", - "feature@sha512": "enabled", - "feature@skein": "enabled", - "feature@spacemap_histogram": "active", - "fragmentation": "3%", - "free": "46.3G", - "freeing": "0", - "guid": "15729052870819522408", - "health": "ONLINE", - "leaked": "0", - "listsnapshots": "off", - "name": "rpool", - "readonly": "off", - "size": "49.8G", - "version": "-" - } -name: - description: ZFS pool name - returned: always - type: str - sample: rpool -parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: True -''' - -from collections import defaultdict - -from ansible.module_utils.six import iteritems -from ansible.module_utils.basic import AnsibleModule - - -class ZPoolFacts(object): - def __init__(self, module): - - self.module = module - self.name = module.params['name'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self._pools = defaultdict(dict) - self.facts = [] - - def pool_exists(self): - cmd = [self.module.get_bin_path('zpool'), 'list', self.name] - rc, dummy, dummy = self.module.run_command(cmd) - return rc == 0 - - def get_facts(self): - cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] - if self.parsable: - cmd.append('-p') - cmd.append('-o') - cmd.append('name,property,value') - cmd.append(self.properties) - if self.name: - cmd.append(self.name) - - rc, out, err = self.module.run_command(cmd, check_rc=True) - - for line in out.splitlines(): - pool, prop, value = line.split('\t') - - self._pools[pool].update({prop: value}) - - for k, v in iteritems(self._pools): - v.update({'name': k}) - self.facts.append(v) - - return {'ansible_zfs_pools': self.facts} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['pool', 'zpool'], type='str'), - parsable=dict(default=False, type='bool'), - properties=dict(default='all', type='str'), - ), - supports_check_mode=True - ) - - zpool_facts = ZPoolFacts(module) - - result = { - 'changed': False, - 'name': zpool_facts.name, - } - if zpool_facts.parsable: - result['parsable'] = zpool_facts.parsable - - if zpool_facts.name is not None: - if zpool_facts.pool_exists(): - result['ansible_facts'] = zpool_facts.get_facts() - else: - module.fail_json(msg='ZFS pool %s does not exist!' 
% zpool_facts.name) - else: - result['ansible_facts'] = zpool_facts.get_facts() - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/sudoers.py b/plugins/modules/sudoers.py new file mode 100644 index 0000000000..0a40e5155e --- /dev/null +++ b/plugins/modules/sudoers.py @@ -0,0 +1,328 @@ +#!/usr/bin/python + + +# Copyright (c) 2019, Jon Ellis (@JonEllis) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: sudoers +short_description: Manage sudoers files +version_added: "4.3.0" +description: + - This module allows for the manipulation of sudoers files. +author: + - "Jon Ellis (@JonEllis) " +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + commands: + description: + - The commands allowed by the sudoers rule. + - Multiple can be added by passing a list of commands. + - Use V(ALL) for all commands. + type: list + elements: str + group: + description: + - The name of the group for the sudoers rule. + - This option cannot be used in conjunction with O(user). + type: str + name: + required: true + description: + - The name of the sudoers rule. + - This is used for the filename for the sudoers file managed by this rule. + type: str + noexec: + description: + - Whether a command is prevented to run further commands itself. + default: false + type: bool + version_added: 8.4.0 + nopassword: + description: + - Whether a password is not required when command is run with sudo. + default: true + type: bool + setenv: + description: + - Whether to allow keeping the environment when command is run with sudo. + default: false + type: bool + version_added: 6.3.0 + host: + description: + - Specify the host the rule is for. + default: ALL + type: str + version_added: 6.2.0 + runas: + description: + - Specify the target user the command(s) runs as. + type: str + version_added: 4.7.0 + sudoers_path: + description: + - The path which sudoers config files are managed in. + default: /etc/sudoers.d + type: str + state: + default: "present" + choices: + - present + - absent + description: + - Whether the rule should exist or not. + type: str + user: + description: + - The name of the user for the sudoers rule. + - This option cannot be used in conjunction with O(group). + type: str + validation: + description: + - If V(absent), the sudoers rule is added without validation. + - If V(detect) and C(visudo) is available, then the sudoers rule is validated by C(visudo). + - If V(required), C(visudo) must be available to validate the sudoers rule. 
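+      - Validation is skipped when O(state=absent); the rule file is removed without calling C(visudo).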
+ type: str + default: detect + choices: [absent, detect, required] + version_added: 5.2.0 +""" + +EXAMPLES = r""" +- name: Allow the backup user to sudo /usr/local/bin/backup + community.general.sudoers: + name: allow-backup + state: present + user: backup + commands: /usr/local/bin/backup + +- name: Allow the bob user to run any commands as alice with sudo -u alice + community.general.sudoers: + name: bob-do-as-alice + state: present + user: bob + runas: alice + commands: ALL + +- name: >- + Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics + without requiring a password on the host called webserver + community.general.sudoers: + name: monitor-app + group: monitoring + host: webserver + commands: /usr/local/bin/gather-app-metrics + +- name: >- + Allow the alice user to run sudo /bin/systemctl restart my-service or + sudo /bin/systemctl reload my-service, but a password is required + community.general.sudoers: + name: alice-service + user: alice + commands: + - /bin/systemctl restart my-service + - /bin/systemctl reload my-service + nopassword: false + +- name: Revoke the previous sudo grants given to the alice user + community.general.sudoers: + name: alice-service + state: absent + +- name: Allow alice to sudo /usr/local/bin/upload and keep env variables + community.general.sudoers: + name: allow-alice-upload + user: alice + commands: /usr/local/bin/upload + setenv: true + +- name: >- + Allow alice to sudo /usr/bin/less but prevent less from + running further commands itself + community.general.sudoers: + name: allow-alice-restricted-less + user: alice + commands: /usr/bin/less + noexec: true +""" + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Sudoers(object): + + FILE_MODE = 0o440 + + def __init__(self, module): + self.module = module + + self.check_mode = module.check_mode + self.name = module.params['name'] + self.user = module.params['user'] + self.group = module.params['group'] + self.state = module.params['state'] + self.noexec = module.params['noexec'] + self.nopassword = module.params['nopassword'] + self.setenv = module.params['setenv'] + self.host = module.params['host'] + self.runas = module.params['runas'] + self.sudoers_path = module.params['sudoers_path'] + self.file = os.path.join(self.sudoers_path, self.name) + self.commands = module.params['commands'] + self.validation = module.params['validation'] + + def write(self): + if self.check_mode: + return + + with open(self.file, 'w') as f: + f.write(self.content()) + + os.chmod(self.file, self.FILE_MODE) + + def delete(self): + if self.check_mode: + return + + os.remove(self.file) + + def exists(self): + return os.path.exists(self.file) + + def matches(self): + with open(self.file, 'r') as f: + content_matches = f.read() == self.content() + + current_mode = os.stat(self.file).st_mode & 0o777 + mode_matches = current_mode == self.FILE_MODE + + return content_matches and mode_matches + + def content(self): + if self.user: + owner = self.user + elif self.group: + owner = '%{group}'.format(group=self.group) + + commands_str = ', '.join(self.commands) + noexec_str = 'NOEXEC:' if self.noexec else '' + nopasswd_str = 'NOPASSWD:' if self.nopassword else '' + setenv_str = 'SETENV:' if self.setenv else '' + runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else '' + return "{owner} {host}={runas}{noexec}{nopasswd}{setenv} {commands}\n".format( + owner=owner, + host=self.host, + 
runas=runas_str, + noexec=noexec_str, + nopasswd=nopasswd_str, + setenv=setenv_str, + commands=commands_str + ) + + def validate(self): + if self.validation == 'absent': + return + + visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required') + if visudo_path is None: + return + + check_command = [visudo_path, '-c', '-f', '-'] + rc, stdout, stderr = self.module.run_command(check_command, data=self.content()) + + if rc != 0: + self.module.fail_json(msg='Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout or stderr), stdout=stdout, stderr=stderr) + + def run(self): + if self.state == 'absent': + if self.exists(): + self.delete() + return True + else: + return False + + self.validate() + + if self.exists() and self.matches(): + return False + + self.write() + return True + + +def main(): + argument_spec = { + 'commands': { + 'type': 'list', + 'elements': 'str', + }, + 'group': {}, + 'name': { + 'required': True, + }, + 'noexec': { + 'type': 'bool', + 'default': False, + }, + 'nopassword': { + 'type': 'bool', + 'default': True, + }, + 'setenv': { + 'type': 'bool', + 'default': False, + }, + 'host': { + 'type': 'str', + 'default': 'ALL', + }, + 'runas': { + 'type': 'str', + 'default': None, + }, + 'sudoers_path': { + 'type': 'str', + 'default': '/etc/sudoers.d', + }, + 'state': { + 'default': 'present', + 'choices': ['present', 'absent'], + }, + 'user': {}, + 'validation': { + 'default': 'detect', + 'choices': ['absent', 'detect', 'required'] + }, + } + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['user', 'group']], + supports_check_mode=True, + required_if=[('state', 'present', ['commands'])], + ) + + sudoers = Sudoers(module) + + try: + changed = sudoers.run() + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py deleted file mode 120000 index fc2b29b664..0000000000 --- a/plugins/modules/supervisorctl.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/supervisorctl.py \ No newline at end of file diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py new file mode 100644 index 0000000000..e1aac7e37a --- /dev/null +++ b/plugins/modules/supervisorctl.py @@ -0,0 +1,285 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Matt Wright +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: supervisorctl +short_description: Manage the state of a program or group of programs managed by C(supervisord) +description: + - Manage the state of a program or group of programs managed by C(supervisord). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of the supervisord program or group to manage. + - The name is taken as group name when it ends with a colon V(:). + - If O(name=all), all programs and program groups are managed. + required: true + config: + type: path + description: + - The supervisor configuration file path. + server_url: + type: str + description: + - URL on which supervisord server is listening. + username: + type: str + description: + - Username to use for authentication. 
+ password: + type: str + description: + - Password to use for authentication. + state: + type: str + description: + - The desired state of program/group. + required: true + choices: ["present", "started", "stopped", "restarted", "absent", "signalled"] + stop_before_removing: + type: bool + description: + - Use O(stop_before_removing=true) to stop the program/group before removing it. + required: false + default: false + version_added: 7.5.0 + signal: + type: str + description: + - The signal to send to the program/group, when combined with the 'signalled' state. Required when l(state=signalled). + supervisorctl_path: + type: path + description: + - Path to C(supervisorctl) executable. +notes: + - When O(state=present), the module calls C(supervisorctl reread) then C(supervisorctl add) if the program/group does not + exist. + - When O(state=restarted), the module calls C(supervisorctl update) then calls C(supervisorctl restart). + - When O(state=absent), the module calls C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. + If the program/group is still running, the action fails. If you want to stop the program/group before removing, use O(stop_before_removing=true). +requirements: ["supervisorctl"] +author: + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " +""" + +EXAMPLES = r""" +- name: Manage the state of program to be in started state + community.general.supervisorctl: + name: my_app + state: started + +- name: Manage the state of program group to be in started state + community.general.supervisorctl: + name: 'my_apps:' + state: started + +- name: Restart my_app, reading supervisorctl configuration from a specified file + community.general.supervisorctl: + name: my_app + state: restarted + config: /var/opt/my_project/supervisord.conf + +- name: Restart my_app, connecting to supervisord with credentials and server URL + community.general.supervisorctl: + name: my_app + state: restarted + username: test + password: testpass + server_url: http://localhost:9001 + +- name: Send a signal to my_app via supervisorctl + community.general.supervisorctl: + name: my_app + state: signalled + signal: USR1 + +- name: Restart all programs and program groups + community.general.supervisorctl: + name: all + state: restarted +""" + +import os +from ansible.module_utils.basic import AnsibleModule, is_executable + + +def main(): + arg_spec = dict( + name=dict(type='str', required=True), + config=dict(type='path'), + server_url=dict(type='str'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + supervisorctl_path=dict(type='path'), + state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']), + stop_before_removing=dict(type='bool', default=False), + signal=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[('state', 'signalled', ['signal'])], + ) + + name = module.params['name'] + is_group = False + if name.endswith(':'): + is_group = True + name = name.rstrip(':') + state = module.params['state'] + stop_before_removing = module.params.get('stop_before_removing') + config = module.params.get('config') + server_url = module.params.get('server_url') + username = module.params.get('username') + password = module.params.get('password') + supervisorctl_path = module.params.get('supervisorctl_path') + signal = module.params.get('signal') + + # we check error message for a pattern, so we need to make sure that's in 
C locale + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + if supervisorctl_path: + if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path): + supervisorctl_args = [supervisorctl_path] + else: + module.fail_json( + msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path) + else: + supervisorctl_args = [module.get_bin_path('supervisorctl', True)] + + if config: + supervisorctl_args.extend(['-c', config]) + if server_url: + supervisorctl_args.extend(['-s', server_url]) + if username: + supervisorctl_args.extend(['-u', username]) + if password: + supervisorctl_args.extend(['-p', password]) + + def run_supervisorctl(cmd, name=None, **kwargs): + args = list(supervisorctl_args) # copy the master args + args.append(cmd) + if name: + args.append(name) + return module.run_command(args, **kwargs) + + def get_matched_processes(): + matched = [] + rc, out, err = run_supervisorctl('status') + for line in out.splitlines(): + # One status line may look like one of these two: + # process not in group: + # echo_date_lonely RUNNING pid 7680, uptime 13:22:18 + # process in group: + # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18 + fields = [field for field in line.split(' ') if field != ''] + process_name = fields[0] + status = fields[1] + + if is_group: + # If there is ':', this process must be in a group. + if ':' in process_name: + group = process_name.split(':')[0] + if group != name: + continue + else: + continue + else: + if process_name != name and name != "all": + continue + + matched.append((process_name, status)) + return matched + + def take_action_on_processes(processes, status_filter, action, expected_result, exit_module=True): + to_take_action_on = [] + for process_name, status in processes: + if status_filter(status): + to_take_action_on.append(process_name) + + if len(to_take_action_on) == 0: + if not exit_module: + return + module.exit_json(changed=False, name=name, state=state) + if module.check_mode: + if not exit_module: + return + module.exit_json(changed=True) + for process_name in to_take_action_on: + rc, out, err = run_supervisorctl(action, process_name, check_rc=True) + if '%s: %s' % (process_name, expected_result) not in out: + module.fail_json(msg=out) + + if exit_module: + module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) + + if state == 'restarted': + rc, out, err = run_supervisorctl('update', check_rc=True) + processes = get_matched_processes() + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + + take_action_on_processes(processes, lambda s: True, 'restart', 'started') + + processes = get_matched_processes() + + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if stop_before_removing: + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped', exit_module=False) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + + if state == 'present': + if len(processes) > 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + 
dummy, out, dummy = run_supervisorctl('add', name) + if '%s: added process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + + # from this point onwards, if there are no matching processes, module cannot go on. + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + + if state == 'started': + take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') + + if state == 'stopped': + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') + + if state == 'signalled': + take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled') + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/svc.py b/plugins/modules/svc.py deleted file mode 120000 index 1495d69eff..0000000000 --- a/plugins/modules/svc.py +++ /dev/null @@ -1 +0,0 @@ -./system/svc.py \ No newline at end of file diff --git a/plugins/modules/svc.py b/plugins/modules/svc.py new file mode 100644 index 0000000000..a4ad991d63 --- /dev/null +++ b/plugins/modules/svc.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: svc +author: + - Brian Coca (@bcoca) +short_description: Manage C(daemontools) services +description: + - Controls C(daemontools) services on remote hosts using the C(svc) utility. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the svc (svc -t) and V(killed) always bounces the svc (svc -k). + - V(reloaded) sends a sigusr1 (svc -1). + - V(once) runs a normally downed svc once (svc -o), not really an idempotent operation. + type: str + choices: [killed, once, reloaded, restarted, started, stopped] + downed: + description: + - Should a C(down) file exist or not, if it exists it disables auto startup. Defaults to V(false). Downed does not imply + stopped. + type: bool + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies O(state=stopped). Take note that a service can + be enabled and downed (no auto restart). + type: bool + service_dir: + description: + - Directory C(svscan) watches for services. + type: str + default: /service + service_src: + description: + - Directory where services are defined, the source of symlinks to O(service_dir). 
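+      - Enabling a service creates a symlink from this directory into O(service_dir), where C(svscan) picks it up.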
+ type: str + default: /etc/service +""" + +EXAMPLES = r""" +- name: Start svc dnscache, if not running + community.general.svc: + name: dnscache + state: started + +- name: Stop svc dnscache, if running + community.general.svc: + name: dnscache + state: stopped + +- name: Kill svc dnscache, in all cases + community.general.svc: + name: dnscache + state: killed + +- name: Restart svc dnscache, in all cases + community.general.svc: + name: dnscache + state: restarted + +- name: Reload svc dnscache, in all cases + community.general.svc: + name: dnscache + state: reloaded + +- name: Using alternative svc directory location + community.general.svc: + name: dnscache + state: reloaded + service_dir: /var/service +""" + +import os +import re +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def _load_dist_subclass(cls, *args, **kwargs): + ''' + Used for derivative implementations + ''' + subclass = None + + distro = kwargs['module'].params['distro'] + + # get the most specific superclass for this platform + if distro is not None: + for sc in cls.__subclasses__(): + if sc.distro is not None and sc.distro == distro: + subclass = sc + if subclass is None: + subclass = cls + + return super(cls, subclass).__new__(subclass) + + +class Svc(object): + """ + Main class that handles daemontools, can be subclassed and overridden in case + we want to use a 'derivative' like encore, s6, etc + """ + + # def __new__(cls, *args, **kwargs): + # return _load_dist_subclass(cls, args, kwargs) + + def __init__(self, module): + self.extra_paths = ['/command', '/usr/local/bin'] + self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.downed = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) + self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([self.service_dir, self.name]) + self.src_full = '/'.join([self.service_src, self.name]) + + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.downed = os.path.lexists('%s/down' % self.svc_full) + self.get_status() + else: + self.downed = os.path.lexists('%s/down' % self.src_full) + self.state = 'stopped' + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError as e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full) + + def disable(self): + try: + os.unlink(self.svc_full) + except OSError as e: + self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e)) + self.execute_command([self.svc_cmd, '-dx', self.src_full]) + + src_log = '%s/log' % self.src_full + if os.path.exists(src_log): + self.execute_command([self.svc_cmd, '-dx', src_log]) + + def get_status(self): + rc, out, err = self.execute_command([self.svstat_cmd, self.svc_full]) + + if err is not None and err: + self.full_state = self.state = err + else: + self.full_state = out + + m = re.search(r'\(pid (\d+)\)', out) + if m: + self.pid = m.group(1) + + m = re.search(r'(\d+) seconds', out) + if m: + self.duration = m.group(1) + + if re.search(' up ', out): + self.state = 'start' + elif re.search(' down ', out): + self.state = 'stopp' + else: + self.state = 'unknown' + return + + if re.search(' want ', out): + self.state += 'ing' + else: + self.state += 'ed' + + def start(self): + return self.execute_command([self.svc_cmd, '-u', self.svc_full]) + + def stopp(self): + return self.stop() + + def stop(self): + return self.execute_command([self.svc_cmd, '-d', self.svc_full]) + + def once(self): + return self.execute_command([self.svc_cmd, '-o', self.svc_full]) + + def reload(self): + return self.execute_command([self.svc_cmd, '-1', self.svc_full]) + + def restart(self): + return self.execute_command([self.svc_cmd, '-t', self.svc_full]) + + def kill(self): + return self.execute_command([self.svc_cmd, '-k', self.svc_full]) + + def execute_command(self, cmd): + try: + rc, out, err = self.module.run_command(cmd) + except Exception as e: + self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc()) + return (rc, out, err) + + def report(self): + self.get_status() + states = {} + for k in self.report_vars: + states[k] = self.__dict__[k] + return states + + +# =========================================== +# Main control flow + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), + enabled=dict(type='bool'), + downed=dict(type='bool'), + service_dir=dict(type='str', default='/service'), + service_src=dict(type='str', default='/etc/service'), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + state = module.params['state'] + enabled = module.params['enabled'] + downed = module.params['downed'] + + svc = Svc(module) + changed = False + orig_state = svc.report() + + if enabled is not None and enabled != svc.enabled: + changed = True + if not module.check_mode: + try: + if enabled: + svc.enable() + else: + svc.disable() + except (OSError, IOError) as e: + module.fail_json(msg="Could not change service link: %s" % to_native(e)) + + if state is not None and state != svc.state: + changed = True + if not module.check_mode: + getattr(svc, state[:-2])() + + if downed is not None and downed != svc.downed: + changed = True + if not module.check_mode: + d_file = "%s/down" % svc.svc_full + try: + if downed: + open(d_file, "a").close() + else: + os.unlink(d_file) + except (OSError, IOError) as e: + module.fail_json(msg="Could not change downed file: %s " % (to_native(e))) + + module.exit_json(changed=changed, svc=svc.report()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py deleted file mode 
120000 index 9fe6be3147..0000000000 --- a/plugins/modules/svr4pkg.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/svr4pkg.py \ No newline at end of file diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py new file mode 100644 index 0000000000..4f9a61e104 --- /dev/null +++ b/plugins/modules/svr4pkg.py @@ -0,0 +1,268 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Boyd Adamson +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: svr4pkg +short_description: Manage Solaris SVR4 packages +description: + - Manages SVR4 packages on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11. + - Note that this is a very basic packaging system. It does not enforce dependencies on install or remove. +author: "Boyd Adamson (@brontitall)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Package name, for example V(SUNWcsr). + required: true + type: str + + state: + description: + - Whether to install (V(present)), or remove (V(absent)) a package. + - If the package is to be installed, then O(src) is required. + - The SVR4 package system does not provide an upgrade operation. You need to uninstall the old, then install the new + package. + required: true + choices: ["present", "absent"] + type: str + + src: + description: + - Specifies the location to install the package from. Required when O(state=present). + - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), + V(http://server/mypkgs.pkg)." + - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module + for a way to get them there. + type: str + proxy: + description: + - HTTP[s] proxy to be used if O(src) is a URL. + type: str + response_file: + description: + - Specifies the location of a response file to be used if package expects input on install. + required: false + type: str + zone: + description: + - Whether to install the package only in the current zone, or install it into all zones. + - The installation into all zones works only if you are working with the global zone. + required: false + default: "all" + choices: ["current", "all"] + type: str + category: + description: + - Install/Remove category instead of a single package. 
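+      - When set to V(true), O(name) is treated as a package category and passed to C(pkgadd)/C(pkgrm) with the C(-Y) flag.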
+ required: false + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Install a package from an already copied file + community.general.svr4pkg: + name: CSWcommon + src: /tmp/cswpkgs.pkg + state: present + +- name: Install a package directly from an http site + community.general.svr4pkg: + name: CSWpkgutil + src: 'http://get.opencsw.org/now' + state: present + zone: current + +- name: Install a package with a response file + community.general.svr4pkg: + name: CSWggrep + src: /tmp/third-party.pkg + response_file: /tmp/ggrep.response + state: present + +- name: Ensure that a package is not installed + community.general.svr4pkg: + name: SUNWgnome-sound-recorder + state: absent + +- name: Ensure that a category is not installed + community.general.svr4pkg: + name: FIREFOX + state: absent + category: true +""" + + +import os +import tempfile + +from ansible.module_utils.basic import AnsibleModule + + +def package_installed(module, name, category): + cmd = [module.get_bin_path('pkginfo', True), '-q'] + if category: + cmd.append('-c') + cmd.append(name) + rc, out, err = module.run_command(cmd) + if rc == 0: + return True + else: + return False + + +def create_admin_file(): + (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) + fullauto = b''' +mail= +instance=unique +partial=nocheck +runlevel=quit +idepend=nocheck +rdepend=nocheck +space=quit +setuid=nocheck +conflict=nocheck +action=nocheck +networktimeout=60 +networkretries=3 +authentication=quit +keystore=/var/sadm/security +proxy= +basedir=default +''' + os.write(desc, fullauto) + os.close(desc) + return filename + + +def run_command(module, cmd): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True) + return module.run_command(cmd) + + +def package_install(module, name, src, proxy, response_file, zone, category): + adminfile = create_admin_file() + cmd = ['pkgadd', '-n'] + if zone == 'current': + cmd += ['-G'] + cmd += ['-a', adminfile, '-d', src] + if proxy is not None: + cmd += ['-x', proxy] + if response_file is not None: + cmd += ['-r', response_file] + if category: + cmd += ['-Y'] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + + +def package_uninstall(module, name, src, category): + adminfile = create_admin_file() + if category: + cmd = ['pkgrm', '-na', adminfile, '-Y', name] + else: + cmd = ['pkgrm', '-na', adminfile, name] + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=True, choices=['present', 'absent']), + src=dict(), + proxy=dict(), + response_file=dict(), + zone=dict(default='all', choices=['current', 'all']), + category=dict(default=False, type='bool') + ), + supports_check_mode=True + ) + state = module.params['state'] + name = module.params['name'] + src = module.params['src'] + proxy = module.params['proxy'] + response_file = module.params['response_file'] + zone = module.params['zone'] + category = module.params['category'] + rc = None + out = '' + err = '' + result = {} + result['name'] = name + result['state'] = state + + if state == 'present': + if src is None: + module.fail_json(name=name, + msg="src is required when state=present") + if not package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) + # Stdout is normally 
empty but for some packages can be + # very long and is not often useful + if len(out) > 75: + out = out[:75] + '...' + + elif state == 'absent': + if package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_uninstall(module, name, src, category) + out = out[:75] + + # Returncodes as per pkgadd(1m) + # 0 Successful completion + # 1 Fatal error. + # 2 Warning. + # 3 Interruption. + # 4 Administration. + # 5 Administration. Interaction is required. Do not use pkgadd -n. + # 10 Reboot after installation of all packages. + # 20 Reboot after installation of this package. + # 99 (observed) pkgadd: ERROR: could not process datastream from + if rc in (0, 2, 3, 10, 20): + result['changed'] = True + # no install nor uninstall, or failed + else: + result['changed'] = False + + # rc will be none when the package already was installed and no action took place + # Only return failed=False when the returncode is known to be good as there may be more + # undocumented failure return codes + if rc not in (None, 0, 2, 10, 20): + result['failed'] = True + else: + result['failed'] = False + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py deleted file mode 120000 index ece133bad8..0000000000 --- a/plugins/modules/swdepot.py +++ /dev/null @@ -1 +0,0 @@ -./packaging/os/swdepot.py \ No newline at end of file diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py new file mode 100644 index 0000000000..8e0233b04f --- /dev/null +++ b/plugins/modules/swdepot.py @@ -0,0 +1,210 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Raul Melo +# Written by Raul Melo +# Based on yum module written by Seth Vidal +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: swdepot +short_description: Manage packages with swdepot package manager (HP-UX) +description: + - Installs, upgrades, and removes packages with C(swdepot) package manager (HP-UX). +notes: [] +author: "Raul Melo (@melodous)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Package name. + aliases: [pkg] + required: true + type: str + state: + description: + - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. + required: true + choices: ['present', 'latest', 'absent'] + type: str + depot: + description: + - The source repository from which install or upgrade a package. + type: str +""" + +EXAMPLES = r""" +- name: Install a package + community.general.swdepot: + name: unzip-6.0 + state: present + depot: 'repository:/path' + +- name: Install the latest version of a package + community.general.swdepot: + name: unzip + state: latest + depot: 'repository:/path' + +- name: Remove a package + community.general.swdepot: + name: unzip + state: absent +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def compare_package(version1, version2): + """ Compare version packages. 
+        Return values:
+        -1 first is lower
+        0 equal
+        1 first is greater """
+
+    def normalize(v):
+        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+    normalized_version1 = normalize(version1)
+    normalized_version2 = normalize(version2)
+    if normalized_version1 == normalized_version2:
+        rc = 0
+    elif normalized_version1 < normalized_version2:
+        rc = -1
+    else:
+        rc = 1
+    return rc
+
+
+def query_package(module, name, depot=None):
+    """ Returns whether a package is installed or not and its version. """
+
+    cmd_list = ['/usr/sbin/swlist', '-a', 'revision', '-l', 'product']
+    if depot:
+        cmd_list.extend(['-s', depot])
+    cmd_list.append(name)
+    rc, stdout, stderr = module.run_command(cmd_list)
+    if rc == 0:
+        stdout = ''.join(line for line in stdout.splitlines(True) if name in line)
+        version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+    else:
+        version = None
+
+    return rc, version
+
+
+def remove_package(module, name):
+    """ Uninstall package if installed. """
+
+    cmd_remove = '/usr/sbin/swremove'
+    rc, stdout, stderr = module.run_command([cmd_remove, name])
+
+    if rc == 0:
+        return rc, stdout
+    else:
+        return rc, stderr
+
+
+def install_package(module, depot, name):
+    """ Install package if not already installed """
+
+    cmd_install = ['/usr/sbin/swinstall', '-x', 'mount_all_filesystems=false']
+    rc, stdout, stderr = module.run_command(cmd_install + ["-s", depot, name])
+    if rc == 0:
+        return rc, stdout
+    else:
+        return rc, stderr
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['pkg'], required=True),
+            state=dict(choices=['present', 'absent', 'latest'], required=True),
+            depot=dict()
+        ),
+        supports_check_mode=True
+    )
+    name = module.params['name']
+    state = module.params['state']
+    depot = module.params['depot']
+
+    changed = False
+    msg = "Not changed"
+    rc = 0
+    if (state == 'present' or state == 'latest') and depot is None:
+        output = "depot parameter is mandatory when state is present or latest"
+        module.fail_json(name=name, msg=output, rc=rc)
+
+    # Check local version
+    rc, version_installed = query_package(module, name)
+    if not rc:
+        installed = True
+        msg = "Already installed"
+
+    else:
+        installed = False
+
+    if (state == 'present' or state == 'latest') and installed is False:
+        if module.check_mode:
+            module.exit_json(changed=True)
+        rc, output = install_package(module, depot, name)
+
+        if not rc:
+            changed = True
+            msg = "Package installed"
+
+        else:
+            module.fail_json(name=name, msg=output, rc=rc)
+
+    elif state == 'latest' and installed is True:
+        # Check depot version
+        rc, version_depot = query_package(module, name, depot)
+
+        if not rc:
+            if compare_package(version_installed, version_depot) == -1:
+                if module.check_mode:
+                    module.exit_json(changed=True)
+                # Install new version
+                rc, output = install_package(module, depot, name)
+
+                if not rc:
+                    msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
+                    changed = True
+
+                else:
+                    module.fail_json(name=name, msg=output, rc=rc)
+
+        else:
+            output = "Software package not in repository " + depot
+            module.fail_json(name=name, msg=output, rc=rc)
+
+    elif state == 'absent' and installed is True:
+        if module.check_mode:
+            module.exit_json(changed=True)
+        rc, output = remove_package(module, name)
+        if not rc:
+            changed = True
+            msg = "Package removed"
+        else:
+            module.fail_json(name=name, msg=output, rc=rc)
+
+    if module.check_mode:
+        module.exit_json(changed=False)
+
+    module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py
deleted file mode 120000
index ba121e3d78..0000000000
--- a/plugins/modules/swupd.py
+++ /dev/null
@@ -1 +0,0 @@
-./packaging/os/swupd.py
\ No newline at end of file
diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py
new file mode 100644
index 0000000000..0ab529e3e7
--- /dev/null
+++ b/plugins/modules/swupd.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017, Alberto Murillo
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+module: swupd
+short_description: Manages updates and bundles in ClearLinux systems
+description:
+  - Manages updates and bundles with the swupd bundle manager, which is used by the Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  contenturl:
+    description:
+      - URL pointing to the contents of available bundles. If not specified, the contents are retrieved from clearlinux.org.
+    type: str
+  format:
+    description:
+      - The format suffix for version file downloads. For example V(1), V(2), V(3), and so on, or the special value V(staging).
+        If not specified, the default format is used.
+    type: str
+  manifest:
+    description:
+      - The manifest contains information about the bundles at a certain version of the OS. Specify a manifest version to
+        verify against that version, or leave unspecified to verify against the current version.
+    aliases: [release, version]
+    type: int
+  name:
+    description:
+      - Name of the bundle to install or remove.
+    aliases: [bundle]
+    type: str
+  state:
+    description:
+      - Indicates the desired bundle state. V(present) ensures the bundle is installed while V(absent) ensures the bundle
+        is not installed.
+    default: present
+    choices: [present, absent]
+    type: str
+  update:
+    description:
+      - Updates the OS to the latest version.
+    type: bool
+    default: false
+  url:
+    description:
+      - Overrides both O(contenturl) and O(versionurl).
+    type: str
+  verify:
+    description:
+      - Verify content for OS version.
+    type: bool
+    default: false
+  versionurl:
+    description:
+      - URL for version string download.
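+      - This option is ignored when O(url) is provided.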
+ type: str +""" + +EXAMPLES = r""" +- name: Update the OS to the latest version + community.general.swupd: + update: true + +- name: Installs the "foo" bundle + community.general.swupd: + name: foo + state: present + +- name: Removes the "foo" bundle + community.general.swupd: + name: foo + state: absent + +- name: Check integrity of filesystem + community.general.swupd: + verify: true + +- name: Downgrade OS to release 12920 + community.general.swupd: + verify: true + manifest: 12920 +""" + + +import os +from ansible.module_utils.basic import AnsibleModule + + +class Swupd(object): + FILES_NOT_MATCH = "files did not match" + FILES_REPLACED = "missing files were replaced" + FILES_FIXED = "files were fixed" + FILES_DELETED = "files were deleted" + + def __init__(self, module): + # Fail if swupd is not found + self.module = module + self.swupd_cmd = module.get_bin_path("swupd", False) + if not self.swupd_cmd: + module.fail_json(msg="Could not find swupd.") + + # Initialize parameters + for key in module.params.keys(): + setattr(self, key, module.params[key]) + + # Initialize return values + self.changed = False + self.failed = False + self.msg = None + self.rc = None + self.stderr = "" + self.stdout = "" + + def _run_cmd(self, cmd): + self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False) + + def _get_cmd(self, command): + cmd = [self.swupd_cmd] + command + + if self.format: + cmd.append("--format=%s" % self.format) + if self.manifest: + cmd.append("--manifest=%s" % self.manifest) + if self.url: + cmd.append("--url=%s" % self.url) + else: + if self.contenturl and command != "check-update": + cmd.append("--contenturl=%s" % self.contenturl) + if self.versionurl: + cmd.append("--versionurl=%s" % self.versionurl) + + return cmd + + def _is_bundle_installed(self, bundle): + try: + os.stat("/usr/share/clear/bundles/%s" % bundle) + except OSError: + return False + + return True + + def _needs_update(self): + cmd = self._get_cmd(["check-update"]) + self._run_cmd(cmd) + + if self.rc == 0: + return True + + if self.rc == 1: + return False + + self.failed = True + self.msg = "Failed to check for updates" + + def _needs_verify(self): + cmd = self._get_cmd(["verify"]) + self._run_cmd(cmd) + + if self.rc != 0: + self.failed = True + self.msg = "Failed to check for filesystem inconsistencies." 
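+        # `swupd verify` may exit non-zero and still produce usable output, so
+        # fall through and inspect stdout for the mismatch marker either way.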
+
+        if self.FILES_NOT_MATCH in self.stdout:
+            return True
+
+        return False
+
+    def install_bundle(self, bundle):
+        """Installs a bundle with `swupd bundle-add bundle`"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+        if self._is_bundle_installed(bundle):
+            self.msg = "Bundle %s is already installed" % bundle
+            return
+
+        cmd = self._get_cmd(["bundle-add", bundle])
+        self._run_cmd(cmd)
+
+        if self.rc == 0:
+            self.changed = True
+            self.msg = "Bundle %s installed" % bundle
+            return
+
+        self.failed = True
+        self.msg = "Failed to install bundle %s" % bundle
+
+    def remove_bundle(self, bundle):
+        """Removes a bundle with `swupd bundle-remove bundle`"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+        if not self._is_bundle_installed(bundle):
+            self.msg = "Bundle %s not installed" % bundle
+            return
+
+        cmd = self._get_cmd(["bundle-remove", bundle])
+        self._run_cmd(cmd)
+
+        if self.rc == 0:
+            self.changed = True
+            self.msg = "Bundle %s removed" % bundle
+            return
+
+        self.failed = True
+        self.msg = "Failed to remove bundle %s" % bundle
+
+    def update_os(self):
+        """Updates the os with `swupd update`"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=self._needs_update())
+
+        if not self._needs_update():
+            self.msg = "There are no updates available"
+            return
+
+        cmd = self._get_cmd(["update"])
+        self._run_cmd(cmd)
+
+        if self.rc == 0:
+            self.changed = True
+            self.msg = "Update successful"
+            return
+
+        self.failed = True
+        self.msg = "Failed to update the OS"
+
+    def verify_os(self):
+        """Verifies filesystem against specified or current version"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=self._needs_verify())
+
+        if not self._needs_verify():
+            self.msg = "No files were changed"
+            return
+
+        cmd = self._get_cmd(["verify", "--fix"])
+        self._run_cmd(cmd)
+
+        if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+            self.changed = True
+            self.msg = "Fix successful"
+            return
+
+        self.failed = True
+        self.msg = "Failed to verify the OS"
+
+
+def main():
+    """The main function."""
+    module = AnsibleModule(
+        argument_spec=dict(
+            contenturl=dict(type="str"),
+            format=dict(type="str"),
+            manifest=dict(aliases=["release", "version"], type="int"),
+            name=dict(aliases=["bundle"], type="str"),
+            state=dict(default="present", choices=["present", "absent"], type="str"),
+            update=dict(default=False, type="bool"),
+            url=dict(type="str"),
+            verify=dict(default=False, type="bool"),
+            versionurl=dict(type="str"),
+        ),
+        required_one_of=[["name", "update", "verify"]],
+        mutually_exclusive=[["name", "update", "verify"]],
+        supports_check_mode=True
+    )
+
+    swupd = Swupd(module)
+
+    name = module.params["name"]
+    state = module.params["state"]
+    update = module.params["update"]
+    verify = module.params["verify"]
+
+    if update:
+        swupd.update_os()
+    elif verify:
+        swupd.verify_os()
+    elif state == "present":
+        swupd.install_bundle(name)
+    elif state == "absent":
+        swupd.remove_bundle(name)
+    else:
+        swupd.failed = True
+
+    if swupd.failed:
+        module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+    else:
+        module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py
deleted file mode 120000
index 9123b027a0..0000000000
--- a/plugins/modules/syslogger.py
+++ /dev/null
@@ -1 +0,0 @@
-./notification/syslogger.py
\ No newline at end of file
diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py
new file mode 100644
index 0000000000..9922d4b579
--- /dev/null
+++ b/plugins/modules/syslogger.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Tim Rightnour
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: syslogger
+short_description: Log messages in the syslog
+description:
+  - Uses syslog to add log entries to the host.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  msg:
+    type: str
+    description:
+      - This is the message to place in syslog.
+    required: true
+  priority:
+    type: str
+    description:
+      - Set the log priority.
+    choices: ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"]
+    default: "info"
+  facility:
+    type: str
+    description:
+      - Set the log facility.
+    choices:
+      - kern
+      - user
+      - mail
+      - daemon
+      - auth
+      - lpr
+      - news
+      - uucp
+      - cron
+      - syslog
+      - local0
+      - local1
+      - local2
+      - local3
+      - local4
+      - local5
+      - local6
+      - local7
+    default: "daemon"
+  log_pid:
+    description:
+      - Log the PID in brackets.
+    type: bool
+    default: false
+  ident:
+    description:
+      - Specify the name of the application sending the log to syslog.
+    type: str
+    default: 'ansible_syslogger'
+    version_added: '0.2.0'
+author:
+  - Tim Rightnour (@garbled1)
+"""

+EXAMPLES = r"""
+- name: Simple Usage
+  community.general.syslogger:
+    msg: "I will end up as daemon.info"
+
+- name: Send a log message with err priority and user facility with log_pid
+  community.general.syslogger:
+    msg: "Hello from Ansible"
+    priority: "err"
+    facility: "user"
+    log_pid: true
+
+- name: Specify the name of application which is sending log message
+  community.general.syslogger:
+    ident: "MyApp"
+    msg: "I want to believe"
+    priority: "alert"
+"""
+
+RETURN = r"""
+ident:
+  description: Name of application sending the message to log.
+  returned: always
+  type: str
+  sample: "ansible_syslogger"
+  version_added: '0.2.0'
+priority:
+  description: Priority level.
+  returned: always
+  type: str
+  sample: "info"
+facility:
+  description: Syslog facility.
+  returned: always
+  type: str
+  sample: "daemon"
+log_pid:
+  description: Log PID status.
+  returned: always
+  type: bool
+  sample: true
+msg:
+  description: Message sent to syslog.
+ returned: always + type: str + sample: "Hello from Ansible" +""" + +import syslog +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def get_facility(facility): + return { + 'kern': syslog.LOG_KERN, + 'user': syslog.LOG_USER, + 'mail': syslog.LOG_MAIL, + 'daemon': syslog.LOG_DAEMON, + 'auth': syslog.LOG_AUTH, + 'lpr': syslog.LOG_LPR, + 'news': syslog.LOG_NEWS, + 'uucp': syslog.LOG_UUCP, + 'cron': syslog.LOG_CRON, + 'syslog': syslog.LOG_SYSLOG, + 'local0': syslog.LOG_LOCAL0, + 'local1': syslog.LOG_LOCAL1, + 'local2': syslog.LOG_LOCAL2, + 'local3': syslog.LOG_LOCAL3, + 'local4': syslog.LOG_LOCAL4, + 'local5': syslog.LOG_LOCAL5, + 'local6': syslog.LOG_LOCAL6, + 'local7': syslog.LOG_LOCAL7 + }.get(facility, syslog.LOG_DAEMON) + + +def get_priority(priority): + return { + 'emerg': syslog.LOG_EMERG, + 'alert': syslog.LOG_ALERT, + 'crit': syslog.LOG_CRIT, + 'err': syslog.LOG_ERR, + 'warning': syslog.LOG_WARNING, + 'notice': syslog.LOG_NOTICE, + 'info': syslog.LOG_INFO, + 'debug': syslog.LOG_DEBUG + }.get(priority, syslog.LOG_INFO) + + +def main(): + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + ident=dict(type='str', default='ansible_syslogger'), + msg=dict(type='str', required=True), + priority=dict(type='str', + choices=["emerg", "alert", "crit", "err", "warning", + "notice", "info", "debug"], + default='info'), + facility=dict(type='str', + choices=["kern", "user", "mail", "daemon", "auth", + "lpr", "news", "uucp", "cron", "syslog", + "local0", "local1", "local2", "local3", + "local4", "local5", "local6", "local7"], + default='daemon'), + log_pid=dict(type='bool', default=False) + ) + + module = AnsibleModule( + argument_spec=module_args, + ) + + result = dict( + changed=False, + ident=module.params['ident'], + priority=module.params['priority'], + facility=module.params['facility'], + log_pid=module.params['log_pid'], + msg=module.params['msg'] + ) + + # do the logging + try: + syslog.openlog(module.params['ident'], + syslog.LOG_PID if module.params['log_pid'] else 0, + get_facility(module.params['facility'])) + syslog.syslog(get_priority(module.params['priority']), + module.params['msg']) + syslog.closelog() + result['changed'] = True + + except Exception as exc: + module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py deleted file mode 120000 index 3d206a61de..0000000000 --- a/plugins/modules/syspatch.py +++ /dev/null @@ -1 +0,0 @@ -./system/syspatch.py \ No newline at end of file diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py new file mode 100644 index 0000000000..a64c8a4bec --- /dev/null +++ b/plugins/modules/syspatch.py @@ -0,0 +1,155 @@ +#!/usr/bin/python + +# Copyright (c) 2019-2020, Andrew Klaus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: syspatch + +short_description: Manage OpenBSD system patches + + +description: + - Manage OpenBSD system patches using syspatch. 
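+  - Running the module with no options applies all available patches.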
+extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + revert: + description: + - Revert system patches. + type: str + choices: [all, one] + +author: + - Andrew Klaus (@precurse) +""" + +EXAMPLES = r""" +- name: Apply all available system patches + community.general.syspatch: + +- name: Revert last patch + community.general.syspatch: + revert: one + +- name: Revert all patches + community.general.syspatch: + revert: all + +# NOTE: You can reboot automatically if a patch requires it: +- name: Apply all patches and store result + community.general.syspatch: + register: syspatch + +- name: Reboot if patch requires it + ansible.builtin.reboot: + when: syspatch.reboot_needed +""" + +RETURN = r""" +reboot_needed: + description: Whether or not a reboot is required after an update. + returned: always + type: bool + sample: true +""" + +from ansible.module_utils.basic import AnsibleModule + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + revert=dict(type='str', choices=['all', 'one']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + result = syspatch_run(module) + + module.exit_json(**result) + + +def syspatch_run(module): + cmd = module.get_bin_path('syspatch', True) + changed = False + reboot_needed = False + + # Set safe defaults for run_flag and check_flag + run_flag = ['-c'] + check_flag = ['-c'] + if module.params['revert']: + check_flag = ['-l'] + + if module.params['revert'] == 'all': + run_flag = ['-R'] + else: + run_flag = ['-r'] + else: + check_flag = ['-c'] + run_flag = [] + + # Run check command + rc, out, err = module.run_command([cmd] + check_flag) + + if rc != 0: + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + + if len(out) > 0: + # Changes pending + change_pending = True + else: + # No changes pending + change_pending = False + + if module.check_mode: + changed = change_pending + elif change_pending: + rc, out, err = module.run_command([cmd] + run_flag) + + # Workaround syspatch ln bug: + # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html + if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n': + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + elif out.lower().find('create unique kernel') >= 0: + # Kernel update applied + reboot_needed = True + elif out.lower().find('syspatch updated itself') >= 0: + module.warn('Syspatch was updated. 
Please run syspatch again.') + + # If no stdout, then warn user + if len(out) == 0: + module.warn('syspatch had suggested changes, but stdout was empty.') + + changed = True + else: + changed = False + + return dict( + changed=changed, + reboot_needed=reboot_needed, + rc=rc, + stderr=err, + stdout=out, + ) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py deleted file mode 120000 index 25c66335f5..0000000000 --- a/plugins/modules/sysrc.py +++ /dev/null @@ -1 +0,0 @@ -./system/sysrc.py \ No newline at end of file diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py new file mode 100644 index 0000000000..1f34a90aaf --- /dev/null +++ b/plugins/modules/sysrc.py @@ -0,0 +1,215 @@ +#!/usr/bin/python + +# Copyright (c) 2019 David Lundgren +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - David Lundgren (@dlundgren) +module: sysrc +short_description: Manage FreeBSD using sysrc +version_added: '2.0.0' +description: + - Manages C(/etc/rc.conf) for FreeBSD. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of variable in C(/etc/rc.conf) to manage. + type: str + required: true + value: + description: + - The value to set when O(state=present). + - The value to add when O(state=value_present). + - The value to remove when O(state=value_absent). + type: str + state: + description: + - Use V(present) to add the variable. + - Use V(absent) to remove the variable. + - Use V(value_present) to add the value to the existing variable. + - Use V(value_absent) to remove the value from the existing variable. + type: str + default: "present" + choices: [absent, present, value_present, value_absent] + path: + description: + - Path to file to use instead of V(/etc/rc.conf). + type: str + default: "/etc/rc.conf" + delim: + description: + - Delimiter to be used instead of V(" ") (space). + - Only used when O(state=value_present) or O(state=value_absent). + default: " " + type: str + jail: + description: + - Name or ID of the jail to operate on. + type: str +notes: + - The O(name) cannot contain periods as sysrc does not support OID style names. 
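+  - When O(jail) is set, the C(-j) option of sysrc is used to apply the change inside the specified jail.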
+""" + +EXAMPLES = r""" +# enable mysql in the /etc/rc.conf +- name: Configure mysql pid file + community.general.sysrc: + name: mysql_pidfile + value: "/var/run/mysqld/mysqld.pid" + +# enable accf_http kld in the boot loader +- name: Enable accf_http kld + community.general.sysrc: + name: accf_http_load + state: present + value: "YES" + path: /boot/loader.conf + +# add gif0 to cloned_interfaces +- name: Add gif0 interface + community.general.sysrc: + name: cloned_interfaces + state: value_present + value: "gif0" + +# enable nginx on a jail +- name: Enable nginx in test jail + community.general.sysrc: + name: nginx_enable + value: "YES" + jail: testjail +""" + + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + +import os +import re + + +class Sysrc(StateModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + value=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']), + path=dict(type='str', default='/etc/rc.conf'), + delim=dict(type='str', default=' '), + jail=dict(type='str') + ), + supports_check_mode=True + ) + output_params = ('value',) + use_old_vardict = False + + def __init_module__(self): + # OID style names are not supported + if not re.match(r'^\w+$', self.vars.name, re.ASCII): + self.module.fail_json(msg="Name may only contain alpha-numeric and underscore characters") + + self.sysrc = self.module.get_bin_path('sysrc', True) + + def _contains(self): + value = self._get() + if value is None: + return False, None + + value = value.split(self.vars.delim) + + return self.vars.value in value, value + + def _get(self): + if not os.path.exists(self.vars.path): + return None + + (rc, out, err) = self._sysrc('-v', '-n', self.vars.name) + if "unknown variable" in err or "unknown variable" in out: + # Prior to FreeBSD 11.1 sysrc would write "unknown variable" to stdout and not stderr + # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=229806 + return None + + if out.startswith(self.vars.path): + return out.split(':', 1)[1].strip() + + return None + + def _modify(self, op, changed): + (rc, out, err) = self._sysrc("%s%s=%s%s" % (self.vars.name, op, self.vars.delim, self.vars.value)) + if out.startswith("%s:" % self.vars.name): + return changed(out.split(' -> ')[1].strip().split(self.vars.delim)) + + return False + + def _sysrc(self, *args): + cmd = [self.sysrc, '-f', self.vars.path] + if self.vars.jail: + cmd += ['-j', self.vars.jail] + cmd.extend(args) + + (rc, out, err) = self.module.run_command(cmd) + if "Permission denied" in err: + self.module.fail_json(msg="Permission denied for %s" % self.vars.path) + + return rc, out, err + + def state_absent(self): + if self._get() is None: + return + + if not self.check_mode: + self._sysrc('-x', self.vars.name) + + self.changed = True + + def state_present(self): + value = self._get() + if value == self.vars.value: + return + + if self.vars.value is None: + self.vars.set('value', value) + return + + if not self.check_mode: + self._sysrc("%s=%s" % (self.vars.name, self.vars.value)) + + self.changed = True + + def state_value_absent(self): + (contains, _unused) = self._contains() + if not contains: + return + + self.changed = self.check_mode or self._modify('-', lambda values: self.vars.value not in values) + + def state_value_present(self): + (contains, value) = self._contains() + if contains: + return + + if self.vars.value is None: + self.vars.set('value', value) + return + + 
self.changed = self.check_mode or self._modify('+', lambda values: self.vars.value in values) + + +def main(): + Sysrc.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/aix_devices.py b/plugins/modules/system/aix_devices.py deleted file mode 100644 index 89468059f3..0000000000 --- a/plugins/modules/system/aix_devices.py +++ /dev/null @@ -1,369 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, 2018 Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Kairo Araujo (@kairoaraujo) -module: aix_devices -short_description: Manages AIX devices -description: -- This module discovers, defines, removes and modifies attributes of AIX devices. -options: - attributes: - description: - - A list of device attributes. - type: dict - device: - description: - - The name of the device. - - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command). - type: str - force: - description: - - Forces action. - type: bool - default: no - recursive: - description: - - Removes or defines a device and children devices. - type: bool - default: no - state: - description: - - Controls the device state. - - C(available) (alias C(present)) rescan a specific device or all devices (when C(device) is not specified). - - C(removed) (alias C(absent) removes a device. - - C(defined) changes device to Defined state. - type: str - choices: [ available, defined, removed ] - default: available -''' - -EXAMPLES = r''' -- name: Scan new devices - community.general.aix_devices: - device: all - state: available - -- name: Scan new virtual devices (vio0) - community.general.aix_devices: - device: vio0 - state: available - -- name: Removing IP alias to en0 - community.general.aix_devices: - device: en0 - attributes: - delalias4: 10.0.0.100,255.255.255.0 - -- name: Removes ent2 - community.general.aix_devices: - device: ent2 - state: removed - -- name: Put device en2 in Defined - community.general.aix_devices: - device: en2 - state: defined - -- name: Removes ent4 (inexistent). - community.general.aix_devices: - device: ent4 - state: removed - -- name: Put device en4 in Defined (inexistent) - community.general.aix_devices: - device: en4 - state: defined - -- name: Put vscsi1 and children devices in Defined state. - community.general.aix_devices: - device: vscsi1 - recursive: yes - state: defined - -- name: Removes vscsi1 and children devices. - community.general.aix_devices: - device: vscsi1 - recursive: yes - state: removed - -- name: Changes en1 mtu to 9000 and disables arp. - community.general.aix_devices: - device: en1 - attributes: - mtu: 900 - arp: off - state: available - -- name: Configure IP, netmask and set en1 up. - community.general.aix_devices: - device: en1 - attributes: - netaddr: 192.168.0.100 - netmask: 255.255.255.0 - state: up - state: available - -- name: Adding IP alias to en0 - community.general.aix_devices: - device: en0 - attributes: - alias4: 10.0.0.100,255.255.255.0 - state: available -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule - - -def _check_device(module, device): - """ - Check if device already exists and the state. - Args: - module: Ansible module. - device: device to be checked. 
- - Returns: bool, device state - - """ - lsdev_cmd = module.get_bin_path('lsdev', True) - rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device]) - - if rc != 0: - module.fail_json(msg="Failed to run lsdev", rc=rc, err=err) - - if lsdev_out: - device_state = lsdev_out.split()[1] - return True, device_state - - device_state = None - return False, device_state - - -def _check_device_attr(module, device, attr): - """ - - Args: - module: Ansible module. - device: device to check attributes. - attr: attribute to be checked. - - Returns: - - """ - lsattr_cmd = module.get_bin_path('lsattr', True) - rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr]) - - hidden_attrs = ['delalias4', 'delalias6'] - - if rc == 255: - - if attr in hidden_attrs: - current_param = '' - else: - current_param = None - - return current_param - - elif rc != 0: - module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err) - - current_param = lsattr_out.split()[1] - return current_param - - -def discover_device(module, device): - """ Discover AIX devices.""" - cfgmgr_cmd = module.get_bin_path('cfgmgr', True) - - if device is not None: - device = "-l %s" % device - - else: - device = '' - - changed = True - msg = '' - if not module.check_mode: - rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device]) - changed = True - msg = cfgmgr_out - - return changed, msg - - -def change_device_attr(module, attributes, device, force): - """ Change AIX device attribute. """ - - attr_changed = [] - attr_not_changed = [] - attr_invalid = [] - chdev_cmd = module.get_bin_path('chdev', True) - - for attr in list(attributes.keys()): - new_param = attributes[attr] - current_param = _check_device_attr(module, device, attr) - - if current_param is None: - attr_invalid.append(attr) - - elif current_param != new_param: - if force: - cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force] - else: - cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])] - - if not module.check_mode: - rc, chdev_out, err = module.run_command(cmd) - if rc != 0: - module.exit_json(msg="Failed to run chdev.", rc=rc, err=err) - - attr_changed.append(attributes[attr]) - else: - attr_not_changed.append(attributes[attr]) - - if len(attr_changed) > 0: - changed = True - attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed) - else: - changed = False - attr_changed_msg = '' - - if len(attr_not_changed) > 0: - attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed) - else: - attr_not_changed_msg = '' - - if len(attr_invalid) > 0: - attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid) - else: - attr_invalid_msg = '' - - msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg) - - return changed, msg - - -def remove_device(module, device, force, recursive, state): - """ Puts device in defined state or removes device. 
""" - - state_opt = { - 'removed': '-d', - 'absent': '-d', - 'defined': '' - } - - recursive_opt = { - True: '-R', - False: '' - } - - recursive = recursive_opt[recursive] - state = state_opt[state] - - changed = True - msg = '' - rmdev_cmd = module.get_bin_path('rmdev', True) - - if not module.check_mode: - if state: - rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force]) - else: - rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive]) - - if rc != 0: - module.fail_json(msg="Failed to run rmdev", rc=rc, err=err) - - msg = rmdev_out - - return changed, msg - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - attributes=dict(type='dict'), - device=dict(type='str'), - force=dict(type='bool', default=False), - recursive=dict(type='bool', default=False), - state=dict(type='str', default='available', choices=['available', 'defined', 'removed']), - ), - supports_check_mode=True, - ) - - force_opt = { - True: '-f', - False: '', - } - - attributes = module.params['attributes'] - device = module.params['device'] - force = force_opt[module.params['force']] - recursive = module.params['recursive'] - state = module.params['state'] - - result = dict( - changed=False, - msg='', - ) - - if state == 'available' or state == 'present': - if attributes: - # change attributes on device - device_status, device_state = _check_device(module, device) - if device_status: - result['changed'], result['msg'] = change_device_attr(module, attributes, device, force) - else: - result['msg'] = "Device %s does not exist." % device - - else: - # discovery devices (cfgmgr) - if device and device != 'all': - device_status, device_state = _check_device(module, device) - if device_status: - # run cfgmgr on specific device - result['changed'], result['msg'] = discover_device(module, device) - - else: - result['msg'] = "Device %s does not exist." % device - - else: - result['changed'], result['msg'] = discover_device(module, device) - - elif state == 'removed' or state == 'absent' or state == 'defined': - if not device: - result['msg'] = "device is required to removed or defined state." - - else: - # Remove device - check_device, device_state = _check_device(module, device) - if check_device: - if state == 'defined' and device_state == 'Defined': - result['changed'] = False - result['msg'] = 'Device %s already in Defined' % device - - else: - result['changed'], result['msg'] = remove_device(module, device, force, recursive, state) - - else: - result['msg'] = "Device %s does not exist." % device - - else: - result['msg'] = "Unexpected state %s." 
% state - module.fail_json(**result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/aix_filesystem.py b/plugins/modules/system/aix_filesystem.py deleted file mode 100644 index 58a5c25df3..0000000000 --- a/plugins/modules/system/aix_filesystem.py +++ /dev/null @@ -1,567 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: - - Kairo Araujo (@kairoaraujo) -module: aix_filesystem -short_description: Configure LVM and NFS file systems for AIX -description: - - This module creates, removes, mount and unmount LVM and NFS file system for - AIX using C(/etc/filesystems). - - For LVM file systems is possible to resize a file system. -options: - account_subsystem: - description: - - Specifies whether the file system is to be processed by the accounting subsystem. - type: bool - default: no - attributes: - description: - - Specifies attributes for files system separated by comma. - type: list - elements: str - default: agblksize='4096',isnapshot='no' - auto_mount: - description: - - File system is automatically mounted at system restart. - type: bool - default: yes - device: - description: - - Logical volume (LV) device name or remote export device to create a NFS file system. - - It is used to create a file system on an already existing logical volume or the exported NFS file system. - - If not mentioned a new logical volume name will be created following AIX standards (LVM). - type: str - fs_type: - description: - - Specifies the virtual file system type. - type: str - default: jfs2 - permissions: - description: - - Set file system permissions. C(rw) (read-write) or C(ro) (read-only). - type: str - choices: [ ro, rw ] - default: rw - mount_group: - description: - - Specifies the mount group. - type: str - filesystem: - description: - - Specifies the mount point, which is the directory where the file system will be mounted. - type: str - required: true - nfs_server: - description: - - Specifies a Network File System (NFS) server. - type: str - rm_mount_point: - description: - - Removes the mount point directory when used with state C(absent). - type: bool - default: no - size: - description: - - Specifies the file system size. - - For already C(present) it will be resized. - - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified - it will be in Megabytes. If the value has G specified it will be in - Gigabytes. - - If no M or G the value will be 512-byte blocks. - - If "+" is specified in begin of value, the value will be added. - - If "-" is specified in begin of value, the value will be removed. - - If "+" or "-" is not specified, the total value will be the specified. - - Size will respects the LVM AIX standards. - type: str - state: - description: - - Controls the file system state. - - C(present) check if file system exists, creates or resize. - - C(absent) removes existing file system if already C(unmounted). - - C(mounted) checks if the file system is mounted or mount the file system. - - C(unmounted) check if the file system is unmounted or unmount the file system. - type: str - choices: [ absent, mounted, present, unmounted ] - default: present - vg: - description: - - Specifies an existing volume group (VG). 
- type: str -notes: - - For more C(attributes), please check "crfs" AIX manual. -''' - -EXAMPLES = r''' -- name: Create filesystem in a previously defined logical volume. - community.general.aix_filesystem: - device: testlv - community.general.filesystem: /testfs - state: present - -- name: Creating NFS filesystem from nfshost. - community.general.aix_filesystem: - device: /home/ftp - nfs_server: nfshost - community.general.filesystem: /home/ftp - state: present - -- name: Creating a new file system without a previously logical volume. - community.general.aix_filesystem: - community.general.filesystem: /newfs - size: 1G - state: present - vg: datavg - -- name: Unmounting /testfs. - community.general.aix_filesystem: - community.general.filesystem: /testfs - state: unmounted - -- name: Resizing /mksysb to +512M. - community.general.aix_filesystem: - community.general.filesystem: /mksysb - size: +512M - state: present - -- name: Resizing /mksysb to 11G. - community.general.aix_filesystem: - community.general.filesystem: /mksysb - size: 11G - state: present - -- name: Resizing /mksysb to -2G. - community.general.aix_filesystem: - community.general.filesystem: /mksysb - size: -2G - state: present - -- name: Remove NFS filesystem /home/ftp. - community.general.aix_filesystem: - community.general.filesystem: /home/ftp - rm_mount_point: yes - state: absent - -- name: Remove /newfs. - community.general.aix_filesystem: - community.general.filesystem: /newfs - rm_mount_point: yes - state: absent -''' - -RETURN = r''' -changed: - description: Return changed for aix_filesystems actions as true or false. - returned: always - type: bool -msg: - description: Return message regarding the action. - returned: always - type: str -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._mount import ismount -import re - - -def _fs_exists(module, filesystem): - """ - Check if file system already exists on /etc/filesystems. - - :param module: Ansible module. - :param community.general.filesystem: filesystem name. - :return: True or False. - """ - lsfs_cmd = module.get_bin_path('lsfs', True) - rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem)) - if rc == 1: - if re.findall("No record matching", err): - return False - - else: - module.fail_json(msg="Failed to run lsfs. Error message: %s" % err) - - else: - - return True - - -def _check_nfs_device(module, nfs_host, device): - """ - Validate if NFS server is exporting the device (remote export). - - :param module: Ansible module. - :param nfs_host: nfs_host parameter, NFS server. - :param device: device parameter, remote export. - :return: True or False. - """ - showmount_cmd = module.get_bin_path('showmount', True) - rc, showmount_out, err = module.run_command( - "%s -a %s" % (showmount_cmd, nfs_host)) - if rc != 0: - module.fail_json(msg="Failed to run showmount. Error message: %s" % err) - else: - showmount_data = showmount_out.splitlines() - for line in showmount_data: - if line.split(':')[1] == device: - return True - - return False - - -def _validate_vg(module, vg): - """ - Check the current state of volume group. - - :param module: Ansible module argument spec. - :param vg: Volume Group name. - :return: True (VG in varyon state) or False (VG in varyoff state) or - None (VG does not exist), message. 
- """ - lsvg_cmd = module.get_bin_path('lsvg', True) - rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd) - if rc != 0: - module.fail_json(msg="Failed executing %s command." % lsvg_cmd) - - rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd) - if rc != 0: - module.fail_json(msg="Failed executing %s command." % lsvg_cmd) - - if vg in current_all_vgs and vg not in current_active_vgs: - msg = "Volume group %s is in varyoff state." % vg - return False, msg - elif vg in current_active_vgs: - msg = "Volume group %s is in varyon state." % vg - return True, msg - else: - msg = "Volume group %s does not exist." % vg - return None, msg - - -def resize_fs(module, filesystem, size): - """ Resize LVM file system. """ - - chfs_cmd = module.get_bin_path('chfs', True) - if not module.check_mode: - rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem)) - - if rc == 28: - changed = False - return changed, chfs_out - elif rc != 0: - if re.findall('Maximum allocation for logical', err): - changed = False - return changed, err - else: - module.fail_json(msg="Failed to run chfs. Error message: %s" % err) - - else: - if re.findall('The filesystem size is already', chfs_out): - changed = False - else: - changed = True - - return changed, chfs_out - else: - changed = True - msg = '' - - return changed, msg - - -def create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, - account_subsystem, permissions, nfs_server, attributes): - """ Create LVM file system or NFS remote mount point. """ - - attributes = ' -a '.join(attributes) - - # Parameters definition. - account_subsys_opt = { - True: '-t yes', - False: '-t no' - } - - if nfs_server is not None: - auto_mount_opt = { - True: '-A', - False: '-a' - } - - else: - auto_mount_opt = { - True: '-A yes', - False: '-A no' - } - - if size is None: - size = '' - else: - size = "-a size=%s" % size - - if device is None: - device = '' - else: - device = "-d %s" % device - - if vg is None: - vg = '' - else: - vg_state, msg = _validate_vg(module, vg) - if vg_state: - vg = "-g %s" % vg - else: - changed = False - - return changed, msg - - if mount_group is None: - mount_group = '' - - else: - mount_group = "-u %s" % mount_group - - auto_mount = auto_mount_opt[auto_mount] - account_subsystem = account_subsys_opt[account_subsystem] - - if nfs_server is not None: - # Creates a NFS file system. - mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True) - if not module.check_mode: - rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % ( - mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount)) - if rc != 0: - module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err) - else: - changed = True - msg = "NFS file system %s created." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - else: - # Creates a LVM file system. - crfs_cmd = module.get_bin_path('crfs', True) - if not module.check_mode: - cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % ( - crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes) - rc, crfs_out, err = module.run_command(cmd) - - if rc == 10: - module.exit_json( - msg="Using a existent previously defined logical volume, " - "volume group needs to be empty. %s" % err) - - elif rc != 0: - module.fail_json(msg="Failed to run %s. 
Error message: %s" % (cmd, err)) - - else: - changed = True - return changed, crfs_out - else: - changed = True - msg = '' - - return changed, msg - - -def remove_fs(module, filesystem, rm_mount_point): - """ Remove an LVM file system or NFS entry. """ - - # Command parameters. - rm_mount_point_opt = { - True: '-r', - False: '' - } - - rm_mount_point = rm_mount_point_opt[rm_mount_point] - - rmfs_cmd = module.get_bin_path('rmfs', True) - if not module.check_mode: - cmd = "%s -r %s %s" % (rmfs_cmd, rm_mount_point, filesystem) - rc, rmfs_out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) - else: - changed = True - msg = rmfs_out - if not rmfs_out: - msg = "File system %s removed." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - -def mount_fs(module, filesystem): - """ Mount a file system. """ - mount_cmd = module.get_bin_path('mount', True) - - if not module.check_mode: - rc, mount_out, err = module.run_command( - "%s %s" % (mount_cmd, filesystem)) - if rc != 0: - module.fail_json(msg="Failed to run mount. Error message: %s" % err) - else: - changed = True - msg = "File system %s mounted." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - -def unmount_fs(module, filesystem): - """ Unmount a file system.""" - unmount_cmd = module.get_bin_path('unmount', True) - - if not module.check_mode: - rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem)) - if rc != 0: - module.fail_json(msg="Failed to run unmount. Error message: %s" % err) - else: - changed = True - msg = "File system %s unmounted." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - -def main(): - module = AnsibleModule( - argument_spec=dict( - account_subsystem=dict(type='bool', default=False), - attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]), - auto_mount=dict(type='bool', default=True), - device=dict(type='str'), - filesystem=dict(type='str', required=True), - fs_type=dict(type='str', default='jfs2'), - permissions=dict(type='str', default='rw', choices=['rw', 'ro']), - mount_group=dict(type='str'), - nfs_server=dict(type='str'), - rm_mount_point=dict(type='bool', default=False), - size=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']), - vg=dict(type='str'), - ), - supports_check_mode=True, - ) - - account_subsystem = module.params['account_subsystem'] - attributes = module.params['attributes'] - auto_mount = module.params['auto_mount'] - device = module.params['device'] - fs_type = module.params['fs_type'] - permissions = module.params['permissions'] - mount_group = module.params['mount_group'] - filesystem = module.params['filesystem'] - nfs_server = module.params['nfs_server'] - rm_mount_point = module.params['rm_mount_point'] - size = module.params['size'] - state = module.params['state'] - vg = module.params['vg'] - - result = dict( - changed=False, - msg='', - ) - - if state == 'present': - fs_mounted = ismount(filesystem) - fs_exists = _fs_exists(module, filesystem) - - # Check if fs is mounted or exists. - if fs_mounted or fs_exists: - result['msg'] = "File system %s already exists." % filesystem - result['changed'] = False - - # If parameter size was passed, resize fs. 
- if size is not None: - result['changed'], result['msg'] = resize_fs(module, filesystem, size) - - # If fs doesn't exist, create it. - else: - # Check if fs will be a NFS device. - if nfs_server is not None: - if device is None: - result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.' - module.fail_json(**result) - else: - # Create a fs from NFS export. - if _check_nfs_device(module, nfs_server, device): - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - if device is None: - if vg is None: - result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.' - module.fail_json(**result) - else: - # Create a fs from - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - if device is not None and nfs_server is None: - # Create a fs from a previously lv device. - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - elif state == 'absent': - if ismount(filesystem): - result['msg'] = "File system %s mounted." % filesystem - - else: - fs_status = _fs_exists(module, filesystem) - if not fs_status: - result['msg'] = "File system %s does not exist." % filesystem - else: - result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point) - - elif state == 'mounted': - if ismount(filesystem): - result['changed'] = False - result['msg'] = "File system %s already mounted." % filesystem - else: - result['changed'], result['msg'] = mount_fs(module, filesystem) - - elif state == 'unmounted': - if not ismount(filesystem): - result['changed'] = False - result['msg'] = "File system %s already unmounted." % filesystem - else: - result['changed'], result['msg'] = unmount_fs(module, filesystem) - - else: - # Unreachable codeblock - result['msg'] = "Unexpected state %s." % state - module.fail_json(**result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/aix_inittab.py b/plugins/modules/system/aix_inittab.py deleted file mode 100644 index c2daface36..0000000000 --- a/plugins/modules/system/aix_inittab.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Joris Weijters -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Joris Weijters (@molekuul) -module: aix_inittab -short_description: Manages the inittab on AIX -description: - - Manages the inittab on AIX. -options: - name: - description: - - Name of the inittab entry. - type: str - required: yes - aliases: [ service ] - runlevel: - description: - - Runlevel of the entry. - type: str - required: yes - action: - description: - - Action what the init has to do with this entry. - type: str - choices: - - boot - - bootwait - - hold - - initdefault - - 'off' - - once - - ondemand - - powerfail - - powerwait - - respawn - - sysinit - - wait - command: - description: - - What command has to run. - type: str - required: yes - insertafter: - description: - - After which inittabline should the new entry inserted. 
- type: str - state: - description: - - Whether the entry should be present or absent in the inittab file. - type: str - choices: [ absent, present ] - default: present -notes: - - The changes are persistent across reboots. - - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands. - - Tested on AIX 7.1. -requirements: -- itertools -''' - -EXAMPLES = ''' -# Add service startmyservice to the inittab, directly after service existingservice. -- name: Add startmyservice to inittab - community.general.aix_inittab: - name: startmyservice - runlevel: 4 - action: once - command: echo hello - insertafter: existingservice - state: present - become: yes - -# Change inittab entry startmyservice to runlevel "2" and processaction "wait". -- name: Change startmyservice to inittab - community.general.aix_inittab: - name: startmyservice - runlevel: 2 - action: wait - command: echo hello - state: present - become: yes - -- name: Remove startmyservice from inittab - community.general.aix_inittab: - name: startmyservice - runlevel: 2 - action: wait - command: echo hello - state: absent - become: yes -''' - -RETURN = ''' -name: - description: Name of the adjusted inittab entry - returned: always - type: str - sample: startmyservice -msg: - description: Action done with the inittab entry - returned: changed - type: str - sample: changed inittab entry startmyservice -changed: - description: Whether the inittab changed or not - returned: always - type: bool - sample: true -''' - -# Import necessary libraries -try: - # python 2 - from itertools import izip -except ImportError: - izip = zip - -from ansible.module_utils.basic import AnsibleModule - -# end import modules -# start defining the functions - - -def check_current_entry(module): - # Check if entry exists, if not return False in exists in return dict, - # if true return True and the entry in return dict - existsdict = {'exist': False} - lsitab = module.get_bin_path('lsitab') - (rc, out, err) = module.run_command([lsitab, module.params['name']]) - if rc == 0: - keys = ('name', 'runlevel', 'action', 'command') - values = out.split(":") - # strip non readable characters as \n - values = map(lambda s: s.strip(), values) - existsdict = dict(izip(keys, values)) - existsdict.update({'exist': True}) - return existsdict - - -def main(): - # initialize - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True, aliases=['service']), - runlevel=dict(type='str', required=True), - action=dict(type='str', choices=[ - 'boot', - 'bootwait', - 'hold', - 'initdefault', - 'off', - 'once', - 'ondemand', - 'powerfail', - 'powerwait', - 'respawn', - 'sysinit', - 'wait', - ]), - command=dict(type='str', required=True), - insertafter=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - ) - - result = { - 'name': module.params['name'], - 'changed': False, - 'msg': "" - } - - # Find commandline strings - mkitab = module.get_bin_path('mkitab') - rmitab = module.get_bin_path('rmitab') - chitab = module.get_bin_path('chitab') - rc = 0 - - # check if the new entry exists - current_entry = check_current_entry(module) - - # if action is install or change, - if module.params['state'] == 'present': - - # create new entry string - new_entry = module.params['name'] + ":" + module.params['runlevel'] + \ - ":" + module.params['action'] + ":" + module.params['command'] - - # If current entry exists or fields are different(if the entry 
does not - # exists, then the entry wil be created - if (not current_entry['exist']) or ( - module.params['runlevel'] != current_entry['runlevel'] or - module.params['action'] != current_entry['action'] or - module.params['command'] != current_entry['command']): - - # If the entry does exist then change the entry - if current_entry['exist']: - if not module.check_mode: - (rc, out, err) = module.run_command([chitab, new_entry]) - if rc != 0: - module.fail_json( - msg="could not change inittab", rc=rc, err=err) - result['msg'] = "changed inittab entry" + " " + current_entry['name'] - result['changed'] = True - - # If the entry does not exist create the entry - elif not current_entry['exist']: - if module.params['insertafter']: - if not module.check_mode: - (rc, out, err) = module.run_command( - [mkitab, '-i', module.params['insertafter'], new_entry]) - else: - if not module.check_mode: - (rc, out, err) = module.run_command( - [mkitab, new_entry]) - - if rc != 0: - module.fail_json(msg="could not adjust inittab", rc=rc, err=err) - result['msg'] = "add inittab entry" + " " + module.params['name'] - result['changed'] = True - - elif module.params['state'] == 'absent': - # If the action is remove and the entry exists then remove the entry - if current_entry['exist']: - if not module.check_mode: - (rc, out, err) = module.run_command( - [rmitab, module.params['name']]) - if rc != 0: - module.fail_json( - msg="could not remove entry from inittab)", rc=rc, err=err) - result['msg'] = "removed inittab entry" + " " + current_entry['name'] - result['changed'] = True - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/aix_lvg.py b/plugins/modules/system/aix_lvg.py deleted file mode 100644 index 569711f492..0000000000 --- a/plugins/modules/system/aix_lvg.py +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Kairo Araujo (@kairoaraujo) -module: aix_lvg -short_description: Manage LVM volume groups on AIX -description: -- This module creates, removes or resize volume groups on AIX LVM. -options: - force: - description: - - Force volume group creation. - type: bool - default: no - pp_size: - description: - - The size of the physical partition in megabytes. - type: int - pvs: - description: - - List of comma-separated devices to use as physical devices in this volume group. - - Required when creating or extending (C(present) state) the volume group. - - If not informed reducing (C(absent) state) the volume group will be removed. - type: list - elements: str - state: - description: - - Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff). - type: str - choices: [ absent, present, varyoff, varyon ] - default: present - vg: - description: - - The name of the volume group. - type: str - required: true - vg_type: - description: - - The type of the volume group. - type: str - choices: [ big, normal, scalable ] - default: normal -notes: -- AIX will permit remove VG only if all LV/Filesystems are not busy. -- Module does not modify PP size for already present volume group. 
-''' - -EXAMPLES = r''' -- name: Create a volume group datavg - community.general.aix_lvg: - vg: datavg - pp_size: 128 - vg_type: scalable - state: present - -- name: Removing a volume group datavg - community.general.aix_lvg: - vg: datavg - state: absent - -- name: Extending rootvg - community.general.aix_lvg: - vg: rootvg - pvs: hdisk1 - state: present - -- name: Reducing rootvg - community.general.aix_lvg: - vg: rootvg - pvs: hdisk1 - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule - - -def _validate_pv(module, vg, pvs): - """ - Function to validate if the physical volume (PV) is not already in use by - another volume group or Oracle ASM. - - :param module: Ansible module argument spec. - :param vg: Volume group name. - :param pvs: Physical volume list. - :return: [bool, message] or module.fail_json for errors. - """ - - lspv_cmd = module.get_bin_path('lspv', True) - rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd) - if rc != 0: - module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr) - - for pv in pvs: - # Get pv list. - lspv_list = {} - for line in current_lspv.splitlines(): - pv_data = line.split() - lspv_list[pv_data[0]] = pv_data[2] - - # Check if pv exists and is free. - if pv not in lspv_list.keys(): - module.fail_json(msg="Physical volume '%s' doesn't exist." % pv) - - if lspv_list[pv] == 'None': - # Disk None, looks free. - # Check if PV is not already in use by Oracle ASM. - lquerypv_cmd = module.get_bin_path('lquerypv', True) - rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv)) - if rc != 0: - module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) - - if 'ORCLDISK' in current_lquerypv: - module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv) - - msg = "Physical volume '%s' is ok to be used." % pv - return True, msg - - # Check if PV is already in use for the same vg. - elif vg != lspv_list[pv]: - module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv])) - - msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv]) - return False, msg - - -def _validate_vg(module, vg): - """ - Check the current state of volume group. - - :param module: Ansible module argument spec. - :param vg: Volume Group name. - :return: True (VG in varyon state) or False (VG in varyoff state) or - None (VG does not exist), message. - """ - lsvg_cmd = module.get_bin_path('lsvg', True) - rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd) - if rc != 0: - module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) - - rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd) - if rc != 0: - module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) - - if vg in current_all_vgs and vg not in current_active_vgs: - msg = "Volume group '%s' is in varyoff state." % vg - return False, msg - - if vg in current_active_vgs: - msg = "Volume group '%s' is in varyon state." % vg - return True, msg - - msg = "Volume group '%s' does not exist." % vg - return None, msg - - -def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): - """ Creates or extend a volume group. """ - - # Command option parameters. - force_opt = { - True: '-f', - False: '' - } - - vg_opt = { - 'normal': '', - 'big': '-B', - 'scalable': '-S', - } - - # Validate if PV are not already in use. 
- pv_state, msg = _validate_pv(module, vg, pvs) - if not pv_state: - changed = False - return changed, msg - - vg_state, msg = vg_validation - if vg_state is False: - changed = False - return changed, msg - - elif vg_state is True: - # Volume group extension. - changed = True - msg = "" - - if not module.check_mode: - extendvg_cmd = module.get_bin_path('extendvg', True) - rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs))) - if rc != 0: - changed = False - msg = "Extending volume group '%s' has failed." % vg - return changed, msg - - msg = "Volume group '%s' extended." % vg - return changed, msg - - elif vg_state is None: - # Volume group creation. - changed = True - msg = '' - - if not module.check_mode: - mkvg_cmd = module.get_bin_path('mkvg', True) - rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs))) - if rc != 0: - changed = False - msg = "Creating volume group '%s' failed." % vg - return changed, msg - - msg = "Volume group '%s' created." % vg - return changed, msg - - -def reduce_vg(module, vg, pvs, vg_validation): - vg_state, msg = vg_validation - - if vg_state is False: - changed = False - return changed, msg - - elif vg_state is None: - changed = False - return changed, msg - - # Define pvs_to_remove (list of physical volumes to be removed). - if pvs is None: - # Remove VG if pvs are note informed. - # Remark: AIX will permit remove only if the VG has not LVs. - lsvg_cmd = module.get_bin_path('lsvg', True) - rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg)) - if rc != 0: - module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd) - - pvs_to_remove = [] - for line in current_pvs.splitlines()[2:]: - pvs_to_remove.append(line.split()[0]) - - reduce_msg = "Volume group '%s' removed." % vg - else: - pvs_to_remove = pvs - reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg)) - - # Reduce volume group. - if len(pvs_to_remove) <= 0: - changed = False - msg = "No physical volumes to remove." - return changed, msg - - changed = True - msg = '' - - if not module.check_mode: - reducevg_cmd = module.get_bin_path('reducevg', True) - rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove))) - if rc != 0: - module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr) - - msg = reduce_msg - return changed, msg - - -def state_vg(module, vg, state, vg_validation): - vg_state, msg = vg_validation - - if vg_state is None: - module.fail_json(msg=msg) - - if state == 'varyon': - if vg_state is True: - changed = False - return changed, msg - - changed = True - msg = '' - if not module.check_mode: - varyonvg_cmd = module.get_bin_path('varyonvg', True) - rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg)) - if rc != 0: - module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err) - - msg = "Varyon volume group %s completed." % vg - return changed, msg - - elif state == 'varyoff': - if vg_state is False: - changed = False - return changed, msg - - changed = True - msg = '' - - if not module.check_mode: - varyonvg_cmd = module.get_bin_path('varyoffvg', True) - rc, varyonvg_out, stderr = module.run_command("%s %s" % (varyonvg_cmd, vg)) - if rc != 0: - module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr) - - msg = "Varyoff volume group %s completed." 
% vg - return changed, msg - - -def main(): - module = AnsibleModule( - argument_spec=dict( - force=dict(type='bool', default=False), - pp_size=dict(type='int'), - pvs=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']), - vg=dict(type='str', required=True), - vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable']) - ), - supports_check_mode=True, - ) - - force = module.params['force'] - pp_size = module.params['pp_size'] - pvs = module.params['pvs'] - state = module.params['state'] - vg = module.params['vg'] - vg_type = module.params['vg_type'] - - if pp_size is None: - pp_size = '' - else: - pp_size = "-s %s" % pp_size - - vg_validation = _validate_vg(module, vg) - - if state == 'present': - if not pvs: - changed = False - msg = "pvs is required to state 'present'." - module.fail_json(msg=msg) - else: - changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation) - - elif state == 'absent': - changed, msg = reduce_vg(module, vg, pvs, vg_validation) - - elif state == 'varyon' or state == 'varyoff': - changed, msg = state_vg(module, vg, state, vg_validation) - - else: - changed = False - msg = "Unexpected state" - - module.exit_json(changed=changed, msg=msg, state=state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/aix_lvol.py b/plugins/modules/system/aix_lvol.py deleted file mode 100644 index 02b4f06c5b..0000000000 --- a/plugins/modules/system/aix_lvol.py +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Alain Dejoux -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -author: - - Alain Dejoux (@adejoux) -module: aix_lvol -short_description: Configure AIX LVM logical volumes -description: - - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. -options: - vg: - description: - - The volume group this logical volume is part of. - type: str - required: true - lv: - description: - - The name of the logical volume. - type: str - required: true - lv_type: - description: - - The type of the logical volume. - type: str - default: jfs2 - size: - description: - - The size of the logical volume with one of the [MGT] units. - type: str - copies: - description: - - The number of copies of the logical volume. - - Maximum copies are 3. - type: int - default: 1 - policy: - description: - - Sets the interphysical volume allocation policy. - - C(maximum) allocates logical partitions across the maximum number of physical volumes. - - C(minimum) allocates logical partitions across the minimum number of physical volumes. - type: str - choices: [ maximum, minimum ] - default: maximum - state: - description: - - Control if the logical volume exists. If C(present) and the - volume does not already exist then the C(size) option is required. - type: str - choices: [ absent, present ] - default: present - opts: - description: - - Free-form options to be passed to the mklv command. - type: str - pvs: - description: - - A list of physical volumes e.g. C(hdisk1,hdisk2). 
- type: list - elements: str -''' - -EXAMPLES = r''' -- name: Create a logical volume of 512M - community.general.aix_lvol: - vg: testvg - lv: testlv - size: 512M - -- name: Create a logical volume of 512M with disks hdisk1 and hdisk2 - community.general.aix_lvol: - vg: testvg - lv: test2lv - size: 512M - pvs: [ hdisk1, hdisk2 ] - -- name: Create a logical volume of 512M mirrored - community.general.aix_lvol: - vg: testvg - lv: test3lv - size: 512M - copies: 2 - -- name: Create a logical volume of 1G with a minimum placement policy - community.general.aix_lvol: - vg: rootvg - lv: test4lv - size: 1G - policy: minimum - -- name: Create a logical volume with special options like mirror pool - community.general.aix_lvol: - vg: testvg - lv: testlv - size: 512M - opts: -p copy1=poolA -p copy2=poolB - -- name: Extend the logical volume to 1200M - community.general.aix_lvol: - vg: testvg - lv: test4lv - size: 1200M - -- name: Remove the logical volume - community.general.aix_lvol: - vg: testvg - lv: testlv - state: absent -''' - -RETURN = r''' -msg: - type: str - description: A friendly message describing the task result. - returned: always - sample: Logical volume testlv created. -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - - -def convert_size(module, size): - unit = size[-1].upper() - units = ['M', 'G', 'T'] - try: - multiplier = 1024 ** units.index(unit) - except ValueError: - module.fail_json(msg="No valid size unit specified.") - - return int(size[:-1]) * multiplier - - -def round_ppsize(x, base=16): - new_size = int(base * round(float(x) / base)) - if new_size < x: - new_size += base - return new_size - - -def parse_lv(data): - name = None - - for line in data.splitlines(): - match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line) - if match is not None: - name = match.group(1) - vg = match.group(2) - continue - match = re.search(r"LPs:\s+(\d+).*PPs", line) - if match is not None: - lps = int(match.group(1)) - continue - match = re.search(r"PP SIZE:\s+(\d+)", line) - if match is not None: - pp_size = int(match.group(1)) - continue - match = re.search(r"INTER-POLICY:\s+(\w+)", line) - if match is not None: - policy = match.group(1) - continue - - if not name: - return None - - size = lps * pp_size - - return {'name': name, 'vg': vg, 'size': size, 'policy': policy} - - -def parse_vg(data): - - for line in data.splitlines(): - - match = re.search(r"VOLUME GROUP:\s+(\w+)", line) - if match is not None: - name = match.group(1) - continue - - match = re.search(r"TOTAL PP.*\((\d+)", line) - if match is not None: - size = int(match.group(1)) - continue - - match = re.search(r"PP SIZE:\s+(\d+)", line) - if match is not None: - pp_size = int(match.group(1)) - continue - - match = re.search(r"FREE PP.*\((\d+)", line) - if match is not None: - free = int(match.group(1)) - continue - - return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - vg=dict(type='str', required=True), - lv=dict(type='str', required=True), - lv_type=dict(type='str', default='jfs2'), - size=dict(type='str'), - opts=dict(type='str', default=''), - copies=dict(type='int', default=1), - state=dict(type='str', default='present', choices=['absent', 'present']), - policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']), - pvs=dict(type='list', elements='str', default=list()) - ), - supports_check_mode=True, - ) - - vg = module.params['vg'] - lv = module.params['lv'] - lv_type = 
module.params['lv_type'] - size = module.params['size'] - opts = module.params['opts'] - copies = module.params['copies'] - policy = module.params['policy'] - state = module.params['state'] - pvs = module.params['pvs'] - - pv_list = ' '.join(pvs) - - if policy == 'maximum': - lv_policy = 'x' - else: - lv_policy = 'm' - - # Add echo command when running in check-mode - if module.check_mode: - test_opt = 'echo ' - else: - test_opt = '' - - # check if system commands are available - lsvg_cmd = module.get_bin_path("lsvg", required=True) - lslv_cmd = module.get_bin_path("lslv", required=True) - - # Get information on volume group requested - rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) - - if rc != 0: - if state == 'absent': - module.exit_json(changed=False, msg="Volume group %s does not exist." % vg) - else: - module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err) - - this_vg = parse_vg(vg_info) - - if size is not None: - # Calculate pp size and round it up based on pp size. - lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size']) - - # Get information on logical volume requested - rc, lv_info, err = module.run_command( - "%s %s" % (lslv_cmd, lv)) - - if rc != 0: - if state == 'absent': - module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv) - - changed = False - - this_lv = parse_lv(lv_info) - - if state == 'present' and not size: - if this_lv is None: - module.fail_json(msg="No size given.") - - if this_lv is None: - if state == 'present': - if lv_size > this_vg['free']: - module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free'])) - - # create LV - mklv_cmd = module.get_bin_path("mklv", required=True) - - cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list) - rc, out, err = module.run_command(cmd) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s created." % lv) - else: - module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err) - else: - if state == 'absent': - # remove LV - rmlv_cmd = module.get_bin_path("rmlv", required=True) - rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name'])) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s deleted." % lv) - else: - module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err) - else: - if this_lv['policy'] != policy: - # change lv allocation policy - chlv_cmd = module.get_bin_path("chlv", required=True) - rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name'])) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy)) - else: - module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err) - - if vg != this_lv['vg']: - module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg'])) - - # from here the last remaining action is to resize it, if no size parameter is passed we do nothing. - if not size: - module.exit_json(changed=False, msg="Logical volume %s already exist." 
% (lv)) - - # resize LV based on absolute values - if int(lv_size) > this_lv['size']: - extendlv_cmd = module.get_bin_path("extendlv", required=True) - cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size']) - rc, out, err = module.run_command(cmd) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size)) - else: - module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err) - elif lv_size < this_lv['size']: - module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size'])) - else: - module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/alternatives.py b/plugins/modules/system/alternatives.py deleted file mode 100644 index 5831382680..0000000000 --- a/plugins/modules/system/alternatives.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Gabe Mulley -# Copyright: (c) 2015, David Wittman -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: alternatives -short_description: Manages alternative programs for common commands -description: - - Manages symbolic links using the 'update-alternatives' tool. - - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). -author: - - David Wittman (@DavidWittman) - - Gabe Mulley (@mulby) -options: - name: - description: - - The generic name of the link. - type: str - required: true - path: - description: - - The path to the real executable that the link should point to. - type: path - required: true - link: - description: - - The path to the symbolic link that should point to the real executable. - - This option is always required on RHEL-based distributions. On Debian-based distributions this option is - required when the alternative I(name) is unknown to the system. - type: path - priority: - description: - - The priority of the alternative. 
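For reference, the size handling in the aix_lvol module above reduces to two small helpers: unit conversion to megabytes and rounding up to a physical-partition boundary, since AIX allocates logical volumes in whole PPs. A self-contained sketch; the PP size of 64 MB used in the example is an assumption, the module reads the real value from `lsvg`:

```python
def convert_size(size):
    """Convert '<n>M|G|T' to megabytes; a bad unit raises ValueError."""
    multiplier = 1024 ** ['M', 'G', 'T'].index(size[-1].upper())
    return int(size[:-1]) * multiplier

def round_ppsize(x, base=16):
    """Round x up to the next multiple of the volume group's PP size."""
    new_size = int(base * round(float(x) / base))
    if new_size < x:
        new_size += base
    return new_size

print(round_ppsize(convert_size('1G'), base=64))    # 1024
print(round_ppsize(convert_size('500M'), base=64))  # 512, the next PP boundary
```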
- type: int - default: 50 -requirements: [ update-alternatives ] -''' - -EXAMPLES = r''' -- name: Correct java version selected - community.general.alternatives: - name: java - path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - -- name: Alternatives link created - community.general.alternatives: - name: hadoop-conf - link: /etc/hadoop/conf - path: /etc/hadoop/conf.ansible - -- name: Make java 32 bit an alternative with low priority - community.general.alternatives: - name: java - path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java - priority: -10 -''' - -import os -import re -import subprocess - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - path=dict(type='path', required=True), - link=dict(type='path'), - priority=dict(type='int', default=50), - ), - supports_check_mode=True, - ) - - params = module.params - name = params['name'] - path = params['path'] - link = params['link'] - priority = params['priority'] - - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True) - - current_path = None - all_alternatives = [] - - # Run `update-alternatives --display ` to find existing alternatives - (rc, display_output, dummy) = module.run_command( - ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] - ) - - if rc == 0: - # Alternatives already exist for this link group - # Parse the output to determine the current path of the symlink and - # available alternatives - current_path_regex = re.compile(r'^\s*link currently points to (.*)$', - re.MULTILINE) - alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE) - - match = current_path_regex.search(display_output) - if match: - current_path = match.group(1) - all_alternatives = alternative_regex.findall(display_output) - - if not link: - # Read the current symlink target from `update-alternatives --query` - # in case we need to install the new alternative before setting it. 
- # - # This is only compatible on Debian-based systems, as the other - # alternatives don't have --query available - rc, query_output, dummy = module.run_command( - ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] - ) - if rc == 0: - for line in query_output.splitlines(): - if line.startswith('Link:'): - link = line.split()[1] - break - - if current_path != path: - if module.check_mode: - module.exit_json(changed=True, current_path=current_path) - try: - # install the requested path if necessary - if path not in all_alternatives: - if not os.path.exists(path): - module.fail_json(msg="Specified path %s does not exist" % path) - if not link: - module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") - - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)], - check_rc=True - ) - - # select the requested path - module.run_command( - [UPDATE_ALTERNATIVES, '--set', name, path], - check_rc=True - ) - - module.exit_json(changed=True) - except subprocess.CalledProcessError as cpe: - module.fail_json(msg=str(dir(cpe))) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/awall.py b/plugins/modules/system/awall.py deleted file mode 100644 index 260c7ae4d0..0000000000 --- a/plugins/modules/system/awall.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ted Trask -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: awall -short_description: Manage awall policies -author: Ted Trask (@tdtrask) -description: - - This modules allows for enable/disable/activate of I(awall) policies. - - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files - and activates the configuration on the system. -options: - name: - description: - - One or more policy names. - type: list - elements: str - state: - description: - - Whether the policies should be enabled or disabled. - type: str - choices: [ disabled, enabled ] - default: enabled - activate: - description: - - Activate the new firewall rules. - - Can be run with other steps or on its own. 
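The two regular expressions in the alternatives module above do the heavy lifting when parsing `update-alternatives --display`. They can be exercised standalone; the sample output below is illustrative and real output varies across update-alternatives versions:

```python
import re

SAMPLE_DISPLAY = """\
editor - auto mode
  link currently points to /usr/bin/vim.basic
/bin/nano - priority 40
/usr/bin/vim.basic - priority 30
"""

# Same patterns as in the module: one for the active target, one per candidate.
current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)

match = current_path_regex.search(SAMPLE_DISPLAY)
print(match.group(1) if match else None)          # /usr/bin/vim.basic
print(alternative_regex.findall(SAMPLE_DISPLAY))  # ['/bin/nano', '/usr/bin/vim.basic']
```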
- type: bool - default: no -''' - -EXAMPLES = r''' -- name: Enable "foo" and "bar" policy - community.general.awall: - name: [ foo bar ] - state: enabled - -- name: Disable "foo" and "bar" policy and activate new rules - community.general.awall: - name: - - foo - - bar - state: disabled - activate: no - -- name: Activate currently enabled firewall rules - community.general.awall: - activate: yes -''' - -RETURN = ''' # ''' - -import re -from ansible.module_utils.basic import AnsibleModule - - -def activate(module): - cmd = "%s activate --force" % (AWALL_PATH) - rc, stdout, stderr = module.run_command(cmd) - if rc == 0: - return True - else: - module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr) - - -def is_policy_enabled(module, name): - cmd = "%s list" % (AWALL_PATH) - rc, stdout, stderr = module.run_command(cmd) - if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE): - return True - return False - - -def enable_policy(module, names, act): - policies = [] - for name in names: - if not is_policy_enabled(module, name): - policies.append(name) - if not policies: - module.exit_json(changed=False, msg="policy(ies) already enabled") - names = " ".join(policies) - if module.check_mode: - cmd = "%s list" % (AWALL_PATH) - else: - cmd = "%s enable %s" % (AWALL_PATH, names) - rc, stdout, stderr = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr) - if act and not module.check_mode: - activate(module) - module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names) - - -def disable_policy(module, names, act): - policies = [] - for name in names: - if is_policy_enabled(module, name): - policies.append(name) - if not policies: - module.exit_json(changed=False, msg="policy(ies) already disabled") - names = " ".join(policies) - if module.check_mode: - cmd = "%s list" % (AWALL_PATH) - else: - cmd = "%s disable %s" % (AWALL_PATH, names) - rc, stdout, stderr = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr) - if act and not module.check_mode: - activate(module) - module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='enabled', choices=['disabled', 'enabled']), - name=dict(type='list', elements='str'), - activate=dict(type='bool', default=False), - ), - required_one_of=[['name', 'activate']], - supports_check_mode=True, - ) - - global AWALL_PATH - AWALL_PATH = module.get_bin_path('awall', required=True) - - p = module.params - - if p['name']: - if p['state'] == 'enabled': - enable_policy(module, p['name'], p['activate']) - elif p['state'] == 'disabled': - disable_policy(module, p['name'], p['activate']) - - if p['activate']: - if not module.check_mode: - activate(module) - module.exit_json(changed=True, msg="activated awall rules") - - module.fail_json(msg="no action defined") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/beadm.py b/plugins/modules/system/beadm.py deleted file mode 100644 index d89ca79af1..0000000000 --- a/plugins/modules/system/beadm.py +++ /dev/null @@ -1,408 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = 
r''' ---- -module: beadm -short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems. -description: - - Create, delete or activate ZFS boot environments. - - Mount and unmount ZFS boot environments. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS boot environment name. - type: str - required: True - aliases: [ "be" ] - snapshot: - description: - - If specified, the new boot environment will be cloned from the given - snapshot or inactive boot environment. - type: str - description: - description: - - Associate a description with a new boot environment. This option is - available only on Solarish platforms. - type: str - options: - description: - - Create the datasets for new BE with specific ZFS properties. - - Multiple options can be specified. - - This option is available only on Solarish platforms. - type: str - mountpoint: - description: - - Path where to mount the ZFS boot environment. - type: path - state: - description: - - Create or delete ZFS boot environment. - type: str - choices: [ absent, activated, mounted, present, unmounted ] - default: present - force: - description: - - Specifies if the unmount should be forced. - type: bool - default: false -''' - -EXAMPLES = r''' -- name: Create ZFS boot environment - community.general.beadm: - name: upgrade-be - state: present - -- name: Create ZFS boot environment from existing inactive boot environment - community.general.beadm: - name: upgrade-be - snapshot: be@old - state: present - -- name: Create ZFS boot environment with compression enabled and description "upgrade" - community.general.beadm: - name: upgrade-be - options: "compression=on" - description: upgrade - state: present - -- name: Delete ZFS boot environment - community.general.beadm: - name: old-be - state: absent - -- name: Mount ZFS boot environment on /tmp/be - community.general.beadm: - name: BE - mountpoint: /tmp/be - state: mounted - -- name: Unmount ZFS boot environment - community.general.beadm: - name: BE - state: unmounted - -- name: Activate ZFS boot environment - community.general.beadm: - name: upgrade-be - state: activated -''' - -RETURN = r''' -name: - description: BE name - returned: always - type: str - sample: pre-upgrade -snapshot: - description: ZFS snapshot to create BE from - returned: always - type: str - sample: rpool/ROOT/oi-hipster@fresh -description: - description: BE description - returned: always - type: str - sample: Upgrade from 9.0 to 10.0 -options: - description: BE additional options - returned: always - type: str - sample: compression=on -mountpoint: - description: BE mountpoint - returned: always - type: str - sample: /mnt/be -state: - description: state of the target - returned: always - type: str - sample: present -force: - description: If forced action is wanted - returned: always - type: bool - sample: False -''' - -import os -import re -from ansible.module_utils.basic import AnsibleModule - - -class BE(object): - def __init__(self, module): - self.module = module - - self.name = module.params['name'] - self.snapshot = module.params['snapshot'] - self.description = module.params['description'] - self.options = module.params['options'] - self.mountpoint = module.params['mountpoint'] - self.state = module.params['state'] - self.force = module.params['force'] - self.is_freebsd = os.uname()[0] == 'FreeBSD' - - def _beadm_list(self): - cmd = [self.module.get_bin_path('beadm'), 'list', '-H'] - if '@' in self.name: - cmd.append('-s') - return self.module.run_command(cmd) - - def 
_find_be_by_name(self, out): - if '@' in self.name: - for line in out.splitlines(): - if self.is_freebsd: - check = line.split() - if(check == []): - continue - full_name = check[0].split('/') - if(full_name == []): - continue - check[0] = full_name[len(full_name) - 1] - if check[0] == self.name: - return check - else: - check = line.split(';') - if check[0] == self.name: - return check - else: - for line in out.splitlines(): - if self.is_freebsd: - check = line.split() - if check[0] == self.name: - return check - else: - check = line.split(';') - if check[0] == self.name: - return check - return None - - def exists(self): - (rc, out, dummy) = self._beadm_list() - - if rc == 0: - if self._find_be_by_name(out): - return True - else: - return False - else: - return False - - def is_activated(self): - (rc, out, dummy) = self._beadm_list() - - if rc == 0: - line = self._find_be_by_name(out) - if line is None: - return False - if self.is_freebsd: - if 'R' in line[1]: - return True - else: - if 'R' in line[2]: - return True - - return False - - def activate_be(self): - cmd = [self.module.get_bin_path('beadm'), 'activate', self.name] - return self.module.run_command(cmd) - - def create_be(self): - cmd = [self.module.get_bin_path('beadm'), 'create'] - - if self.snapshot: - cmd.extend(['-e', self.snapshot]) - if not self.is_freebsd: - if self.description: - cmd.extend(['-d', self.description]) - if self.options: - cmd.extend(['-o', self.options]) - - cmd.append(self.name) - - return self.module.run_command(cmd) - - def destroy_be(self): - cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name] - return self.module.run_command(cmd) - - def is_mounted(self): - (rc, out, dummy) = self._beadm_list() - - if rc == 0: - line = self._find_be_by_name(out) - if line is None: - return False - if self.is_freebsd: - # On FreeBSD, we exclude currently mounted BE on /, as it is - # special and can be activated even if it is mounted. That is not - # possible with non-root BEs. - if line[2] != '-' and line[2] != '/': - return True - else: - if line[3]: - return True - - return False - - def mount_be(self): - cmd = [self.module.get_bin_path('beadm'), 'mount', self.name] - - if self.mountpoint: - cmd.append(self.mountpoint) - - return self.module.run_command(cmd) - - def unmount_be(self): - cmd = [self.module.get_bin_path('beadm'), 'unmount'] - if self.force: - cmd.append('-f') - cmd.append(self.name) - - return self.module.run_command(cmd) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True, aliases=['be']), - snapshot=dict(type='str'), - description=dict(type='str'), - options=dict(type='str'), - mountpoint=dict(type='path'), - state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']), - force=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - - be = BE(module) - - rc = None - out = '' - err = '' - result = {} - result['name'] = be.name - result['state'] = be.state - - if be.snapshot: - result['snapshot'] = be.snapshot - - if be.description: - result['description'] = be.description - - if be.options: - result['options'] = be.options - - if be.mountpoint: - result['mountpoint'] = be.mountpoint - - if be.state == 'absent': - # beadm on FreeBSD and Solarish systems differs in delete behaviour in - # that we are not allowed to delete activated BE on FreeBSD while on - # Solarish systems we cannot delete BE if it is mounted. 
We add mount - # check for both platforms as BE should be explicitly unmounted before - # being deleted. On FreeBSD, we also check if the BE is activated. - if be.exists(): - if not be.is_mounted(): - if module.check_mode: - module.exit_json(changed=True) - - if be.is_freebsd: - if be.is_activated(): - module.fail_json(msg='Unable to remove active BE!') - - (rc, out, err) = be.destroy_be() - - if rc != 0: - module.fail_json(msg='Error while destroying BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - else: - module.fail_json(msg='Unable to remove BE as it is mounted!') - - elif be.state == 'present': - if not be.exists(): - if module.check_mode: - module.exit_json(changed=True) - - (rc, out, err) = be.create_be() - - if rc != 0: - module.fail_json(msg='Error while creating BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - - elif be.state == 'activated': - if not be.is_activated(): - if module.check_mode: - module.exit_json(changed=True) - - # On FreeBSD, beadm is unable to activate mounted BEs, so we add - # an explicit check for that case. - if be.is_freebsd: - if be.is_mounted(): - module.fail_json(msg='Unable to activate mounted BE!') - - (rc, out, err) = be.activate_be() - - if rc != 0: - module.fail_json(msg='Error while activating BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - elif be.state == 'mounted': - if not be.is_mounted(): - if module.check_mode: - module.exit_json(changed=True) - - (rc, out, err) = be.mount_be() - - if rc != 0: - module.fail_json(msg='Error while mounting BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - - elif be.state == 'unmounted': - if be.is_mounted(): - if module.check_mode: - module.exit_json(changed=True) - - (rc, out, err) = be.unmount_be() - - if rc != 0: - module.fail_json(msg='Error while unmounting BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/system/capabilities.py deleted file mode 100644 index ac6dde6761..0000000000 --- a/plugins/modules/system/capabilities.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Nate Coraor -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: capabilities -short_description: Manage Linux capabilities -description: - - This module manipulates files privileges using the Linux capabilities(7) system. -options: - path: - description: - - Specifies the path to the file to be managed. - type: str - required: yes - aliases: [ key ] - capability: - description: - - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) - type: str - required: yes - aliases: [ cap ] - state: - description: - - Whether the entry should be present or absent in the file's capabilities. - type: str - choices: [ absent, present ] - default: present -notes: - - The capabilities system will automatically transform operators and flags into the effective set, - so for example, C(cap_foo=ep) will probably become C(cap_foo+ep). 
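The `beadm list -H` parsing above hinges on one platform difference: FreeBSD output is whitespace-separated with the active flags in the second column, while the illumos/Solaris beadm uses `;` as separator with the flags in the third. A sketch under that assumption, with made-up sample lines:

```python
# Illustrative lines only; real `beadm list -H` output differs in detail.
FREEBSD_LINE = "default\tNR\t/\t12.0G\t2021-01-01 12:00"
ILLUMOS_LINE = "openindiana;uuid;NR;/;12.0G;static;1600000000"

def be_is_active_on_reboot(line, is_freebsd):
    fields = line.split() if is_freebsd else line.split(';')
    flags = fields[1] if is_freebsd else fields[2]
    return 'R' in flags  # 'R' marks the BE that becomes active on reboot

print(be_is_active_on_reboot(FREEBSD_LINE, True))   # True
print(be_is_active_on_reboot(ILLUMOS_LINE, False))  # True
```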
- - This module does not attempt to determine the final operator and flags to compare, - so you will want to ensure that your capabilities argument matches the final capabilities. -author: -- Nate Coraor (@natefoo) -''' - -EXAMPLES = r''' -- name: Set cap_sys_chroot+ep on /foo - community.general.capabilities: - path: /foo - capability: cap_sys_chroot+ep - state: present - -- name: Remove cap_net_bind_service from /bar - community.general.capabilities: - path: /bar - capability: cap_net_bind_service - state: absent -''' - -from ansible.module_utils.basic import AnsibleModule - -OPS = ('=', '-', '+') - - -class CapabilitiesModule(object): - platform = 'Linux' - distribution = None - - def __init__(self, module): - self.module = module - self.path = module.params['path'].strip() - self.capability = module.params['capability'].strip().lower() - self.state = module.params['state'] - self.getcap_cmd = module.get_bin_path('getcap', required=True) - self.setcap_cmd = module.get_bin_path('setcap', required=True) - self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present') - - self.run() - - def run(self): - - current = self.getcap(self.path) - caps = [cap[0] for cap in current] - - if self.state == 'present' and self.capability_tup not in current: - # need to add capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list if it's already set (but op/flags differ) - current = list(filter(lambda x: x[0] != self.capability_tup[0], current)) - # add new cap with correct op/flags - current.append(self.capability_tup) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - elif self.state == 'absent' and self.capability_tup[0] in caps: - # need to remove capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list and then set current list - current = filter(lambda x: x[0] != self.capability_tup[0], current) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - self.module.exit_json(changed=False, state=self.state) - - def getcap(self, path): - rval = [] - cmd = "%s -v %s" % (self.getcap_cmd, path) - rc, stdout, stderr = self.module.run_command(cmd) - # If file xattrs are set but no caps are set the output will be: - # '/foo =' - # If file xattrs are unset the output will be: - # '/foo' - # If the file does not exist, the stderr will be (with rc == 0...): - # '/foo (No such file or directory)' - if rc != 0 or stderr != "": - self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) - if stdout.strip() != path: - if ' =' in stdout: - # process output of an older version of libcap - caps = stdout.split(' =')[1].strip().split() - else: - # otherwise, we have a newer version here - # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git - caps = stdout.split()[1].strip().split() - for cap in caps: - cap = cap.lower() - # getcap condenses capabilities with the same op/flags into a - # comma-separated list, so we have to parse that - if ',' in cap: - cap_group = cap.split(',') - cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) - for subcap in cap_group: - rval.append((subcap, op, flags)) - else: - rval.append(self._parse_cap(cap)) - return rval - - def setcap(self, path, caps): - caps = ' 
'.join([''.join(cap) for cap in caps]) - cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) - rc, stdout, stderr = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) - else: - return stdout - - def _parse_cap(self, cap, op_required=True): - opind = -1 - try: - i = 0 - while opind == -1: - opind = cap.find(OPS[i]) - i += 1 - except Exception: - if op_required: - self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) - else: - return (cap, None, None) - op = cap[opind] - cap, flags = cap.split(op) - return (cap, op, flags) - - -# ============================================================== -# main - -def main(): - # defining module - module = AnsibleModule( - argument_spec=dict( - path=dict(type='str', required=True, aliases=['key']), - capability=dict(type='str', required=True, aliases=['cap']), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - ) - - CapabilitiesModule(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/cronvar.py b/plugins/modules/system/cronvar.py deleted file mode 100644 index 9871668ac0..0000000000 --- a/plugins/modules/system/cronvar.py +++ /dev/null @@ -1,423 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Cronvar Plugin: The goal of this plugin is to provide an idempotent -# method for set cron variable values. It should play well with the -# existing cron module as well as allow for manually added variables. -# Each variable entered will be preceded with a comment describing the -# variable so that it can be found later. This is required to be -# present in order for this plugin to find/modify the variable - -# This module is based on the crontab module. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cronvar -short_description: Manage variables in crontabs -description: - - Use this module to manage crontab variables. - - This module allows you to create, update, or delete cron variable definitions. -options: - name: - description: - - Name of the crontab variable. - type: str - required: yes - value: - description: - - The value to set this variable to. - - Required if C(state=present). - type: str - insertafter: - description: - - If specified, the variable will be inserted after the variable specified. - - Used with C(state=present). - type: str - insertbefore: - description: - - Used with C(state=present). If specified, the variable will be inserted - just before the variable specified. - type: str - state: - description: - - Whether to ensure that the variable is present or absent. - type: str - choices: [ absent, present ] - default: present - user: - description: - - The specific user whose crontab should be modified. - - This parameter defaults to C(root) when unset. - type: str - cron_file: - description: - - If specified, uses this file instead of an individual user's crontab. - - Without a leading C(/), this is assumed to be in I(/etc/cron.d). - - With a leading C(/), this is taken as absolute. - type: str - backup: - description: - - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. 
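The `_parse_cap` logic in the capabilities module above, including the expansion of getcap's comma-condensed groups, can also be exercised standalone. A simplified sketch without the module's failure handling:

```python
OPS = ('=', '-', '+')

def parse_cap(cap):
    """Split 'cap_name<op>flags' into (name, op, flags); no operator raises ValueError."""
    opind = min(i for i in (cap.find(op) for op in OPS) if i != -1)
    op = cap[opind]
    name, flags = cap.split(op)
    return (name, op, flags)

def expand_group(cap_group):
    """getcap condenses caps sharing op/flags: 'a,b+ep' expands to two tuples."""
    caps = cap_group.split(',')
    name, op, flags = parse_cap(caps[-1])
    return [(c, op, flags) for c in caps[:-1]] + [(name, op, flags)]

print(parse_cap('cap_sys_chroot+ep'))               # ('cap_sys_chroot', '+', 'ep')
print(expand_group('cap_net_raw,cap_net_admin+ep'))
```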
- type: bool - default: no -requirements: - - cron -author: -- Doug Luce (@dougluce) -''' - -EXAMPLES = r''' -- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists - community.general.cronvar: - name: EMAIL - value: doug@ansibmod.con.com - -- name: Ensure a variable does not exist. This may remove any variable named "LEGACY" - community.general.cronvar: - name: LEGACY - state: absent - -- name: Add a variable to a file under /etc/cron.d - community.general.cronvar: - name: LOGFILE - value: /var/log/yum-autoupdate.log - user: root - cron_file: ansible_yum-autoupdate -''' - -import os -import platform -import pwd -import re -import shlex -import sys -import tempfile - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -class CronVarError(Exception): - pass - - -class CronVar(object): - """ - CronVar object to write variables to crontabs. - - user - the user of the crontab (defaults to root) - cron_file - a cron file under /etc/cron.d - """ - - def __init__(self, module, user=None, cron_file=None): - self.module = module - self.user = user - self.lines = None - self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) - self.cron_cmd = self.module.get_bin_path('crontab', required=True) - - if cron_file: - self.cron_file = "" - if os.path.isabs(cron_file): - self.cron_file = cron_file - else: - self.cron_file = os.path.join('/etc/cron.d', cron_file) - else: - self.cron_file = None - - self.read() - - def read(self): - # Read in the crontab from the system - self.lines = [] - if self.cron_file: - # read the cronfile - try: - f = open(self.cron_file, 'r') - self.lines = f.read().splitlines() - f.close() - except IOError: - # cron file does not exist - return - except Exception: - raise CronVarError("Unexpected error:", sys.exc_info()[0]) - else: - # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME - (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) - - if rc != 0 and rc != 1: # 1 can mean that there are no jobs. - raise CronVarError("Unable to read crontab") - - lines = out.splitlines() - count = 0 - for l in lines: - if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l - ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): - self.lines.append(l) - count += 1 - - def log_message(self, message): - self.module.debug('ansible: "%s"' % message) - - def write(self, backup_file=None): - """ - Write the crontab to the system. Saves all information. - """ - if backup_file: - fileh = open(backup_file, 'w') - elif self.cron_file: - fileh = open(self.cron_file, 'w') - else: - filed, path = tempfile.mkstemp(prefix='crontab') - fileh = os.fdopen(filed, 'w') - - fileh.write(self.render()) - fileh.close() - - # return if making a backup - if backup_file: - return - - # Add the entire crontab back to the user crontab - if not self.cron_file: - # quoting shell args for now but really this should be two non-shell calls. 
FIXME - (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) - os.unlink(path) - - if rc != 0: - self.module.fail_json(msg=err) - - def remove_variable_file(self): - try: - os.unlink(self.cron_file) - return True - except OSError: - # cron file does not exist - return False - except Exception: - raise CronVarError("Unexpected error:", sys.exc_info()[0]) - - def parse_for_var(self, line): - lexer = shlex.shlex(line) - lexer.wordchars = self.wordchars - varname = lexer.get_token() - is_env_var = lexer.get_token() == '=' - value = ''.join(lexer) - if is_env_var: - return (varname, value) - raise CronVarError("Not a variable.") - - def find_variable(self, name): - for l in self.lines: - try: - (varname, value) = self.parse_for_var(l) - if varname == name: - return value - except CronVarError: - pass - return None - - def get_var_names(self): - var_names = [] - for l in self.lines: - try: - var_name, dummy = self.parse_for_var(l) - var_names.append(var_name) - except CronVarError: - pass - return var_names - - def add_variable(self, name, value, insertbefore, insertafter): - if insertbefore is None and insertafter is None: - # Add the variable to the top of the file. - self.lines.insert(0, "%s=%s" % (name, value)) - else: - newlines = [] - for l in self.lines: - try: - varname, dummy = self.parse_for_var(l) # Throws if not a var line - if varname == insertbefore: - newlines.append("%s=%s" % (name, value)) - newlines.append(l) - elif varname == insertafter: - newlines.append(l) - newlines.append("%s=%s" % (name, value)) - else: - raise CronVarError # Append. - except CronVarError: - newlines.append(l) - - self.lines = newlines - - def remove_variable(self, name): - self.update_variable(name, None, remove=True) - - def update_variable(self, name, value, remove=False): - newlines = [] - for l in self.lines: - try: - varname, dummy = self.parse_for_var(l) # Throws if not a var line - if varname != name: - raise CronVarError # Append. 
- if not remove: - newlines.append("%s=%s" % (name, value)) - except CronVarError: - newlines.append(l) - - self.lines = newlines - - def render(self): - """ - Render a proper crontab - """ - result = '\n'.join(self.lines) - if result and result[-1] not in ['\n', '\r']: - result += '\n' - return result - - def _read_user_execute(self): - """ - Returns the command line for reading a crontab - """ - user = '' - - if self.user: - if platform.system() == 'SunOS': - return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd)) - elif platform.system() == 'AIX': - return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user)) - elif platform.system() == 'HP-UX': - return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user)) - elif pwd.getpwuid(os.getuid())[0] != self.user: - user = '-u %s' % shlex_quote(self.user) - return "%s %s %s" % (self.cron_cmd, user, '-l') - - def _write_execute(self, path): - """ - Return the command line for writing a crontab - """ - user = '' - if self.user: - if platform.system() in ['SunOS', 'HP-UX', 'AIX']: - return "chown %s %s ; su '%s' -c '%s %s'" % ( - shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path)) - elif pwd.getpwuid(os.getuid())[0] != self.user: - user = '-u %s' % shlex_quote(self.user) - return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path)) - - -# ================================================== - -def main(): - # The following example playbooks: - # - # - community.general.cronvar: name="SHELL" value="/bin/bash" - # - # - name: Set the email - # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com" - # - # - name: Get rid of the old new host variable - # community.general.cronvar: name="NEW_HOST" state=absent - # - # Would produce: - # SHELL = /bin/bash - # EMAILTO = doug@ansibmod.con.com - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - value=dict(type='str'), - user=dict(type='str'), - cron_file=dict(type='str'), - insertafter=dict(type='str'), - insertbefore=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - backup=dict(type='bool', default=False), - ), - mutually_exclusive=[['insertbefore', 'insertafter']], - supports_check_mode=False, - ) - - name = module.params['name'] - value = module.params['value'] - user = module.params['user'] - cron_file = module.params['cron_file'] - insertafter = module.params['insertafter'] - insertbefore = module.params['insertbefore'] - state = module.params['state'] - backup = module.params['backup'] - ensure_present = state == 'present' - - changed = False - res_args = dict() - - # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
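The crontab-variable detection above rests on `parse_for_var`: shlex tokenises a line with `=` excluded from the word characters, and the line counts as a variable definition only when the second token is `=`. A standalone sketch of the same idea:

```python
import shlex

# Everything except '=' and the quote characters counts as a word character.
WORDCHARS = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"'))

def parse_for_var(line):
    lexer = shlex.shlex(line)
    lexer.wordchars = WORDCHARS
    varname = lexer.get_token()
    if lexer.get_token() != '=':
        raise ValueError("Not a variable.")
    return varname, ''.join(lexer)  # the remaining tokens form the value

print(parse_for_var('MAILTO=admin@example.com'))  # ('MAILTO', 'admin@example.com')
```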
- os.umask(int('022', 8)) - cronvar = CronVar(module, user, cron_file) - - module.debug('cronvar instantiated - name: "%s"' % name) - - # --- user input validation --- - - if name is None and ensure_present: - module.fail_json(msg="You must specify 'name' to insert a new cron variable") - - if value is None and ensure_present: - module.fail_json(msg="You must specify 'value' to insert a new cron variable") - - if name is None and not ensure_present: - module.fail_json(msg="You must specify 'name' to remove a cron variable") - - # if requested make a backup before making a change - if backup: - dummy, backup_file = tempfile.mkstemp(prefix='cronvar') - cronvar.write(backup_file) - - if cronvar.cron_file and not name and not ensure_present: - changed = cronvar.remove_job_file() - module.exit_json(changed=changed, cron_file=cron_file, state=state) - - old_value = cronvar.find_variable(name) - - if ensure_present: - if old_value is None: - cronvar.add_variable(name, value, insertbefore, insertafter) - changed = True - elif old_value != value: - cronvar.update_variable(name, value) - changed = True - else: - if old_value is not None: - cronvar.remove_variable(name) - changed = True - - res_args = { - "vars": cronvar.get_var_names(), - "changed": changed - } - - if changed: - cronvar.write() - - # retain the backup only if crontab or cron file have changed - if backup: - if changed: - res_args['backup_file'] = backup_file - else: - os.unlink(backup_file) - - if cron_file: - res_args['cron_file'] = cron_file - - module.exit_json(**res_args) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/crypttab.py b/plugins/modules/system/crypttab.py deleted file mode 100644 index 8eeec56d3d..0000000000 --- a/plugins/modules/system/crypttab.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Steve -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: crypttab -short_description: Encrypted Linux block devices -description: - - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab). -options: - name: - description: - - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or - optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/) - will be stripped from I(name). - type: str - required: yes - state: - description: - - Use I(present) to add a line to C(/etc/crypttab) or update its definition - if already present. - - Use I(absent) to remove a line with matching I(name). - - Use I(opts_present) to add options to those already present; options with - different values will be updated. - - Use I(opts_absent) to remove options from the existing set. - type: str - required: yes - choices: [ absent, opts_absent, opts_present, present ] - backing_device: - description: - - Path to the underlying block device or file, or the UUID of a block-device - prefixed with I(UUID=). - type: str - password: - description: - - Encryption password, the path to a file containing the password, or - C(-) or unset if the password should be entered at boot. - type: path - opts: - description: - - A comma-delimited list of options. See C(crypttab(5) ) for details. - type: str - path: - description: - - Path to file to use instead of C(/etc/crypttab). - - This might be useful in a chroot environment. 
- type: path - default: /etc/crypttab -author: -- Steve (@groks) -''' - -EXAMPLES = r''' -- name: Set the options explicitly a device which must already exist - community.general.crypttab: - name: luks-home - state: present - opts: discard,cipher=aes-cbc-essiv:sha256 - -- name: Add the 'discard' option to any existing options for all devices - community.general.crypttab: - name: '{{ item.device }}' - state: opts_present - opts: discard - loop: '{{ ansible_mounts }}' - when: "'/dev/mapper/luks-' in {{ item.device }}" -''' - -import os -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']), - backing_device=dict(type='str'), - password=dict(type='path'), - opts=dict(type='str'), - path=dict(type='path', default='/etc/crypttab') - ), - supports_check_mode=True, - ) - - backing_device = module.params['backing_device'] - password = module.params['password'] - opts = module.params['opts'] - state = module.params['state'] - path = module.params['path'] - name = module.params['name'] - if name.startswith('/dev/mapper/'): - name = name[len('/dev/mapper/'):] - - if state != 'absent' and backing_device is None and password is None and opts is None: - module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", - **module.params) - - if 'opts' in state and (backing_device is not None or password is not None): - module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state, - **module.params) - - for arg_name, arg in (('name', name), - ('backing_device', backing_device), - ('password', password), - ('opts', opts)): - if (arg is not None and (' ' in arg or '\t' in arg or arg == '')): - module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name, - **module.params) - - try: - crypttab = Crypttab(path) - existing_line = crypttab.match(name) - except Exception as e: - module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e), - exception=traceback.format_exc(), **module.params) - - if 'present' in state and existing_line is None and backing_device is None: - module.fail_json(msg="'backing_device' required to add a new entry", - **module.params) - - changed, reason = False, '?' 
- - if state == 'absent': - if existing_line is not None: - changed, reason = existing_line.remove() - - elif state == 'present': - if existing_line is not None: - changed, reason = existing_line.set(backing_device, password, opts) - else: - changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) - - elif state == 'opts_present': - if existing_line is not None: - changed, reason = existing_line.opts.add(opts) - else: - changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) - - elif state == 'opts_absent': - if existing_line is not None: - changed, reason = existing_line.opts.remove(opts) - - if changed and not module.check_mode: - try: - f = open(path, 'wb') - f.write(to_bytes(crypttab, errors='surrogate_or_strict')) - finally: - f.close() - - module.exit_json(changed=changed, msg=reason, **module.params) - - -class Crypttab(object): - _lines = [] - - def __init__(self, path): - self.path = path - if not os.path.exists(path): - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - open(path, 'a').close() - - try: - f = open(path, 'r') - for line in f.readlines(): - self._lines.append(Line(line)) - finally: - f.close() - - def add(self, line): - self._lines.append(line) - return True, 'added line' - - def lines(self): - for line in self._lines: - if line.valid(): - yield line - - def match(self, name): - for line in self.lines(): - if line.name == name: - return line - return None - - def __str__(self): - lines = [] - for line in self._lines: - lines.append(str(line)) - crypttab = '\n'.join(lines) - if len(crypttab) == 0: - crypttab += '\n' - if crypttab[-1] != '\n': - crypttab += '\n' - return crypttab - - -class Line(object): - def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None): - self.line = line - self.name = name - self.backing_device = backing_device - self.password = password - self.opts = Options(opts) - - if line is not None: - self.line = self.line.rstrip('\n') - if self._line_valid(line): - self.name, backing_device, password, opts = self._split_line(line) - - self.set(backing_device, password, opts) - - def set(self, backing_device, password, opts): - changed = False - - if backing_device is not None and self.backing_device != backing_device: - self.backing_device = backing_device - changed = True - - if password is not None and self.password != password: - self.password = password - changed = True - - if opts is not None: - opts = Options(opts) - if opts != self.opts: - self.opts = opts - changed = True - - return changed, 'updated line' - - def _line_valid(self, line): - if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4): - return False - return True - - def _split_line(self, line): - fields = line.split() - try: - field2 = fields[2] - except IndexError: - field2 = None - try: - field3 = fields[3] - except IndexError: - field3 = None - - return (fields[0], - fields[1], - field2, - field3) - - def remove(self): - self.line, self.name, self.backing_device = '', None, None - return True, 'removed line' - - def valid(self): - if self.name is not None and self.backing_device is not None: - return True - return False - - def __str__(self): - if self.valid(): - fields = [self.name, self.backing_device] - if self.password is not None or self.opts: - if self.password is not None: - fields.append(self.password) - else: - fields.append('none') - if self.opts: - fields.append(str(self.opts)) - return ' '.join(fields) - return self.line - - 
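The `opts_present` merge handled above (and implemented by the `Options` class that follows) boils down to: preserve option order, append unknown keys, update values that differ, and report no change when everything already matches. A minimal standalone sketch of those semantics:

```python
def parse_opts(opts_string):
    """'discard,cipher=aes' -> ordered (key, value-or-None) pairs."""
    pairs = []
    for opt in opts_string.split(','):
        key, sep, value = opt.partition('=')
        pairs.append((key, value if sep else None))
    return pairs

def merge_opts(existing, new_string):
    opts = dict(existing)
    order = [k for k, dummy in existing]
    changed = False
    for key, value in parse_opts(new_string):
        if key not in opts:
            order.append(key)
            changed = True
        elif opts[key] != value:
            changed = True
        opts[key] = value
    return [(k, opts[k]) for k in order], changed

current = parse_opts('discard,cipher=aes-cbc-essiv:sha256')
merged, changed = merge_opts(current, 'discard,keyscript=/bin/pass')
print(changed)  # True: keyscript was added, discard was already present
print(','.join(k if v is None else '%s=%s' % (k, v) for k, v in merged))
```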
-class Options(dict): - """opts_string looks like: 'discard,foo=bar,baz=greeble' """ - - def __init__(self, opts_string): - super(Options, self).__init__() - self.itemlist = [] - if opts_string is not None: - for opt in opts_string.split(','): - kv = opt.split('=') - if len(kv) > 1: - k, v = (kv[0], kv[1]) - else: - k, v = (kv[0], None) - self[k] = v - - def add(self, opts_string): - changed = False - for k, v in Options(opts_string).items(): - if k in self: - if self[k] != v: - changed = True - else: - changed = True - self[k] = v - return changed, 'updated options' - - def remove(self, opts_string): - changed = False - for k in Options(opts_string): - if k in self: - del self[k] - changed = True - return changed, 'removed options' - - def keys(self): - return self.itemlist - - def values(self): - return [self[key] for key in self] - - def items(self): - return [(key, self[key]) for key in self] - - def __iter__(self): - return iter(self.itemlist) - - def __setitem__(self, key, value): - if key not in self: - self.itemlist.append(key) - super(Options, self).__setitem__(key, value) - - def __delitem__(self, key): - self.itemlist.remove(key) - super(Options, self).__delitem__(key) - - def __ne__(self, obj): - return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items())) - - def __str__(self): - ret = [] - for k, v in self.items(): - if v is None: - ret.append(k) - else: - ret.append('%s=%s' % (k, v)) - return ','.join(ret) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/dconf.py b/plugins/modules/system/dconf.py deleted file mode 100644 index f7776cde6e..0000000000 --- a/plugins/modules/system/dconf.py +++ /dev/null @@ -1,384 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Branko Majic -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: dconf -author: - - "Branko Majic (@azaghal)" -short_description: Modify and read dconf database -description: - - This module allows modifications and reading of C(dconf) database. The module - is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man - page for more details. - - Since C(dconf) requires a running D-Bus session to change values, the module - will try to detect an existing session and reuse it, or run the tool via - C(dbus-run-session). -notes: - - This module depends on C(psutil) Python library (version 4.0.0 and upwards), - C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on - distribution you are using, you may need to install additional packages to - have these available. - - Detection of existing, running D-Bus session, required to change settings - via C(dconf), is not 100% reliable due to implementation details of D-Bus - daemon itself. This might lead to running applications not picking-up - changes on the fly if options are changed via Ansible and - C(dbus-run-session). - - Keep in mind that the C(dconf) CLI tool, which this module wraps around, - utilises an unusual syntax for the values (GVariant). For example, if you - wanted to provide a string value, the correct syntax would be - C(value="'myvalue'") - with single quotes as part of the Ansible parameter - value. - - When using loops in combination with a value like - :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible - type conversions. 
Applying a filter :code:`"{{ item.value | string }}"` - to the parameter variable can avoid potential conversion problems. - - The easiest way to figure out exact syntax/value you need to provide for a - key is by making the configuration change in application affected by the - key, and then having a look at value set via commands C(dconf dump - /path/to/dir/) or C(dconf read /path/to/key). -options: - key: - type: str - required: true - description: - - A dconf key to modify or read from the dconf database. - value: - type: str - required: false - description: - - Value to set for the specified dconf key. Value should be specified in - GVariant format. Due to complexity of this format, it is best to have a - look at existing values in the dconf database. - - Required for I(state=present). - state: - type: str - required: false - default: present - choices: [ 'read', 'present', 'absent' ] - description: - - The action to take upon the key/value. -''' - -RETURN = r""" -value: - description: value associated with the requested key - returned: success, state was "read" - type: str - sample: "'Default'" -""" - -EXAMPLES = r""" -- name: Configure available keyboard layouts in Gnome - community.general.dconf: - key: "/org/gnome/desktop/input-sources/sources" - value: "[('xkb', 'us'), ('xkb', 'se')]" - state: present - -- name: Read currently available keyboard layouts in Gnome - community.general.dconf: - key: "/org/gnome/desktop/input-sources/sources" - state: read - register: keyboard_layouts - -- name: Reset the available keyboard layouts in Gnome - community.general.dconf: - key: "/org/gnome/desktop/input-sources/sources" - state: absent - -- name: Configure available keyboard layouts in Cinnamon - community.general.dconf: - key: "/org/gnome/libgnomekbd/keyboard/layouts" - value: "['us', 'se']" - state: present - -- name: Read currently available keyboard layouts in Cinnamon - community.general.dconf: - key: "/org/gnome/libgnomekbd/keyboard/layouts" - state: read - register: keyboard_layouts - -- name: Reset the available keyboard layouts in Cinnamon - community.general.dconf: - key: "/org/gnome/libgnomekbd/keyboard/layouts" - state: absent - -- name: Disable desktop effects in Cinnamon - community.general.dconf: - key: "/org/cinnamon/desktop-effects" - value: "false" - state: present -""" - - -import os -import traceback - -PSUTIL_IMP_ERR = None -try: - import psutil - HAS_PSUTIL = True -except ImportError: - PSUTIL_IMP_ERR = traceback.format_exc() - HAS_PSUTIL = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class DBusWrapper(object): - """ - Helper class that can be used for running a command with a working D-Bus - session. - - If possible, command will be run against an existing D-Bus session, - otherwise the session will be spawned via dbus-run-session. - - Example usage: - - dbus_wrapper = DBusWrapper(ansible_module) - dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"]) - """ - - def __init__(self, module): - """ - Initialises an instance of the class. - - :param module: Ansible module instance used to signal failures and run commands. - :type module: AnsibleModule - """ - - # Store passed-in arguments and set-up some defaults. - self.module = module - - # Try to extract existing D-Bus session address. - self.dbus_session_bus_address = self._get_existing_dbus_session() - - # If no existing D-Bus session was detected, check if dbus-run-session - # is available. 
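The session detection sketched below mirrors the `_get_existing_dbus_session` idea above, assuming psutil 4.0+ is installed; the real module additionally verifies each candidate address with `dbus-send` before trusting it:

```python
import os
import psutil

def find_dbus_session_address():
    """Return a DBUS_SESSION_BUS_ADDRESS from one of the user's processes, or None."""
    uid = os.getuid()
    for pid in psutil.pids():
        try:
            process = psutil.Process(pid)
            if process.uids().real != uid:
                continue  # only inspect the current user's processes
            address = process.environ().get('DBUS_SESSION_BUS_ADDRESS')
            if address:
                return address
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            continue  # e.g. kernel threads, racing exits, other users' processes
    return None  # caller falls back to dbus-run-session

print(find_dbus_session_address())
```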
- if self.dbus_session_bus_address is None: - self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True) - - def _get_existing_dbus_session(self): - """ - Detects and returns an existing D-Bus session bus address. - - :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None. - """ - - # We'll be checking the processes of current user only. - uid = os.getuid() - - # Go through all the pids for this user, try to extract the D-Bus - # session bus address from environment, and ensure it is possible to - # connect to it. - self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid) - - for pid in psutil.pids(): - process = psutil.Process(pid) - process_real_uid, dummy, dummy = process.uids() - try: - if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): - dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] - self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate) - dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True) - command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test'] - rc, dummy, dummy = self.module.run_command(command) - - if rc == 0: - self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate) - - return dbus_session_bus_address_candidate - - # This can happen with things like SSH sessions etc. - except psutil.AccessDenied: - pass - - self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session") - - return None - - def run_command(self, command): - """ - Runs the specified command within a functional D-Bus session. Command is - effectively passed-on to AnsibleModule.run_command() method, with - modification for using dbus-run-session if necessary. - - :param command: Command to run, including parameters. Each element of the list should be a string. - :type module: list - - :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command. - """ - - if self.dbus_session_bus_address is None: - self.module.debug("Using dbus-run-session wrapper for running commands.") - command = [self.dbus_run_session_cmd] + command - rc, out, err = self.module.run_command(command) - - if self.dbus_session_bus_address is None and rc == 127: - self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err) - else: - extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address} - rc, out, err = self.module.run_command(command, environ_update=extra_environment) - - return rc, out, err - - -class DconfPreference(object): - - def __init__(self, module, check_mode=False): - """ - Initialises instance of the class. - - :param module: Ansible module instance used to signal failures and run commands. - :type module: AnsibleModule - - :param check_mode: Specify whether to only check if a change should be made or if to actually make a change. - :type check_mode: bool - """ - - self.module = module - self.check_mode = check_mode - # Check if dconf binary exists - self.dconf_bin = self.module.get_bin_path('dconf', required=True) - - def read(self, key): - """ - Retrieves current value associated with the dconf key. - - If an error occurs, a call will be made to AnsibleModule.fail_json. 
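
The detection loop in _get_existing_dbus_session() above reduces to the sketch below; unlike the original psutil.Process(pid) loop, it also guards against processes exiting mid-scan (psutil.NoSuchProcess), which the deleted code does not catch:

import os
import psutil

def find_user_session_bus():
    # Scan processes owned by the current user for a session bus address.
    uid = os.getuid()
    for proc in psutil.process_iter():
        try:
            if proc.uids().real == uid:
                address = proc.environ().get('DBUS_SESSION_BUS_ADDRESS')
                if address:
                    return address
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            continue
    return None
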
- - :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None. - """ - command = [self.dconf_bin, "read", key] - - rc, out, err = self.module.run_command(command) - - if rc != 0: - self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err, - out=out, - err=err) - - if out == '': - value = None - else: - value = out.rstrip('\n') - - return value - - def write(self, key, value): - """ - Writes the value for specified key. - - If an error occurs, a call will be made to AnsibleModule.fail_json. - - :param key: dconf key for which the value should be set. Should be a full path. - :type key: str - - :param value: Value to set for the specified dconf key. Should be specified in GVariant format. - :type value: str - - :returns: bool -- True if a change was made, False if no change was required. - """ - # If no change is needed (or won't be done due to check_mode), notify - # caller straight away. - if value == self.read(key): - return False - elif self.check_mode: - return True - - # Set-up command to run. Since DBus is needed for write operation, wrap - # dconf command dbus-launch. - command = [self.dconf_bin, "write", key, value] - - # Run the command and fetch standard return code, stdout, and stderr. - dbus_wrapper = DBusWrapper(self.module) - rc, out, err = dbus_wrapper.run_command(command) - - if rc != 0: - self.module.fail_json(msg='dconf failed while write the value with error: %s' % err, - out=out, - err=err) - - # Value was changed. - return True - - def reset(self, key): - """ - Returns value for the specified key (removes it from user configuration). - - If an error occurs, a call will be made to AnsibleModule.fail_json. - - :param key: dconf key to reset. Should be a full path. - :type key: str - - :returns: bool -- True if a change was made, False if no change was required. - """ - - # Read the current value first. - current_value = self.read(key) - - # No change was needed, key is not set at all, or just notify user if we - # are in check mode. - if current_value is None: - return False - elif self.check_mode: - return True - - # Set-up command to run. Since DBus is needed for reset operation, wrap - # dconf command dbus-launch. - command = [self.dconf_bin, "reset", key] - - # Run the command and fetch standard return code, stdout, and stderr. - dbus_wrapper = DBusWrapper(self.module) - rc, out, err = dbus_wrapper.run_command(command) - - if rc != 0: - self.module.fail_json(msg='dconf failed while reseting the value with error: %s' % err, - out=out, - err=err) - - # Value was changed. - return True - - -def main(): - # Setup the Ansible module - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent', 'read']), - key=dict(required=True, type='str', no_log=False), - value=dict(required=False, default=None, type='str'), - ), - supports_check_mode=True - ) - - if not HAS_PSUTIL: - module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR) - - # If present state was specified, value must be provided. - if module.params['state'] == 'present' and module.params['value'] is None: - module.fail_json(msg='State "present" requires "value" to be set.') - - # Create wrapper instance. - dconf = DconfPreference(module, module.check_mode) - - # Process based on different states. 
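
# Editorial aside (not part of the deleted file): the write path above is
# the usual idempotent, check_mode-aware pattern; distilled as a sketch:
def ensure_value(dconf, key, value, check_mode=False):
    if dconf.read(key) == value:
        return False       # already converged, report no change
    if check_mode:
        return True        # a change would be made; leave the system alone
    return dconf.write(key, value)  # True once the new value is written
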
- if module.params['state'] == 'read': - value = dconf.read(module.params['key']) - module.exit_json(changed=False, value=value) - elif module.params['state'] == 'present': - changed = dconf.write(module.params['key'], module.params['value']) - module.exit_json(changed=changed) - elif module.params['state'] == 'absent': - changed = dconf.reset(module.params['key']) - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/dpkg_divert.py b/plugins/modules/system/dpkg_divert.py deleted file mode 100644 index 1033f70f14..0000000000 --- a/plugins/modules/system/dpkg_divert.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017-2020, Yann Amar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: dpkg_divert -short_description: Override a debian package's version of a file -version_added: '0.2.0' -author: - - quidame (@quidame) -description: - - A diversion is for C(dpkg) the knowledge that only a given package - (or the local administrator) is allowed to install a file at a given - location. Other packages shipping their own version of this file will - be forced to I(divert) it, i.e. to install it at another location. It - allows one to keep changes in a file provided by a debian package by - preventing its overwrite at package upgrade. - - This module manages diversions of debian packages files using the - C(dpkg-divert) commandline tool. It can either create or remove a - diversion for a given file, but also update an existing diversion - to modify its I(holder) and/or its I(divert) location. -options: - path: - description: - - The original and absolute path of the file to be diverted or - undiverted. This path is unique, i.e. it is not possible to get - two diversions for the same I(path). - required: true - type: path - state: - description: - - When I(state=absent), remove the diversion of the specified - I(path); when I(state=present), create the diversion if it does - not exist, or update its package I(holder) or I(divert) location, - if it already exists. - type: str - default: present - choices: [absent, present] - holder: - description: - - The name of the package whose copy of file is not diverted, also - known as the diversion holder or the package the diversion belongs - to. - - The actual package does not have to be installed or even to exist - for its name to be valid. If not specified, the diversion is hold - by 'LOCAL', that is reserved by/for dpkg for local diversions. - - This parameter is ignored when I(state=absent). - type: str - divert: - description: - - The location where the versions of file will be diverted. - - Default is to add suffix C(.distrib) to the file path. - - This parameter is ignored when I(state=absent). - type: path - rename: - description: - - Actually move the file aside (when I(state=present)) or back (when - I(state=absent)), but only when changing the state of the diversion. - This parameter has no effect when attempting to add a diversion that - already exists or when removing an unexisting one. - - Unless I(force=true), renaming fails if the destination file already - exists (this lock being a dpkg-divert feature, and bypassing it being - a module feature). 
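
A sketch of how an existing diversion is inspected with dpkg-divert (the deleted module does this in its diversion_state() helper further below; subprocess is used here only to keep the example self-contained):

import subprocess

def diversion_state(path):
    holder = subprocess.run(['dpkg-divert', '--listpackage', path],
                            capture_output=True, text=True, check=True).stdout.strip()
    if not holder:
        return {'path': path, 'state': 'absent', 'divert': None, 'holder': None}
    divert = subprocess.run(['dpkg-divert', '--truename', path],
                            capture_output=True, text=True, check=True).stdout.strip()
    return {'path': path, 'state': 'present', 'divert': divert, 'holder': holder}
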
- type: bool - default: no - force: - description: - - When I(rename=true) and I(force=true), renaming is performed even if - the target of the renaming exists, i.e. the existing contents of the - file at this location will be lost. - - This parameter is ignored when I(rename=false). - type: bool - default: no -notes: - - This module supports I(check_mode) and I(diff). -requirements: - - dpkg-divert >= 1.15.0 (Debian family) -''' - -EXAMPLES = r''' -- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place - community.general.dpkg_divert: - path: /usr/bin/busybox - -- name: Divert /usr/bin/busybox by package 'branding' - community.general.dpkg_divert: - path: /usr/bin/busybox - holder: branding - -- name: Divert and rename busybox to busybox.dpkg-divert - community.general.dpkg_divert: - path: /usr/bin/busybox - divert: /usr/bin/busybox.dpkg-divert - rename: yes - -- name: Remove the busybox diversion and move the diverted file back - community.general.dpkg_divert: - path: /usr/bin/busybox - state: absent - rename: yes - force: yes -''' - -RETURN = r''' -commands: - description: The dpkg-divert commands ran internally by the module. - type: list - returned: on_success - elements: str - sample: |- - [ - "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc", - "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc" - ] -messages: - description: The dpkg-divert relevant messages (stdout or stderr). - type: list - returned: on_success - elements: str - sample: |- - [ - "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'", - "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'" - ] -diversion: - description: The status of the diversion after task execution. - type: dict - returned: always - contains: - divert: - description: The location of the diverted file. - type: str - holder: - description: The package holding the diversion. - type: str - path: - description: The path of the file to divert/undivert. - type: str - state: - description: The state of the diversion. 
- type: str - sample: |- - { - "divert": "/etc/foobarrc.distrib", - "holder": "LOCAL", - "path": "/etc/foobarrc" - "state": "present" - } -''' - - -import re -import os -from distutils.version import LooseVersion - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_native - - -def diversion_state(module, command, path): - diversion = dict(path=path, state='absent', divert=None, holder=None) - rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True) - if out: - diversion['state'] = 'present' - diversion['holder'] = out.rstrip() - rc, out, err = module.run_command([command, '--truename', path], check_rc=True) - diversion['divert'] = out.rstrip() - return diversion - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(required=True, type='path'), - state=dict(required=False, type='str', default='present', choices=['absent', 'present']), - holder=dict(required=False, type='str'), - divert=dict(required=False, type='path'), - rename=dict(required=False, type='bool', default=False), - force=dict(required=False, type='bool', default=False), - ), - supports_check_mode=True, - ) - - path = module.params['path'] - state = module.params['state'] - holder = module.params['holder'] - divert = module.params['divert'] - rename = module.params['rename'] - force = module.params['force'] - - diversion_wanted = dict(path=path, state=state) - changed = False - - DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True) - MAINCOMMAND = [DPKG_DIVERT] - - # Option --listpackage is needed and comes with 1.15.0 - rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True) - [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)] - if LooseVersion(current_version) < LooseVersion("1.15.0"): - module.fail_json(msg="Unsupported dpkg version (<1.15.0).") - no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1")) - - b_path = to_bytes(path, errors='surrogate_or_strict') - path_exists = os.path.exists(b_path) - # Used for things not doable with a single dpkg-divert command (as forced - # renaming of files, and diversion's 'holder' or 'divert' updates). - target_exists = False - truename_exists = False - - diversion_before = diversion_state(module, DPKG_DIVERT, path) - if diversion_before['state'] == 'present': - b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict') - truename_exists = os.path.exists(b_divert) - - # Append options as requested in the task parameters, but ignore some of - # them when removing the diversion. - if rename: - MAINCOMMAND.append('--rename') - elif no_rename_is_supported: - MAINCOMMAND.append('--no-rename') - - if state == 'present': - if holder and holder != 'LOCAL': - MAINCOMMAND.extend(['--package', holder]) - diversion_wanted['holder'] = holder - else: - MAINCOMMAND.append('--local') - diversion_wanted['holder'] = 'LOCAL' - - if divert: - MAINCOMMAND.extend(['--divert', divert]) - target = divert - else: - target = '%s.distrib' % path - - MAINCOMMAND.extend(['--add', path]) - diversion_wanted['divert'] = target - b_target = to_bytes(target, errors='surrogate_or_strict') - target_exists = os.path.exists(b_target) - - else: - MAINCOMMAND.extend(['--remove', path]) - diversion_wanted['divert'] = None - diversion_wanted['holder'] = None - - # Start to populate the returned objects. 
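
# Editorial aside (not part of the deleted file): the version gate above,
# distilled. The sample output shape is an assumption; note LooseVersion
# comes from distutils, which is removed in Python 3.12.
import re
from distutils.version import LooseVersion

stdout = 'Debian dpkg-divert version 1.19.7 (amd64).'  # assumed sample output
version = next(tok for tok in stdout.split()
               if re.match(r'^[0-9]+[.][0-9]', tok))
assert LooseVersion(version) >= LooseVersion('1.15.0')  # --listpackage exists
no_rename_supported = LooseVersion(version) >= LooseVersion('1.19.1')
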
- diversion = diversion_before.copy() - maincommand = ' '.join(MAINCOMMAND) - commands = [maincommand] - - if module.check_mode or diversion_wanted == diversion_before: - MAINCOMMAND.insert(1, '--test') - diversion_after = diversion_wanted - - # Just try and see - rc, stdout, stderr = module.run_command(MAINCOMMAND) - - if rc == 0: - messages = [stdout.rstrip()] - - # else... cases of failure with dpkg-divert are: - # - The diversion does not belong to the same package (or LOCAL) - # - The divert filename is not the same (e.g. path.distrib != path.divert) - # - The renaming is forbidden by dpkg-divert (i.e. both the file and the - # diverted file exist) - - elif state != diversion_before['state']: - # There should be no case with 'divert' and 'holder' when creating the - # diversion from none, and they're ignored when removing the diversion. - # So this is all about renaming... - if rename and path_exists and ( - (state == 'absent' and truename_exists) or - (state == 'present' and target_exists)): - if not force: - msg = "Set 'force' param to True to force renaming of files." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - else: - msg = "Unexpected error while changing state of the diversion." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - - to_remove = path - if state == 'present': - to_remove = target - - if not module.check_mode: - try: - b_remove = to_bytes(to_remove, errors='surrogate_or_strict') - os.unlink(b_remove) - except OSError as e: - msg = 'Failed to remove %s: %s' % (to_remove, to_native(e)) - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) - - messages = [stdout.rstrip()] - - # The situation is that we want to modify the settings (holder or divert) - # of an existing diversion. dpkg-divert does not handle this, and we have - # to remove the existing diversion first, and then set a new one. - else: - RMDIVERSION = [DPKG_DIVERT, '--remove', path] - if no_rename_is_supported: - RMDIVERSION.insert(1, '--no-rename') - rmdiversion = ' '.join(RMDIVERSION) - - if module.check_mode: - RMDIVERSION.insert(1, '--test') - - if rename: - MAINCOMMAND.remove('--rename') - if no_rename_is_supported: - MAINCOMMAND.insert(1, '--no-rename') - maincommand = ' '.join(MAINCOMMAND) - - commands = [rmdiversion, maincommand] - rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True) - - if module.check_mode: - messages = [rmdout.rstrip(), 'Running in check mode'] - else: - rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) - messages = [rmdout.rstrip(), stdout.rstrip()] - - # Avoid if possible to orphan files (i.e. to dereference them in diversion - # database but let them in place), but do not make renaming issues fatal. - # BTW, this module is not about state of files involved in the diversion. 
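
# Editorial aside (not part of the deleted file): the else branch above is
# the key design point: dpkg-divert cannot edit a diversion in place, so a
# holder or divert change is a --remove followed by an --add. A sketch,
# assuming dpkg-divert >= 1.19.1 so --no-rename is available:
import subprocess

def update_diversion(path, holder, divert):
    subprocess.run(['dpkg-divert', '--no-rename', '--remove', path], check=True)
    cmd = ['dpkg-divert', '--no-rename']
    cmd += ['--package', holder] if holder != 'LOCAL' else ['--local']
    subprocess.run(cmd + ['--divert', divert, '--add', path], check=True)
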
- old = diversion_before['divert'] - new = diversion_wanted['divert'] - if new != old: - b_old = to_bytes(old, errors='surrogate_or_strict') - b_new = to_bytes(new, errors='surrogate_or_strict') - if os.path.exists(b_old) and not os.path.exists(b_new): - try: - os.rename(b_old, b_new) - except OSError as e: - pass - - if not module.check_mode: - diversion_after = diversion_state(module, DPKG_DIVERT, path) - - diversion = diversion_after.copy() - diff = dict() - if module._diff: - diff['before'] = diversion_before - diff['after'] = diversion_after - - if diversion_after != diversion_before: - changed = True - - if diversion_after == diversion_wanted: - module.exit_json(changed=changed, diversion=diversion, - commands=commands, messages=messages, diff=diff) - else: - msg = "Unexpected error: see stdout and stderr for details." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/facter.py b/plugins/modules/system/facter.py deleted file mode 100644 index abd2ebc3a7..0000000000 --- a/plugins/modules/system/facter.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: facter -short_description: Runs the discovery program I(facter) on the remote system -description: - - Runs the C(facter) discovery program - (U(https://github.com/puppetlabs/facter)) on the remote system, returning - JSON data that can be useful for inventory purposes. -options: - arguments: - description: - - Specifies arguments for facter. - type: list - elements: str -requirements: - - facter - - ruby-json -author: - - Ansible Core Team - - Michael DeHaan -''' - -EXAMPLES = ''' -# Example command-line invocation -# ansible www.example.net -m facter - -- name: Execute facter no arguments - community.general.facter: - -- name: Execute facter with arguments - community.general.facter: - arguments: - - -p - - system_uptime - - timezone - - is_virtual -''' -import json - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - arguments=dict(required=False, type='list', elements='str') - ) - ) - - facter_path = module.get_bin_path( - 'facter', - opt_dirs=['/opt/puppetlabs/bin']) - - cmd = [facter_path, "--json"] - if module.params['arguments']: - cmd += module.params['arguments'] - - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py deleted file mode 100644 index 4f1d6ee0d1..0000000000 --- a/plugins/modules/system/filesystem.py +++ /dev/null @@ -1,578 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, quidame -# Copyright: (c) 2013, Alexander Bulimov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: - - Alexander Bulimov (@abulimov) - - quidame (@quidame) -module: filesystem -short_description: Makes a filesystem -description: - - This module creates a filesystem. 
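
The facter module removed just above is essentially a thin wrapper; its core reduces to the sketch below (the fact names passed as arguments are illustrative):

import json
import subprocess

out = subprocess.run(['facter', '--json', 'os', 'kernel'],
                     capture_output=True, text=True, check=True).stdout
facts = json.loads(out)
print(sorted(facts))
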
-options: - state: - description: - - If C(state=present), the filesystem is created if it doesn't already - exist, that is the default behaviour if I(state) is omitted. - - If C(state=absent), filesystem signatures on I(dev) are wiped if it - contains a filesystem (as known by C(blkid)). - - When C(state=absent), all other options but I(dev) are ignored, and the - module doesn't fail if the device I(dev) doesn't actually exist. - type: str - choices: [ present, absent ] - default: present - version_added: 1.3.0 - fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] - description: - - Filesystem type to be created. This option is required with - C(state=present) (or if I(state) is omitted). - - ufs support has been added in community.general 3.4.0. - type: str - aliases: [type] - dev: - description: - - Target path to block device (Linux) or character device (FreeBSD) or - regular file (both). - - When setting Linux-specific filesystem types on FreeBSD, this module - only works when applying to regular files, aka disk images. - - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support - a regular file as their target I(dev). - - Support for character devices on FreeBSD has been added in community.general 3.4.0. - type: path - required: yes - aliases: [device] - force: - description: - - If C(yes), allows to create new filesystem on devices that already has filesystem. - type: bool - default: 'no' - resizefs: - description: - - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. - Attempts to resize other filesystem types will fail. - - XFS Will only grow if mounted. Currently, the module is based on commands - from C(util-linux) package to perform operations, so resizing of XFS is - not supported on FreeBSD systems. - - vFAT will likely fail if fatresize < 1.04. - type: bool - default: 'no' - opts: - description: - - List of options to be passed to mkfs command. - type: str -requirements: - - Uses specific tools related to the I(fstype) for creating or resizing a - filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). - - Uses generic tools mostly related to the Operating System (Linux or - FreeBSD) or available on both, as C(blkid). - - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. -notes: - - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) - is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also - unable to detect a filesystem), this filesystem is overwritten even if - I(force) is C(no). - - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide - a C(blkid) command that is compatible with this module. However, these - packages conflict with each other, and only the C(util-linux) package - provides the command required to not fail when I(state=absent). - - This module supports I(check_mode). 
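
For the resizefs comparison described above, the module needs the raw device size; Device.size() below distinguishes block devices from plain files. A condensed sketch of the Linux paths only:

import os
import stat
import subprocess

def device_size(path):
    mode = os.stat(path).st_mode
    if stat.S_ISBLK(mode):
        out = subprocess.run(['blockdev', '--getsize64', path],
                             capture_output=True, text=True, check=True).stdout
        return int(out)
    return os.path.getsize(path)  # regular file used as a disk image
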
-seealso: - - module: community.general.filesize - - module: ansible.posix.mount -''' - -EXAMPLES = ''' -- name: Create a ext2 filesystem on /dev/sdb1 - community.general.filesystem: - fstype: ext2 - dev: /dev/sdb1 - -- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks - community.general.filesystem: - fstype: ext4 - dev: /dev/sdb1 - opts: -cc - -- name: Blank filesystem signature on /dev/sdb1 - community.general.filesystem: - dev: /dev/sdb1 - state: absent - -- name: Create a filesystem on top of a regular file - community.general.filesystem: - dev: /path/to/disk.img - fstype: vfat -''' - -from distutils.version import LooseVersion -import os -import platform -import re -import stat - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -class Device(object): - def __init__(self, module, path): - self.module = module - self.path = path - - def size(self): - """ Return size in bytes of device. Returns int """ - statinfo = os.stat(self.path) - if stat.S_ISBLK(statinfo.st_mode): - blockdev_cmd = self.module.get_bin_path("blockdev", required=True) - dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) - devsize_in_bytes = int(out) - elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD': - diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True) - dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True) - devsize_in_bytes = int(out.split()[2]) - elif os.path.isfile(self.path): - devsize_in_bytes = os.path.getsize(self.path) - else: - self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) - - return devsize_in_bytes - - def get_mountpoint(self): - """Return (first) mountpoint of device. Returns None when not mounted.""" - cmd_findmnt = self.module.get_bin_path("findmnt", required=True) - - # find mountpoint - rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output", - "TARGET", "--source", self.path], check_rc=False) - if rc != 0: - mountpoint = None - else: - mountpoint = mountpoint.split('\n')[0] - - return mountpoint - - def __str__(self): - return self.path - - -class Filesystem(object): - - MKFS = None - MKFS_FORCE_FLAGS = [] - INFO = None - GROW = None - GROW_MAX_SPACE_FLAGS = [] - GROW_MOUNTPOINT_ONLY = False - - LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} - - def __init__(self, module): - self.module = module - - @property - def fstype(self): - return type(self).__name__ - - def get_fs_size(self, dev): - """Return size in bytes of filesystem on device (integer). - Should query the info with a per-fstype command that can access the - device whenever it is mounted or not, and parse the command output. - Parser must ensure to return an integer, or raise a ValueError. - """ - raise NotImplementedError() - - def create(self, opts, dev): - if self.module.check_mode: - return - - mkfs = self.module.get_bin_path(self.MKFS, required=True) - cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] - self.module.run_command(cmd, check_rc=True) - - def wipefs(self, dev): - if self.module.check_mode: - return - - # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above) - # that is ported to FreeBSD. 
The use of dd as a portable fallback is - # not doable here if it needs get_mountpoint() (to prevent corruption of - # a mounted filesystem), since 'findmnt' is not available on FreeBSD, - # even in util-linux port for this OS. - wipefs = self.module.get_bin_path('wipefs', required=True) - cmd = [wipefs, "--all", str(dev)] - self.module.run_command(cmd, check_rc=True) - - def grow_cmd(self, target): - """Build and return the resizefs commandline as list.""" - cmdline = [self.module.get_bin_path(self.GROW, required=True)] - cmdline += self.GROW_MAX_SPACE_FLAGS + [target] - return cmdline - - def grow(self, dev): - """Get dev and fs size and compare. Returns stdout of used command.""" - devsize_in_bytes = dev.size() - - try: - fssize_in_bytes = self.get_fs_size(dev) - except NotImplementedError: - self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype) - except ValueError as err: - self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err))) - self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev)) - - if not fssize_in_bytes < devsize_in_bytes: - self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) - elif self.module.check_mode: - self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev)) - - if self.GROW_MOUNTPOINT_ONLY: - mountpoint = dev.get_mountpoint() - if not mountpoint: - self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype)) - grow_target = mountpoint - else: - grow_target = str(dev) - - dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) - return out - - -class Ext(Filesystem): - MKFS_FORCE_FLAGS = ['-F'] - INFO = 'tune2fs' - GROW = 'resize2fs' - - def get_fs_size(self, dev): - """Get Block count and Block size and return their product.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - - block_count = block_size = None - for line in out.splitlines(): - if 'Block count:' in line: - block_count = int(line.split(':')[1].strip()) - elif 'Block size:' in line: - block_size = int(line.split(':')[1].strip()) - if None not in (block_size, block_count): - break - else: - raise ValueError(out) - - return block_size * block_count - - -class Ext2(Ext): - MKFS = 'mkfs.ext2' - - -class Ext3(Ext): - MKFS = 'mkfs.ext3' - - -class Ext4(Ext): - MKFS = 'mkfs.ext4' - - -class XFS(Filesystem): - MKFS = 'mkfs.xfs' - MKFS_FORCE_FLAGS = ['-f'] - INFO = 'xfs_info' - GROW = 'xfs_growfs' - GROW_MOUNTPOINT_ONLY = True - - def get_fs_size(self, dev): - """Get bsize and blocks and return their product.""" - cmdline = [self.module.get_bin_path(self.INFO, required=True)] - - # Depending on the versions, xfs_info is able to get info from the - # device, whenever it is mounted or not, or only if unmounted, or - # only if mounted, or not at all. For any version until now, it is - # able to query info from the mountpoint. So try it first, and use - # device as the last resort: it may or may not work. 
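
# Editorial aside (not part of the deleted file): Ext.get_fs_size() above
# multiplies 'Block count' by 'Block size' from `tune2fs -l` output; the
# same parse, sketched over an already-captured output string:
def ext_fs_size(tune2fs_output):
    info = {}
    for line in tune2fs_output.splitlines():
        key, sep, value = line.partition(':')
        if sep:
            info[key.strip()] = value.strip()
    return int(info['Block count']) * int(info['Block size'])
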
- mountpoint = dev.get_mountpoint() - if mountpoint: - cmdline += [mountpoint] - else: - cmdline += [str(dev)] - dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV) - - block_size = block_count = None - for line in out.splitlines(): - col = line.split('=') - if col[0].strip() == 'data': - if col[1].strip() == 'bsize': - block_size = int(col[2].split()[0]) - if col[2].split()[1] == 'blocks': - block_count = int(col[3].split(',')[0]) - if None not in (block_size, block_count): - break - else: - raise ValueError(out) - - return block_size * block_count - - -class Reiserfs(Filesystem): - MKFS = 'mkfs.reiserfs' - MKFS_FORCE_FLAGS = ['-q'] - - -class Btrfs(Filesystem): - MKFS = 'mkfs.btrfs' - - def __init__(self, module): - super(Btrfs, self).__init__(module) - mkfs = self.module.get_bin_path(self.MKFS, required=True) - dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True) - match = re.search(r" v([0-9.]+)", stdout) - if not match: - # v0.20-rc1 use stderr - match = re.search(r" v([0-9.]+)", stderr) - if match: - # v0.20-rc1 doesn't have --force parameter added in following version v3.12 - if LooseVersion(match.group(1)) >= LooseVersion('3.12'): - self.MKFS_FORCE_FLAGS = ['-f'] - else: - # assume version is greater or equal to 3.12 - self.MKFS_FORCE_FLAGS = ['-f'] - self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) - - -class Ocfs2(Filesystem): - MKFS = 'mkfs.ocfs2' - MKFS_FORCE_FLAGS = ['-Fx'] - - -class F2fs(Filesystem): - MKFS = 'mkfs.f2fs' - INFO = 'dump.f2fs' - GROW = 'resize.f2fs' - - def __init__(self, module): - super(F2fs, self).__init__(module) - mkfs = self.module.get_bin_path(self.MKFS, required=True) - dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV) - # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" - # mkfs.f2fs displays version since v1.2.0 - match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) - if match is not None: - # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem - # before that version -f switch wasn't used - if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): - self.MKFS_FORCE_FLAGS = ['-f'] - - def get_fs_size(self, dev): - """Get sector size and total FS sectors and return their product.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) - sector_size = sector_count = None - for line in out.splitlines(): - if 'Info: sector size = ' in line: - # expected: 'Info: sector size = 512' - sector_size = int(line.split()[4]) - elif 'Info: total FS sectors = ' in line: - # expected: 'Info: total FS sectors = 102400 (50 MB)' - sector_count = int(line.split()[5]) - if None not in (sector_size, sector_count): - break - else: - raise ValueError(out) - - return sector_size * sector_count - - -class VFAT(Filesystem): - INFO = 'fatresize' - GROW = 'fatresize' - GROW_MAX_SPACE_FLAGS = ['-s', 'max'] - - def __init__(self, module): - super(VFAT, self).__init__(module) - if platform.system() == 'FreeBSD': - self.MKFS = 'newfs_msdos' - else: - self.MKFS = 'mkfs.vfat' - - def get_fs_size(self, dev): - """Get and return size of filesystem, in bytes.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - fssize = None - for line in 
out.splitlines()[1:]: - param, value = line.split(':', 1) - if param.strip() == 'Size': - fssize = int(value.strip()) - break - else: - raise ValueError(out) - - return fssize - - -class LVM(Filesystem): - MKFS = 'pvcreate' - MKFS_FORCE_FLAGS = ['-f'] - INFO = 'pvs' - GROW = 'pvresize' - - def get_fs_size(self, dev): - """Get and return PV size, in bytes.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) - pv_size = int(size) - return pv_size - - -class Swap(Filesystem): - MKFS = 'mkswap' - MKFS_FORCE_FLAGS = ['-f'] - - -class UFS(Filesystem): - MKFS = 'newfs' - INFO = 'dumpfs' - GROW = 'growfs' - GROW_MAX_SPACE_FLAGS = ['-y'] - - def get_fs_size(self, dev): - """Get providersize and fragment size and return their product.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) - - fragmentsize = providersize = None - for line in out.splitlines(): - if line.startswith('fsize'): - fragmentsize = int(line.split()[1]) - elif 'providersize' in line: - providersize = int(line.split()[-1]) - if None not in (fragmentsize, providersize): - break - else: - raise ValueError(out) - - return fragmentsize * providersize - - -FILESYSTEMS = { - 'ext2': Ext2, - 'ext3': Ext3, - 'ext4': Ext4, - 'ext4dev': Ext4, - 'f2fs': F2fs, - 'reiserfs': Reiserfs, - 'xfs': XFS, - 'btrfs': Btrfs, - 'vfat': VFAT, - 'ocfs2': Ocfs2, - 'LVM2_member': LVM, - 'swap': Swap, - 'ufs': UFS, -} - - -def main(): - friendly_names = { - 'lvm': 'LVM2_member', - } - - fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys()) - - # There is no "single command" to manipulate filesystems, so we map them all out and their options - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - fstype=dict(type='str', aliases=['type'], choices=list(fstypes)), - dev=dict(type='path', required=True, aliases=['device']), - opts=dict(type='str'), - force=dict(type='bool', default=False), - resizefs=dict(type='bool', default=False), - ), - required_if=[ - ('state', 'present', ['fstype']) - ], - supports_check_mode=True, - ) - - state = module.params['state'] - dev = module.params['dev'] - fstype = module.params['fstype'] - opts = module.params['opts'] - force = module.params['force'] - resizefs = module.params['resizefs'] - - mkfs_opts = [] - if opts is not None: - mkfs_opts = opts.split() - - changed = False - - if not os.path.exists(dev): - msg = "Device %s not found." % dev - if state == "present": - module.fail_json(msg=msg) - else: - module.exit_json(msg=msg) - - dev = Device(module, dev) - - # In case blkid/fstyp isn't able to identify an existing filesystem, device - # is considered as empty, then this existing filesystem would be overwritten - # even if force isn't enabled. 
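
# Editorial aside (not part of the deleted file): the signature probe the
# comment above refers to is run just below via blkid. Standalone sketch:
import subprocess

def probe_fstype(dev):
    # An empty TYPE value means blkid found no recognizable signature.
    proc = subprocess.run(['blkid', '-o', 'value', '-s', 'TYPE', dev],
                          capture_output=True, text=True)
    return proc.stdout.strip() or None
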
- cmd = module.get_bin_path('blkid', required=True) - rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) - fs = raw_fs.strip() - if not fs and platform.system() == 'FreeBSD': - cmd = module.get_bin_path('fstyp', required=True) - rc, raw_fs, err = module.run_command([cmd, str(dev)]) - fs = raw_fs.strip() - - if state == "present": - if fstype in friendly_names: - fstype = friendly_names[fstype] - - try: - klass = FILESYSTEMS[fstype] - except KeyError: - module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype) - - filesystem = klass(module) - - same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] - if same_fs and not resizefs and not force: - module.exit_json(changed=False) - elif same_fs and resizefs: - if not filesystem.GROW: - module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype) - - out = filesystem.grow(dev) - - module.exit_json(changed=True, msg=out) - elif fs and not force: - module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err) - - # create fs - filesystem.create(mkfs_opts, dev) - changed = True - - elif fs: - # wipe fs signatures - filesystem = Filesystem(module) - filesystem.wipefs(dev) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py deleted file mode 100644 index 6b9ce71213..0000000000 --- a/plugins/modules/system/gconftool2.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kenneth D. Evensen -# Copyright: (c) 2017, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gconftool2 -author: - - Kenneth D. Evensen (@kevensen) -short_description: Edit GNOME Configurations -description: - - This module allows for the manipulation of GNOME 2 Configuration via - gconftool-2. Please see the gconftool-2(1) man pages for more details. -options: - key: - type: str - description: - - A GConf preference key is an element in the GConf repository - that corresponds to an application preference. See man gconftool-2(1) - required: yes - value: - type: str - description: - - Preference keys typically have simple values such as strings, - integers, or lists of strings and integers. This is ignored if the state - is "get". See man gconftool-2(1) - value_type: - type: str - description: - - The type of value being set. This is ignored if the state is "get". - choices: [ bool, float, int, string ] - state: - type: str - description: - - The action to take upon the key/value. - required: yes - choices: [ absent, get, present ] - config_source: - type: str - description: - - Specify a configuration source to use rather than the default path. - See man gconftool-2(1) - direct: - description: - - Access the config database directly, bypassing server. If direct is - specified then the config_source must be specified as well. 
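
A note on the gconftool-2 calls this module wraps: the deleted code below builds a shell string and runs it with use_unsafe_shell=True; passing a list argv sidesteps the quoting issues entirely. A hedged sketch of the set operation:

import subprocess

def gconf_set(key, value, value_type, config_source=None):
    cmd = ['gconftool-2']
    if config_source:
        # --direct requires an explicit configuration source
        cmd += ['--direct', '--config-source', config_source]
    cmd += ['--type', value_type, '--set', key, value]
    subprocess.run(cmd, check=True)

gconf_set('/desktop/gnome/interface/font_name', 'Serif 12', 'string')
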
- See man gconftool-2(1) - type: bool - default: 'no' -''' - -EXAMPLES = """ -- name: Change the widget font to "Serif 12" - community.general.gconftool2: - key: "/desktop/gnome/interface/font_name" - value_type: "string" - value: "Serif 12" -""" - -RETURN = ''' - key: - description: The key specified in the module parameters - returned: success - type: str - sample: /desktop/gnome/interface/font_name - value_type: - description: The type of the value that was changed - returned: success - type: str - sample: string - value: - description: The value of the preference key after executing the module - returned: success - type: str - sample: "Serif 12" -... -''' - -from ansible.module_utils.basic import AnsibleModule - - -class GConf2Preference(object): - def __init__(self, ansible, key, value_type, value, - direct=False, config_source=""): - self.ansible = ansible - self.key = key - self.value_type = value_type - self.value = value - self.config_source = config_source - self.direct = direct - - def value_already_set(self): - return False - - def call(self, call_type, fail_onerr=True): - """ Helper function to perform gconftool-2 operations """ - config_source = '' - direct = '' - changed = False - out = '' - - # If the configuration source is different from the default, create - # the argument - if self.config_source is not None and len(self.config_source) > 0: - config_source = "--config-source " + self.config_source - - # If direct is true, create the argument - if self.direct: - direct = "--direct" - - # Execute the call - cmd = "gconftool-2 " - try: - # If the call is "get", then we don't need as many parameters and - # we can ignore some - if call_type == 'get': - cmd += "--get {0}".format(self.key) - # Otherwise, we will use all relevant parameters - elif call_type == 'set': - cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct, - config_source, - self.value_type, - call_type, - self.key, - self.value) - elif call_type == 'unset': - cmd += "--unset {0}".format(self.key) - - # Start external command - rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True) - - if len(err) > 0: - if fail_onerr: - self.ansible.fail_json(msg='gconftool-2 failed with ' - 'error: %s' % (str(err))) - else: - changed = True - - except OSError as exception: - self.ansible.fail_json(msg='gconftool-2 failed with exception: ' - '%s' % exception) - return changed, out.rstrip() - - -def main(): - # Setup the Ansible module - module = AnsibleModule( - argument_spec=dict( - key=dict(type='str', required=True, no_log=False), - value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), - value=dict(type='str'), - state=dict(type='str', required=True, choices=['absent', 'get', 'present']), - direct=dict(type='bool', default=False), - config_source=dict(type='str'), - ), - supports_check_mode=True - ) - - state_values = {"present": "set", "absent": "unset", "get": "get"} - - # Assign module values to dictionary values - key = module.params['key'] - value_type = module.params['value_type'] - if module.params['value'].lower() == "true": - value = "true" - elif module.params['value'] == "false": - value = "false" - else: - value = module.params['value'] - - state = state_values[module.params['state']] - direct = module.params['direct'] - config_source = module.params['config_source'] - - # Initialize some variables for later - change = False - new_value = '' - - if state != "get": - if value is None or value == "": - module.fail_json(msg='State %s requires "value" to be set' - % str(state)) 
- elif value_type is None or value_type == "": - module.fail_json(msg='State %s requires "value_type" to be set' - % str(state)) - - if direct and config_source is None: - module.fail_json(msg='If "direct" is "yes" then the ' + - '"config_source" must be specified') - elif not direct and config_source is not None: - module.fail_json(msg='If the "config_source" is specified ' + - 'then "direct" must be "yes"') - - # Create a gconf2 preference - gconf_pref = GConf2Preference(module, key, value_type, - value, direct, config_source) - # Now we get the current value, if not found don't fail - dummy, current_value = gconf_pref.call("get", fail_onerr=False) - - # Check if the current value equals the value we want to set. If not, make - # a change - if current_value != value: - # If check mode, we know a change would have occurred. - if module.check_mode: - # So we will set the change to True - change = True - # And set the new_value to the value that would have been set - new_value = value - # If not check mode make the change. - else: - change, new_value = gconf_pref.call(state) - # If the value we want to set is the same as the current_value, we will - # set the new_value to the current_value for reporting - else: - new_value = current_value - - facts = dict(gconftool2={'changed': change, - 'key': key, - 'value_type': value_type, - 'new_value': new_value, - 'previous_value': current_value, - 'playbook_value': module.params['value']}) - - module.exit_json(changed=change, ansible_facts=facts) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py deleted file mode 100644 index 7666ba1cbc..0000000000 --- a/plugins/modules/system/interfaces_file.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Roman Belyakovsky -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: interfaces_file -short_description: Tweak settings in /etc/network/interfaces files -extends_documentation_fragment: files -description: - - Manage (add, remove, change) individual interface options in an interfaces-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. - - Read information about interfaces from interfaces-styled files -options: - dest: - type: path - description: - - Path to the interfaces file - default: /etc/network/interfaces - iface: - type: str - description: - - Name of the interface, required for value changes or option remove - address_family: - type: str - description: - - Address family of the interface, useful if same interface name is used for both inet and inet6 - option: - type: str - description: - - Name of the option, required for value changes or option remove - value: - type: str - description: - - If I(option) is not presented for the I(interface) and I(state) is C(present) option will be added. - If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), it's value will be updated. 
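
The interfaces_file module below parses stanza option lines with a named-group regex (truncated at the end of this excerpt). The pattern in this sketch is an assumption about its shape, shown only to illustrate the approach:

import re

# Hypothetical reconstruction; the original pattern is cut off below.
OPTION_RE = re.compile(r'^\s+(?P<option>\S+)\s+(?P<value>\S.*\S|\S)\s*$')

match = OPTION_RE.match('    mtu 8000')
if match:
    print(match.group('option'), match.group('value'))  # mtu 8000
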
- C(pre-up), C(up), C(post-up) and C(down) options can't be updated, only adding new options, removing existing - ones or cleaning the whole option set are supported - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - type: bool - default: 'no' - state: - type: str - description: - - If set to C(absent) the option or section will be removed if present instead of created. - default: "present" - choices: [ "present", "absent" ] - -notes: - - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state -requirements: [] -author: "Roman Belyakovsky (@hryamzik)" -''' - -RETURN = ''' -dest: - description: destination file/path - returned: success - type: str - sample: "/etc/network/interfaces" -ifaces: - description: interfaces dictionary - returned: success - type: complex - contains: - ifaces: - description: interface dictionary - returned: success - type: dict - contains: - eth0: - description: Name of the interface - returned: success - type: dict - contains: - address_family: - description: interface address family - returned: success - type: str - sample: "inet" - method: - description: interface method - returned: success - type: str - sample: "manual" - mtu: - description: other options, all values returned as strings - returned: success - type: str - sample: "1500" - pre-up: - description: list of C(pre-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - up: - description: list of C(up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - post-up: - description: list of C(post-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - down: - description: list of C(down) scripts - returned: success - type: list - sample: - - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" -... -''' - -EXAMPLES = ''' -- name: Set eth1 mtu configuration value to 8000 - community.general.interfaces_file: - dest: /etc/network/interfaces.d/eth1.cfg - iface: eth1 - option: mtu - value: 8000 - backup: yes - state: present - register: eth1_cfg -''' - -import os -import re -import tempfile - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes - - -def line_dict(line): - return {'line': line, 'line_type': 'unknown'} - - -def make_option_dict(line, iface, option, value, address_family): - return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} - - -def get_option_value(line): - patt = re.compile(r'^\s+(?P